// SPDX-License-Identifier: GPL-2.0-only
/*
 *	linux/kernel/resource.c
 *
 * Copyright (C) 1999	Linus Torvalds
 * Copyright (C) 1999	Martin Mares <mj@ucw.cz>
 *
 * Arbitrary resource management.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/pseudo_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/resource_ext.h>
#include <uapi/linux/magic.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <asm/io.h>


struct resource ioport_resource = {
	.name	= "PCI IO",
	.start	= 0,
	.end	= IO_SPACE_LIMIT,
	.flags	= IORESOURCE_IO,
};
EXPORT_SYMBOL(ioport_resource);

struct resource iomem_resource = {
	.name	= "PCI mem",
	.start	= 0,
	.end	= -1,
	.flags	= IORESOURCE_MEM,
};
EXPORT_SYMBOL(iomem_resource);

static DEFINE_RWLOCK(resource_lock);

/*
 * Return the next node of @p in pre-order tree traversal.  If
 * @skip_children is true, skip the descendant nodes of @p in
 * traversal.  If @p is a descendant of @subtree_root, only traverse
 * the subtree under @subtree_root.
 */
static struct resource *next_resource(struct resource *p, bool skip_children,
				      struct resource *subtree_root)
{
	if (!skip_children && p->child)
		return p->child;
	while (!p->sibling && p->parent) {
		p = p->parent;
		if (p == subtree_root)
			return NULL;
	}
	return p->sibling;
}

/*
 * Traverse the resource subtree under @_root in pre-order, excluding
 * @_root itself.
 *
 * NOTE: '__p' is introduced to avoid shadowing '_p' outside of loop.
 * And it is referenced to avoid unused variable warning.
 */
#define for_each_resource(_root, _p, _skip_children)			\
	for (typeof(_root) __root = (_root), __p = _p = __root->child;	\
	     __p && _p; _p = next_resource(_p, _skip_children, __root))

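/*
 * Illustrative example (editor's sketch, hypothetical helper name): a
 * pre-order dump of a subtree using for_each_resource(). The caller must
 * hold resource_lock because the macro chases child/sibling/parent
 * pointers, mirroring the r_start() pattern below.
 *
 *	static void dump_subtree(struct resource *root)
 *	{
 *		struct resource *p;
 *
 *		read_lock(&resource_lock);
 *		for_each_resource(root, p, false)
 *			pr_info("%s: %pR\n", p->name ?: "<unnamed>", p);
 *		read_unlock(&resource_lock);
 *	}
 */
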
#ifdef CONFIG_PROC_FS

enum { MAX_IORES_LEVEL = 5 };

static void *r_start(struct seq_file *m, loff_t *pos)
	__acquires(resource_lock)
{
	struct resource *root = pde_data(file_inode(m->file));
	struct resource *p;
	loff_t l = *pos;

	read_lock(&resource_lock);
	for_each_resource(root, p, false) {
		if (l-- == 0)
			break;
	}

	return p;
}

static void *r_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct resource *p = v;

	(*pos)++;

	return (void *)next_resource(p, false, NULL);
}

static void r_stop(struct seq_file *m, void *v)
	__releases(resource_lock)
{
	read_unlock(&resource_lock);
}

static int r_show(struct seq_file *m, void *v)
{
	struct resource *root = pde_data(file_inode(m->file));
	struct resource *r = v, *p;
	unsigned long long start, end;
	int width = root->end < 0x10000 ? 4 : 8;
	int depth;

	for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
		if (p->parent == root)
			break;

	if (file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN)) {
		start = r->start;
		end = r->end;
	} else {
		start = end = 0;
	}

	seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
			depth * 2, "",
			width, start,
			width, end,
			r->name ? r->name : "<BAD>");
	return 0;
}

static const struct seq_operations resource_op = {
	.start	= r_start,
	.next	= r_next,
	.stop	= r_stop,
	.show	= r_show,
};

static int __init ioresources_init(void)
{
	proc_create_seq_data("ioports", 0, NULL, &resource_op,
			&ioport_resource);
	proc_create_seq_data("iomem", 0, NULL, &resource_op, &iomem_resource);
	return 0;
}
__initcall(ioresources_init);

#endif /* CONFIG_PROC_FS */

static void free_resource(struct resource *res)
{
	/*
	 * If the resource was allocated using memblock early during boot
	 * we'll leak it here: we can only return full pages back to the
	 * buddy and trying to be smart and reusing them eventually in
	 * alloc_resource() overcomplicates resource handling.
	 */
	if (res && PageSlab(virt_to_head_page(res)))
		kfree(res);
}

static struct resource *alloc_resource(gfp_t flags)
{
	return kzalloc(sizeof(struct resource), flags);
}

/* Return the conflict entry if you can't request it */
static struct resource * __request_resource(struct resource *root, struct resource *new)
{
	resource_size_t start = new->start;
	resource_size_t end = new->end;
	struct resource *tmp, **p;

	if (end < start)
		return root;
	if (start < root->start)
		return root;
	if (end > root->end)
		return root;
	p = &root->child;
	for (;;) {
		tmp = *p;
		if (!tmp || tmp->start > end) {
			new->sibling = tmp;
			*p = new;
			new->parent = root;
			return NULL;
		}
		p = &tmp->sibling;
		if (tmp->end < start)
			continue;
		return tmp;
	}
}

static int __release_resource(struct resource *old, bool release_child)
{
	struct resource *tmp, **p, *chd;

	p = &old->parent->child;
	for (;;) {
		tmp = *p;
		if (!tmp)
			break;
		if (tmp == old) {
			if (release_child || !(tmp->child)) {
				*p = tmp->sibling;
			} else {
				for (chd = tmp->child;; chd = chd->sibling) {
					chd->parent = tmp->parent;
					if (!(chd->sibling))
						break;
				}
				*p = tmp->child;
				chd->sibling = tmp->sibling;
			}
			old->parent = NULL;
			return 0;
		}
		p = &tmp->sibling;
	}
	return -EINVAL;
}

static void __release_child_resources(struct resource *r)
{
	struct resource *tmp, *p;
	resource_size_t size;

	p = r->child;
	r->child = NULL;
	while (p) {
		tmp = p;
		p = p->sibling;

		tmp->parent = NULL;
		tmp->sibling = NULL;
		__release_child_resources(tmp);

		printk(KERN_DEBUG "release child resource %pR\n", tmp);
		/* need to restore size, and keep flags */
		size = resource_size(tmp);
		tmp->start = 0;
		tmp->end = size - 1;
	}
}

void release_child_resources(struct resource *r)
{
	write_lock(&resource_lock);
	__release_child_resources(r);
	write_unlock(&resource_lock);
}

/**
 * request_resource_conflict - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns NULL for success, the conflicting resource on error.
 */
struct resource *request_resource_conflict(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __request_resource(root, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * request_resource - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns 0 for success, negative error code on error.
 */
int request_resource(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	conflict = request_resource_conflict(root, new);
	return conflict ? -EBUSY : 0;
}

EXPORT_SYMBOL(request_resource);

/**
 * release_resource - release a previously reserved resource
 * @old: resource pointer
 */
int release_resource(struct resource *old)
{
	int retval;

	write_lock(&resource_lock);
	retval = __release_resource(old, true);
	write_unlock(&resource_lock);
	return retval;
}

EXPORT_SYMBOL(release_resource);

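/*
 * Illustrative example (editor's sketch, made-up addresses): reserving a
 * fixed MMIO window under iomem_resource with request_resource(), then
 * releasing it. Real drivers usually go through request_mem_region()
 * instead; the descriptor must outlive the reservation.
 *
 *	static struct resource my_res = {
 *		.name  = "my-device",
 *		.start = 0xfed40000,
 *		.end   = 0xfed40fff,
 *		.flags = IORESOURCE_MEM,
 *	};
 *
 *	if (request_resource(&iomem_resource, &my_res))
 *		return -EBUSY;	// range already claimed
 *	...
 *	release_resource(&my_res);
 */
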
static bool is_type_match(struct resource *p, unsigned long flags, unsigned long desc)
{
	return (p->flags & flags) == flags && (desc == IORES_DESC_NONE || desc == p->desc);
}

/**
 * find_next_iomem_res - Finds the lowest iomem resource that covers part of
 *			 [@start..@end].
 *
 * If a resource is found, returns 0 and @*res is overwritten with the part
 * of the resource that's within [@start..@end]; if none is found, returns
 * -ENODEV.  Returns -EINVAL for invalid parameters.
 *
 * @start:	start address of the resource searched for
 * @end:	end address of same resource
 * @flags:	flags which the resource must have
 * @desc:	descriptor the resource must have
 * @res:	return ptr, if resource found
 *
 * The caller must specify @start, @end, @flags, and @desc
 * (which may be IORES_DESC_NONE).
 */
static int find_next_iomem_res(resource_size_t start, resource_size_t end,
			       unsigned long flags, unsigned long desc,
			       struct resource *res)
{
	struct resource *p;

	if (!res)
		return -EINVAL;

	if (start >= end)
		return -EINVAL;

	read_lock(&resource_lock);

	for_each_resource(&iomem_resource, p, false) {
		/* If we passed the resource we are looking for, stop */
		if (p->start > end) {
			p = NULL;
			break;
		}

		/* Skip until we find a range that matches what we look for */
		if (p->end < start)
			continue;

		/* Found a match, break */
		if (is_type_match(p, flags, desc))
			break;
	}

	if (p) {
		/* copy data */
		*res = (struct resource) {
			.start = max(start, p->start),
			.end = min(end, p->end),
			.flags = p->flags,
			.desc = p->desc,
			.parent = p->parent,
		};
	}

	read_unlock(&resource_lock);
	return p ? 0 : -ENODEV;
}

static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
				 unsigned long flags, unsigned long desc,
				 void *arg,
				 int (*func)(struct resource *, void *))
{
	struct resource res;
	int ret = -EINVAL;

	while (start < end &&
	       !find_next_iomem_res(start, end, flags, desc, &res)) {
		ret = (*func)(&res, arg);
		if (ret)
			break;

		start = res.end + 1;
	}

	return ret;
}

/**
 * walk_iomem_res_desc - Walks through iomem resources and calls func()
 *			 with matching resource ranges.
 * @desc: I/O resource descriptor. Use IORES_DESC_NONE to skip @desc check.
 * @flags: I/O resource flags
 * @start: start addr
 * @end: end addr
 * @arg: function argument for the callback @func
 * @func: callback function that is called for each qualifying resource area
 *
 * All the memory ranges which overlap [@start..@end] and also match @flags
 * and @desc are valid candidates.
 *
 * NOTE: For a new descriptor search, define a new IORES_DESC in
 * <linux/ioport.h> and set it in 'desc' of a target resource entry.
 */
int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
		u64 end, void *arg, int (*func)(struct resource *, void *))
{
	return __walk_iomem_res_desc(start, end, flags, desc, arg, func);
}
EXPORT_SYMBOL_GPL(walk_iomem_res_desc);

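/*
 * Illustrative example (editor's sketch): counting matching ranges with
 * walk_iomem_res_desc(). The callback matches the @func signature above;
 * the descriptor choice is only an example.
 *
 *	static int count_res(struct resource *res, void *arg)
 *	{
 *		unsigned int *count = arg;
 *
 *		(*count)++;
 *		return 0;	// non-zero would stop the walk
 *	}
 *
 *	unsigned int count = 0;
 *	walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, IORESOURCE_MEM, 0, -1,
 *			    &count, count_res);
 */
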
/*
 * This function calls the @func callback against all memory ranges of type
 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
 * Now, this function is only for System RAM, it deals with full ranges and
 * not PFNs. If resources are not PFN-aligned, dealing with PFNs can truncate
 * ranges.
 */
int walk_system_ram_res(u64 start, u64 end, void *arg,
			int (*func)(struct resource *, void *))
{
	unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg,
				     func);
}

/*
 * This function, being a variant of walk_system_ram_res(), calls the @func
 * callback against all memory ranges of type System RAM which are marked as
 * IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY in reversed order, i.e., from
 * higher to lower.
 */
int walk_system_ram_res_rev(u64 start, u64 end, void *arg,
			    int (*func)(struct resource *, void *))
{
	struct resource res, *rams;
	int rams_size = 16, i;
	unsigned long flags;
	int ret = -1;

	/* create a list */
	rams = kvcalloc(rams_size, sizeof(struct resource), GFP_KERNEL);
	if (!rams)
		return ret;

	flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	i = 0;
	while ((start < end) &&
	       (!find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res))) {
		if (i >= rams_size) {
			/* re-alloc */
			struct resource *rams_new;

			rams_new = kvrealloc(rams, (rams_size + 16) * sizeof(struct resource),
					     GFP_KERNEL);
			if (!rams_new)
				goto out;

			rams = rams_new;
			rams_size += 16;
		}

		rams[i++] = res;
		start = res.end + 1;
	}

	/* go reverse */
	for (i--; i >= 0; i--) {
		ret = (*func)(&rams[i], arg);
		if (ret)
			break;
	}

out:
	kvfree(rams);
	return ret;
}

/*
 * This function calls the @func callback against all memory ranges, which
 * are ranges marked as IORESOURCE_MEM and IORESOURCE_BUSY.
 */
int walk_mem_res(u64 start, u64 end, void *arg,
		 int (*func)(struct resource *, void *))
{
	unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;

	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg,
				     func);
}

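/*
 * Illustrative example (editor's sketch, hypothetical callback name):
 * summing busy System RAM in a window with walk_system_ram_res().
 *
 *	static int sum_ram(struct resource *res, void *arg)
 *	{
 *		u64 *total = arg;
 *
 *		*total += resource_size(res);
 *		return 0;
 *	}
 *
 *	u64 total = 0;
 *	walk_system_ram_res(0, -1, &total, sum_ram);
 */
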
/*
 * This function calls the @func callback against all memory ranges of type
 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
 * It is to be used only for System RAM.
 */
int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
			  void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	resource_size_t start, end;
	unsigned long flags;
	struct resource res;
	unsigned long pfn, end_pfn;
	int ret = -EINVAL;

	start = (u64) start_pfn << PAGE_SHIFT;
	end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
	flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	while (start < end &&
	       !find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res)) {
		pfn = PFN_UP(res.start);
		end_pfn = PFN_DOWN(res.end + 1);
		if (end_pfn > pfn)
			ret = (*func)(pfn, end_pfn - pfn, arg);
		if (ret)
			break;
		start = res.end + 1;
	}
	return ret;
}

static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	return 1;
}

/*
 * This generic page_is_ram() returns true if specified address is
 * registered as System RAM in iomem_resource list.
 */
int __weak page_is_ram(unsigned long pfn)
{
	return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
}
EXPORT_SYMBOL_GPL(page_is_ram);

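/*
 * Illustrative example (editor's sketch): page_is_ram() only consults the
 * iomem_resource tree, so it is a cheap way to reject non-RAM pfns before
 * touching any struct page state.
 *
 *	unsigned long pfn = PHYS_PFN(phys_addr);
 *
 *	if (!page_is_ram(pfn))
 *		return -EINVAL;	// not System RAM, don't treat as a page
 */
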
static int __region_intersects(struct resource *parent, resource_size_t start,
			       size_t size, unsigned long flags,
			       unsigned long desc)
{
	int type = 0; int other = 0;
	struct resource *p, *dp;
	struct resource res, o;
	bool covered;

	res.start = start;
	res.end = start + size - 1;

	for (p = parent->child; p ; p = p->sibling) {
		if (!resource_intersection(p, &res, &o))
			continue;
		if (is_type_match(p, flags, desc)) {
			type++;
			continue;
		}
		/*
		 * Continue to search in descendant resources as if the
		 * matched descendant resources cover some ranges of 'p'.
		 *
		 * |------------- "CXL Window 0" ------------|
		 * |-- "System RAM" --|
		 *
		 * will behave similar as the following fake resource
		 * tree when searching "System RAM".
		 *
		 * |-- "System RAM" --||-- "CXL Window 0a" --|
		 */
		covered = false;
		for_each_resource(p, dp, false) {
			if (!resource_overlaps(dp, &res))
				continue;
			if (is_type_match(dp, flags, desc)) {
				type++;
				/*
				 * Range from 'o.start' to 'dp->start'
				 * isn't covered by matched resource.
				 */
				if (dp->start > o.start)
					break;
				if (dp->end >= o.end) {
					covered = true;
					break;
				}
				/* Remove covered range */
				o.start = max(o.start, dp->end + 1);
			}
		}
		if (!covered)
			other++;
	}

	if (type == 0)
		return REGION_DISJOINT;

	if (other == 0)
		return REGION_INTERSECTS;

	return REGION_MIXED;
}

/**
 * region_intersects() - determine intersection of region with known resources
 * @start: region start address
 * @size: size of region
 * @flags: flags of resource (in iomem_resource)
 * @desc: descriptor of resource (in iomem_resource) or IORES_DESC_NONE
 *
 * Check if the specified region partially overlaps or fully eclipses a
 * resource identified by @flags and @desc (optional with IORES_DESC_NONE).
 * Return REGION_DISJOINT if the region does not overlap @flags/@desc,
 * return REGION_MIXED if the region overlaps @flags/@desc and another
 * resource, and return REGION_INTERSECTS if the region overlaps @flags/@desc
 * and no other defined resource. Note that REGION_INTERSECTS is also
 * returned in the case when the specified region overlaps RAM and undefined
 * memory holes.
 *
 * region_intersects() is used by memory remapping functions to ensure
 * the user is not remapping RAM and is a vast speed up over walking
 * through the resource table page by page.
 */
int region_intersects(resource_size_t start, size_t size, unsigned long flags,
		      unsigned long desc)
{
	int ret;

	read_lock(&resource_lock);
	ret = __region_intersects(&iomem_resource, start, size, flags, desc);
	read_unlock(&resource_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(region_intersects);

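/*
 * Illustrative example (editor's sketch): the typical remapping-guard
 * pattern built on region_intersects(), refusing to remap anything that
 * touches System RAM.
 *
 *	if (region_intersects(phys, size, IORESOURCE_SYSTEM_RAM,
 *			      IORES_DESC_NONE) != REGION_DISJOINT)
 *		return -EINVAL;	// range overlaps RAM, do not remap
 */
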
void __weak arch_remove_reservations(struct resource *avail)
{
}

static void resource_clip(struct resource *res, resource_size_t min,
			  resource_size_t max)
{
	if (res->start < min)
		res->start = min;
	if (res->end > max)
		res->end = max;
}

/*
 * Find empty space in the resource tree with the given range and
 * alignment constraints
 */
static int __find_resource_space(struct resource *root, struct resource *old,
				 struct resource *new, resource_size_t size,
				 struct resource_constraint *constraint)
{
	struct resource *this = root->child;
	struct resource tmp = *new, avail, alloc;
	resource_alignf alignf = constraint->alignf;

	tmp.start = root->start;
	/*
	 * Skip past an allocated resource that starts at 0, since the
	 * assignment of this->start - 1 to tmp->end below would cause an
	 * underflow.
	 */
	if (this && this->start == root->start) {
		tmp.start = (this == old) ? old->start : this->end + 1;
		this = this->sibling;
	}
	for (;;) {
		if (this)
			tmp.end = (this == old) ? this->end : this->start - 1;
		else
			tmp.end = root->end;

		if (tmp.end < tmp.start)
			goto next;

		resource_clip(&tmp, constraint->min, constraint->max);
		arch_remove_reservations(&tmp);

		/* Check for overflow after ALIGN() */
		avail.start = ALIGN(tmp.start, constraint->align);
		avail.end = tmp.end;
		avail.flags = new->flags & ~IORESOURCE_UNSET;
		if (avail.start >= tmp.start) {
			alloc.flags = avail.flags;
			if (alignf) {
				alloc.start = alignf(constraint->alignf_data,
						     &avail, size, constraint->align);
			} else {
				alloc.start = avail.start;
			}
			alloc.end = alloc.start + size - 1;
			if (alloc.start <= alloc.end &&
			    resource_contains(&avail, &alloc)) {
				new->start = alloc.start;
				new->end = alloc.end;
				return 0;
			}
		}

next:		if (!this || this->end == root->end)
			break;

		if (this != old)
			tmp.start = this->end + 1;
		this = this->sibling;
	}
	return -EBUSY;
}

/**
 * find_resource_space - Find empty space in the resource tree
 * @root:	Root resource descriptor
 * @new:	Resource descriptor awaiting an empty resource space
 * @size:	The minimum size of the empty space
 * @constraint:	The range and alignment constraints to be met
 *
 * Finds an empty space under @root in the resource tree satisfying range and
 * alignment @constraints.
 *
 * Return:
 * * %0		- if successful, @new members start, end, and flags are altered.
 * * %-EBUSY	- if no empty space was found.
 */
int find_resource_space(struct resource *root, struct resource *new,
			resource_size_t size,
			struct resource_constraint *constraint)
{
	return __find_resource_space(root, NULL, new, size, constraint);
}
EXPORT_SYMBOL_GPL(find_resource_space);

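/*
 * Illustrative example (editor's sketch, made-up constraints): asking for
 * a 64 KiB, 64 KiB-aligned hole in the low 4 GiB. On success only @new's
 * start/end/flags are filled in; the caller still has to request or
 * insert the resource itself.
 *
 *	struct resource_constraint constraint = {
 *		.min   = 0,
 *		.max   = 0xffffffff,
 *		.align = SZ_64K,
 *	};
 *	struct resource new = { .flags = IORESOURCE_MEM };
 *
 *	if (find_resource_space(&iomem_resource, &new, SZ_64K, &constraint))
 *		return -EBUSY;
 */
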
/**
 * reallocate_resource - allocate a slot in the resource tree given range & alignment.
 *	The resource will be relocated if the new size cannot be reallocated in the
 *	current location.
 *
 * @root: root resource descriptor
 * @old:  resource descriptor desired by caller
 * @newsize: new size of the resource descriptor
 * @constraint: the memory range and alignment constraints to be met.
 */
static int reallocate_resource(struct resource *root, struct resource *old,
			       resource_size_t newsize,
			       struct resource_constraint *constraint)
{
	int err = 0;
	struct resource new = *old;
	struct resource *conflict;

	write_lock(&resource_lock);

	err = __find_resource_space(root, old, &new, newsize, constraint);
	if (err)
		goto out;

	if (resource_contains(&new, old)) {
		old->start = new.start;
		old->end = new.end;
		goto out;
	}

	if (old->child) {
		err = -EBUSY;
		goto out;
	}

	if (resource_contains(old, &new)) {
		old->start = new.start;
		old->end = new.end;
	} else {
		__release_resource(old, true);
		*old = new;
		conflict = __request_resource(root, old);
		BUG_ON(conflict);
	}
out:
	write_unlock(&resource_lock);
	return err;
}

/**
 * allocate_resource - allocate empty slot in the resource tree given range & alignment.
 *	The resource will be reallocated with a new size if it was already allocated
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 * @size: requested resource region size
 * @min: minimum boundary to allocate
 * @max: maximum boundary to allocate
 * @align: alignment requested, in bytes
 * @alignf: alignment function, optional, called if not NULL
 * @alignf_data: arbitrary data to pass to the @alignf function
 */
int allocate_resource(struct resource *root, struct resource *new,
		      resource_size_t size, resource_size_t min,
		      resource_size_t max, resource_size_t align,
		      resource_alignf alignf,
		      void *alignf_data)
{
	int err;
	struct resource_constraint constraint;

	constraint.min = min;
	constraint.max = max;
	constraint.align = align;
	constraint.alignf = alignf;
	constraint.alignf_data = alignf_data;

	if (new->parent) {
		/*
		 * Resource is already allocated; try reallocating with
		 * the new constraints.
		 */
		return reallocate_resource(root, new, size, &constraint);
	}

	write_lock(&resource_lock);
	err = find_resource_space(root, new, size, &constraint);
	if (err >= 0 && __request_resource(root, new))
		err = -EBUSY;
	write_unlock(&resource_lock);
	return err;
}

EXPORT_SYMBOL(allocate_resource);

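/*
 * Illustrative example (editor's sketch, made-up sizes): letting the core
 * pick a 1 MiB, 1 MiB-aligned slot anywhere under iomem_resource. For a
 * fresh allocation @new must not be in the tree yet (parent == NULL); on
 * success it is already requested under @root with the chosen range.
 *
 *	static struct resource my_win = {
 *		.name  = "my-window",
 *		.flags = IORESOURCE_MEM,
 *	};
 *
 *	err = allocate_resource(&iomem_resource, &my_win, SZ_1M,
 *				0, (resource_size_t)-1, SZ_1M, NULL, NULL);
 */
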
/**
 * lookup_resource - find an existing resource by a resource start address
 * @root: root resource descriptor
 * @start: resource start address
 *
 * Returns a pointer to the resource if found, NULL otherwise
 */
struct resource *lookup_resource(struct resource *root, resource_size_t start)
{
	struct resource *res;

	read_lock(&resource_lock);
	for (res = root->child; res; res = res->sibling) {
		if (res->start == start)
			break;
	}
	read_unlock(&resource_lock);

	return res;
}

/*
 * Insert a resource into the resource tree. If successful, return NULL,
 * otherwise return the conflicting resource (compare to __request_resource())
 */
static struct resource * __insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *first, *next;

	for (;; parent = first) {
		first = __request_resource(parent, new);
		if (!first)
			return first;

		if (first == parent)
			return first;
		if (WARN_ON(first == new))	/* duplicated insertion */
			return first;

		if ((first->start > new->start) || (first->end < new->end))
			break;
		if ((first->start == new->start) && (first->end == new->end))
			break;
	}

	for (next = first; ; next = next->sibling) {
		/* Partial overlap? Bad, and unfixable */
		if (next->start < new->start || next->end > new->end)
			return next;
		if (!next->sibling)
			break;
		if (next->sibling->start > new->end)
			break;
	}

	new->parent = parent;
	new->sibling = next->sibling;
	new->child = first;

	next->sibling = NULL;
	for (next = first; next; next = next->sibling)
		next->parent = new;

	if (parent->child == first) {
		parent->child = new;
	} else {
		next = parent->child;
		while (next->sibling != first)
			next = next->sibling;
		next->sibling = new;
	}
	return NULL;
}

/**
 * insert_resource_conflict - Inserts resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns NULL on success, the conflicting resource if the resource can't be
 * inserted.
 *
 * This function is equivalent to request_resource_conflict when no conflict
 * happens. If a conflict happens, and the conflicting resources
 * entirely fit within the range of the new resource, then the new
 * resource is inserted and the conflicting resources become children of
 * the new resource.
 *
 * This function is intended for producers of resources, such as FW modules
 * and bus drivers.
 */
struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __insert_resource(parent, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * insert_resource - Inserts a resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns 0 on success, -EBUSY if the resource can't be inserted.
 *
 * This function is intended for producers of resources, such as FW modules
 * and bus drivers.
 */
int insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	conflict = insert_resource_conflict(parent, new);
	return conflict ? -EBUSY : 0;
}
EXPORT_SYMBOL_GPL(insert_resource);

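/*
 * Illustrative example (editor's sketch, made-up addresses): unlike
 * request_resource(), insert_resource() turns existing entries that fit
 * entirely inside @new into children of @new instead of failing with
 * -EBUSY. That suits a bus driver publishing a window whose devices were
 * discovered first.
 *
 *	static struct resource host_bridge_win = {
 *		.name  = "host-bridge",
 *		.start = 0xc0000000,
 *		.end   = 0xcfffffff,
 *		.flags = IORESOURCE_MEM,
 *	};
 *
 *	err = insert_resource(&iomem_resource, &host_bridge_win);
 */
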
/**
 * insert_resource_expand_to_fit - Insert a resource into the resource tree
 * @root: root resource descriptor
 * @new: new resource to insert
 *
 * Insert a resource into the resource tree, possibly expanding it in order
 * to make it encompass any conflicting resources.
 */
void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
{
	if (new->parent)
		return;

	write_lock(&resource_lock);
	for (;;) {
		struct resource *conflict;

		conflict = __insert_resource(root, new);
		if (!conflict)
			break;
		if (conflict == root)
			break;

		/* Ok, expand resource to cover the conflict, then try again .. */
		if (conflict->start < new->start)
			new->start = conflict->start;
		if (conflict->end > new->end)
			new->end = conflict->end;

		pr_info("Expanded resource %s due to conflict with %s\n",
			new->name, conflict->name);
	}
	write_unlock(&resource_lock);
}
/*
 * Not for general consumption, only early boot memory map parsing, PCI
 * resource discovery, and late discovery of CXL resources are expected
 * to use this interface. The former are built-in and only the latter,
 * CXL, is a module.
 */
EXPORT_SYMBOL_NS_GPL(insert_resource_expand_to_fit, "CXL");

/**
 * remove_resource - Remove a resource in the resource tree
 * @old: resource to remove
 *
 * Returns 0 on success, -EINVAL if the resource is not valid.
 *
 * This function removes a resource previously inserted by insert_resource()
 * or insert_resource_conflict(), and moves the children (if any) up to
 * where they were before. insert_resource() and insert_resource_conflict()
 * insert a new resource, and move any conflicting resources down to the
 * children of the new resource.
 *
 * insert_resource(), insert_resource_conflict() and remove_resource() are
 * intended for producers of resources, such as FW modules and bus drivers.
 */
int remove_resource(struct resource *old)
{
	int retval;

	write_lock(&resource_lock);
	retval = __release_resource(old, false);
	write_unlock(&resource_lock);
	return retval;
}
EXPORT_SYMBOL_GPL(remove_resource);

static int __adjust_resource(struct resource *res, resource_size_t start,
			     resource_size_t size)
{
	struct resource *tmp, *parent = res->parent;
	resource_size_t end = start + size - 1;
	int result = -EBUSY;

	if (!parent)
		goto skip;

	if ((start < parent->start) || (end > parent->end))
		goto out;

	if (res->sibling && (res->sibling->start <= end))
		goto out;

	tmp = parent->child;
	if (tmp != res) {
		while (tmp->sibling != res)
			tmp = tmp->sibling;
		if (start <= tmp->end)
			goto out;
	}

skip:
	for (tmp = res->child; tmp; tmp = tmp->sibling)
		if ((tmp->start < start) || (tmp->end > end))
			goto out;

	res->start = start;
	res->end = end;
	result = 0;

out:
	return result;
}

/**
 * adjust_resource - modify a resource's start and size
 * @res: resource to modify
 * @start: new start value
 * @size: new size
 *
 * Given an existing resource, change its start and size to match the
 * arguments. Returns 0 on success, -EBUSY if it can't fit.
 * Existing children of the resource are assumed to be immutable.
 */
int adjust_resource(struct resource *res, resource_size_t start,
		    resource_size_t size)
{
	int result;

	write_lock(&resource_lock);
	result = __adjust_resource(res, start, size);
	write_unlock(&resource_lock);
	return result;
}
EXPORT_SYMBOL(adjust_resource);

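/*
 * Illustrative example (editor's sketch): growing a claimed region in
 * place with adjust_resource(). The new range must still fit inside the
 * parent, stay clear of siblings, and keep covering all children, or
 * -EBUSY is returned.
 *
 *	// grow my_res from 4 KiB to 8 KiB at the same base
 *	err = adjust_resource(&my_res, my_res.start, SZ_8K);
 */
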
static void __init
__reserve_region_with_split(struct resource *root, resource_size_t start,
			    resource_size_t end, const char *name)
{
	struct resource *parent = root;
	struct resource *conflict;
	struct resource *res = alloc_resource(GFP_ATOMIC);
	struct resource *next_res = NULL;
	int type = resource_type(root);

	if (!res)
		return;

	res->name = name;
	res->start = start;
	res->end = end;
	res->flags = type | IORESOURCE_BUSY;
	res->desc = IORES_DESC_NONE;

	while (1) {

		conflict = __request_resource(parent, res);
		if (!conflict) {
			if (!next_res)
				break;
			res = next_res;
			next_res = NULL;
			continue;
		}

		/* conflict covered whole area */
		if (conflict->start <= res->start &&
		    conflict->end >= res->end) {
			free_resource(res);
			WARN_ON(next_res);
			break;
		}

		/* failed, split and try again */
		if (conflict->start > res->start) {
			end = res->end;
			res->end = conflict->start - 1;
			if (conflict->end < end) {
				next_res = alloc_resource(GFP_ATOMIC);
				if (!next_res) {
					free_resource(res);
					break;
				}
				next_res->name = name;
				next_res->start = conflict->end + 1;
				next_res->end = end;
				next_res->flags = type | IORESOURCE_BUSY;
				next_res->desc = IORES_DESC_NONE;
			}
		} else {
			res->start = conflict->end + 1;
		}
	}
}

void __init
reserve_region_with_split(struct resource *root, resource_size_t start,
			  resource_size_t end, const char *name)
{
	int abort = 0;

	write_lock(&resource_lock);
	if (root->start > start || root->end < end) {
		pr_err("requested range [0x%llx-0x%llx] not in root %pr\n",
		       (unsigned long long)start, (unsigned long long)end,
		       root);
		if (start > root->end || end < root->start)
			abort = 1;
		else {
			if (end > root->end)
				end = root->end;
			if (start < root->start)
				start = root->start;
			pr_err("fixing request to [0x%llx-0x%llx]\n",
			       (unsigned long long)start,
			       (unsigned long long)end);
		}
		dump_stack();
	}
	if (!abort)
		__reserve_region_with_split(root, start, end, name);
	write_unlock(&resource_lock);
}

/**
 * resource_alignment - calculate resource's alignment
 * @res: resource pointer
 *
 * Returns alignment on success, 0 (invalid alignment) on failure.
 */
resource_size_t resource_alignment(struct resource *res)
{
	switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
	case IORESOURCE_SIZEALIGN:
		return resource_size(res);
	case IORESOURCE_STARTALIGN:
		return res->start;
	default:
		return 0;
	}
}

/*
 * This is compatibility stuff for IO resources.
 *
 * Note how this, unlike the above, knows about
 * the IO flag meanings (busy etc).
 *
 * request_region creates a new busy region.
 *
 * release_region releases a matching busy region.
 */

static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait);

static struct inode *iomem_inode;

#ifdef CONFIG_IO_STRICT_DEVMEM
static void revoke_iomem(struct resource *res)
{
	/* pairs with smp_store_release() in iomem_init_inode() */
	struct inode *inode = smp_load_acquire(&iomem_inode);

	/*
	 * Check that the initialization has completed. Losing the race
	 * is ok because it means drivers are claiming resources before
	 * the fs_initcall level of init, which prevents iomem_get_mapping
	 * users from establishing mappings.
	 */
	if (!inode)
		return;

	/*
	 * The expectation is that the driver has successfully marked
	 * the resource busy by this point, so devmem_is_allowed()
	 * should start returning false, however for performance this
	 * does not iterate the entire resource range.
	 */
	if (devmem_is_allowed(PHYS_PFN(res->start)) &&
	    devmem_is_allowed(PHYS_PFN(res->end))) {
		/*
		 * *cringe* iomem=relaxed says "go ahead, what's the
		 * worst that can happen?"
		 */
		return;
	}

	unmap_mapping_range(inode->i_mapping, res->start, resource_size(res), 1);
}
#else
static void revoke_iomem(struct resource *res) {}
#endif

struct address_space *iomem_get_mapping(void)
{
	/*
	 * This function is only called from file open paths, hence guaranteed
	 * that fs_initcalls have completed and no need to check for NULL. But
	 * since revoke_iomem can be called before the initcall we still need
	 * the barrier to appease checkers.
	 */
	return smp_load_acquire(&iomem_inode)->i_mapping;
}

static int __request_region_locked(struct resource *res, struct resource *parent,
				   resource_size_t start, resource_size_t n,
				   const char *name, int flags)
{
	DECLARE_WAITQUEUE(wait, current);

	res->name = name;
	res->start = start;
	res->end = start + n - 1;

	for (;;) {
		struct resource *conflict;

		res->flags = resource_type(parent) | resource_ext_type(parent);
		res->flags |= IORESOURCE_BUSY | flags;
		res->desc = parent->desc;

		conflict = __request_resource(parent, res);
		if (!conflict)
			break;
		/*
		 * mm/hmm.c reserves physical addresses which then
		 * become unavailable to other users.  Conflicts are
		 * not expected.  Warn to aid debugging if encountered.
		 */
		if (conflict->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY) {
			pr_warn("Unaddressable device %s %pR conflicts with %pR\n",
				conflict->name, conflict, res);
		}
		if (conflict != parent) {
			if (!(conflict->flags & IORESOURCE_BUSY)) {
				parent = conflict;
				continue;
			}
		}
		if (conflict->flags & flags & IORESOURCE_MUXED) {
			add_wait_queue(&muxed_resource_wait, &wait);
			write_unlock(&resource_lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule();
			remove_wait_queue(&muxed_resource_wait, &wait);
			write_lock(&resource_lock);
			continue;
		}
		/* Uhhuh, that didn't work out.. */
		return -EBUSY;
	}

	return 0;
}

/**
 * __request_region - create a new busy resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 * @name: reserving caller's ID string
 * @flags: IO resource flags
 */
struct resource *__request_region(struct resource *parent,
				  resource_size_t start, resource_size_t n,
				  const char *name, int flags)
{
	struct resource *res = alloc_resource(GFP_KERNEL);
	int ret;

	if (!res)
		return NULL;

	write_lock(&resource_lock);
	ret = __request_region_locked(res, parent, start, n, name, flags);
	write_unlock(&resource_lock);

	if (ret) {
		free_resource(res);
		return NULL;
	}

	if (parent == &iomem_resource)
		revoke_iomem(res);

	return res;
}
EXPORT_SYMBOL(__request_region);

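/*
 * Illustrative example (editor's sketch, made-up base): the common driver
 * pattern built on __request_region() via the request_mem_region() /
 * release_mem_region() wrappers from <linux/ioport.h>.
 *
 *	if (!request_mem_region(base, SZ_4K, "my-device"))
 *		return -EBUSY;
 *
 *	regs = ioremap(base, SZ_4K);
 *	...
 *	iounmap(regs);
 *	release_mem_region(base, SZ_4K);
 */
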
/**
 * __release_region - release a previously reserved resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * The described resource region must match a currently busy region.
 */
void __release_region(struct resource *parent, resource_size_t start,
		      resource_size_t n)
{
	struct resource **p;
	resource_size_t end;

	p = &parent->child;
	end = start + n - 1;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *res = *p;

		if (!res)
			break;
		if (res->start <= start && res->end >= end) {
			if (!(res->flags & IORESOURCE_BUSY)) {
				p = &res->child;
				continue;
			}
			if (res->start != start || res->end != end)
				break;
			*p = res->sibling;
			write_unlock(&resource_lock);
			if (res->flags & IORESOURCE_MUXED)
				wake_up(&muxed_resource_wait);
			free_resource(res);
			return;
		}
		p = &res->sibling;
	}

	write_unlock(&resource_lock);

	pr_warn("Trying to free nonexistent resource <%pa-%pa>\n", &start, &end);
}
EXPORT_SYMBOL(__release_region);

#ifdef CONFIG_MEMORY_HOTREMOVE
/**
 * release_mem_region_adjustable - release a previously reserved memory region
 * @start: resource start address
 * @size: resource region size
 *
 * This interface is intended for memory hot-delete.  The requested region
 * is released from a currently busy memory resource.  The requested region
 * must either match exactly or fit into a single busy resource entry.  In
 * the latter case, the remaining resource is adjusted accordingly.
 * Existing children of the busy memory resource must be immutable in the
 * request.
 *
 * Note:
 * - Additional release conditions, such as overlapping region, can be
 *   supported after they are confirmed as valid cases.
 * - When a busy memory resource gets split into two entries, the code
 *   assumes that all children remain in the lower address entry for
 *   simplicity.  Enhance this logic when necessary.
 */
void release_mem_region_adjustable(resource_size_t start, resource_size_t size)
{
	struct resource *parent = &iomem_resource;
	struct resource *new_res = NULL;
	bool alloc_nofail = false;
	struct resource **p;
	struct resource *res;
	resource_size_t end;

	end = start + size - 1;
	if (WARN_ON_ONCE((start < parent->start) || (end > parent->end)))
		return;

	/*
	 * We free up quite a lot of memory on memory hotunplug (especially
	 * the memmap), just before releasing the region. This is highly
	 * unlikely to fail - let's play safe and make it never fail as the
	 * caller cannot perform any error handling (e.g., trying to re-add
	 * memory will fail similarly).
	 */
retry:
	new_res = alloc_resource(GFP_KERNEL | (alloc_nofail ? __GFP_NOFAIL : 0));

	p = &parent->child;
	write_lock(&resource_lock);

	while ((res = *p)) {
		if (res->start >= end)
			break;

		/* look for the next resource if it does not fit into */
		if (res->start > start || res->end < end) {
			p = &res->sibling;
			continue;
		}

		if (!(res->flags & IORESOURCE_MEM))
			break;

		if (!(res->flags & IORESOURCE_BUSY)) {
			p = &res->child;
			continue;
		}

		/* found the target resource; let's adjust accordingly */
		if (res->start == start && res->end == end) {
			/* free the whole entry */
			*p = res->sibling;
			free_resource(res);
		} else if (res->start == start && res->end != end) {
			/* adjust the start */
			WARN_ON_ONCE(__adjust_resource(res, end + 1,
						       res->end - end));
		} else if (res->start != start && res->end == end) {
			/* adjust the end */
			WARN_ON_ONCE(__adjust_resource(res, res->start,
						       start - res->start));
		} else {
			/* split into two entries - we need a new resource */
			if (!new_res) {
				new_res = alloc_resource(GFP_ATOMIC);
				if (!new_res) {
					alloc_nofail = true;
					write_unlock(&resource_lock);
					goto retry;
				}
			}
			new_res->name = res->name;
			new_res->start = end + 1;
			new_res->end = res->end;
			new_res->flags = res->flags;
			new_res->desc = res->desc;
			new_res->parent = res->parent;
			new_res->sibling = res->sibling;
			new_res->child = NULL;

			if (WARN_ON_ONCE(__adjust_resource(res, res->start,
							   start - res->start)))
				break;
			res->sibling = new_res;
			new_res = NULL;
		}

		break;
	}

	write_unlock(&resource_lock);
	free_resource(new_res);
}
#endif	/* CONFIG_MEMORY_HOTREMOVE */

#ifdef CONFIG_MEMORY_HOTPLUG
static bool system_ram_resources_mergeable(struct resource *r1,
					   struct resource *r2)
{
	/* We assume either r1 or r2 is IORESOURCE_SYSRAM_MERGEABLE. */
	return r1->flags == r2->flags && r1->end + 1 == r2->start &&
	       r1->name == r2->name && r1->desc == r2->desc &&
	       !r1->child && !r2->child;
}

/**
 * merge_system_ram_resource - mark the System RAM resource mergeable and try to
 *	merge it with adjacent, mergeable resources
 * @res: resource descriptor
 *
 * This interface is intended for memory hotplug, whereby lots of contiguous
 * system ram resources are added (e.g., via add_memory*()) by a driver, and
 * the actual resource boundaries are not of interest (e.g., it might be
 * relevant for DIMMs). Only resources that are marked mergeable, that have the
 * same parent, and that don't have any children are considered. All mergeable
 * resources must be immutable during the request.
 *
 * Note:
 * - The caller has to make sure that no pointers to resources that are
 *   marked mergeable are used anymore after this call - the resource might
 *   be freed and the pointer might be stale!
 * - release_mem_region_adjustable() will split on demand on memory hotunplug
 */
void merge_system_ram_resource(struct resource *res)
{
	const unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	struct resource *cur;

	if (WARN_ON_ONCE((res->flags & flags) != flags))
		return;

	write_lock(&resource_lock);
	res->flags |= IORESOURCE_SYSRAM_MERGEABLE;

	/* Try to merge with next item in the list. */
	cur = res->sibling;
	if (cur && system_ram_resources_mergeable(res, cur)) {
		res->end = cur->end;
		res->sibling = cur->sibling;
		free_resource(cur);
	}

	/* Try to merge with previous item in the list. */
	cur = res->parent->child;
	while (cur && cur->sibling != res)
		cur = cur->sibling;
	if (cur && system_ram_resources_mergeable(cur, res)) {
		cur->end = res->end;
		cur->sibling = res->sibling;
		free_resource(res);
	}
	write_unlock(&resource_lock);
}
#endif	/* CONFIG_MEMORY_HOTPLUG */

/*
 * Managed region resource
 */
static void devm_resource_release(struct device *dev, void *ptr)
{
	struct resource **r = ptr;

	release_resource(*r);
}

/**
 * devm_request_resource() - request and reserve an I/O or memory resource
 * @dev: device for which to request the resource
 * @root: root of the resource tree from which to request the resource
 * @new: descriptor of the resource to request
 *
 * This is a device-managed version of request_resource(). There is usually
 * no need to release resources requested by this function explicitly since
 * that will be taken care of when the device is unbound from its driver.
 * If for some reason the resource needs to be released explicitly, because
 * of ordering issues for example, drivers must call devm_release_resource()
 * rather than the regular release_resource().
 *
 * When a conflict is detected between any existing resources and the newly
 * requested resource, an error message will be printed.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int devm_request_resource(struct device *dev, struct resource *root,
			  struct resource *new)
{
	struct resource *conflict, **ptr;

	ptr = devres_alloc(devm_resource_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	*ptr = new;

	conflict = request_resource_conflict(root, new);
	if (conflict) {
		dev_err(dev, "resource collision: %pR conflicts with %s %pR\n",
			new, conflict->name, conflict);
		devres_free(ptr);
		return -EBUSY;
	}

	devres_add(dev, ptr);
	return 0;
}
EXPORT_SYMBOL(devm_request_resource);

static int devm_resource_match(struct device *dev, void *res, void *data)
{
	struct resource **ptr = res;

	return *ptr == data;
}

/**
 * devm_release_resource() - release a previously requested resource
 * @dev: device for which to release the resource
 * @new: descriptor of the resource to release
 *
 * Releases a resource previously requested using devm_request_resource().
 */
void devm_release_resource(struct device *dev, struct resource *new)
{
	WARN_ON(devres_release(dev, devm_resource_release, devm_resource_match,
			       new));
}
EXPORT_SYMBOL(devm_release_resource);

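/*
 * Illustrative example (editor's sketch, made-up range): requesting a
 * fixed resource from probe() with automatic cleanup on driver unbind.
 * The descriptor must outlive the binding, e.g. be static.
 *
 *	static struct resource my_res = DEFINE_RES_MEM(0xfed40000, SZ_4K);
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		int err = devm_request_resource(&pdev->dev, &iomem_resource,
 *						&my_res);
 *		if (err)
 *			return err;
 *		...
 *	}
 */
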
struct region_devres {
	struct resource *parent;
	resource_size_t start;
	resource_size_t n;
};

static void devm_region_release(struct device *dev, void *res)
{
	struct region_devres *this = res;

	__release_region(this->parent, this->start, this->n);
}

static int devm_region_match(struct device *dev, void *res, void *match_data)
{
	struct region_devres *this = res, *match = match_data;

	return this->parent == match->parent &&
		this->start == match->start && this->n == match->n;
}

struct resource *
__devm_request_region(struct device *dev, struct resource *parent,
		      resource_size_t start, resource_size_t n, const char *name)
{
	struct region_devres *dr = NULL;
	struct resource *res;

	dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
			  GFP_KERNEL);
	if (!dr)
		return NULL;

	dr->parent = parent;
	dr->start = start;
	dr->n = n;

	res = __request_region(parent, start, n, name, 0);
	if (res)
		devres_add(dev, dr);
	else
		devres_free(dr);

	return res;
}
EXPORT_SYMBOL(__devm_request_region);

void __devm_release_region(struct device *dev, struct resource *parent,
			   resource_size_t start, resource_size_t n)
{
	struct region_devres match_data = { parent, start, n };

	__release_region(parent, start, n);
	WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
			       &match_data));
}
EXPORT_SYMBOL(__devm_release_region);

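/*
 * Illustrative example (editor's sketch): the usual consumer of
 * __devm_request_region() is the devm_request_mem_region() macro from
 * <linux/ioport.h>, which drops the region automatically on unbind.
 *
 *	if (!devm_request_mem_region(&pdev->dev, base, SZ_4K,
 *				     dev_name(&pdev->dev)))
 *		return -EBUSY;
 */
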
/*
 * Reserve I/O ports or memory based on "reserve=" kernel parameter.
 */
#define MAXRESERVE 4
static int __init reserve_setup(char *str)
{
	static int reserved;
	static struct resource reserve[MAXRESERVE];

	for (;;) {
		unsigned int io_start, io_num;
		int x = reserved;
		struct resource *parent;

		if (get_option(&str, &io_start) != 2)
			break;
		if (get_option(&str, &io_num) == 0)
			break;
		if (x < MAXRESERVE) {
			struct resource *res = reserve + x;

			/*
			 * If the region starts below 0x10000, we assume it's
			 * I/O port space; otherwise assume it's memory.
			 */
			if (io_start < 0x10000) {
				res->flags = IORESOURCE_IO;
				parent = &ioport_resource;
			} else {
				res->flags = IORESOURCE_MEM;
				parent = &iomem_resource;
			}
			res->name = "reserved";
			res->start = io_start;
			res->end = io_start + io_num - 1;
			res->flags |= IORESOURCE_BUSY;
			res->desc = IORES_DESC_NONE;
			res->child = NULL;
			if (request_resource(parent, res) == 0)
				reserved = x+1;
		}
	}
	return 1;
}
__setup("reserve=", reserve_setup);

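/*
 * Illustrative example (editor's sketch): "reserve=" takes <base>,<extent>
 * pairs; a base below 0x10000 is reserved as I/O port space, anything else
 * as memory, e.g. on the kernel command line:
 *
 *	reserve=0x300,8			// ports 0x300-0x307
 *	reserve=0xfed40000,0x1000	// 4 KiB of MMIO space
 */
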
/*
 * Check if the requested addr and size span more than any slot in the
 * iomem resource tree.
 */
int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
{
	resource_size_t end = addr + size - 1;
	struct resource *p;
	int err = 0;

	read_lock(&resource_lock);
	for_each_resource(&iomem_resource, p, false) {
		/*
		 * We can probably skip the resources without
		 * IORESOURCE_IO attribute?
		 */
		if (p->start > end)
			continue;
		if (p->end < addr)
			continue;
		if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
		    PFN_DOWN(p->end) >= PFN_DOWN(end))
			continue;
		/*
		 * if a resource is "BUSY", it's not a hardware resource
		 * but a driver mapping of such a resource; we don't want
		 * to warn for those; some drivers legitimately map only
		 * partial hardware resources. (example: vesafb)
		 */
		if (p->flags & IORESOURCE_BUSY)
			continue;

		pr_warn("resource sanity check: requesting [mem %pa-%pa], which spans more than %s %pR\n",
			&addr, &end, p->name, p);
		err = -1;
		break;
	}
	read_unlock(&resource_lock);

	return err;
}

#ifdef CONFIG_STRICT_DEVMEM
static int strict_iomem_checks = 1;
#else
static int strict_iomem_checks;
#endif

/*
 * Check if an address is exclusive to the kernel and must not be mapped to
 * user space, for example, via /dev/mem.
 *
 * Returns true if exclusive to the kernel, otherwise returns false.
 */
bool resource_is_exclusive(struct resource *root, u64 addr, resource_size_t size)
{
	const unsigned int exclusive_system_ram = IORESOURCE_SYSTEM_RAM |
						  IORESOURCE_EXCLUSIVE;
	bool skip_children = false, err = false;
	struct resource *p;

	read_lock(&resource_lock);
	for_each_resource(root, p, skip_children) {
		if (p->start >= addr + size)
			break;
		if (p->end < addr) {
			skip_children = true;
			continue;
		}
		skip_children = false;

		/*
		 * IORESOURCE_SYSTEM_RAM resources are exclusive if
		 * IORESOURCE_EXCLUSIVE is set, even if they
		 * are not busy and even if "iomem=relaxed" is set. The
		 * responsible driver dynamically adds/removes system RAM
		 * within such an area and uncontrolled access is dangerous.
		 */
		if ((p->flags & exclusive_system_ram) == exclusive_system_ram) {
			err = true;
			break;
		}

		/*
		 * A resource is exclusive if IORESOURCE_EXCLUSIVE is set
		 * or CONFIG_IO_STRICT_DEVMEM is enabled and the
		 * resource is busy.
		 */
		if (!strict_iomem_checks || !(p->flags & IORESOURCE_BUSY))
			continue;
		if (IS_ENABLED(CONFIG_IO_STRICT_DEVMEM)
		    || p->flags & IORESOURCE_EXCLUSIVE) {
			err = true;
			break;
		}
	}
	read_unlock(&resource_lock);

	return err;
}

bool iomem_is_exclusive(u64 addr)
{
	return resource_is_exclusive(&iomem_resource, addr & PAGE_MASK,
				     PAGE_SIZE);
}

struct resource_entry *resource_list_create_entry(struct resource *res,
						  size_t extra_size)
{
	struct resource_entry *entry;

	entry = kzalloc(sizeof(*entry) + extra_size, GFP_KERNEL);
	if (entry) {
		INIT_LIST_HEAD(&entry->node);
		entry->res = res ? res : &entry->__res;
	}

	return entry;
}
EXPORT_SYMBOL(resource_list_create_entry);

void resource_list_free(struct list_head *head)
{
	struct resource_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, node)
		resource_list_destroy_entry(entry);
}
EXPORT_SYMBOL(resource_list_free);

#ifdef CONFIG_GET_FREE_REGION
#define GFR_DESCENDING		(1UL << 0)
#define GFR_REQUEST_REGION	(1UL << 1)
#ifdef PA_SECTION_SHIFT
#define GFR_DEFAULT_ALIGN	(1UL << PA_SECTION_SHIFT)
#else
#define GFR_DEFAULT_ALIGN	PAGE_SIZE
#endif

static resource_size_t gfr_start(struct resource *base, resource_size_t size,
				 resource_size_t align, unsigned long flags)
{
	if (flags & GFR_DESCENDING) {
		resource_size_t end;

		end = min_t(resource_size_t, base->end, DIRECT_MAP_PHYSMEM_END);
		return end - size + 1;
	}

	return ALIGN(max(base->start, align), align);
}

static bool gfr_continue(struct resource *base, resource_size_t addr,
			 resource_size_t size, unsigned long flags)
{
	if (flags & GFR_DESCENDING)
		return addr > size && addr >= base->start;
	/*
	 * In the ascend case be careful that the last increment by
	 * @size did not wrap 0.
	 */
	return addr > addr - size &&
	       addr <= min_t(resource_size_t, base->end, DIRECT_MAP_PHYSMEM_END);
}

static resource_size_t gfr_next(resource_size_t addr, resource_size_t size,
				unsigned long flags)
{
	if (flags & GFR_DESCENDING)
		return addr - size;
	return addr + size;
}

static void remove_free_mem_region(void *_res)
{
	struct resource *res = _res;

	if (res->parent)
		remove_resource(res);
	free_resource(res);
}

static struct resource *
get_free_mem_region(struct device *dev, struct resource *base,
		    resource_size_t size, const unsigned long align,
		    const char *name, const unsigned long desc,
		    const unsigned long flags)
{
	resource_size_t addr;
	struct resource *res;
	struct region_devres *dr = NULL;

	size = ALIGN(size, align);

	res = alloc_resource(GFP_KERNEL);
	if (!res)
		return ERR_PTR(-ENOMEM);

	if (dev && (flags & GFR_REQUEST_REGION)) {
		dr = devres_alloc(devm_region_release,
				  sizeof(struct region_devres), GFP_KERNEL);
		if (!dr) {
			free_resource(res);
			return ERR_PTR(-ENOMEM);
		}
	} else if (dev) {
		if (devm_add_action_or_reset(dev, remove_free_mem_region, res))
			return ERR_PTR(-ENOMEM);
	}

	write_lock(&resource_lock);
	for (addr = gfr_start(base, size, align, flags);
	     gfr_continue(base, addr, align, flags);
	     addr = gfr_next(addr, align, flags)) {
		if (__region_intersects(base, addr, size, 0, IORES_DESC_NONE) !=
		    REGION_DISJOINT)
			continue;

		if (flags & GFR_REQUEST_REGION) {
			if (__request_region_locked(res, &iomem_resource, addr,
						    size, name, 0))
				break;

			if (dev) {
				dr->parent = &iomem_resource;
				dr->start = addr;
				dr->n = size;
				devres_add(dev, dr);
			}

			res->desc = desc;
			write_unlock(&resource_lock);

			/*
			 * A driver is claiming this region so revoke any
			 * mappings.
			 */
			revoke_iomem(res);
		} else {
			res->start = addr;
			res->end = addr + size - 1;
			res->name = name;
			res->desc = desc;
			res->flags = IORESOURCE_MEM;

			/*
			 * Only succeed if the resource hosts an exclusive
			 * range after the insert
			 */
			if (__insert_resource(base, res) || res->child)
				break;

			write_unlock(&resource_lock);
		}

		return res;
	}
	write_unlock(&resource_lock);

	if (flags & GFR_REQUEST_REGION) {
		free_resource(res);
		devres_free(dr);
	} else if (dev)
		devm_release_action(dev, remove_free_mem_region, res);

	return ERR_PTR(-ERANGE);
}

/**
 * devm_request_free_mem_region - find free region for device private memory
 *
 * @dev: device struct to bind the resource to
 * @size: size in bytes of the device memory to add
 * @base: resource tree to look in
 *
 * This function tries to find an empty range of physical address space big
 * enough to contain the new resource, so that it can later be hotplugged as
 * ZONE_DEVICE memory, which in turn allocates struct pages.
 */
struct resource *devm_request_free_mem_region(struct device *dev,
		struct resource *base, unsigned long size)
{
	unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION;

	return get_free_mem_region(dev, base, size, GFR_DEFAULT_ALIGN,
				   dev_name(dev),
				   IORES_DESC_DEVICE_PRIVATE_MEMORY, flags);
}
EXPORT_SYMBOL_GPL(devm_request_free_mem_region);

struct resource *request_free_mem_region(struct resource *base,
		unsigned long size, const char *name)
{
	unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION;

	return get_free_mem_region(NULL, base, size, GFR_DEFAULT_ALIGN, name,
				   IORES_DESC_DEVICE_PRIVATE_MEMORY, flags);
}
EXPORT_SYMBOL_GPL(request_free_mem_region);

/**
 * alloc_free_mem_region - find a free region relative to @base
 * @base: resource that will parent the new resource
 * @size: size in bytes of memory to allocate from @base
 * @align: alignment requirements for the allocation
 * @name: resource name
 *
 * Buses like CXL, that can dynamically instantiate new memory regions,
 * need a method to allocate physical address space for those regions.
 * Allocate and insert a new resource to cover a free range in the span of
 * @base that is not claimed by any descendant of @base.
 */
struct resource *alloc_free_mem_region(struct resource *base,
				       unsigned long size, unsigned long align,
				       const char *name)
{
	/* Default of ascending direction and insert resource */
	unsigned long flags = 0;

	return get_free_mem_region(NULL, base, size, align, name,
				   IORES_DESC_NONE, flags);
}
EXPORT_SYMBOL_GPL(alloc_free_mem_region);
#endif /* CONFIG_GET_FREE_REGION */

static int __init strict_iomem(char *str)
{
	if (strstr(str, "relaxed"))
		strict_iomem_checks = 0;
	if (strstr(str, "strict"))
		strict_iomem_checks = 1;
	return 1;
}

static int iomem_fs_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, DEVMEM_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type iomem_fs_type = {
	.name		= "iomem",
	.owner		= THIS_MODULE,
	.init_fs_context = iomem_fs_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static int __init iomem_init_inode(void)
{
	static struct vfsmount *iomem_vfs_mount;
	static int iomem_fs_cnt;
	struct inode *inode;
	int rc;

	rc = simple_pin_fs(&iomem_fs_type, &iomem_vfs_mount, &iomem_fs_cnt);
	if (rc < 0) {
		pr_err("Cannot mount iomem pseudo filesystem: %d\n", rc);
		return rc;
	}

	inode = alloc_anon_inode(iomem_vfs_mount->mnt_sb);
	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		pr_err("Cannot allocate inode for iomem: %d\n", rc);
		simple_release_fs(&iomem_vfs_mount, &iomem_fs_cnt);
		return rc;
	}

	/*
	 * Publish iomem revocation inode initialized.
	 * Pairs with smp_load_acquire() in revoke_iomem().
	 */
	smp_store_release(&iomem_inode, inode);

	return 0;
}

fs_initcall(iomem_init_inode);

__setup("iomem=", strict_iomem);