1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * linux/kernel/resource.c 4 * 5 * Copyright (C) 1999 Linus Torvalds 6 * Copyright (C) 1999 Martin Mares <mj@ucw.cz> 7 * 8 * Arbitrary resource management. 9 */ 10 11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 12 13 #include <linux/export.h> 14 #include <linux/errno.h> 15 #include <linux/ioport.h> 16 #include <linux/init.h> 17 #include <linux/slab.h> 18 #include <linux/spinlock.h> 19 #include <linux/fs.h> 20 #include <linux/proc_fs.h> 21 #include <linux/pseudo_fs.h> 22 #include <linux/sched.h> 23 #include <linux/seq_file.h> 24 #include <linux/device.h> 25 #include <linux/pfn.h> 26 #include <linux/mm.h> 27 #include <linux/mount.h> 28 #include <linux/resource_ext.h> 29 #include <uapi/linux/magic.h> 30 #include <linux/string.h> 31 #include <linux/vmalloc.h> 32 #include <asm/io.h> 33 34 35 struct resource ioport_resource = { 36 .name = "PCI IO", 37 .start = 0, 38 .end = IO_SPACE_LIMIT, 39 .flags = IORESOURCE_IO, 40 }; 41 EXPORT_SYMBOL(ioport_resource); 42 43 struct resource iomem_resource = { 44 .name = "PCI mem", 45 .start = 0, 46 .end = -1, 47 .flags = IORESOURCE_MEM, 48 }; 49 EXPORT_SYMBOL(iomem_resource); 50 51 static DEFINE_RWLOCK(resource_lock); 52 53 static struct resource *next_resource(struct resource *p, bool skip_children) 54 { 55 if (!skip_children && p->child) 56 return p->child; 57 while (!p->sibling && p->parent) 58 p = p->parent; 59 return p->sibling; 60 } 61 62 #define for_each_resource(_root, _p, _skip_children) \ 63 for ((_p) = (_root)->child; (_p); (_p) = next_resource(_p, _skip_children)) 64 65 #ifdef CONFIG_PROC_FS 66 67 enum { MAX_IORES_LEVEL = 5 }; 68 69 static void *r_start(struct seq_file *m, loff_t *pos) 70 __acquires(resource_lock) 71 { 72 struct resource *root = pde_data(file_inode(m->file)); 73 struct resource *p; 74 loff_t l = *pos; 75 76 read_lock(&resource_lock); 77 for_each_resource(root, p, false) { 78 if (l-- == 0) 79 break; 80 } 81 82 return p; 83 } 84 85 static void *r_next(struct seq_file *m, void *v, loff_t *pos) 86 { 87 struct resource *p = v; 88 89 (*pos)++; 90 91 return (void *)next_resource(p, false); 92 } 93 94 static void r_stop(struct seq_file *m, void *v) 95 __releases(resource_lock) 96 { 97 read_unlock(&resource_lock); 98 } 99 100 static int r_show(struct seq_file *m, void *v) 101 { 102 struct resource *root = pde_data(file_inode(m->file)); 103 struct resource *r = v, *p; 104 unsigned long long start, end; 105 int width = root->end < 0x10000 ? 4 : 8; 106 int depth; 107 108 for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent) 109 if (p->parent == root) 110 break; 111 112 if (file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN)) { 113 start = r->start; 114 end = r->end; 115 } else { 116 start = end = 0; 117 } 118 119 seq_printf(m, "%*s%0*llx-%0*llx : %s\n", 120 depth * 2, "", 121 width, start, 122 width, end, 123 r->name ? r->name : "<BAD>"); 124 return 0; 125 } 126 127 static const struct seq_operations resource_op = { 128 .start = r_start, 129 .next = r_next, 130 .stop = r_stop, 131 .show = r_show, 132 }; 133 134 static int __init ioresources_init(void) 135 { 136 proc_create_seq_data("ioports", 0, NULL, &resource_op, 137 &ioport_resource); 138 proc_create_seq_data("iomem", 0, NULL, &resource_op, &iomem_resource); 139 return 0; 140 } 141 __initcall(ioresources_init); 142 143 #endif /* CONFIG_PROC_FS */ 144 145 static void free_resource(struct resource *res) 146 { 147 /** 148 * If the resource was allocated using memblock early during boot 149 * we'll leak it here: we can only return full pages back to the 150 * buddy and trying to be smart and reusing them eventually in 151 * alloc_resource() overcomplicates resource handling. 152 */ 153 if (res && PageSlab(virt_to_head_page(res))) 154 kfree(res); 155 } 156 157 static struct resource *alloc_resource(gfp_t flags) 158 { 159 return kzalloc(sizeof(struct resource), flags); 160 } 161 162 /* Return the conflict entry if you can't request it */ 163 static struct resource * __request_resource(struct resource *root, struct resource *new) 164 { 165 resource_size_t start = new->start; 166 resource_size_t end = new->end; 167 struct resource *tmp, **p; 168 169 if (end < start) 170 return root; 171 if (start < root->start) 172 return root; 173 if (end > root->end) 174 return root; 175 p = &root->child; 176 for (;;) { 177 tmp = *p; 178 if (!tmp || tmp->start > end) { 179 new->sibling = tmp; 180 *p = new; 181 new->parent = root; 182 return NULL; 183 } 184 p = &tmp->sibling; 185 if (tmp->end < start) 186 continue; 187 return tmp; 188 } 189 } 190 191 static int __release_resource(struct resource *old, bool release_child) 192 { 193 struct resource *tmp, **p, *chd; 194 195 p = &old->parent->child; 196 for (;;) { 197 tmp = *p; 198 if (!tmp) 199 break; 200 if (tmp == old) { 201 if (release_child || !(tmp->child)) { 202 *p = tmp->sibling; 203 } else { 204 for (chd = tmp->child;; chd = chd->sibling) { 205 chd->parent = tmp->parent; 206 if (!(chd->sibling)) 207 break; 208 } 209 *p = tmp->child; 210 chd->sibling = tmp->sibling; 211 } 212 old->parent = NULL; 213 return 0; 214 } 215 p = &tmp->sibling; 216 } 217 return -EINVAL; 218 } 219 220 static void __release_child_resources(struct resource *r) 221 { 222 struct resource *tmp, *p; 223 resource_size_t size; 224 225 p = r->child; 226 r->child = NULL; 227 while (p) { 228 tmp = p; 229 p = p->sibling; 230 231 tmp->parent = NULL; 232 tmp->sibling = NULL; 233 __release_child_resources(tmp); 234 235 printk(KERN_DEBUG "release child resource %pR\n", tmp); 236 /* need to restore size, and keep flags */ 237 size = resource_size(tmp); 238 tmp->start = 0; 239 tmp->end = size - 1; 240 } 241 } 242 243 void release_child_resources(struct resource *r) 244 { 245 write_lock(&resource_lock); 246 __release_child_resources(r); 247 write_unlock(&resource_lock); 248 } 249 250 /** 251 * request_resource_conflict - request and reserve an I/O or memory resource 252 * @root: root resource descriptor 253 * @new: resource descriptor desired by caller 254 * 255 * Returns 0 for success, conflict resource on error. 256 */ 257 struct resource *request_resource_conflict(struct resource *root, struct resource *new) 258 { 259 struct resource *conflict; 260 261 write_lock(&resource_lock); 262 conflict = __request_resource(root, new); 263 write_unlock(&resource_lock); 264 return conflict; 265 } 266 267 /** 268 * request_resource - request and reserve an I/O or memory resource 269 * @root: root resource descriptor 270 * @new: resource descriptor desired by caller 271 * 272 * Returns 0 for success, negative error code on error. 273 */ 274 int request_resource(struct resource *root, struct resource *new) 275 { 276 struct resource *conflict; 277 278 conflict = request_resource_conflict(root, new); 279 return conflict ? -EBUSY : 0; 280 } 281 282 EXPORT_SYMBOL(request_resource); 283 284 /** 285 * release_resource - release a previously reserved resource 286 * @old: resource pointer 287 */ 288 int release_resource(struct resource *old) 289 { 290 int retval; 291 292 write_lock(&resource_lock); 293 retval = __release_resource(old, true); 294 write_unlock(&resource_lock); 295 return retval; 296 } 297 298 EXPORT_SYMBOL(release_resource); 299 300 static bool is_type_match(struct resource *p, unsigned long flags, unsigned long desc) 301 { 302 return (p->flags & flags) == flags && (desc == IORES_DESC_NONE || desc == p->desc); 303 } 304 305 /** 306 * find_next_iomem_res - Finds the lowest iomem resource that covers part of 307 * [@start..@end]. 308 * 309 * If a resource is found, returns 0 and @*res is overwritten with the part 310 * of the resource that's within [@start..@end]; if none is found, returns 311 * -ENODEV. Returns -EINVAL for invalid parameters. 312 * 313 * @start: start address of the resource searched for 314 * @end: end address of same resource 315 * @flags: flags which the resource must have 316 * @desc: descriptor the resource must have 317 * @res: return ptr, if resource found 318 * 319 * The caller must specify @start, @end, @flags, and @desc 320 * (which may be IORES_DESC_NONE). 321 */ 322 static int find_next_iomem_res(resource_size_t start, resource_size_t end, 323 unsigned long flags, unsigned long desc, 324 struct resource *res) 325 { 326 struct resource *p; 327 328 if (!res) 329 return -EINVAL; 330 331 if (start >= end) 332 return -EINVAL; 333 334 read_lock(&resource_lock); 335 336 for_each_resource(&iomem_resource, p, false) { 337 /* If we passed the resource we are looking for, stop */ 338 if (p->start > end) { 339 p = NULL; 340 break; 341 } 342 343 /* Skip until we find a range that matches what we look for */ 344 if (p->end < start) 345 continue; 346 347 /* Found a match, break */ 348 if (is_type_match(p, flags, desc)) 349 break; 350 } 351 352 if (p) { 353 /* copy data */ 354 *res = (struct resource) { 355 .start = max(start, p->start), 356 .end = min(end, p->end), 357 .flags = p->flags, 358 .desc = p->desc, 359 .parent = p->parent, 360 }; 361 } 362 363 read_unlock(&resource_lock); 364 return p ? 0 : -ENODEV; 365 } 366 367 static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end, 368 unsigned long flags, unsigned long desc, 369 void *arg, 370 int (*func)(struct resource *, void *)) 371 { 372 struct resource res; 373 int ret = -EINVAL; 374 375 while (start < end && 376 !find_next_iomem_res(start, end, flags, desc, &res)) { 377 ret = (*func)(&res, arg); 378 if (ret) 379 break; 380 381 start = res.end + 1; 382 } 383 384 return ret; 385 } 386 387 /** 388 * walk_iomem_res_desc - Walks through iomem resources and calls func() 389 * with matching resource ranges. 390 * * 391 * @desc: I/O resource descriptor. Use IORES_DESC_NONE to skip @desc check. 392 * @flags: I/O resource flags 393 * @start: start addr 394 * @end: end addr 395 * @arg: function argument for the callback @func 396 * @func: callback function that is called for each qualifying resource area 397 * 398 * All the memory ranges which overlap start,end and also match flags and 399 * desc are valid candidates. 400 * 401 * NOTE: For a new descriptor search, define a new IORES_DESC in 402 * <linux/ioport.h> and set it in 'desc' of a target resource entry. 403 */ 404 int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start, 405 u64 end, void *arg, int (*func)(struct resource *, void *)) 406 { 407 return __walk_iomem_res_desc(start, end, flags, desc, arg, func); 408 } 409 EXPORT_SYMBOL_GPL(walk_iomem_res_desc); 410 411 /* 412 * This function calls the @func callback against all memory ranges of type 413 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOUCE_BUSY. 414 * Now, this function is only for System RAM, it deals with full ranges and 415 * not PFNs. If resources are not PFN-aligned, dealing with PFNs can truncate 416 * ranges. 417 */ 418 int walk_system_ram_res(u64 start, u64 end, void *arg, 419 int (*func)(struct resource *, void *)) 420 { 421 unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; 422 423 return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg, 424 func); 425 } 426 427 /* 428 * This function, being a variant of walk_system_ram_res(), calls the @func 429 * callback against all memory ranges of type System RAM which are marked as 430 * IORESOURCE_SYSTEM_RAM and IORESOUCE_BUSY in reversed order, i.e., from 431 * higher to lower. 432 */ 433 int walk_system_ram_res_rev(u64 start, u64 end, void *arg, 434 int (*func)(struct resource *, void *)) 435 { 436 struct resource res, *rams; 437 int rams_size = 16, i; 438 unsigned long flags; 439 int ret = -1; 440 441 /* create a list */ 442 rams = kvcalloc(rams_size, sizeof(struct resource), GFP_KERNEL); 443 if (!rams) 444 return ret; 445 446 flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; 447 i = 0; 448 while ((start < end) && 449 (!find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res))) { 450 if (i >= rams_size) { 451 /* re-alloc */ 452 struct resource *rams_new; 453 454 rams_new = kvrealloc(rams, (rams_size + 16) * sizeof(struct resource), 455 GFP_KERNEL); 456 if (!rams_new) 457 goto out; 458 459 rams = rams_new; 460 rams_size += 16; 461 } 462 463 rams[i++] = res; 464 start = res.end + 1; 465 } 466 467 /* go reverse */ 468 for (i--; i >= 0; i--) { 469 ret = (*func)(&rams[i], arg); 470 if (ret) 471 break; 472 } 473 474 out: 475 kvfree(rams); 476 return ret; 477 } 478 479 /* 480 * This function calls the @func callback against all memory ranges, which 481 * are ranges marked as IORESOURCE_MEM and IORESOUCE_BUSY. 482 */ 483 int walk_mem_res(u64 start, u64 end, void *arg, 484 int (*func)(struct resource *, void *)) 485 { 486 unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY; 487 488 return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg, 489 func); 490 } 491 492 /* 493 * This function calls the @func callback against all memory ranges of type 494 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOUCE_BUSY. 495 * It is to be used only for System RAM. 496 */ 497 int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, 498 void *arg, int (*func)(unsigned long, unsigned long, void *)) 499 { 500 resource_size_t start, end; 501 unsigned long flags; 502 struct resource res; 503 unsigned long pfn, end_pfn; 504 int ret = -EINVAL; 505 506 start = (u64) start_pfn << PAGE_SHIFT; 507 end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1; 508 flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; 509 while (start < end && 510 !find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res)) { 511 pfn = PFN_UP(res.start); 512 end_pfn = PFN_DOWN(res.end + 1); 513 if (end_pfn > pfn) 514 ret = (*func)(pfn, end_pfn - pfn, arg); 515 if (ret) 516 break; 517 start = res.end + 1; 518 } 519 return ret; 520 } 521 522 static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg) 523 { 524 return 1; 525 } 526 527 /* 528 * This generic page_is_ram() returns true if specified address is 529 * registered as System RAM in iomem_resource list. 530 */ 531 int __weak page_is_ram(unsigned long pfn) 532 { 533 return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1; 534 } 535 EXPORT_SYMBOL_GPL(page_is_ram); 536 537 static int __region_intersects(struct resource *parent, resource_size_t start, 538 size_t size, unsigned long flags, 539 unsigned long desc) 540 { 541 int type = 0; int other = 0; 542 struct resource *p, *dp; 543 struct resource res, o; 544 bool covered; 545 546 res.start = start; 547 res.end = start + size - 1; 548 549 for (p = parent->child; p ; p = p->sibling) { 550 if (!resource_intersection(p, &res, &o)) 551 continue; 552 if (is_type_match(p, flags, desc)) { 553 type++; 554 continue; 555 } 556 /* 557 * Continue to search in descendant resources as if the 558 * matched descendant resources cover some ranges of 'p'. 559 * 560 * |------------- "CXL Window 0" ------------| 561 * |-- "System RAM" --| 562 * 563 * will behave similar as the following fake resource 564 * tree when searching "System RAM". 565 * 566 * |-- "System RAM" --||-- "CXL Window 0a" --| 567 */ 568 covered = false; 569 for_each_resource(p, dp, false) { 570 if (!resource_overlaps(dp, &res)) 571 continue; 572 if (is_type_match(dp, flags, desc)) { 573 type++; 574 /* 575 * Range from 'o.start' to 'dp->start' 576 * isn't covered by matched resource. 577 */ 578 if (dp->start > o.start) 579 break; 580 if (dp->end >= o.end) { 581 covered = true; 582 break; 583 } 584 /* Remove covered range */ 585 o.start = max(o.start, dp->end + 1); 586 } 587 } 588 if (!covered) 589 other++; 590 } 591 592 if (type == 0) 593 return REGION_DISJOINT; 594 595 if (other == 0) 596 return REGION_INTERSECTS; 597 598 return REGION_MIXED; 599 } 600 601 /** 602 * region_intersects() - determine intersection of region with known resources 603 * @start: region start address 604 * @size: size of region 605 * @flags: flags of resource (in iomem_resource) 606 * @desc: descriptor of resource (in iomem_resource) or IORES_DESC_NONE 607 * 608 * Check if the specified region partially overlaps or fully eclipses a 609 * resource identified by @flags and @desc (optional with IORES_DESC_NONE). 610 * Return REGION_DISJOINT if the region does not overlap @flags/@desc, 611 * return REGION_MIXED if the region overlaps @flags/@desc and another 612 * resource, and return REGION_INTERSECTS if the region overlaps @flags/@desc 613 * and no other defined resource. Note that REGION_INTERSECTS is also 614 * returned in the case when the specified region overlaps RAM and undefined 615 * memory holes. 616 * 617 * region_intersect() is used by memory remapping functions to ensure 618 * the user is not remapping RAM and is a vast speed up over walking 619 * through the resource table page by page. 620 */ 621 int region_intersects(resource_size_t start, size_t size, unsigned long flags, 622 unsigned long desc) 623 { 624 int ret; 625 626 read_lock(&resource_lock); 627 ret = __region_intersects(&iomem_resource, start, size, flags, desc); 628 read_unlock(&resource_lock); 629 630 return ret; 631 } 632 EXPORT_SYMBOL_GPL(region_intersects); 633 634 void __weak arch_remove_reservations(struct resource *avail) 635 { 636 } 637 638 static void resource_clip(struct resource *res, resource_size_t min, 639 resource_size_t max) 640 { 641 if (res->start < min) 642 res->start = min; 643 if (res->end > max) 644 res->end = max; 645 } 646 647 /* 648 * Find empty space in the resource tree with the given range and 649 * alignment constraints 650 */ 651 static int __find_resource_space(struct resource *root, struct resource *old, 652 struct resource *new, resource_size_t size, 653 struct resource_constraint *constraint) 654 { 655 struct resource *this = root->child; 656 struct resource tmp = *new, avail, alloc; 657 resource_alignf alignf = constraint->alignf; 658 659 tmp.start = root->start; 660 /* 661 * Skip past an allocated resource that starts at 0, since the assignment 662 * of this->start - 1 to tmp->end below would cause an underflow. 663 */ 664 if (this && this->start == root->start) { 665 tmp.start = (this == old) ? old->start : this->end + 1; 666 this = this->sibling; 667 } 668 for(;;) { 669 if (this) 670 tmp.end = (this == old) ? this->end : this->start - 1; 671 else 672 tmp.end = root->end; 673 674 if (tmp.end < tmp.start) 675 goto next; 676 677 resource_clip(&tmp, constraint->min, constraint->max); 678 arch_remove_reservations(&tmp); 679 680 /* Check for overflow after ALIGN() */ 681 avail.start = ALIGN(tmp.start, constraint->align); 682 avail.end = tmp.end; 683 avail.flags = new->flags & ~IORESOURCE_UNSET; 684 if (avail.start >= tmp.start) { 685 alloc.flags = avail.flags; 686 if (alignf) { 687 alloc.start = alignf(constraint->alignf_data, 688 &avail, size, constraint->align); 689 } else { 690 alloc.start = avail.start; 691 } 692 alloc.end = alloc.start + size - 1; 693 if (alloc.start <= alloc.end && 694 resource_contains(&avail, &alloc)) { 695 new->start = alloc.start; 696 new->end = alloc.end; 697 return 0; 698 } 699 } 700 701 next: if (!this || this->end == root->end) 702 break; 703 704 if (this != old) 705 tmp.start = this->end + 1; 706 this = this->sibling; 707 } 708 return -EBUSY; 709 } 710 711 /** 712 * find_resource_space - Find empty space in the resource tree 713 * @root: Root resource descriptor 714 * @new: Resource descriptor awaiting an empty resource space 715 * @size: The minimum size of the empty space 716 * @constraint: The range and alignment constraints to be met 717 * 718 * Finds an empty space under @root in the resource tree satisfying range and 719 * alignment @constraints. 720 * 721 * Return: 722 * * %0 - if successful, @new members start, end, and flags are altered. 723 * * %-EBUSY - if no empty space was found. 724 */ 725 int find_resource_space(struct resource *root, struct resource *new, 726 resource_size_t size, 727 struct resource_constraint *constraint) 728 { 729 return __find_resource_space(root, NULL, new, size, constraint); 730 } 731 EXPORT_SYMBOL_GPL(find_resource_space); 732 733 /** 734 * reallocate_resource - allocate a slot in the resource tree given range & alignment. 735 * The resource will be relocated if the new size cannot be reallocated in the 736 * current location. 737 * 738 * @root: root resource descriptor 739 * @old: resource descriptor desired by caller 740 * @newsize: new size of the resource descriptor 741 * @constraint: the memory range and alignment constraints to be met. 742 */ 743 static int reallocate_resource(struct resource *root, struct resource *old, 744 resource_size_t newsize, 745 struct resource_constraint *constraint) 746 { 747 int err=0; 748 struct resource new = *old; 749 struct resource *conflict; 750 751 write_lock(&resource_lock); 752 753 if ((err = __find_resource_space(root, old, &new, newsize, constraint))) 754 goto out; 755 756 if (resource_contains(&new, old)) { 757 old->start = new.start; 758 old->end = new.end; 759 goto out; 760 } 761 762 if (old->child) { 763 err = -EBUSY; 764 goto out; 765 } 766 767 if (resource_contains(old, &new)) { 768 old->start = new.start; 769 old->end = new.end; 770 } else { 771 __release_resource(old, true); 772 *old = new; 773 conflict = __request_resource(root, old); 774 BUG_ON(conflict); 775 } 776 out: 777 write_unlock(&resource_lock); 778 return err; 779 } 780 781 782 /** 783 * allocate_resource - allocate empty slot in the resource tree given range & alignment. 784 * The resource will be reallocated with a new size if it was already allocated 785 * @root: root resource descriptor 786 * @new: resource descriptor desired by caller 787 * @size: requested resource region size 788 * @min: minimum boundary to allocate 789 * @max: maximum boundary to allocate 790 * @align: alignment requested, in bytes 791 * @alignf: alignment function, optional, called if not NULL 792 * @alignf_data: arbitrary data to pass to the @alignf function 793 */ 794 int allocate_resource(struct resource *root, struct resource *new, 795 resource_size_t size, resource_size_t min, 796 resource_size_t max, resource_size_t align, 797 resource_alignf alignf, 798 void *alignf_data) 799 { 800 int err; 801 struct resource_constraint constraint; 802 803 constraint.min = min; 804 constraint.max = max; 805 constraint.align = align; 806 constraint.alignf = alignf; 807 constraint.alignf_data = alignf_data; 808 809 if ( new->parent ) { 810 /* resource is already allocated, try reallocating with 811 the new constraints */ 812 return reallocate_resource(root, new, size, &constraint); 813 } 814 815 write_lock(&resource_lock); 816 err = find_resource_space(root, new, size, &constraint); 817 if (err >= 0 && __request_resource(root, new)) 818 err = -EBUSY; 819 write_unlock(&resource_lock); 820 return err; 821 } 822 823 EXPORT_SYMBOL(allocate_resource); 824 825 /** 826 * lookup_resource - find an existing resource by a resource start address 827 * @root: root resource descriptor 828 * @start: resource start address 829 * 830 * Returns a pointer to the resource if found, NULL otherwise 831 */ 832 struct resource *lookup_resource(struct resource *root, resource_size_t start) 833 { 834 struct resource *res; 835 836 read_lock(&resource_lock); 837 for (res = root->child; res; res = res->sibling) { 838 if (res->start == start) 839 break; 840 } 841 read_unlock(&resource_lock); 842 843 return res; 844 } 845 846 /* 847 * Insert a resource into the resource tree. If successful, return NULL, 848 * otherwise return the conflicting resource (compare to __request_resource()) 849 */ 850 static struct resource * __insert_resource(struct resource *parent, struct resource *new) 851 { 852 struct resource *first, *next; 853 854 for (;; parent = first) { 855 first = __request_resource(parent, new); 856 if (!first) 857 return first; 858 859 if (first == parent) 860 return first; 861 if (WARN_ON(first == new)) /* duplicated insertion */ 862 return first; 863 864 if ((first->start > new->start) || (first->end < new->end)) 865 break; 866 if ((first->start == new->start) && (first->end == new->end)) 867 break; 868 } 869 870 for (next = first; ; next = next->sibling) { 871 /* Partial overlap? Bad, and unfixable */ 872 if (next->start < new->start || next->end > new->end) 873 return next; 874 if (!next->sibling) 875 break; 876 if (next->sibling->start > new->end) 877 break; 878 } 879 880 new->parent = parent; 881 new->sibling = next->sibling; 882 new->child = first; 883 884 next->sibling = NULL; 885 for (next = first; next; next = next->sibling) 886 next->parent = new; 887 888 if (parent->child == first) { 889 parent->child = new; 890 } else { 891 next = parent->child; 892 while (next->sibling != first) 893 next = next->sibling; 894 next->sibling = new; 895 } 896 return NULL; 897 } 898 899 /** 900 * insert_resource_conflict - Inserts resource in the resource tree 901 * @parent: parent of the new resource 902 * @new: new resource to insert 903 * 904 * Returns 0 on success, conflict resource if the resource can't be inserted. 905 * 906 * This function is equivalent to request_resource_conflict when no conflict 907 * happens. If a conflict happens, and the conflicting resources 908 * entirely fit within the range of the new resource, then the new 909 * resource is inserted and the conflicting resources become children of 910 * the new resource. 911 * 912 * This function is intended for producers of resources, such as FW modules 913 * and bus drivers. 914 */ 915 struct resource *insert_resource_conflict(struct resource *parent, struct resource *new) 916 { 917 struct resource *conflict; 918 919 write_lock(&resource_lock); 920 conflict = __insert_resource(parent, new); 921 write_unlock(&resource_lock); 922 return conflict; 923 } 924 925 /** 926 * insert_resource - Inserts a resource in the resource tree 927 * @parent: parent of the new resource 928 * @new: new resource to insert 929 * 930 * Returns 0 on success, -EBUSY if the resource can't be inserted. 931 * 932 * This function is intended for producers of resources, such as FW modules 933 * and bus drivers. 934 */ 935 int insert_resource(struct resource *parent, struct resource *new) 936 { 937 struct resource *conflict; 938 939 conflict = insert_resource_conflict(parent, new); 940 return conflict ? -EBUSY : 0; 941 } 942 EXPORT_SYMBOL_GPL(insert_resource); 943 944 /** 945 * insert_resource_expand_to_fit - Insert a resource into the resource tree 946 * @root: root resource descriptor 947 * @new: new resource to insert 948 * 949 * Insert a resource into the resource tree, possibly expanding it in order 950 * to make it encompass any conflicting resources. 951 */ 952 void insert_resource_expand_to_fit(struct resource *root, struct resource *new) 953 { 954 if (new->parent) 955 return; 956 957 write_lock(&resource_lock); 958 for (;;) { 959 struct resource *conflict; 960 961 conflict = __insert_resource(root, new); 962 if (!conflict) 963 break; 964 if (conflict == root) 965 break; 966 967 /* Ok, expand resource to cover the conflict, then try again .. */ 968 if (conflict->start < new->start) 969 new->start = conflict->start; 970 if (conflict->end > new->end) 971 new->end = conflict->end; 972 973 pr_info("Expanded resource %s due to conflict with %s\n", new->name, conflict->name); 974 } 975 write_unlock(&resource_lock); 976 } 977 /* 978 * Not for general consumption, only early boot memory map parsing, PCI 979 * resource discovery, and late discovery of CXL resources are expected 980 * to use this interface. The former are built-in and only the latter, 981 * CXL, is a module. 982 */ 983 EXPORT_SYMBOL_NS_GPL(insert_resource_expand_to_fit, CXL); 984 985 /** 986 * remove_resource - Remove a resource in the resource tree 987 * @old: resource to remove 988 * 989 * Returns 0 on success, -EINVAL if the resource is not valid. 990 * 991 * This function removes a resource previously inserted by insert_resource() 992 * or insert_resource_conflict(), and moves the children (if any) up to 993 * where they were before. insert_resource() and insert_resource_conflict() 994 * insert a new resource, and move any conflicting resources down to the 995 * children of the new resource. 996 * 997 * insert_resource(), insert_resource_conflict() and remove_resource() are 998 * intended for producers of resources, such as FW modules and bus drivers. 999 */ 1000 int remove_resource(struct resource *old) 1001 { 1002 int retval; 1003 1004 write_lock(&resource_lock); 1005 retval = __release_resource(old, false); 1006 write_unlock(&resource_lock); 1007 return retval; 1008 } 1009 EXPORT_SYMBOL_GPL(remove_resource); 1010 1011 static int __adjust_resource(struct resource *res, resource_size_t start, 1012 resource_size_t size) 1013 { 1014 struct resource *tmp, *parent = res->parent; 1015 resource_size_t end = start + size - 1; 1016 int result = -EBUSY; 1017 1018 if (!parent) 1019 goto skip; 1020 1021 if ((start < parent->start) || (end > parent->end)) 1022 goto out; 1023 1024 if (res->sibling && (res->sibling->start <= end)) 1025 goto out; 1026 1027 tmp = parent->child; 1028 if (tmp != res) { 1029 while (tmp->sibling != res) 1030 tmp = tmp->sibling; 1031 if (start <= tmp->end) 1032 goto out; 1033 } 1034 1035 skip: 1036 for (tmp = res->child; tmp; tmp = tmp->sibling) 1037 if ((tmp->start < start) || (tmp->end > end)) 1038 goto out; 1039 1040 res->start = start; 1041 res->end = end; 1042 result = 0; 1043 1044 out: 1045 return result; 1046 } 1047 1048 /** 1049 * adjust_resource - modify a resource's start and size 1050 * @res: resource to modify 1051 * @start: new start value 1052 * @size: new size 1053 * 1054 * Given an existing resource, change its start and size to match the 1055 * arguments. Returns 0 on success, -EBUSY if it can't fit. 1056 * Existing children of the resource are assumed to be immutable. 1057 */ 1058 int adjust_resource(struct resource *res, resource_size_t start, 1059 resource_size_t size) 1060 { 1061 int result; 1062 1063 write_lock(&resource_lock); 1064 result = __adjust_resource(res, start, size); 1065 write_unlock(&resource_lock); 1066 return result; 1067 } 1068 EXPORT_SYMBOL(adjust_resource); 1069 1070 static void __init 1071 __reserve_region_with_split(struct resource *root, resource_size_t start, 1072 resource_size_t end, const char *name) 1073 { 1074 struct resource *parent = root; 1075 struct resource *conflict; 1076 struct resource *res = alloc_resource(GFP_ATOMIC); 1077 struct resource *next_res = NULL; 1078 int type = resource_type(root); 1079 1080 if (!res) 1081 return; 1082 1083 res->name = name; 1084 res->start = start; 1085 res->end = end; 1086 res->flags = type | IORESOURCE_BUSY; 1087 res->desc = IORES_DESC_NONE; 1088 1089 while (1) { 1090 1091 conflict = __request_resource(parent, res); 1092 if (!conflict) { 1093 if (!next_res) 1094 break; 1095 res = next_res; 1096 next_res = NULL; 1097 continue; 1098 } 1099 1100 /* conflict covered whole area */ 1101 if (conflict->start <= res->start && 1102 conflict->end >= res->end) { 1103 free_resource(res); 1104 WARN_ON(next_res); 1105 break; 1106 } 1107 1108 /* failed, split and try again */ 1109 if (conflict->start > res->start) { 1110 end = res->end; 1111 res->end = conflict->start - 1; 1112 if (conflict->end < end) { 1113 next_res = alloc_resource(GFP_ATOMIC); 1114 if (!next_res) { 1115 free_resource(res); 1116 break; 1117 } 1118 next_res->name = name; 1119 next_res->start = conflict->end + 1; 1120 next_res->end = end; 1121 next_res->flags = type | IORESOURCE_BUSY; 1122 next_res->desc = IORES_DESC_NONE; 1123 } 1124 } else { 1125 res->start = conflict->end + 1; 1126 } 1127 } 1128 1129 } 1130 1131 void __init 1132 reserve_region_with_split(struct resource *root, resource_size_t start, 1133 resource_size_t end, const char *name) 1134 { 1135 int abort = 0; 1136 1137 write_lock(&resource_lock); 1138 if (root->start > start || root->end < end) { 1139 pr_err("requested range [0x%llx-0x%llx] not in root %pr\n", 1140 (unsigned long long)start, (unsigned long long)end, 1141 root); 1142 if (start > root->end || end < root->start) 1143 abort = 1; 1144 else { 1145 if (end > root->end) 1146 end = root->end; 1147 if (start < root->start) 1148 start = root->start; 1149 pr_err("fixing request to [0x%llx-0x%llx]\n", 1150 (unsigned long long)start, 1151 (unsigned long long)end); 1152 } 1153 dump_stack(); 1154 } 1155 if (!abort) 1156 __reserve_region_with_split(root, start, end, name); 1157 write_unlock(&resource_lock); 1158 } 1159 1160 /** 1161 * resource_alignment - calculate resource's alignment 1162 * @res: resource pointer 1163 * 1164 * Returns alignment on success, 0 (invalid alignment) on failure. 1165 */ 1166 resource_size_t resource_alignment(struct resource *res) 1167 { 1168 switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) { 1169 case IORESOURCE_SIZEALIGN: 1170 return resource_size(res); 1171 case IORESOURCE_STARTALIGN: 1172 return res->start; 1173 default: 1174 return 0; 1175 } 1176 } 1177 1178 /* 1179 * This is compatibility stuff for IO resources. 1180 * 1181 * Note how this, unlike the above, knows about 1182 * the IO flag meanings (busy etc). 1183 * 1184 * request_region creates a new busy region. 1185 * 1186 * release_region releases a matching busy region. 1187 */ 1188 1189 static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait); 1190 1191 static struct inode *iomem_inode; 1192 1193 #ifdef CONFIG_IO_STRICT_DEVMEM 1194 static void revoke_iomem(struct resource *res) 1195 { 1196 /* pairs with smp_store_release() in iomem_init_inode() */ 1197 struct inode *inode = smp_load_acquire(&iomem_inode); 1198 1199 /* 1200 * Check that the initialization has completed. Losing the race 1201 * is ok because it means drivers are claiming resources before 1202 * the fs_initcall level of init and prevent iomem_get_mapping users 1203 * from establishing mappings. 1204 */ 1205 if (!inode) 1206 return; 1207 1208 /* 1209 * The expectation is that the driver has successfully marked 1210 * the resource busy by this point, so devmem_is_allowed() 1211 * should start returning false, however for performance this 1212 * does not iterate the entire resource range. 1213 */ 1214 if (devmem_is_allowed(PHYS_PFN(res->start)) && 1215 devmem_is_allowed(PHYS_PFN(res->end))) { 1216 /* 1217 * *cringe* iomem=relaxed says "go ahead, what's the 1218 * worst that can happen?" 1219 */ 1220 return; 1221 } 1222 1223 unmap_mapping_range(inode->i_mapping, res->start, resource_size(res), 1); 1224 } 1225 #else 1226 static void revoke_iomem(struct resource *res) {} 1227 #endif 1228 1229 struct address_space *iomem_get_mapping(void) 1230 { 1231 /* 1232 * This function is only called from file open paths, hence guaranteed 1233 * that fs_initcalls have completed and no need to check for NULL. But 1234 * since revoke_iomem can be called before the initcall we still need 1235 * the barrier to appease checkers. 1236 */ 1237 return smp_load_acquire(&iomem_inode)->i_mapping; 1238 } 1239 1240 static int __request_region_locked(struct resource *res, struct resource *parent, 1241 resource_size_t start, resource_size_t n, 1242 const char *name, int flags) 1243 { 1244 DECLARE_WAITQUEUE(wait, current); 1245 1246 res->name = name; 1247 res->start = start; 1248 res->end = start + n - 1; 1249 1250 for (;;) { 1251 struct resource *conflict; 1252 1253 res->flags = resource_type(parent) | resource_ext_type(parent); 1254 res->flags |= IORESOURCE_BUSY | flags; 1255 res->desc = parent->desc; 1256 1257 conflict = __request_resource(parent, res); 1258 if (!conflict) 1259 break; 1260 /* 1261 * mm/hmm.c reserves physical addresses which then 1262 * become unavailable to other users. Conflicts are 1263 * not expected. Warn to aid debugging if encountered. 1264 */ 1265 if (conflict->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY) { 1266 pr_warn("Unaddressable device %s %pR conflicts with %pR", 1267 conflict->name, conflict, res); 1268 } 1269 if (conflict != parent) { 1270 if (!(conflict->flags & IORESOURCE_BUSY)) { 1271 parent = conflict; 1272 continue; 1273 } 1274 } 1275 if (conflict->flags & flags & IORESOURCE_MUXED) { 1276 add_wait_queue(&muxed_resource_wait, &wait); 1277 write_unlock(&resource_lock); 1278 set_current_state(TASK_UNINTERRUPTIBLE); 1279 schedule(); 1280 remove_wait_queue(&muxed_resource_wait, &wait); 1281 write_lock(&resource_lock); 1282 continue; 1283 } 1284 /* Uhhuh, that didn't work out.. */ 1285 return -EBUSY; 1286 } 1287 1288 return 0; 1289 } 1290 1291 /** 1292 * __request_region - create a new busy resource region 1293 * @parent: parent resource descriptor 1294 * @start: resource start address 1295 * @n: resource region size 1296 * @name: reserving caller's ID string 1297 * @flags: IO resource flags 1298 */ 1299 struct resource *__request_region(struct resource *parent, 1300 resource_size_t start, resource_size_t n, 1301 const char *name, int flags) 1302 { 1303 struct resource *res = alloc_resource(GFP_KERNEL); 1304 int ret; 1305 1306 if (!res) 1307 return NULL; 1308 1309 write_lock(&resource_lock); 1310 ret = __request_region_locked(res, parent, start, n, name, flags); 1311 write_unlock(&resource_lock); 1312 1313 if (ret) { 1314 free_resource(res); 1315 return NULL; 1316 } 1317 1318 if (parent == &iomem_resource) 1319 revoke_iomem(res); 1320 1321 return res; 1322 } 1323 EXPORT_SYMBOL(__request_region); 1324 1325 /** 1326 * __release_region - release a previously reserved resource region 1327 * @parent: parent resource descriptor 1328 * @start: resource start address 1329 * @n: resource region size 1330 * 1331 * The described resource region must match a currently busy region. 1332 */ 1333 void __release_region(struct resource *parent, resource_size_t start, 1334 resource_size_t n) 1335 { 1336 struct resource **p; 1337 resource_size_t end; 1338 1339 p = &parent->child; 1340 end = start + n - 1; 1341 1342 write_lock(&resource_lock); 1343 1344 for (;;) { 1345 struct resource *res = *p; 1346 1347 if (!res) 1348 break; 1349 if (res->start <= start && res->end >= end) { 1350 if (!(res->flags & IORESOURCE_BUSY)) { 1351 p = &res->child; 1352 continue; 1353 } 1354 if (res->start != start || res->end != end) 1355 break; 1356 *p = res->sibling; 1357 write_unlock(&resource_lock); 1358 if (res->flags & IORESOURCE_MUXED) 1359 wake_up(&muxed_resource_wait); 1360 free_resource(res); 1361 return; 1362 } 1363 p = &res->sibling; 1364 } 1365 1366 write_unlock(&resource_lock); 1367 1368 pr_warn("Trying to free nonexistent resource <%pa-%pa>\n", &start, &end); 1369 } 1370 EXPORT_SYMBOL(__release_region); 1371 1372 #ifdef CONFIG_MEMORY_HOTREMOVE 1373 /** 1374 * release_mem_region_adjustable - release a previously reserved memory region 1375 * @start: resource start address 1376 * @size: resource region size 1377 * 1378 * This interface is intended for memory hot-delete. The requested region 1379 * is released from a currently busy memory resource. The requested region 1380 * must either match exactly or fit into a single busy resource entry. In 1381 * the latter case, the remaining resource is adjusted accordingly. 1382 * Existing children of the busy memory resource must be immutable in the 1383 * request. 1384 * 1385 * Note: 1386 * - Additional release conditions, such as overlapping region, can be 1387 * supported after they are confirmed as valid cases. 1388 * - When a busy memory resource gets split into two entries, the code 1389 * assumes that all children remain in the lower address entry for 1390 * simplicity. Enhance this logic when necessary. 1391 */ 1392 void release_mem_region_adjustable(resource_size_t start, resource_size_t size) 1393 { 1394 struct resource *parent = &iomem_resource; 1395 struct resource *new_res = NULL; 1396 bool alloc_nofail = false; 1397 struct resource **p; 1398 struct resource *res; 1399 resource_size_t end; 1400 1401 end = start + size - 1; 1402 if (WARN_ON_ONCE((start < parent->start) || (end > parent->end))) 1403 return; 1404 1405 /* 1406 * We free up quite a lot of memory on memory hotunplug (esp., memap), 1407 * just before releasing the region. This is highly unlikely to 1408 * fail - let's play save and make it never fail as the caller cannot 1409 * perform any error handling (e.g., trying to re-add memory will fail 1410 * similarly). 1411 */ 1412 retry: 1413 new_res = alloc_resource(GFP_KERNEL | (alloc_nofail ? __GFP_NOFAIL : 0)); 1414 1415 p = &parent->child; 1416 write_lock(&resource_lock); 1417 1418 while ((res = *p)) { 1419 if (res->start >= end) 1420 break; 1421 1422 /* look for the next resource if it does not fit into */ 1423 if (res->start > start || res->end < end) { 1424 p = &res->sibling; 1425 continue; 1426 } 1427 1428 if (!(res->flags & IORESOURCE_MEM)) 1429 break; 1430 1431 if (!(res->flags & IORESOURCE_BUSY)) { 1432 p = &res->child; 1433 continue; 1434 } 1435 1436 /* found the target resource; let's adjust accordingly */ 1437 if (res->start == start && res->end == end) { 1438 /* free the whole entry */ 1439 *p = res->sibling; 1440 free_resource(res); 1441 } else if (res->start == start && res->end != end) { 1442 /* adjust the start */ 1443 WARN_ON_ONCE(__adjust_resource(res, end + 1, 1444 res->end - end)); 1445 } else if (res->start != start && res->end == end) { 1446 /* adjust the end */ 1447 WARN_ON_ONCE(__adjust_resource(res, res->start, 1448 start - res->start)); 1449 } else { 1450 /* split into two entries - we need a new resource */ 1451 if (!new_res) { 1452 new_res = alloc_resource(GFP_ATOMIC); 1453 if (!new_res) { 1454 alloc_nofail = true; 1455 write_unlock(&resource_lock); 1456 goto retry; 1457 } 1458 } 1459 new_res->name = res->name; 1460 new_res->start = end + 1; 1461 new_res->end = res->end; 1462 new_res->flags = res->flags; 1463 new_res->desc = res->desc; 1464 new_res->parent = res->parent; 1465 new_res->sibling = res->sibling; 1466 new_res->child = NULL; 1467 1468 if (WARN_ON_ONCE(__adjust_resource(res, res->start, 1469 start - res->start))) 1470 break; 1471 res->sibling = new_res; 1472 new_res = NULL; 1473 } 1474 1475 break; 1476 } 1477 1478 write_unlock(&resource_lock); 1479 free_resource(new_res); 1480 } 1481 #endif /* CONFIG_MEMORY_HOTREMOVE */ 1482 1483 #ifdef CONFIG_MEMORY_HOTPLUG 1484 static bool system_ram_resources_mergeable(struct resource *r1, 1485 struct resource *r2) 1486 { 1487 /* We assume either r1 or r2 is IORESOURCE_SYSRAM_MERGEABLE. */ 1488 return r1->flags == r2->flags && r1->end + 1 == r2->start && 1489 r1->name == r2->name && r1->desc == r2->desc && 1490 !r1->child && !r2->child; 1491 } 1492 1493 /** 1494 * merge_system_ram_resource - mark the System RAM resource mergeable and try to 1495 * merge it with adjacent, mergeable resources 1496 * @res: resource descriptor 1497 * 1498 * This interface is intended for memory hotplug, whereby lots of contiguous 1499 * system ram resources are added (e.g., via add_memory*()) by a driver, and 1500 * the actual resource boundaries are not of interest (e.g., it might be 1501 * relevant for DIMMs). Only resources that are marked mergeable, that have the 1502 * same parent, and that don't have any children are considered. All mergeable 1503 * resources must be immutable during the request. 1504 * 1505 * Note: 1506 * - The caller has to make sure that no pointers to resources that are 1507 * marked mergeable are used anymore after this call - the resource might 1508 * be freed and the pointer might be stale! 1509 * - release_mem_region_adjustable() will split on demand on memory hotunplug 1510 */ 1511 void merge_system_ram_resource(struct resource *res) 1512 { 1513 const unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; 1514 struct resource *cur; 1515 1516 if (WARN_ON_ONCE((res->flags & flags) != flags)) 1517 return; 1518 1519 write_lock(&resource_lock); 1520 res->flags |= IORESOURCE_SYSRAM_MERGEABLE; 1521 1522 /* Try to merge with next item in the list. */ 1523 cur = res->sibling; 1524 if (cur && system_ram_resources_mergeable(res, cur)) { 1525 res->end = cur->end; 1526 res->sibling = cur->sibling; 1527 free_resource(cur); 1528 } 1529 1530 /* Try to merge with previous item in the list. */ 1531 cur = res->parent->child; 1532 while (cur && cur->sibling != res) 1533 cur = cur->sibling; 1534 if (cur && system_ram_resources_mergeable(cur, res)) { 1535 cur->end = res->end; 1536 cur->sibling = res->sibling; 1537 free_resource(res); 1538 } 1539 write_unlock(&resource_lock); 1540 } 1541 #endif /* CONFIG_MEMORY_HOTPLUG */ 1542 1543 /* 1544 * Managed region resource 1545 */ 1546 static void devm_resource_release(struct device *dev, void *ptr) 1547 { 1548 struct resource **r = ptr; 1549 1550 release_resource(*r); 1551 } 1552 1553 /** 1554 * devm_request_resource() - request and reserve an I/O or memory resource 1555 * @dev: device for which to request the resource 1556 * @root: root of the resource tree from which to request the resource 1557 * @new: descriptor of the resource to request 1558 * 1559 * This is a device-managed version of request_resource(). There is usually 1560 * no need to release resources requested by this function explicitly since 1561 * that will be taken care of when the device is unbound from its driver. 1562 * If for some reason the resource needs to be released explicitly, because 1563 * of ordering issues for example, drivers must call devm_release_resource() 1564 * rather than the regular release_resource(). 1565 * 1566 * When a conflict is detected between any existing resources and the newly 1567 * requested resource, an error message will be printed. 1568 * 1569 * Returns 0 on success or a negative error code on failure. 1570 */ 1571 int devm_request_resource(struct device *dev, struct resource *root, 1572 struct resource *new) 1573 { 1574 struct resource *conflict, **ptr; 1575 1576 ptr = devres_alloc(devm_resource_release, sizeof(*ptr), GFP_KERNEL); 1577 if (!ptr) 1578 return -ENOMEM; 1579 1580 *ptr = new; 1581 1582 conflict = request_resource_conflict(root, new); 1583 if (conflict) { 1584 dev_err(dev, "resource collision: %pR conflicts with %s %pR\n", 1585 new, conflict->name, conflict); 1586 devres_free(ptr); 1587 return -EBUSY; 1588 } 1589 1590 devres_add(dev, ptr); 1591 return 0; 1592 } 1593 EXPORT_SYMBOL(devm_request_resource); 1594 1595 static int devm_resource_match(struct device *dev, void *res, void *data) 1596 { 1597 struct resource **ptr = res; 1598 1599 return *ptr == data; 1600 } 1601 1602 /** 1603 * devm_release_resource() - release a previously requested resource 1604 * @dev: device for which to release the resource 1605 * @new: descriptor of the resource to release 1606 * 1607 * Releases a resource previously requested using devm_request_resource(). 1608 */ 1609 void devm_release_resource(struct device *dev, struct resource *new) 1610 { 1611 WARN_ON(devres_release(dev, devm_resource_release, devm_resource_match, 1612 new)); 1613 } 1614 EXPORT_SYMBOL(devm_release_resource); 1615 1616 struct region_devres { 1617 struct resource *parent; 1618 resource_size_t start; 1619 resource_size_t n; 1620 }; 1621 1622 static void devm_region_release(struct device *dev, void *res) 1623 { 1624 struct region_devres *this = res; 1625 1626 __release_region(this->parent, this->start, this->n); 1627 } 1628 1629 static int devm_region_match(struct device *dev, void *res, void *match_data) 1630 { 1631 struct region_devres *this = res, *match = match_data; 1632 1633 return this->parent == match->parent && 1634 this->start == match->start && this->n == match->n; 1635 } 1636 1637 struct resource * 1638 __devm_request_region(struct device *dev, struct resource *parent, 1639 resource_size_t start, resource_size_t n, const char *name) 1640 { 1641 struct region_devres *dr = NULL; 1642 struct resource *res; 1643 1644 dr = devres_alloc(devm_region_release, sizeof(struct region_devres), 1645 GFP_KERNEL); 1646 if (!dr) 1647 return NULL; 1648 1649 dr->parent = parent; 1650 dr->start = start; 1651 dr->n = n; 1652 1653 res = __request_region(parent, start, n, name, 0); 1654 if (res) 1655 devres_add(dev, dr); 1656 else 1657 devres_free(dr); 1658 1659 return res; 1660 } 1661 EXPORT_SYMBOL(__devm_request_region); 1662 1663 void __devm_release_region(struct device *dev, struct resource *parent, 1664 resource_size_t start, resource_size_t n) 1665 { 1666 struct region_devres match_data = { parent, start, n }; 1667 1668 __release_region(parent, start, n); 1669 WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match, 1670 &match_data)); 1671 } 1672 EXPORT_SYMBOL(__devm_release_region); 1673 1674 /* 1675 * Reserve I/O ports or memory based on "reserve=" kernel parameter. 1676 */ 1677 #define MAXRESERVE 4 1678 static int __init reserve_setup(char *str) 1679 { 1680 static int reserved; 1681 static struct resource reserve[MAXRESERVE]; 1682 1683 for (;;) { 1684 unsigned int io_start, io_num; 1685 int x = reserved; 1686 struct resource *parent; 1687 1688 if (get_option(&str, &io_start) != 2) 1689 break; 1690 if (get_option(&str, &io_num) == 0) 1691 break; 1692 if (x < MAXRESERVE) { 1693 struct resource *res = reserve + x; 1694 1695 /* 1696 * If the region starts below 0x10000, we assume it's 1697 * I/O port space; otherwise assume it's memory. 1698 */ 1699 if (io_start < 0x10000) { 1700 res->flags = IORESOURCE_IO; 1701 parent = &ioport_resource; 1702 } else { 1703 res->flags = IORESOURCE_MEM; 1704 parent = &iomem_resource; 1705 } 1706 res->name = "reserved"; 1707 res->start = io_start; 1708 res->end = io_start + io_num - 1; 1709 res->flags |= IORESOURCE_BUSY; 1710 res->desc = IORES_DESC_NONE; 1711 res->child = NULL; 1712 if (request_resource(parent, res) == 0) 1713 reserved = x+1; 1714 } 1715 } 1716 return 1; 1717 } 1718 __setup("reserve=", reserve_setup); 1719 1720 /* 1721 * Check if the requested addr and size spans more than any slot in the 1722 * iomem resource tree. 1723 */ 1724 int iomem_map_sanity_check(resource_size_t addr, unsigned long size) 1725 { 1726 resource_size_t end = addr + size - 1; 1727 struct resource *p; 1728 int err = 0; 1729 1730 read_lock(&resource_lock); 1731 for_each_resource(&iomem_resource, p, false) { 1732 /* 1733 * We can probably skip the resources without 1734 * IORESOURCE_IO attribute? 1735 */ 1736 if (p->start > end) 1737 continue; 1738 if (p->end < addr) 1739 continue; 1740 if (PFN_DOWN(p->start) <= PFN_DOWN(addr) && 1741 PFN_DOWN(p->end) >= PFN_DOWN(end)) 1742 continue; 1743 /* 1744 * if a resource is "BUSY", it's not a hardware resource 1745 * but a driver mapping of such a resource; we don't want 1746 * to warn for those; some drivers legitimately map only 1747 * partial hardware resources. (example: vesafb) 1748 */ 1749 if (p->flags & IORESOURCE_BUSY) 1750 continue; 1751 1752 pr_warn("resource sanity check: requesting [mem %pa-%pa], which spans more than %s %pR\n", 1753 &addr, &end, p->name, p); 1754 err = -1; 1755 break; 1756 } 1757 read_unlock(&resource_lock); 1758 1759 return err; 1760 } 1761 1762 #ifdef CONFIG_STRICT_DEVMEM 1763 static int strict_iomem_checks = 1; 1764 #else 1765 static int strict_iomem_checks; 1766 #endif 1767 1768 /* 1769 * Check if an address is exclusive to the kernel and must not be mapped to 1770 * user space, for example, via /dev/mem. 1771 * 1772 * Returns true if exclusive to the kernel, otherwise returns false. 1773 */ 1774 bool resource_is_exclusive(struct resource *root, u64 addr, resource_size_t size) 1775 { 1776 const unsigned int exclusive_system_ram = IORESOURCE_SYSTEM_RAM | 1777 IORESOURCE_EXCLUSIVE; 1778 bool skip_children = false, err = false; 1779 struct resource *p; 1780 1781 read_lock(&resource_lock); 1782 for_each_resource(root, p, skip_children) { 1783 if (p->start >= addr + size) 1784 break; 1785 if (p->end < addr) { 1786 skip_children = true; 1787 continue; 1788 } 1789 skip_children = false; 1790 1791 /* 1792 * IORESOURCE_SYSTEM_RAM resources are exclusive if 1793 * IORESOURCE_EXCLUSIVE is set, even if they 1794 * are not busy and even if "iomem=relaxed" is set. The 1795 * responsible driver dynamically adds/removes system RAM within 1796 * such an area and uncontrolled access is dangerous. 1797 */ 1798 if ((p->flags & exclusive_system_ram) == exclusive_system_ram) { 1799 err = true; 1800 break; 1801 } 1802 1803 /* 1804 * A resource is exclusive if IORESOURCE_EXCLUSIVE is set 1805 * or CONFIG_IO_STRICT_DEVMEM is enabled and the 1806 * resource is busy. 1807 */ 1808 if (!strict_iomem_checks || !(p->flags & IORESOURCE_BUSY)) 1809 continue; 1810 if (IS_ENABLED(CONFIG_IO_STRICT_DEVMEM) 1811 || p->flags & IORESOURCE_EXCLUSIVE) { 1812 err = true; 1813 break; 1814 } 1815 } 1816 read_unlock(&resource_lock); 1817 1818 return err; 1819 } 1820 1821 bool iomem_is_exclusive(u64 addr) 1822 { 1823 return resource_is_exclusive(&iomem_resource, addr & PAGE_MASK, 1824 PAGE_SIZE); 1825 } 1826 1827 struct resource_entry *resource_list_create_entry(struct resource *res, 1828 size_t extra_size) 1829 { 1830 struct resource_entry *entry; 1831 1832 entry = kzalloc(sizeof(*entry) + extra_size, GFP_KERNEL); 1833 if (entry) { 1834 INIT_LIST_HEAD(&entry->node); 1835 entry->res = res ? res : &entry->__res; 1836 } 1837 1838 return entry; 1839 } 1840 EXPORT_SYMBOL(resource_list_create_entry); 1841 1842 void resource_list_free(struct list_head *head) 1843 { 1844 struct resource_entry *entry, *tmp; 1845 1846 list_for_each_entry_safe(entry, tmp, head, node) 1847 resource_list_destroy_entry(entry); 1848 } 1849 EXPORT_SYMBOL(resource_list_free); 1850 1851 #ifdef CONFIG_GET_FREE_REGION 1852 #define GFR_DESCENDING (1UL << 0) 1853 #define GFR_REQUEST_REGION (1UL << 1) 1854 #ifdef PA_SECTION_SHIFT 1855 #define GFR_DEFAULT_ALIGN (1UL << PA_SECTION_SHIFT) 1856 #else 1857 #define GFR_DEFAULT_ALIGN PAGE_SIZE 1858 #endif 1859 1860 static resource_size_t gfr_start(struct resource *base, resource_size_t size, 1861 resource_size_t align, unsigned long flags) 1862 { 1863 if (flags & GFR_DESCENDING) { 1864 resource_size_t end; 1865 1866 end = min_t(resource_size_t, base->end, PHYSMEM_END); 1867 return end - size + 1; 1868 } 1869 1870 return ALIGN(max(base->start, align), align); 1871 } 1872 1873 static bool gfr_continue(struct resource *base, resource_size_t addr, 1874 resource_size_t size, unsigned long flags) 1875 { 1876 if (flags & GFR_DESCENDING) 1877 return addr > size && addr >= base->start; 1878 /* 1879 * In the ascend case be careful that the last increment by 1880 * @size did not wrap 0. 1881 */ 1882 return addr > addr - size && 1883 addr <= min_t(resource_size_t, base->end, PHYSMEM_END); 1884 } 1885 1886 static resource_size_t gfr_next(resource_size_t addr, resource_size_t size, 1887 unsigned long flags) 1888 { 1889 if (flags & GFR_DESCENDING) 1890 return addr - size; 1891 return addr + size; 1892 } 1893 1894 static void remove_free_mem_region(void *_res) 1895 { 1896 struct resource *res = _res; 1897 1898 if (res->parent) 1899 remove_resource(res); 1900 free_resource(res); 1901 } 1902 1903 static struct resource * 1904 get_free_mem_region(struct device *dev, struct resource *base, 1905 resource_size_t size, const unsigned long align, 1906 const char *name, const unsigned long desc, 1907 const unsigned long flags) 1908 { 1909 resource_size_t addr; 1910 struct resource *res; 1911 struct region_devres *dr = NULL; 1912 1913 size = ALIGN(size, align); 1914 1915 res = alloc_resource(GFP_KERNEL); 1916 if (!res) 1917 return ERR_PTR(-ENOMEM); 1918 1919 if (dev && (flags & GFR_REQUEST_REGION)) { 1920 dr = devres_alloc(devm_region_release, 1921 sizeof(struct region_devres), GFP_KERNEL); 1922 if (!dr) { 1923 free_resource(res); 1924 return ERR_PTR(-ENOMEM); 1925 } 1926 } else if (dev) { 1927 if (devm_add_action_or_reset(dev, remove_free_mem_region, res)) 1928 return ERR_PTR(-ENOMEM); 1929 } 1930 1931 write_lock(&resource_lock); 1932 for (addr = gfr_start(base, size, align, flags); 1933 gfr_continue(base, addr, align, flags); 1934 addr = gfr_next(addr, align, flags)) { 1935 if (__region_intersects(base, addr, size, 0, IORES_DESC_NONE) != 1936 REGION_DISJOINT) 1937 continue; 1938 1939 if (flags & GFR_REQUEST_REGION) { 1940 if (__request_region_locked(res, &iomem_resource, addr, 1941 size, name, 0)) 1942 break; 1943 1944 if (dev) { 1945 dr->parent = &iomem_resource; 1946 dr->start = addr; 1947 dr->n = size; 1948 devres_add(dev, dr); 1949 } 1950 1951 res->desc = desc; 1952 write_unlock(&resource_lock); 1953 1954 1955 /* 1956 * A driver is claiming this region so revoke any 1957 * mappings. 1958 */ 1959 revoke_iomem(res); 1960 } else { 1961 res->start = addr; 1962 res->end = addr + size - 1; 1963 res->name = name; 1964 res->desc = desc; 1965 res->flags = IORESOURCE_MEM; 1966 1967 /* 1968 * Only succeed if the resource hosts an exclusive 1969 * range after the insert 1970 */ 1971 if (__insert_resource(base, res) || res->child) 1972 break; 1973 1974 write_unlock(&resource_lock); 1975 } 1976 1977 return res; 1978 } 1979 write_unlock(&resource_lock); 1980 1981 if (flags & GFR_REQUEST_REGION) { 1982 free_resource(res); 1983 devres_free(dr); 1984 } else if (dev) 1985 devm_release_action(dev, remove_free_mem_region, res); 1986 1987 return ERR_PTR(-ERANGE); 1988 } 1989 1990 /** 1991 * devm_request_free_mem_region - find free region for device private memory 1992 * 1993 * @dev: device struct to bind the resource to 1994 * @size: size in bytes of the device memory to add 1995 * @base: resource tree to look in 1996 * 1997 * This function tries to find an empty range of physical address big enough to 1998 * contain the new resource, so that it can later be hotplugged as ZONE_DEVICE 1999 * memory, which in turn allocates struct pages. 2000 */ 2001 struct resource *devm_request_free_mem_region(struct device *dev, 2002 struct resource *base, unsigned long size) 2003 { 2004 unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION; 2005 2006 return get_free_mem_region(dev, base, size, GFR_DEFAULT_ALIGN, 2007 dev_name(dev), 2008 IORES_DESC_DEVICE_PRIVATE_MEMORY, flags); 2009 } 2010 EXPORT_SYMBOL_GPL(devm_request_free_mem_region); 2011 2012 struct resource *request_free_mem_region(struct resource *base, 2013 unsigned long size, const char *name) 2014 { 2015 unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION; 2016 2017 return get_free_mem_region(NULL, base, size, GFR_DEFAULT_ALIGN, name, 2018 IORES_DESC_DEVICE_PRIVATE_MEMORY, flags); 2019 } 2020 EXPORT_SYMBOL_GPL(request_free_mem_region); 2021 2022 /** 2023 * alloc_free_mem_region - find a free region relative to @base 2024 * @base: resource that will parent the new resource 2025 * @size: size in bytes of memory to allocate from @base 2026 * @align: alignment requirements for the allocation 2027 * @name: resource name 2028 * 2029 * Buses like CXL, that can dynamically instantiate new memory regions, 2030 * need a method to allocate physical address space for those regions. 2031 * Allocate and insert a new resource to cover a free, unclaimed by a 2032 * descendant of @base, range in the span of @base. 2033 */ 2034 struct resource *alloc_free_mem_region(struct resource *base, 2035 unsigned long size, unsigned long align, 2036 const char *name) 2037 { 2038 /* Default of ascending direction and insert resource */ 2039 unsigned long flags = 0; 2040 2041 return get_free_mem_region(NULL, base, size, align, name, 2042 IORES_DESC_NONE, flags); 2043 } 2044 EXPORT_SYMBOL_GPL(alloc_free_mem_region); 2045 #endif /* CONFIG_GET_FREE_REGION */ 2046 2047 static int __init strict_iomem(char *str) 2048 { 2049 if (strstr(str, "relaxed")) 2050 strict_iomem_checks = 0; 2051 if (strstr(str, "strict")) 2052 strict_iomem_checks = 1; 2053 return 1; 2054 } 2055 2056 static int iomem_fs_init_fs_context(struct fs_context *fc) 2057 { 2058 return init_pseudo(fc, DEVMEM_MAGIC) ? 0 : -ENOMEM; 2059 } 2060 2061 static struct file_system_type iomem_fs_type = { 2062 .name = "iomem", 2063 .owner = THIS_MODULE, 2064 .init_fs_context = iomem_fs_init_fs_context, 2065 .kill_sb = kill_anon_super, 2066 }; 2067 2068 static int __init iomem_init_inode(void) 2069 { 2070 static struct vfsmount *iomem_vfs_mount; 2071 static int iomem_fs_cnt; 2072 struct inode *inode; 2073 int rc; 2074 2075 rc = simple_pin_fs(&iomem_fs_type, &iomem_vfs_mount, &iomem_fs_cnt); 2076 if (rc < 0) { 2077 pr_err("Cannot mount iomem pseudo filesystem: %d\n", rc); 2078 return rc; 2079 } 2080 2081 inode = alloc_anon_inode(iomem_vfs_mount->mnt_sb); 2082 if (IS_ERR(inode)) { 2083 rc = PTR_ERR(inode); 2084 pr_err("Cannot allocate inode for iomem: %d\n", rc); 2085 simple_release_fs(&iomem_vfs_mount, &iomem_fs_cnt); 2086 return rc; 2087 } 2088 2089 /* 2090 * Publish iomem revocation inode initialized. 2091 * Pairs with smp_load_acquire() in revoke_iomem(). 2092 */ 2093 smp_store_release(&iomem_inode, inode); 2094 2095 return 0; 2096 } 2097 2098 fs_initcall(iomem_init_inode); 2099 2100 __setup("iomem=", strict_iomem); 2101