// SPDX-License-Identifier: GPL-2.0-only
/*
 *	linux/kernel/resource.c
 *
 * Copyright (C) 1999	Linus Torvalds
 * Copyright (C) 1999	Martin Mares <mj@ucw.cz>
 *
 * Arbitrary resource management.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/pseudo_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/resource_ext.h>
#include <uapi/linux/magic.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <asm/io.h>


struct resource ioport_resource = {
	.name	= "PCI IO",
	.start	= 0,
	.end	= IO_SPACE_LIMIT,
	.flags	= IORESOURCE_IO,
};
EXPORT_SYMBOL(ioport_resource);

struct resource iomem_resource = {
	.name	= "PCI mem",
	.start	= 0,
	.end	= -1,
	.flags	= IORESOURCE_MEM,
};
EXPORT_SYMBOL(iomem_resource);

static DEFINE_RWLOCK(resource_lock);

static struct resource *next_resource(struct resource *p, bool skip_children)
{
	if (!skip_children && p->child)
		return p->child;
	while (!p->sibling && p->parent)
		p = p->parent;
	return p->sibling;
}

#define for_each_resource(_root, _p, _skip_children) \
	for ((_p) = (_root)->child; (_p); (_p) = next_resource(_p, _skip_children))

#ifdef CONFIG_PROC_FS

enum { MAX_IORES_LEVEL = 5 };

static void *r_start(struct seq_file *m, loff_t *pos)
	__acquires(resource_lock)
{
	struct resource *root = pde_data(file_inode(m->file));
	struct resource *p;
	loff_t l = *pos;

	read_lock(&resource_lock);
	for_each_resource(root, p, false) {
		if (l-- == 0)
			break;
	}

	return p;
}

static void *r_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct resource *p = v;

	(*pos)++;

	return (void *)next_resource(p, false);
}

static void r_stop(struct seq_file *m, void *v)
	__releases(resource_lock)
{
	read_unlock(&resource_lock);
}

static int r_show(struct seq_file *m, void *v)
{
	struct resource *root = pde_data(file_inode(m->file));
	struct resource *r = v, *p;
	unsigned long long start, end;
	int width = root->end < 0x10000 ? 4 : 8;
	int depth;

	for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
		if (p->parent == root)
			break;

	if (file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN)) {
		start = r->start;
		end = r->end;
	} else {
		start = end = 0;
	}

	seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
			depth * 2, "",
			width, start,
			width, end,
			r->name ? r->name : "<BAD>");
	return 0;
}

static const struct seq_operations resource_op = {
	.start	= r_start,
	.next	= r_next,
	.stop	= r_stop,
	.show	= r_show,
};

static int __init ioresources_init(void)
{
	proc_create_seq_data("ioports", 0, NULL, &resource_op,
			     &ioport_resource);
	proc_create_seq_data("iomem", 0, NULL, &resource_op, &iomem_resource);
	return 0;
}
__initcall(ioresources_init);

#endif /* CONFIG_PROC_FS */

static void free_resource(struct resource *res)
{
	/*
	 * If the resource was allocated using memblock early during boot
	 * we'll leak it here: we can only return full pages back to the
	 * buddy allocator, and trying to be smart and reuse them eventually
	 * in alloc_resource() would overcomplicate resource handling.
	 */
	if (res && PageSlab(virt_to_head_page(res)))
		kfree(res);
}

static struct resource *alloc_resource(gfp_t flags)
{
	return kzalloc(sizeof(struct resource), flags);
}

/* Return the conflict entry if you can't request it */
static struct resource * __request_resource(struct resource *root, struct resource *new)
{
	resource_size_t start = new->start;
	resource_size_t end = new->end;
	struct resource *tmp, **p;

	if (end < start)
		return root;
	if (start < root->start)
		return root;
	if (end > root->end)
		return root;
	p = &root->child;
	for (;;) {
		tmp = *p;
		if (!tmp || tmp->start > end) {
			new->sibling = tmp;
			*p = new;
			new->parent = root;
			return NULL;
		}
		p = &tmp->sibling;
		if (tmp->end < start)
			continue;
		return tmp;
	}
}

static int __release_resource(struct resource *old, bool release_child)
{
	struct resource *tmp, **p, *chd;

	p = &old->parent->child;
	for (;;) {
		tmp = *p;
		if (!tmp)
			break;
		if (tmp == old) {
			if (release_child || !(tmp->child)) {
				*p = tmp->sibling;
			} else {
				for (chd = tmp->child;; chd = chd->sibling) {
					chd->parent = tmp->parent;
					if (!(chd->sibling))
						break;
				}
				*p = tmp->child;
				chd->sibling = tmp->sibling;
			}
			old->parent = NULL;
			return 0;
		}
		p = &tmp->sibling;
	}
	return -EINVAL;
}

static void __release_child_resources(struct resource *r)
{
	struct resource *tmp, *p;
	resource_size_t size;

	p = r->child;
	r->child = NULL;
	while (p) {
		tmp = p;
		p = p->sibling;

		tmp->parent = NULL;
		tmp->sibling = NULL;
		__release_child_resources(tmp);

		printk(KERN_DEBUG "release child resource %pR\n", tmp);
		/* need to restore size, and keep flags */
		size = resource_size(tmp);
		tmp->start = 0;
		tmp->end = size - 1;
	}
}

void release_child_resources(struct resource *r)
{
	write_lock(&resource_lock);
	__release_child_resources(r);
	write_unlock(&resource_lock);
}

/**
 * request_resource_conflict - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns NULL on success, or a pointer to the conflicting resource on
 * error.
 */
struct resource *request_resource_conflict(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __request_resource(root, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * request_resource - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns 0 for success, negative error code on error.
 */
int request_resource(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	conflict = request_resource_conflict(root, new);
	return conflict ? -EBUSY : 0;
}
EXPORT_SYMBOL(request_resource);

/**
 * release_resource - release a previously reserved resource
 * @old: resource pointer
 *
 * Returns 0 on success, -EINVAL if @old was not found in the tree.
 */
int release_resource(struct resource *old)
{
	int retval;

	write_lock(&resource_lock);
	retval = __release_resource(old, true);
	write_unlock(&resource_lock);
	return retval;
}
EXPORT_SYMBOL(release_resource);
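
/*
 * Example (an illustrative sketch, not part of this file; the name and
 * addresses are made up): a driver that owns a fixed MMIO window can
 * reserve it directly under iomem_resource:
 *
 *	static struct resource foo_res =
 *		DEFINE_RES_MEM_NAMED(0xfed40000, 0x80, "foo");
 *
 *	if (request_resource(&iomem_resource, &foo_res))
 *		return -EBUSY;
 *	...
 *	release_resource(&foo_res);
 */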

/**
 * find_next_iomem_res - Finds the lowest iomem resource that covers part of
 *			 [@start..@end].
 * @start: start address of the resource searched for
 * @end: end address of same resource
 * @flags: flags which the resource must have
 * @desc: descriptor the resource must have
 * @res: return ptr, if resource found
 *
 * If a resource is found, returns 0 and @*res is overwritten with the part
 * of the resource that's within [@start..@end]; if none is found, returns
 * -ENODEV. Returns -EINVAL for invalid parameters.
 *
 * The caller must specify @start, @end, @flags, and @desc
 * (which may be IORES_DESC_NONE).
 */
static int find_next_iomem_res(resource_size_t start, resource_size_t end,
			       unsigned long flags, unsigned long desc,
			       struct resource *res)
{
	struct resource *p;

	if (!res)
		return -EINVAL;

	if (start >= end)
		return -EINVAL;

	read_lock(&resource_lock);

	for_each_resource(&iomem_resource, p, false) {
		/* If we passed the resource we are looking for, stop */
		if (p->start > end) {
			p = NULL;
			break;
		}

		/* Skip until we find a range that matches what we look for */
		if (p->end < start)
			continue;

		if ((p->flags & flags) != flags)
			continue;
		if ((desc != IORES_DESC_NONE) && (desc != p->desc))
			continue;

		/* Found a match, break */
		break;
	}

	if (p) {
		/* copy data */
		*res = (struct resource) {
			.start = max(start, p->start),
			.end = min(end, p->end),
			.flags = p->flags,
			.desc = p->desc,
			.parent = p->parent,
		};
	}

	read_unlock(&resource_lock);
	return p ? 0 : -ENODEV;
}

static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
				 unsigned long flags, unsigned long desc,
				 void *arg,
				 int (*func)(struct resource *, void *))
{
	struct resource res;
	int ret = -EINVAL;

	while (start < end &&
	       !find_next_iomem_res(start, end, flags, desc, &res)) {
		ret = (*func)(&res, arg);
		if (ret)
			break;

		start = res.end + 1;
	}

	return ret;
}

/**
 * walk_iomem_res_desc - Walks through iomem resources and calls func()
 *			 with matching resource ranges.
 * @desc: I/O resource descriptor. Use IORES_DESC_NONE to skip @desc check.
 * @flags: I/O resource flags
 * @start: start addr
 * @end: end addr
 * @arg: function argument for the callback @func
 * @func: callback function that is called for each qualifying resource area
 *
 * All memory ranges that overlap [@start..@end] and also match @flags and
 * @desc are valid candidates.
 *
 * NOTE: For a new descriptor search, define a new IORES_DESC in
 * <linux/ioport.h> and set it in 'desc' of a target resource entry.
 */
int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
			u64 end, void *arg, int (*func)(struct resource *, void *))
{
	return __walk_iomem_res_desc(start, end, flags, desc, arg, func);
}
EXPORT_SYMBOL_GPL(walk_iomem_res_desc);

/*
 * This function calls the @func callback against all memory ranges of type
 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
 * This function deals with full ranges and not PFNs. If resources are not
 * PFN-aligned, dealing with PFNs can truncate ranges.
 */
int walk_system_ram_res(u64 start, u64 end, void *arg,
			int (*func)(struct resource *, void *))
{
	unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg,
				     func);
}

/*
 * This function, being a variant of walk_system_ram_res(), calls the @func
 * callback against all memory ranges of type System RAM which are marked as
 * IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY in reversed order, i.e., from
 * higher to lower.
 */
int walk_system_ram_res_rev(u64 start, u64 end, void *arg,
			    int (*func)(struct resource *, void *))
{
	struct resource res, *rams;
	int rams_size = 16, i;
	unsigned long flags;
	int ret = -1;

	/* create a list */
	rams = kvcalloc(rams_size, sizeof(struct resource), GFP_KERNEL);
	if (!rams)
		return ret;

	flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	i = 0;
	while ((start < end) &&
	       (!find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res))) {
		if (i >= rams_size) {
			/* re-alloc */
			struct resource *rams_new;

			rams_new = kvrealloc(rams, (rams_size + 16) * sizeof(struct resource),
					     GFP_KERNEL);
			if (!rams_new)
				goto out;

			rams = rams_new;
			rams_size += 16;
		}

		rams[i].start = res.start;
		rams[i++].end = res.end;

		start = res.end + 1;
	}

	/* go reverse */
	for (i--; i >= 0; i--) {
		ret = (*func)(&rams[i], arg);
		if (ret)
			break;
	}

out:
	kvfree(rams);
	return ret;
}

/*
 * This function calls the @func callback against all memory ranges marked
 * as IORESOURCE_MEM and IORESOURCE_BUSY.
 */
int walk_mem_res(u64 start, u64 end, void *arg,
		 int (*func)(struct resource *, void *))
{
	unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;

	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg,
				     func);
}
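
/*
 * Example (an illustrative sketch; "count_ram_bytes" is a made-up
 * callback): kexec-style code walks busy System RAM ranges like this;
 * returning a nonzero value from the callback stops the walk:
 *
 *	static int count_ram_bytes(struct resource *res, void *arg)
 *	{
 *		*(u64 *)arg += resource_size(res);
 *		return 0;
 *	}
 *
 *	u64 bytes = 0;
 *
 *	walk_system_ram_res(0, ULLONG_MAX, &bytes, count_ram_bytes);
 */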

/*
 * This function calls the @func callback against all memory ranges of type
 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
 * It is to be used only for System RAM.
 */
int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
			  void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	resource_size_t start, end;
	unsigned long flags;
	struct resource res;
	unsigned long pfn, end_pfn;
	int ret = -EINVAL;

	start = (u64) start_pfn << PAGE_SHIFT;
	end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
	flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	while (start < end &&
	       !find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res)) {
		pfn = PFN_UP(res.start);
		end_pfn = PFN_DOWN(res.end + 1);
		if (end_pfn > pfn)
			ret = (*func)(pfn, end_pfn - pfn, arg);
		if (ret)
			break;
		start = res.end + 1;
	}
	return ret;
}

static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	return 1;
}

/*
 * This generic page_is_ram() returns true if the specified address is
 * registered as System RAM in the iomem_resource list.
 */
int __weak page_is_ram(unsigned long pfn)
{
	return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
}
EXPORT_SYMBOL_GPL(page_is_ram);

static int __region_intersects(struct resource *parent, resource_size_t start,
			       size_t size, unsigned long flags,
			       unsigned long desc)
{
	resource_size_t ostart, oend;
	int type = 0, other = 0;
	struct resource *p, *dp;
	bool is_type, covered;
	struct resource res;

	res.start = start;
	res.end = start + size - 1;

	for (p = parent->child; p ; p = p->sibling) {
		if (!resource_overlaps(p, &res))
			continue;
		is_type = (p->flags & flags) == flags &&
			(desc == IORES_DESC_NONE || desc == p->desc);
		if (is_type) {
			type++;
			continue;
		}
		/*
		 * Continue the search in descendant resources, since the
		 * matching descendant resources may cover only part of 'p'.
		 *
		 * |------------- "CXL Window 0" ------------|
		 * |-- "System RAM" --|
		 *
		 * will behave similarly to the following fake resource
		 * tree when searching for "System RAM".
		 *
		 * |-- "System RAM" --||-- "CXL Window 0a" --|
		 */
		covered = false;
		ostart = max(res.start, p->start);
		oend = min(res.end, p->end);
		for_each_resource(p, dp, false) {
			if (!resource_overlaps(dp, &res))
				continue;
			is_type = (dp->flags & flags) == flags &&
				(desc == IORES_DESC_NONE || desc == dp->desc);
			if (is_type) {
				type++;
				/*
				 * Range from 'ostart' to 'dp->start'
				 * isn't covered by matched resource.
				 */
				if (dp->start > ostart)
					break;
				if (dp->end >= oend) {
					covered = true;
					break;
				}
				/* Remove covered range */
				ostart = max(ostart, dp->end + 1);
			}
		}
		if (!covered)
			other++;
	}

	if (type == 0)
		return REGION_DISJOINT;

	if (other == 0)
		return REGION_INTERSECTS;

	return REGION_MIXED;
}

/**
 * region_intersects() - determine intersection of region with known resources
 * @start: region start address
 * @size: size of region
 * @flags: flags of resource (in iomem_resource)
 * @desc: descriptor of resource (in iomem_resource) or IORES_DESC_NONE
 *
 * Check if the specified region partially overlaps or fully eclipses a
 * resource identified by @flags and @desc (optional with IORES_DESC_NONE).
 * Return REGION_DISJOINT if the region does not overlap @flags/@desc,
 * return REGION_MIXED if the region overlaps @flags/@desc and another
 * resource, and return REGION_INTERSECTS if the region overlaps @flags/@desc
 * and no other defined resource. Note that REGION_INTERSECTS is also
 * returned in the case when the specified region overlaps RAM and undefined
 * memory holes.
 *
 * region_intersects() is used by memory remapping functions to ensure
 * the user is not remapping RAM; it is a vast speedup over walking
 * through the resource table page by page.
 */
int region_intersects(resource_size_t start, size_t size, unsigned long flags,
		      unsigned long desc)
{
	int ret;

	read_lock(&resource_lock);
	ret = __region_intersects(&iomem_resource, start, size, flags, desc);
	read_unlock(&resource_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(region_intersects);
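
/*
 * Example (an illustrative sketch; "offset" and "size" are hypothetical
 * caller variables): remapping code can use this to refuse treating
 * ordinary RAM as device memory:
 *
 *	if (region_intersects(offset, size, IORESOURCE_SYSTEM_RAM,
 *			      IORES_DESC_NONE) != REGION_DISJOINT)
 *		return ERR_PTR(-EINVAL);
 */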

void __weak arch_remove_reservations(struct resource *avail)
{
}

static void resource_clip(struct resource *res, resource_size_t min,
			  resource_size_t max)
{
	if (res->start < min)
		res->start = min;
	if (res->end > max)
		res->end = max;
}

/*
 * Find empty space in the resource tree with the given range and
 * alignment constraints
 */
static int __find_resource_space(struct resource *root, struct resource *old,
				 struct resource *new, resource_size_t size,
				 struct resource_constraint *constraint)
{
	struct resource *this = root->child;
	struct resource tmp = *new, avail, alloc;
	resource_alignf alignf = constraint->alignf;

	tmp.start = root->start;
	/*
	 * Skip past an allocated resource that starts at 0, since the
	 * assignment of this->start - 1 to tmp->end below would cause an
	 * underflow.
	 */
	if (this && this->start == root->start) {
		tmp.start = (this == old) ? old->start : this->end + 1;
		this = this->sibling;
	}
	for (;;) {
		if (this)
			tmp.end = (this == old) ? this->end : this->start - 1;
		else
			tmp.end = root->end;

		if (tmp.end < tmp.start)
			goto next;

		resource_clip(&tmp, constraint->min, constraint->max);
		arch_remove_reservations(&tmp);

		/* Check for overflow after ALIGN() */
		avail.start = ALIGN(tmp.start, constraint->align);
		avail.end = tmp.end;
		avail.flags = new->flags & ~IORESOURCE_UNSET;
		if (avail.start >= tmp.start) {
			alloc.flags = avail.flags;
			if (alignf) {
				alloc.start = alignf(constraint->alignf_data,
						     &avail, size, constraint->align);
			} else {
				alloc.start = avail.start;
			}
			alloc.end = alloc.start + size - 1;
			if (alloc.start <= alloc.end &&
			    resource_contains(&avail, &alloc)) {
				new->start = alloc.start;
				new->end = alloc.end;
				return 0;
			}
		}

next:		if (!this || this->end == root->end)
			break;

		if (this != old)
			tmp.start = this->end + 1;
		this = this->sibling;
	}
	return -EBUSY;
}

/**
 * find_resource_space - Find empty space in the resource tree
 * @root: Root resource descriptor
 * @new: Resource descriptor awaiting an empty resource space
 * @size: The minimum size of the empty space
 * @constraint: The range and alignment constraints to be met
 *
 * Finds an empty space under @root in the resource tree satisfying the
 * range and alignment @constraint.
 *
 * Return:
 * * %0		- if successful, @new members start, end, and flags are altered.
 * * %-EBUSY	- if no empty space was found.
 */
int find_resource_space(struct resource *root, struct resource *new,
			resource_size_t size,
			struct resource_constraint *constraint)
{
	return __find_resource_space(root, NULL, new, size, constraint);
}
EXPORT_SYMBOL_GPL(find_resource_space);

/**
 * reallocate_resource - allocate a slot in the resource tree given range & alignment.
 *	The resource will be relocated if the new size cannot be
 *	accommodated at the current location.
 *
 * @root: root resource descriptor
 * @old: resource descriptor desired by caller
 * @newsize: new size of the resource descriptor
 * @constraint: the range and alignment constraints to be met.
 */
static int reallocate_resource(struct resource *root, struct resource *old,
			       resource_size_t newsize,
			       struct resource_constraint *constraint)
{
	int err = 0;
	struct resource new = *old;
	struct resource *conflict;

	write_lock(&resource_lock);

	err = __find_resource_space(root, old, &new, newsize, constraint);
	if (err)
		goto out;

	if (resource_contains(&new, old)) {
		old->start = new.start;
		old->end = new.end;
		goto out;
	}

	if (old->child) {
		err = -EBUSY;
		goto out;
	}

	if (resource_contains(old, &new)) {
		old->start = new.start;
		old->end = new.end;
	} else {
		__release_resource(old, true);
		*old = new;
		conflict = __request_resource(root, old);
		BUG_ON(conflict);
	}
out:
	write_unlock(&resource_lock);
	return err;
}


/**
 * allocate_resource - allocate empty slot in the resource tree given range & alignment.
 *	The resource will be reallocated with a new size if it was already
 *	allocated.
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 * @size: requested resource region size
 * @min: minimum boundary to allocate
 * @max: maximum boundary to allocate
 * @align: alignment requested, in bytes
 * @alignf: alignment function, optional, called if not NULL
 * @alignf_data: arbitrary data to pass to the @alignf function
 *
 * Returns 0 on success or a negative error code on failure.
 */
int allocate_resource(struct resource *root, struct resource *new,
		      resource_size_t size, resource_size_t min,
		      resource_size_t max, resource_size_t align,
		      resource_alignf alignf,
		      void *alignf_data)
{
	int err;
	struct resource_constraint constraint;

	constraint.min = min;
	constraint.max = max;
	constraint.align = align;
	constraint.alignf = alignf;
	constraint.alignf_data = alignf_data;

	if (new->parent) {
		/*
		 * Resource is already allocated; try reallocating with
		 * the new constraints.
		 */
		return reallocate_resource(root, new, size, &constraint);
	}

	write_lock(&resource_lock);
	err = find_resource_space(root, new, size, &constraint);
	if (err >= 0 && __request_resource(root, new))
		err = -EBUSY;
	write_unlock(&resource_lock);
	return err;
}
EXPORT_SYMBOL(allocate_resource);
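
/*
 * Example (an illustrative sketch; "bar_res" is made up): ask for any
 * free, 4K-aligned 4K window anywhere in the address space:
 *
 *	static struct resource bar_res = {
 *		.name	= "bar",
 *		.flags	= IORESOURCE_MEM,
 *	};
 *
 *	if (allocate_resource(&iomem_resource, &bar_res, SZ_4K, 0,
 *			      (resource_size_t)-1, SZ_4K, NULL, NULL))
 *		return -EBUSY;
 */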

/**
 * lookup_resource - find an existing resource by a resource start address
 * @root: root resource descriptor
 * @start: resource start address
 *
 * Returns a pointer to the resource if found, NULL otherwise
 */
struct resource *lookup_resource(struct resource *root, resource_size_t start)
{
	struct resource *res;

	read_lock(&resource_lock);
	for (res = root->child; res; res = res->sibling) {
		if (res->start == start)
			break;
	}
	read_unlock(&resource_lock);

	return res;
}

/*
 * Insert a resource into the resource tree. If successful, return NULL,
 * otherwise return the conflicting resource (compare to __request_resource())
 */
static struct resource * __insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *first, *next;

	for (;; parent = first) {
		first = __request_resource(parent, new);
		if (!first)
			return first;

		if (first == parent)
			return first;
		if (WARN_ON(first == new))	/* duplicated insertion */
			return first;

		if ((first->start > new->start) || (first->end < new->end))
			break;
		if ((first->start == new->start) && (first->end == new->end))
			break;
	}

	for (next = first; ; next = next->sibling) {
		/* Partial overlap? Bad, and unfixable */
		if (next->start < new->start || next->end > new->end)
			return next;
		if (!next->sibling)
			break;
		if (next->sibling->start > new->end)
			break;
	}

	new->parent = parent;
	new->sibling = next->sibling;
	new->child = first;

	next->sibling = NULL;
	for (next = first; next; next = next->sibling)
		next->parent = new;

	if (parent->child == first) {
		parent->child = new;
	} else {
		next = parent->child;
		while (next->sibling != first)
			next = next->sibling;
		next->sibling = new;
	}
	return NULL;
}

/**
 * insert_resource_conflict - Inserts resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns NULL on success, or the conflicting resource if the resource
 * can't be inserted.
 *
 * This function is equivalent to request_resource_conflict when no conflict
 * happens. If a conflict happens, and the conflicting resources
 * entirely fit within the range of the new resource, then the new
 * resource is inserted and the conflicting resources become children of
 * the new resource.
 *
 * This function is intended for producers of resources, such as FW modules
 * and bus drivers.
 */
struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __insert_resource(parent, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * insert_resource - Inserts a resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns 0 on success, -EBUSY if the resource can't be inserted.
 *
 * This function is intended for producers of resources, such as FW modules
 * and bus drivers.
 */
int insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	conflict = insert_resource_conflict(parent, new);
	return conflict ? -EBUSY : 0;
}
EXPORT_SYMBOL_GPL(insert_resource);
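
/*
 * Example (an illustrative sketch; the window is made up): firmware code
 * that discovers a bridge window possibly overlapping already-known child
 * resources inserts it instead of requesting it:
 *
 *	static struct resource fw_res =
 *		DEFINE_RES_MEM_NAMED(0x80000000, SZ_64M, "fw-window");
 *
 *	if (insert_resource(&iomem_resource, &fw_res))
 *		pr_warn("fw-window not inserted\n");
 */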

/**
 * insert_resource_expand_to_fit - Insert a resource into the resource tree
 * @root: root resource descriptor
 * @new: new resource to insert
 *
 * Insert a resource into the resource tree, possibly expanding it in order
 * to make it encompass any conflicting resources.
 */
void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
{
	if (new->parent)
		return;

	write_lock(&resource_lock);
	for (;;) {
		struct resource *conflict;

		conflict = __insert_resource(root, new);
		if (!conflict)
			break;
		if (conflict == root)
			break;

		/* Ok, expand resource to cover the conflict, then try again .. */
		if (conflict->start < new->start)
			new->start = conflict->start;
		if (conflict->end > new->end)
			new->end = conflict->end;

		pr_info("Expanded resource %s due to conflict with %s\n",
			new->name, conflict->name);
	}
	write_unlock(&resource_lock);
}
/*
 * Not for general consumption; only early boot memory map parsing, PCI
 * resource discovery, and late discovery of CXL resources are expected
 * to use this interface. The former are built-in and only the latter,
 * CXL, is a module.
 */
EXPORT_SYMBOL_NS_GPL(insert_resource_expand_to_fit, CXL);

/**
 * remove_resource - Remove a resource from the resource tree
 * @old: resource to remove
 *
 * Returns 0 on success, -EINVAL if the resource is not valid.
 *
 * This function removes a resource previously inserted by insert_resource()
 * or insert_resource_conflict(), and moves the children (if any) up to
 * where they were before. insert_resource() and insert_resource_conflict()
 * insert a new resource, and move any conflicting resources down to the
 * children of the new resource.
 *
 * insert_resource(), insert_resource_conflict() and remove_resource() are
 * intended for producers of resources, such as FW modules and bus drivers.
 */
int remove_resource(struct resource *old)
{
	int retval;

	write_lock(&resource_lock);
	retval = __release_resource(old, false);
	write_unlock(&resource_lock);
	return retval;
}
EXPORT_SYMBOL_GPL(remove_resource);

static int __adjust_resource(struct resource *res, resource_size_t start,
			     resource_size_t size)
{
	struct resource *tmp, *parent = res->parent;
	resource_size_t end = start + size - 1;
	int result = -EBUSY;

	if (!parent)
		goto skip;

	if ((start < parent->start) || (end > parent->end))
		goto out;

	if (res->sibling && (res->sibling->start <= end))
		goto out;

	tmp = parent->child;
	if (tmp != res) {
		while (tmp->sibling != res)
			tmp = tmp->sibling;
		if (start <= tmp->end)
			goto out;
	}

skip:
	for (tmp = res->child; tmp; tmp = tmp->sibling)
		if ((tmp->start < start) || (tmp->end > end))
			goto out;

	res->start = start;
	res->end = end;
	result = 0;

out:
	return result;
}
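
/*
 * Example (an illustrative sketch, reusing the hypothetical fw_res from
 * the insert_resource() example above): a producer tears its window down
 * with remove_resource(), which re-parents any children:
 *
 *	if (remove_resource(&fw_res))
 *		pr_warn("fw-window was not in the tree\n");
 */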

/**
 * adjust_resource - modify a resource's start and size
 * @res: resource to modify
 * @start: new start value
 * @size: new size
 *
 * Given an existing resource, change its start and size to match the
 * arguments. Returns 0 on success, -EBUSY if it can't fit.
 * Existing children of the resource are assumed to be immutable.
 */
int adjust_resource(struct resource *res, resource_size_t start,
		    resource_size_t size)
{
	int result;

	write_lock(&resource_lock);
	result = __adjust_resource(res, start, size);
	write_unlock(&resource_lock);
	return result;
}
EXPORT_SYMBOL(adjust_resource);

static void __init
__reserve_region_with_split(struct resource *root, resource_size_t start,
			    resource_size_t end, const char *name)
{
	struct resource *parent = root;
	struct resource *conflict;
	struct resource *res = alloc_resource(GFP_ATOMIC);
	struct resource *next_res = NULL;
	int type = resource_type(root);

	if (!res)
		return;

	res->name = name;
	res->start = start;
	res->end = end;
	res->flags = type | IORESOURCE_BUSY;
	res->desc = IORES_DESC_NONE;

	while (1) {
		conflict = __request_resource(parent, res);
		if (!conflict) {
			if (!next_res)
				break;
			res = next_res;
			next_res = NULL;
			continue;
		}

		/* conflict covered whole area */
		if (conflict->start <= res->start &&
		    conflict->end >= res->end) {
			free_resource(res);
			WARN_ON(next_res);
			break;
		}

		/* failed, split and try again */
		if (conflict->start > res->start) {
			end = res->end;
			res->end = conflict->start - 1;
			if (conflict->end < end) {
				next_res = alloc_resource(GFP_ATOMIC);
				if (!next_res) {
					free_resource(res);
					break;
				}
				next_res->name = name;
				next_res->start = conflict->end + 1;
				next_res->end = end;
				next_res->flags = type | IORESOURCE_BUSY;
				next_res->desc = IORES_DESC_NONE;
			}
		} else {
			res->start = conflict->end + 1;
		}
	}
}

void __init
reserve_region_with_split(struct resource *root, resource_size_t start,
			  resource_size_t end, const char *name)
{
	int abort = 0;

	write_lock(&resource_lock);
	if (root->start > start || root->end < end) {
		pr_err("requested range [0x%llx-0x%llx] not in root %pr\n",
		       (unsigned long long)start, (unsigned long long)end,
		       root);
		if (start > root->end || end < root->start)
			abort = 1;
		else {
			if (end > root->end)
				end = root->end;
			if (start < root->start)
				start = root->start;
			pr_err("fixing request to [0x%llx-0x%llx]\n",
			       (unsigned long long)start,
			       (unsigned long long)end);
		}
		dump_stack();
	}
	if (!abort)
		__reserve_region_with_split(root, start, end, name);
	write_unlock(&resource_lock);
}

/**
 * resource_alignment - calculate resource's alignment
 * @res: resource pointer
 *
 * Returns alignment on success, 0 (invalid alignment) on failure.
 */
resource_size_t resource_alignment(struct resource *res)
{
	switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
	case IORESOURCE_SIZEALIGN:
		return resource_size(res);
	case IORESOURCE_STARTALIGN:
		return res->start;
	default:
		return 0;
	}
}

/*
 * This is compatibility stuff for IO resources.
 *
 * Note how this, unlike the above, knows about
 * the IO flag meanings (busy etc).
 *
 * request_region creates a new busy region.
 *
 * release_region releases a matching busy region.
 */
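
/*
 * For example (illustrative only; the legacy parallel-port base and
 * "mydev" are made up for the sketch):
 *
 *	if (!request_region(0x378, 8, "mydev"))
 *		return -EBUSY;
 *	outb(0, 0x378);
 *	release_region(0x378, 8);
 */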

static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait);

static struct inode *iomem_inode;

#ifdef CONFIG_IO_STRICT_DEVMEM
static void revoke_iomem(struct resource *res)
{
	/* pairs with smp_store_release() in iomem_init_inode() */
	struct inode *inode = smp_load_acquire(&iomem_inode);

	/*
	 * Check that the initialization has completed. Losing the race
	 * is ok because it means drivers are claiming resources before
	 * the fs_initcall level of init, which prevents iomem_get_mapping()
	 * users from establishing mappings.
	 */
	if (!inode)
		return;

	/*
	 * The expectation is that the driver has successfully marked
	 * the resource busy by this point, so devmem_is_allowed()
	 * should start returning false; however, for performance reasons
	 * this check does not iterate the entire resource range.
	 */
	if (devmem_is_allowed(PHYS_PFN(res->start)) &&
	    devmem_is_allowed(PHYS_PFN(res->end))) {
		/*
		 * *cringe* iomem=relaxed says "go ahead, what's the
		 * worst that can happen?"
		 */
		return;
	}

	unmap_mapping_range(inode->i_mapping, res->start, resource_size(res), 1);
}
#else
static void revoke_iomem(struct resource *res) {}
#endif

struct address_space *iomem_get_mapping(void)
{
	/*
	 * This function is only called from file open paths, so it is
	 * guaranteed that fs_initcalls have completed and there is no need
	 * to check for NULL. But since revoke_iomem() can be called before
	 * the initcall, we still need the barrier to appease checkers.
	 */
	return smp_load_acquire(&iomem_inode)->i_mapping;
}

static int __request_region_locked(struct resource *res, struct resource *parent,
				   resource_size_t start, resource_size_t n,
				   const char *name, int flags)
{
	DECLARE_WAITQUEUE(wait, current);

	res->name = name;
	res->start = start;
	res->end = start + n - 1;

	for (;;) {
		struct resource *conflict;

		res->flags = resource_type(parent) | resource_ext_type(parent);
		res->flags |= IORESOURCE_BUSY | flags;
		res->desc = parent->desc;

		conflict = __request_resource(parent, res);
		if (!conflict)
			break;
		/*
		 * mm/hmm.c reserves physical addresses which then
		 * become unavailable to other users. Conflicts are
		 * not expected. Warn to aid debugging if encountered.
		 */
		if (conflict->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY) {
			pr_warn("Unaddressable device %s %pR conflicts with %pR\n",
				conflict->name, conflict, res);
		}
		if (conflict != parent) {
			if (!(conflict->flags & IORESOURCE_BUSY)) {
				parent = conflict;
				continue;
			}
		}
		if (conflict->flags & flags & IORESOURCE_MUXED) {
			add_wait_queue(&muxed_resource_wait, &wait);
			write_unlock(&resource_lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule();
			remove_wait_queue(&muxed_resource_wait, &wait);
			write_lock(&resource_lock);
			continue;
		}
		/* Uhhuh, that didn't work out.. */
		return -EBUSY;
	}

	return 0;
}

/**
 * __request_region - create a new busy resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 * @name: reserving caller's ID string
 * @flags: IO resource flags
 */
struct resource *__request_region(struct resource *parent,
				  resource_size_t start, resource_size_t n,
				  const char *name, int flags)
{
	struct resource *res = alloc_resource(GFP_KERNEL);
	int ret;

	if (!res)
		return NULL;

	write_lock(&resource_lock);
	ret = __request_region_locked(res, parent, start, n, name, flags);
	write_unlock(&resource_lock);

	if (ret) {
		free_resource(res);
		return NULL;
	}

	if (parent == &iomem_resource)
		revoke_iomem(res);

	return res;
}
EXPORT_SYMBOL(__request_region);

/**
 * __release_region - release a previously reserved resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * The described resource region must match a currently busy region.
 */
void __release_region(struct resource *parent, resource_size_t start,
		      resource_size_t n)
{
	struct resource **p;
	resource_size_t end;

	p = &parent->child;
	end = start + n - 1;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *res = *p;

		if (!res)
			break;
		if (res->start <= start && res->end >= end) {
			if (!(res->flags & IORESOURCE_BUSY)) {
				p = &res->child;
				continue;
			}
			if (res->start != start || res->end != end)
				break;
			*p = res->sibling;
			write_unlock(&resource_lock);
			if (res->flags & IORESOURCE_MUXED)
				wake_up(&muxed_resource_wait);
			free_resource(res);
			return;
		}
		p = &res->sibling;
	}

	write_unlock(&resource_lock);

	pr_warn("Trying to free nonexistent resource <%pa-%pa>\n", &start, &end);
}
EXPORT_SYMBOL(__release_region);

#ifdef CONFIG_MEMORY_HOTREMOVE
/**
 * release_mem_region_adjustable - release a previously reserved memory region
 * @start: resource start address
 * @size: resource region size
 *
 * This interface is intended for memory hot-delete. The requested region
 * is released from a currently busy memory resource. The requested region
 * must either match exactly or fit into a single busy resource entry. In
 * the latter case, the remaining resource is adjusted accordingly.
 * Existing children of the busy memory resource must be immutable in the
 * request.
 *
 * Note:
 * - Additional release conditions, such as overlapping region, can be
 *   supported after they are confirmed as valid cases.
 * - When a busy memory resource gets split into two entries, the code
 *   assumes that all children remain in the lower address entry for
 *   simplicity. Enhance this logic when necessary.
 */
void release_mem_region_adjustable(resource_size_t start, resource_size_t size)
{
	struct resource *parent = &iomem_resource;
	struct resource *new_res = NULL;
	bool alloc_nofail = false;
	struct resource **p;
	struct resource *res;
	resource_size_t end;

	end = start + size - 1;
	if (WARN_ON_ONCE((start < parent->start) || (end > parent->end)))
		return;

	/*
	 * We free up quite a lot of memory on memory hotunplug (especially
	 * the memmap), just before releasing the region. This is highly
	 * unlikely to fail - let's play safe and make it never fail, as the
	 * caller cannot perform any error handling (e.g., trying to re-add
	 * memory would fail similarly).
	 */
retry:
	new_res = alloc_resource(GFP_KERNEL | (alloc_nofail ? __GFP_NOFAIL : 0));

	p = &parent->child;
	write_lock(&resource_lock);

	while ((res = *p)) {
		if (res->start >= end)
			break;

		/* look for the next resource if this one does not cover the range */
		if (res->start > start || res->end < end) {
			p = &res->sibling;
			continue;
		}

		if (!(res->flags & IORESOURCE_MEM))
			break;

		if (!(res->flags & IORESOURCE_BUSY)) {
			p = &res->child;
			continue;
		}

		/* found the target resource; let's adjust accordingly */
		if (res->start == start && res->end == end) {
			/* free the whole entry */
			*p = res->sibling;
			free_resource(res);
		} else if (res->start == start && res->end != end) {
			/* adjust the start */
			WARN_ON_ONCE(__adjust_resource(res, end + 1,
						       res->end - end));
		} else if (res->start != start && res->end == end) {
			/* adjust the end */
			WARN_ON_ONCE(__adjust_resource(res, res->start,
						       start - res->start));
		} else {
			/* split into two entries - we need a new resource */
			if (!new_res) {
				new_res = alloc_resource(GFP_ATOMIC);
				if (!new_res) {
					alloc_nofail = true;
					write_unlock(&resource_lock);
					goto retry;
				}
			}
			new_res->name = res->name;
			new_res->start = end + 1;
			new_res->end = res->end;
			new_res->flags = res->flags;
			new_res->desc = res->desc;
			new_res->parent = res->parent;
			new_res->sibling = res->sibling;
			new_res->child = NULL;

			if (WARN_ON_ONCE(__adjust_resource(res, res->start,
							   start - res->start)))
				break;
			res->sibling = new_res;
			new_res = NULL;
		}

		break;
	}

	write_unlock(&resource_lock);
	free_resource(new_res);
}
#endif	/* CONFIG_MEMORY_HOTREMOVE */

#ifdef CONFIG_MEMORY_HOTPLUG
static bool system_ram_resources_mergeable(struct resource *r1,
					   struct resource *r2)
{
	/* We assume either r1 or r2 is IORESOURCE_SYSRAM_MERGEABLE. */
	return r1->flags == r2->flags && r1->end + 1 == r2->start &&
	       r1->name == r2->name && r1->desc == r2->desc &&
	       !r1->child && !r2->child;
}

/**
 * merge_system_ram_resource - mark the System RAM resource mergeable and try to
 *	merge it with adjacent, mergeable resources
 * @res: resource descriptor
 *
 * This interface is intended for memory hotplug, whereby lots of contiguous
 * System RAM resources are added (e.g., via add_memory*()) by a driver, and
 * the actual resource boundaries are not of interest (e.g., they might only
 * be relevant for DIMMs). Only resources that are marked mergeable, that
 * have the same parent, and that don't have any children are considered.
 * All mergeable resources must be immutable during the request.
 *
 * Note:
 * - The caller has to make sure that no pointers to resources that are
 *   marked mergeable are used anymore after this call - the resource might
 *   be freed and the pointer might be stale!
 * - release_mem_region_adjustable() will split on demand on memory hotunplug
 */
void merge_system_ram_resource(struct resource *res)
{
	const unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	struct resource *cur;

	if (WARN_ON_ONCE((res->flags & flags) != flags))
		return;

	write_lock(&resource_lock);
	res->flags |= IORESOURCE_SYSRAM_MERGEABLE;

	/* Try to merge with next item in the list. */
	cur = res->sibling;
	if (cur && system_ram_resources_mergeable(res, cur)) {
		res->end = cur->end;
		res->sibling = cur->sibling;
		free_resource(cur);
	}

	/* Try to merge with previous item in the list. */
	cur = res->parent->child;
	while (cur && cur->sibling != res)
		cur = cur->sibling;
	if (cur && system_ram_resources_mergeable(cur, res)) {
		cur->end = res->end;
		cur->sibling = res->sibling;
		free_resource(res);
	}
	write_unlock(&resource_lock);
}
#endif	/* CONFIG_MEMORY_HOTPLUG */
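
/*
 * Example (an illustrative sketch): memory hotplug code that has just
 * added a "System RAM" resource "res" can opt in to merging; "res" may be
 * freed by the merge and must not be dereferenced afterwards:
 *
 *	merge_system_ram_resource(res);
 */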

/*
 * Managed region resource
 */
static void devm_resource_release(struct device *dev, void *ptr)
{
	struct resource **r = ptr;

	release_resource(*r);
}

/**
 * devm_request_resource() - request and reserve an I/O or memory resource
 * @dev: device for which to request the resource
 * @root: root of the resource tree from which to request the resource
 * @new: descriptor of the resource to request
 *
 * This is a device-managed version of request_resource(). There is usually
 * no need to release resources requested by this function explicitly since
 * that will be taken care of when the device is unbound from its driver.
 * If for some reason the resource needs to be released explicitly, because
 * of ordering issues for example, drivers must call devm_release_resource()
 * rather than the regular release_resource().
 *
 * When a conflict is detected between any existing resources and the newly
 * requested resource, an error message will be printed.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int devm_request_resource(struct device *dev, struct resource *root,
			  struct resource *new)
{
	struct resource *conflict, **ptr;

	ptr = devres_alloc(devm_resource_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	*ptr = new;

	conflict = request_resource_conflict(root, new);
	if (conflict) {
		dev_err(dev, "resource collision: %pR conflicts with %s %pR\n",
			new, conflict->name, conflict);
		devres_free(ptr);
		return -EBUSY;
	}

	devres_add(dev, ptr);
	return 0;
}
EXPORT_SYMBOL(devm_request_resource);

static int devm_resource_match(struct device *dev, void *res, void *data)
{
	struct resource **ptr = res;

	return *ptr == data;
}

/**
 * devm_release_resource() - release a previously requested resource
 * @dev: device for which to release the resource
 * @new: descriptor of the resource to release
 *
 * Releases a resource previously requested using devm_request_resource().
 */
void devm_release_resource(struct device *dev, struct resource *new)
{
	WARN_ON(devres_release(dev, devm_resource_release, devm_resource_match,
			       new));
}
EXPORT_SYMBOL(devm_release_resource);

struct region_devres {
	struct resource *parent;
	resource_size_t start;
	resource_size_t n;
};

static void devm_region_release(struct device *dev, void *res)
{
	struct region_devres *this = res;

	__release_region(this->parent, this->start, this->n);
}

static int devm_region_match(struct device *dev, void *res, void *match_data)
{
	struct region_devres *this = res, *match = match_data;

	return this->parent == match->parent &&
		this->start == match->start && this->n == match->n;
}

struct resource *
__devm_request_region(struct device *dev, struct resource *parent,
		      resource_size_t start, resource_size_t n, const char *name)
{
	struct region_devres *dr = NULL;
	struct resource *res;

	dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
			  GFP_KERNEL);
	if (!dr)
		return NULL;

	dr->parent = parent;
	dr->start = start;
	dr->n = n;

	res = __request_region(parent, start, n, name, 0);
	if (res)
		devres_add(dev, dr);
	else
		devres_free(dr);

	return res;
}
EXPORT_SYMBOL(__devm_request_region);

void __devm_release_region(struct device *dev, struct resource *parent,
			   resource_size_t start, resource_size_t n)
{
	struct region_devres match_data = { parent, start, n };

	__release_region(parent, start, n);
	WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
			       &match_data));
}
EXPORT_SYMBOL(__devm_release_region);
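
/*
 * Example (illustrative only; "pdev" and "mydev" are made up): the managed
 * wrappers tie the busy region's lifetime to the device, so no explicit
 * release is needed on driver detach:
 *
 *	if (!devm_request_mem_region(&pdev->dev, res->start,
 *				     resource_size(res), "mydev"))
 *		return -EBUSY;
 */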

/*
 * Reserve I/O ports or memory based on the "reserve=" kernel parameter.
 */
#define MAXRESERVE 4
static int __init reserve_setup(char *str)
{
	static int reserved;
	static struct resource reserve[MAXRESERVE];

	for (;;) {
		unsigned int io_start, io_num;
		int x = reserved;
		struct resource *parent;

		if (get_option(&str, &io_start) != 2)
			break;
		if (get_option(&str, &io_num) == 0)
			break;
		if (x < MAXRESERVE) {
			struct resource *res = reserve + x;

			/*
			 * If the region starts below 0x10000, we assume it's
			 * I/O port space; otherwise assume it's memory.
			 */
			if (io_start < 0x10000) {
				res->flags = IORESOURCE_IO;
				parent = &ioport_resource;
			} else {
				res->flags = IORESOURCE_MEM;
				parent = &iomem_resource;
			}
			res->name = "reserved";
			res->start = io_start;
			res->end = io_start + io_num - 1;
			res->flags |= IORESOURCE_BUSY;
			res->desc = IORES_DESC_NONE;
			res->child = NULL;
			if (request_resource(parent, res) == 0)
				reserved = x + 1;
		}
	}
	return 1;
}
__setup("reserve=", reserve_setup);

/*
 * Check whether the requested addr and size span more than any single slot
 * in the iomem resource tree.
 */
int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
{
	resource_size_t end = addr + size - 1;
	struct resource *p;
	int err = 0;

	read_lock(&resource_lock);
	for_each_resource(&iomem_resource, p, false) {
		/*
		 * We can probably skip the resources without
		 * IORESOURCE_IO attribute?
		 */
		if (p->start > end)
			continue;
		if (p->end < addr)
			continue;
		if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
		    PFN_DOWN(p->end) >= PFN_DOWN(end))
			continue;
		/*
		 * if a resource is "BUSY", it's not a hardware resource
		 * but a driver mapping of such a resource; we don't want
		 * to warn for those; some drivers legitimately map only
		 * partial hardware resources. (example: vesafb)
		 */
		if (p->flags & IORESOURCE_BUSY)
			continue;

		pr_warn("resource sanity check: requesting [mem %pa-%pa], which spans more than %s %pR\n",
			&addr, &end, p->name, p);
		err = -1;
		break;
	}
	read_unlock(&resource_lock);

	return err;
}

#ifdef CONFIG_STRICT_DEVMEM
static int strict_iomem_checks = 1;
#else
static int strict_iomem_checks;
#endif

/*
 * Check if an address is exclusive to the kernel and must not be mapped to
 * user space, for example, via /dev/mem.
 *
 * Returns true if exclusive to the kernel, otherwise returns false.
 */
bool resource_is_exclusive(struct resource *root, u64 addr, resource_size_t size)
{
	const unsigned int exclusive_system_ram = IORESOURCE_SYSTEM_RAM |
						  IORESOURCE_EXCLUSIVE;
	bool skip_children = false, err = false;
	struct resource *p;

	read_lock(&resource_lock);
	for_each_resource(root, p, skip_children) {
		if (p->start >= addr + size)
			break;
		if (p->end < addr) {
			skip_children = true;
			continue;
		}
		skip_children = false;

		/*
		 * IORESOURCE_SYSTEM_RAM resources are exclusive if
		 * IORESOURCE_EXCLUSIVE is set, even if they
		 * are not busy and even if "iomem=relaxed" is set. The
		 * responsible driver dynamically adds/removes system RAM
		 * within such an area and uncontrolled access is dangerous.
		 */
		if ((p->flags & exclusive_system_ram) == exclusive_system_ram) {
			err = true;
			break;
		}

		/*
		 * A resource is exclusive if IORESOURCE_EXCLUSIVE is set
		 * or CONFIG_IO_STRICT_DEVMEM is enabled and the
		 * resource is busy.
		 */
		if (!strict_iomem_checks || !(p->flags & IORESOURCE_BUSY))
			continue;
		if (IS_ENABLED(CONFIG_IO_STRICT_DEVMEM) ||
		    p->flags & IORESOURCE_EXCLUSIVE) {
			err = true;
			break;
		}
	}
	read_unlock(&resource_lock);

	return err;
}

bool iomem_is_exclusive(u64 addr)
{
	return resource_is_exclusive(&iomem_resource, addr & PAGE_MASK,
				     PAGE_SIZE);
}

struct resource_entry *resource_list_create_entry(struct resource *res,
						  size_t extra_size)
{
	struct resource_entry *entry;

	entry = kzalloc(sizeof(*entry) + extra_size, GFP_KERNEL);
	if (entry) {
		INIT_LIST_HEAD(&entry->node);
		entry->res = res ? res : &entry->__res;
	}

	return entry;
}
EXPORT_SYMBOL(resource_list_create_entry);

void resource_list_free(struct list_head *head)
{
	struct resource_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, node)
		resource_list_destroy_entry(entry);
}
EXPORT_SYMBOL(resource_list_free);
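
/*
 * Example (an illustrative sketch; the window is made up): host-bridge
 * code collects windows on a resource list and frees the whole list in
 * one go:
 *
 *	LIST_HEAD(res_list);
 *	struct resource_entry *entry;
 *
 *	entry = resource_list_create_entry(NULL, 0);
 *	if (!entry)
 *		return -ENOMEM;
 *	entry->res->start = 0xc0000000;
 *	entry->res->end   = 0xc0000000 + SZ_256M - 1;
 *	entry->res->flags = IORESOURCE_MEM;
 *	resource_list_add_tail(entry, &res_list);
 *	...
 *	resource_list_free(&res_list);
 */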

#ifdef CONFIG_GET_FREE_REGION
#define GFR_DESCENDING		(1UL << 0)
#define GFR_REQUEST_REGION	(1UL << 1)
#ifdef PA_SECTION_SHIFT
#define GFR_DEFAULT_ALIGN	(1UL << PA_SECTION_SHIFT)
#else
#define GFR_DEFAULT_ALIGN	PAGE_SIZE
#endif

static resource_size_t gfr_start(struct resource *base, resource_size_t size,
				 resource_size_t align, unsigned long flags)
{
	if (flags & GFR_DESCENDING) {
		resource_size_t end;

		end = min_t(resource_size_t, base->end, PHYSMEM_END);
		return end - size + 1;
	}

	return ALIGN(max(base->start, align), align);
}

static bool gfr_continue(struct resource *base, resource_size_t addr,
			 resource_size_t size, unsigned long flags)
{
	if (flags & GFR_DESCENDING)
		return addr > size && addr >= base->start;
	/*
	 * In the ascending case, be careful that the last increment by
	 * @size did not wrap around 0.
	 */
	return addr > addr - size &&
	       addr <= min_t(resource_size_t, base->end, PHYSMEM_END);
}

static resource_size_t gfr_next(resource_size_t addr, resource_size_t size,
				unsigned long flags)
{
	if (flags & GFR_DESCENDING)
		return addr - size;
	return addr + size;
}

static void remove_free_mem_region(void *_res)
{
	struct resource *res = _res;

	if (res->parent)
		remove_resource(res);
	free_resource(res);
}

static struct resource *
get_free_mem_region(struct device *dev, struct resource *base,
		    resource_size_t size, const unsigned long align,
		    const char *name, const unsigned long desc,
		    const unsigned long flags)
{
	resource_size_t addr;
	struct resource *res;
	struct region_devres *dr = NULL;

	size = ALIGN(size, align);

	res = alloc_resource(GFP_KERNEL);
	if (!res)
		return ERR_PTR(-ENOMEM);

	if (dev && (flags & GFR_REQUEST_REGION)) {
		dr = devres_alloc(devm_region_release,
				  sizeof(struct region_devres), GFP_KERNEL);
		if (!dr) {
			free_resource(res);
			return ERR_PTR(-ENOMEM);
		}
	} else if (dev) {
		if (devm_add_action_or_reset(dev, remove_free_mem_region, res))
			return ERR_PTR(-ENOMEM);
	}

	write_lock(&resource_lock);
	for (addr = gfr_start(base, size, align, flags);
	     gfr_continue(base, addr, align, flags);
	     addr = gfr_next(addr, align, flags)) {
		if (__region_intersects(base, addr, size, 0, IORES_DESC_NONE) !=
		    REGION_DISJOINT)
			continue;

		if (flags & GFR_REQUEST_REGION) {
			if (__request_region_locked(res, &iomem_resource, addr,
						    size, name, 0))
				break;

			if (dev) {
				dr->parent = &iomem_resource;
				dr->start = addr;
				dr->n = size;
				devres_add(dev, dr);
			}

			res->desc = desc;
			write_unlock(&resource_lock);

			/*
			 * A driver is claiming this region so revoke any
			 * mappings.
			 */
			revoke_iomem(res);
		} else {
			res->start = addr;
			res->end = addr + size - 1;
			res->name = name;
			res->desc = desc;
			res->flags = IORESOURCE_MEM;

			/*
			 * Only succeed if the resource hosts an exclusive
			 * range after the insert.
			 */
			if (__insert_resource(base, res) || res->child)
				break;

			write_unlock(&resource_lock);
		}

		return res;
	}
	write_unlock(&resource_lock);

	if (flags & GFR_REQUEST_REGION) {
		free_resource(res);
		devres_free(dr);
	} else if (dev)
		devm_release_action(dev, remove_free_mem_region, res);

	return ERR_PTR(-ERANGE);
}

/**
 * devm_request_free_mem_region - find free region for device private memory
 * @dev: device struct to bind the resource to
 * @base: resource tree to look in
 * @size: size in bytes of the device memory to add
 *
 * This function tries to find an empty range of physical addresses big
 * enough to contain the new resource, so that it can later be hotplugged
 * as ZONE_DEVICE memory, which in turn allocates struct pages.
 */
struct resource *devm_request_free_mem_region(struct device *dev,
					      struct resource *base, unsigned long size)
{
	unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION;

	return get_free_mem_region(dev, base, size, GFR_DEFAULT_ALIGN,
				   dev_name(dev),
				   IORES_DESC_DEVICE_PRIVATE_MEMORY, flags);
}
EXPORT_SYMBOL_GPL(devm_request_free_mem_region);

struct resource *request_free_mem_region(struct resource *base,
					 unsigned long size, const char *name)
{
	unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION;

	return get_free_mem_region(NULL, base, size, GFR_DEFAULT_ALIGN, name,
				   IORES_DESC_DEVICE_PRIVATE_MEMORY, flags);
}
EXPORT_SYMBOL_GPL(request_free_mem_region);

/**
 * alloc_free_mem_region - find a free region relative to @base
 * @base: resource that will parent the new resource
 * @size: size in bytes of memory to allocate from @base
 * @align: alignment requirements for the allocation
 * @name: resource name
 *
 * Buses like CXL, which can dynamically instantiate new memory regions,
 * need a method to allocate physical address space for those regions.
 * Allocate and insert a new resource covering a range in the span of
 * @base that is free, i.e. not claimed by any descendant of @base.
 */
struct resource *alloc_free_mem_region(struct resource *base,
				       unsigned long size, unsigned long align,
				       const char *name)
{
	/* Default of ascending direction and insert resource */
	unsigned long flags = 0;

	return get_free_mem_region(NULL, base, size, align, name,
				   IORES_DESC_NONE, flags);
}
EXPORT_SYMBOL_GPL(alloc_free_mem_region);
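
/*
 * Example (an illustrative sketch; "cfmws_res" is a made-up host-bridge
 * window): CXL-style code carves an aligned region out of @base:
 *
 *	struct resource *res;
 *
 *	res = alloc_free_mem_region(cfmws_res, SZ_256M, SZ_256M, "region0");
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 */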
#endif /* CONFIG_GET_FREE_REGION */

static int __init strict_iomem(char *str)
{
	if (strstr(str, "relaxed"))
		strict_iomem_checks = 0;
	if (strstr(str, "strict"))
		strict_iomem_checks = 1;
	return 1;
}

static int iomem_fs_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, DEVMEM_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type iomem_fs_type = {
	.name		= "iomem",
	.owner		= THIS_MODULE,
	.init_fs_context = iomem_fs_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static int __init iomem_init_inode(void)
{
	static struct vfsmount *iomem_vfs_mount;
	static int iomem_fs_cnt;
	struct inode *inode;
	int rc;

	rc = simple_pin_fs(&iomem_fs_type, &iomem_vfs_mount, &iomem_fs_cnt);
	if (rc < 0) {
		pr_err("Cannot mount iomem pseudo filesystem: %d\n", rc);
		return rc;
	}

	inode = alloc_anon_inode(iomem_vfs_mount->mnt_sb);
	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		pr_err("Cannot allocate inode for iomem: %d\n", rc);
		simple_release_fs(&iomem_vfs_mount, &iomem_fs_cnt);
		return rc;
	}

	/*
	 * Publish the iomem revocation inode as initialized.
	 * Pairs with smp_load_acquire() in revoke_iomem().
	 */
	smp_store_release(&iomem_inode, inode);

	return 0;
}

fs_initcall(iomem_init_inode);

__setup("iomem=", strict_iomem);