// SPDX-License-Identifier: GPL-2.0-only
/*
 *	linux/kernel/resource.c
 *
 * Copyright (C) 1999	Linus Torvalds
 * Copyright (C) 1999	Martin Mares <mj@ucw.cz>
 *
 * Arbitrary resource management.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/pseudo_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/resource_ext.h>
#include <uapi/linux/magic.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <asm/io.h>


struct resource ioport_resource = {
	.name	= "PCI IO",
	.start	= 0,
	.end	= IO_SPACE_LIMIT,
	.flags	= IORESOURCE_IO,
};
EXPORT_SYMBOL(ioport_resource);

struct resource iomem_resource = {
	.name	= "PCI mem",
	.start	= 0,
	.end	= -1,
	.flags	= IORESOURCE_MEM,
};
EXPORT_SYMBOL(iomem_resource);

/* constraints to be met while allocating resources */
struct resource_constraint {
	resource_size_t min, max, align;
	resource_size_t (*alignf)(void *, const struct resource *,
				  resource_size_t, resource_size_t);
	void *alignf_data;
};

static DEFINE_RWLOCK(resource_lock);

static struct resource *next_resource(struct resource *p, bool skip_children)
{
	if (!skip_children && p->child)
		return p->child;
	while (!p->sibling && p->parent)
		p = p->parent;
	return p->sibling;
}

#define for_each_resource(_root, _p, _skip_children) \
	for ((_p) = (_root)->child; (_p); (_p) = next_resource(_p, _skip_children))

#ifdef CONFIG_PROC_FS

enum { MAX_IORES_LEVEL = 5 };

static void *r_start(struct seq_file *m, loff_t *pos)
	__acquires(resource_lock)
{
	struct resource *root = pde_data(file_inode(m->file));
	struct resource *p;
	loff_t l = *pos;

	read_lock(&resource_lock);
	for_each_resource(root, p, false) {
		if (l-- == 0)
			break;
	}

	return p;
}

static void *r_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct resource *p = v;

	(*pos)++;

	return (void *)next_resource(p, false);
}

static void r_stop(struct seq_file *m, void *v)
	__releases(resource_lock)
{
	read_unlock(&resource_lock);
}

static int r_show(struct seq_file *m, void *v)
{
	struct resource *root = pde_data(file_inode(m->file));
	struct resource *r = v, *p;
	unsigned long long start, end;
	int width = root->end < 0x10000 ? 4 : 8;
	int depth;

	for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
		if (p->parent == root)
			break;

	if (file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN)) {
		start = r->start;
		end = r->end;
	} else {
		start = end = 0;
	}

	seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
			depth * 2, "",
			width, start,
			width, end,
			r->name ? r->name : "<BAD>");
	return 0;
}
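/*
 * Illustrative sketch (not part of the original file): with the format
 * string above, a privileged read of /proc/iomem renders one line per
 * resource, indented two spaces per tree level, e.g. (names and addresses
 * are hypothetical):
 *
 *	fed00000-fed003ff : HPET 0
 *	  fed00000-fed003ff : PNP0103:00
 *
 * Readers without CAP_SYS_ADMIN see 00000000-00000000 for every range.
 */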
static const struct seq_operations resource_op = {
	.start	= r_start,
	.next	= r_next,
	.stop	= r_stop,
	.show	= r_show,
};

static int __init ioresources_init(void)
{
	proc_create_seq_data("ioports", 0, NULL, &resource_op,
			&ioport_resource);
	proc_create_seq_data("iomem", 0, NULL, &resource_op, &iomem_resource);
	return 0;
}
__initcall(ioresources_init);

#endif /* CONFIG_PROC_FS */

static void free_resource(struct resource *res)
{
	/*
	 * If the resource was allocated using memblock early during boot
	 * we'll leak it here: we can only return full pages back to the
	 * buddy and trying to be smart and reusing them eventually in
	 * alloc_resource() overcomplicates resource handling.
	 */
	if (res && PageSlab(virt_to_head_page(res)))
		kfree(res);
}

static struct resource *alloc_resource(gfp_t flags)
{
	return kzalloc(sizeof(struct resource), flags);
}

/* Return the conflict entry if you can't request it */
static struct resource * __request_resource(struct resource *root, struct resource *new)
{
	resource_size_t start = new->start;
	resource_size_t end = new->end;
	struct resource *tmp, **p;

	if (end < start)
		return root;
	if (start < root->start)
		return root;
	if (end > root->end)
		return root;
	p = &root->child;
	for (;;) {
		tmp = *p;
		if (!tmp || tmp->start > end) {
			new->sibling = tmp;
			*p = new;
			new->parent = root;
			return NULL;
		}
		p = &tmp->sibling;
		if (tmp->end < start)
			continue;
		return tmp;
	}
}

static int __release_resource(struct resource *old, bool release_child)
{
	struct resource *tmp, **p, *chd;

	p = &old->parent->child;
	for (;;) {
		tmp = *p;
		if (!tmp)
			break;
		if (tmp == old) {
			if (release_child || !(tmp->child)) {
				*p = tmp->sibling;
			} else {
				for (chd = tmp->child;; chd = chd->sibling) {
					chd->parent = tmp->parent;
					if (!(chd->sibling))
						break;
				}
				*p = tmp->child;
				chd->sibling = tmp->sibling;
			}
			old->parent = NULL;
			return 0;
		}
		p = &tmp->sibling;
	}
	return -EINVAL;
}

static void __release_child_resources(struct resource *r)
{
	struct resource *tmp, *p;
	resource_size_t size;

	p = r->child;
	r->child = NULL;
	while (p) {
		tmp = p;
		p = p->sibling;

		tmp->parent = NULL;
		tmp->sibling = NULL;
		__release_child_resources(tmp);

		printk(KERN_DEBUG "release child resource %pR\n", tmp);
		/* need to restore size, and keep flags */
		size = resource_size(tmp);
		tmp->start = 0;
		tmp->end = size - 1;
	}
}

void release_child_resources(struct resource *r)
{
	write_lock(&resource_lock);
	__release_child_resources(r);
	write_unlock(&resource_lock);
}

/**
 * request_resource_conflict - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns NULL for success, conflict resource on error.
 */
struct resource *request_resource_conflict(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __request_resource(root, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * request_resource - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns 0 for success, negative error code on error.
 */
int request_resource(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	conflict = request_resource_conflict(root, new);
	return conflict ? -EBUSY : 0;
}

EXPORT_SYMBOL(request_resource);

/**
 * release_resource - release a previously reserved resource
 * @old: resource pointer
 */
int release_resource(struct resource *old)
{
	int retval;

	write_lock(&resource_lock);
	retval = __release_resource(old, true);
	write_unlock(&resource_lock);
	return retval;
}

EXPORT_SYMBOL(release_resource);
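/*
 * Illustrative sketch (not part of the original file): a typical consumer
 * owns a statically allocated struct resource for the lifetime of the
 * claim. The "foo" name and addresses below are hypothetical.
 *
 *	static struct resource foo_regs = {
 *		.name	= "foo registers",
 *		.start	= 0xf0000000,
 *		.end	= 0xf0000fff,
 *		.flags	= IORESOURCE_MEM,
 *	};
 *
 *	if (request_resource(&iomem_resource, &foo_regs))
 *		return -EBUSY;		// somebody else claimed the range
 *	// ... use the range, then give it back:
 *	release_resource(&foo_regs);
 */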
/**
 * find_next_iomem_res - Finds the lowest iomem resource that covers part of
 *			 [@start..@end].
 *
 * If a resource is found, returns 0 and @*res is overwritten with the part
 * of the resource that's within [@start..@end]; if none is found, returns
 * -ENODEV. Returns -EINVAL for invalid parameters.
 *
 * @start:	start address of the resource searched for
 * @end:	end address of same resource
 * @flags:	flags which the resource must have
 * @desc:	descriptor the resource must have
 * @res:	return ptr, if resource found
 *
 * The caller must specify @start, @end, @flags, and @desc
 * (which may be IORES_DESC_NONE).
 */
static int find_next_iomem_res(resource_size_t start, resource_size_t end,
			       unsigned long flags, unsigned long desc,
			       struct resource *res)
{
	struct resource *p;

	if (!res)
		return -EINVAL;

	if (start >= end)
		return -EINVAL;

	read_lock(&resource_lock);

	for_each_resource(&iomem_resource, p, false) {
		/* If we passed the resource we are looking for, stop */
		if (p->start > end) {
			p = NULL;
			break;
		}

		/* Skip until we find a range that matches what we look for */
		if (p->end < start)
			continue;

		if ((p->flags & flags) != flags)
			continue;
		if ((desc != IORES_DESC_NONE) && (desc != p->desc))
			continue;

		/* Found a match, break */
		break;
	}

	if (p) {
		/* copy data */
		*res = (struct resource) {
			.start = max(start, p->start),
			.end = min(end, p->end),
			.flags = p->flags,
			.desc = p->desc,
			.parent = p->parent,
		};
	}

	read_unlock(&resource_lock);
	return p ? 0 : -ENODEV;
}

static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
				 unsigned long flags, unsigned long desc,
				 void *arg,
				 int (*func)(struct resource *, void *))
{
	struct resource res;
	int ret = -EINVAL;

	while (start < end &&
	       !find_next_iomem_res(start, end, flags, desc, &res)) {
		ret = (*func)(&res, arg);
		if (ret)
			break;

		start = res.end + 1;
	}

	return ret;
}

/**
 * walk_iomem_res_desc - Walks through iomem resources and calls func()
 *			 with matching resource ranges.
 *
 * @desc: I/O resource descriptor. Use IORES_DESC_NONE to skip @desc check.
 * @flags: I/O resource flags
 * @start: start addr
 * @end: end addr
 * @arg: function argument for the callback @func
 * @func: callback function that is called for each qualifying resource area
 *
 * All the memory ranges which overlap start,end and also match flags and
 * desc are valid candidates.
 *
 * NOTE: For a new descriptor search, define a new IORES_DESC in
 * <linux/ioport.h> and set it in 'desc' of a target resource entry.
 */
int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
			u64 end, void *arg, int (*func)(struct resource *, void *))
{
	return __walk_iomem_res_desc(start, end, flags, desc, arg, func);
}
EXPORT_SYMBOL_GPL(walk_iomem_res_desc);
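/*
 * Illustrative sketch (not part of the original file): a callback receives
 * each matching, clipped range in turn and returns non-zero to stop the
 * walk. The foo_* names are hypothetical.
 *
 *	static int foo_sum_bytes(struct resource *res, void *arg)
 *	{
 *		*(u64 *)arg += resource_size(res);
 *		return 0;
 *	}
 *
 *	u64 bytes = 0;
 *
 *	walk_iomem_res_desc(IORES_DESC_NONE,
 *			    IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
 *			    0, -1, &bytes, foo_sum_bytes);
 */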
/*
 * This function calls the @func callback against all memory ranges of type
 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
 * This function is only for System RAM; it deals with full ranges, not
 * PFNs. If resources are not PFN-aligned, dealing with PFNs can truncate
 * ranges.
 */
int walk_system_ram_res(u64 start, u64 end, void *arg,
			int (*func)(struct resource *, void *))
{
	unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg,
				     func);
}

/*
 * This function, being a variant of walk_system_ram_res(), calls the @func
 * callback against all memory ranges of type System RAM which are marked as
 * IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY in reversed order, i.e., from
 * higher to lower.
 */
int walk_system_ram_res_rev(u64 start, u64 end, void *arg,
			    int (*func)(struct resource *, void *))
{
	struct resource res, *rams;
	int rams_size = 16, i;
	unsigned long flags;
	int ret = -1;

	/* create a list */
	rams = kvcalloc(rams_size, sizeof(struct resource), GFP_KERNEL);
	if (!rams)
		return ret;

	flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	i = 0;
	while ((start < end) &&
	       (!find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res))) {
		if (i >= rams_size) {
			/* re-alloc */
			struct resource *rams_new;

			rams_new = kvrealloc(rams, rams_size * sizeof(struct resource),
					     (rams_size + 16) * sizeof(struct resource),
					     GFP_KERNEL);
			if (!rams_new)
				goto out;

			rams = rams_new;
			rams_size += 16;
		}

		rams[i].start = res.start;
		rams[i++].end = res.end;

		start = res.end + 1;
	}

	/* go reverse */
	for (i--; i >= 0; i--) {
		ret = (*func)(&rams[i], arg);
		if (ret)
			break;
	}

out:
	kvfree(rams);
	return ret;
}

/*
 * This function calls the @func callback against all memory ranges, which
 * are ranges marked as IORESOURCE_MEM and IORESOURCE_BUSY.
 */
int walk_mem_res(u64 start, u64 end, void *arg,
		 int (*func)(struct resource *, void *))
{
	unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;

	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg,
				     func);
}

/*
 * This function calls the @func callback against all memory ranges of type
 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
 * It is to be used only for System RAM.
 */
int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
			  void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	resource_size_t start, end;
	unsigned long flags;
	struct resource res;
	unsigned long pfn, end_pfn;
	int ret = -EINVAL;

	start = (u64) start_pfn << PAGE_SHIFT;
	end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
	flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	while (start < end &&
	       !find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res)) {
		pfn = PFN_UP(res.start);
		end_pfn = PFN_DOWN(res.end + 1);
		if (end_pfn > pfn)
			ret = (*func)(pfn, end_pfn - pfn, arg);
		if (ret)
			break;
		start = res.end + 1;
	}
	return ret;
}
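/*
 * Illustrative sketch (not part of the original file): unlike the
 * resource-based walkers above, the callback here takes a PFN range. The
 * foo_* names are hypothetical; __is_ram() below is the in-tree example.
 *
 *	static int foo_count_pages(unsigned long pfn, unsigned long nr_pages,
 *				   void *arg)
 *	{
 *		*(unsigned long *)arg += nr_pages;
 *		return 0;
 *	}
 *
 *	unsigned long nr = 0;
 *
 *	walk_system_ram_range(0, max_pfn, &nr, foo_count_pages);
 */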
static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	return 1;
}

/*
 * This generic page_is_ram() returns true if specified address is
 * registered as System RAM in iomem_resource list.
 */
int __weak page_is_ram(unsigned long pfn)
{
	return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
}
EXPORT_SYMBOL_GPL(page_is_ram);

static int __region_intersects(struct resource *parent, resource_size_t start,
			       size_t size, unsigned long flags,
			       unsigned long desc)
{
	struct resource res;
	int type = 0;
	int other = 0;
	struct resource *p;

	res.start = start;
	res.end = start + size - 1;

	for (p = parent->child; p ; p = p->sibling) {
		bool is_type = (((p->flags & flags) == flags) &&
				((desc == IORES_DESC_NONE) ||
				 (desc == p->desc)));

		if (resource_overlaps(p, &res))
			is_type ? type++ : other++;
	}

	if (type == 0)
		return REGION_DISJOINT;

	if (other == 0)
		return REGION_INTERSECTS;

	return REGION_MIXED;
}

/**
 * region_intersects() - determine intersection of region with known resources
 * @start: region start address
 * @size: size of region
 * @flags: flags of resource (in iomem_resource)
 * @desc: descriptor of resource (in iomem_resource) or IORES_DESC_NONE
 *
 * Check if the specified region partially overlaps or fully eclipses a
 * resource identified by @flags and @desc (optional with IORES_DESC_NONE).
 * Return REGION_DISJOINT if the region does not overlap @flags/@desc,
 * return REGION_MIXED if the region overlaps @flags/@desc and another
 * resource, and return REGION_INTERSECTS if the region overlaps @flags/@desc
 * and no other defined resource. Note that REGION_INTERSECTS is also
 * returned in the case when the specified region overlaps RAM and undefined
 * memory holes.
 *
 * region_intersects() is used by memory remapping functions to ensure
 * the user is not remapping RAM and is a vast speed up over walking
 * through the resource table page by page.
 */
int region_intersects(resource_size_t start, size_t size, unsigned long flags,
		      unsigned long desc)
{
	int ret;

	read_lock(&resource_lock);
	ret = __region_intersects(&iomem_resource, start, size, flags, desc);
	read_unlock(&resource_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(region_intersects);
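/*
 * Illustrative sketch (not part of the original file): memory remapping
 * code typically refuses a range unless it is System RAM and nothing else,
 * along the lines of:
 *
 *	if (region_intersects(addr, size, IORESOURCE_SYSTEM_RAM,
 *			      IORES_DESC_NONE) != REGION_INTERSECTS)
 *		return -EINVAL;	// partially or fully outside System RAM
 */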
void __weak arch_remove_reservations(struct resource *avail)
{
}

static resource_size_t simple_align_resource(void *data,
					     const struct resource *avail,
					     resource_size_t size,
					     resource_size_t align)
{
	return avail->start;
}

static void resource_clip(struct resource *res, resource_size_t min,
			  resource_size_t max)
{
	if (res->start < min)
		res->start = min;
	if (res->end > max)
		res->end = max;
}

/*
 * Find empty slot in the resource tree with the given range and
 * alignment constraints
 */
static int __find_resource(struct resource *root, struct resource *old,
			   struct resource *new,
			   resource_size_t size,
			   struct resource_constraint *constraint)
{
	struct resource *this = root->child;
	struct resource tmp = *new, avail, alloc;

	tmp.start = root->start;
	/*
	 * Skip past an allocated resource that starts at 0, since the assignment
	 * of this->start - 1 to tmp->end below would cause an underflow.
	 */
	if (this && this->start == root->start) {
		tmp.start = (this == old) ? old->start : this->end + 1;
		this = this->sibling;
	}
	for (;;) {
		if (this)
			tmp.end = (this == old) ? this->end : this->start - 1;
		else
			tmp.end = root->end;

		if (tmp.end < tmp.start)
			goto next;

		resource_clip(&tmp, constraint->min, constraint->max);
		arch_remove_reservations(&tmp);

		/* Check for overflow after ALIGN() */
		avail.start = ALIGN(tmp.start, constraint->align);
		avail.end = tmp.end;
		avail.flags = new->flags & ~IORESOURCE_UNSET;
		if (avail.start >= tmp.start) {
			alloc.flags = avail.flags;
			alloc.start = constraint->alignf(constraint->alignf_data, &avail,
							 size, constraint->align);
			alloc.end = alloc.start + size - 1;
			if (alloc.start <= alloc.end &&
			    resource_contains(&avail, &alloc)) {
				new->start = alloc.start;
				new->end = alloc.end;
				return 0;
			}
		}

next:		if (!this || this->end == root->end)
			break;

		if (this != old)
			tmp.start = this->end + 1;
		this = this->sibling;
	}
	return -EBUSY;
}

/*
 * Find empty slot in the resource tree given range and alignment.
 */
static int find_resource(struct resource *root, struct resource *new,
			 resource_size_t size,
			 struct resource_constraint *constraint)
{
	return __find_resource(root, NULL, new, size, constraint);
}
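/*
 * Illustrative sketch (not part of the original file): a custom @alignf
 * receives the gap found above and proposes a candidate start; the caller
 * only succeeds if [start, start + size - 1] stays inside the gap. The
 * default, simple_align_resource(), just returns avail->start. The foo_*
 * name is hypothetical.
 *
 *	static resource_size_t foo_alignf(void *data,
 *					  const struct resource *avail,
 *					  resource_size_t size,
 *					  resource_size_t align)
 *	{
 *		return ALIGN(avail->start, align);	// round up into the gap
 *	}
 */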
/**
 * reallocate_resource - allocate a slot in the resource tree given range & alignment.
 *	The resource will be relocated if the new size cannot be reallocated in the
 *	current location.
 *
 * @root: root resource descriptor
 * @old: resource descriptor desired by caller
 * @newsize: new size of the resource descriptor
 * @constraint: the size and alignment constraints to be met.
 */
static int reallocate_resource(struct resource *root, struct resource *old,
			       resource_size_t newsize,
			       struct resource_constraint *constraint)
{
	int err = 0;
	struct resource new = *old;
	struct resource *conflict;

	write_lock(&resource_lock);

	err = __find_resource(root, old, &new, newsize, constraint);
	if (err)
		goto out;

	if (resource_contains(&new, old)) {
		old->start = new.start;
		old->end = new.end;
		goto out;
	}

	if (old->child) {
		err = -EBUSY;
		goto out;
	}

	if (resource_contains(old, &new)) {
		old->start = new.start;
		old->end = new.end;
	} else {
		__release_resource(old, true);
		*old = new;
		conflict = __request_resource(root, old);
		BUG_ON(conflict);
	}
out:
	write_unlock(&resource_lock);
	return err;
}


/**
 * allocate_resource - allocate empty slot in the resource tree given range & alignment.
 *	The resource will be reallocated with a new size if it was already
 *	allocated.
 *
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 * @size: requested resource region size
 * @min: minimum boundary to allocate
 * @max: maximum boundary to allocate
 * @align: alignment requested, in bytes
 * @alignf: alignment function, optional, called if not NULL
 * @alignf_data: arbitrary data to pass to the @alignf function
 */
int allocate_resource(struct resource *root, struct resource *new,
		      resource_size_t size, resource_size_t min,
		      resource_size_t max, resource_size_t align,
		      resource_size_t (*alignf)(void *,
						const struct resource *,
						resource_size_t,
						resource_size_t),
		      void *alignf_data)
{
	int err;
	struct resource_constraint constraint;

	if (!alignf)
		alignf = simple_align_resource;

	constraint.min = min;
	constraint.max = max;
	constraint.align = align;
	constraint.alignf = alignf;
	constraint.alignf_data = alignf_data;

	if (new->parent) {
		/* resource is already allocated, try reallocating with
		   the new constraints */
		return reallocate_resource(root, new, size, &constraint);
	}

	write_lock(&resource_lock);
	err = find_resource(root, new, size, &constraint);
	if (err >= 0 && __request_resource(root, new))
		err = -EBUSY;
	write_unlock(&resource_lock);
	return err;
}

EXPORT_SYMBOL(allocate_resource);

/**
 * lookup_resource - find an existing resource by a resource start address
 * @root: root resource descriptor
 * @start: resource start address
 *
 * Returns a pointer to the resource if found, NULL otherwise
 */
struct resource *lookup_resource(struct resource *root, resource_size_t start)
{
	struct resource *res;

	read_lock(&resource_lock);
	for (res = root->child; res; res = res->sibling) {
		if (res->start == start)
			break;
	}
	read_unlock(&resource_lock);

	return res;
}
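/*
 * Illustrative sketch (not part of the original file): let the core pick a
 * free 4 KiB slot inside a hypothetical window instead of hard-coding one.
 *
 *	static struct resource foo_win = {
 *		.name	= "foo window",
 *		.flags	= IORESOURCE_MEM,
 *	};
 *
 *	if (allocate_resource(&iomem_resource, &foo_win, SZ_4K,
 *			      0x90000000, 0x9fffffff, SZ_4K,
 *			      NULL, NULL))	// NULL alignf: first fit
 *		return -EBUSY;
 */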
/*
 * Insert a resource into the resource tree. If successful, return NULL,
 * otherwise return the conflicting resource (compare to __request_resource())
 */
static struct resource * __insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *first, *next;

	for (;; parent = first) {
		first = __request_resource(parent, new);
		if (!first)
			return first;

		if (first == parent)
			return first;
		if (WARN_ON(first == new))	/* duplicated insertion */
			return first;

		if ((first->start > new->start) || (first->end < new->end))
			break;
		if ((first->start == new->start) && (first->end == new->end))
			break;
	}

	for (next = first; ; next = next->sibling) {
		/* Partial overlap? Bad, and unfixable */
		if (next->start < new->start || next->end > new->end)
			return next;
		if (!next->sibling)
			break;
		if (next->sibling->start > new->end)
			break;
	}

	new->parent = parent;
	new->sibling = next->sibling;
	new->child = first;

	next->sibling = NULL;
	for (next = first; next; next = next->sibling)
		next->parent = new;

	if (parent->child == first) {
		parent->child = new;
	} else {
		next = parent->child;
		while (next->sibling != first)
			next = next->sibling;
		next->sibling = new;
	}
	return NULL;
}

/**
 * insert_resource_conflict - Inserts resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns NULL on success, the conflicting resource if it can't be inserted.
 *
 * This function is equivalent to request_resource_conflict when no conflict
 * happens. If a conflict happens, and the conflicting resources
 * entirely fit within the range of the new resource, then the new
 * resource is inserted and the conflicting resources become children of
 * the new resource.
 *
 * This function is intended for producers of resources, such as FW modules
 * and bus drivers.
 */
struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __insert_resource(parent, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * insert_resource - Inserts a resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns 0 on success, -EBUSY if the resource can't be inserted.
 *
 * This function is intended for producers of resources, such as FW modules
 * and bus drivers.
 */
int insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	conflict = insert_resource_conflict(parent, new);
	return conflict ? -EBUSY : 0;
}
EXPORT_SYMBOL_GPL(insert_resource);
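/*
 * Illustrative sketch (not part of the original file): a firmware driver
 * (a resource producer) publishes a range it discovered; any existing
 * resources that fit entirely inside it are re-parented under it. Names
 * and addresses are hypothetical.
 *
 *	static struct resource fw_tables = {
 *		.name	= "fw tables",
 *		.start	= 0xe0000000,
 *		.end	= 0xe00fffff,
 *		.flags	= IORESOURCE_MEM,
 *	};
 *
 *	if (insert_resource(&iomem_resource, &fw_tables))
 *		return -EBUSY;	// a partial overlap cannot be fixed up
 */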
/**
 * insert_resource_expand_to_fit - Insert a resource into the resource tree
 * @root: root resource descriptor
 * @new: new resource to insert
 *
 * Insert a resource into the resource tree, possibly expanding it in order
 * to make it encompass any conflicting resources.
 */
void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
{
	if (new->parent)
		return;

	write_lock(&resource_lock);
	for (;;) {
		struct resource *conflict;

		conflict = __insert_resource(root, new);
		if (!conflict)
			break;
		if (conflict == root)
			break;

		/* Ok, expand resource to cover the conflict, then try again .. */
		if (conflict->start < new->start)
			new->start = conflict->start;
		if (conflict->end > new->end)
			new->end = conflict->end;

		pr_info("Expanded resource %s due to conflict with %s\n",
			new->name, conflict->name);
	}
	write_unlock(&resource_lock);
}
/*
 * Not for general consumption, only early boot memory map parsing, PCI
 * resource discovery, and late discovery of CXL resources are expected
 * to use this interface. The former are built-in and only the latter,
 * CXL, is a module.
 */
EXPORT_SYMBOL_NS_GPL(insert_resource_expand_to_fit, CXL);

/**
 * remove_resource - Remove a resource in the resource tree
 * @old: resource to remove
 *
 * Returns 0 on success, -EINVAL if the resource is not valid.
 *
 * This function removes a resource previously inserted by insert_resource()
 * or insert_resource_conflict(), and moves the children (if any) up to
 * where they were before. insert_resource() and insert_resource_conflict()
 * insert a new resource, and move any conflicting resources down to the
 * children of the new resource.
 *
 * insert_resource(), insert_resource_conflict() and remove_resource() are
 * intended for producers of resources, such as FW modules and bus drivers.
 */
int remove_resource(struct resource *old)
{
	int retval;

	write_lock(&resource_lock);
	retval = __release_resource(old, false);
	write_unlock(&resource_lock);
	return retval;
}
EXPORT_SYMBOL_GPL(remove_resource);

static int __adjust_resource(struct resource *res, resource_size_t start,
			     resource_size_t size)
{
	struct resource *tmp, *parent = res->parent;
	resource_size_t end = start + size - 1;
	int result = -EBUSY;

	if (!parent)
		goto skip;

	if ((start < parent->start) || (end > parent->end))
		goto out;

	if (res->sibling && (res->sibling->start <= end))
		goto out;

	tmp = parent->child;
	if (tmp != res) {
		while (tmp->sibling != res)
			tmp = tmp->sibling;
		if (start <= tmp->end)
			goto out;
	}

skip:
	for (tmp = res->child; tmp; tmp = tmp->sibling)
		if ((tmp->start < start) || (tmp->end > end))
			goto out;

	res->start = start;
	res->end = end;
	result = 0;

out:
	return result;
}

/**
 * adjust_resource - modify a resource's start and size
 * @res: resource to modify
 * @start: new start value
 * @size: new size
 *
 * Given an existing resource, change its start and size to match the
 * arguments. Returns 0 on success, -EBUSY if it can't fit.
 * Existing children of the resource are assumed to be immutable.
 */
int adjust_resource(struct resource *res, resource_size_t start,
		    resource_size_t size)
{
	int result;

	write_lock(&resource_lock);
	result = __adjust_resource(res, start, size);
	write_unlock(&resource_lock);
	return result;
}
EXPORT_SYMBOL(adjust_resource);
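/*
 * Illustrative sketch (not part of the original file): doubling a claimed
 * resource in place; this fails with -EBUSY if the parent is too small or
 * a sibling is in the way.
 *
 *	err = adjust_resource(res, res->start, resource_size(res) * 2);
 */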
static void __init
__reserve_region_with_split(struct resource *root, resource_size_t start,
			    resource_size_t end, const char *name)
{
	struct resource *parent = root;
	struct resource *conflict;
	struct resource *res = alloc_resource(GFP_ATOMIC);
	struct resource *next_res = NULL;
	int type = resource_type(root);

	if (!res)
		return;

	res->name = name;
	res->start = start;
	res->end = end;
	res->flags = type | IORESOURCE_BUSY;
	res->desc = IORES_DESC_NONE;

	while (1) {
		conflict = __request_resource(parent, res);
		if (!conflict) {
			if (!next_res)
				break;
			res = next_res;
			next_res = NULL;
			continue;
		}

		/* conflict covered whole area */
		if (conflict->start <= res->start &&
		    conflict->end >= res->end) {
			free_resource(res);
			WARN_ON(next_res);
			break;
		}

		/* failed, split and try again */
		if (conflict->start > res->start) {
			end = res->end;
			res->end = conflict->start - 1;
			if (conflict->end < end) {
				next_res = alloc_resource(GFP_ATOMIC);
				if (!next_res) {
					free_resource(res);
					break;
				}
				next_res->name = name;
				next_res->start = conflict->end + 1;
				next_res->end = end;
				next_res->flags = type | IORESOURCE_BUSY;
				next_res->desc = IORES_DESC_NONE;
			}
		} else {
			res->start = conflict->end + 1;
		}
	}
}

void __init
reserve_region_with_split(struct resource *root, resource_size_t start,
			  resource_size_t end, const char *name)
{
	int abort = 0;

	write_lock(&resource_lock);
	if (root->start > start || root->end < end) {
		pr_err("requested range [0x%llx-0x%llx] not in root %pr\n",
		       (unsigned long long)start, (unsigned long long)end,
		       root);
		if (start > root->end || end < root->start)
			abort = 1;
		else {
			if (end > root->end)
				end = root->end;
			if (start < root->start)
				start = root->start;
			pr_err("fixing request to [0x%llx-0x%llx]\n",
			       (unsigned long long)start,
			       (unsigned long long)end);
		}
		dump_stack();
	}
	if (!abort)
		__reserve_region_with_split(root, start, end, name);
	write_unlock(&resource_lock);
}

/**
 * resource_alignment - calculate resource's alignment
 * @res: resource pointer
 *
 * Returns alignment on success, 0 (invalid alignment) on failure.
 */
resource_size_t resource_alignment(struct resource *res)
{
	switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
	case IORESOURCE_SIZEALIGN:
		return resource_size(res);
	case IORESOURCE_STARTALIGN:
		return res->start;
	default:
		return 0;
	}
}

/*
 * This is compatibility stuff for IO resources.
 *
 * Note how this, unlike the above, knows about
 * the IO flag meanings (busy etc).
 *
 * request_region creates a new busy region.
 *
 * release_region releases a matching busy region.
 */
static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait);

static struct inode *iomem_inode;

#ifdef CONFIG_IO_STRICT_DEVMEM
static void revoke_iomem(struct resource *res)
{
	/* pairs with smp_store_release() in iomem_init_inode() */
	struct inode *inode = smp_load_acquire(&iomem_inode);

	/*
	 * Check that the initialization has completed. Losing the race
	 * is ok because it means drivers are claiming resources before
	 * the fs_initcall level of init, which prevents iomem_get_mapping()
	 * users from having established any mappings to revoke.
	 */
	if (!inode)
		return;

	/*
	 * The expectation is that the driver has successfully marked
	 * the resource busy by this point, so devmem_is_allowed()
	 * should start returning false, however for performance this
	 * does not iterate the entire resource range.
	 */
	if (devmem_is_allowed(PHYS_PFN(res->start)) &&
	    devmem_is_allowed(PHYS_PFN(res->end))) {
		/*
		 * *cringe* iomem=relaxed says "go ahead, what's the
		 * worst that can happen?"
		 */
		return;
	}

	unmap_mapping_range(inode->i_mapping, res->start, resource_size(res), 1);
}
#else
static void revoke_iomem(struct resource *res) {}
#endif

struct address_space *iomem_get_mapping(void)
{
	/*
	 * This function is only called from file open paths, hence guaranteed
	 * that fs_initcalls have completed and no need to check for NULL. But
	 * since revoke_iomem can be called before the initcall we still need
	 * the barrier to appease checkers.
	 */
	return smp_load_acquire(&iomem_inode)->i_mapping;
}
static int __request_region_locked(struct resource *res, struct resource *parent,
				   resource_size_t start, resource_size_t n,
				   const char *name, int flags)
{
	DECLARE_WAITQUEUE(wait, current);

	res->name = name;
	res->start = start;
	res->end = start + n - 1;

	for (;;) {
		struct resource *conflict;

		res->flags = resource_type(parent) | resource_ext_type(parent);
		res->flags |= IORESOURCE_BUSY | flags;
		res->desc = parent->desc;

		conflict = __request_resource(parent, res);
		if (!conflict)
			break;
		/*
		 * mm/hmm.c reserves physical addresses which then
		 * become unavailable to other users. Conflicts are
		 * not expected. Warn to aid debugging if encountered.
		 */
		if (conflict->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY) {
			pr_warn("Unaddressable device %s %pR conflicts with %pR\n",
				conflict->name, conflict, res);
		}
		if (conflict != parent) {
			if (!(conflict->flags & IORESOURCE_BUSY)) {
				parent = conflict;
				continue;
			}
		}
		if (conflict->flags & flags & IORESOURCE_MUXED) {
			add_wait_queue(&muxed_resource_wait, &wait);
			write_unlock(&resource_lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule();
			remove_wait_queue(&muxed_resource_wait, &wait);
			write_lock(&resource_lock);
			continue;
		}
		/* Uhhuh, that didn't work out.. */
		return -EBUSY;
	}

	return 0;
}

/**
 * __request_region - create a new busy resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 * @name: reserving caller's ID string
 * @flags: IO resource flags
 */
struct resource *__request_region(struct resource *parent,
				  resource_size_t start, resource_size_t n,
				  const char *name, int flags)
{
	struct resource *res = alloc_resource(GFP_KERNEL);
	int ret;

	if (!res)
		return NULL;

	write_lock(&resource_lock);
	ret = __request_region_locked(res, parent, start, n, name, flags);
	write_unlock(&resource_lock);

	if (ret) {
		free_resource(res);
		return NULL;
	}

	if (parent == &iomem_resource)
		revoke_iomem(res);

	return res;
}
EXPORT_SYMBOL(__request_region);

/**
 * __release_region - release a previously reserved resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * The described resource region must match a currently busy region.
 */
void __release_region(struct resource *parent, resource_size_t start,
		      resource_size_t n)
{
	struct resource **p;
	resource_size_t end;

	p = &parent->child;
	end = start + n - 1;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *res = *p;

		if (!res)
			break;
		if (res->start <= start && res->end >= end) {
			if (!(res->flags & IORESOURCE_BUSY)) {
				p = &res->child;
				continue;
			}
			if (res->start != start || res->end != end)
				break;
			*p = res->sibling;
			write_unlock(&resource_lock);
			if (res->flags & IORESOURCE_MUXED)
				wake_up(&muxed_resource_wait);
			free_resource(res);
			return;
		}
		p = &res->sibling;
	}

	write_unlock(&resource_lock);

	pr_warn("Trying to free nonexistent resource <%pa-%pa>\n", &start, &end);
}
EXPORT_SYMBOL(__release_region);
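/*
 * Illustrative sketch (not part of the original file): most drivers use the
 * request_region()/request_mem_region() wrappers from <linux/ioport.h>,
 * which call __request_region() on the appropriate root. The legacy
 * parallel-port ports below are just an example.
 *
 *	if (!request_region(0x378, 8, "foo-parport"))
 *		return -EBUSY;
 *	// ... poke the ports, then ...
 *	release_region(0x378, 8);
 */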
#ifdef CONFIG_MEMORY_HOTREMOVE
/**
 * release_mem_region_adjustable - release a previously reserved memory region
 * @start: resource start address
 * @size: resource region size
 *
 * This interface is intended for memory hot-delete. The requested region
 * is released from a currently busy memory resource. The requested region
 * must either match exactly or fit into a single busy resource entry. In
 * the latter case, the remaining resource is adjusted accordingly.
 * Existing children of the busy memory resource must be immutable in the
 * request.
 *
 * Note:
 * - Additional release conditions, such as overlapping region, can be
 *   supported after they are confirmed as valid cases.
 * - When a busy memory resource gets split into two entries, the code
 *   assumes that all children remain in the lower address entry for
 *   simplicity. Enhance this logic when necessary.
 */
void release_mem_region_adjustable(resource_size_t start, resource_size_t size)
{
	struct resource *parent = &iomem_resource;
	struct resource *new_res = NULL;
	bool alloc_nofail = false;
	struct resource **p;
	struct resource *res;
	resource_size_t end;

	end = start + size - 1;
	if (WARN_ON_ONCE((start < parent->start) || (end > parent->end)))
		return;

	/*
	 * We free up quite a lot of memory on memory hotunplug (especially
	 * the memmap), just before releasing the region. This is highly
	 * unlikely to fail - let's play safe and make it never fail as the
	 * caller cannot perform any error handling (e.g., trying to re-add
	 * memory will fail similarly).
	 */
retry:
	new_res = alloc_resource(GFP_KERNEL | (alloc_nofail ? __GFP_NOFAIL : 0));

	p = &parent->child;
	write_lock(&resource_lock);

	while ((res = *p)) {
		if (res->start >= end)
			break;

		/* look for the next resource if it does not fit into */
		if (res->start > start || res->end < end) {
			p = &res->sibling;
			continue;
		}

		if (!(res->flags & IORESOURCE_MEM))
			break;

		if (!(res->flags & IORESOURCE_BUSY)) {
			p = &res->child;
			continue;
		}

		/* found the target resource; let's adjust accordingly */
		if (res->start == start && res->end == end) {
			/* free the whole entry */
			*p = res->sibling;
			free_resource(res);
		} else if (res->start == start && res->end != end) {
			/* adjust the start */
			WARN_ON_ONCE(__adjust_resource(res, end + 1,
						       res->end - end));
		} else if (res->start != start && res->end == end) {
			/* adjust the end */
			WARN_ON_ONCE(__adjust_resource(res, res->start,
						       start - res->start));
		} else {
			/* split into two entries - we need a new resource */
			if (!new_res) {
				new_res = alloc_resource(GFP_ATOMIC);
				if (!new_res) {
					alloc_nofail = true;
					write_unlock(&resource_lock);
					goto retry;
				}
			}
			new_res->name = res->name;
			new_res->start = end + 1;
			new_res->end = res->end;
			new_res->flags = res->flags;
			new_res->desc = res->desc;
			new_res->parent = res->parent;
			new_res->sibling = res->sibling;
			new_res->child = NULL;

			if (WARN_ON_ONCE(__adjust_resource(res, res->start,
							   start - res->start)))
				break;
			res->sibling = new_res;
			new_res = NULL;
		}

		break;
	}

	write_unlock(&resource_lock);
	free_resource(new_res);
}
#endif	/* CONFIG_MEMORY_HOTREMOVE */

#ifdef CONFIG_MEMORY_HOTPLUG
static bool system_ram_resources_mergeable(struct resource *r1,
					   struct resource *r2)
{
	/* We assume either r1 or r2 is IORESOURCE_SYSRAM_MERGEABLE. */
	return r1->flags == r2->flags && r1->end + 1 == r2->start &&
	       r1->name == r2->name && r1->desc == r2->desc &&
	       !r1->child && !r2->child;
}
/**
 * merge_system_ram_resource - mark the System RAM resource mergeable and try to
 *	merge it with adjacent, mergeable resources
 * @res: resource descriptor
 *
 * This interface is intended for memory hotplug, whereby lots of contiguous
 * system ram resources are added (e.g., via add_memory*()) by a driver, and
 * the actual resource boundaries are not of interest (e.g., it might be
 * relevant for DIMMs). Only resources that are marked mergeable, that have the
 * same parent, and that don't have any children are considered. All mergeable
 * resources must be immutable during the request.
 *
 * Note:
 * - The caller has to make sure that no pointers to resources that are
 *   marked mergeable are used anymore after this call - the resource might
 *   be freed and the pointer might be stale!
 * - release_mem_region_adjustable() will split on demand on memory hotunplug
 */
void merge_system_ram_resource(struct resource *res)
{
	const unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	struct resource *cur;

	if (WARN_ON_ONCE((res->flags & flags) != flags))
		return;

	write_lock(&resource_lock);
	res->flags |= IORESOURCE_SYSRAM_MERGEABLE;

	/* Try to merge with next item in the list. */
	cur = res->sibling;
	if (cur && system_ram_resources_mergeable(res, cur)) {
		res->end = cur->end;
		res->sibling = cur->sibling;
		free_resource(cur);
	}

	/* Try to merge with previous item in the list. */
	cur = res->parent->child;
	while (cur && cur->sibling != res)
		cur = cur->sibling;
	if (cur && system_ram_resources_mergeable(cur, res)) {
		cur->end = res->end;
		cur->sibling = res->sibling;
		free_resource(res);
	}
	write_unlock(&resource_lock);
}
#endif	/* CONFIG_MEMORY_HOTPLUG */

/*
 * Managed region resource
 */
static void devm_resource_release(struct device *dev, void *ptr)
{
	struct resource **r = ptr;

	release_resource(*r);
}

/**
 * devm_request_resource() - request and reserve an I/O or memory resource
 * @dev: device for which to request the resource
 * @root: root of the resource tree from which to request the resource
 * @new: descriptor of the resource to request
 *
 * This is a device-managed version of request_resource(). There is usually
 * no need to release resources requested by this function explicitly since
 * that will be taken care of when the device is unbound from its driver.
 * If for some reason the resource needs to be released explicitly, because
 * of ordering issues for example, drivers must call devm_release_resource()
 * rather than the regular release_resource().
 *
 * When a conflict is detected between any existing resources and the newly
 * requested resource, an error message will be printed.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int devm_request_resource(struct device *dev, struct resource *root,
			  struct resource *new)
{
	struct resource *conflict, **ptr;

	ptr = devres_alloc(devm_resource_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	*ptr = new;

	conflict = request_resource_conflict(root, new);
	if (conflict) {
		dev_err(dev, "resource collision: %pR conflicts with %s %pR\n",
			new, conflict->name, conflict);
		devres_free(ptr);
		return -EBUSY;
	}

	devres_add(dev, ptr);
	return 0;
}
EXPORT_SYMBOL(devm_request_resource);

static int devm_resource_match(struct device *dev, void *res, void *data)
{
	struct resource **ptr = res;

	return *ptr == data;
}

/**
 * devm_release_resource() - release a previously requested resource
 * @dev: device for which to release the resource
 * @new: descriptor of the resource to release
 *
 * Releases a resource previously requested using devm_request_resource().
 */
void devm_release_resource(struct device *dev, struct resource *new)
{
	WARN_ON(devres_release(dev, devm_resource_release, devm_resource_match,
			       new));
}
EXPORT_SYMBOL(devm_release_resource);

struct region_devres {
	struct resource *parent;
	resource_size_t start;
	resource_size_t n;
};

static void devm_region_release(struct device *dev, void *res)
{
	struct region_devres *this = res;

	__release_region(this->parent, this->start, this->n);
}

static int devm_region_match(struct device *dev, void *res, void *match_data)
{
	struct region_devres *this = res, *match = match_data;

	return this->parent == match->parent &&
		this->start == match->start && this->n == match->n;
}

struct resource *
__devm_request_region(struct device *dev, struct resource *parent,
		      resource_size_t start, resource_size_t n, const char *name)
{
	struct region_devres *dr = NULL;
	struct resource *res;

	dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
			  GFP_KERNEL);
	if (!dr)
		return NULL;

	dr->parent = parent;
	dr->start = start;
	dr->n = n;

	res = __request_region(parent, start, n, name, 0);
	if (res)
		devres_add(dev, dr);
	else
		devres_free(dr);

	return res;
}
EXPORT_SYMBOL(__devm_request_region);

void __devm_release_region(struct device *dev, struct resource *parent,
			   resource_size_t start, resource_size_t n)
{
	struct region_devres match_data = { parent, start, n };

	__release_region(parent, start, n);
	WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
			       &match_data));
}
EXPORT_SYMBOL(__devm_release_region);
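/*
 * Illustrative sketch (not part of the original file): the devm variants
 * tie the region's lifetime to driver binding, so no explicit release is
 * needed on error or unbind paths. "pdev" and the addresses are
 * hypothetical.
 *
 *	if (!devm_request_mem_region(&pdev->dev, 0xf0000000, SZ_4K,
 *				     "foo registers"))
 *		return -EBUSY;
 *	// region is released automatically when the driver unbinds
 */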
/*
 * Reserve I/O ports or memory based on "reserve=" kernel parameter.
 */
#define MAXRESERVE 4
static int __init reserve_setup(char *str)
{
	static int reserved;
	static struct resource reserve[MAXRESERVE];

	for (;;) {
		unsigned int io_start, io_num;
		int x = reserved;
		struct resource *parent;

		if (get_option(&str, &io_start) != 2)
			break;
		if (get_option(&str, &io_num) == 0)
			break;
		if (x < MAXRESERVE) {
			struct resource *res = reserve + x;

			/*
			 * If the region starts below 0x10000, we assume it's
			 * I/O port space; otherwise assume it's memory.
			 */
			if (io_start < 0x10000) {
				res->flags = IORESOURCE_IO;
				parent = &ioport_resource;
			} else {
				res->flags = IORESOURCE_MEM;
				parent = &iomem_resource;
			}
			res->name = "reserved";
			res->start = io_start;
			res->end = io_start + io_num - 1;
			res->flags |= IORESOURCE_BUSY;
			res->desc = IORES_DESC_NONE;
			res->child = NULL;
			if (request_resource(parent, res) == 0)
				reserved = x + 1;
		}
	}
	return 1;
}
__setup("reserve=", reserve_setup);

/*
 * Check if the requested addr and size span more than any single slot in
 * the iomem resource tree.
 */
int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
{
	resource_size_t end = addr + size - 1;
	struct resource *p;
	int err = 0;

	read_lock(&resource_lock);
	for_each_resource(&iomem_resource, p, false) {
		/*
		 * We can probably skip the resources without
		 * IORESOURCE_IO attribute?
		 */
		if (p->start > end)
			continue;
		if (p->end < addr)
			continue;
		if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
		    PFN_DOWN(p->end) >= PFN_DOWN(end))
			continue;
		/*
		 * if a resource is "BUSY", it's not a hardware resource
		 * but a driver mapping of such a resource; we don't want
		 * to warn for those; some drivers legitimately map only
		 * partial hardware resources. (example: vesafb)
		 */
		if (p->flags & IORESOURCE_BUSY)
			continue;

		pr_warn("resource sanity check: requesting [mem %pa-%pa], which spans more than %s %pR\n",
			&addr, &end, p->name, p);
		err = -1;
		break;
	}
	read_unlock(&resource_lock);

	return err;
}

#ifdef CONFIG_STRICT_DEVMEM
static int strict_iomem_checks = 1;
#else
static int strict_iomem_checks;
#endif

/*
 * Check if an address is exclusive to the kernel and must not be mapped to
 * user space, for example, via /dev/mem.
 *
 * Returns true if exclusive to the kernel, otherwise returns false.
 */
bool resource_is_exclusive(struct resource *root, u64 addr, resource_size_t size)
{
	const unsigned int exclusive_system_ram = IORESOURCE_SYSTEM_RAM |
						  IORESOURCE_EXCLUSIVE;
	bool skip_children = false, err = false;
	struct resource *p;

	read_lock(&resource_lock);
	for_each_resource(root, p, skip_children) {
		if (p->start >= addr + size)
			break;
		if (p->end < addr) {
			skip_children = true;
			continue;
		}
		skip_children = false;

		/*
		 * IORESOURCE_SYSTEM_RAM resources are exclusive if
		 * IORESOURCE_EXCLUSIVE is set, even if they
		 * are not busy and even if "iomem=relaxed" is set. The
		 * responsible driver dynamically adds/removes system RAM within
		 * such an area and uncontrolled access is dangerous.
		 */
		if ((p->flags & exclusive_system_ram) == exclusive_system_ram) {
			err = true;
			break;
		}

		/*
		 * A resource is exclusive if IORESOURCE_EXCLUSIVE is set
		 * or CONFIG_IO_STRICT_DEVMEM is enabled and the
		 * resource is busy.
		 */
		if (!strict_iomem_checks || !(p->flags & IORESOURCE_BUSY))
			continue;
		if (IS_ENABLED(CONFIG_IO_STRICT_DEVMEM)
		    || p->flags & IORESOURCE_EXCLUSIVE) {
			err = true;
			break;
		}
	}
	read_unlock(&resource_lock);

	return err;
}

bool iomem_is_exclusive(u64 addr)
{
	return resource_is_exclusive(&iomem_resource, addr & PAGE_MASK,
				     PAGE_SIZE);
}

struct resource_entry *resource_list_create_entry(struct resource *res,
						  size_t extra_size)
{
	struct resource_entry *entry;

	entry = kzalloc(sizeof(*entry) + extra_size, GFP_KERNEL);
	if (entry) {
		INIT_LIST_HEAD(&entry->node);
		entry->res = res ? res : &entry->__res;
	}

	return entry;
}
EXPORT_SYMBOL(resource_list_create_entry);

void resource_list_free(struct list_head *head)
{
	struct resource_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, node)
		resource_list_destroy_entry(entry);
}
EXPORT_SYMBOL(resource_list_free);
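/*
 * Illustrative sketch (not part of the original file): building up and
 * tearing down a resource list with the helpers above plus
 * resource_list_add_tail() from <linux/resource_ext.h>. The range is
 * hypothetical.
 *
 *	LIST_HEAD(head);
 *	struct resource_entry *entry;
 *
 *	entry = resource_list_create_entry(NULL, 0);
 *	if (!entry)
 *		return -ENOMEM;
 *	*entry->res = (struct resource)DEFINE_RES_MEM(0xf0000000, SZ_4K);
 *	resource_list_add_tail(entry, &head);
 *	// ... walk the list, then ...
 *	resource_list_free(&head);
 */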
#ifdef CONFIG_GET_FREE_REGION
#define GFR_DESCENDING		(1UL << 0)
#define GFR_REQUEST_REGION	(1UL << 1)
#define GFR_DEFAULT_ALIGN	(1UL << PA_SECTION_SHIFT)

static resource_size_t gfr_start(struct resource *base, resource_size_t size,
				 resource_size_t align, unsigned long flags)
{
	if (flags & GFR_DESCENDING) {
		resource_size_t end;

		end = min_t(resource_size_t, base->end,
			    (1ULL << MAX_PHYSMEM_BITS) - 1);
		return end - size + 1;
	}

	return ALIGN(base->start, align);
}

static bool gfr_continue(struct resource *base, resource_size_t addr,
			 resource_size_t size, unsigned long flags)
{
	if (flags & GFR_DESCENDING)
		return addr > size && addr >= base->start;
	/*
	 * In the ascend case be careful that the last increment by
	 * @size did not wrap past 0.
	 */
	return addr > addr - size &&
	       addr <= min_t(resource_size_t, base->end,
			     (1ULL << MAX_PHYSMEM_BITS) - 1);
}

static resource_size_t gfr_next(resource_size_t addr, resource_size_t size,
				unsigned long flags)
{
	if (flags & GFR_DESCENDING)
		return addr - size;
	return addr + size;
}

static void remove_free_mem_region(void *_res)
{
	struct resource *res = _res;

	if (res->parent)
		remove_resource(res);
	free_resource(res);
}

static struct resource *
get_free_mem_region(struct device *dev, struct resource *base,
		    resource_size_t size, const unsigned long align,
		    const char *name, const unsigned long desc,
		    const unsigned long flags)
{
	resource_size_t addr;
	struct resource *res;
	struct region_devres *dr = NULL;

	size = ALIGN(size, align);

	res = alloc_resource(GFP_KERNEL);
	if (!res)
		return ERR_PTR(-ENOMEM);

	if (dev && (flags & GFR_REQUEST_REGION)) {
		dr = devres_alloc(devm_region_release,
				  sizeof(struct region_devres), GFP_KERNEL);
		if (!dr) {
			free_resource(res);
			return ERR_PTR(-ENOMEM);
		}
	} else if (dev) {
		if (devm_add_action_or_reset(dev, remove_free_mem_region, res))
			return ERR_PTR(-ENOMEM);
	}

	write_lock(&resource_lock);
	for (addr = gfr_start(base, size, align, flags);
	     gfr_continue(base, addr, align, flags);
	     addr = gfr_next(addr, align, flags)) {
		if (__region_intersects(base, addr, size, 0, IORES_DESC_NONE) !=
		    REGION_DISJOINT)
			continue;

		if (flags & GFR_REQUEST_REGION) {
			if (__request_region_locked(res, &iomem_resource, addr,
						    size, name, 0))
				break;

			if (dev) {
				dr->parent = &iomem_resource;
				dr->start = addr;
				dr->n = size;
				devres_add(dev, dr);
			}

			res->desc = desc;
			write_unlock(&resource_lock);

			/*
			 * A driver is claiming this region so revoke any
			 * mappings.
			 */
			revoke_iomem(res);
		} else {
			res->start = addr;
			res->end = addr + size - 1;
			res->name = name;
			res->desc = desc;
			res->flags = IORESOURCE_MEM;

			/*
			 * Only succeed if the resource hosts an exclusive
			 * range after the insert
			 */
			if (__insert_resource(base, res) || res->child)
				break;

			write_unlock(&resource_lock);
		}

		return res;
	}
	write_unlock(&resource_lock);

	if (flags & GFR_REQUEST_REGION) {
		free_resource(res);
		devres_free(dr);
	} else if (dev)
		devm_release_action(dev, remove_free_mem_region, res);

	return ERR_PTR(-ERANGE);
}
/**
 * devm_request_free_mem_region - find free region for device private memory
 *
 * @dev: device struct to bind the resource to
 * @size: size in bytes of the device memory to add
 * @base: resource tree to look in
 *
 * This function tries to find an empty range of physical address space big
 * enough to contain the new resource, so that it can later be hotplugged as
 * ZONE_DEVICE memory, which in turn allocates struct pages.
 */
struct resource *devm_request_free_mem_region(struct device *dev,
					      struct resource *base, unsigned long size)
{
	unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION;

	return get_free_mem_region(dev, base, size, GFR_DEFAULT_ALIGN,
				   dev_name(dev),
				   IORES_DESC_DEVICE_PRIVATE_MEMORY, flags);
}
EXPORT_SYMBOL_GPL(devm_request_free_mem_region);

struct resource *request_free_mem_region(struct resource *base,
					 unsigned long size, const char *name)
{
	unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION;

	return get_free_mem_region(NULL, base, size, GFR_DEFAULT_ALIGN, name,
				   IORES_DESC_DEVICE_PRIVATE_MEMORY, flags);
}
EXPORT_SYMBOL_GPL(request_free_mem_region);

/**
 * alloc_free_mem_region - find a free region relative to @base
 * @base: resource that will parent the new resource
 * @size: size in bytes of memory to allocate from @base
 * @align: alignment requirements for the allocation
 * @name: resource name
 *
 * Buses like CXL, that can dynamically instantiate new memory regions,
 * need a method to allocate physical address space for those regions.
 * Allocate and insert a new resource to cover a free range in the span
 * of @base that is not claimed by any descendant of @base.
 */
struct resource *alloc_free_mem_region(struct resource *base,
				       unsigned long size, unsigned long align,
				       const char *name)
{
	/* Default of ascending direction and insert resource */
	unsigned long flags = 0;

	return get_free_mem_region(NULL, base, size, align, name,
				   IORES_DESC_NONE, flags);
}
EXPORT_SYMBOL_NS_GPL(alloc_free_mem_region, CXL);
#endif /* CONFIG_GET_FREE_REGION */

static int __init strict_iomem(char *str)
{
	if (strstr(str, "relaxed"))
		strict_iomem_checks = 0;
	if (strstr(str, "strict"))
		strict_iomem_checks = 1;
	return 1;
}

static int iomem_fs_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, DEVMEM_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type iomem_fs_type = {
	.name		= "iomem",
	.owner		= THIS_MODULE,
	.init_fs_context = iomem_fs_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static int __init iomem_init_inode(void)
{
	static struct vfsmount *iomem_vfs_mount;
	static int iomem_fs_cnt;
	struct inode *inode;
	int rc;

	rc = simple_pin_fs(&iomem_fs_type, &iomem_vfs_mount, &iomem_fs_cnt);
	if (rc < 0) {
		pr_err("Cannot mount iomem pseudo filesystem: %d\n", rc);
		return rc;
	}

	inode = alloc_anon_inode(iomem_vfs_mount->mnt_sb);
	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		pr_err("Cannot allocate inode for iomem: %d\n", rc);
		simple_release_fs(&iomem_vfs_mount, &iomem_fs_cnt);
		return rc;
	}

	/*
	 * Publish iomem revocation inode initialized.
	 * Pairs with smp_load_acquire() in revoke_iomem().
	 */
	smp_store_release(&iomem_inode, inode);

	return 0;
}

fs_initcall(iomem_init_inode);

__setup("iomem=", strict_iomem);