/*
 *	linux/kernel/resource.c
 *
 * Copyright (C) 1999	Linus Torvalds
 * Copyright (C) 1999	Martin Mares <mj@ucw.cz>
 *
 * Arbitrary resource management.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/pfn.h>
#include <asm/io.h>


struct resource ioport_resource = {
        .name   = "PCI IO",
        .start  = 0,
        .end    = IO_SPACE_LIMIT,
        .flags  = IORESOURCE_IO,
};
EXPORT_SYMBOL(ioport_resource);

struct resource iomem_resource = {
        .name   = "PCI mem",
        .start  = 0,
        .end    = -1,
        .flags  = IORESOURCE_MEM,
};
EXPORT_SYMBOL(iomem_resource);

static DEFINE_RWLOCK(resource_lock);

/*
 * By default, we allocate free space bottom-up.  The architecture can request
 * top-down by clearing this flag.  The user can override the architecture's
 * choice with the "resource_alloc_from_bottom" kernel boot option, but that
 * should only be a debugging tool.
 */
int resource_alloc_from_bottom = 1;

static __init int setup_alloc_from_bottom(char *s)
{
        printk(KERN_INFO
               "resource: allocating from bottom-up; please report a bug\n");
        resource_alloc_from_bottom = 1;
        return 0;
}
early_param("resource_alloc_from_bottom", setup_alloc_from_bottom);

static void *r_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct resource *p = v;
        (*pos)++;
        if (p->child)
                return p->child;
        while (!p->sibling && p->parent)
                p = p->parent;
        return p->sibling;
}

#ifdef CONFIG_PROC_FS

enum { MAX_IORES_LEVEL = 5 };

static void *r_start(struct seq_file *m, loff_t *pos)
        __acquires(resource_lock)
{
        struct resource *p = m->private;
        loff_t l = 0;
        read_lock(&resource_lock);
        for (p = p->child; p && l < *pos; p = r_next(m, p, &l))
                ;
        return p;
}

static void r_stop(struct seq_file *m, void *v)
        __releases(resource_lock)
{
        read_unlock(&resource_lock);
}

static int r_show(struct seq_file *m, void *v)
{
        struct resource *root = m->private;
        struct resource *r = v, *p;
        int width = root->end < 0x10000 ? 4 : 8;
        int depth;

        for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
                if (p->parent == root)
                        break;
        seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
                        depth * 2, "",
                        width, (unsigned long long) r->start,
                        width, (unsigned long long) r->end,
                        r->name ? r->name : "<BAD>");
        return 0;
}
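
/*
 * For reference, r_show() renders each node as an indented
 * "start-end : name" line.  An illustrative (machine-dependent, values
 * hypothetical) fragment of /proc/ioports would look like:
 *
 *	0000-001f : dma1
 *	0060-0060 : keyboard
 *	0170-0177 : 0000:00:1f.1
 *	  0170-0177 : ide1
 *
 * Children are indented two spaces per nesting level, and the field
 * width is four hex digits for the I/O port tree and eight for iomem.
 */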

static const struct seq_operations resource_op = {
        .start  = r_start,
        .next   = r_next,
        .stop   = r_stop,
        .show   = r_show,
};

static int ioports_open(struct inode *inode, struct file *file)
{
        int res = seq_open(file, &resource_op);
        if (!res) {
                struct seq_file *m = file->private_data;
                m->private = &ioport_resource;
        }
        return res;
}

static int iomem_open(struct inode *inode, struct file *file)
{
        int res = seq_open(file, &resource_op);
        if (!res) {
                struct seq_file *m = file->private_data;
                m->private = &iomem_resource;
        }
        return res;
}

static const struct file_operations proc_ioports_operations = {
        .open           = ioports_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

static const struct file_operations proc_iomem_operations = {
        .open           = iomem_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

static int __init ioresources_init(void)
{
        proc_create("ioports", 0, NULL, &proc_ioports_operations);
        proc_create("iomem", 0, NULL, &proc_iomem_operations);
        return 0;
}
__initcall(ioresources_init);

#endif /* CONFIG_PROC_FS */

/* Return the conflict entry if you can't request it */
static struct resource * __request_resource(struct resource *root, struct resource *new)
{
        resource_size_t start = new->start;
        resource_size_t end = new->end;
        struct resource *tmp, **p;

        if (end < start)
                return root;
        if (start < root->start)
                return root;
        if (end > root->end)
                return root;
        p = &root->child;
        for (;;) {
                tmp = *p;
                if (!tmp || tmp->start > end) {
                        new->sibling = tmp;
                        *p = new;
                        new->parent = root;
                        return NULL;
                }
                p = &tmp->sibling;
                if (tmp->end < start)
                        continue;
                return tmp;
        }
}

static int __release_resource(struct resource *old)
{
        struct resource *tmp, **p;

        p = &old->parent->child;
        for (;;) {
                tmp = *p;
                if (!tmp)
                        break;
                if (tmp == old) {
                        *p = tmp->sibling;
                        old->parent = NULL;
                        return 0;
                }
                p = &tmp->sibling;
        }
        return -EINVAL;
}

static void __release_child_resources(struct resource *r)
{
        struct resource *tmp, *p;
        resource_size_t size;

        p = r->child;
        r->child = NULL;
        while (p) {
                tmp = p;
                p = p->sibling;

                tmp->parent = NULL;
                tmp->sibling = NULL;
                __release_child_resources(tmp);

                printk(KERN_DEBUG "release child resource %pR\n", tmp);
                /* need to restore size, and keep flags */
                size = resource_size(tmp);
                tmp->start = 0;
                tmp->end = size - 1;
        }
}

void release_child_resources(struct resource *r)
{
        write_lock(&resource_lock);
        __release_child_resources(r);
        write_unlock(&resource_lock);
}
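
/*
 * The helpers above maintain a simple parent/child/sibling tree: each
 * node's ->child points at its first (lowest-addressed) child, children
 * of the same parent are chained through ->sibling in ascending ->start
 * order, and overlapping siblings are never allowed.  An illustrative
 * (hypothetical) iomem fragment:
 *
 *	iomem_resource
 *	    +- "System RAM"      [0x00000000-0x07ffffff]
 *	    |      +- "Kernel code"
 *	    |      +- "Kernel data"
 *	    +- "PCI Bus 0000:00"  [0x08000000-0x0fffffff]
 */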

/**
 * request_resource_conflict - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns NULL for success, or a pointer to the conflicting resource on error.
 */
struct resource *request_resource_conflict(struct resource *root, struct resource *new)
{
        struct resource *conflict;

        write_lock(&resource_lock);
        conflict = __request_resource(root, new);
        write_unlock(&resource_lock);
        return conflict;
}

/**
 * request_resource - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns 0 for success, negative error code on error.
 */
int request_resource(struct resource *root, struct resource *new)
{
        struct resource *conflict;

        conflict = request_resource_conflict(root, new);
        return conflict ? -EBUSY : 0;
}

EXPORT_SYMBOL(request_resource);

/**
 * release_resource - release a previously reserved resource
 * @old: resource pointer
 */
int release_resource(struct resource *old)
{
        int retval;

        write_lock(&resource_lock);
        retval = __release_resource(old);
        write_unlock(&resource_lock);
        return retval;
}

EXPORT_SYMBOL(release_resource);

#if !defined(CONFIG_ARCH_HAS_WALK_MEMORY)
/*
 * Finds the lowest memory resource that exists within [res->start, res->end).
 * The caller must specify res->start, res->end, res->flags and "name".
 * If found, returns 0 and res is overwritten; if not found, returns -1.
 */
static int find_next_system_ram(struct resource *res, char *name)
{
        resource_size_t start, end;
        struct resource *p;

        BUG_ON(!res);

        start = res->start;
        end = res->end;
        BUG_ON(start >= end);

        read_lock(&resource_lock);
        for (p = iomem_resource.child; p ; p = p->sibling) {
                /* system ram is just marked as IORESOURCE_MEM */
                if (p->flags != res->flags)
                        continue;
                if (name && strcmp(p->name, name))
                        continue;
                if (p->start > end) {
                        p = NULL;
                        break;
                }
                if ((p->end >= start) && (p->start < end))
                        break;
        }
        read_unlock(&resource_lock);
        if (!p)
                return -1;
        /* copy data */
        if (res->start < p->start)
                res->start = p->start;
        if (res->end > p->end)
                res->end = p->end;
        return 0;
}

/*
 * This function calls the @func callback against each memory range of
 * "System RAM", i.e. each range marked as IORESOURCE_MEM | IORESOURCE_BUSY.
 * For now, this function only works for "System RAM".
 */
int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
                void *arg, int (*func)(unsigned long, unsigned long, void *))
{
        struct resource res;
        unsigned long pfn, end_pfn;
        u64 orig_end;
        int ret = -1;

        res.start = (u64) start_pfn << PAGE_SHIFT;
        res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
        res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
        orig_end = res.end;
        while ((res.start < res.end) &&
                (find_next_system_ram(&res, "System RAM") >= 0)) {
                pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT;
                end_pfn = (res.end + 1) >> PAGE_SHIFT;
                if (end_pfn > pfn)
                        ret = (*func)(pfn, end_pfn - pfn, arg);
                if (ret)
                        break;
                res.start = res.end + 1;
                res.end = orig_end;
        }
        return ret;
}

#endif

static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
{
        return 1;
}
/*
 * This generic page_is_ram() returns true if the specified address is
 * registered as "System RAM" in the iomem_resource list.
 */
int __weak page_is_ram(unsigned long pfn)
{
        return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
}
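
/*
 * Illustrative sketch (not part of the kernel): a typical caller of
 * walk_system_ram_range() passes a callback that is invoked once per
 * "System RAM" range and stops the walk by returning non-zero.  The
 * function and variable names below are hypothetical.
 */
static int example_count_ram_pages(unsigned long pfn, unsigned long nr_pages,
                                   void *arg)
{
        unsigned long *total = arg;

        *total += nr_pages;
        return 0;       /* zero: keep walking the remaining ranges */
}

static unsigned long example_total_ram_pages(unsigned long max_pfn)
{
        unsigned long total = 0;

        walk_system_ram_range(0, max_pfn, &total, example_count_ram_pages);
        return total;
}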

static resource_size_t simple_align_resource(void *data,
                                             const struct resource *avail,
                                             resource_size_t size,
                                             resource_size_t align)
{
        return avail->start;
}

static void resource_clip(struct resource *res, resource_size_t min,
                          resource_size_t max)
{
        if (res->start < min)
                res->start = min;
        if (res->end > max)
                res->end = max;
}

static bool resource_contains(struct resource *res1, struct resource *res2)
{
        return res1->start <= res2->start && res1->end >= res2->end;
}

/*
 * Find the resource before "child" in the sibling list of "root" children.
 */
static struct resource *find_sibling_prev(struct resource *root, struct resource *child)
{
        struct resource *this;

        for (this = root->child; this; this = this->sibling)
                if (this->sibling == child)
                        return this;

        return NULL;
}

/*
 * Find empty slot in the resource tree given range and alignment.
 * This version allocates from the end of the root resource first.
 */
static int find_resource_from_top(struct resource *root, struct resource *new,
                                  resource_size_t size, resource_size_t min,
                                  resource_size_t max, resource_size_t align,
                                  resource_size_t (*alignf)(void *,
                                                const struct resource *,
                                                resource_size_t,
                                                resource_size_t),
                                  void *alignf_data)
{
        struct resource *this;
        struct resource tmp, avail, alloc;

        tmp.start = root->end;
        tmp.end = root->end;

        this = find_sibling_prev(root, NULL);
        for (;;) {
                if (this) {
                        if (this->end < root->end)
                                tmp.start = this->end + 1;
                } else
                        tmp.start = root->start;

                resource_clip(&tmp, min, max);

                /* Check for overflow after ALIGN() */
                avail = *new;
                avail.start = ALIGN(tmp.start, align);
                avail.end = tmp.end;
                if (avail.start >= tmp.start) {
                        alloc.start = alignf(alignf_data, &avail, size, align);
                        alloc.end = alloc.start + size - 1;
                        if (resource_contains(&avail, &alloc)) {
                                new->start = alloc.start;
                                new->end = alloc.end;
                                return 0;
                        }
                }

                if (!this || this->start == root->start)
                        break;

                tmp.end = this->start - 1;
                this = find_sibling_prev(root, this);
        }
        return -EBUSY;
}

/*
 * Find empty slot in the resource tree given range and alignment.
 * This version allocates from the beginning of the root resource first.
 */
static int find_resource(struct resource *root, struct resource *new,
                         resource_size_t size, resource_size_t min,
                         resource_size_t max, resource_size_t align,
                         resource_size_t (*alignf)(void *,
                                                const struct resource *,
                                                resource_size_t,
                                                resource_size_t),
                         void *alignf_data)
{
        struct resource *this = root->child;
        struct resource tmp = *new, avail, alloc;

        tmp.start = root->start;
        /*
         * Skip past an allocated resource that starts at 0, since the
         * assignment of this->start - 1 to tmp.end below would cause an
         * underflow.
         */
        if (this && this->start == 0) {
                tmp.start = this->end + 1;
                this = this->sibling;
        }
        for (;;) {
                if (this)
                        tmp.end = this->start - 1;
                else
                        tmp.end = root->end;

                resource_clip(&tmp, min, max);

                /* Check for overflow after ALIGN() */
                avail = *new;
                avail.start = ALIGN(tmp.start, align);
                avail.end = tmp.end;
                if (avail.start >= tmp.start) {
                        alloc.start = alignf(alignf_data, &avail, size, align);
                        alloc.end = alloc.start + size - 1;
                        if (resource_contains(&avail, &alloc)) {
                                new->start = alloc.start;
                                new->end = alloc.end;
                                return 0;
                        }
                }

                if (!this)
                        break;

                tmp.start = this->end + 1;
                this = this->sibling;
        }
        return -EBUSY;
}
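
/*
 * Illustrative sketch (not part of the kernel): an @alignf callback for
 * allocate_resource() that keeps allocations out of the first 1MB,
 * similar in spirit to the alignment callbacks that bus code such as
 * PCI supplies.  The name and the 1MB cut-off are hypothetical.
 */
static resource_size_t example_avoid_low_mem(void *data,
                                             const struct resource *avail,
                                             resource_size_t size,
                                             resource_size_t align)
{
        resource_size_t start = avail->start;

        if (start < 0x100000)
                start = ALIGN(0x100000, align);

        /* If this start does not fit in @avail, the caller moves on. */
        return start;
}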

/**
 * allocate_resource - allocate empty slot in the resource tree given range & alignment
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 * @size: requested resource region size
 * @min: minimum acceptable start address for the allocation
 * @max: maximum acceptable end address for the allocation
 * @align: alignment requested, in bytes
 * @alignf: alignment function, optional, called if not NULL
 * @alignf_data: arbitrary data to pass to the @alignf function
 *
 * Returns 0 on success, -EBUSY if no free slot could be found or reserved.
 */
int allocate_resource(struct resource *root, struct resource *new,
                      resource_size_t size, resource_size_t min,
                      resource_size_t max, resource_size_t align,
                      resource_size_t (*alignf)(void *,
                                                const struct resource *,
                                                resource_size_t,
                                                resource_size_t),
                      void *alignf_data)
{
        int err;

        if (!alignf)
                alignf = simple_align_resource;

        write_lock(&resource_lock);
        if (resource_alloc_from_bottom)
                err = find_resource(root, new, size, min, max, align, alignf, alignf_data);
        else
                err = find_resource_from_top(root, new, size, min, max, align, alignf, alignf_data);
        if (err >= 0 && __request_resource(root, new))
                err = -EBUSY;
        write_unlock(&resource_lock);
        return err;
}

EXPORT_SYMBOL(allocate_resource);
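
/*
 * Illustrative sketch (not part of the kernel): allocating a free,
 * page-aligned 4KB window anywhere in iomem_resource.  The resource,
 * function name and constants are hypothetical; real callers are
 * typically bus code such as PCI.
 */
static struct resource example_window = {
        .name   = "example window",
        .flags  = IORESOURCE_MEM,
};

static int example_alloc_window(void)
{
        return allocate_resource(&iomem_resource, &example_window,
                                 0x1000,                        /* size */
                                 0, (resource_size_t)-1,        /* min, max */
                                 0x1000,                        /* align */
                                 NULL, NULL);
}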

/*
 * Insert a resource into the resource tree. If successful, return NULL,
 * otherwise return the conflicting resource (compare to __request_resource())
 */
static struct resource * __insert_resource(struct resource *parent, struct resource *new)
{
        struct resource *first, *next;

        for (;; parent = first) {
                first = __request_resource(parent, new);
                if (!first)
                        return first;

                if (first == parent)
                        return first;
                if (WARN_ON(first == new))      /* duplicated insertion */
                        return first;

                if ((first->start > new->start) || (first->end < new->end))
                        break;
                if ((first->start == new->start) && (first->end == new->end))
                        break;
        }

        for (next = first; ; next = next->sibling) {
                /* Partial overlap? Bad, and unfixable */
                if (next->start < new->start || next->end > new->end)
                        return next;
                if (!next->sibling)
                        break;
                if (next->sibling->start > new->end)
                        break;
        }

        new->parent = parent;
        new->sibling = next->sibling;
        new->child = first;

        next->sibling = NULL;
        for (next = first; next; next = next->sibling)
                next->parent = new;

        if (parent->child == first) {
                parent->child = new;
        } else {
                next = parent->child;
                while (next->sibling != first)
                        next = next->sibling;
                next->sibling = new;
        }
        return NULL;
}

/**
 * insert_resource_conflict - Inserts resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns NULL on success, or the conflicting resource if the resource
 * can't be inserted.
 *
 * This function is equivalent to request_resource_conflict when no conflict
 * happens. If a conflict happens, and the conflicting resources
 * entirely fit within the range of the new resource, then the new
 * resource is inserted and the conflicting resources become children of
 * the new resource.
 */
struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
{
        struct resource *conflict;

        write_lock(&resource_lock);
        conflict = __insert_resource(parent, new);
        write_unlock(&resource_lock);
        return conflict;
}

/**
 * insert_resource - Inserts a resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns 0 on success, -EBUSY if the resource can't be inserted.
 */
int insert_resource(struct resource *parent, struct resource *new)
{
        struct resource *conflict;

        conflict = insert_resource_conflict(parent, new);
        return conflict ? -EBUSY : 0;
}

/**
 * insert_resource_expand_to_fit - Insert a resource into the resource tree
 * @root: root resource descriptor
 * @new: new resource to insert
 *
 * Insert a resource into the resource tree, possibly expanding it in order
 * to make it encompass any conflicting resources.
 */
void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
{
        if (new->parent)
                return;

        write_lock(&resource_lock);
        for (;;) {
                struct resource *conflict;

                conflict = __insert_resource(root, new);
                if (!conflict)
                        break;
                if (conflict == root)
                        break;

                /* Ok, expand resource to cover the conflict, then try again .. */
                if (conflict->start < new->start)
                        new->start = conflict->start;
                if (conflict->end > new->end)
                        new->end = conflict->end;

                printk("Expanded resource %s due to conflict with %s\n", new->name, conflict->name);
        }
        write_unlock(&resource_lock);
}
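
/*
 * Illustrative sketch (not part of the kernel): registering a
 * firmware-described range with insert_resource().  Unlike
 * request_resource(), the insertion still succeeds when existing
 * resources fall entirely inside the new range; they simply become its
 * children.  The name and addresses below are hypothetical.
 */
static struct resource example_fw_region = {
        .name   = "example firmware region",
        .start  = 0xfed00000,
        .end    = 0xfed03fff,
        .flags  = IORESOURCE_MEM,
};

static int example_insert_fw_region(void)
{
        return insert_resource(&iomem_resource, &example_fw_region);
}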

/**
 * adjust_resource - modify a resource's start and size
 * @res: resource to modify
 * @start: new start value
 * @size: new size
 *
 * Given an existing resource, change its start and size to match the
 * arguments.  Returns 0 on success, -EBUSY if it can't fit.
 * Existing children of the resource are assumed to be immutable.
 */
int adjust_resource(struct resource *res, resource_size_t start, resource_size_t size)
{
        struct resource *tmp, *parent = res->parent;
        resource_size_t end = start + size - 1;
        int result = -EBUSY;

        write_lock(&resource_lock);

        if ((start < parent->start) || (end > parent->end))
                goto out;

        for (tmp = res->child; tmp; tmp = tmp->sibling) {
                if ((tmp->start < start) || (tmp->end > end))
                        goto out;
        }

        if (res->sibling && (res->sibling->start <= end))
                goto out;

        tmp = parent->child;
        if (tmp != res) {
                while (tmp->sibling != res)
                        tmp = tmp->sibling;
                if (start <= tmp->end)
                        goto out;
        }

        res->start = start;
        res->end = end;
        result = 0;

 out:
        write_unlock(&resource_lock);
        return result;
}

static void __init __reserve_region_with_split(struct resource *root,
                resource_size_t start, resource_size_t end,
                const char *name)
{
        struct resource *parent = root;
        struct resource *conflict;
        struct resource *res = kzalloc(sizeof(*res), GFP_ATOMIC);

        if (!res)
                return;

        res->name = name;
        res->start = start;
        res->end = end;
        res->flags = IORESOURCE_BUSY;

        conflict = __request_resource(parent, res);
        if (!conflict)
                return;

        /* failed, split and try again */
        kfree(res);

        /* conflict covered whole area */
        if (conflict->start <= start && conflict->end >= end)
                return;

        if (conflict->start > start)
                __reserve_region_with_split(root, start, conflict->start-1, name);
        if (conflict->end < end)
                __reserve_region_with_split(root, conflict->end+1, end, name);
}

void __init reserve_region_with_split(struct resource *root,
                resource_size_t start, resource_size_t end,
                const char *name)
{
        write_lock(&resource_lock);
        __reserve_region_with_split(root, start, end, name);
        write_unlock(&resource_lock);
}

EXPORT_SYMBOL(adjust_resource);

/**
 * resource_alignment - calculate resource's alignment
 * @res: resource pointer
 *
 * Returns alignment on success, 0 (invalid alignment) on failure.
 */
resource_size_t resource_alignment(struct resource *res)
{
        switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
        case IORESOURCE_SIZEALIGN:
                return resource_size(res);
        case IORESOURCE_STARTALIGN:
                return res->start;
        default:
                return 0;
        }
}
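
/*
 * Illustrative sketch (not part of the kernel): doubling the size of an
 * already-registered window with adjust_resource() while keeping its
 * start address.  The helper name is hypothetical.
 */
static int example_grow_window(struct resource *win)
{
        return adjust_resource(win, win->start, resource_size(win) * 2);
}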

/*
 * This is compatibility stuff for IO resources.
 *
 * Note how this, unlike the above, knows about
 * the IO flag meanings (busy etc).
 *
 * request_region creates a new busy region.
 *
 * check_region returns non-zero if the area is already busy.
 *
 * release_region releases a matching busy region.
 */

static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait);

/**
 * __request_region - create a new busy resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 * @name: reserving caller's ID string
 * @flags: IO resource flags
 */
struct resource * __request_region(struct resource *parent,
                                   resource_size_t start, resource_size_t n,
                                   const char *name, int flags)
{
        DECLARE_WAITQUEUE(wait, current);
        struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);

        if (!res)
                return NULL;

        res->name = name;
        res->start = start;
        res->end = start + n - 1;
        res->flags = IORESOURCE_BUSY;
        res->flags |= flags;

        write_lock(&resource_lock);

        for (;;) {
                struct resource *conflict;

                conflict = __request_resource(parent, res);
                if (!conflict)
                        break;
                if (conflict != parent) {
                        parent = conflict;
                        if (!(conflict->flags & IORESOURCE_BUSY))
                                continue;
                }
                if (conflict->flags & flags & IORESOURCE_MUXED) {
                        add_wait_queue(&muxed_resource_wait, &wait);
                        write_unlock(&resource_lock);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        schedule();
                        remove_wait_queue(&muxed_resource_wait, &wait);
                        write_lock(&resource_lock);
                        continue;
                }
                /* Uhhuh, that didn't work out.. */
                kfree(res);
                res = NULL;
                break;
        }
        write_unlock(&resource_lock);
        return res;
}
EXPORT_SYMBOL(__request_region);

/**
 * __check_region - check if a resource region is busy or free
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * Returns 0 if the region is free at the moment it is checked,
 * returns %-EBUSY if the region is busy.
 *
 * NOTE:
 * This function is deprecated because its use is racy.
 * Even if it returns 0, a subsequent call to request_region()
 * may fail because another driver etc. just allocated the region.
 * Do NOT use it.  It will be removed from the kernel.
 */
int __check_region(struct resource *parent, resource_size_t start,
                        resource_size_t n)
{
        struct resource * res;

        res = __request_region(parent, start, n, "check-region", 0);
        if (!res)
                return -EBUSY;

        release_resource(res);
        kfree(res);
        return 0;
}
EXPORT_SYMBOL(__check_region);
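
/*
 * Illustrative sketch (not part of the kernel): the usual driver-side
 * pattern built on __request_region() via the request_region() and
 * release_region() helpers from <linux/ioport.h>.  The port range and
 * device name are hypothetical.
 */
static int example_claim_ports(void)
{
        if (!request_region(0x300, 8, "example-device"))
                return -EBUSY;

        /* ... program the device through its I/O ports ... */

        release_region(0x300, 8);
        return 0;
}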

/**
 * __release_region - release a previously reserved resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * The described resource region must match a currently busy region.
 */
void __release_region(struct resource *parent, resource_size_t start,
                        resource_size_t n)
{
        struct resource **p;
        resource_size_t end;

        p = &parent->child;
        end = start + n - 1;

        write_lock(&resource_lock);

        for (;;) {
                struct resource *res = *p;

                if (!res)
                        break;
                if (res->start <= start && res->end >= end) {
                        if (!(res->flags & IORESOURCE_BUSY)) {
                                p = &res->child;
                                continue;
                        }
                        if (res->start != start || res->end != end)
                                break;
                        *p = res->sibling;
                        write_unlock(&resource_lock);
                        if (res->flags & IORESOURCE_MUXED)
                                wake_up(&muxed_resource_wait);
                        kfree(res);
                        return;
                }
                p = &res->sibling;
        }

        write_unlock(&resource_lock);

        printk(KERN_WARNING "Trying to free nonexistent resource "
                "<%016llx-%016llx>\n", (unsigned long long)start,
                (unsigned long long)end);
}
EXPORT_SYMBOL(__release_region);

/*
 * Managed region resource
 */
struct region_devres {
        struct resource *parent;
        resource_size_t start;
        resource_size_t n;
};

static void devm_region_release(struct device *dev, void *res)
{
        struct region_devres *this = res;

        __release_region(this->parent, this->start, this->n);
}

static int devm_region_match(struct device *dev, void *res, void *match_data)
{
        struct region_devres *this = res, *match = match_data;

        return this->parent == match->parent &&
                this->start == match->start && this->n == match->n;
}

struct resource * __devm_request_region(struct device *dev,
                                struct resource *parent, resource_size_t start,
                                resource_size_t n, const char *name)
{
        struct region_devres *dr = NULL;
        struct resource *res;

        dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
                          GFP_KERNEL);
        if (!dr)
                return NULL;

        dr->parent = parent;
        dr->start = start;
        dr->n = n;

        res = __request_region(parent, start, n, name, 0);
        if (res)
                devres_add(dev, dr);
        else
                devres_free(dr);

        return res;
}
EXPORT_SYMBOL(__devm_request_region);

void __devm_release_region(struct device *dev, struct resource *parent,
                           resource_size_t start, resource_size_t n)
{
        struct region_devres match_data = { parent, start, n };

        __release_region(parent, start, n);
        WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
                               &match_data));
}
EXPORT_SYMBOL(__devm_release_region);
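
/*
 * Illustrative sketch (not part of the kernel): managed region
 * allocation through the devm_request_region() wrapper from
 * <linux/ioport.h>; the region is released automatically when @dev is
 * unbound.  The port range, name and probe function are hypothetical.
 */
static int example_probe(struct device *dev)
{
        if (!devm_request_region(dev, 0x310, 4, "example-device"))
                return -EBUSY;

        /* No explicit release is needed on the error or remove paths. */
        return 0;
}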

/*
 * Called from init/main.c to reserve IO ports.
 */
#define MAXRESERVE 4
static int __init reserve_setup(char *str)
{
        static int reserved;
        static struct resource reserve[MAXRESERVE];

        for (;;) {
                unsigned int io_start, io_num;
                int x = reserved;

                if (get_option (&str, &io_start) != 2)
                        break;
                if (get_option (&str, &io_num) == 0)
                        break;
                if (x < MAXRESERVE) {
                        struct resource *res = reserve + x;
                        res->name = "reserved";
                        res->start = io_start;
                        res->end = io_start + io_num - 1;
                        res->flags = IORESOURCE_BUSY;
                        res->child = NULL;
                        if (request_resource(res->start >= 0x10000 ? &iomem_resource : &ioport_resource, res) == 0)
                                reserved = x+1;
                }
        }
        return 1;
}

__setup("reserve=", reserve_setup);

/*
 * Check if the requested addr and size span more than any single slot in
 * the iomem resource tree; a mapping should fall entirely within one
 * resource.
 */
int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
{
        struct resource *p = &iomem_resource;
        int err = 0;
        loff_t l;

        read_lock(&resource_lock);
        for (p = p->child; p ; p = r_next(NULL, p, &l)) {
                /*
                 * We can probably skip the resources without
                 * IORESOURCE_IO attribute?
                 */
                if (p->start >= addr + size)
                        continue;
                if (p->end < addr)
                        continue;
                if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
                    PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1))
                        continue;
                /*
                 * if a resource is "BUSY", it's not a hardware resource
                 * but a driver mapping of such a resource; we don't want
                 * to warn for those; some drivers legitimately map only
                 * partial hardware resources. (example: vesafb)
                 */
                if (p->flags & IORESOURCE_BUSY)
                        continue;

                printk(KERN_WARNING "resource map sanity check conflict: "
                       "0x%llx 0x%llx 0x%llx 0x%llx %s\n",
                       (unsigned long long)addr,
                       (unsigned long long)(addr + size - 1),
                       (unsigned long long)p->start,
                       (unsigned long long)p->end,
                       p->name);
                err = -1;
                break;
        }
        read_unlock(&resource_lock);

        return err;
}

#ifdef CONFIG_STRICT_DEVMEM
static int strict_iomem_checks = 1;
#else
static int strict_iomem_checks;
#endif

/*
 * Check if an address is reserved in the iomem resource tree;
 * returns 1 if reserved, 0 if not reserved.
 */
int iomem_is_exclusive(u64 addr)
{
        struct resource *p = &iomem_resource;
        int err = 0;
        loff_t l;
        int size = PAGE_SIZE;

        if (!strict_iomem_checks)
                return 0;

        addr = addr & PAGE_MASK;

        read_lock(&resource_lock);
        for (p = p->child; p ; p = r_next(NULL, p, &l)) {
                /*
                 * We can probably skip the resources without
                 * IORESOURCE_IO attribute?
                 */
                if (p->start >= addr + size)
                        break;
                if (p->end < addr)
                        continue;
                if (p->flags & IORESOURCE_BUSY &&
                    p->flags & IORESOURCE_EXCLUSIVE) {
                        err = 1;
                        break;
                }
        }
        read_unlock(&resource_lock);

        return err;
}

static int __init strict_iomem(char *str)
{
        if (strstr(str, "relaxed"))
                strict_iomem_checks = 0;
        if (strstr(str, "strict"))
                strict_iomem_checks = 1;
        return 1;
}

__setup("iomem=", strict_iomem);
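
/*
 * Boot-time options handled above (values illustrative):
 *
 *	reserve=0x320,32	mark I/O ports 0x320-0x33f as busy so no
 *				driver can claim them; a start address at
 *				or above 0x10000 is reserved in
 *				iomem_resource instead
 *	iomem=relaxed		turn off the strict checks made by
 *				iomem_is_exclusive()
 *	iomem=strict		turn the strict checks back on
 */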