/*
 * linux/kernel/resource.c
 *
 * Copyright (C) 1999	Linus Torvalds
 * Copyright (C) 1999	Martin Mares <mj@ucw.cz>
 *
 * Arbitrary resource management.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/pfn.h>
#include <asm/io.h>


struct resource ioport_resource = {
	.name	= "PCI IO",
	.start	= 0,
	.end	= IO_SPACE_LIMIT,
	.flags	= IORESOURCE_IO,
};
EXPORT_SYMBOL(ioport_resource);

struct resource iomem_resource = {
	.name	= "PCI mem",
	.start	= 0,
	.end	= -1,
	.flags	= IORESOURCE_MEM,
};
EXPORT_SYMBOL(iomem_resource);

static DEFINE_RWLOCK(resource_lock);

static void *r_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct resource *p = v;
	(*pos)++;
	if (p->child)
		return p->child;
	while (!p->sibling && p->parent)
		p = p->parent;
	return p->sibling;
}

#ifdef CONFIG_PROC_FS

enum { MAX_IORES_LEVEL = 5 };

static void *r_start(struct seq_file *m, loff_t *pos)
	__acquires(resource_lock)
{
	struct resource *p = m->private;
	loff_t l = 0;
	read_lock(&resource_lock);
	for (p = p->child; p && l < *pos; p = r_next(m, p, &l))
		;
	return p;
}

static void r_stop(struct seq_file *m, void *v)
	__releases(resource_lock)
{
	read_unlock(&resource_lock);
}

static int r_show(struct seq_file *m, void *v)
{
	struct resource *root = m->private;
	struct resource *r = v, *p;
	int width = root->end < 0x10000 ? 4 : 8;
	int depth;

	for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
		if (p->parent == root)
			break;
	seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
			depth * 2, "",
			width, (unsigned long long) r->start,
			width, (unsigned long long) r->end,
			r->name ? r->name : "<BAD>");
	return 0;
}
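
/*
 * For illustration: the seq_printf() format above is what produces the
 * familiar /proc/ioports and /proc/iomem layout, e.g. (hypothetical
 * entries):
 *
 *	0000-001f : dma1
 *	0170-0177 : ide1
 *
 *	e0000000-efffffff : PCI Bus 0000:00
 *	  e0000000-e007ffff : 0000:00:02.0
 *
 * Children are indented two spaces per tree level, and the field width is
 * four hex digits for the 64K I/O port space and eight for memory
 * resources.
 */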

static const struct seq_operations resource_op = {
	.start	= r_start,
	.next	= r_next,
	.stop	= r_stop,
	.show	= r_show,
};

static int ioports_open(struct inode *inode, struct file *file)
{
	int res = seq_open(file, &resource_op);
	if (!res) {
		struct seq_file *m = file->private_data;
		m->private = &ioport_resource;
	}
	return res;
}

static int iomem_open(struct inode *inode, struct file *file)
{
	int res = seq_open(file, &resource_op);
	if (!res) {
		struct seq_file *m = file->private_data;
		m->private = &iomem_resource;
	}
	return res;
}

static const struct file_operations proc_ioports_operations = {
	.open		= ioports_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static const struct file_operations proc_iomem_operations = {
	.open		= iomem_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init ioresources_init(void)
{
	proc_create("ioports", 0, NULL, &proc_ioports_operations);
	proc_create("iomem", 0, NULL, &proc_iomem_operations);
	return 0;
}
__initcall(ioresources_init);

#endif /* CONFIG_PROC_FS */

/* Return the conflict entry if you can't request it */
static struct resource * __request_resource(struct resource *root, struct resource *new)
{
	resource_size_t start = new->start;
	resource_size_t end = new->end;
	struct resource *tmp, **p;

	if (end < start)
		return root;
	if (start < root->start)
		return root;
	if (end > root->end)
		return root;
	p = &root->child;
	for (;;) {
		tmp = *p;
		if (!tmp || tmp->start > end) {
			new->sibling = tmp;
			*p = new;
			new->parent = root;
			return NULL;
		}
		p = &tmp->sibling;
		if (tmp->end < start)
			continue;
		return tmp;
	}
}

static int __release_resource(struct resource *old)
{
	struct resource *tmp, **p;

	p = &old->parent->child;
	for (;;) {
		tmp = *p;
		if (!tmp)
			break;
		if (tmp == old) {
			*p = tmp->sibling;
			old->parent = NULL;
			return 0;
		}
		p = &tmp->sibling;
	}
	return -EINVAL;
}

static void __release_child_resources(struct resource *r)
{
	struct resource *tmp, *p;
	resource_size_t size;

	p = r->child;
	r->child = NULL;
	while (p) {
		tmp = p;
		p = p->sibling;

		tmp->parent = NULL;
		tmp->sibling = NULL;
		__release_child_resources(tmp);

		printk(KERN_DEBUG "release child resource %pR\n", tmp);
		/* need to restore size, and keep flags */
		size = resource_size(tmp);
		tmp->start = 0;
		tmp->end = size - 1;
	}
}

void release_child_resources(struct resource *r)
{
	write_lock(&resource_lock);
	__release_child_resources(r);
	write_unlock(&resource_lock);
}
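
/*
 * Illustrative note: __request_resource() keeps each parent's children in a
 * singly linked ->sibling list sorted by start address, so a (hypothetical)
 * tree such as
 *
 *	iomem_resource
 *	 +- 00000000-0009ffff "System RAM"
 *	 +- 000a0000-000bffff "Video RAM area"
 *	     +- ...children...
 *
 * is walked by following ->child for the first child and ->sibling for the
 * next range at the same level.  On success it returns NULL; on a conflict
 * it returns the existing entry that overlaps (or the root itself when the
 * request is invalid or lies outside the root's range).
 */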

/**
 * request_resource_conflict - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns NULL for success, conflict resource on error.
 */
struct resource *request_resource_conflict(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __request_resource(root, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * request_resource - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns 0 for success, negative error code on error.
 */
int request_resource(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	conflict = request_resource_conflict(root, new);
	return conflict ? -EBUSY : 0;
}

EXPORT_SYMBOL(request_resource);

/**
 * release_resource - release a previously reserved resource
 * @old: resource pointer
 */
int release_resource(struct resource *old)
{
	int retval;

	write_lock(&resource_lock);
	retval = __release_resource(old);
	write_unlock(&resource_lock);
	return retval;
}

EXPORT_SYMBOL(release_resource);

#if !defined(CONFIG_ARCH_HAS_WALK_MEMORY)
/*
 * Finds the lowest memory resource that exists within [res->start, res->end).
 * The caller must specify res->start, res->end, res->flags and "name".
 * If found, returns 0 and res is overwritten; if not found, returns -1.
 */
static int find_next_system_ram(struct resource *res, char *name)
{
	resource_size_t start, end;
	struct resource *p;

	BUG_ON(!res);

	start = res->start;
	end = res->end;
	BUG_ON(start >= end);

	read_lock(&resource_lock);
	for (p = iomem_resource.child; p ; p = p->sibling) {
		/* system ram is just marked as IORESOURCE_MEM */
		if (p->flags != res->flags)
			continue;
		if (name && strcmp(p->name, name))
			continue;
		if (p->start > end) {
			p = NULL;
			break;
		}
		if ((p->end >= start) && (p->start < end))
			break;
	}
	read_unlock(&resource_lock);
	if (!p)
		return -1;
	/* copy data */
	if (res->start < p->start)
		res->start = p->start;
	if (res->end > p->end)
		res->end = p->end;
	return 0;
}

/*
 * This function calls the callback against all memory ranges of "System RAM"
 * which are marked as IORESOURCE_MEM and IORESOURCE_BUSY.
 * Currently, this function is only used for "System RAM".
 */
int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
		void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	struct resource res;
	unsigned long pfn, end_pfn;
	u64 orig_end;
	int ret = -1;

	res.start = (u64) start_pfn << PAGE_SHIFT;
	res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
	res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	orig_end = res.end;
	while ((res.start < res.end) &&
		(find_next_system_ram(&res, "System RAM") >= 0)) {
		pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT;
		end_pfn = (res.end + 1) >> PAGE_SHIFT;
		if (end_pfn > pfn)
			ret = (*func)(pfn, end_pfn - pfn, arg);
		if (ret)
			break;
		res.start = res.end + 1;
		res.end = orig_end;
	}
	return ret;
}

#endif
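
/*
 * Usage sketch: walk_system_ram_range() expects a callback taking
 * (start_pfn, nr_pages, arg) and returning 0 to continue the walk.  A
 * hypothetical page counter (names made up for illustration) would look
 * like:
 *
 *	static int count_ram_pages(unsigned long pfn, unsigned long nr,
 *				   void *arg)
 *	{
 *		*(unsigned long *)arg += nr;
 *		return 0;
 *	}
 *
 *	unsigned long total = 0;
 *	walk_system_ram_range(0, max_pfn, &total, count_ram_pages);
 *
 * A non-zero return from the callback stops the walk and is propagated back
 * to the caller.
 */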

static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	return 1;
}
/*
 * This generic page_is_ram() returns true if the specified address is
 * registered as "System RAM" in the iomem_resource list.
 */
int __weak page_is_ram(unsigned long pfn)
{
	return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
}

void __weak arch_remove_reservations(struct resource *avail)
{
}

static resource_size_t simple_align_resource(void *data,
					     const struct resource *avail,
					     resource_size_t size,
					     resource_size_t align)
{
	return avail->start;
}

static void resource_clip(struct resource *res, resource_size_t min,
			  resource_size_t max)
{
	if (res->start < min)
		res->start = min;
	if (res->end > max)
		res->end = max;
}

static bool resource_contains(struct resource *res1, struct resource *res2)
{
	return res1->start <= res2->start && res1->end >= res2->end;
}

/*
 * Find empty slot in the resource tree given range and alignment.
 */
static int find_resource(struct resource *root, struct resource *new,
			 resource_size_t size, resource_size_t min,
			 resource_size_t max, resource_size_t align,
			 resource_size_t (*alignf)(void *,
						   const struct resource *,
						   resource_size_t,
						   resource_size_t),
			 void *alignf_data)
{
	struct resource *this = root->child;
	struct resource tmp = *new, avail, alloc;

	tmp.flags = new->flags;
	tmp.start = root->start;
	/*
	 * Skip past an allocated resource that starts at 0, since the
	 * assignment of this->start - 1 to tmp->end below would cause an
	 * underflow.
	 */
	if (this && this->start == 0) {
		tmp.start = this->end + 1;
		this = this->sibling;
	}
	for (;;) {
		if (this)
			tmp.end = this->start - 1;
		else
			tmp.end = root->end;

		resource_clip(&tmp, min, max);
		arch_remove_reservations(&tmp);

		/* Check for overflow after ALIGN() */
		avail = *new;
		avail.start = ALIGN(tmp.start, align);
		avail.end = tmp.end;
		if (avail.start >= tmp.start) {
			alloc.start = alignf(alignf_data, &avail, size, align);
			alloc.end = alloc.start + size - 1;
			if (resource_contains(&avail, &alloc)) {
				new->start = alloc.start;
				new->end = alloc.end;
				return 0;
			}
		}
		if (!this)
			break;
		tmp.start = this->end + 1;
		this = this->sibling;
	}
	return -EBUSY;
}

/**
 * allocate_resource - allocate empty slot in the resource tree given range & alignment
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 * @size: requested resource region size
 * @min: minimum boundary of the range to allocate from
 * @max: maximum boundary of the range to allocate from
 * @align: alignment requested, in bytes
 * @alignf: alignment function, optional, called if not NULL
 * @alignf_data: arbitrary data to pass to the @alignf function
 */
int allocate_resource(struct resource *root, struct resource *new,
		      resource_size_t size, resource_size_t min,
		      resource_size_t max, resource_size_t align,
		      resource_size_t (*alignf)(void *,
						const struct resource *,
						resource_size_t,
						resource_size_t),
		      void *alignf_data)
{
	int err;

	if (!alignf)
		alignf = simple_align_resource;

	write_lock(&resource_lock);
	err = find_resource(root, new, size, min, max, align, alignf, alignf_data);
	if (err >= 0 && __request_resource(root, new))
		err = -EBUSY;
	write_unlock(&resource_lock);
	return err;
}

EXPORT_SYMBOL(allocate_resource);
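
/*
 * Usage sketch (hypothetical names and addresses): a caller fills in the
 * resource flags and lets allocate_resource() pick the range, e.g. carving
 * a 4K, page-aligned window out of iomem_resource somewhere between 1MB
 * and 16MB:
 *
 *	static struct resource my_win = {
 *		.name  = "example window",
 *		.flags = IORESOURCE_MEM,
 *	};
 *
 *	err = allocate_resource(&iomem_resource, &my_win, 0x1000,
 *				0x100000, 0xffffff, 0x1000, NULL, NULL);
 *
 * A zero return means my_win.start/my_win.end describe the chosen range and
 * my_win is already inserted under iomem_resource; -EBUSY means no free
 * slot of that size and alignment was found.
 */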

/*
 * Insert a resource into the resource tree.  If successful, return NULL,
 * otherwise return the conflicting resource (compare to __request_resource())
 */
static struct resource * __insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *first, *next;

	for (;; parent = first) {
		first = __request_resource(parent, new);
		if (!first)
			return first;

		if (first == parent)
			return first;
		if (WARN_ON(first == new))	/* duplicated insertion */
			return first;

		if ((first->start > new->start) || (first->end < new->end))
			break;
		if ((first->start == new->start) && (first->end == new->end))
			break;
	}

	for (next = first; ; next = next->sibling) {
		/* Partial overlap? Bad, and unfixable */
		if (next->start < new->start || next->end > new->end)
			return next;
		if (!next->sibling)
			break;
		if (next->sibling->start > new->end)
			break;
	}

	new->parent = parent;
	new->sibling = next->sibling;
	new->child = first;

	next->sibling = NULL;
	for (next = first; next; next = next->sibling)
		next->parent = new;

	if (parent->child == first) {
		parent->child = new;
	} else {
		next = parent->child;
		while (next->sibling != first)
			next = next->sibling;
		next->sibling = new;
	}
	return NULL;
}

/**
 * insert_resource_conflict - Inserts resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns NULL on success, conflict resource if the resource can't be inserted.
 *
 * This function is equivalent to request_resource_conflict when no conflict
 * happens. If a conflict happens, and the conflicting resources
 * entirely fit within the range of the new resource, then the new
 * resource is inserted and the conflicting resources become children of
 * the new resource.
 */
struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __insert_resource(parent, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * insert_resource - Inserts a resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns 0 on success, -EBUSY if the resource can't be inserted.
 */
int insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	conflict = insert_resource_conflict(parent, new);
	return conflict ? -EBUSY : 0;
}
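
/*
 * Illustrative note: unlike request_resource(), insert_resource() tolerates
 * existing entries that fit entirely inside the new range, re-parenting
 * them underneath it.  For a hypothetical tree already containing
 * 000c0000-000c7fff "Video ROM",
 *
 *	struct resource bios = {
 *		.name  = "example BIOS area",
 *		.start = 0x000c0000,
 *		.end   = 0x000fffff,
 *		.flags = IORESOURCE_MEM,
 *	};
 *	insert_resource(&iomem_resource, &bios);
 *
 * succeeds and leaves the "Video ROM" entry as a child of the new resource,
 * whereas request_resource() would have returned -EBUSY.
 */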

/**
 * insert_resource_expand_to_fit - Insert a resource into the resource tree
 * @root: root resource descriptor
 * @new: new resource to insert
 *
 * Insert a resource into the resource tree, possibly expanding it in order
 * to make it encompass any conflicting resources.
 */
void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
{
	if (new->parent)
		return;

	write_lock(&resource_lock);
	for (;;) {
		struct resource *conflict;

		conflict = __insert_resource(root, new);
		if (!conflict)
			break;
		if (conflict == root)
			break;

		/* Ok, expand resource to cover the conflict, then try again .. */
		if (conflict->start < new->start)
			new->start = conflict->start;
		if (conflict->end > new->end)
			new->end = conflict->end;

		printk("Expanded resource %s due to conflict with %s\n", new->name, conflict->name);
	}
	write_unlock(&resource_lock);
}

/**
 * adjust_resource - modify a resource's start and size
 * @res: resource to modify
 * @start: new start value
 * @size: new size
 *
 * Given an existing resource, change its start and size to match the
 * arguments.  Returns 0 on success, -EBUSY if it can't fit.
 * Existing children of the resource are assumed to be immutable.
 */
int adjust_resource(struct resource *res, resource_size_t start, resource_size_t size)
{
	struct resource *tmp, *parent = res->parent;
	resource_size_t end = start + size - 1;
	int result = -EBUSY;

	write_lock(&resource_lock);

	if ((start < parent->start) || (end > parent->end))
		goto out;

	for (tmp = res->child; tmp; tmp = tmp->sibling) {
		if ((tmp->start < start) || (tmp->end > end))
			goto out;
	}

	if (res->sibling && (res->sibling->start <= end))
		goto out;

	tmp = parent->child;
	if (tmp != res) {
		while (tmp->sibling != res)
			tmp = tmp->sibling;
		if (start <= tmp->end)
			goto out;
	}

	res->start = start;
	res->end = end;
	result = 0;

 out:
	write_unlock(&resource_lock);
	return result;
}

static void __init __reserve_region_with_split(struct resource *root,
		resource_size_t start, resource_size_t end,
		const char *name)
{
	struct resource *parent = root;
	struct resource *conflict;
	struct resource *res = kzalloc(sizeof(*res), GFP_ATOMIC);

	if (!res)
		return;

	res->name = name;
	res->start = start;
	res->end = end;
	res->flags = IORESOURCE_BUSY;

	conflict = __request_resource(parent, res);
	if (!conflict)
		return;

	/* failed, split and try again */
	kfree(res);

	/* conflict covered whole area */
	if (conflict->start <= start && conflict->end >= end)
		return;

	if (conflict->start > start)
		__reserve_region_with_split(root, start, conflict->start-1, name);
	if (conflict->end < end)
		__reserve_region_with_split(root, conflict->end+1, end, name);
}

void __init reserve_region_with_split(struct resource *root,
		resource_size_t start, resource_size_t end,
		const char *name)
{
	write_lock(&resource_lock);
	__reserve_region_with_split(root, start, end, name);
	write_unlock(&resource_lock);
}

EXPORT_SYMBOL(adjust_resource);

/**
 * resource_alignment - calculate resource's alignment
 * @res: resource pointer
 *
 * Returns alignment on success, 0 (invalid alignment) on failure.
 */
resource_size_t resource_alignment(struct resource *res)
{
	switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
	case IORESOURCE_SIZEALIGN:
		return resource_size(res);
	case IORESOURCE_STARTALIGN:
		return res->start;
	default:
		return 0;
	}
}
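
/*
 * Illustrative note: for a hypothetical resource spanning
 * 0xd0000000-0xd0000fff with IORESOURCE_SIZEALIGN set, resource_alignment()
 * returns resource_size(res) == 0x1000; with IORESOURCE_STARTALIGN set
 * instead it returns the start address, 0xd0000000.  With neither flag the
 * alignment is unknown and 0 is returned.
 */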

/*
 * This is compatibility stuff for IO resources.
 *
 * Note how this, unlike the above, knows about
 * the IO flag meanings (busy etc).
 *
 * request_region creates a new busy region.
 *
 * check_region returns non-zero if the area is already busy.
 *
 * release_region releases a matching busy region.
 */

static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait);

/**
 * __request_region - create a new busy resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 * @name: reserving caller's ID string
 * @flags: IO resource flags
 */
struct resource * __request_region(struct resource *parent,
				   resource_size_t start, resource_size_t n,
				   const char *name, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);

	if (!res)
		return NULL;

	res->name = name;
	res->start = start;
	res->end = start + n - 1;
	res->flags = IORESOURCE_BUSY;
	res->flags |= flags;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *conflict;

		conflict = __request_resource(parent, res);
		if (!conflict)
			break;
		if (conflict != parent) {
			parent = conflict;
			if (!(conflict->flags & IORESOURCE_BUSY))
				continue;
		}
		if (conflict->flags & flags & IORESOURCE_MUXED) {
			add_wait_queue(&muxed_resource_wait, &wait);
			write_unlock(&resource_lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule();
			remove_wait_queue(&muxed_resource_wait, &wait);
			write_lock(&resource_lock);
			continue;
		}
		/* Uhhuh, that didn't work out.. */
		kfree(res);
		res = NULL;
		break;
	}
	write_unlock(&resource_lock);
	return res;
}
EXPORT_SYMBOL(__request_region);

/**
 * __check_region - check if a resource region is busy or free
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * Returns 0 if the region is free at the moment it is checked,
 * returns %-EBUSY if the region is busy.
 *
 * NOTE:
 * This function is deprecated because its use is racy.
 * Even if it returns 0, a subsequent call to request_region()
 * may fail because another driver etc. just allocated the region.
 * Do NOT use it.  It will be removed from the kernel.
 */
int __check_region(struct resource *parent, resource_size_t start,
		   resource_size_t n)
{
	struct resource * res;

	res = __request_region(parent, start, n, "check-region", 0);
	if (!res)
		return -EBUSY;

	release_resource(res);
	kfree(res);
	return 0;
}
EXPORT_SYMBOL(__check_region);
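
/*
 * Usage sketch: drivers normally use the request_region()/release_region()
 * wrappers from <linux/ioport.h>, which call the functions in this file
 * with the appropriate root resource.  A hypothetical driver claiming eight
 * legacy I/O ports at 0x300 would do:
 *
 *	if (!request_region(0x300, 8, "example-dev"))
 *		return -EBUSY;
 *	...
 *	release_region(0x300, 8);
 *
 * The claimed range then shows up as a busy child in /proc/ioports until it
 * is released.
 */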

/**
 * __release_region - release a previously reserved resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * The described resource region must match a currently busy region.
 */
void __release_region(struct resource *parent, resource_size_t start,
		      resource_size_t n)
{
	struct resource **p;
	resource_size_t end;

	p = &parent->child;
	end = start + n - 1;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *res = *p;

		if (!res)
			break;
		if (res->start <= start && res->end >= end) {
			if (!(res->flags & IORESOURCE_BUSY)) {
				p = &res->child;
				continue;
			}
			if (res->start != start || res->end != end)
				break;
			*p = res->sibling;
			write_unlock(&resource_lock);
			if (res->flags & IORESOURCE_MUXED)
				wake_up(&muxed_resource_wait);
			kfree(res);
			return;
		}
		p = &res->sibling;
	}

	write_unlock(&resource_lock);

	printk(KERN_WARNING "Trying to free nonexistent resource "
		"<%016llx-%016llx>\n", (unsigned long long)start,
		(unsigned long long)end);
}
EXPORT_SYMBOL(__release_region);

/*
 * Managed region resource
 */
struct region_devres {
	struct resource *parent;
	resource_size_t start;
	resource_size_t n;
};

static void devm_region_release(struct device *dev, void *res)
{
	struct region_devres *this = res;

	__release_region(this->parent, this->start, this->n);
}

static int devm_region_match(struct device *dev, void *res, void *match_data)
{
	struct region_devres *this = res, *match = match_data;

	return this->parent == match->parent &&
		this->start == match->start && this->n == match->n;
}

struct resource * __devm_request_region(struct device *dev,
				struct resource *parent, resource_size_t start,
				resource_size_t n, const char *name)
{
	struct region_devres *dr = NULL;
	struct resource *res;

	dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
			  GFP_KERNEL);
	if (!dr)
		return NULL;

	dr->parent = parent;
	dr->start = start;
	dr->n = n;

	res = __request_region(parent, start, n, name, 0);
	if (res)
		devres_add(dev, dr);
	else
		devres_free(dr);

	return res;
}
EXPORT_SYMBOL(__devm_request_region);

void __devm_release_region(struct device *dev, struct resource *parent,
			   resource_size_t start, resource_size_t n)
{
	struct region_devres match_data = { parent, start, n };

	__release_region(parent, start, n);
	WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
			       &match_data));
}
EXPORT_SYMBOL(__devm_release_region);

/*
 * Called from init/main.c to reserve IO ports.
 */
#define MAXRESERVE 4
static int __init reserve_setup(char *str)
{
	static int reserved;
	static struct resource reserve[MAXRESERVE];

	for (;;) {
		unsigned int io_start, io_num;
		int x = reserved;

		if (get_option (&str, &io_start) != 2)
			break;
		if (get_option (&str, &io_num) == 0)
			break;
		if (x < MAXRESERVE) {
			struct resource *res = reserve + x;
			res->name = "reserved";
			res->start = io_start;
			res->end = io_start + io_num - 1;
			res->flags = IORESOURCE_BUSY;
			res->child = NULL;
			if (request_resource(res->start >= 0x10000 ? &iomem_resource : &ioport_resource, res) == 0)
				reserved = x+1;
		}
	}
	return 1;
}

__setup("reserve=", reserve_setup);
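
/*
 * Illustrative note: reserve_setup() parses comma-separated start,size
 * pairs from the kernel command line, so a hypothetical boot option such as
 *
 *	reserve=0x320,32
 *
 * marks I/O ports 0x320-0x33f as a busy "reserved" region (a start at or
 * above 0x10000 goes into iomem_resource instead of ioport_resource).  At
 * most MAXRESERVE such regions are honoured.
 */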

/*
 * Check whether the requested addr and size span more than any slot in the
 * iomem resource tree.
 */
int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
{
	struct resource *p = &iomem_resource;
	int err = 0;
	loff_t l;

	read_lock(&resource_lock);
	for (p = p->child; p ; p = r_next(NULL, p, &l)) {
		/*
		 * We can probably skip the resources without
		 * IORESOURCE_IO attribute?
		 */
		if (p->start >= addr + size)
			continue;
		if (p->end < addr)
			continue;
		if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
		    PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1))
			continue;
		/*
		 * if a resource is "BUSY", it's not a hardware resource
		 * but a driver mapping of such a resource; we don't want
		 * to warn for those; some drivers legitimately map only
		 * partial hardware resources. (example: vesafb)
		 */
		if (p->flags & IORESOURCE_BUSY)
			continue;

		printk(KERN_WARNING "resource map sanity check conflict: "
		       "0x%llx 0x%llx 0x%llx 0x%llx %s\n",
		       (unsigned long long)addr,
		       (unsigned long long)(addr + size - 1),
		       (unsigned long long)p->start,
		       (unsigned long long)p->end,
		       p->name);
		err = -1;
		break;
	}
	read_unlock(&resource_lock);

	return err;
}

#ifdef CONFIG_STRICT_DEVMEM
static int strict_iomem_checks = 1;
#else
static int strict_iomem_checks;
#endif

/*
 * Check if an address is reserved in the iomem resource tree.
 * Returns 1 if reserved, 0 if not reserved.
 */
int iomem_is_exclusive(u64 addr)
{
	struct resource *p = &iomem_resource;
	int err = 0;
	loff_t l;
	int size = PAGE_SIZE;

	if (!strict_iomem_checks)
		return 0;

	addr = addr & PAGE_MASK;

	read_lock(&resource_lock);
	for (p = p->child; p ; p = r_next(NULL, p, &l)) {
		/*
		 * We can probably skip the resources without
		 * IORESOURCE_IO attribute?
		 */
		if (p->start >= addr + size)
			break;
		if (p->end < addr)
			continue;
		if (p->flags & IORESOURCE_BUSY &&
		    p->flags & IORESOURCE_EXCLUSIVE) {
			err = 1;
			break;
		}
	}
	read_unlock(&resource_lock);

	return err;
}

static int __init strict_iomem(char *str)
{
	if (strstr(str, "relaxed"))
		strict_iomem_checks = 0;
	if (strstr(str, "strict"))
		strict_iomem_checks = 1;
	return 1;
}

__setup("iomem=", strict_iomem);
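
/*
 * Illustrative note: booting with "iomem=relaxed" clears
 * strict_iomem_checks so iomem_is_exclusive() always reports 0, while
 * "iomem=strict" enables the check even when CONFIG_STRICT_DEVMEM did not
 * set it by default.
 */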