Lines matching refs:res in kernel/resource.c
163 static void free_resource(struct resource *res) in free_resource() argument
171 if (res && PageSlab(virt_to_head_page(res))) in free_resource()
172 kfree(res); in free_resource()
342 struct resource *res) in find_next_iomem_res() argument
348 if (!res) in find_next_iomem_res()
380 *res = (struct resource) { in find_next_iomem_res()
398 struct resource res; in __walk_iomem_res_desc() local
402 !find_next_iomem_res(start, end, flags, desc, &res)) { in __walk_iomem_res_desc()
403 ret = (*func)(&res, arg); in __walk_iomem_res_desc()
407 start = res.end + 1; in __walk_iomem_res_desc()
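
Lines 398-407 above are from __walk_iomem_res_desc(), the loop shared by walk_iomem_res_desc() and walk_system_ram_res(): it repeatedly calls find_next_iomem_res() and hands each matching range to the caller's callback, stopping as soon as the callback returns non-zero. A minimal usage sketch of the exported wrapper follows; the callback and helper names are illustrative, not taken from the source.

    #include <linux/ioport.h>
    #include <linux/kernel.h>

    /* Illustrative callback: invoked once per matching resource range. */
    static int dump_range(struct resource *res, void *arg)
    {
            pr_info("RAM range %pa..%pa\n", &res->start, &res->end);
            return 0;               /* non-zero would stop the walk */
    }

    static void dump_system_ram(void)
    {
            /* Walk all busy System RAM entries; IORES_DESC_NONE means
             * "any descriptor". */
            walk_iomem_res_desc(IORES_DESC_NONE,
                                IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
                                0, -1, NULL, dump_range);
    }
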
462 struct resource res, *rams; in walk_system_ram_res_rev() local
475 (!find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res))) { in walk_system_ram_res_rev()
489 rams[i++] = res; in walk_system_ram_res_rev()
490 start = res.end + 1; in walk_system_ram_res_rev()
528 struct resource res; in walk_system_ram_range() local
536 !find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res)) { in walk_system_ram_range()
537 pfn = PFN_UP(res.start); in walk_system_ram_range()
538 end_pfn = PFN_DOWN(res.end + 1); in walk_system_ram_range()
543 start = res.end + 1; in walk_system_ram_range()
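
walk_system_ram_range() is the PFN-based variant: lines 537-538 above convert each System RAM resource into a [pfn, end_pfn) range with PFN_UP()/PFN_DOWN() before invoking the callback. A hedged sketch; the page-counting callback and helper are made up for illustration.

    #include <linux/ioport.h>

    /* Illustrative callback: receives a start pfn and a page count. */
    static int count_pages(unsigned long start_pfn, unsigned long nr_pages,
                           void *arg)
    {
            *(unsigned long *)arg += nr_pages;
            return 0;
    }

    static unsigned long ram_pages_in(unsigned long start_pfn,
                                      unsigned long nr_pages)
    {
            unsigned long total = 0;

            /* Returns -EINVAL if the callback never ran, otherwise the
             * callback's last return value (the walk stops on non-zero). */
            walk_system_ram_range(start_pfn, nr_pages, &total, count_pages);
            return total;
    }
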
569 struct resource res, o; in __region_intersects() local
572 res = DEFINE_RES(start, size, 0); in __region_intersects()
575 if (!resource_intersection(p, &res, &o)) in __region_intersects()
595 if (!resource_overlaps(dp, &res)) in __region_intersects()
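
__region_intersects() backs region_intersects(): it counts intersecting entries of the requested type versus entries of any other type and classifies the span as REGION_DISJOINT, REGION_INTERSECTS or REGION_MIXED. A hedged sketch of the common "does this range touch System RAM?" check built on the exported wrapper; the helper name is illustrative:

    #include <linux/ioport.h>
    #include <linux/mm.h>           /* REGION_* classification values */

    static bool span_touches_system_ram(resource_size_t start, size_t size)
    {
            /* REGION_DISJOINT: no overlap with System RAM at all.
             * REGION_INTERSECTS / REGION_MIXED: at least part of the span
             * overlaps System RAM (MIXED also overlaps something else). */
            return region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
                                     IORES_DESC_NONE) != REGION_DISJOINT;
    }
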
663 static void resource_clip(struct resource *res, resource_size_t min, in resource_clip() argument
666 if (res->start < min) in resource_clip()
667 res->start = min; in resource_clip()
668 if (res->end > max) in resource_clip()
669 res->end = max; in resource_clip()
859 struct resource *res; in lookup_resource() local
862 for (res = root->child; res; res = res->sibling) { in lookup_resource()
863 if (res->start == start) in lookup_resource()
868 return res; in lookup_resource()
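
lookup_resource() only scans the immediate children of the given root (line 862) for an entry whose ->start matches exactly; it does not recurse, and it takes the resource lock internally. A hedged sketch, with the helper and message purely illustrative:

    #include <linux/ioport.h>
    #include <linux/printk.h>

    static void report_region_at(resource_size_t start)
    {
            /* NULL if no first-level iomem entry starts exactly at @start. */
            struct resource *res = lookup_resource(&iomem_resource, start);

            if (res)
                    pr_info("%s: %pa..%pa\n", res->name, &res->start,
                            &res->end);
    }
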
1036 static int __adjust_resource(struct resource *res, resource_size_t start, in __adjust_resource() argument
1039 struct resource *tmp, *parent = res->parent; in __adjust_resource()
1049 if (res->sibling && (res->sibling->start <= end)) in __adjust_resource()
1053 if (tmp != res) { in __adjust_resource()
1054 while (tmp->sibling != res) in __adjust_resource()
1061 for (tmp = res->child; tmp; tmp = tmp->sibling) in __adjust_resource()
1065 res->start = start; in __adjust_resource()
1066 res->end = end; in __adjust_resource()
1083 int adjust_resource(struct resource *res, resource_size_t start, in adjust_resource() argument
1089 result = __adjust_resource(res, start, size); in adjust_resource()
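
adjust_resource() is the locked wrapper around __adjust_resource(), which lets a caller move or resize an entry in place but refuses any change that would cross a sibling (line 1049), escape the parent, or orphan a child (line 1061). A hedged sketch of growing a region without releasing and re-requesting it; the helper name is illustrative:

    #include <linux/ioport.h>

    /* Extend @res by @extra bytes in place.  Returns 0 on success or a
     * negative errno (e.g. -EBUSY) if the larger range would collide. */
    static int grow_region(struct resource *res, resource_size_t extra)
    {
            return adjust_resource(res, res->start,
                                   resource_size(res) + extra);
    }
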
1101 struct resource *res = alloc_resource(GFP_ATOMIC); in __reserve_region_with_split() local
1105 if (!res) in __reserve_region_with_split()
1108 res->name = name; in __reserve_region_with_split()
1109 res->start = start; in __reserve_region_with_split()
1110 res->end = end; in __reserve_region_with_split()
1111 res->flags = type | IORESOURCE_BUSY; in __reserve_region_with_split()
1112 res->desc = IORES_DESC_NONE; in __reserve_region_with_split()
1116 conflict = __request_resource(parent, res); in __reserve_region_with_split()
1120 res = next_res; in __reserve_region_with_split()
1126 if (conflict->start <= res->start && in __reserve_region_with_split()
1127 conflict->end >= res->end) { in __reserve_region_with_split()
1128 free_resource(res); in __reserve_region_with_split()
1134 if (conflict->start > res->start) { in __reserve_region_with_split()
1135 end = res->end; in __reserve_region_with_split()
1136 res->end = conflict->start - 1; in __reserve_region_with_split()
1140 free_resource(res); in __reserve_region_with_split()
1150 res->start = conflict->end + 1; in __reserve_region_with_split()
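
The block above is __reserve_region_with_split(), the worker behind reserve_region_with_split(): when the requested range collides with existing entries it trims the request around each conflict (lines 1134-1150) and reserves whatever pieces remain. A hedged sketch of how early platform code might use the exported wrapper; the address range and name are illustrative:

    #include <linux/init.h>
    #include <linux/ioport.h>

    static void __init reserve_firmware_hole(void)
    {
            /* Mark 0xfed00000-0xfed0ffff busy in the iomem tree, splitting
             * the reservation around anything already claimed there. */
            reserve_region_with_split(&iomem_resource, 0xfed00000, 0xfed0ffff,
                                      "firmware-reserved");
    }
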
1191 resource_size_t resource_alignment(struct resource *res) in resource_alignment() argument
1193 switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) { in resource_alignment()
1195 return resource_size(res); in resource_alignment()
1197 return res->start; in resource_alignment()
1219 static void revoke_iomem(struct resource *res) in revoke_iomem() argument
1239 if (devmem_is_allowed(PHYS_PFN(res->start)) && in revoke_iomem()
1240 devmem_is_allowed(PHYS_PFN(res->end))) { in revoke_iomem()
1248 unmap_mapping_range(inode->i_mapping, res->start, resource_size(res), 1); in revoke_iomem()
1251 static void revoke_iomem(struct resource *res) {} in revoke_iomem() argument
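
revoke_iomem() tears down existing /dev/mem mappings that overlap a freshly claimed range, subject to the devmem_is_allowed() checks at lines 1239-1240; __request_region() calls it after a successful insertion (line 1345), and the stub at line 1251 is used when /dev/mem support is not built in. A hedged sketch of claiming MMIO space exclusively so userspace cannot map it either; base, length and name are illustrative:

    #include <linux/ioport.h>

    static struct resource *claim_ctrl_window(resource_size_t base,
                                              resource_size_t len)
    {
            /* IORESOURCE_EXCLUSIVE additionally blocks future userspace
             * mappings of the range through /dev/mem. */
            return request_mem_region_exclusive(base, len, "ctrl-window");
    }
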
1265 static int __request_region_locked(struct resource *res, struct resource *parent, in __request_region_locked() argument
1271 res->name = name; in __request_region_locked()
1272 res->start = start; in __request_region_locked()
1273 res->end = start + n - 1; in __request_region_locked()
1278 res->flags = resource_type(parent) | resource_ext_type(parent); in __request_region_locked()
1279 res->flags |= IORESOURCE_BUSY | flags; in __request_region_locked()
1280 res->desc = parent->desc; in __request_region_locked()
1282 conflict = __request_resource(parent, res); in __request_region_locked()
1293 conflict->name, conflict, res); in __request_region_locked()
1329 struct resource *res = alloc_resource(GFP_KERNEL); in __request_region() local
1332 if (!res) in __request_region()
1336 ret = __request_region_locked(res, parent, start, n, name, flags); in __request_region()
1340 free_resource(res); in __request_region()
1345 revoke_iomem(res); in __request_region()
1347 return res; in __request_region()
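
__request_region() is the common backend of the request_region() and request_mem_region() macros: it allocates a struct resource, fills it in under the lock via __request_region_locked(), and revokes conflicting /dev/mem mappings on success. A hedged sketch of the usual driver-side pairing with ioremap(); the region name is illustrative:

    #include <linux/io.h>
    #include <linux/ioport.h>

    static void __iomem *map_device_regs(resource_size_t base, size_t len)
    {
            void __iomem *regs;

            /* Claim the MMIO window first so no other driver grabs it. */
            if (!request_mem_region(base, len, "mydev-regs"))
                    return NULL;

            regs = ioremap(base, len);
            if (!regs)
                    release_mem_region(base, len);
            return regs;
    }
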
1371 struct resource *res = *p; in __release_region() local
1373 if (!res) in __release_region()
1375 if (res->start <= start && res->end >= end) { in __release_region()
1376 if (!(res->flags & IORESOURCE_BUSY)) { in __release_region()
1377 p = &res->child; in __release_region()
1380 if (res->start != start || res->end != end) in __release_region()
1382 *p = res->sibling; in __release_region()
1384 if (res->flags & IORESOURCE_MUXED) in __release_region()
1386 free_resource(res); in __release_region()
1389 p = &res->sibling; in __release_region()
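
__release_region() walks the parent's children, descends into entries that cover the range but are not busy (lines 1376-1377), unlinks and frees the exact match, and wakes anyone sleeping on an IORESOURCE_MUXED region (line 1384). A hedged sketch of the teardown matching the request above, plus the muxed-port pattern used for shared legacy I/O ports; the helper names and Super-I/O port are illustrative:

    #include <linux/io.h>
    #include <linux/ioport.h>

    static void unmap_device_regs(void __iomem *regs, resource_size_t base,
                                  size_t len)
    {
            iounmap(regs);
            /* Must mirror the original request exactly (same start/len). */
            release_mem_region(base, len);
    }

    static void poke_shared_superio(void)
    {
            /* A muxed request may sleep until a conflicting muxed holder
             * releases the ports; __release_region() does the wake-up. */
            if (!request_muxed_region(0x2e, 2, "superio"))
                    return;
            /* ... talk to the chip ... */
            release_region(0x2e, 2);
    }
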
1463 struct resource *res; in release_mem_region_adjustable() local
1483 while ((res = *p)) { in release_mem_region_adjustable()
1484 if (res->start >= end) in release_mem_region_adjustable()
1488 if (res->start > start || res->end < end) { in release_mem_region_adjustable()
1489 p = &res->sibling; in release_mem_region_adjustable()
1493 if (!(res->flags & IORESOURCE_MEM)) in release_mem_region_adjustable()
1496 if (!(res->flags & IORESOURCE_BUSY)) { in release_mem_region_adjustable()
1497 p = &res->child; in release_mem_region_adjustable()
1502 if (res->start == start && res->end == end) { in release_mem_region_adjustable()
1504 *p = res->sibling; in release_mem_region_adjustable()
1505 free_resource(res); in release_mem_region_adjustable()
1506 } else if (res->start == start && res->end != end) { in release_mem_region_adjustable()
1508 WARN_ON_ONCE(__adjust_resource(res, end + 1, in release_mem_region_adjustable()
1509 res->end - end)); in release_mem_region_adjustable()
1510 } else if (res->start != start && res->end == end) { in release_mem_region_adjustable()
1512 WARN_ON_ONCE(__adjust_resource(res, res->start, in release_mem_region_adjustable()
1513 start - res->start)); in release_mem_region_adjustable()
1524 new_res->name = res->name; in release_mem_region_adjustable()
1526 new_res->end = res->end; in release_mem_region_adjustable()
1527 new_res->flags = res->flags; in release_mem_region_adjustable()
1528 new_res->desc = res->desc; in release_mem_region_adjustable()
1529 new_res->parent = res->parent; in release_mem_region_adjustable()
1530 new_res->sibling = res->sibling; in release_mem_region_adjustable()
1532 reparent_children_after_split(res, new_res, end); in release_mem_region_adjustable()
1534 if (WARN_ON_ONCE(__adjust_resource(res, res->start, in release_mem_region_adjustable()
1535 start - res->start))) in release_mem_region_adjustable()
1537 res->sibling = new_res; in release_mem_region_adjustable()
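
release_mem_region_adjustable() exists for memory hot-remove: unlike release_mem_region(), it accepts a range that covers only part of a busy "System RAM" entry and shrinks that entry or splits it in two (the branches at lines 1502-1537) instead of demanding an exact match. A hedged sketch; availability depends on the memory-hotplug configuration, and the helper name is illustrative:

    #include <linux/ioport.h>

    static void drop_unplugged_range(resource_size_t start, resource_size_t size)
    {
            /* May shrink an existing busy entry or split it in two rather
             * than requiring [start, start + size) to match exactly. */
            release_mem_region_adjustable(start, size);
    }
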
1577 void merge_system_ram_resource(struct resource *res) in merge_system_ram_resource() argument
1582 if (WARN_ON_ONCE((res->flags & flags) != flags)) in merge_system_ram_resource()
1586 res->flags |= IORESOURCE_SYSRAM_MERGEABLE; in merge_system_ram_resource()
1589 cur = res->sibling; in merge_system_ram_resource()
1590 if (cur && system_ram_resources_mergeable(res, cur)) { in merge_system_ram_resource()
1591 res->end = cur->end; in merge_system_ram_resource()
1592 res->sibling = cur->sibling; in merge_system_ram_resource()
1597 cur = res->parent->child; in merge_system_ram_resource()
1598 while (cur && cur->sibling != res) in merge_system_ram_resource()
1600 if (cur && system_ram_resources_mergeable(cur, res)) { in merge_system_ram_resource()
1601 cur->end = res->end; in merge_system_ram_resource()
1602 cur->sibling = res->sibling; in merge_system_ram_resource()
1603 free_resource(res); in merge_system_ram_resource()
1661 static int devm_resource_match(struct device *dev, void *res, void *data) in devm_resource_match() argument
1663 struct resource **ptr = res; in devm_resource_match()
1688 static void devm_region_release(struct device *dev, void *res) in devm_region_release() argument
1690 struct region_devres *this = res; in devm_region_release()
1695 static int devm_region_match(struct device *dev, void *res, void *match_data) in devm_region_match() argument
1697 struct region_devres *this = res, *match = match_data; in devm_region_match()
1708 struct resource *res; in __devm_request_region() local
1719 res = __request_region(parent, start, n, name, 0); in __devm_request_region()
1720 if (res) in __devm_request_region()
1725 return res; in __devm_request_region()
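
__devm_request_region() backs the devm_request_region() and devm_request_mem_region() macros: it records a region_devres entry so the region is dropped automatically through devm_region_release() when the device is unbound or a later probe step fails. A hedged probe-path sketch; the helper name is illustrative:

    #include <linux/device.h>
    #include <linux/ioport.h>

    static int mydev_claim_regs(struct device *dev, resource_size_t base,
                                resource_size_t len)
    {
            /* No explicit release needed: devres releases the region when
             * @dev is detached. */
            if (!devm_request_mem_region(dev, base, len, dev_name(dev)))
                    return -EBUSY;
            return 0;
    }
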
1758 struct resource *res = reserve + x; in reserve_setup() local
1765 *res = DEFINE_RES_IO_NAMED(io_start, io_num, "reserved"); in reserve_setup()
1768 *res = DEFINE_RES_MEM_NAMED(io_start, io_num, "reserved"); in reserve_setup()
1771 res->flags |= IORESOURCE_BUSY; in reserve_setup()
1772 if (request_resource(parent, res) == 0) in reserve_setup()
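
reserve_setup() above implements the reserve= kernel command-line option: each parsed base/size pair becomes a busy entry in either the ioport or the iomem tree (lines 1765-1768; which tree appears to depend on the base address), e.g. reserve=0x300,32 to keep the kernel off a legacy I/O port range. The authoritative syntax is in Documentation/admin-guide/kernel-parameters.txt.
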
1887 struct resource_entry *resource_list_create_entry(struct resource *res, in resource_list_create_entry() argument
1895 entry->res = res ? res : &entry->__res; in resource_list_create_entry()
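
resource_list_create_entry() is part of the resource_ext.h list API: passing a NULL resource makes the entry point at its own embedded __res (line 1895), so the entry carries its storage with it. A hedged sketch of building a window list the way host-bridge code typically does; the helper name and window name are illustrative:

    #include <linux/ioport.h>
    #include <linux/resource_ext.h>

    static int add_mem_window(struct list_head *resources,
                              resource_size_t start, resource_size_t size)
    {
            /* NULL @res: use the storage embedded in the entry itself. */
            struct resource_entry *entry = resource_list_create_entry(NULL, 0);

            if (!entry)
                    return -ENOMEM;

            *entry->res = DEFINE_RES_MEM_NAMED(start, size, "host bridge window");
            resource_list_add_tail(entry, resources);
            return 0;
    }

The compound-literal assignment mirrors how this file uses DEFINE_RES_MEM_NAMED at line 1768; the resulting list can later be walked with resource_list_for_each_entry() and torn down with resource_list_free().
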
1956 struct resource *res = _res; in remove_free_mem_region() local
1958 if (res->parent) in remove_free_mem_region()
1959 remove_resource(res); in remove_free_mem_region()
1960 free_resource(res); in remove_free_mem_region()
1970 struct resource *res; in get_free_mem_region() local
1975 res = alloc_resource(GFP_KERNEL); in get_free_mem_region()
1976 if (!res) in get_free_mem_region()
1983 free_resource(res); in get_free_mem_region()
1987 if (devm_add_action_or_reset(dev, remove_free_mem_region, res)) in get_free_mem_region()
2000 if (__request_region_locked(res, &iomem_resource, addr, in get_free_mem_region()
2011 res->desc = desc; in get_free_mem_region()
2019 revoke_iomem(res); in get_free_mem_region()
2021 *res = DEFINE_RES_NAMED_DESC(addr, size, name, IORESOURCE_MEM, desc); in get_free_mem_region()
2027 if (__insert_resource(base, res) || res->child) in get_free_mem_region()
2033 return res; in get_free_mem_region()
2038 free_resource(res); in get_free_mem_region()
2041 devm_release_action(dev, remove_free_mem_region, res); in get_free_mem_region()
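
get_free_mem_region() is the engine behind devm_request_free_mem_region() and request_free_mem_region(): it searches the given base resource (usually iomem_resource) for an unused, suitably sized gap, claims it, and in the devm variant registers remove_free_mem_region() so the claim is dropped with the device. A hedged sketch of the devm wrapper as a ZONE_DEVICE-style user might call it; the helper name is illustrative and the return value is an ERR_PTR on failure:

    #include <linux/device.h>
    #include <linux/ioport.h>

    static struct resource *grab_device_private_space(struct device *dev,
                                                      unsigned long size)
    {
            /* Finds and reserves an unused physical range of @size bytes
             * below iomem_resource.end; released automatically via devres. */
            return devm_request_free_mem_region(dev, &iomem_resource, size);
    }
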