// SPDX-License-Identifier: GPL-2.0-only
/*
 *	linux/kernel/resource.c
 *
 * Copyright (C) 1999	Linus Torvalds
 * Copyright (C) 1999	Martin Mares <mj@ucw.cz>
 *
 * Arbitrary resource management.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/pseudo_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/resource_ext.h>
#include <uapi/linux/magic.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <asm/io.h>

struct resource ioport_resource = {
	.name	= "PCI IO",
	.start	= 0,
	.end	= IO_SPACE_LIMIT,
	.flags	= IORESOURCE_IO,
};
EXPORT_SYMBOL(ioport_resource);

struct resource iomem_resource = {
	.name	= "PCI mem",
	.start	= 0,
	.end	= -1,
	.flags	= IORESOURCE_MEM,
};
EXPORT_SYMBOL(iomem_resource);

struct resource soft_reserve_resource = {
	.name	= "Soft Reserved",
	.start	= 0,
	.end	= -1,
	.desc	= IORES_DESC_SOFT_RESERVED,
	.flags	= IORESOURCE_MEM,
};

static DEFINE_RWLOCK(resource_lock);

/*
 * Return the next node of @p in pre-order tree traversal. If
 * @skip_children is true, skip the descendant nodes of @p in
 * traversal. If @p is a descendant of @subtree_root, only traverse
 * the subtree under @subtree_root.
 */
static struct resource *next_resource(struct resource *p, bool skip_children,
				      struct resource *subtree_root)
{
	if (!skip_children && p->child)
		return p->child;
	while (!p->sibling && p->parent) {
		p = p->parent;
		if (p == subtree_root)
			return NULL;
	}
	return p->sibling;
}

/*
 * Traverse the resource subtree under @_root in pre-order, excluding
 * @_root itself.
 *
 * NOTE: '__p' is introduced to avoid shadowing '_p' outside the loop,
 * and is referenced in the loop condition to avoid an unused-variable
 * warning.
 */
#define for_each_resource(_root, _p, _skip_children)			\
	for (typeof(_root) __root = (_root), __p = _p = __root->child;	\
	     __p && _p; _p = next_resource(_p, _skip_children, __root))

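/*
 * Usage sketch (illustrative, not part of this file): dumping a subtree
 * with for_each_resource(). The caller must hold resource_lock at least
 * for reading; "root" stands for any resource in the tree.
 *
 *	struct resource *p;
 *
 *	read_lock(&resource_lock);
 *	for_each_resource(root, p, false)
 *		pr_info("%pR\n", p);
 *	read_unlock(&resource_lock);
 */
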
#ifdef CONFIG_PROC_FS

enum { MAX_IORES_LEVEL = 8 };

static void *r_start(struct seq_file *m, loff_t *pos)
	__acquires(resource_lock)
{
	struct resource *root = pde_data(file_inode(m->file));
	struct resource *p;
	loff_t l = *pos;

	read_lock(&resource_lock);
	for_each_resource(root, p, false) {
		if (l-- == 0)
			break;
	}

	return p;
}

static void *r_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct resource *p = v;

	(*pos)++;

	return (void *)next_resource(p, false, NULL);
}

static void r_stop(struct seq_file *m, void *v)
	__releases(resource_lock)
{
	read_unlock(&resource_lock);
}

static int r_show(struct seq_file *m, void *v)
{
	struct resource *root = pde_data(file_inode(m->file));
	struct resource *r = v, *p;
	unsigned long long start, end;
	int width = root->end < 0x10000 ? 4 : 8;
	int depth;

	for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
		if (p->parent == root)
			break;

	if (file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN)) {
		start = r->start;
		end = r->end;
	} else {
		start = end = 0;
	}

	seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
			depth * 2, "",
			width, start,
			width, end,
			r->name ? r->name : "<BAD>");
	return 0;
}

static const struct seq_operations resource_op = {
	.start	= r_start,
	.next	= r_next,
	.stop	= r_stop,
	.show	= r_show,
};

static int __init ioresources_init(void)
{
	proc_create_seq_data("ioports", 0, NULL, &resource_op,
			&ioport_resource);
	proc_create_seq_data("iomem", 0, NULL, &resource_op, &iomem_resource);
	return 0;
}
__initcall(ioresources_init);

#endif /* CONFIG_PROC_FS */

static void free_resource(struct resource *res)
{
	/*
	 * If the resource was allocated using memblock early during boot,
	 * we'll leak it here: we can only return full pages back to the
	 * buddy, and trying to be smart and reuse them eventually in
	 * alloc_resource() would overcomplicate resource handling.
	 */
	if (res && PageSlab(virt_to_head_page(res)))
		kfree(res);
}

static struct resource *alloc_resource(gfp_t flags)
{
	return kzalloc_obj(struct resource, flags);
}

/* Return the conflict entry if you can't request it */
static struct resource * __request_resource(struct resource *root, struct resource *new)
{
	resource_size_t start = new->start;
	resource_size_t end = new->end;
	struct resource *tmp, **p;

	if (end < start)
		return root;
	if (start < root->start)
		return root;
	if (end > root->end)
		return root;
	p = &root->child;
	for (;;) {
		tmp = *p;
		if (!tmp || tmp->start > end) {
			new->sibling = tmp;
			*p = new;
			new->parent = root;
			return NULL;
		}
		p = &tmp->sibling;
		if (tmp->end < start)
			continue;
		return tmp;
	}
}

static int __release_resource(struct resource *old, bool release_child)
{
	struct resource *tmp, **p, *chd;

	p = &old->parent->child;
	for (;;) {
		tmp = *p;
		if (!tmp)
			break;
		if (tmp == old) {
			if (release_child || !(tmp->child)) {
				*p = tmp->sibling;
			} else {
				for (chd = tmp->child;; chd = chd->sibling) {
					chd->parent = tmp->parent;
					if (!(chd->sibling))
						break;
				}
				*p = tmp->child;
				chd->sibling = tmp->sibling;
			}
			old->parent = NULL;
			return 0;
		}
		p = &tmp->sibling;
	}
	return -EINVAL;
}

static void __release_child_resources(struct resource *r)
{
	struct resource *tmp, *p;
	resource_size_t size;

	p = r->child;
	r->child = NULL;
	while (p) {
		tmp = p;
		p = p->sibling;

		tmp->parent = NULL;
		tmp->sibling = NULL;
		__release_child_resources(tmp);

		printk(KERN_DEBUG "release child resource %pR\n", tmp);
		/* need to restore size, and keep flags */
		size = resource_size(tmp);
		tmp->start = 0;
		tmp->end = size - 1;
	}
}

void release_child_resources(struct resource *r)
{
	write_lock(&resource_lock);
	__release_child_resources(r);
	write_unlock(&resource_lock);
}

/**
 * request_resource_conflict - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns NULL on success, or a pointer to the conflicting resource on error.
 */
struct resource *request_resource_conflict(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __request_resource(root, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * request_resource - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns 0 for success, negative error code on error.
 */
int request_resource(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	conflict = request_resource_conflict(root, new);
	return conflict ? -EBUSY : 0;
}

EXPORT_SYMBOL(request_resource);
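
/*
 * Usage sketch (illustrative name and values): a platform quirk claiming
 * a fixed MMIO window so nothing else can allocate it. request_resource()
 * fails with -EBUSY if any part of the range is already claimed.
 *
 *	static struct resource quirk_res =
 *		DEFINE_RES_MEM_NAMED(0xfed00000, 0x400, "example-quirk");
 *
 *	if (request_resource(&iomem_resource, &quirk_res))
 *		pr_warn("example-quirk: range busy\n");
 */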

/**
 * release_resource - release a previously reserved resource
 * @old: resource pointer
 */
int release_resource(struct resource *old)
{
	int retval;

	write_lock(&resource_lock);
	retval = __release_resource(old, true);
	write_unlock(&resource_lock);
	return retval;
}

EXPORT_SYMBOL(release_resource);

static bool is_type_match(struct resource *p, unsigned long flags, unsigned long desc)
{
	return (p->flags & flags) == flags && (desc == IORES_DESC_NONE || desc == p->desc);
}

/**
 * find_next_res - Finds the lowest resource that covers part of
 *		   [@start..@end].
 *
 * If a resource is found, returns 0 and @*res is overwritten with the part
 * of the resource that's within [@start..@end]; if none is found, returns
 * -ENODEV. Returns -EINVAL for invalid parameters.
 *
 * @parent: resource tree root to search
 * @start: start address of the resource searched for
 * @end: end address of same resource
 * @flags: flags which the resource must have
 * @desc: descriptor the resource must have
 * @res: return ptr, if resource found
 *
 * The caller must specify @start, @end, @flags, and @desc
 * (which may be IORES_DESC_NONE).
 */
static int find_next_res(struct resource *parent, resource_size_t start,
			 resource_size_t end, unsigned long flags,
			 unsigned long desc, struct resource *res)
{
	/* Skip children until we find a top level range that matches */
	bool skip_children = true;
	struct resource *p;

	if (!res)
		return -EINVAL;

	if (start >= end)
		return -EINVAL;

	read_lock(&resource_lock);

	for_each_resource(parent, p, skip_children) {
		/* If we passed the resource we are looking for, stop */
		if (p->start > end) {
			p = NULL;
			break;
		}

		/* Skip until we find a range that matches what we look for */
		if (p->end < start)
			continue;

		/*
		 * We found a top level range that matches what we are looking
		 * for. Time to start checking children too.
		 */
		skip_children = false;

		/* Found a match, break */
		if (is_type_match(p, flags, desc))
			break;
	}

	if (p) {
		/* copy data */
		*res = (struct resource) {
			.start = max(start, p->start),
			.end = min(end, p->end),
			.flags = p->flags,
			.desc = p->desc,
			.parent = p->parent,
		};
	}

	read_unlock(&resource_lock);
	return p ? 0 : -ENODEV;
}

static int find_next_iomem_res(resource_size_t start, resource_size_t end,
			       unsigned long flags, unsigned long desc,
			       struct resource *res)
{
	return find_next_res(&iomem_resource, start, end, flags, desc, res);
}

static int walk_res_desc(struct resource *parent, resource_size_t start,
			 resource_size_t end, unsigned long flags,
			 unsigned long desc, void *arg,
			 int (*func)(struct resource *, void *))
{
	struct resource res;
	int ret = -EINVAL;

	while (start < end &&
	       !find_next_res(parent, start, end, flags, desc, &res)) {
		ret = (*func)(&res, arg);
		if (ret)
			break;

		start = res.end + 1;
	}

	return ret;
}

static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
				 unsigned long flags, unsigned long desc,
				 void *arg,
				 int (*func)(struct resource *, void *))
{
	return walk_res_desc(&iomem_resource, start, end, flags, desc, arg, func);
}

/**
 * walk_iomem_res_desc - Walks through iomem resources and calls func()
 *			 with matching resource ranges.
 *
 * @desc: I/O resource descriptor. Use IORES_DESC_NONE to skip @desc check.
 * @flags: I/O resource flags
 * @start: start addr
 * @end: end addr
 * @arg: function argument for the callback @func
 * @func: callback function that is called for each qualifying resource area
 *
 * All the memory ranges which overlap start,end and also match flags and
 * desc are valid candidates.
 *
 * NOTE: For a new descriptor search, define a new IORES_DESC in
 * <linux/ioport.h> and set it in 'desc' of a target resource entry.
 */
int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
			u64 end, void *arg, int (*func)(struct resource *, void *))
{
	return __walk_iomem_res_desc(start, end, flags, desc, arg, func);
}
EXPORT_SYMBOL_GPL(walk_iomem_res_desc);
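
/*
 * Usage sketch (hypothetical callback): summing the bytes of all ACPI
 * table ranges. IORES_DESC_ACPI_TABLES is a real descriptor; the callback
 * and total are made up for illustration.
 *
 *	static int add_size(struct resource *res, void *arg)
 *	{
 *		*(u64 *)arg += resource_size(res);
 *		return 0;
 *	}
 *
 *	u64 total = 0;
 *
 *	walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, IORESOURCE_MEM,
 *			    0, -1ULL, &total, add_size);
 */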

/*
 * In support of device drivers claiming Soft Reserved resources, walk the Soft
 * Reserved resource deferral tree.
 */
int walk_soft_reserve_res(u64 start, u64 end, void *arg,
			  int (*func)(struct resource *, void *))
{
	return walk_res_desc(&soft_reserve_resource, start, end, IORESOURCE_MEM,
			     IORES_DESC_SOFT_RESERVED, arg, func);
}
EXPORT_SYMBOL_GPL(walk_soft_reserve_res);

/*
 * This function calls the @func callback against all memory ranges of type
 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
 * Now, this function is only for System RAM, it deals with full ranges and
 * not PFNs. If resources are not PFN-aligned, dealing with PFNs can truncate
 * ranges.
 */
int walk_system_ram_res(u64 start, u64 end, void *arg,
			int (*func)(struct resource *, void *))
{
	unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg,
				     func);
}

/*
 * This function, being a variant of walk_system_ram_res(), calls the @func
 * callback against all memory ranges of type System RAM which are marked as
 * IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY, in reverse order, i.e., from
 * higher to lower.
 */
int walk_system_ram_res_rev(u64 start, u64 end, void *arg,
			    int (*func)(struct resource *, void *))
{
	struct resource res, *rams;
	int rams_size = 16, i;
	unsigned long flags;
	int ret = -1;

	/* create a list */
	rams = kvzalloc_objs(struct resource, rams_size);
	if (!rams)
		return ret;

	flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	i = 0;
	while ((start < end) &&
	       (!find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res))) {
		if (i >= rams_size) {
			/* re-alloc */
			struct resource *rams_new;

			rams_new = kvrealloc(rams, (rams_size + 16) * sizeof(struct resource),
					     GFP_KERNEL);
			if (!rams_new)
				goto out;

			rams = rams_new;
			rams_size += 16;
		}

		rams[i++] = res;
		start = res.end + 1;
	}

	/* go reverse */
	for (i--; i >= 0; i--) {
		ret = (*func)(&rams[i], arg);
		if (ret)
			break;
	}

out:
	kvfree(rams);
	return ret;
}

/*
 * This function calls the @func callback against all memory ranges, which
 * are ranges marked as IORESOURCE_MEM and IORESOURCE_BUSY.
 */
int walk_mem_res(u64 start, u64 end, void *arg,
		 int (*func)(struct resource *, void *))
{
	unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;

	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg,
				     func);
}

/*
 * This function calls the @func callback against all memory ranges of type
 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
 * It is to be used only for System RAM.
 */
int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
			  void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	resource_size_t start, end;
	unsigned long flags;
	struct resource res;
	unsigned long pfn, end_pfn;
	int ret = -EINVAL;

	start = (u64) start_pfn << PAGE_SHIFT;
	end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
	flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	while (start < end &&
	       !find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res)) {
		pfn = PFN_UP(res.start);
		end_pfn = PFN_DOWN(res.end + 1);
		if (end_pfn > pfn)
			ret = (*func)(pfn, end_pfn - pfn, arg);
		if (ret)
			break;
		start = res.end + 1;
	}
	return ret;
}
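
/*
 * Usage sketch (hypothetical callback): counting System RAM pages in a
 * window of base_pfn/count pages. Note the callback receives a starting
 * PFN and a page count, not byte addresses.
 *
 *	static int count_pages(unsigned long pfn, unsigned long nr_pages,
 *			       void *arg)
 *	{
 *		*(unsigned long *)arg += nr_pages;
 *		return 0;
 *	}
 *
 *	unsigned long nr = 0;
 *
 *	walk_system_ram_range(base_pfn, count, &nr, count_pages);
 */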

static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	return 1;
}

/*
 * This generic page_is_ram() returns true if the specified address is
 * registered as System RAM in the iomem_resource list.
 */
int __weak page_is_ram(unsigned long pfn)
{
	return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
}
EXPORT_SYMBOL_GPL(page_is_ram);

static int __region_intersects(struct resource *parent, resource_size_t start,
			       size_t size, unsigned long flags,
			       unsigned long desc)
{
	int type = 0, other = 0;
	struct resource *p, *dp;
	struct resource res, o;
	bool covered;

	res = DEFINE_RES(start, size, 0);

	for (p = parent->child; p ; p = p->sibling) {
		if (!resource_intersection(p, &res, &o))
			continue;
		if (is_type_match(p, flags, desc)) {
			type++;
			continue;
		}
		/*
		 * Continue to search in descendant resources as if the
		 * matched descendant resources cover some ranges of 'p'.
		 *
		 * |------------- "CXL Window 0" ------------|
		 * |-- "System RAM" --|
		 *
		 * will behave similar as the following fake resource
		 * tree when searching "System RAM".
		 *
		 * |-- "System RAM" --||-- "CXL Window 0a" --|
		 */
		covered = false;
		for_each_resource(p, dp, false) {
			if (!resource_overlaps(dp, &res))
				continue;
			if (is_type_match(dp, flags, desc)) {
				type++;
				/*
				 * Range from 'o.start' to 'dp->start'
				 * isn't covered by matched resource.
				 */
				if (dp->start > o.start)
					break;
				if (dp->end >= o.end) {
					covered = true;
					break;
				}
				/* Remove covered range */
				o.start = max(o.start, dp->end + 1);
			}
		}
		if (!covered)
			other++;
	}

	if (type == 0)
		return REGION_DISJOINT;

	if (other == 0)
		return REGION_INTERSECTS;

	return REGION_MIXED;
}

/**
 * region_intersects() - determine intersection of region with known resources
 * @start: region start address
 * @size: size of region
 * @flags: flags of resource (in iomem_resource)
 * @desc: descriptor of resource (in iomem_resource) or IORES_DESC_NONE
 *
 * Check if the specified region partially overlaps or fully eclipses a
 * resource identified by @flags and @desc (optional with IORES_DESC_NONE).
 * Return REGION_DISJOINT if the region does not overlap @flags/@desc,
 * return REGION_MIXED if the region overlaps @flags/@desc and another
 * resource, and return REGION_INTERSECTS if the region overlaps @flags/@desc
 * and no other defined resource. Note that REGION_INTERSECTS is also
 * returned in the case when the specified region overlaps RAM and undefined
 * memory holes.
 *
 * region_intersects() is used by memory remapping functions to ensure
 * the user is not remapping RAM and is a vast speed up over walking
 * through the resource table page by page.
 */
int region_intersects(resource_size_t start, size_t size, unsigned long flags,
		      unsigned long desc)
{
	int ret;

	read_lock(&resource_lock);
	ret = __region_intersects(&iomem_resource, start, size, flags, desc);
	read_unlock(&resource_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(region_intersects);
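
/*
 * Typical use (sketch): remapping code refusing to touch anything that
 * overlaps System RAM, mirroring how memremap-style callers use this:
 *
 *	if (region_intersects(offset, size, IORESOURCE_SYSTEM_RAM,
 *			      IORES_DESC_NONE) != REGION_DISJOINT)
 *		return -EINVAL;
 */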

/*
 * Check if the provided range is registered in the Soft Reserved resource
 * deferral tree for driver consideration.
 */
int region_intersects_soft_reserve(resource_size_t start, size_t size)
{
	guard(read_lock)(&resource_lock);
	return __region_intersects(&soft_reserve_resource, start, size,
				   IORESOURCE_MEM, IORES_DESC_SOFT_RESERVED);
}
EXPORT_SYMBOL_GPL(region_intersects_soft_reserve);

void __weak arch_remove_reservations(struct resource *avail)
{
}

static void resource_clip(struct resource *res, resource_size_t min,
			  resource_size_t max)
{
	if (res->start < min)
		res->start = min;
	if (res->end > max)
		res->end = max;
}

/*
 * Find empty space in the resource tree with the given range and
 * alignment constraints
 */
static int __find_resource_space(struct resource *root, struct resource *old,
				 struct resource *new, resource_size_t size,
				 struct resource_constraint *constraint)
{
	struct resource *this = root->child;
	struct resource tmp = *new, avail, alloc;
	resource_alignf alignf = constraint->alignf;

	tmp.start = root->start;
	/*
	 * Skip past an allocated resource that starts at 0, since the assignment
	 * of this->start - 1 to tmp->end below would cause an underflow.
	 */
	if (this && this->start == root->start) {
		tmp.start = (this == old) ? old->start : this->end + 1;
		this = this->sibling;
	}
	for (;;) {
		if (this)
			tmp.end = (this == old) ? this->end : this->start - 1;
		else
			tmp.end = root->end;

		if (tmp.end < tmp.start)
			goto next;

		resource_clip(&tmp, constraint->min, constraint->max);
		arch_remove_reservations(&tmp);

		/* Check for overflow after ALIGN() */
		avail.start = ALIGN(tmp.start, constraint->align);
		avail.end = tmp.end;
		avail.flags = new->flags & ~IORESOURCE_UNSET;
		if (avail.start >= tmp.start) {
			alloc.flags = avail.flags;
			if (alignf) {
				alloc.start = alignf(constraint->alignf_data,
						     &avail, size, constraint->align);
			} else {
				alloc.start = avail.start;
			}
			alloc.end = alloc.start + size - 1;
			if (alloc.start <= alloc.end &&
			    resource_contains(&avail, &alloc)) {
				new->start = alloc.start;
				new->end = alloc.end;
				return 0;
			}
		}

next:		if (!this || this->end == root->end)
			break;

		if (this != old)
			tmp.start = this->end + 1;
		this = this->sibling;
	}
	return -EBUSY;
}

/**
 * find_resource_space - Find empty space in the resource tree
 * @root: Root resource descriptor
 * @new: Resource descriptor awaiting an empty resource space
 * @size: The minimum size of the empty space
 * @constraint: The range and alignment constraints to be met
 *
 * Finds an empty space under @root in the resource tree satisfying range and
 * alignment @constraints.
 *
 * Return:
 * * %0		- if successful, @new members start, end, and flags are altered.
 * * %-EBUSY	- if no empty space was found.
 */
int find_resource_space(struct resource *root, struct resource *new,
			resource_size_t size,
			struct resource_constraint *constraint)
{
	return __find_resource_space(root, NULL, new, size, constraint);
}
EXPORT_SYMBOL_GPL(find_resource_space);

/**
 * reallocate_resource - allocate a slot in the resource tree given range & alignment.
 *	The resource will be relocated if the new size cannot be reallocated in the
 *	current location.
 *
 * @root: root resource descriptor
 * @old: resource descriptor desired by caller
 * @newsize: new size of the resource descriptor
 * @constraint: the memory range and alignment constraints to be met.
 */
static int reallocate_resource(struct resource *root, struct resource *old,
			       resource_size_t newsize,
			       struct resource_constraint *constraint)
{
	int err = 0;
	struct resource new = *old;
	struct resource *conflict;

	write_lock(&resource_lock);

	if ((err = __find_resource_space(root, old, &new, newsize, constraint)))
		goto out;

	if (resource_contains(&new, old)) {
		old->start = new.start;
		old->end = new.end;
		goto out;
	}

	if (old->child) {
		err = -EBUSY;
		goto out;
	}

	if (resource_contains(old, &new)) {
		old->start = new.start;
		old->end = new.end;
	} else {
		__release_resource(old, true);
		*old = new;
		conflict = __request_resource(root, old);
		BUG_ON(conflict);
	}
out:
	write_unlock(&resource_lock);
	return err;
}

/**
 * allocate_resource - allocate empty slot in the resource tree given range & alignment.
 *	The resource will be reallocated with a new size if it was already allocated
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 * @size: requested resource region size
 * @min: minimum boundary to allocate
 * @max: maximum boundary to allocate
 * @align: alignment requested, in bytes
 * @alignf: alignment function, optional, called if not NULL
 * @alignf_data: arbitrary data to pass to the @alignf function
 */
int allocate_resource(struct resource *root, struct resource *new,
		      resource_size_t size, resource_size_t min,
		      resource_size_t max, resource_size_t align,
		      resource_alignf alignf,
		      void *alignf_data)
{
	int err;
	struct resource_constraint constraint;

	constraint.min = min;
	constraint.max = max;
	constraint.align = align;
	constraint.alignf = alignf;
	constraint.alignf_data = alignf_data;

	if (new->parent) {
		/* resource is already allocated, try reallocating with
		   the new constraints */
		return reallocate_resource(root, new, size, &constraint);
	}

	write_lock(&resource_lock);
	err = find_resource_space(root, new, size, &constraint);
	if (err >= 0 && __request_resource(root, new))
		err = -EBUSY;
	write_unlock(&resource_lock);
	return err;
}

EXPORT_SYMBOL(allocate_resource);
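
/*
 * Usage sketch (illustrative values; assumes "new" was prepared by the
 * caller, e.g. flags and name set up as for a PCI BAR): find and claim
 * any free, naturally aligned 4KiB of MMIO space below 4GiB.
 *
 *	err = allocate_resource(&iomem_resource, new, SZ_4K,
 *				0, 0xffffffff, SZ_4K, NULL, NULL);
 *	if (err)
 *		dev_warn(dev, "no space for %s\n", new->name);
 */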

/**
 * lookup_resource - find an existing resource by a resource start address
 * @root: root resource descriptor
 * @start: resource start address
 *
 * Returns a pointer to the resource if found, NULL otherwise
 */
struct resource *lookup_resource(struct resource *root, resource_size_t start)
{
	struct resource *res;

	read_lock(&resource_lock);
	for (res = root->child; res; res = res->sibling) {
		if (res->start == start)
			break;
	}
	read_unlock(&resource_lock);

	return res;
}

/*
 * Insert a resource into the resource tree. If successful, return NULL,
 * otherwise return the conflicting resource (compare to __request_resource())
 */
static struct resource * __insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *first, *next;

	for (;; parent = first) {
		first = __request_resource(parent, new);
		if (!first)
			return first;

		if (first == parent)
			return first;
		if (WARN_ON(first == new))	/* duplicated insertion */
			return first;

		if ((first->start > new->start) || (first->end < new->end))
			break;
		if ((first->start == new->start) && (first->end == new->end))
			break;
	}

	for (next = first; ; next = next->sibling) {
		/* Partial overlap? Bad, and unfixable */
		if (next->start < new->start || next->end > new->end)
			return next;
		if (!next->sibling)
			break;
		if (next->sibling->start > new->end)
			break;
	}

	new->parent = parent;
	new->sibling = next->sibling;
	new->child = first;

	next->sibling = NULL;
	for (next = first; next; next = next->sibling)
		next->parent = new;

	if (parent->child == first) {
		parent->child = new;
	} else {
		next = parent->child;
		while (next->sibling != first)
			next = next->sibling;
		next->sibling = new;
	}
	return NULL;
}

/**
 * insert_resource_conflict - Inserts resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns NULL on success, or the conflicting resource if the resource
 * can't be inserted.
 *
 * This function is equivalent to request_resource_conflict when no conflict
 * happens. If a conflict happens, and the conflicting resources
 * entirely fit within the range of the new resource, then the new
 * resource is inserted and the conflicting resources become children of
 * the new resource.
 *
 * This function is intended for producers of resources, such as FW modules
 * and bus drivers.
 */
struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __insert_resource(parent, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * insert_resource - Inserts a resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns 0 on success, -EBUSY if the resource can't be inserted.
 *
 * This function is intended for producers of resources, such as FW modules
 * and bus drivers.
 */
int insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	conflict = insert_resource_conflict(parent, new);
	return conflict ? -EBUSY : 0;
}
EXPORT_SYMBOL_GPL(insert_resource);
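
/*
 * Sketch of the producer-side pattern (name and range are made up):
 * unlike request_resource(), a conflict that fits entirely inside the
 * new window is adopted as a child instead of being rejected.
 *
 *	static struct resource window =
 *		DEFINE_RES_MEM_NAMED(0xe0000000, 0x10000000, "example-window");
 *
 *	if (insert_resource(&iomem_resource, &window))
 *		pr_warn("example-window partially overlaps an existing region\n");
 */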

/**
 * insert_resource_expand_to_fit - Insert a resource into the resource tree
 * @root: root resource descriptor
 * @new: new resource to insert
 *
 * Insert a resource into the resource tree, possibly expanding it in order
 * to make it encompass any conflicting resources.
 */
void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
{
	if (new->parent)
		return;

	write_lock(&resource_lock);
	for (;;) {
		struct resource *conflict;

		conflict = __insert_resource(root, new);
		if (!conflict)
			break;
		if (conflict == root)
			break;

		/* Ok, expand resource to cover the conflict, then try again .. */
		if (conflict->start < new->start)
			new->start = conflict->start;
		if (conflict->end > new->end)
			new->end = conflict->end;

		pr_info("Expanded resource %s due to conflict with %s\n", new->name, conflict->name);
	}
	write_unlock(&resource_lock);
}
/*
 * Not for general consumption, only early boot memory map parsing, PCI
 * resource discovery, and late discovery of CXL resources are expected
 * to use this interface. The former are built-in and only the latter,
 * CXL, is a module.
 */
EXPORT_SYMBOL_NS_GPL(insert_resource_expand_to_fit, "CXL");

/**
 * remove_resource - Remove a resource in the resource tree
 * @old: resource to remove
 *
 * Returns 0 on success, -EINVAL if the resource is not valid.
 *
 * This function removes a resource previously inserted by insert_resource()
 * or insert_resource_conflict(), and moves the children (if any) up to
 * where they were before. insert_resource() and insert_resource_conflict()
 * insert a new resource, and move any conflicting resources down to the
 * children of the new resource.
 *
 * insert_resource(), insert_resource_conflict() and remove_resource() are
 * intended for producers of resources, such as FW modules and bus drivers.
 */
int remove_resource(struct resource *old)
{
	int retval;

	write_lock(&resource_lock);
	retval = __release_resource(old, false);
	write_unlock(&resource_lock);
	return retval;
}
EXPORT_SYMBOL_GPL(remove_resource);

static int __adjust_resource(struct resource *res, resource_size_t start,
			     resource_size_t size)
{
	struct resource *tmp, *parent = res->parent;
	resource_size_t end = start + size - 1;
	int result = -EBUSY;

	if (!parent)
		goto skip;

	if ((start < parent->start) || (end > parent->end))
		goto out;

	if (res->sibling && (res->sibling->start <= end))
		goto out;

	tmp = parent->child;
	if (tmp != res) {
		while (tmp->sibling != res)
			tmp = tmp->sibling;
		if (start <= tmp->end)
			goto out;
	}

skip:
	for (tmp = res->child; tmp; tmp = tmp->sibling)
		if ((tmp->start < start) || (tmp->end > end))
			goto out;

	res->start = start;
	res->end = end;
	result = 0;

out:
	return result;
}

/**
 * adjust_resource - modify a resource's start and size
 * @res: resource to modify
 * @start: new start value
 * @size: new size
 *
 * Given an existing resource, change its start and size to match the
 * arguments. Returns 0 on success, -EBUSY if it can't fit.
 * Existing children of the resource are assumed to be immutable.
 */
int adjust_resource(struct resource *res, resource_size_t start,
		    resource_size_t size)
{
	int result;

	write_lock(&resource_lock);
	result = __adjust_resource(res, start, size);
	write_unlock(&resource_lock);
	return result;
}
EXPORT_SYMBOL(adjust_resource);
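
/*
 * Usage sketch: doubling a resource in place. This fails with -EBUSY if
 * the parent is too small, a sibling occupies the new range, or a child
 * would no longer fit ("res" and "dev" are illustrative).
 *
 *	resource_size_t size = resource_size(res);
 *
 *	if (adjust_resource(res, res->start, size * 2))
 *		dev_warn(dev, "cannot grow %pR\n", res);
 */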

static void __init
__reserve_region_with_split(struct resource *root, resource_size_t start,
			    resource_size_t end, const char *name)
{
	struct resource *parent = root;
	struct resource *conflict;
	struct resource *res = alloc_resource(GFP_ATOMIC);
	struct resource *next_res = NULL;
	int type = resource_type(root);

	if (!res)
		return;

	res->name = name;
	res->start = start;
	res->end = end;
	res->flags = type | IORESOURCE_BUSY;
	res->desc = IORES_DESC_NONE;

	while (1) {
		conflict = __request_resource(parent, res);
		if (!conflict) {
			if (!next_res)
				break;
			res = next_res;
			next_res = NULL;
			continue;
		}

		/* conflict covered whole area */
		if (conflict->start <= res->start &&
		    conflict->end >= res->end) {
			free_resource(res);
			WARN_ON(next_res);
			break;
		}

		/* failed, split and try again */
		if (conflict->start > res->start) {
			end = res->end;
			res->end = conflict->start - 1;
			if (conflict->end < end) {
				next_res = alloc_resource(GFP_ATOMIC);
				if (!next_res) {
					free_resource(res);
					break;
				}
				next_res->name = name;
				next_res->start = conflict->end + 1;
				next_res->end = end;
				next_res->flags = type | IORESOURCE_BUSY;
				next_res->desc = IORES_DESC_NONE;
			}
		} else {
			res->start = conflict->end + 1;
		}
	}
}

void __init
reserve_region_with_split(struct resource *root, resource_size_t start,
			  resource_size_t end, const char *name)
{
	int abort = 0;

	write_lock(&resource_lock);
	if (root->start > start || root->end < end) {
		pr_err("requested range [0x%llx-0x%llx] not in root %pr\n",
		       (unsigned long long)start, (unsigned long long)end,
		       root);
		if (start > root->end || end < root->start)
			abort = 1;
		else {
			if (end > root->end)
				end = root->end;
			if (start < root->start)
				start = root->start;
			pr_err("fixing request to [0x%llx-0x%llx]\n",
			       (unsigned long long)start,
			       (unsigned long long)end);
		}
		dump_stack();
	}
	if (!abort)
		__reserve_region_with_split(root, start, end, name);
	write_unlock(&resource_lock);
}

/**
 * resource_alignment - calculate resource's alignment
 * @res: resource pointer
 *
 * Returns alignment on success, 0 (invalid alignment) on failure.
 */
resource_size_t resource_alignment(struct resource *res)
{
	switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
	case IORESOURCE_SIZEALIGN:
		return resource_size(res);
	case IORESOURCE_STARTALIGN:
		return res->start;
	default:
		return 0;
	}
}

/*
 * This is compatibility stuff for IO resources.
 *
 * Note how this, unlike the above, knows about
 * the IO flag meanings (busy etc).
 *
 * request_region creates a new busy region.
 *
 * release_region releases a matching busy region.
 */

static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait);

static struct inode *iomem_inode;

#ifdef CONFIG_IO_STRICT_DEVMEM
static void revoke_iomem(struct resource *res)
{
	/* pairs with smp_store_release() in iomem_init_inode() */
	struct inode *inode = smp_load_acquire(&iomem_inode);

	/*
	 * Check that the initialization has completed. Losing the race
	 * is ok because it means drivers claimed their resources before
	 * the fs_initcall level of init, which prevents iomem_get_mapping()
	 * users from having established any mappings.
	 */
	if (!inode)
		return;

	/*
	 * The expectation is that the driver has successfully marked
	 * the resource busy by this point, so devmem_is_allowed()
	 * should start returning false, however for performance this
	 * does not iterate the entire resource range.
	 */
	if (devmem_is_allowed(PHYS_PFN(res->start)) &&
	    devmem_is_allowed(PHYS_PFN(res->end))) {
		/*
		 * *cringe* iomem=relaxed says "go ahead, what's the
		 * worst that can happen?"
		 */
		return;
	}

	unmap_mapping_range(inode->i_mapping, res->start, resource_size(res), 1);
}
#else
static void revoke_iomem(struct resource *res) {}
#endif

struct address_space *iomem_get_mapping(void)
{
	/*
	 * This function is only called from file open paths, hence guaranteed
	 * that fs_initcalls have completed and no need to check for NULL. But
	 * since revoke_iomem can be called before the initcall we still need
	 * the barrier to appease checkers.
	 */
	return smp_load_acquire(&iomem_inode)->i_mapping;
}

static int __request_region_locked(struct resource *res, struct resource *parent,
				   resource_size_t start, resource_size_t n,
				   const char *name, int flags)
{
	DECLARE_WAITQUEUE(wait, current);

	res->name = name;
	res->start = start;
	res->end = start + n - 1;

	for (;;) {
		struct resource *conflict;

		res->flags = resource_type(parent) | resource_ext_type(parent);
		res->flags |= IORESOURCE_BUSY | flags;
		res->desc = parent->desc;

		conflict = __request_resource(parent, res);
		if (!conflict)
			break;
		/*
		 * mm/hmm.c reserves physical addresses which then
		 * become unavailable to other users. Conflicts are
		 * not expected. Warn to aid debugging if encountered.
		 */
		if (parent == &iomem_resource &&
		    conflict->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY) {
			pr_warn("Unaddressable device %s %pR conflicts with %pR\n",
				conflict->name, conflict, res);
		}
		if (conflict != parent) {
			if (!(conflict->flags & IORESOURCE_BUSY)) {
				parent = conflict;
				continue;
			}
		}
		if (conflict->flags & flags & IORESOURCE_MUXED) {
			add_wait_queue(&muxed_resource_wait, &wait);
			write_unlock(&resource_lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule();
			remove_wait_queue(&muxed_resource_wait, &wait);
			write_lock(&resource_lock);
			continue;
		}
		/* Uhhuh, that didn't work out.. */
		return -EBUSY;
	}

	return 0;
}

/**
 * __request_region - create a new busy resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 * @name: reserving caller's ID string
 * @flags: IO resource flags
 */
struct resource *__request_region(struct resource *parent,
				  resource_size_t start, resource_size_t n,
				  const char *name, int flags)
{
	struct resource *res = alloc_resource(GFP_KERNEL);
	int ret;

	if (!res)
		return NULL;

	write_lock(&resource_lock);
	ret = __request_region_locked(res, parent, start, n, name, flags);
	write_unlock(&resource_lock);

	if (ret) {
		free_resource(res);
		return NULL;
	}

	if (parent == &iomem_resource)
		revoke_iomem(res);

	return res;
}
EXPORT_SYMBOL(__request_region);

/**
 * __release_region - release a previously reserved resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * The described resource region must match a currently busy region.
 */
void __release_region(struct resource *parent, resource_size_t start,
		      resource_size_t n)
{
	struct resource **p;
	resource_size_t end;

	p = &parent->child;
	end = start + n - 1;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *res = *p;

		if (!res)
			break;
		if (res->start <= start && res->end >= end) {
			if (!(res->flags & IORESOURCE_BUSY)) {
				p = &res->child;
				continue;
			}
			if (res->start != start || res->end != end)
				break;
			*p = res->sibling;
			write_unlock(&resource_lock);
			if (res->flags & IORESOURCE_MUXED)
				wake_up(&muxed_resource_wait);
			free_resource(res);
			return;
		}
		p = &res->sibling;
	}

	write_unlock(&resource_lock);

	pr_warn("Trying to free nonexistent resource <%pa-%pa>\n", &start, &end);
}
EXPORT_SYMBOL(__release_region);
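
/*
 * The classic pairing via the convenience macros from <linux/ioport.h>
 * (port and size are illustrative):
 *
 *	if (!request_region(0x60, 4, "example-kbd"))
 *		return -EBUSY;
 *	...
 *	release_region(0x60, 4);
 */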

#ifdef CONFIG_MEMORY_HOTREMOVE
static void append_child_to_parent(struct resource *new_parent, struct resource *new_child)
{
	struct resource *child;

	child = new_parent->child;
	if (child) {
		while (child->sibling)
			child = child->sibling;
		child->sibling = new_child;
	} else {
		new_parent->child = new_child;
	}
	new_child->parent = new_parent;
	new_child->sibling = NULL;
}

/*
 * Reparent all child resources that no longer belong to "low" after a split to
 * "high". Note that "high" does not have any children, because "low" is the
 * original resource and "high" is a new resource. Treat "low" as the original
 * resource being split and defer its range adjustment to __adjust_resource().
 */
static void reparent_children_after_split(struct resource *low,
					  struct resource *high,
					  resource_size_t split_addr)
{
	struct resource *child, *next, **p;

	p = &low->child;
	while ((child = *p)) {
		next = child->sibling;
		if (child->start > split_addr) {
			/* unlink child */
			*p = next;
			append_child_to_parent(high, child);
		} else {
			p = &child->sibling;
		}
	}
}

/**
 * release_mem_region_adjustable - release a previously reserved memory region
 * @start: resource start address
 * @size: resource region size
 *
 * This interface is intended for memory hot-delete. The requested region
 * is released from a currently busy memory resource. The requested region
 * must either match exactly or fit into a single busy resource entry. In
 * the latter case, the remaining resource is adjusted accordingly.
 *
 * Note:
 * - Additional release conditions, such as overlapping region, can be
 *   supported after they are confirmed as valid cases.
 * - When a busy memory resource gets split into two entries, its children are
 *   reassigned to the correct parent based on their range. If a child memory
 *   resource overlaps with more than one parent, enhance the logic as needed.
 */
void release_mem_region_adjustable(resource_size_t start, resource_size_t size)
{
	struct resource *parent = &iomem_resource;
	struct resource *new_res = NULL;
	bool alloc_nofail = false;
	struct resource **p;
	struct resource *res;
	resource_size_t end;

	end = start + size - 1;
	if (WARN_ON_ONCE((start < parent->start) || (end > parent->end)))
		return;

	/*
	 * We free up quite a lot of memory on memory hotunplug (esp., memmap),
	 * just before releasing the region. This is highly unlikely to
	 * fail - let's play safe and make it never fail as the caller cannot
	 * perform any error handling (e.g., trying to re-add memory will fail
	 * similarly).
	 */
retry:
	new_res = alloc_resource(GFP_KERNEL | (alloc_nofail ? __GFP_NOFAIL : 0));

	p = &parent->child;
	write_lock(&resource_lock);

	while ((res = *p)) {
		if (res->start >= end)
			break;

		/* look for the next resource if it does not fit into */
		if (res->start > start || res->end < end) {
			p = &res->sibling;
			continue;
		}

		if (!(res->flags & IORESOURCE_MEM))
			break;

		if (!(res->flags & IORESOURCE_BUSY)) {
			p = &res->child;
			continue;
		}

		/* found the target resource; let's adjust accordingly */
		if (res->start == start && res->end == end) {
			/* free the whole entry */
			*p = res->sibling;
			free_resource(res);
		} else if (res->start == start && res->end != end) {
			/* adjust the start */
			WARN_ON_ONCE(__adjust_resource(res, end + 1,
						       res->end - end));
		} else if (res->start != start && res->end == end) {
			/* adjust the end */
			WARN_ON_ONCE(__adjust_resource(res, res->start,
						       start - res->start));
		} else {
			/* split into two entries - we need a new resource */
			if (!new_res) {
				new_res = alloc_resource(GFP_ATOMIC);
				if (!new_res) {
					alloc_nofail = true;
					write_unlock(&resource_lock);
					goto retry;
				}
			}
			new_res->name = res->name;
			new_res->start = end + 1;
			new_res->end = res->end;
			new_res->flags = res->flags;
			new_res->desc = res->desc;
			new_res->parent = res->parent;
			new_res->sibling = res->sibling;
			new_res->child = NULL;
			reparent_children_after_split(res, new_res, end);

			if (WARN_ON_ONCE(__adjust_resource(res, res->start,
							   start - res->start)))
				break;
			res->sibling = new_res;
			new_res = NULL;
		}

		break;
	}

	write_unlock(&resource_lock);
	free_resource(new_res);
}
#endif	/* CONFIG_MEMORY_HOTREMOVE */

#ifdef CONFIG_MEMORY_HOTPLUG
static bool system_ram_resources_mergeable(struct resource *r1,
					   struct resource *r2)
{
	/* We assume either r1 or r2 is IORESOURCE_SYSRAM_MERGEABLE. */
	return r1->flags == r2->flags && r1->end + 1 == r2->start &&
	       r1->name == r2->name && r1->desc == r2->desc &&
	       !r1->child && !r2->child;
}

/**
 * merge_system_ram_resource - mark the System RAM resource mergeable and try to
 *	merge it with adjacent, mergeable resources
 * @res: resource descriptor
 *
 * This interface is intended for memory hotplug, whereby lots of contiguous
 * system ram resources are added (e.g., via add_memory*()) by a driver, and
 * the actual resource boundaries are not of interest (e.g., it might be
 * relevant for DIMMs). Only resources that are marked mergeable, that have the
 * same parent, and that don't have any children are considered. All mergeable
 * resources must be immutable during the request.
 *
 * Note:
 * - The caller has to make sure that no pointers to resources that are
 *   marked mergeable are used anymore after this call - the resource might
 *   be freed and the pointer might be stale!
 * - release_mem_region_adjustable() will split on demand on memory hotunplug
 */
void merge_system_ram_resource(struct resource *res)
{
	const unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	struct resource *cur;

	if (WARN_ON_ONCE((res->flags & flags) != flags))
		return;

	write_lock(&resource_lock);
	res->flags |= IORESOURCE_SYSRAM_MERGEABLE;

	/* Try to merge with next item in the list. */
	cur = res->sibling;
	if (cur && system_ram_resources_mergeable(res, cur)) {
		res->end = cur->end;
		res->sibling = cur->sibling;
		free_resource(cur);
	}

	/* Try to merge with previous item in the list. */
	cur = res->parent->child;
	while (cur && cur->sibling != res)
		cur = cur->sibling;
	if (cur && system_ram_resources_mergeable(cur, res)) {
		cur->end = res->end;
		cur->sibling = res->sibling;
		free_resource(res);
	}
	write_unlock(&resource_lock);
}
#endif	/* CONFIG_MEMORY_HOTPLUG */

/*
 * Managed region resource
 */
static void devm_resource_release(struct device *dev, void *ptr)
{
	struct resource **r = ptr;

	release_resource(*r);
}

/**
 * devm_request_resource() - request and reserve an I/O or memory resource
 * @dev: device for which to request the resource
 * @root: root of the resource tree from which to request the resource
 * @new: descriptor of the resource to request
 *
 * This is a device-managed version of request_resource(). There is usually
 * no need to release resources requested by this function explicitly since
 * that will be taken care of when the device is unbound from its driver.
 * If for some reason the resource needs to be released explicitly, because
 * of ordering issues for example, drivers must call devm_release_resource()
 * rather than the regular release_resource().
 *
 * When a conflict is detected between any existing resources and the newly
 * requested resource, an error message will be printed.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int devm_request_resource(struct device *dev, struct resource *root,
			  struct resource *new)
{
	struct resource *conflict, **ptr;

	ptr = devres_alloc(devm_resource_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	*ptr = new;

	conflict = request_resource_conflict(root, new);
	if (conflict) {
		dev_err(dev, "resource collision: %pR conflicts with %s %pR\n",
			new, conflict->name, conflict);
		devres_free(ptr);
		return -EBUSY;
	}

	devres_add(dev, ptr);
	return 0;
}
EXPORT_SYMBOL(devm_request_resource);
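
/*
 * Usage sketch of the device-managed variant in a probe path (driver
 * names are made up); the resource is released automatically when the
 * device is unbound:
 *
 *	static int example_probe(struct platform_device *pdev)
 *	{
 *		struct resource *res;
 *
 *		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *		if (!res)
 *			return -ENODEV;
 *		return devm_request_resource(&pdev->dev, &iomem_resource, res);
 *	}
 */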
1709
devm_resource_match(struct device * dev,void * res,void * data)1710 static int devm_resource_match(struct device *dev, void *res, void *data)
1711 {
1712 struct resource **ptr = res;
1713
1714 return *ptr == data;
1715 }
1716
1717 /**
1718 * devm_release_resource() - release a previously requested resource
1719 * @dev: device for which to release the resource
1720 * @new: descriptor of the resource to release
1721 *
1722 * Releases a resource previously requested using devm_request_resource().
1723 */
devm_release_resource(struct device * dev,struct resource * new)1724 void devm_release_resource(struct device *dev, struct resource *new)
1725 {
1726 WARN_ON(devres_release(dev, devm_resource_release, devm_resource_match,
1727 new));
1728 }
1729 EXPORT_SYMBOL(devm_release_resource);
1730
1731 struct region_devres {
1732 struct resource *parent;
1733 resource_size_t start;
1734 resource_size_t n;
1735 };
1736
devm_region_release(struct device * dev,void * res)1737 static void devm_region_release(struct device *dev, void *res)
1738 {
1739 struct region_devres *this = res;
1740
1741 __release_region(this->parent, this->start, this->n);
1742 }
1743
devm_region_match(struct device * dev,void * res,void * match_data)1744 static int devm_region_match(struct device *dev, void *res, void *match_data)
1745 {
1746 struct region_devres *this = res, *match = match_data;
1747
1748 return this->parent == match->parent &&
1749 this->start == match->start && this->n == match->n;
1750 }
1751
1752 struct resource *
__devm_request_region(struct device * dev,struct resource * parent,resource_size_t start,resource_size_t n,const char * name)1753 __devm_request_region(struct device *dev, struct resource *parent,
1754 resource_size_t start, resource_size_t n, const char *name)
1755 {
1756 struct region_devres *dr = NULL;
1757 struct resource *res;
1758
1759 dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
1760 GFP_KERNEL);
1761 if (!dr)
1762 return NULL;
1763
1764 dr->parent = parent;
1765 dr->start = start;
1766 dr->n = n;
1767
1768 res = __request_region(parent, start, n, name, 0);
1769 if (res)
1770 devres_add(dev, dr);
1771 else
1772 devres_free(dr);
1773
1774 return res;
1775 }
1776 EXPORT_SYMBOL(__devm_request_region);

void __devm_release_region(struct device *dev, struct resource *parent,
                           resource_size_t start, resource_size_t n)
{
        struct region_devres match_data = { parent, start, n };

        WARN_ON(devres_release(dev, devm_region_release, devm_region_match,
                               &match_data));
}
EXPORT_SYMBOL(__devm_release_region);

/*
 * Reserve I/O ports or memory based on the "reserve=" kernel parameter.
 */
#define MAXRESERVE 4
static int __init reserve_setup(char *str)
{
        static int reserved;
        static struct resource reserve[MAXRESERVE];

        for (;;) {
                unsigned int io_start, io_num;
                int x = reserved;
                struct resource *parent;

                if (get_option(&str, &io_start) != 2)
                        break;
                if (get_option(&str, &io_num) == 0)
                        break;
                if (x < MAXRESERVE) {
                        struct resource *res = reserve + x;

                        /*
                         * If the region starts below 0x10000, we assume it's
                         * I/O port space; otherwise assume it's memory.
                         */
                        if (io_start < 0x10000) {
                                *res = DEFINE_RES_IO_NAMED(io_start, io_num, "reserved");
                                parent = &ioport_resource;
                        } else {
                                *res = DEFINE_RES_MEM_NAMED(io_start, io_num, "reserved");
                                parent = &iomem_resource;
                        }
                        res->flags |= IORESOURCE_BUSY;
                        if (request_resource(parent, res) == 0)
                                reserved = x + 1;
                }
        }
        return 1;
}
__setup("reserve=", reserve_setup);
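/*
 * Example (boot command line; see the "reserve=" entry in
 * Documentation/admin-guide/kernel-parameters.txt). A base below 0x10000
 * is treated as an I/O port range, anything else as memory:
 *
 *	reserve=0x300,16		marks I/O ports 0x300-0x30f busy
 *	reserve=0x90000000,0x100000	marks a 1 MiB memory range busy
 *
 * Up to MAXRESERVE base,size pairs may be given.
 */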

/*
 * Check if the requested addr and size span more than any single slot in
 * the iomem resource tree.
 */
int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
{
        resource_size_t end = addr + size - 1;
        struct resource *p;
        int err = 0;

        read_lock(&resource_lock);
        for_each_resource(&iomem_resource, p, false) {
                /*
                 * We can probably skip the resources without
                 * IORESOURCE_IO attribute?
                 */
                if (p->start > end)
                        continue;
                if (p->end < addr)
                        continue;
                if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
                    PFN_DOWN(p->end) >= PFN_DOWN(end))
                        continue;
                /*
                 * If a resource is "BUSY", it's not a hardware resource
                 * but a driver mapping of such a resource; we don't want
                 * to warn for those; some drivers legitimately map only
                 * partial hardware resources. (example: vesafb)
                 */
                if (p->flags & IORESOURCE_BUSY)
                        continue;

                pr_warn("resource sanity check: requesting [mem %pa-%pa], which spans more than %s %pR\n",
                        &addr, &end, p->name, p);
                err = -1;
                break;
        }
        read_unlock(&resource_lock);

        return err;
}
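/*
 * Sketch (hypothetical request): a range contained within one resource
 * passes, while one that straddles a resource boundary is reported. A
 * caller can use this as a debugging aid before setting up a mapping:
 *
 *	if (iomem_map_sanity_check(phys_addr, size))
 *		pr_warn("mapping spans multiple iomem resources\n");
 */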

#ifdef CONFIG_STRICT_DEVMEM
static int strict_iomem_checks = 1;
#else
static int strict_iomem_checks;
#endif

/*
 * Check if an address is exclusive to the kernel and must not be mapped to
 * user space, for example, via /dev/mem.
 *
 * Returns true if exclusive to the kernel, otherwise returns false.
 */
bool resource_is_exclusive(struct resource *root, u64 addr, resource_size_t size)
{
        const unsigned int exclusive_system_ram = IORESOURCE_SYSTEM_RAM |
                                                  IORESOURCE_EXCLUSIVE;
        bool skip_children = false, err = false;
        struct resource *p;

        read_lock(&resource_lock);
        for_each_resource(root, p, skip_children) {
                if (p->start >= addr + size)
                        break;
                if (p->end < addr) {
                        skip_children = true;
                        continue;
                }
                skip_children = false;

                /*
                 * IORESOURCE_SYSTEM_RAM resources are exclusive if
                 * IORESOURCE_EXCLUSIVE is set, even if they
                 * are not busy and even if "iomem=relaxed" is set. The
                 * responsible driver dynamically adds/removes system RAM within
                 * such an area and uncontrolled access is dangerous.
                 */
                if ((p->flags & exclusive_system_ram) == exclusive_system_ram) {
                        err = true;
                        break;
                }

                /*
                 * A resource is exclusive if IORESOURCE_EXCLUSIVE is set
                 * or CONFIG_IO_STRICT_DEVMEM is enabled and the
                 * resource is busy.
                 */
                if (!strict_iomem_checks || !(p->flags & IORESOURCE_BUSY))
                        continue;
                if (IS_ENABLED(CONFIG_IO_STRICT_DEVMEM) ||
                    p->flags & IORESOURCE_EXCLUSIVE) {
                        err = true;
                        break;
                }
        }
        read_unlock(&resource_lock);

        return err;
}

bool iomem_is_exclusive(u64 addr)
{
        return resource_is_exclusive(&iomem_resource, addr & PAGE_MASK,
                                     PAGE_SIZE);
}
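/*
 * Sketch of the intended use (the surrounding driver code is
 * hypothetical): a character device handing physical pages to user space
 * should refuse ranges the kernel considers exclusive, as /dev/mem does:
 *
 *	if (iomem_is_exclusive(offset))
 *		return -EPERM;
 */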

struct resource_entry *resource_list_create_entry(struct resource *res,
                                                  size_t extra_size)
{
        struct resource_entry *entry;

        entry = kzalloc(sizeof(*entry) + extra_size, GFP_KERNEL);
        if (entry) {
                INIT_LIST_HEAD(&entry->node);
                entry->res = res ? res : &entry->__res;
        }

        return entry;
}
EXPORT_SYMBOL(resource_list_create_entry);
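/*
 * Sketch (assuming a caller-owned list head and a hypothetical range):
 * entries are typically chained with the resource_list_add_tail() helper
 * from <linux/resource_ext.h> and torn down in one go:
 *
 *	LIST_HEAD(resources);
 *	struct resource_entry *entry;
 *
 *	entry = resource_list_create_entry(NULL, 0);
 *	if (!entry)
 *		return -ENOMEM;
 *	entry->res->start = start;
 *	entry->res->end = end;
 *	resource_list_add_tail(entry, &resources);
 *	...
 *	resource_list_free(&resources);
 */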

void resource_list_free(struct list_head *head)
{
        struct resource_entry *entry, *tmp;

        list_for_each_entry_safe(entry, tmp, head, node)
                resource_list_destroy_entry(entry);
}
EXPORT_SYMBOL(resource_list_free);

#ifdef CONFIG_GET_FREE_REGION
#define GFR_DESCENDING		(1UL << 0)
#define GFR_REQUEST_REGION	(1UL << 1)
#ifdef PA_SECTION_SHIFT
#define GFR_DEFAULT_ALIGN	(1UL << PA_SECTION_SHIFT)
#else
#define GFR_DEFAULT_ALIGN	PAGE_SIZE
#endif

static resource_size_t gfr_start(struct resource *base, resource_size_t size,
                                 resource_size_t align, unsigned long flags)
{
        if (flags & GFR_DESCENDING) {
                resource_size_t end;

                end = min_t(resource_size_t, base->end, DIRECT_MAP_PHYSMEM_END);
                return end - size + 1;
        }

        return ALIGN(max(base->start, align), align);
}

static bool gfr_continue(struct resource *base, resource_size_t addr,
                         resource_size_t size, unsigned long flags)
{
        if (flags & GFR_DESCENDING)
                return addr > size && addr >= base->start;
        /*
         * In the ascending case, be careful that the last increment by
         * @size did not wrap around 0.
         */
        return addr > addr - size &&
               addr <= min_t(resource_size_t, base->end, DIRECT_MAP_PHYSMEM_END);
}

static resource_size_t gfr_next(resource_size_t addr, resource_size_t size,
                                unsigned long flags)
{
        if (flags & GFR_DESCENDING)
                return addr - size;
        return addr + size;
}

static void remove_free_mem_region(void *_res)
{
        struct resource *res = _res;

        if (res->parent)
                remove_resource(res);
        free_resource(res);
}

static struct resource *
get_free_mem_region(struct device *dev, struct resource *base,
                    resource_size_t size, const unsigned long align,
                    const char *name, const unsigned long desc,
                    const unsigned long flags)
{
        resource_size_t addr;
        struct resource *res;
        struct region_devres *dr = NULL;

        size = ALIGN(size, align);

        res = alloc_resource(GFP_KERNEL);
        if (!res)
                return ERR_PTR(-ENOMEM);

        if (dev && (flags & GFR_REQUEST_REGION)) {
                dr = devres_alloc(devm_region_release,
                                  sizeof(struct region_devres), GFP_KERNEL);
                if (!dr) {
                        free_resource(res);
                        return ERR_PTR(-ENOMEM);
                }
        } else if (dev) {
                if (devm_add_action_or_reset(dev, remove_free_mem_region, res))
                        return ERR_PTR(-ENOMEM);
        }

        write_lock(&resource_lock);
        for (addr = gfr_start(base, size, align, flags);
             gfr_continue(base, addr, align, flags);
             addr = gfr_next(addr, align, flags)) {
                if (__region_intersects(base, addr, size, 0, IORES_DESC_NONE) !=
                    REGION_DISJOINT)
                        continue;

                if (flags & GFR_REQUEST_REGION) {
                        if (__request_region_locked(res, &iomem_resource, addr,
                                                    size, name, 0))
                                break;

                        if (dev) {
                                dr->parent = &iomem_resource;
                                dr->start = addr;
                                dr->n = size;
                                devres_add(dev, dr);
                        }

                        res->desc = desc;
                        write_unlock(&resource_lock);

                        /*
                         * A driver is claiming this region so revoke any
                         * mappings.
                         */
                        revoke_iomem(res);
                } else {
                        *res = DEFINE_RES_NAMED_DESC(addr, size, name, IORESOURCE_MEM, desc);

                        /*
                         * Only succeed if the resource hosts an exclusive
                         * range after the insert.
                         */
                        if (__insert_resource(base, res) || res->child)
                                break;

                        write_unlock(&resource_lock);
                }

                return res;
        }
        write_unlock(&resource_lock);

        if (flags & GFR_REQUEST_REGION) {
                free_resource(res);
                devres_free(dr);
        } else if (dev)
                devm_release_action(dev, remove_free_mem_region, res);

        return ERR_PTR(-ERANGE);
}

/**
 * devm_request_free_mem_region - find free region for device private memory
 *
 * @dev: device struct to bind the resource to
 * @base: resource tree to look in
 * @size: size in bytes of the device memory to add
 *
 * This function tries to find an empty range of physical address space big
 * enough to contain the new resource, so that it can later be hotplugged as
 * ZONE_DEVICE memory, which in turn allocates struct pages.
 */
struct resource *devm_request_free_mem_region(struct device *dev,
                struct resource *base, unsigned long size)
{
        unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION;

        return get_free_mem_region(dev, base, size, GFR_DEFAULT_ALIGN,
                                   dev_name(dev),
                                   IORES_DESC_DEVICE_PRIVATE_MEMORY, flags);
}
EXPORT_SYMBOL_GPL(devm_request_free_mem_region);
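/*
 * Sketch of typical use by a ZONE_DEVICE driver (abbreviated; the
 * surrounding driver and its pagemap setup are hypothetical):
 *
 *	res = devm_request_free_mem_region(dev, &iomem_resource, size);
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *
 *	pgmap->type = MEMORY_DEVICE_PRIVATE;
 *	pgmap->range.start = res->start;
 *	pgmap->range.end = res->end;
 *	pgmap->nr_range = 1;
 *	devm_memremap_pages(dev, pgmap);
 */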

struct resource *request_free_mem_region(struct resource *base,
                unsigned long size, const char *name)
{
        unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION;

        return get_free_mem_region(NULL, base, size, GFR_DEFAULT_ALIGN, name,
                                   IORES_DESC_DEVICE_PRIVATE_MEMORY, flags);
}
EXPORT_SYMBOL_GPL(request_free_mem_region);

/**
 * alloc_free_mem_region - find a free region relative to @base
 * @base: resource that will parent the new resource
 * @size: size in bytes of memory to allocate from @base
 * @align: alignment requirements for the allocation
 * @name: resource name
 *
 * Buses like CXL, that can dynamically instantiate new memory regions,
 * need a method to allocate physical address space for those regions.
 * Allocate and insert a new resource covering a free range in the span
 * of @base that is not claimed by any descendant of @base.
 */
struct resource *alloc_free_mem_region(struct resource *base,
                                       unsigned long size, unsigned long align,
                                       const char *name)
{
        /* Default of ascending direction and insert resource */
        unsigned long flags = 0;

        return get_free_mem_region(NULL, base, size, align, name,
                                   IORES_DESC_NONE, flags);
}
EXPORT_SYMBOL_GPL(alloc_free_mem_region);
#endif /* CONFIG_GET_FREE_REGION */
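/*
 * Sketch (the parent window and sizes are hypothetical): a bus driver
 * carving a naturally aligned range out of a host-bridge window it
 * already owns:
 *
 *	res = alloc_free_mem_region(&hpa_window, SZ_256M, SZ_256M,
 *				    "region0");
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *
 * The caller owns the returned resource and is responsible for removing
 * it (e.g. via remove_resource()) when the region is torn down.
 */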

static int __init strict_iomem(char *str)
{
        if (strstr(str, "relaxed"))
                strict_iomem_checks = 0;
        if (strstr(str, "strict"))
                strict_iomem_checks = 1;
        return 1;
}

static int iomem_fs_init_fs_context(struct fs_context *fc)
{
        return init_pseudo(fc, DEVMEM_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type iomem_fs_type = {
        .name            = "iomem",
        .owner           = THIS_MODULE,
        .init_fs_context = iomem_fs_init_fs_context,
        .kill_sb         = kill_anon_super,
};

static int __init iomem_init_inode(void)
{
        static struct vfsmount *iomem_vfs_mount;
        static int iomem_fs_cnt;
        struct inode *inode;
        int rc;

        rc = simple_pin_fs(&iomem_fs_type, &iomem_vfs_mount, &iomem_fs_cnt);
        if (rc < 0) {
                pr_err("Cannot mount iomem pseudo filesystem: %d\n", rc);
                return rc;
        }

        inode = alloc_anon_inode(iomem_vfs_mount->mnt_sb);
        if (IS_ERR(inode)) {
                rc = PTR_ERR(inode);
                pr_err("Cannot allocate inode for iomem: %d\n", rc);
                simple_release_fs(&iomem_vfs_mount, &iomem_fs_cnt);
                return rc;
        }

        /*
         * Publish iomem revocation inode initialized.
         * Pairs with smp_load_acquire() in revoke_iomem().
         */
        smp_store_release(&iomem_inode, inode);

        return 0;
}

fs_initcall(iomem_init_inode);

__setup("iomem=", strict_iomem);
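/*
 * Example (boot command line; see the "iomem=" entry in
 * Documentation/admin-guide/kernel-parameters.txt): "iomem=relaxed"
 * disables the strict /dev/mem checks selected by CONFIG_STRICT_DEVMEM,
 * while "iomem=strict" enables them:
 *
 *	iomem=relaxed
 */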