// SPDX-License-Identifier: GPL-2.0-only
/*
 *	linux/kernel/resource.c
 *
 * Copyright (C) 1999	Linus Torvalds
 * Copyright (C) 1999	Martin Mares <mj@ucw.cz>
 *
 * Arbitrary resource management.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/pseudo_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/resource_ext.h>
#include <uapi/linux/magic.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <asm/io.h>


struct resource ioport_resource = {
	.name	= "PCI IO",
	.start	= 0,
	.end	= IO_SPACE_LIMIT,
	.flags	= IORESOURCE_IO,
};
EXPORT_SYMBOL(ioport_resource);

struct resource iomem_resource = {
	.name	= "PCI mem",
	.start	= 0,
	.end	= -1,
	.flags	= IORESOURCE_MEM,
};
EXPORT_SYMBOL(iomem_resource);

static DEFINE_RWLOCK(resource_lock);

/*
 * Return the next node of @p in pre-order tree traversal.  If
 * @skip_children is true, skip the descendant nodes of @p in
 * traversal.  If @p is a descendant of @subtree_root, only traverse
 * the subtree under @subtree_root.
 */
static struct resource *next_resource(struct resource *p, bool skip_children,
				      struct resource *subtree_root)
{
	if (!skip_children && p->child)
		return p->child;
	while (!p->sibling && p->parent) {
		p = p->parent;
		if (p == subtree_root)
			return NULL;
	}
	return p->sibling;
}

/*
 * Traverse the resource subtree under @_root in pre-order, excluding
 * @_root itself.
 *
 * NOTE: '__p' is introduced to avoid shadowing '_p' outside of the loop.
 * It is referenced in the loop condition to avoid an unused-variable
 * warning.
 */
#define for_each_resource(_root, _p, _skip_children) \
	for (typeof(_root) __root = (_root), __p = _p = __root->child;	\
	     __p && _p; _p = next_resource(_p, _skip_children, __root))
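/*
 * A minimal usage sketch (illustrative only, not part of this file):
 * dumping every node of the iomem tree in pre-order while holding the
 * lock that guards the tree.
 *
 *	struct resource *p;
 *
 *	read_lock(&resource_lock);
 *	for_each_resource(&iomem_resource, p, false)
 *		pr_info("%pR\n", p);
 *	read_unlock(&resource_lock);
 */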

#ifdef CONFIG_PROC_FS

enum { MAX_IORES_LEVEL = 5 };

static void *r_start(struct seq_file *m, loff_t *pos)
	__acquires(resource_lock)
{
	struct resource *root = pde_data(file_inode(m->file));
	struct resource *p;
	loff_t l = *pos;

	read_lock(&resource_lock);
	for_each_resource(root, p, false) {
		if (l-- == 0)
			break;
	}

	return p;
}

static void *r_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct resource *p = v;

	(*pos)++;

	return (void *)next_resource(p, false, NULL);
}

static void r_stop(struct seq_file *m, void *v)
	__releases(resource_lock)
{
	read_unlock(&resource_lock);
}

static int r_show(struct seq_file *m, void *v)
{
	struct resource *root = pde_data(file_inode(m->file));
	struct resource *r = v, *p;
	unsigned long long start, end;
	int width = root->end < 0x10000 ? 4 : 8;
	int depth;

	for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
		if (p->parent == root)
			break;

	if (file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN)) {
		start = r->start;
		end = r->end;
	} else {
		start = end = 0;
	}

	seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
			depth * 2, "",
			width, start,
			width, end,
			r->name ? r->name : "<BAD>");
	return 0;
}

static const struct seq_operations resource_op = {
	.start	= r_start,
	.next	= r_next,
	.stop	= r_stop,
	.show	= r_show,
};

static int __init ioresources_init(void)
{
	proc_create_seq_data("ioports", 0, NULL, &resource_op,
			&ioport_resource);
	proc_create_seq_data("iomem", 0, NULL, &resource_op, &iomem_resource);
	return 0;
}
__initcall(ioresources_init);

#endif /* CONFIG_PROC_FS */

static void free_resource(struct resource *res)
{
	/*
	 * If the resource was allocated using memblock early during boot
	 * we'll leak it here: we can only return full pages back to the
	 * buddy and trying to be smart and reusing them eventually in
	 * alloc_resource() overcomplicates resource handling.
	 */
	if (res && PageSlab(virt_to_head_page(res)))
		kfree(res);
}

static struct resource *alloc_resource(gfp_t flags)
{
	return kzalloc(sizeof(struct resource), flags);
}

/* Return the conflict entry if you can't request it */
static struct resource * __request_resource(struct resource *root, struct resource *new)
{
	resource_size_t start = new->start;
	resource_size_t end = new->end;
	struct resource *tmp, **p;

	if (end < start)
		return root;
	if (start < root->start)
		return root;
	if (end > root->end)
		return root;
	p = &root->child;
	for (;;) {
		tmp = *p;
		if (!tmp || tmp->start > end) {
			new->sibling = tmp;
			*p = new;
			new->parent = root;
			return NULL;
		}
		p = &tmp->sibling;
		if (tmp->end < start)
			continue;
		return tmp;
	}
}

static int __release_resource(struct resource *old, bool release_child)
{
	struct resource *tmp, **p, *chd;

	p = &old->parent->child;
	for (;;) {
		tmp = *p;
		if (!tmp)
			break;
		if (tmp == old) {
			if (release_child || !(tmp->child)) {
				*p = tmp->sibling;
			} else {
				for (chd = tmp->child;; chd = chd->sibling) {
					chd->parent = tmp->parent;
					if (!(chd->sibling))
						break;
				}
				*p = tmp->child;
				chd->sibling = tmp->sibling;
			}
			old->parent = NULL;
			return 0;
		}
		p = &tmp->sibling;
	}
	return -EINVAL;
}

static void __release_child_resources(struct resource *r)
{
	struct resource *tmp, *p;
	resource_size_t size;

	p = r->child;
	r->child = NULL;
	while (p) {
		tmp = p;
		p = p->sibling;

		tmp->parent = NULL;
		tmp->sibling = NULL;
		__release_child_resources(tmp);

		printk(KERN_DEBUG "release child resource %pR\n", tmp);
		/* need to restore size, and keep flags */
		size = resource_size(tmp);
		tmp->start = 0;
		tmp->end = size - 1;
	}
}

void release_child_resources(struct resource *r)
{
	write_lock(&resource_lock);
	__release_child_resources(r);
	write_unlock(&resource_lock);
}

/**
 * request_resource_conflict - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns NULL for success, or a pointer to the conflicting resource on error.
 */
struct resource *request_resource_conflict(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __request_resource(root, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * request_resource - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns 0 for success, negative error code on error.
 */
int request_resource(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	conflict = request_resource_conflict(root, new);
	return conflict ? -EBUSY : 0;
}

EXPORT_SYMBOL(request_resource);

/**
 * release_resource - release a previously reserved resource
 * @old: resource pointer
 */
int release_resource(struct resource *old)
{
	int retval;

	write_lock(&resource_lock);
	retval = __release_resource(old, true);
	write_unlock(&resource_lock);
	return retval;
}

EXPORT_SYMBOL(release_resource);
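/*
 * A minimal usage sketch (illustrative only, not part of this file):
 * a driver claiming a fixed, hypothetical I/O port window and releasing
 * it again on teardown.
 *
 *	static struct resource my_ports = {
 *		.name	= "my-device",
 *		.start	= 0x0300,
 *		.end	= 0x030f,
 *		.flags	= IORESOURCE_IO,
 *	};
 *
 *	if (request_resource(&ioport_resource, &my_ports))
 *		return -EBUSY;
 *	...
 *	release_resource(&my_ports);
 */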

static bool is_type_match(struct resource *p, unsigned long flags, unsigned long desc)
{
	return (p->flags & flags) == flags && (desc == IORES_DESC_NONE || desc == p->desc);
}

/**
 * find_next_iomem_res - Finds the lowest iomem resource that covers part of
 *			 [@start..@end].
 *
 * If a resource is found, returns 0 and @*res is overwritten with the part
 * of the resource that's within [@start..@end]; if none is found, returns
 * -ENODEV.  Returns -EINVAL for invalid parameters.
 *
 * @start:	start address of the resource searched for
 * @end:	end address of same resource
 * @flags:	flags which the resource must have
 * @desc:	descriptor the resource must have
 * @res:	return ptr, if resource found
 *
 * The caller must specify @start, @end, @flags, and @desc
 * (which may be IORES_DESC_NONE).
 */
static int find_next_iomem_res(resource_size_t start, resource_size_t end,
			       unsigned long flags, unsigned long desc,
			       struct resource *res)
{
	/* Skip children until we find a top level range that matches */
	bool skip_children = true;
	struct resource *p;

	if (!res)
		return -EINVAL;

	if (start >= end)
		return -EINVAL;

	read_lock(&resource_lock);

	for_each_resource(&iomem_resource, p, skip_children) {
		/* If we passed the resource we are looking for, stop */
		if (p->start > end) {
			p = NULL;
			break;
		}

		/* Skip until we find a range that matches what we look for */
		if (p->end < start)
			continue;

		/*
		 * We found a top level range that matches what we are looking
		 * for. Time to start checking children too.
		 */
		skip_children = false;

		/* Found a match, break */
		if (is_type_match(p, flags, desc))
			break;
	}

	if (p) {
		/* copy data */
		*res = (struct resource) {
			.start = max(start, p->start),
			.end = min(end, p->end),
			.flags = p->flags,
			.desc = p->desc,
			.parent = p->parent,
		};
	}

	read_unlock(&resource_lock);
	return p ? 0 : -ENODEV;
}

static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
				 unsigned long flags, unsigned long desc,
				 void *arg,
				 int (*func)(struct resource *, void *))
{
	struct resource res;
	int ret = -EINVAL;

	while (start < end &&
	       !find_next_iomem_res(start, end, flags, desc, &res)) {
		ret = (*func)(&res, arg);
		if (ret)
			break;

		start = res.end + 1;
	}

	return ret;
}

/**
 * walk_iomem_res_desc - Walks through iomem resources and calls func()
 *			 with matching resource ranges.
 *
 * @desc: I/O resource descriptor. Use IORES_DESC_NONE to skip @desc check.
 * @flags: I/O resource flags
 * @start: start addr
 * @end: end addr
 * @arg: function argument for the callback @func
 * @func: callback function that is called for each qualifying resource area
 *
 * All memory ranges which overlap [@start..@end] and also match @flags
 * and @desc are valid candidates.
 *
 * NOTE: For a new descriptor search, define a new IORES_DESC in
 * <linux/ioport.h> and set it in 'desc' of a target resource entry.
 */
int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
		u64 end, void *arg, int (*func)(struct resource *, void *))
{
	return __walk_iomem_res_desc(start, end, flags, desc, arg, func);
}
EXPORT_SYMBOL_GPL(walk_iomem_res_desc);
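/*
 * A minimal usage sketch (illustrative only, not part of this file):
 * counting how many busy memory ranges fall inside the first 4 GiB.
 *
 *	static int count_res(struct resource *res, void *arg)
 *	{
 *		(*(unsigned int *)arg)++;
 *		return 0;
 *	}
 *
 *	unsigned int n = 0;
 *
 *	walk_iomem_res_desc(IORES_DESC_NONE,
 *			    IORESOURCE_MEM | IORESOURCE_BUSY,
 *			    0, SZ_4G - 1, &n, count_res);
 */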

/*
 * This function calls the @func callback against all memory ranges of type
 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
 * Now, this function is only for System RAM, it deals with full ranges and
 * not PFNs. If resources are not PFN-aligned, dealing with PFNs can truncate
 * ranges.
 */
int walk_system_ram_res(u64 start, u64 end, void *arg,
			int (*func)(struct resource *, void *))
{
	unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg,
				     func);
}
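/*
 * A minimal usage sketch (illustrative only, not part of this file):
 * summing up how much System RAM lies in a given window.
 *
 *	static int add_size(struct resource *res, void *arg)
 *	{
 *		*(u64 *)arg += resource_size(res);
 *		return 0;
 *	}
 *
 *	u64 bytes = 0;
 *
 *	walk_system_ram_res(0, U64_MAX, &bytes, add_size);
 */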

/*
 * This function, being a variant of walk_system_ram_res(), calls the @func
 * callback against all memory ranges of type System RAM which are marked as
 * IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY in reversed order, i.e., from
 * higher to lower.
 */
int walk_system_ram_res_rev(u64 start, u64 end, void *arg,
				int (*func)(struct resource *, void *))
{
	struct resource res, *rams;
	int rams_size = 16, i;
	unsigned long flags;
	int ret = -1;

	/* create a list */
	rams = kvcalloc(rams_size, sizeof(struct resource), GFP_KERNEL);
	if (!rams)
		return ret;

	flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	i = 0;
	while ((start < end) &&
		(!find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res))) {
		if (i >= rams_size) {
			/* re-alloc */
			struct resource *rams_new;

			rams_new = kvrealloc(rams, (rams_size + 16) * sizeof(struct resource),
					     GFP_KERNEL);
			if (!rams_new)
				goto out;

			rams = rams_new;
			rams_size += 16;
		}

		rams[i++] = res;
		start = res.end + 1;
	}

	/* go reverse */
	for (i--; i >= 0; i--) {
		ret = (*func)(&rams[i], arg);
		if (ret)
			break;
	}

out:
	kvfree(rams);
	return ret;
}

/*
 * This function calls the @func callback against all memory ranges, which
 * are ranges marked as IORESOURCE_MEM and IORESOURCE_BUSY.
 */
int walk_mem_res(u64 start, u64 end, void *arg,
		 int (*func)(struct resource *, void *))
{
	unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;

	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg,
				     func);
}

/*
 * This function calls the @func callback against all memory ranges of type
 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
 * It is to be used only for System RAM.
 */
int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
			  void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	resource_size_t start, end;
	unsigned long flags;
	struct resource res;
	unsigned long pfn, end_pfn;
	int ret = -EINVAL;

	start = (u64) start_pfn << PAGE_SHIFT;
	end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
	flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	while (start < end &&
	       !find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res)) {
		pfn = PFN_UP(res.start);
		end_pfn = PFN_DOWN(res.end + 1);
		if (end_pfn > pfn)
			ret = (*func)(pfn, end_pfn - pfn, arg);
		if (ret)
			break;
		start = res.end + 1;
	}
	return ret;
}
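/*
 * A minimal usage sketch (illustrative only, not part of this file):
 * a PFN-based callback tallying the number of System RAM pages in some
 * hypothetical start_pfn/nr_pages window.
 *
 *	static int count_pages(unsigned long pfn, unsigned long nr_pages,
 *			       void *arg)
 *	{
 *		*(unsigned long *)arg += nr_pages;
 *		return 0;
 *	}
 *
 *	unsigned long pages = 0;
 *
 *	walk_system_ram_range(start_pfn, nr_pages, &pages, count_pages);
 */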

static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	return 1;
}

/*
 * This generic page_is_ram() returns true if specified address is
 * registered as System RAM in iomem_resource list.
 */
int __weak page_is_ram(unsigned long pfn)
{
	return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
}
EXPORT_SYMBOL_GPL(page_is_ram);

static int __region_intersects(struct resource *parent, resource_size_t start,
			       size_t size, unsigned long flags,
			       unsigned long desc)
{
	int type = 0; int other = 0;
	struct resource *p, *dp;
	struct resource res, o;
	bool covered;

	res = DEFINE_RES(start, size, 0);

	for (p = parent->child; p ; p = p->sibling) {
		if (!resource_intersection(p, &res, &o))
			continue;
		if (is_type_match(p, flags, desc)) {
			type++;
			continue;
		}
		/*
		 * Continue to search in descendant resources as if the
		 * matched descendant resources cover some ranges of 'p'.
		 *
		 * |------------- "CXL Window 0" ------------|
		 * |-- "System RAM" --|
		 *
		 * behaves like the following fake resource tree when
		 * searching for "System RAM".
		 *
		 * |-- "System RAM" --||-- "CXL Window 0a" --|
		 */
		covered = false;
		for_each_resource(p, dp, false) {
			if (!resource_overlaps(dp, &res))
				continue;
			if (is_type_match(dp, flags, desc)) {
				type++;
				/*
				 * Range from 'o.start' to 'dp->start'
				 * isn't covered by matched resource.
				 */
				if (dp->start > o.start)
					break;
				if (dp->end >= o.end) {
					covered = true;
					break;
				}
				/* Remove covered range */
				o.start = max(o.start, dp->end + 1);
			}
		}
		if (!covered)
			other++;
	}

	if (type == 0)
		return REGION_DISJOINT;

	if (other == 0)
		return REGION_INTERSECTS;

	return REGION_MIXED;
}

/**
 * region_intersects() - determine intersection of region with known resources
 * @start: region start address
 * @size: size of region
 * @flags: flags of resource (in iomem_resource)
 * @desc: descriptor of resource (in iomem_resource) or IORES_DESC_NONE
 *
 * Check if the specified region partially overlaps or fully eclipses a
 * resource identified by @flags and @desc (optional with IORES_DESC_NONE).
 * Return REGION_DISJOINT if the region does not overlap @flags/@desc,
 * return REGION_MIXED if the region overlaps @flags/@desc and another
 * resource, and return REGION_INTERSECTS if the region overlaps @flags/@desc
 * and no other defined resource. Note that REGION_INTERSECTS is also
 * returned in the case when the specified region overlaps RAM and undefined
 * memory holes.
 *
 * region_intersects() is used by memory remapping functions to ensure
 * the user is not remapping RAM and is a vast speed up over walking
 * through the resource table page by page.
 */
int region_intersects(resource_size_t start, size_t size, unsigned long flags,
		      unsigned long desc)
{
	int ret;

	read_lock(&resource_lock);
	ret = __region_intersects(&iomem_resource, start, size, flags, desc);
	read_unlock(&resource_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(region_intersects);
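/*
 * A minimal usage sketch (illustrative only, not part of this file):
 * refusing to remap a physical window that touches System RAM, the same
 * pattern the memory remapping helpers rely on.
 *
 *	if (region_intersects(phys_addr, size, IORESOURCE_SYSTEM_RAM,
 *			      IORES_DESC_NONE) != REGION_DISJOINT)
 *		return -EINVAL;
 */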

void __weak arch_remove_reservations(struct resource *avail)
{
}

static void resource_clip(struct resource *res, resource_size_t min,
			  resource_size_t max)
{
	if (res->start < min)
		res->start = min;
	if (res->end > max)
		res->end = max;
}

/*
 * Find empty space in the resource tree with the given range and
 * alignment constraints
 */
static int __find_resource_space(struct resource *root, struct resource *old,
				 struct resource *new, resource_size_t size,
				 struct resource_constraint *constraint)
{
	struct resource *this = root->child;
	struct resource tmp = *new, avail, alloc;
	resource_alignf alignf = constraint->alignf;

	tmp.start = root->start;
	/*
	 * Skip past an allocated resource that starts at 0, since the assignment
	 * of this->start - 1 to tmp->end below would cause an underflow.
	 */
	if (this && this->start == root->start) {
		tmp.start = (this == old) ? old->start : this->end + 1;
		this = this->sibling;
	}
	for (;;) {
		if (this)
			tmp.end = (this == old) ? this->end : this->start - 1;
		else
			tmp.end = root->end;

		if (tmp.end < tmp.start)
			goto next;

		resource_clip(&tmp, constraint->min, constraint->max);
		arch_remove_reservations(&tmp);

		/* Check for overflow after ALIGN() */
		avail.start = ALIGN(tmp.start, constraint->align);
		avail.end = tmp.end;
		avail.flags = new->flags & ~IORESOURCE_UNSET;
		if (avail.start >= tmp.start) {
			alloc.flags = avail.flags;
			if (alignf) {
				alloc.start = alignf(constraint->alignf_data,
						     &avail, size, constraint->align);
			} else {
				alloc.start = avail.start;
			}
			alloc.end = alloc.start + size - 1;
			if (alloc.start <= alloc.end &&
			    resource_contains(&avail, &alloc)) {
				new->start = alloc.start;
				new->end = alloc.end;
				return 0;
			}
		}

next:		if (!this || this->end == root->end)
			break;

		if (this != old)
			tmp.start = this->end + 1;
		this = this->sibling;
	}
	return -EBUSY;
}

/**
 * find_resource_space - Find empty space in the resource tree
 * @root:	Root resource descriptor
 * @new:	Resource descriptor awaiting an empty resource space
 * @size:	The minimum size of the empty space
 * @constraint:	The range and alignment constraints to be met
 *
 * Finds an empty space under @root in the resource tree satisfying range and
 * alignment @constraints.
 *
 * Return:
 * * %0		- if successful, @new members start, end, and flags are altered.
 * * %-EBUSY	- if no empty space was found.
 */
int find_resource_space(struct resource *root, struct resource *new,
			resource_size_t size,
			struct resource_constraint *constraint)
{
	return __find_resource_space(root, NULL, new, size, constraint);
}
EXPORT_SYMBOL_GPL(find_resource_space);
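/*
 * A minimal usage sketch (illustrative only, not part of this file):
 * asking for a 64 KiB hole, 64 KiB aligned, anywhere below 4 GiB.
 * Locking is elided here; allocate_resource() below shows the locked
 * path that also claims the space it found.
 *
 *	struct resource_constraint constraint = {
 *		.min	= 0,
 *		.max	= SZ_4G - 1,
 *		.align	= SZ_64K,
 *	};
 *	struct resource new = {
 *		.flags	= IORESOURCE_MEM,
 *	};
 *
 *	if (!find_resource_space(&iomem_resource, &new, SZ_64K, &constraint))
 *		pr_info("found hole %pR\n", &new);
 */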

/**
 * reallocate_resource - allocate a slot in the resource tree given range & alignment.
 *	The resource will be relocated if the new size cannot be reallocated in the
 *	current location.
 *
 * @root: root resource descriptor
 * @old:  resource descriptor desired by caller
 * @newsize: new size of the resource descriptor
 * @constraint: the memory range and alignment constraints to be met.
 */
static int reallocate_resource(struct resource *root, struct resource *old,
			       resource_size_t newsize,
			       struct resource_constraint *constraint)
{
	int err = 0;
	struct resource new = *old;
	struct resource *conflict;

	write_lock(&resource_lock);

	if ((err = __find_resource_space(root, old, &new, newsize, constraint)))
		goto out;

	if (resource_contains(&new, old)) {
		old->start = new.start;
		old->end = new.end;
		goto out;
	}

	if (old->child) {
		err = -EBUSY;
		goto out;
	}

	if (resource_contains(old, &new)) {
		old->start = new.start;
		old->end = new.end;
	} else {
		__release_resource(old, true);
		*old = new;
		conflict = __request_resource(root, old);
		BUG_ON(conflict);
	}
out:
	write_unlock(&resource_lock);
	return err;
}


/**
 * allocate_resource - allocate empty slot in the resource tree given range & alignment.
 *	The resource will be reallocated with a new size if it was already allocated.
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 * @size: requested resource region size
 * @min: minimum boundary to allocate
 * @max: maximum boundary to allocate
 * @align: alignment requested, in bytes
 * @alignf: alignment function, optional, called if not NULL
 * @alignf_data: arbitrary data to pass to the @alignf function
 */
int allocate_resource(struct resource *root, struct resource *new,
		      resource_size_t size, resource_size_t min,
		      resource_size_t max, resource_size_t align,
		      resource_alignf alignf,
		      void *alignf_data)
{
	int err;
	struct resource_constraint constraint;

	constraint.min = min;
	constraint.max = max;
	constraint.align = align;
	constraint.alignf = alignf;
	constraint.alignf_data = alignf_data;

	if (new->parent) {
		/* resource is already allocated, try reallocating with
		   the new constraints */
		return reallocate_resource(root, new, size, &constraint);
	}

	write_lock(&resource_lock);
	err = find_resource_space(root, new, size, &constraint);
	if (err >= 0 && __request_resource(root, new))
		err = -EBUSY;
	write_unlock(&resource_lock);
	return err;
}

EXPORT_SYMBOL(allocate_resource);
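/*
 * A minimal usage sketch (illustrative only, not part of this file):
 * carving a 4 KiB, page-aligned region out of a hypothetical window of
 * the iomem tree, the way a bus driver might place a device aperture.
 *
 *	static struct resource my_window = {
 *		.name	= "my-window",
 *		.flags	= IORESOURCE_MEM,
 *	};
 *
 *	if (allocate_resource(&iomem_resource, &my_window, SZ_4K,
 *			      0x80000000, 0x8fffffff, SZ_4K, NULL, NULL))
 *		return -EBUSY;
 */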

/**
 * lookup_resource - find an existing resource by a resource start address
 * @root: root resource descriptor
 * @start: resource start address
 *
 * Returns a pointer to the resource if found, NULL otherwise
 */
struct resource *lookup_resource(struct resource *root, resource_size_t start)
{
	struct resource *res;

	read_lock(&resource_lock);
	for (res = root->child; res; res = res->sibling) {
		if (res->start == start)
			break;
	}
	read_unlock(&resource_lock);

	return res;
}

/*
 * Insert a resource into the resource tree. If successful, return NULL,
 * otherwise return the conflicting resource (compare to __request_resource())
 */
static struct resource * __insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *first, *next;

	for (;; parent = first) {
		first = __request_resource(parent, new);
		if (!first)
			return first;

		if (first == parent)
			return first;
		if (WARN_ON(first == new))	/* duplicated insertion */
			return first;

		if ((first->start > new->start) || (first->end < new->end))
			break;
		if ((first->start == new->start) && (first->end == new->end))
			break;
	}

	for (next = first; ; next = next->sibling) {
		/* Partial overlap? Bad, and unfixable */
		if (next->start < new->start || next->end > new->end)
			return next;
		if (!next->sibling)
			break;
		if (next->sibling->start > new->end)
			break;
	}

	new->parent = parent;
	new->sibling = next->sibling;
	new->child = first;

	next->sibling = NULL;
	for (next = first; next; next = next->sibling)
		next->parent = new;

	if (parent->child == first) {
		parent->child = new;
	} else {
		next = parent->child;
		while (next->sibling != first)
			next = next->sibling;
		next->sibling = new;
	}
	return NULL;
}

/**
 * insert_resource_conflict - Inserts resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns NULL on success, or the conflicting resource if the resource
 * can't be inserted.
 *
 * This function is equivalent to request_resource_conflict when no conflict
 * happens. If a conflict happens, and the conflicting resources
 * entirely fit within the range of the new resource, then the new
 * resource is inserted and the conflicting resources become children of
 * the new resource.
 *
 * This function is intended for producers of resources, such as FW modules
 * and bus drivers.
 */
struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __insert_resource(parent, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * insert_resource - Inserts a resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns 0 on success, -EBUSY if the resource can't be inserted.
 *
 * This function is intended for producers of resources, such as FW modules
 * and bus drivers.
 */
int insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	conflict = insert_resource_conflict(parent, new);
	return conflict ? -EBUSY : 0;
}
EXPORT_SYMBOL_GPL(insert_resource);
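/*
 * A minimal usage sketch (illustrative only, not part of this file):
 * a firmware driver publishing a hypothetical region it discovered;
 * unlike request_resource(), any existing ranges that fit entirely
 * inside it simply become its children.
 *
 *	static struct resource fw_region = {
 *		.name	= "fw-region",
 *		.start	= 0xfed00000,
 *		.end	= 0xfed00fff,
 *		.flags	= IORESOURCE_MEM,
 *	};
 *
 *	if (insert_resource(&iomem_resource, &fw_region))
 *		pr_warn("fw-region could not be inserted\n");
 */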

/**
 * insert_resource_expand_to_fit - Insert a resource into the resource tree
 * @root: root resource descriptor
 * @new: new resource to insert
 *
 * Insert a resource into the resource tree, possibly expanding it in order
 * to make it encompass any conflicting resources.
 */
void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
{
	if (new->parent)
		return;

	write_lock(&resource_lock);
	for (;;) {
		struct resource *conflict;

		conflict = __insert_resource(root, new);
		if (!conflict)
			break;
		if (conflict == root)
			break;

		/* Ok, expand resource to cover the conflict, then try again .. */
		if (conflict->start < new->start)
			new->start = conflict->start;
		if (conflict->end > new->end)
			new->end = conflict->end;

		pr_info("Expanded resource %s due to conflict with %s\n", new->name, conflict->name);
	}
	write_unlock(&resource_lock);
}
/*
 * Not for general consumption, only early boot memory map parsing, PCI
 * resource discovery, and late discovery of CXL resources are expected
 * to use this interface. The former are built-in and only the latter,
 * CXL, is a module.
 */
EXPORT_SYMBOL_NS_GPL(insert_resource_expand_to_fit, "CXL");

/**
 * remove_resource - Remove a resource in the resource tree
 * @old: resource to remove
 *
 * Returns 0 on success, -EINVAL if the resource is not valid.
 *
 * This function removes a resource previously inserted by insert_resource()
 * or insert_resource_conflict(), and moves the children (if any) up to
 * where they were before.  insert_resource() and insert_resource_conflict()
 * insert a new resource, and move any conflicting resources down to the
 * children of the new resource.
 *
 * insert_resource(), insert_resource_conflict() and remove_resource() are
 * intended for producers of resources, such as FW modules and bus drivers.
 */
int remove_resource(struct resource *old)
{
	int retval;

	write_lock(&resource_lock);
	retval = __release_resource(old, false);
	write_unlock(&resource_lock);
	return retval;
}
EXPORT_SYMBOL_GPL(remove_resource);

static int __adjust_resource(struct resource *res, resource_size_t start,
				resource_size_t size)
{
	struct resource *tmp, *parent = res->parent;
	resource_size_t end = start + size - 1;
	int result = -EBUSY;

	if (!parent)
		goto skip;

	if ((start < parent->start) || (end > parent->end))
		goto out;

	if (res->sibling && (res->sibling->start <= end))
		goto out;

	tmp = parent->child;
	if (tmp != res) {
		while (tmp->sibling != res)
			tmp = tmp->sibling;
		if (start <= tmp->end)
			goto out;
	}

skip:
	for (tmp = res->child; tmp; tmp = tmp->sibling)
		if ((tmp->start < start) || (tmp->end > end))
			goto out;

	res->start = start;
	res->end = end;
	result = 0;

 out:
	return result;
}

/**
 * adjust_resource - modify a resource's start and size
 * @res: resource to modify
 * @start: new start value
 * @size: new size
 *
 * Given an existing resource, change its start and size to match the
 * arguments.  Returns 0 on success, -EBUSY if it can't fit.
 * Existing children of the resource are assumed to be immutable.
 */
int adjust_resource(struct resource *res, resource_size_t start,
		    resource_size_t size)
{
	int result;

	write_lock(&resource_lock);
	result = __adjust_resource(res, start, size);
	write_unlock(&resource_lock);
	return result;
}
EXPORT_SYMBOL(adjust_resource);
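/*
 * A minimal usage sketch (illustrative only, not part of this file):
 * growing the hypothetical my_window from the allocate_resource() sketch
 * above from 4 KiB to 8 KiB at the same base; this fails with -EBUSY if
 * a sibling already occupies the new range.
 *
 *	if (adjust_resource(&my_window, my_window.start, SZ_8K))
 *		pr_warn("cannot grow my_window\n");
 */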

static void __init
__reserve_region_with_split(struct resource *root, resource_size_t start,
			    resource_size_t end, const char *name)
{
	struct resource *parent = root;
	struct resource *conflict;
	struct resource *res = alloc_resource(GFP_ATOMIC);
	struct resource *next_res = NULL;
	int type = resource_type(root);

	if (!res)
		return;

	res->name = name;
	res->start = start;
	res->end = end;
	res->flags = type | IORESOURCE_BUSY;
	res->desc = IORES_DESC_NONE;

	while (1) {

		conflict = __request_resource(parent, res);
		if (!conflict) {
			if (!next_res)
				break;
			res = next_res;
			next_res = NULL;
			continue;
		}

		/* conflict covered whole area */
		if (conflict->start <= res->start &&
				conflict->end >= res->end) {
			free_resource(res);
			WARN_ON(next_res);
			break;
		}

		/* failed, split and try again */
		if (conflict->start > res->start) {
			end = res->end;
			res->end = conflict->start - 1;
			if (conflict->end < end) {
				next_res = alloc_resource(GFP_ATOMIC);
				if (!next_res) {
					free_resource(res);
					break;
				}
				next_res->name = name;
				next_res->start = conflict->end + 1;
				next_res->end = end;
				next_res->flags = type | IORESOURCE_BUSY;
				next_res->desc = IORES_DESC_NONE;
			}
		} else {
			res->start = conflict->end + 1;
		}
	}

}

void __init
reserve_region_with_split(struct resource *root, resource_size_t start,
			  resource_size_t end, const char *name)
{
	int abort = 0;

	write_lock(&resource_lock);
	if (root->start > start || root->end < end) {
		pr_err("requested range [0x%llx-0x%llx] not in root %pr\n",
		       (unsigned long long)start, (unsigned long long)end,
		       root);
		if (start > root->end || end < root->start)
			abort = 1;
		else {
			if (end > root->end)
				end = root->end;
			if (start < root->start)
				start = root->start;
			pr_err("fixing request to [0x%llx-0x%llx]\n",
			       (unsigned long long)start,
			       (unsigned long long)end);
		}
		dump_stack();
	}
	if (!abort)
		__reserve_region_with_split(root, start, end, name);
	write_unlock(&resource_lock);
}

/**
 * resource_alignment - calculate resource's alignment
 * @res: resource pointer
 *
 * Returns alignment on success, 0 (invalid alignment) on failure.
 */
resource_size_t resource_alignment(struct resource *res)
{
	switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
	case IORESOURCE_SIZEALIGN:
		return resource_size(res);
	case IORESOURCE_STARTALIGN:
		return res->start;
	default:
		return 0;
	}
}

/*
 * This is compatibility stuff for IO resources.
 *
 * Note how this, unlike the above, knows about
 * the IO flag meanings (busy etc).
 *
 * request_region creates a new busy region.
 *
 * release_region releases a matching busy region.
 */

static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait);

static struct inode *iomem_inode;

#ifdef CONFIG_IO_STRICT_DEVMEM
static void revoke_iomem(struct resource *res)
{
	/* pairs with smp_store_release() in iomem_init_inode() */
	struct inode *inode = smp_load_acquire(&iomem_inode);

	/*
	 * Check that the initialization has completed. Losing the race
	 * is ok because it means drivers are claiming resources before
	 * the fs_initcall level of init and prevent iomem_get_mapping users
	 * from establishing mappings.
	 */
	if (!inode)
		return;

	/*
	 * The expectation is that the driver has successfully marked
	 * the resource busy by this point, so devmem_is_allowed()
	 * should start returning false, however for performance this
	 * does not iterate the entire resource range.
	 */
	if (devmem_is_allowed(PHYS_PFN(res->start)) &&
	    devmem_is_allowed(PHYS_PFN(res->end))) {
		/*
		 * *cringe* iomem=relaxed says "go ahead, what's the
		 * worst that can happen?"
		 */
		return;
	}

	unmap_mapping_range(inode->i_mapping, res->start, resource_size(res), 1);
}
#else
static void revoke_iomem(struct resource *res) {}
#endif

struct address_space *iomem_get_mapping(void)
{
	/*
	 * This function is only called from file open paths, hence guaranteed
	 * that fs_initcalls have completed and no need to check for NULL. But
	 * since revoke_iomem can be called before the initcall we still need
	 * the barrier to appease checkers.
	 */
	return smp_load_acquire(&iomem_inode)->i_mapping;
}

static int __request_region_locked(struct resource *res, struct resource *parent,
				   resource_size_t start, resource_size_t n,
				   const char *name, int flags)
{
	DECLARE_WAITQUEUE(wait, current);

	res->name = name;
	res->start = start;
	res->end = start + n - 1;

	for (;;) {
		struct resource *conflict;

		res->flags = resource_type(parent) | resource_ext_type(parent);
		res->flags |= IORESOURCE_BUSY | flags;
		res->desc = parent->desc;

		conflict = __request_resource(parent, res);
		if (!conflict)
			break;
		/*
		 * mm/hmm.c reserves physical addresses which then
		 * become unavailable to other users.  Conflicts are
		 * not expected.  Warn to aid debugging if encountered.
		 */
		if (parent == &iomem_resource &&
		    conflict->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY) {
			pr_warn("Unaddressable device %s %pR conflicts with %pR\n",
				conflict->name, conflict, res);
		}
		if (conflict != parent) {
			if (!(conflict->flags & IORESOURCE_BUSY)) {
				parent = conflict;
				continue;
			}
		}
		if (conflict->flags & flags & IORESOURCE_MUXED) {
			add_wait_queue(&muxed_resource_wait, &wait);
			write_unlock(&resource_lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule();
			remove_wait_queue(&muxed_resource_wait, &wait);
			write_lock(&resource_lock);
			continue;
		}
		/* Uhhuh, that didn't work out.. */
		return -EBUSY;
	}

	return 0;
}

/**
 * __request_region - create a new busy resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 * @name: reserving caller's ID string
 * @flags: IO resource flags
 */
struct resource *__request_region(struct resource *parent,
				  resource_size_t start, resource_size_t n,
				  const char *name, int flags)
{
	struct resource *res = alloc_resource(GFP_KERNEL);
	int ret;

	if (!res)
		return NULL;

	write_lock(&resource_lock);
	ret = __request_region_locked(res, parent, start, n, name, flags);
	write_unlock(&resource_lock);

	if (ret) {
		free_resource(res);
		return NULL;
	}

	if (parent == &iomem_resource)
		revoke_iomem(res);

	return res;
}
EXPORT_SYMBOL(__request_region);
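/*
 * A minimal usage sketch (illustrative only, not part of this file):
 * drivers normally go through the request_mem_region()/release_mem_region()
 * wrappers from <linux/ioport.h> rather than calling __request_region()
 * directly.
 *
 *	if (!request_mem_region(base, SZ_4K, "my-device"))
 *		return -EBUSY;
 *	...
 *	release_mem_region(base, SZ_4K);
 */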

/**
 * __release_region - release a previously reserved resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * The described resource region must match a currently busy region.
 */
void __release_region(struct resource *parent, resource_size_t start,
		      resource_size_t n)
{
	struct resource **p;
	resource_size_t end;

	p = &parent->child;
	end = start + n - 1;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *res = *p;

		if (!res)
			break;
		if (res->start <= start && res->end >= end) {
			if (!(res->flags & IORESOURCE_BUSY)) {
				p = &res->child;
				continue;
			}
			if (res->start != start || res->end != end)
				break;
			*p = res->sibling;
			write_unlock(&resource_lock);
			if (res->flags & IORESOURCE_MUXED)
				wake_up(&muxed_resource_wait);
			free_resource(res);
			return;
		}
		p = &res->sibling;
	}

	write_unlock(&resource_lock);

	pr_warn("Trying to free nonexistent resource <%pa-%pa>\n", &start, &end);
}
EXPORT_SYMBOL(__release_region);

#ifdef CONFIG_MEMORY_HOTREMOVE
static void append_child_to_parent(struct resource *new_parent, struct resource *new_child)
{
	struct resource *child;

	child = new_parent->child;
	if (child) {
		while (child->sibling)
			child = child->sibling;
		child->sibling = new_child;
	} else {
		new_parent->child = new_child;
	}
	new_child->parent = new_parent;
	new_child->sibling = NULL;
}

/*
 * Reparent all child resources that no longer belong to "low" after a split to
 * "high". Note that "high" does not have any children, because "low" is the
 * original resource and "high" is a new resource. Treat "low" as the original
 * resource being split and defer its range adjustment to __adjust_resource().
 */
static void reparent_children_after_split(struct resource *low,
					  struct resource *high,
					  resource_size_t split_addr)
{
	struct resource *child, *next, **p;

	p = &low->child;
	while ((child = *p)) {
		next = child->sibling;
		if (child->start > split_addr) {
			/* unlink child */
			*p = next;
			append_child_to_parent(high, child);
		} else {
			p = &child->sibling;
		}
	}
}

/**
 * release_mem_region_adjustable - release a previously reserved memory region
 * @start: resource start address
 * @size: resource region size
 *
 * This interface is intended for memory hot-delete.  The requested region
 * is released from a currently busy memory resource.  The requested region
 * must either match exactly or fit into a single busy resource entry.  In
 * the latter case, the remaining resource is adjusted accordingly.
 *
 * Note:
 * - Additional release conditions, such as overlapping region, can be
 *   supported after they are confirmed as valid cases.
 * - When a busy memory resource gets split into two entries, its children are
 *   reassigned to the correct parent based on their range. If a child memory
 *   resource overlaps with more than one parent, enhance the logic as needed.
 */
void release_mem_region_adjustable(resource_size_t start, resource_size_t size)
{
	struct resource *parent = &iomem_resource;
	struct resource *new_res = NULL;
	bool alloc_nofail = false;
	struct resource **p;
	struct resource *res;
	resource_size_t end;

	end = start + size - 1;
	if (WARN_ON_ONCE((start < parent->start) || (end > parent->end)))
		return;

	/*
	 * We free up quite a lot of memory on memory hotunplug (esp., memmap),
	 * just before releasing the region. This is highly unlikely to
	 * fail - let's play safe and make it never fail as the caller cannot
	 * perform any error handling (e.g., trying to re-add memory will fail
	 * similarly).
	 */
retry:
	new_res = alloc_resource(GFP_KERNEL | (alloc_nofail ? __GFP_NOFAIL : 0));

	p = &parent->child;
	write_lock(&resource_lock);

	while ((res = *p)) {
		if (res->start >= end)
			break;

		/* look at the next resource if this one does not contain the range */
		if (res->start > start || res->end < end) {
			p = &res->sibling;
			continue;
		}

		if (!(res->flags & IORESOURCE_MEM))
			break;

		if (!(res->flags & IORESOURCE_BUSY)) {
			p = &res->child;
			continue;
		}

		/* found the target resource; let's adjust accordingly */
		if (res->start == start && res->end == end) {
			/* free the whole entry */
			*p = res->sibling;
			free_resource(res);
		} else if (res->start == start && res->end != end) {
			/* adjust the start */
			WARN_ON_ONCE(__adjust_resource(res, end + 1,
						       res->end - end));
		} else if (res->start != start && res->end == end) {
			/* adjust the end */
			WARN_ON_ONCE(__adjust_resource(res, res->start,
						       start - res->start));
		} else {
			/* split into two entries - we need a new resource */
			if (!new_res) {
				new_res = alloc_resource(GFP_ATOMIC);
				if (!new_res) {
					alloc_nofail = true;
					write_unlock(&resource_lock);
					goto retry;
				}
			}
			new_res->name = res->name;
			new_res->start = end + 1;
			new_res->end = res->end;
			new_res->flags = res->flags;
			new_res->desc = res->desc;
			new_res->parent = res->parent;
			new_res->sibling = res->sibling;
			new_res->child = NULL;
			reparent_children_after_split(res, new_res, end);

			if (WARN_ON_ONCE(__adjust_resource(res, res->start,
							   start - res->start)))
				break;
			res->sibling = new_res;
			new_res = NULL;
		}

		break;
	}

	write_unlock(&resource_lock);
	free_resource(new_res);
}
#endif	/* CONFIG_MEMORY_HOTREMOVE */

#ifdef CONFIG_MEMORY_HOTPLUG
static bool system_ram_resources_mergeable(struct resource *r1,
					   struct resource *r2)
{
	/* We assume either r1 or r2 is IORESOURCE_SYSRAM_MERGEABLE. */
	return r1->flags == r2->flags && r1->end + 1 == r2->start &&
	       r1->name == r2->name && r1->desc == r2->desc &&
	       !r1->child && !r2->child;
}

/**
 * merge_system_ram_resource - mark the System RAM resource mergeable and try to
 *	merge it with adjacent, mergeable resources
 * @res: resource descriptor
 *
 * This interface is intended for memory hotplug, whereby lots of contiguous
 * system ram resources are added (e.g., via add_memory*()) by a driver, and
 * the actual resource boundaries are not of interest (e.g., it might be
 * relevant for DIMMs). Only resources that are marked mergeable, that have the
 * same parent, and that don't have any children are considered. All mergeable
 * resources must be immutable during the request.
 *
 * Note:
 * - The caller has to make sure that no pointers to resources that are
 *   marked mergeable are used anymore after this call - the resource might
 *   be freed and the pointer might be stale!
 * - release_mem_region_adjustable() will split on demand on memory hotunplug
 */
void merge_system_ram_resource(struct resource *res)
{
	const unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	struct resource *cur;

	if (WARN_ON_ONCE((res->flags & flags) != flags))
		return;

	write_lock(&resource_lock);
	res->flags |= IORESOURCE_SYSRAM_MERGEABLE;

	/* Try to merge with next item in the list. */
	cur = res->sibling;
	if (cur && system_ram_resources_mergeable(res, cur)) {
		res->end = cur->end;
		res->sibling = cur->sibling;
		free_resource(cur);
	}

	/* Try to merge with previous item in the list. */
	cur = res->parent->child;
	while (cur && cur->sibling != res)
		cur = cur->sibling;
	if (cur && system_ram_resources_mergeable(cur, res)) {
		cur->end = res->end;
		cur->sibling = res->sibling;
		free_resource(res);
	}
	write_unlock(&resource_lock);
}
#endif	/* CONFIG_MEMORY_HOTPLUG */

/*
 * Managed region resource
 */
static void devm_resource_release(struct device *dev, void *ptr)
{
	struct resource **r = ptr;

	release_resource(*r);
}

/**
 * devm_request_resource() - request and reserve an I/O or memory resource
 * @dev: device for which to request the resource
 * @root: root of the resource tree from which to request the resource
 * @new: descriptor of the resource to request
 *
 * This is a device-managed version of request_resource(). There is usually
 * no need to release resources requested by this function explicitly since
 * that will be taken care of when the device is unbound from its driver.
 * If for some reason the resource needs to be released explicitly, because
 * of ordering issues for example, drivers must call devm_release_resource()
 * rather than the regular release_resource().
 *
 * When a conflict is detected between any existing resources and the newly
 * requested resource, an error message will be printed.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int devm_request_resource(struct device *dev, struct resource *root,
			  struct resource *new)
{
	struct resource *conflict, **ptr;

	ptr = devres_alloc(devm_resource_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	*ptr = new;

	conflict = request_resource_conflict(root, new);
	if (conflict) {
		dev_err(dev, "resource collision: %pR conflicts with %s %pR\n",
			new, conflict->name, conflict);
		devres_free(ptr);
		return -EBUSY;
	}

	devres_add(dev, ptr);
	return 0;
}
EXPORT_SYMBOL(devm_request_resource);
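/*
 * A minimal usage sketch (illustrative only, not part of this file):
 * a probe() routine handing cleanup over to devres, so no explicit
 * release is needed on the error or unbind paths; the address and names
 * are hypothetical.
 *
 *	static struct resource my_regs = DEFINE_RES_MEM(0xfe000000, SZ_4K);
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		ret = devm_request_resource(&pdev->dev, &iomem_resource,
 *					    &my_regs);
 *		if (ret)
 *			return ret;
 *		...
 *	}
 */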
1660 
devm_resource_match(struct device * dev,void * res,void * data)1661 static int devm_resource_match(struct device *dev, void *res, void *data)
1662 {
1663 	struct resource **ptr = res;
1664 
1665 	return *ptr == data;
1666 }
1667 
1668 /**
1669  * devm_release_resource() - release a previously requested resource
1670  * @dev: device for which to release the resource
1671  * @new: descriptor of the resource to release
1672  *
1673  * Releases a resource previously requested using devm_request_resource().
1674  */
devm_release_resource(struct device * dev,struct resource * new)1675 void devm_release_resource(struct device *dev, struct resource *new)
1676 {
1677 	WARN_ON(devres_release(dev, devm_resource_release, devm_resource_match,
1678 			       new));
1679 }
1680 EXPORT_SYMBOL(devm_release_resource);
1681 
1682 struct region_devres {
1683 	struct resource *parent;
1684 	resource_size_t start;
1685 	resource_size_t n;
1686 };
1687 
devm_region_release(struct device * dev,void * res)1688 static void devm_region_release(struct device *dev, void *res)
1689 {
1690 	struct region_devres *this = res;
1691 
1692 	__release_region(this->parent, this->start, this->n);
1693 }
1694 
devm_region_match(struct device * dev,void * res,void * match_data)1695 static int devm_region_match(struct device *dev, void *res, void *match_data)
1696 {
1697 	struct region_devres *this = res, *match = match_data;
1698 
1699 	return this->parent == match->parent &&
1700 		this->start == match->start && this->n == match->n;
1701 }
1702 
1703 struct resource *
__devm_request_region(struct device * dev,struct resource * parent,resource_size_t start,resource_size_t n,const char * name)1704 __devm_request_region(struct device *dev, struct resource *parent,
1705 		      resource_size_t start, resource_size_t n, const char *name)
1706 {
1707 	struct region_devres *dr = NULL;
1708 	struct resource *res;
1709 
1710 	dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
1711 			  GFP_KERNEL);
1712 	if (!dr)
1713 		return NULL;
1714 
1715 	dr->parent = parent;
1716 	dr->start = start;
1717 	dr->n = n;
1718 
1719 	res = __request_region(parent, start, n, name, 0);
1720 	if (res)
1721 		devres_add(dev, dr);
1722 	else
1723 		devres_free(dr);
1724 
1725 	return res;
1726 }
1727 EXPORT_SYMBOL(__devm_request_region);
1728 
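/*
 * Illustrative sketch (not part of this file): callers normally reach
 * __devm_request_region() through the devm_request_region() and
 * devm_request_mem_region() wrappers from <linux/ioport.h>, e.g. in a
 * probe path (hypothetical device, range taken from its platform
 * resource):
 *
 *	struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *
 *	if (!r || !devm_request_mem_region(&pdev->dev, r->start,
 *					   resource_size(r), pdev->name))
 *		return -EBUSY;
 *
 * The matching __release_region() then happens via devm_region_release()
 * when the device is unbound.
 */
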
1729 void __devm_release_region(struct device *dev, struct resource *parent,
1730 			   resource_size_t start, resource_size_t n)
1731 {
1732 	struct region_devres match_data = { parent, start, n };
1733 
1734 	WARN_ON(devres_release(dev, devm_region_release, devm_region_match,
1735 			       &match_data));
1736 }
1737 EXPORT_SYMBOL(__devm_release_region);
1738 
1739 /*
1740  * Reserve I/O ports or memory based on "reserve=" kernel parameter.
1741  */
1742 #define MAXRESERVE 4
1743 static int __init reserve_setup(char *str)
1744 {
1745 	static int reserved;
1746 	static struct resource reserve[MAXRESERVE];
1747 
1748 	for (;;) {
1749 		unsigned int io_start, io_num;
1750 		int x = reserved;
1751 		struct resource *parent;
1752 
1753 		if (get_option(&str, &io_start) != 2)
1754 			break;
1755 		if (get_option(&str, &io_num) == 0)
1756 			break;
1757 		if (x < MAXRESERVE) {
1758 			struct resource *res = reserve + x;
1759 
1760 			/*
1761 			 * If the region starts below 0x10000, we assume it's
1762 			 * I/O port space; otherwise assume it's memory.
1763 			 */
1764 			if (io_start < 0x10000) {
1765 				*res = DEFINE_RES_IO_NAMED(io_start, io_num, "reserved");
1766 				parent = &ioport_resource;
1767 			} else {
1768 				*res = DEFINE_RES_MEM_NAMED(io_start, io_num, "reserved");
1769 				parent = &iomem_resource;
1770 			}
1771 			res->flags |= IORESOURCE_BUSY;
1772 			if (request_resource(parent, res) == 0)
1773 				reserved = x+1;
1774 		}
1775 	}
1776 	return 1;
1777 }
1778 __setup("reserve=", reserve_setup);
1779 
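/*
 * Example (illustrative, values hypothetical): booting with
 *
 *	reserve=0x320,32 reserve=0xfed00000,0x1000
 *
 * marks I/O ports 0x320-0x33f busy under ioport_resource and the memory
 * range 0xfed00000-0xfed00fff busy under iomem_resource, keeping drivers
 * from claiming them.
 */
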
1780 /*
1781  * Check if the requested addr and size span more than any slot in the
1782  * iomem resource tree.
1783  */
1784 int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
1785 {
1786 	resource_size_t end = addr + size - 1;
1787 	struct resource *p;
1788 	int err = 0;
1789 
1790 	read_lock(&resource_lock);
1791 	for_each_resource(&iomem_resource, p, false) {
1792 		/*
1793 		 * We could probably skip resources without the
1794 		 * IORESOURCE_MEM attribute here, since this walks the iomem tree?
1795 		 */
1796 		if (p->start > end)
1797 			continue;
1798 		if (p->end < addr)
1799 			continue;
1800 		if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
1801 		    PFN_DOWN(p->end) >= PFN_DOWN(end))
1802 			continue;
1803 		/*
1804 		 * if a resource is "BUSY", it's not a hardware resource
1805 		 * but a driver mapping of such a resource; we don't want
1806 		 * to warn for those; some drivers legitimately map only
1807 		 * partial hardware resources. (example: vesafb)
1808 		 */
1809 		if (p->flags & IORESOURCE_BUSY)
1810 			continue;
1811 
1812 		pr_warn("resource sanity check: requesting [mem %pa-%pa], which spans more than %s %pR\n",
1813 			&addr, &end, p->name, p);
1814 		err = -1;
1815 		break;
1816 	}
1817 	read_unlock(&resource_lock);
1818 
1819 	return err;
1820 }
1821 
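/*
 * Illustrative sketch (not part of this file): mapping paths can call
 * this before establishing a mapping; a non-zero return means the
 * requested window straddles two distinct resources (the function has
 * already printed a warning), so the caller can refuse to map it.
 * "phys_addr" and "size" are hypothetical:
 *
 *	if (iomem_map_sanity_check(phys_addr, size))
 *		return NULL;
 */
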
1822 #ifdef CONFIG_STRICT_DEVMEM
1823 static int strict_iomem_checks = 1;
1824 #else
1825 static int strict_iomem_checks;
1826 #endif
1827 
1828 /*
1829  * Check if an address is exclusive to the kernel and must not be mapped to
1830  * user space, for example, via /dev/mem.
1831  *
1832  * Returns true if exclusive to the kernel, otherwise returns false.
1833  */
1834 bool resource_is_exclusive(struct resource *root, u64 addr, resource_size_t size)
1835 {
1836 	const unsigned int exclusive_system_ram = IORESOURCE_SYSTEM_RAM |
1837 						  IORESOURCE_EXCLUSIVE;
1838 	bool skip_children = false, err = false;
1839 	struct resource *p;
1840 
1841 	read_lock(&resource_lock);
1842 	for_each_resource(root, p, skip_children) {
1843 		if (p->start >= addr + size)
1844 			break;
1845 		if (p->end < addr) {
1846 			skip_children = true;
1847 			continue;
1848 		}
1849 		skip_children = false;
1850 
1851 		/*
1852 		 * IORESOURCE_SYSTEM_RAM resources are exclusive if
1853 		 * IORESOURCE_EXCLUSIVE is set, even if they
1854 		 * are not busy and even if "iomem=relaxed" is set. The
1855 		 * responsible driver dynamically adds/removes system RAM within
1856 		 * such an area and uncontrolled access is dangerous.
1857 		 */
1858 		if ((p->flags & exclusive_system_ram) == exclusive_system_ram) {
1859 			err = true;
1860 			break;
1861 		}
1862 
1863 		/*
1864 		 * Otherwise, under strict iomem checks, a resource that is
1865 		 * busy is exclusive if CONFIG_IO_STRICT_DEVMEM is enabled
1866 		 * or IORESOURCE_EXCLUSIVE is set.
1867 		 */
1868 		if (!strict_iomem_checks || !(p->flags & IORESOURCE_BUSY))
1869 			continue;
1870 		if (IS_ENABLED(CONFIG_IO_STRICT_DEVMEM)
1871 				|| p->flags & IORESOURCE_EXCLUSIVE) {
1872 			err = true;
1873 			break;
1874 		}
1875 	}
1876 	read_unlock(&resource_lock);
1877 
1878 	return err;
1879 }
1880 
1881 bool iomem_is_exclusive(u64 addr)
1882 {
1883 	return resource_is_exclusive(&iomem_resource, addr & PAGE_MASK,
1884 				     PAGE_SIZE);
1885 }
1886 
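/*
 * Illustrative sketch (not part of this file): /dev/mem-style mapping
 * code can use this to refuse user access to kernel-exclusive ranges;
 * "paddr" is a hypothetical physical address being mapped:
 *
 *	if (iomem_is_exclusive(paddr))
 *		return -EPERM;
 */
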
1887 struct resource_entry *resource_list_create_entry(struct resource *res,
1888 						  size_t extra_size)
1889 {
1890 	struct resource_entry *entry;
1891 
1892 	entry = kzalloc(sizeof(*entry) + extra_size, GFP_KERNEL);
1893 	if (entry) {
1894 		INIT_LIST_HEAD(&entry->node);
1895 		entry->res = res ? res : &entry->__res;
1896 	}
1897 
1898 	return entry;
1899 }
1900 EXPORT_SYMBOL(resource_list_create_entry);
1901 
1902 void resource_list_free(struct list_head *head)
1903 {
1904 	struct resource_entry *entry, *tmp;
1905 
1906 	list_for_each_entry_safe(entry, tmp, head, node)
1907 		resource_list_destroy_entry(entry);
1908 }
1909 EXPORT_SYMBOL(resource_list_free);
1910 
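/*
 * Illustrative sketch (not part of this file): building and tearing
 * down a resource list with these helpers and the list primitives from
 * <linux/resource_ext.h>; the memory window below is hypothetical:
 *
 *	LIST_HEAD(head);
 *	struct resource_entry *entry;
 *
 *	entry = resource_list_create_entry(NULL, 0);
 *	if (!entry)
 *		return -ENOMEM;
 *	*entry->res = DEFINE_RES_MEM(0xc0000000, SZ_64M);
 *	resource_list_add_tail(entry, &head);
 *	...
 *	resource_list_free(&head);
 *
 * Passing a NULL @res makes the entry use its embedded __res storage,
 * which resource_list_free() then frees along with the entry.
 */
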
1911 #ifdef CONFIG_GET_FREE_REGION
1912 #define GFR_DESCENDING		(1UL << 0)
1913 #define GFR_REQUEST_REGION	(1UL << 1)
1914 #ifdef PA_SECTION_SHIFT
1915 #define GFR_DEFAULT_ALIGN	(1UL << PA_SECTION_SHIFT)
1916 #else
1917 #define GFR_DEFAULT_ALIGN	PAGE_SIZE
1918 #endif
1919 
1920 static resource_size_t gfr_start(struct resource *base, resource_size_t size,
1921 				 resource_size_t align, unsigned long flags)
1922 {
1923 	if (flags & GFR_DESCENDING) {
1924 		resource_size_t end;
1925 
1926 		end = min_t(resource_size_t, base->end, DIRECT_MAP_PHYSMEM_END);
1927 		return end - size + 1;
1928 	}
1929 
1930 	return ALIGN(max(base->start, align), align);
1931 }
1932 
1933 static bool gfr_continue(struct resource *base, resource_size_t addr,
1934 			 resource_size_t size, unsigned long flags)
1935 {
1936 	if (flags & GFR_DESCENDING)
1937 		return addr > size && addr >= base->start;
1938 	/*
1939 	 * In the ascend case be careful that the last increment by
1940 	 * In the ascending case, be careful that the last increment by
1941 	 * @size did not wrap past 0.
1942 	return addr > addr - size &&
1943 	       addr <= min_t(resource_size_t, base->end, DIRECT_MAP_PHYSMEM_END);
1944 }
1945 
1946 static resource_size_t gfr_next(resource_size_t addr, resource_size_t size,
1947 				unsigned long flags)
1948 {
1949 	if (flags & GFR_DESCENDING)
1950 		return addr - size;
1951 	return addr + size;
1952 }
1953 
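/*
 * Together these helpers drive the scan loop in get_free_mem_region()
 * below: gfr_start() picks the first candidate (top of the window for
 * GFR_DESCENDING, else the aligned bottom), gfr_next() steps the cursor
 * (the loop passes the alignment as the step size), and gfr_continue()
 * stops the walk at the window edge while guarding against wraparound.
 */
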
1954 static void remove_free_mem_region(void *_res)
1955 {
1956 	struct resource *res = _res;
1957 
1958 	if (res->parent)
1959 		remove_resource(res);
1960 	free_resource(res);
1961 }
1962 
1963 static struct resource *
1964 get_free_mem_region(struct device *dev, struct resource *base,
1965 		    resource_size_t size, const unsigned long align,
1966 		    const char *name, const unsigned long desc,
1967 		    const unsigned long flags)
1968 {
1969 	resource_size_t addr;
1970 	struct resource *res;
1971 	struct region_devres *dr = NULL;
1972 
1973 	size = ALIGN(size, align);
1974 
1975 	res = alloc_resource(GFP_KERNEL);
1976 	if (!res)
1977 		return ERR_PTR(-ENOMEM);
1978 
1979 	if (dev && (flags & GFR_REQUEST_REGION)) {
1980 		dr = devres_alloc(devm_region_release,
1981 				sizeof(struct region_devres), GFP_KERNEL);
1982 		if (!dr) {
1983 			free_resource(res);
1984 			return ERR_PTR(-ENOMEM);
1985 		}
1986 	} else if (dev) {
1987 		if (devm_add_action_or_reset(dev, remove_free_mem_region, res))
1988 			return ERR_PTR(-ENOMEM);
1989 	}
1990 
1991 	write_lock(&resource_lock);
1992 	for (addr = gfr_start(base, size, align, flags);
1993 	     gfr_continue(base, addr, align, flags);
1994 	     addr = gfr_next(addr, align, flags)) {
1995 		if (__region_intersects(base, addr, size, 0, IORES_DESC_NONE) !=
1996 		    REGION_DISJOINT)
1997 			continue;
1998 
1999 		if (flags & GFR_REQUEST_REGION) {
2000 			if (__request_region_locked(res, &iomem_resource, addr,
2001 						    size, name, 0))
2002 				break;
2003 
2004 			if (dev) {
2005 				dr->parent = &iomem_resource;
2006 				dr->start = addr;
2007 				dr->n = size;
2008 				devres_add(dev, dr);
2009 			}
2010 
2011 			res->desc = desc;
2012 			write_unlock(&resource_lock);
2013 
2014 
2015 			/*
2016 			 * A driver is claiming this region so revoke any
2017 			 * mappings.
2018 			 */
2019 			revoke_iomem(res);
2020 		} else {
2021 			*res = DEFINE_RES_NAMED_DESC(addr, size, name, IORESOURCE_MEM, desc);
2022 
2023 			/*
2024 			 * Only succeed if the resource hosts an exclusive
2025 			 * range after the insert
2026 			 */
2027 			if (__insert_resource(base, res) || res->child)
2028 				break;
2029 
2030 			write_unlock(&resource_lock);
2031 		}
2032 
2033 		return res;
2034 	}
2035 	write_unlock(&resource_lock);
2036 
2037 	if (flags & GFR_REQUEST_REGION) {
2038 		free_resource(res);
2039 		devres_free(dr);
2040 	} else if (dev)
2041 		devm_release_action(dev, remove_free_mem_region, res);
2042 
2043 	return ERR_PTR(-ERANGE);
2044 }
2045 
2046 /**
2047  * devm_request_free_mem_region - find free region for device private memory
2048  *
2049  * @dev: device struct to bind the resource to
2050  * @base: resource tree to look in
2051  * @size: size in bytes of the device memory to add
2052  *
2053  * This function tries to find an empty range of physical address space big
2054  * enough to contain the new resource, so that it can later be hotplugged as
2055  * ZONE_DEVICE memory, which in turn allocates struct pages.
2056  */
2057 struct resource *devm_request_free_mem_region(struct device *dev,
2058 		struct resource *base, unsigned long size)
2059 {
2060 	unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION;
2061 
2062 	return get_free_mem_region(dev, base, size, GFR_DEFAULT_ALIGN,
2063 				   dev_name(dev),
2064 				   IORES_DESC_DEVICE_PRIVATE_MEMORY, flags);
2065 }
2066 EXPORT_SYMBOL_GPL(devm_request_free_mem_region);
2067 
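/*
 * Illustrative sketch (not part of this file): device-private memory
 * users typically feed the returned range to memremap_pages(); the
 * "pagemap" setup here is hypothetical and elided:
 *
 *	struct resource *res;
 *
 *	res = devm_request_free_mem_region(dev, &iomem_resource, size);
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *	pagemap->range.start = res->start;
 *	pagemap->range.end = res->end;
 */
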
2068 struct resource *request_free_mem_region(struct resource *base,
2069 		unsigned long size, const char *name)
2070 {
2071 	unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION;
2072 
2073 	return get_free_mem_region(NULL, base, size, GFR_DEFAULT_ALIGN, name,
2074 				   IORES_DESC_DEVICE_PRIVATE_MEMORY, flags);
2075 }
2076 EXPORT_SYMBOL_GPL(request_free_mem_region);
2077 
2078 /**
2079  * alloc_free_mem_region - find a free region relative to @base
2080  * @base: resource that will parent the new resource
2081  * @size: size in bytes of memory to allocate from @base
2082  * @align: alignment requirements for the allocation
2083  * @name: resource name
2084  *
2085  * Buses like CXL that can dynamically instantiate new memory regions
2086  * need a method to allocate physical address space for those regions.
2087  * Allocate and insert a new resource covering a free range in the span
2088  * of @base that is not claimed by any descendant of @base.
2089  */
2090 struct resource *alloc_free_mem_region(struct resource *base,
2091 				       unsigned long size, unsigned long align,
2092 				       const char *name)
2093 {
2094 	/* Default of ascending direction and insert resource */
2095 	unsigned long flags = 0;
2096 
2097 	return get_free_mem_region(NULL, base, size, align, name,
2098 				   IORES_DESC_NONE, flags);
2099 }
2100 EXPORT_SYMBOL_GPL(alloc_free_mem_region);
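
/*
 * Illustrative sketch (not part of this file): a CXL-style caller might
 * carve a hotpluggable region out of a host bridge window; "cxl_res"
 * and the 256M alignment are hypothetical:
 *
 *	res = alloc_free_mem_region(cxl_res, size, SZ_256M, "region0");
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *
 * Unlike the request-based variants above, the new resource is only
 * inserted under @base (not marked busy), so descendants may claim it.
 */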
2101 #endif /* CONFIG_GET_FREE_REGION */
2102 
2103 static int __init strict_iomem(char *str)
2104 {
2105 	if (strstr(str, "relaxed"))
2106 		strict_iomem_checks = 0;
2107 	if (strstr(str, "strict"))
2108 		strict_iomem_checks = 1;
2109 	return 1;
2110 }
2111 
2112 static int iomem_fs_init_fs_context(struct fs_context *fc)
2113 {
2114 	return init_pseudo(fc, DEVMEM_MAGIC) ? 0 : -ENOMEM;
2115 }
2116 
2117 static struct file_system_type iomem_fs_type = {
2118 	.name		= "iomem",
2119 	.owner		= THIS_MODULE,
2120 	.init_fs_context = iomem_fs_init_fs_context,
2121 	.kill_sb	= kill_anon_super,
2122 };
2123 
2124 static int __init iomem_init_inode(void)
2125 {
2126 	static struct vfsmount *iomem_vfs_mount;
2127 	static int iomem_fs_cnt;
2128 	struct inode *inode;
2129 	int rc;
2130 
2131 	rc = simple_pin_fs(&iomem_fs_type, &iomem_vfs_mount, &iomem_fs_cnt);
2132 	if (rc < 0) {
2133 		pr_err("Cannot mount iomem pseudo filesystem: %d\n", rc);
2134 		return rc;
2135 	}
2136 
2137 	inode = alloc_anon_inode(iomem_vfs_mount->mnt_sb);
2138 	if (IS_ERR(inode)) {
2139 		rc = PTR_ERR(inode);
2140 		pr_err("Cannot allocate inode for iomem: %d\n", rc);
2141 		simple_release_fs(&iomem_vfs_mount, &iomem_fs_cnt);
2142 		return rc;
2143 	}
2144 
2145 	/*
2146 	 * Publish the iomem revocation inode as initialized.
2147 	 * Pairs with smp_load_acquire() in revoke_iomem().
2148 	 */
2149 	smp_store_release(&iomem_inode, inode);
2150 
2151 	return 0;
2152 }
2153 
2154 fs_initcall(iomem_init_inode);
2155 
2156 __setup("iomem=", strict_iomem);
2157