1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *	linux/kernel/resource.c
4  *
5  * Copyright (C) 1999	Linus Torvalds
6  * Copyright (C) 1999	Martin Mares <mj@ucw.cz>
7  *
8  * Arbitrary resource management.
9  */
10 
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 
13 #include <linux/export.h>
14 #include <linux/errno.h>
15 #include <linux/ioport.h>
16 #include <linux/init.h>
17 #include <linux/slab.h>
18 #include <linux/spinlock.h>
19 #include <linux/fs.h>
20 #include <linux/proc_fs.h>
21 #include <linux/pseudo_fs.h>
22 #include <linux/sched.h>
23 #include <linux/seq_file.h>
24 #include <linux/device.h>
25 #include <linux/pfn.h>
26 #include <linux/mm.h>
27 #include <linux/mount.h>
28 #include <linux/resource_ext.h>
29 #include <uapi/linux/magic.h>
30 #include <linux/string.h>
31 #include <linux/vmalloc.h>
32 #include <asm/io.h>
33 
34 
35 struct resource ioport_resource = {
36 	.name	= "PCI IO",
37 	.start	= 0,
38 	.end	= IO_SPACE_LIMIT,
39 	.flags	= IORESOURCE_IO,
40 };
41 EXPORT_SYMBOL(ioport_resource);
42 
43 struct resource iomem_resource = {
44 	.name	= "PCI mem",
45 	.start	= 0,
46 	.end	= -1,
47 	.flags	= IORESOURCE_MEM,
48 };
49 EXPORT_SYMBOL(iomem_resource);
50 
51 static DEFINE_RWLOCK(resource_lock);
52 
53 static struct resource *next_resource(struct resource *p, bool skip_children)
54 {
55 	if (!skip_children && p->child)
56 		return p->child;
57 	while (!p->sibling && p->parent)
58 		p = p->parent;
59 	return p->sibling;
60 }
61 
62 #define for_each_resource(_root, _p, _skip_children) \
63 	for ((_p) = (_root)->child; (_p); (_p) = next_resource(_p, _skip_children))
64 
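/*
 * Example (a minimal sketch): walking every node of the iomem tree with
 * the helpers above; pass skip_children=true to stay at the top level.
 *
 *	struct resource *p;
 *
 *	read_lock(&resource_lock);
 *	for_each_resource(&iomem_resource, p, false)
 *		pr_info("%pR\n", p);
 *	read_unlock(&resource_lock);
 */
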
65 #ifdef CONFIG_PROC_FS
66 
67 enum { MAX_IORES_LEVEL = 5 };
68 
69 static void *r_start(struct seq_file *m, loff_t *pos)
70 	__acquires(resource_lock)
71 {
72 	struct resource *root = pde_data(file_inode(m->file));
73 	struct resource *p;
74 	loff_t l = *pos;
75 
76 	read_lock(&resource_lock);
77 	for_each_resource(root, p, false) {
78 		if (l-- == 0)
79 			break;
80 	}
81 
82 	return p;
83 }
84 
85 static void *r_next(struct seq_file *m, void *v, loff_t *pos)
86 {
87 	struct resource *p = v;
88 
89 	(*pos)++;
90 
91 	return (void *)next_resource(p, false);
92 }
93 
94 static void r_stop(struct seq_file *m, void *v)
95 	__releases(resource_lock)
96 {
97 	read_unlock(&resource_lock);
98 }
99 
100 static int r_show(struct seq_file *m, void *v)
101 {
102 	struct resource *root = pde_data(file_inode(m->file));
103 	struct resource *r = v, *p;
104 	unsigned long long start, end;
105 	int width = root->end < 0x10000 ? 4 : 8;
106 	int depth;
107 
108 	for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
109 		if (p->parent == root)
110 			break;
111 
112 	if (file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN)) {
113 		start = r->start;
114 		end = r->end;
115 	} else {
116 		start = end = 0;
117 	}
118 
119 	seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
120 			depth * 2, "",
121 			width, start,
122 			width, end,
123 			r->name ? r->name : "<BAD>");
124 	return 0;
125 }
126 
127 static const struct seq_operations resource_op = {
128 	.start	= r_start,
129 	.next	= r_next,
130 	.stop	= r_stop,
131 	.show	= r_show,
132 };
133 
134 static int __init ioresources_init(void)
135 {
136 	proc_create_seq_data("ioports", 0, NULL, &resource_op,
137 			&ioport_resource);
138 	proc_create_seq_data("iomem", 0, NULL, &resource_op, &iomem_resource);
139 	return 0;
140 }
141 __initcall(ioresources_init);
142 
143 #endif /* CONFIG_PROC_FS */
144 
145 static void free_resource(struct resource *res)
146 {
147 	/*
148 	 * If the resource was allocated using memblock early during boot
149 	 * we'll leak it here: we can only return full pages back to the
150 	 * buddy and trying to be smart and reusing them eventually in
151 	 * alloc_resource() overcomplicates resource handling.
152 	 */
153 	if (res && PageSlab(virt_to_head_page(res)))
154 		kfree(res);
155 }
156 
157 static struct resource *alloc_resource(gfp_t flags)
158 {
159 	return kzalloc(sizeof(struct resource), flags);
160 }
161 
162 /* Return the conflict entry if you can't request it */
163 static struct resource * __request_resource(struct resource *root, struct resource *new)
164 {
165 	resource_size_t start = new->start;
166 	resource_size_t end = new->end;
167 	struct resource *tmp, **p;
168 
169 	if (end < start)
170 		return root;
171 	if (start < root->start)
172 		return root;
173 	if (end > root->end)
174 		return root;
175 	p = &root->child;
176 	for (;;) {
177 		tmp = *p;
178 		if (!tmp || tmp->start > end) {
179 			new->sibling = tmp;
180 			*p = new;
181 			new->parent = root;
182 			return NULL;
183 		}
184 		p = &tmp->sibling;
185 		if (tmp->end < start)
186 			continue;
187 		return tmp;
188 	}
189 }
190 
191 static int __release_resource(struct resource *old, bool release_child)
192 {
193 	struct resource *tmp, **p, *chd;
194 
195 	p = &old->parent->child;
196 	for (;;) {
197 		tmp = *p;
198 		if (!tmp)
199 			break;
200 		if (tmp == old) {
201 			if (release_child || !(tmp->child)) {
202 				*p = tmp->sibling;
203 			} else {
204 				for (chd = tmp->child;; chd = chd->sibling) {
205 					chd->parent = tmp->parent;
206 					if (!(chd->sibling))
207 						break;
208 				}
209 				*p = tmp->child;
210 				chd->sibling = tmp->sibling;
211 			}
212 			old->parent = NULL;
213 			return 0;
214 		}
215 		p = &tmp->sibling;
216 	}
217 	return -EINVAL;
218 }
219 
220 static void __release_child_resources(struct resource *r)
221 {
222 	struct resource *tmp, *p;
223 	resource_size_t size;
224 
225 	p = r->child;
226 	r->child = NULL;
227 	while (p) {
228 		tmp = p;
229 		p = p->sibling;
230 
231 		tmp->parent = NULL;
232 		tmp->sibling = NULL;
233 		__release_child_resources(tmp);
234 
235 		printk(KERN_DEBUG "release child resource %pR\n", tmp);
236 		/* need to restore size, and keep flags */
237 		size = resource_size(tmp);
238 		tmp->start = 0;
239 		tmp->end = size - 1;
240 	}
241 }
242 
243 void release_child_resources(struct resource *r)
244 {
245 	write_lock(&resource_lock);
246 	__release_child_resources(r);
247 	write_unlock(&resource_lock);
248 }
249 
250 /**
251  * request_resource_conflict - request and reserve an I/O or memory resource
252  * @root: root resource descriptor
253  * @new: resource descriptor desired by caller
254  *
255  * Returns NULL on success, the conflicting resource on error.
256  */
257 struct resource *request_resource_conflict(struct resource *root, struct resource *new)
258 {
259 	struct resource *conflict;
260 
261 	write_lock(&resource_lock);
262 	conflict = __request_resource(root, new);
263 	write_unlock(&resource_lock);
264 	return conflict;
265 }
266 
267 /**
268  * request_resource - request and reserve an I/O or memory resource
269  * @root: root resource descriptor
270  * @new: resource descriptor desired by caller
271  *
272  * Returns 0 for success, negative error code on error.
273  */
274 int request_resource(struct resource *root, struct resource *new)
275 {
276 	struct resource *conflict;
277 
278 	conflict = request_resource_conflict(root, new);
279 	return conflict ? -EBUSY : 0;
280 }
281 
282 EXPORT_SYMBOL(request_resource);
283 
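/*
 * Example usage (a sketch; the device name and addresses are made up):
 * a driver claiming a fixed MMIO window directly under iomem_resource.
 *
 *	static struct resource foo_res = {
 *		.name  = "foo-ctrl",
 *		.start = 0xfed40000,
 *		.end   = 0xfed40fff,
 *		.flags = IORESOURCE_MEM,
 *	};
 *
 *	if (request_resource(&iomem_resource, &foo_res))
 *		return -EBUSY;
 */
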
284 /**
285  * release_resource - release a previously reserved resource
286  * @old: resource pointer
287  */
288 int release_resource(struct resource *old)
289 {
290 	int retval;
291 
292 	write_lock(&resource_lock);
293 	retval = __release_resource(old, true);
294 	write_unlock(&resource_lock);
295 	return retval;
296 }
297 
298 EXPORT_SYMBOL(release_resource);
299 
300 /**
301  * find_next_iomem_res - Finds the lowest iomem resource that covers part of
302  *			 [@start..@end].
303  *
304  * If a resource is found, returns 0 and @*res is overwritten with the part
305  * of the resource that's within [@start..@end]; if none is found, returns
306  * -ENODEV.  Returns -EINVAL for invalid parameters.
307  *
308  * @start:	start address of the resource searched for
309  * @end:	end address of same resource
310  * @flags:	flags which the resource must have
311  * @desc:	descriptor the resource must have
312  * @res:	return ptr, if resource found
313  *
314  * The caller must specify @start, @end, @flags, and @desc
315  * (which may be IORES_DESC_NONE).
316  */
317 static int find_next_iomem_res(resource_size_t start, resource_size_t end,
318 			       unsigned long flags, unsigned long desc,
319 			       struct resource *res)
320 {
321 	struct resource *p;
322 
323 	if (!res)
324 		return -EINVAL;
325 
326 	if (start >= end)
327 		return -EINVAL;
328 
329 	read_lock(&resource_lock);
330 
331 	for_each_resource(&iomem_resource, p, false) {
332 		/* If we passed the resource we are looking for, stop */
333 		if (p->start > end) {
334 			p = NULL;
335 			break;
336 		}
337 
338 		/* Skip until we find a range that matches what we look for */
339 		if (p->end < start)
340 			continue;
341 
342 		if ((p->flags & flags) != flags)
343 			continue;
344 		if ((desc != IORES_DESC_NONE) && (desc != p->desc))
345 			continue;
346 
347 		/* Found a match, break */
348 		break;
349 	}
350 
351 	if (p) {
352 		/* copy data */
353 		*res = (struct resource) {
354 			.start = max(start, p->start),
355 			.end = min(end, p->end),
356 			.flags = p->flags,
357 			.desc = p->desc,
358 			.parent = p->parent,
359 		};
360 	}
361 
362 	read_unlock(&resource_lock);
363 	return p ? 0 : -ENODEV;
364 }
365 
366 static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
367 				 unsigned long flags, unsigned long desc,
368 				 void *arg,
369 				 int (*func)(struct resource *, void *))
370 {
371 	struct resource res;
372 	int ret = -EINVAL;
373 
374 	while (start < end &&
375 	       !find_next_iomem_res(start, end, flags, desc, &res)) {
376 		ret = (*func)(&res, arg);
377 		if (ret)
378 			break;
379 
380 		start = res.end + 1;
381 	}
382 
383 	return ret;
384 }
385 
386 /**
387  * walk_iomem_res_desc - Walks through iomem resources and calls func()
388  *			 with matching resource ranges.
389  *
390  * @desc: I/O resource descriptor. Use IORES_DESC_NONE to skip @desc check.
391  * @flags: I/O resource flags
392  * @start: start addr
393  * @end: end addr
394  * @arg: function argument for the callback @func
395  * @func: callback function that is called for each qualifying resource area
396  *
397  * All memory ranges that overlap [@start..@end] and also match @flags and
398  * @desc are valid candidates.
399  *
400  * NOTE: For a new descriptor search, define a new IORES_DESC in
401  * <linux/ioport.h> and set it in 'desc' of a target resource entry.
402  */
403 int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
404 		u64 end, void *arg, int (*func)(struct resource *, void *))
405 {
406 	return __walk_iomem_res_desc(start, end, flags, desc, arg, func);
407 }
408 EXPORT_SYMBOL_GPL(walk_iomem_res_desc);
409 
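/*
 * Example (a sketch): summing up busy System RAM with a callback; the
 * helper name is hypothetical.
 *
 *	static int count_bytes(struct resource *res, void *arg)
 *	{
 *		u64 *total = arg;
 *
 *		*total += resource_size(res);
 *		return 0;
 *	}
 *
 *	u64 total = 0;
 *
 *	walk_iomem_res_desc(IORES_DESC_NONE,
 *			    IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
 *			    0, (u64)-1, &total, count_bytes);
 */
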
410 /*
411  * This function calls the @func callback against all memory ranges of type
412  * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
413  * This function is only for System RAM; it deals with full ranges and
414  * not PFNs. If resources are not PFN-aligned, dealing with PFNs can truncate
415  * ranges.
416  */
417 int walk_system_ram_res(u64 start, u64 end, void *arg,
418 			int (*func)(struct resource *, void *))
419 {
420 	unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
421 
422 	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg,
423 				     func);
424 }
425 
426 /*
427  * This function, being a variant of walk_system_ram_res(), calls the @func
428  * callback against all memory ranges of type System RAM which are marked as
429  * IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY, in reverse order, i.e., from
430  * higher to lower.
431  */
432 int walk_system_ram_res_rev(u64 start, u64 end, void *arg,
433 				int (*func)(struct resource *, void *))
434 {
435 	struct resource res, *rams;
436 	int rams_size = 16, i;
437 	unsigned long flags;
438 	int ret = -1;
439 
440 	/* create a list */
441 	rams = kvcalloc(rams_size, sizeof(struct resource), GFP_KERNEL);
442 	if (!rams)
443 		return ret;
444 
445 	flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
446 	i = 0;
447 	while ((start < end) &&
448 		(!find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res))) {
449 		if (i >= rams_size) {
450 			/* re-alloc */
451 			struct resource *rams_new;
452 
453 			rams_new = kvrealloc(rams, rams_size * sizeof(struct resource),
454 					     (rams_size + 16) * sizeof(struct resource),
455 					     GFP_KERNEL);
456 			if (!rams_new)
457 				goto out;
458 
459 			rams = rams_new;
460 			rams_size += 16;
461 		}
462 
463 		rams[i].start = res.start;
464 		rams[i++].end = res.end;
465 
466 		start = res.end + 1;
467 	}
468 
469 	/* go reverse */
470 	for (i--; i >= 0; i--) {
471 		ret = (*func)(&rams[i], arg);
472 		if (ret)
473 			break;
474 	}
475 
476 out:
477 	kvfree(rams);
478 	return ret;
479 }
480 
481 /*
482  * This function calls the @func callback against all memory ranges, which
483  * are ranges marked as IORESOURCE_MEM and IORESOURCE_BUSY.
484  */
485 int walk_mem_res(u64 start, u64 end, void *arg,
486 		 int (*func)(struct resource *, void *))
487 {
488 	unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;
489 
490 	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg,
491 				     func);
492 }
493 
494 /*
495  * This function calls the @func callback against all memory ranges of type
496  * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
497  * It is to be used only for System RAM.
498  */
499 int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
500 			  void *arg, int (*func)(unsigned long, unsigned long, void *))
501 {
502 	resource_size_t start, end;
503 	unsigned long flags;
504 	struct resource res;
505 	unsigned long pfn, end_pfn;
506 	int ret = -EINVAL;
507 
508 	start = (u64) start_pfn << PAGE_SHIFT;
509 	end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
510 	flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
511 	while (start < end &&
512 	       !find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res)) {
513 		pfn = PFN_UP(res.start);
514 		end_pfn = PFN_DOWN(res.end + 1);
515 		if (end_pfn > pfn)
516 			ret = (*func)(pfn, end_pfn - pfn, arg);
517 		if (ret)
518 			break;
519 		start = res.end + 1;
520 	}
521 	return ret;
522 }
523 
524 static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
525 {
526 	return 1;
527 }
528 
529 /*
530  * This generic page_is_ram() returns true if specified address is
531  * registered as System RAM in iomem_resource list.
532  */
533 int __weak page_is_ram(unsigned long pfn)
534 {
535 	return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
536 }
537 EXPORT_SYMBOL_GPL(page_is_ram);
538 
539 static int __region_intersects(struct resource *parent, resource_size_t start,
540 			       size_t size, unsigned long flags,
541 			       unsigned long desc)
542 {
543 	resource_size_t ostart, oend;
544 	int type = 0, other = 0;
545 	struct resource *p, *dp;
546 	bool is_type, covered;
547 	struct resource res;
548 
549 	res.start = start;
550 	res.end = start + size - 1;
551 
552 	for (p = parent->child; p ; p = p->sibling) {
553 		if (!resource_overlaps(p, &res))
554 			continue;
555 		is_type = (p->flags & flags) == flags &&
556 			(desc == IORES_DESC_NONE || desc == p->desc);
557 		if (is_type) {
558 			type++;
559 			continue;
560 		}
561 		/*
562 		 * Continue the search in descendant resources, as matched
563 		 * descendant resources may cover some ranges of 'p'.
564 		 *
565 		 * |------------- "CXL Window 0" ------------|
566 		 * |-- "System RAM" --|
567 		 *
568 		 * behaves similarly to the following fake resource
569 		 * tree when searching "System RAM".
570 		 *
571 		 * |-- "System RAM" --||-- "CXL Window 0a" --|
572 		 */
573 		covered = false;
574 		ostart = max(res.start, p->start);
575 		oend = min(res.end, p->end);
576 		for_each_resource(p, dp, false) {
577 			if (!resource_overlaps(dp, &res))
578 				continue;
579 			is_type = (dp->flags & flags) == flags &&
580 				(desc == IORES_DESC_NONE || desc == dp->desc);
581 			if (is_type) {
582 				type++;
583 				/*
584 				 * Range from 'ostart' to 'dp->start'
585 				 * isn't covered by matched resource.
586 				 */
587 				if (dp->start > ostart)
588 					break;
589 				if (dp->end >= oend) {
590 					covered = true;
591 					break;
592 				}
593 				/* Remove covered range */
594 				ostart = max(ostart, dp->end + 1);
595 			}
596 		}
597 		if (!covered)
598 			other++;
599 	}
600 
601 	if (type == 0)
602 		return REGION_DISJOINT;
603 
604 	if (other == 0)
605 		return REGION_INTERSECTS;
606 
607 	return REGION_MIXED;
608 }
609 
610 /**
611  * region_intersects() - determine intersection of region with known resources
612  * @start: region start address
613  * @size: size of region
614  * @flags: flags of resource (in iomem_resource)
615  * @desc: descriptor of resource (in iomem_resource) or IORES_DESC_NONE
616  *
617  * Check if the specified region partially overlaps or fully eclipses a
618  * resource identified by @flags and @desc (optional with IORES_DESC_NONE).
619  * Return REGION_DISJOINT if the region does not overlap @flags/@desc,
620  * return REGION_MIXED if the region overlaps @flags/@desc and another
621  * resource, and return REGION_INTERSECTS if the region overlaps @flags/@desc
622  * and no other defined resource. Note that REGION_INTERSECTS is also
623  * returned in the case when the specified region overlaps RAM and undefined
624  * memory holes.
625  *
626  * region_intersects() is used by memory remapping functions to ensure
627  * the user is not remapping RAM, and it is a vast speed-up over walking
628  * through the resource table page by page.
629  */
630 int region_intersects(resource_size_t start, size_t size, unsigned long flags,
631 		      unsigned long desc)
632 {
633 	int ret;
634 
635 	read_lock(&resource_lock);
636 	ret = __region_intersects(&iomem_resource, start, size, flags, desc);
637 	read_unlock(&resource_lock);
638 
639 	return ret;
640 }
641 EXPORT_SYMBOL_GPL(region_intersects);
642 
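/*
 * Example (a sketch): a mapping helper refusing a range that touches
 * System RAM, mirroring how remapping code typically uses this check.
 *
 *	if (region_intersects(paddr, size, IORESOURCE_SYSTEM_RAM,
 *			      IORES_DESC_NONE) != REGION_DISJOINT)
 *		return NULL;
 */
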
643 void __weak arch_remove_reservations(struct resource *avail)
644 {
645 }
646 
647 static void resource_clip(struct resource *res, resource_size_t min,
648 			  resource_size_t max)
649 {
650 	if (res->start < min)
651 		res->start = min;
652 	if (res->end > max)
653 		res->end = max;
654 }
655 
656 /*
657  * Find empty space in the resource tree with the given range and
658  * alignment constraints
659  */
660 static int __find_resource_space(struct resource *root, struct resource *old,
661 				 struct resource *new, resource_size_t size,
662 				 struct resource_constraint *constraint)
663 {
664 	struct resource *this = root->child;
665 	struct resource tmp = *new, avail, alloc;
666 	resource_alignf alignf = constraint->alignf;
667 
668 	tmp.start = root->start;
669 	/*
670 	 * Skip past an allocated resource that starts at 0, since the assignment
671 	 * of this->start - 1 to tmp->end below would cause an underflow.
672 	 */
673 	if (this && this->start == root->start) {
674 		tmp.start = (this == old) ? old->start : this->end + 1;
675 		this = this->sibling;
676 	}
677 	for (;;) {
678 		if (this)
679 			tmp.end = (this == old) ?  this->end : this->start - 1;
680 		else
681 			tmp.end = root->end;
682 
683 		if (tmp.end < tmp.start)
684 			goto next;
685 
686 		resource_clip(&tmp, constraint->min, constraint->max);
687 		arch_remove_reservations(&tmp);
688 
689 		/* Check for overflow after ALIGN() */
690 		avail.start = ALIGN(tmp.start, constraint->align);
691 		avail.end = tmp.end;
692 		avail.flags = new->flags & ~IORESOURCE_UNSET;
693 		if (avail.start >= tmp.start) {
694 			alloc.flags = avail.flags;
695 			if (alignf) {
696 				alloc.start = alignf(constraint->alignf_data,
697 						     &avail, size, constraint->align);
698 			} else {
699 				alloc.start = avail.start;
700 			}
701 			alloc.end = alloc.start + size - 1;
702 			if (alloc.start <= alloc.end &&
703 			    resource_contains(&avail, &alloc)) {
704 				new->start = alloc.start;
705 				new->end = alloc.end;
706 				return 0;
707 			}
708 		}
709 
710 next:		if (!this || this->end == root->end)
711 			break;
712 
713 		if (this != old)
714 			tmp.start = this->end + 1;
715 		this = this->sibling;
716 	}
717 	return -EBUSY;
718 }
719 
720 /**
721  * find_resource_space - Find empty space in the resource tree
722  * @root:	Root resource descriptor
723  * @new:	Resource descriptor awaiting an empty resource space
724  * @size:	The minimum size of the empty space
725  * @constraint:	The range and alignment constraints to be met
726  *
727  * Finds an empty space under @root in the resource tree satisfying range and
728  * alignment @constraints.
729  *
730  * Return:
731  * * %0		- if successful, @new members start, end, and flags are altered.
732  * * %-EBUSY	- if no empty space was found.
733  */
734 int find_resource_space(struct resource *root, struct resource *new,
735 			resource_size_t size,
736 			struct resource_constraint *constraint)
737 {
738 	return  __find_resource_space(root, NULL, new, size, constraint);
739 }
740 EXPORT_SYMBOL_GPL(find_resource_space);
741 
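/*
 * Example (a sketch, with made-up numbers): asking for a 1 MiB hole,
 * 1 MiB aligned, anywhere under iomem_resource. On success, new.start
 * and new.end describe the candidate range.
 *
 *	struct resource_constraint c = {
 *		.min   = 0,
 *		.max   = (resource_size_t)-1,
 *		.align = 0x100000,
 *	};
 *	struct resource new = { .flags = IORESOURCE_MEM };
 *
 *	err = find_resource_space(&iomem_resource, &new, 0x100000, &c);
 */
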
742 /**
743  * reallocate_resource - allocate a slot in the resource tree given range & alignment.
744  *	The resource will be relocated if the new size cannot be
745  *	accommodated at the current location.
746  *
747  * @root: root resource descriptor
748  * @old:  resource descriptor desired by caller
749  * @newsize: new size of the resource descriptor
750  * @constraint: the size and alignment constraints to be met.
751  */
752 static int reallocate_resource(struct resource *root, struct resource *old,
753 			       resource_size_t newsize,
754 			       struct resource_constraint *constraint)
755 {
756 	int err = 0;
757 	struct resource new = *old;
758 	struct resource *conflict;
759 
760 	write_lock(&resource_lock);
761 
762 	if ((err = __find_resource_space(root, old, &new, newsize, constraint)))
763 		goto out;
764 
765 	if (resource_contains(&new, old)) {
766 		old->start = new.start;
767 		old->end = new.end;
768 		goto out;
769 	}
770 
771 	if (old->child) {
772 		err = -EBUSY;
773 		goto out;
774 	}
775 
776 	if (resource_contains(old, &new)) {
777 		old->start = new.start;
778 		old->end = new.end;
779 	} else {
780 		__release_resource(old, true);
781 		*old = new;
782 		conflict = __request_resource(root, old);
783 		BUG_ON(conflict);
784 	}
785 out:
786 	write_unlock(&resource_lock);
787 	return err;
788 }
789 
790 
791 /**
792  * allocate_resource - allocate empty slot in the resource tree given range & alignment.
793  *	The resource will be reallocated with a new size if it was already allocated.
794  * @root: root resource descriptor
795  * @new: resource descriptor desired by caller
796  * @size: requested resource region size
797  * @min: minimum boundary to allocate
798  * @max: maximum boundary to allocate
799  * @align: alignment requested, in bytes
800  * @alignf: alignment function, optional, called if not NULL
801  * @alignf_data: arbitrary data to pass to the @alignf function
802  */
803 int allocate_resource(struct resource *root, struct resource *new,
804 		      resource_size_t size, resource_size_t min,
805 		      resource_size_t max, resource_size_t align,
806 		      resource_alignf alignf,
807 		      void *alignf_data)
808 {
809 	int err;
810 	struct resource_constraint constraint;
811 
812 	constraint.min = min;
813 	constraint.max = max;
814 	constraint.align = align;
815 	constraint.alignf = alignf;
816 	constraint.alignf_data = alignf_data;
817 
818 	if (new->parent) {
819 		/* resource is already allocated, try reallocating with
820 		 * the new constraints */
821 		return reallocate_resource(root, new, size, &constraint);
822 	}
823 
824 	write_lock(&resource_lock);
825 	err = find_resource_space(root, new, size, &constraint);
826 	if (err >= 0 && __request_resource(root, new))
827 		err = -EBUSY;
828 	write_unlock(&resource_lock);
829 	return err;
830 }
831 
832 EXPORT_SYMBOL(allocate_resource);
833 
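/*
 * Example (a sketch; the name is hypothetical): letting the allocator
 * pick a free, page-aligned 4 KiB MMIO range instead of hard-coding one.
 *
 *	struct resource *r = kzalloc(sizeof(*r), GFP_KERNEL);
 *
 *	r->name = "foo-dma-window";
 *	r->flags = IORESOURCE_MEM;
 *	if (allocate_resource(&iomem_resource, r, 0x1000, 0,
 *			      (resource_size_t)-1, 0x1000, NULL, NULL))
 *		return -EBUSY;
 */
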
834 /**
835  * lookup_resource - find an existing resource by a resource start address
836  * @root: root resource descriptor
837  * @start: resource start address
838  *
839  * Returns a pointer to the resource if found, NULL otherwise
840  */
841 struct resource *lookup_resource(struct resource *root, resource_size_t start)
842 {
843 	struct resource *res;
844 
845 	read_lock(&resource_lock);
846 	for (res = root->child; res; res = res->sibling) {
847 		if (res->start == start)
848 			break;
849 	}
850 	read_unlock(&resource_lock);
851 
852 	return res;
853 }
854 
855 /*
856  * Insert a resource into the resource tree. If successful, return NULL,
857  * otherwise return the conflicting resource (compare to __request_resource())
858  */
859 static struct resource * __insert_resource(struct resource *parent, struct resource *new)
860 {
861 	struct resource *first, *next;
862 
863 	for (;; parent = first) {
864 		first = __request_resource(parent, new);
865 		if (!first)
866 			return first;
867 
868 		if (first == parent)
869 			return first;
870 		if (WARN_ON(first == new))	/* duplicated insertion */
871 			return first;
872 
873 		if ((first->start > new->start) || (first->end < new->end))
874 			break;
875 		if ((first->start == new->start) && (first->end == new->end))
876 			break;
877 	}
878 
879 	for (next = first; ; next = next->sibling) {
880 		/* Partial overlap? Bad, and unfixable */
881 		if (next->start < new->start || next->end > new->end)
882 			return next;
883 		if (!next->sibling)
884 			break;
885 		if (next->sibling->start > new->end)
886 			break;
887 	}
888 
889 	new->parent = parent;
890 	new->sibling = next->sibling;
891 	new->child = first;
892 
893 	next->sibling = NULL;
894 	for (next = first; next; next = next->sibling)
895 		next->parent = new;
896 
897 	if (parent->child == first) {
898 		parent->child = new;
899 	} else {
900 		next = parent->child;
901 		while (next->sibling != first)
902 			next = next->sibling;
903 		next->sibling = new;
904 	}
905 	return NULL;
906 }
907 
908 /**
909  * insert_resource_conflict - Inserts resource in the resource tree
910  * @parent: parent of the new resource
911  * @new: new resource to insert
912  *
913  * Returns NULL on success, the conflicting resource if the resource can't be inserted.
914  *
915  * This function is equivalent to request_resource_conflict when no conflict
916  * happens. If a conflict happens, and the conflicting resources
917  * entirely fit within the range of the new resource, then the new
918  * resource is inserted and the conflicting resources become children of
919  * the new resource.
920  *
921  * This function is intended for producers of resources, such as FW modules
922  * and bus drivers.
923  */
924 struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
925 {
926 	struct resource *conflict;
927 
928 	write_lock(&resource_lock);
929 	conflict = __insert_resource(parent, new);
930 	write_unlock(&resource_lock);
931 	return conflict;
932 }
933 
934 /**
935  * insert_resource - Inserts a resource in the resource tree
936  * @parent: parent of the new resource
937  * @new: new resource to insert
938  *
939  * Returns 0 on success, -EBUSY if the resource can't be inserted.
940  *
941  * This function is intended for producers of resources, such as FW modules
942  * and bus drivers.
943  */
944 int insert_resource(struct resource *parent, struct resource *new)
945 {
946 	struct resource *conflict;
947 
948 	conflict = insert_resource_conflict(parent, new);
949 	return conflict ? -EBUSY : 0;
950 }
951 EXPORT_SYMBOL_GPL(insert_resource);
952 
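/*
 * Example (a sketch with made-up addresses): a firmware/bus driver
 * publishing a discovered window. Existing entries that fit entirely
 * inside the new range become its children instead of conflicting.
 *
 *	static struct resource pmem_res = {
 *		.name  = "Persistent Memory",
 *		.start = 0x100000000ULL,
 *		.end   = 0x1ffffffffULL,
 *		.flags = IORESOURCE_MEM,
 *	};
 *
 *	if (insert_resource(&iomem_resource, &pmem_res))
 *		pr_warn("persistent memory window already claimed\n");
 */
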
953 /**
954  * insert_resource_expand_to_fit - Insert a resource into the resource tree
955  * @root: root resource descriptor
956  * @new: new resource to insert
957  *
958  * Insert a resource into the resource tree, possibly expanding it in order
959  * to make it encompass any conflicting resources.
960  */
961 void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
962 {
963 	if (new->parent)
964 		return;
965 
966 	write_lock(&resource_lock);
967 	for (;;) {
968 		struct resource *conflict;
969 
970 		conflict = __insert_resource(root, new);
971 		if (!conflict)
972 			break;
973 		if (conflict == root)
974 			break;
975 
976 		/* Ok, expand resource to cover the conflict, then try again .. */
977 		if (conflict->start < new->start)
978 			new->start = conflict->start;
979 		if (conflict->end > new->end)
980 			new->end = conflict->end;
981 
982 		pr_info("Expanded resource %s due to conflict with %s\n", new->name, conflict->name);
983 	}
984 	write_unlock(&resource_lock);
985 }
986 /*
987  * Not for general consumption, only early boot memory map parsing, PCI
988  * resource discovery, and late discovery of CXL resources are expected
989  * to use this interface. The former are built-in and only the latter,
990  * CXL, is a module.
991  */
992 EXPORT_SYMBOL_NS_GPL(insert_resource_expand_to_fit, CXL);
993 
994 /**
995  * remove_resource - Remove a resource in the resource tree
996  * @old: resource to remove
997  *
998  * Returns 0 on success, -EINVAL if the resource is not valid.
999  *
1000  * This function removes a resource previously inserted by insert_resource()
1001  * or insert_resource_conflict(), and moves the children (if any) up to
1002  * where they were before.  insert_resource() and insert_resource_conflict()
1003  * insert a new resource, and move any conflicting resources down to the
1004  * children of the new resource.
1005  *
1006  * insert_resource(), insert_resource_conflict() and remove_resource() are
1007  * intended for producers of resources, such as FW modules and bus drivers.
1008  */
1009 int remove_resource(struct resource *old)
1010 {
1011 	int retval;
1012 
1013 	write_lock(&resource_lock);
1014 	retval = __release_resource(old, false);
1015 	write_unlock(&resource_lock);
1016 	return retval;
1017 }
1018 EXPORT_SYMBOL_GPL(remove_resource);
1019 
1020 static int __adjust_resource(struct resource *res, resource_size_t start,
1021 				resource_size_t size)
1022 {
1023 	struct resource *tmp, *parent = res->parent;
1024 	resource_size_t end = start + size - 1;
1025 	int result = -EBUSY;
1026 
1027 	if (!parent)
1028 		goto skip;
1029 
1030 	if ((start < parent->start) || (end > parent->end))
1031 		goto out;
1032 
1033 	if (res->sibling && (res->sibling->start <= end))
1034 		goto out;
1035 
1036 	tmp = parent->child;
1037 	if (tmp != res) {
1038 		while (tmp->sibling != res)
1039 			tmp = tmp->sibling;
1040 		if (start <= tmp->end)
1041 			goto out;
1042 	}
1043 
1044 skip:
1045 	for (tmp = res->child; tmp; tmp = tmp->sibling)
1046 		if ((tmp->start < start) || (tmp->end > end))
1047 			goto out;
1048 
1049 	res->start = start;
1050 	res->end = end;
1051 	result = 0;
1052 
1053  out:
1054 	return result;
1055 }
1056 
1057 /**
1058  * adjust_resource - modify a resource's start and size
1059  * @res: resource to modify
1060  * @start: new start value
1061  * @size: new size
1062  *
1063  * Given an existing resource, change its start and size to match the
1064  * arguments.  Returns 0 on success, -EBUSY if it can't fit.
1065  * Existing children of the resource are assumed to be immutable.
1066  */
1067 int adjust_resource(struct resource *res, resource_size_t start,
1068 		    resource_size_t size)
1069 {
1070 	int result;
1071 
1072 	write_lock(&resource_lock);
1073 	result = __adjust_resource(res, start, size);
1074 	write_unlock(&resource_lock);
1075 	return result;
1076 }
1077 EXPORT_SYMBOL(adjust_resource);
1078 
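/*
 * Example (a sketch): doubling a region in place; this fails with
 * -EBUSY if the new size would run into the parent's end or a sibling.
 *
 *	err = adjust_resource(res, res->start, 2 * resource_size(res));
 */
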
1079 static void __init
1080 __reserve_region_with_split(struct resource *root, resource_size_t start,
1081 			    resource_size_t end, const char *name)
1082 {
1083 	struct resource *parent = root;
1084 	struct resource *conflict;
1085 	struct resource *res = alloc_resource(GFP_ATOMIC);
1086 	struct resource *next_res = NULL;
1087 	int type = resource_type(root);
1088 
1089 	if (!res)
1090 		return;
1091 
1092 	res->name = name;
1093 	res->start = start;
1094 	res->end = end;
1095 	res->flags = type | IORESOURCE_BUSY;
1096 	res->desc = IORES_DESC_NONE;
1097 
1098 	while (1) {
1099 
1100 		conflict = __request_resource(parent, res);
1101 		if (!conflict) {
1102 			if (!next_res)
1103 				break;
1104 			res = next_res;
1105 			next_res = NULL;
1106 			continue;
1107 		}
1108 
1109 		/* conflict covered whole area */
1110 		if (conflict->start <= res->start &&
1111 				conflict->end >= res->end) {
1112 			free_resource(res);
1113 			WARN_ON(next_res);
1114 			break;
1115 		}
1116 
1117 		/* failed, split and try again */
1118 		if (conflict->start > res->start) {
1119 			end = res->end;
1120 			res->end = conflict->start - 1;
1121 			if (conflict->end < end) {
1122 				next_res = alloc_resource(GFP_ATOMIC);
1123 				if (!next_res) {
1124 					free_resource(res);
1125 					break;
1126 				}
1127 				next_res->name = name;
1128 				next_res->start = conflict->end + 1;
1129 				next_res->end = end;
1130 				next_res->flags = type | IORESOURCE_BUSY;
1131 				next_res->desc = IORES_DESC_NONE;
1132 			}
1133 		} else {
1134 			res->start = conflict->end + 1;
1135 		}
1136 	}
1137 
1138 }
1139 
1140 void __init
1141 reserve_region_with_split(struct resource *root, resource_size_t start,
1142 			  resource_size_t end, const char *name)
1143 {
1144 	int abort = 0;
1145 
1146 	write_lock(&resource_lock);
1147 	if (root->start > start || root->end < end) {
1148 		pr_err("requested range [0x%llx-0x%llx] not in root %pr\n",
1149 		       (unsigned long long)start, (unsigned long long)end,
1150 		       root);
1151 		if (start > root->end || end < root->start)
1152 			abort = 1;
1153 		else {
1154 			if (end > root->end)
1155 				end = root->end;
1156 			if (start < root->start)
1157 				start = root->start;
1158 			pr_err("fixing request to [0x%llx-0x%llx]\n",
1159 			       (unsigned long long)start,
1160 			       (unsigned long long)end);
1161 		}
1162 		dump_stack();
1163 	}
1164 	if (!abort)
1165 		__reserve_region_with_split(root, start, end, name);
1166 	write_unlock(&resource_lock);
1167 }
1168 
1169 /**
1170  * resource_alignment - calculate resource's alignment
1171  * @res: resource pointer
1172  *
1173  * Returns alignment on success, 0 (invalid alignment) on failure.
1174  */
1175 resource_size_t resource_alignment(struct resource *res)
1176 {
1177 	switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
1178 	case IORESOURCE_SIZEALIGN:
1179 		return resource_size(res);
1180 	case IORESOURCE_STARTALIGN:
1181 		return res->start;
1182 	default:
1183 		return 0;
1184 	}
1185 }
1186 
1187 /*
1188  * This is compatibility stuff for IO resources.
1189  *
1190  * Note how this, unlike the above, knows about
1191  * the IO flag meanings (busy etc).
1192  *
1193  * request_region creates a new busy region.
1194  *
1195  * release_region releases a matching busy region.
1196  */
1197 
1198 static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait);
1199 
1200 static struct inode *iomem_inode;
1201 
1202 #ifdef CONFIG_IO_STRICT_DEVMEM
1203 static void revoke_iomem(struct resource *res)
1204 {
1205 	/* pairs with smp_store_release() in iomem_init_inode() */
1206 	struct inode *inode = smp_load_acquire(&iomem_inode);
1207 
1208 	/*
1209 	 * Check that the initialization has completed. Losing the race
1210 	 * is ok because it means drivers are claiming resources before
1211 	 * the fs_initcall level of init, which prevents iomem_get_mapping()
1212 	 * users from establishing mappings.
1213 	 */
1214 	if (!inode)
1215 		return;
1216 
1217 	/*
1218 	 * The expectation is that the driver has successfully marked
1219 	 * the resource busy by this point, so devmem_is_allowed()
1220 	 * should start returning false, however for performance this
1221 	 * does not iterate the entire resource range.
1222 	 */
1223 	if (devmem_is_allowed(PHYS_PFN(res->start)) &&
1224 	    devmem_is_allowed(PHYS_PFN(res->end))) {
1225 		/*
1226 		 * *cringe* iomem=relaxed says "go ahead, what's the
1227 		 * worst that can happen?"
1228 		 */
1229 		return;
1230 	}
1231 
1232 	unmap_mapping_range(inode->i_mapping, res->start, resource_size(res), 1);
1233 }
1234 #else
1235 static void revoke_iomem(struct resource *res) {}
1236 #endif
1237 
1238 struct address_space *iomem_get_mapping(void)
1239 {
1240 	/*
1241 	 * This function is only called from file open paths, hence guaranteed
1242 	 * that fs_initcalls have completed and no need to check for NULL. But
1243 	 * since revoke_iomem can be called before the initcall we still need
1244 	 * the barrier to appease checkers.
1245 	 */
1246 	return smp_load_acquire(&iomem_inode)->i_mapping;
1247 }
1248 
1249 static int __request_region_locked(struct resource *res, struct resource *parent,
1250 				   resource_size_t start, resource_size_t n,
1251 				   const char *name, int flags)
1252 {
1253 	DECLARE_WAITQUEUE(wait, current);
1254 
1255 	res->name = name;
1256 	res->start = start;
1257 	res->end = start + n - 1;
1258 
1259 	for (;;) {
1260 		struct resource *conflict;
1261 
1262 		res->flags = resource_type(parent) | resource_ext_type(parent);
1263 		res->flags |= IORESOURCE_BUSY | flags;
1264 		res->desc = parent->desc;
1265 
1266 		conflict = __request_resource(parent, res);
1267 		if (!conflict)
1268 			break;
1269 		/*
1270 		 * mm/hmm.c reserves physical addresses which then
1271 		 * become unavailable to other users.  Conflicts are
1272 		 * not expected.  Warn to aid debugging if encountered.
1273 		 */
1274 		if (conflict->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY) {
1275 			pr_warn("Unaddressable device %s %pR conflicts with %pR\n",
1276 				conflict->name, conflict, res);
1277 		}
1278 		if (conflict != parent) {
1279 			if (!(conflict->flags & IORESOURCE_BUSY)) {
1280 				parent = conflict;
1281 				continue;
1282 			}
1283 		}
1284 		if (conflict->flags & flags & IORESOURCE_MUXED) {
1285 			add_wait_queue(&muxed_resource_wait, &wait);
1286 			write_unlock(&resource_lock);
1287 			set_current_state(TASK_UNINTERRUPTIBLE);
1288 			schedule();
1289 			remove_wait_queue(&muxed_resource_wait, &wait);
1290 			write_lock(&resource_lock);
1291 			continue;
1292 		}
1293 		/* Uhhuh, that didn't work out.. */
1294 		return -EBUSY;
1295 	}
1296 
1297 	return 0;
1298 }
1299 
1300 /**
1301  * __request_region - create a new busy resource region
1302  * @parent: parent resource descriptor
1303  * @start: resource start address
1304  * @n: resource region size
1305  * @name: reserving caller's ID string
1306  * @flags: IO resource flags
1307  */
1308 struct resource *__request_region(struct resource *parent,
1309 				  resource_size_t start, resource_size_t n,
1310 				  const char *name, int flags)
1311 {
1312 	struct resource *res = alloc_resource(GFP_KERNEL);
1313 	int ret;
1314 
1315 	if (!res)
1316 		return NULL;
1317 
1318 	write_lock(&resource_lock);
1319 	ret = __request_region_locked(res, parent, start, n, name, flags);
1320 	write_unlock(&resource_lock);
1321 
1322 	if (ret) {
1323 		free_resource(res);
1324 		return NULL;
1325 	}
1326 
1327 	if (parent == &iomem_resource)
1328 		revoke_iomem(res);
1329 
1330 	return res;
1331 }
1332 EXPORT_SYMBOL(__request_region);
1333 
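/*
 * Typical driver usage (a sketch; name and addresses are made up) via
 * the request_mem_region()/release_mem_region() wrappers from
 * <linux/ioport.h>, which funnel into __request_region() and
 * __release_region():
 *
 *	if (!request_mem_region(0xfed40000, 0x1000, "foo-dev"))
 *		return -EBUSY;
 *	regs = ioremap(0xfed40000, 0x1000);
 *	...
 *	iounmap(regs);
 *	release_mem_region(0xfed40000, 0x1000);
 */
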
1334 /**
1335  * __release_region - release a previously reserved resource region
1336  * @parent: parent resource descriptor
1337  * @start: resource start address
1338  * @n: resource region size
1339  *
1340  * The described resource region must match a currently busy region.
1341  */
1342 void __release_region(struct resource *parent, resource_size_t start,
1343 		      resource_size_t n)
1344 {
1345 	struct resource **p;
1346 	resource_size_t end;
1347 
1348 	p = &parent->child;
1349 	end = start + n - 1;
1350 
1351 	write_lock(&resource_lock);
1352 
1353 	for (;;) {
1354 		struct resource *res = *p;
1355 
1356 		if (!res)
1357 			break;
1358 		if (res->start <= start && res->end >= end) {
1359 			if (!(res->flags & IORESOURCE_BUSY)) {
1360 				p = &res->child;
1361 				continue;
1362 			}
1363 			if (res->start != start || res->end != end)
1364 				break;
1365 			*p = res->sibling;
1366 			write_unlock(&resource_lock);
1367 			if (res->flags & IORESOURCE_MUXED)
1368 				wake_up(&muxed_resource_wait);
1369 			free_resource(res);
1370 			return;
1371 		}
1372 		p = &res->sibling;
1373 	}
1374 
1375 	write_unlock(&resource_lock);
1376 
1377 	pr_warn("Trying to free nonexistent resource <%pa-%pa>\n", &start, &end);
1378 }
1379 EXPORT_SYMBOL(__release_region);
1380 
1381 #ifdef CONFIG_MEMORY_HOTREMOVE
1382 /**
1383  * release_mem_region_adjustable - release a previously reserved memory region
1384  * @start: resource start address
1385  * @size: resource region size
1386  *
1387  * This interface is intended for memory hot-delete.  The requested region
1388  * is released from a currently busy memory resource.  The requested region
1389  * must either match exactly or fit into a single busy resource entry.  In
1390  * the latter case, the remaining resource is adjusted accordingly.
1391  * Existing children of the busy memory resource must be immutable in the
1392  * request.
1393  *
1394  * Note:
1395  * - Additional release conditions, such as overlapping region, can be
1396  *   supported after they are confirmed as valid cases.
1397  * - When a busy memory resource gets split into two entries, the code
1398  *   assumes that all children remain in the lower address entry for
1399  *   simplicity.  Enhance this logic when necessary.
1400  */
1401 void release_mem_region_adjustable(resource_size_t start, resource_size_t size)
1402 {
1403 	struct resource *parent = &iomem_resource;
1404 	struct resource *new_res = NULL;
1405 	bool alloc_nofail = false;
1406 	struct resource **p;
1407 	struct resource *res;
1408 	resource_size_t end;
1409 
1410 	end = start + size - 1;
1411 	if (WARN_ON_ONCE((start < parent->start) || (end > parent->end)))
1412 		return;
1413 
1414 	/*
1415 	 * We free up quite a lot of memory on memory hotunplug (esp. the
1416 	 * memmap), just before releasing the region. This is highly unlikely
1417 	 * to fail - let's play it safe and make it never fail, as the caller
1418 	 * cannot perform any error handling (e.g., trying to re-add memory
1419 	 * will fail similarly).
1420 	 */
1421 retry:
1422 	new_res = alloc_resource(GFP_KERNEL | (alloc_nofail ? __GFP_NOFAIL : 0));
1423 
1424 	p = &parent->child;
1425 	write_lock(&resource_lock);
1426 
1427 	while ((res = *p)) {
1428 		if (res->start >= end)
1429 			break;
1430 
1431 		/* look for the next resource if this one does not fit the request */
1432 		if (res->start > start || res->end < end) {
1433 			p = &res->sibling;
1434 			continue;
1435 		}
1436 
1437 		if (!(res->flags & IORESOURCE_MEM))
1438 			break;
1439 
1440 		if (!(res->flags & IORESOURCE_BUSY)) {
1441 			p = &res->child;
1442 			continue;
1443 		}
1444 
1445 		/* found the target resource; let's adjust accordingly */
1446 		if (res->start == start && res->end == end) {
1447 			/* free the whole entry */
1448 			*p = res->sibling;
1449 			free_resource(res);
1450 		} else if (res->start == start && res->end != end) {
1451 			/* adjust the start */
1452 			WARN_ON_ONCE(__adjust_resource(res, end + 1,
1453 						       res->end - end));
1454 		} else if (res->start != start && res->end == end) {
1455 			/* adjust the end */
1456 			WARN_ON_ONCE(__adjust_resource(res, res->start,
1457 						       start - res->start));
1458 		} else {
1459 			/* split into two entries - we need a new resource */
1460 			if (!new_res) {
1461 				new_res = alloc_resource(GFP_ATOMIC);
1462 				if (!new_res) {
1463 					alloc_nofail = true;
1464 					write_unlock(&resource_lock);
1465 					goto retry;
1466 				}
1467 			}
1468 			new_res->name = res->name;
1469 			new_res->start = end + 1;
1470 			new_res->end = res->end;
1471 			new_res->flags = res->flags;
1472 			new_res->desc = res->desc;
1473 			new_res->parent = res->parent;
1474 			new_res->sibling = res->sibling;
1475 			new_res->child = NULL;
1476 
1477 			if (WARN_ON_ONCE(__adjust_resource(res, res->start,
1478 							   start - res->start)))
1479 				break;
1480 			res->sibling = new_res;
1481 			new_res = NULL;
1482 		}
1483 
1484 		break;
1485 	}
1486 
1487 	write_unlock(&resource_lock);
1488 	free_resource(new_res);
1489 }
1490 #endif	/* CONFIG_MEMORY_HOTREMOVE */
1491 
1492 #ifdef CONFIG_MEMORY_HOTPLUG
1493 static bool system_ram_resources_mergeable(struct resource *r1,
1494 					   struct resource *r2)
1495 {
1496 	/* We assume either r1 or r2 is IORESOURCE_SYSRAM_MERGEABLE. */
1497 	return r1->flags == r2->flags && r1->end + 1 == r2->start &&
1498 	       r1->name == r2->name && r1->desc == r2->desc &&
1499 	       !r1->child && !r2->child;
1500 }
1501 
1502 /**
1503  * merge_system_ram_resource - mark the System RAM resource mergeable and try to
1504  *	merge it with adjacent, mergeable resources
1505  * @res: resource descriptor
1506  *
1507  * This interface is intended for memory hotplug, whereby lots of contiguous
1508  * system ram resources are added (e.g., via add_memory*()) by a driver, and
1509  * the actual resource boundaries are not of interest (e.g., it might be
1510  * relevant for DIMMs). Only resources that are marked mergeable, that have the
1511  * same parent, and that don't have any children are considered. All mergeable
1512  * resources must be immutable during the request.
1513  *
1514  * Note:
1515  * - The caller has to make sure that no pointers to resources that are
1516  *   marked mergeable are used anymore after this call - the resource might
1517  *   be freed and the pointer might be stale!
1518  * - release_mem_region_adjustable() will split on demand on memory hotunplug
1519  */
1520 void merge_system_ram_resource(struct resource *res)
1521 {
1522 	const unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
1523 	struct resource *cur;
1524 
1525 	if (WARN_ON_ONCE((res->flags & flags) != flags))
1526 		return;
1527 
1528 	write_lock(&resource_lock);
1529 	res->flags |= IORESOURCE_SYSRAM_MERGEABLE;
1530 
1531 	/* Try to merge with next item in the list. */
1532 	cur = res->sibling;
1533 	if (cur && system_ram_resources_mergeable(res, cur)) {
1534 		res->end = cur->end;
1535 		res->sibling = cur->sibling;
1536 		free_resource(cur);
1537 	}
1538 
1539 	/* Try to merge with previous item in the list. */
1540 	cur = res->parent->child;
1541 	while (cur && cur->sibling != res)
1542 		cur = cur->sibling;
1543 	if (cur && system_ram_resources_mergeable(cur, res)) {
1544 		cur->end = res->end;
1545 		cur->sibling = res->sibling;
1546 		free_resource(res);
1547 	}
1548 	write_unlock(&resource_lock);
1549 }
1550 #endif	/* CONFIG_MEMORY_HOTPLUG */
1551 
1552 /*
1553  * Managed region resource
1554  */
1555 static void devm_resource_release(struct device *dev, void *ptr)
1556 {
1557 	struct resource **r = ptr;
1558 
1559 	release_resource(*r);
1560 }
1561 
1562 /**
1563  * devm_request_resource() - request and reserve an I/O or memory resource
1564  * @dev: device for which to request the resource
1565  * @root: root of the resource tree from which to request the resource
1566  * @new: descriptor of the resource to request
1567  *
1568  * This is a device-managed version of request_resource(). There is usually
1569  * no need to release resources requested by this function explicitly since
1570  * that will be taken care of when the device is unbound from its driver.
1571  * If for some reason the resource needs to be released explicitly, because
1572  * of ordering issues for example, drivers must call devm_release_resource()
1573  * rather than the regular release_resource().
1574  *
1575  * When a conflict is detected between any existing resources and the newly
1576  * requested resource, an error message will be printed.
1577  *
1578  * Returns 0 on success or a negative error code on failure.
1579  */
1580 int devm_request_resource(struct device *dev, struct resource *root,
1581 			  struct resource *new)
1582 {
1583 	struct resource *conflict, **ptr;
1584 
1585 	ptr = devres_alloc(devm_resource_release, sizeof(*ptr), GFP_KERNEL);
1586 	if (!ptr)
1587 		return -ENOMEM;
1588 
1589 	*ptr = new;
1590 
1591 	conflict = request_resource_conflict(root, new);
1592 	if (conflict) {
1593 		dev_err(dev, "resource collision: %pR conflicts with %s %pR\n",
1594 			new, conflict->name, conflict);
1595 		devres_free(ptr);
1596 		return -EBUSY;
1597 	}
1598 
1599 	devres_add(dev, ptr);
1600 	return 0;
1601 }
1602 EXPORT_SYMBOL(devm_request_resource);
1603 
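/*
 * Example (a sketch; the region is hypothetical) from a probe routine,
 * where unwinding happens automatically on driver unbind:
 *
 *	static struct resource foo_reg = DEFINE_RES_MEM(0xfed40000, 0x1000);
 *
 *	ret = devm_request_resource(&pdev->dev, &iomem_resource, &foo_reg);
 *	if (ret)
 *		return ret;
 */
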
1604 static int devm_resource_match(struct device *dev, void *res, void *data)
1605 {
1606 	struct resource **ptr = res;
1607 
1608 	return *ptr == data;
1609 }
1610 
1611 /**
1612  * devm_release_resource() - release a previously requested resource
1613  * @dev: device for which to release the resource
1614  * @new: descriptor of the resource to release
1615  *
1616  * Releases a resource previously requested using devm_request_resource().
1617  */
1618 void devm_release_resource(struct device *dev, struct resource *new)
1619 {
1620 	WARN_ON(devres_release(dev, devm_resource_release, devm_resource_match,
1621 			       new));
1622 }
1623 EXPORT_SYMBOL(devm_release_resource);
1624 
1625 struct region_devres {
1626 	struct resource *parent;
1627 	resource_size_t start;
1628 	resource_size_t n;
1629 };
1630 
1631 static void devm_region_release(struct device *dev, void *res)
1632 {
1633 	struct region_devres *this = res;
1634 
1635 	__release_region(this->parent, this->start, this->n);
1636 }
1637 
1638 static int devm_region_match(struct device *dev, void *res, void *match_data)
1639 {
1640 	struct region_devres *this = res, *match = match_data;
1641 
1642 	return this->parent == match->parent &&
1643 		this->start == match->start && this->n == match->n;
1644 }
1645 
1646 struct resource *
1647 __devm_request_region(struct device *dev, struct resource *parent,
1648 		      resource_size_t start, resource_size_t n, const char *name)
1649 {
1650 	struct region_devres *dr = NULL;
1651 	struct resource *res;
1652 
1653 	dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
1654 			  GFP_KERNEL);
1655 	if (!dr)
1656 		return NULL;
1657 
1658 	dr->parent = parent;
1659 	dr->start = start;
1660 	dr->n = n;
1661 
1662 	res = __request_region(parent, start, n, name, 0);
1663 	if (res)
1664 		devres_add(dev, dr);
1665 	else
1666 		devres_free(dr);
1667 
1668 	return res;
1669 }
1670 EXPORT_SYMBOL(__devm_request_region);
1671 
1672 void __devm_release_region(struct device *dev, struct resource *parent,
1673 			   resource_size_t start, resource_size_t n)
1674 {
1675 	struct region_devres match_data = { parent, start, n };
1676 
1677 	__release_region(parent, start, n);
1678 	WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
1679 			       &match_data));
1680 }
1681 EXPORT_SYMBOL(__devm_release_region);
1682 
1683 /*
1684  * Reserve I/O ports or memory based on "reserve=" kernel parameter.
1685  */
1686 #define MAXRESERVE 4
1687 static int __init reserve_setup(char *str)
1688 {
1689 	static int reserved;
1690 	static struct resource reserve[MAXRESERVE];
1691 
1692 	for (;;) {
1693 		unsigned int io_start, io_num;
1694 		int x = reserved;
1695 		struct resource *parent;
1696 
1697 		if (get_option(&str, &io_start) != 2)
1698 			break;
1699 		if (get_option(&str, &io_num) == 0)
1700 			break;
1701 		if (x < MAXRESERVE) {
1702 			struct resource *res = reserve + x;
1703 
1704 			/*
1705 			 * If the region starts below 0x10000, we assume it's
1706 			 * I/O port space; otherwise assume it's memory.
1707 			 */
1708 			if (io_start < 0x10000) {
1709 				res->flags = IORESOURCE_IO;
1710 				parent = &ioport_resource;
1711 			} else {
1712 				res->flags = IORESOURCE_MEM;
1713 				parent = &iomem_resource;
1714 			}
1715 			res->name = "reserved";
1716 			res->start = io_start;
1717 			res->end = io_start + io_num - 1;
1718 			res->flags |= IORESOURCE_BUSY;
1719 			res->desc = IORES_DESC_NONE;
1720 			res->child = NULL;
1721 			if (request_resource(parent, res) == 0)
1722 				reserved = x+1;
1723 		}
1724 	}
1725 	return 1;
1726 }
1727 __setup("reserve=", reserve_setup);
1728 
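/*
 * Example (illustrative): "reserve=0x320,32,0x4000000,0x1000" on the
 * kernel command line marks I/O ports 0x320-0x33f and 4 KiB of memory
 * at 64 MiB busy, since values below 0x10000 are treated as port space.
 */
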
1729 /*
1730  * Check if the requested addr and size spans more than any slot in the
1731  * iomem resource tree.
1732  */
1733 int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
1734 {
1735 	resource_size_t end = addr + size - 1;
1736 	struct resource *p;
1737 	int err = 0;
1738 
1739 	read_lock(&resource_lock);
1740 	for_each_resource(&iomem_resource, p, false) {
1741 		/*
1742 		 * We can probably skip the resources without
1743 		 * IORESOURCE_IO attribute?
1744 		 */
1745 		if (p->start > end)
1746 			continue;
1747 		if (p->end < addr)
1748 			continue;
1749 		if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
1750 		    PFN_DOWN(p->end) >= PFN_DOWN(end))
1751 			continue;
1752 		/*
1753 		 * if a resource is "BUSY", it's not a hardware resource
1754 		 * but a driver mapping of such a resource; we don't want
1755 		 * to warn for those; some drivers legitimately map only
1756 		 * partial hardware resources. (example: vesafb)
1757 		 */
1758 		if (p->flags & IORESOURCE_BUSY)
1759 			continue;
1760 
1761 		pr_warn("resource sanity check: requesting [mem %pa-%pa], which spans more than %s %pR\n",
1762 			&addr, &end, p->name, p);
1763 		err = -1;
1764 		break;
1765 	}
1766 	read_unlock(&resource_lock);
1767 
1768 	return err;
1769 }
1770 
1771 #ifdef CONFIG_STRICT_DEVMEM
1772 static int strict_iomem_checks = 1;
1773 #else
1774 static int strict_iomem_checks;
1775 #endif
1776 
1777 /*
1778  * Check if an address is exclusive to the kernel and must not be mapped to
1779  * user space, for example, via /dev/mem.
1780  *
1781  * Returns true if exclusive to the kernel, otherwise returns false.
1782  */
1783 bool resource_is_exclusive(struct resource *root, u64 addr, resource_size_t size)
1784 {
1785 	const unsigned int exclusive_system_ram = IORESOURCE_SYSTEM_RAM |
1786 						  IORESOURCE_EXCLUSIVE;
1787 	bool skip_children = false, err = false;
1788 	struct resource *p;
1789 
1790 	read_lock(&resource_lock);
1791 	for_each_resource(root, p, skip_children) {
1792 		if (p->start >= addr + size)
1793 			break;
1794 		if (p->end < addr) {
1795 			skip_children = true;
1796 			continue;
1797 		}
1798 		skip_children = false;
1799 
1800 		/*
1801 		 * IORESOURCE_SYSTEM_RAM resources are exclusive if
1802 		 * IORESOURCE_EXCLUSIVE is set, even if they
1803 		 * are not busy and even if "iomem=relaxed" is set. The
1804 		 * responsible driver dynamically adds/removes system RAM within
1805 		 * such an area and uncontrolled access is dangerous.
1806 		 */
1807 		if ((p->flags & exclusive_system_ram) == exclusive_system_ram) {
1808 			err = true;
1809 			break;
1810 		}
1811 
1812 		/*
1813 		 * A resource is exclusive if IORESOURCE_EXCLUSIVE is set
1814 		 * or CONFIG_IO_STRICT_DEVMEM is enabled and the
1815 		 * resource is busy.
1816 		 */
1817 		if (!strict_iomem_checks || !(p->flags & IORESOURCE_BUSY))
1818 			continue;
1819 		if (IS_ENABLED(CONFIG_IO_STRICT_DEVMEM)
1820 				|| p->flags & IORESOURCE_EXCLUSIVE) {
1821 			err = true;
1822 			break;
1823 		}
1824 	}
1825 	read_unlock(&resource_lock);
1826 
1827 	return err;
1828 }
1829 
1830 bool iomem_is_exclusive(u64 addr)
1831 {
1832 	return resource_is_exclusive(&iomem_resource, addr & PAGE_MASK,
1833 				     PAGE_SIZE);
1834 }
1835 
1836 struct resource_entry *resource_list_create_entry(struct resource *res,
1837 						  size_t extra_size)
1838 {
1839 	struct resource_entry *entry;
1840 
1841 	entry = kzalloc(sizeof(*entry) + extra_size, GFP_KERNEL);
1842 	if (entry) {
1843 		INIT_LIST_HEAD(&entry->node);
1844 		entry->res = res ? res : &entry->__res;
1845 	}
1846 
1847 	return entry;
1848 }
1849 EXPORT_SYMBOL(resource_list_create_entry);
1850 
1851 void resource_list_free(struct list_head *head)
1852 {
1853 	struct resource_entry *entry, *tmp;
1854 
1855 	list_for_each_entry_safe(entry, tmp, head, node)
1856 		resource_list_destroy_entry(entry);
1857 }
1858 EXPORT_SYMBOL(resource_list_free);
1859 
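/*
 * Example (a sketch): building up and tearing down a resource list.
 *
 *	LIST_HEAD(head);
 *	struct resource_entry *entry;
 *
 *	entry = resource_list_create_entry(NULL, 0);
 *	if (!entry)
 *		return -ENOMEM;
 *	entry->res->start = 0xfed40000;
 *	entry->res->end   = 0xfed40fff;
 *	entry->res->flags = IORESOURCE_MEM;
 *	resource_list_add_tail(entry, &head);
 *	...
 *	resource_list_free(&head);
 */
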
1860 #ifdef CONFIG_GET_FREE_REGION
1861 #define GFR_DESCENDING		(1UL << 0)
1862 #define GFR_REQUEST_REGION	(1UL << 1)
1863 #define GFR_DEFAULT_ALIGN (1UL << PA_SECTION_SHIFT)
1864 
1865 static resource_size_t gfr_start(struct resource *base, resource_size_t size,
1866 				 resource_size_t align, unsigned long flags)
1867 {
1868 	if (flags & GFR_DESCENDING) {
1869 		resource_size_t end;
1870 
1871 		end = min_t(resource_size_t, base->end, PHYSMEM_END);
1872 		return end - size + 1;
1873 	}
1874 
1875 	return ALIGN(base->start, align);
1876 }
1877 
1878 static bool gfr_continue(struct resource *base, resource_size_t addr,
1879 			 resource_size_t size, unsigned long flags)
1880 {
1881 	if (flags & GFR_DESCENDING)
1882 		return addr > size && addr >= base->start;
1883 	/*
1884 	 * In the ascending case, be careful that the last increment by
1885 	 * @size did not wrap past 0.
1886 	 */
1887 	return addr > addr - size &&
1888 	       addr <= min_t(resource_size_t, base->end, PHYSMEM_END);
1889 }
1890 
1891 static resource_size_t gfr_next(resource_size_t addr, resource_size_t size,
1892 				unsigned long flags)
1893 {
1894 	if (flags & GFR_DESCENDING)
1895 		return addr - size;
1896 	return addr + size;
1897 }
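
/*
 * Worked example (hypothetical numbers): for a GFR_DESCENDING walk over
 * a base spanning [0x1000, 0x8fff] with size = step = 0x1000,
 * gfr_start() yields 0x8fff - 0x1000 + 1 = 0x8000 and gfr_next() moves
 * the cursor down through 0x7000, 0x6000, ..., 0x2000. gfr_continue()
 * then ends the walk: its "addr > size" test rejects 0x1000, which also
 * guarantees that a further decrement cannot wrap below zero. Note that
 * get_free_mem_region() below passes @align as the step.
 */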
1898 
1899 static void remove_free_mem_region(void *_res)
1900 {
1901 	struct resource *res = _res;
1902 
1903 	if (res->parent)
1904 		remove_resource(res);
1905 	free_resource(res);
1906 }
1907 
1908 static struct resource *
1909 get_free_mem_region(struct device *dev, struct resource *base,
1910 		    resource_size_t size, const unsigned long align,
1911 		    const char *name, const unsigned long desc,
1912 		    const unsigned long flags)
1913 {
1914 	resource_size_t addr;
1915 	struct resource *res;
1916 	struct region_devres *dr = NULL;
1917 
1918 	size = ALIGN(size, align);
1919 
1920 	res = alloc_resource(GFP_KERNEL);
1921 	if (!res)
1922 		return ERR_PTR(-ENOMEM);
1923 
1924 	if (dev && (flags & GFR_REQUEST_REGION)) {
1925 		dr = devres_alloc(devm_region_release,
1926 				sizeof(struct region_devres), GFP_KERNEL);
1927 		if (!dr) {
1928 			free_resource(res);
1929 			return ERR_PTR(-ENOMEM);
1930 		}
1931 	} else if (dev) {
1932 		if (devm_add_action_or_reset(dev, remove_free_mem_region, res))
1933 			return ERR_PTR(-ENOMEM);
1934 	}
1935 
1936 	write_lock(&resource_lock);
1937 	for (addr = gfr_start(base, size, align, flags);
1938 	     gfr_continue(base, addr, align, flags);
1939 	     addr = gfr_next(addr, align, flags)) {
1940 		if (__region_intersects(base, addr, size, 0, IORES_DESC_NONE) !=
1941 		    REGION_DISJOINT)
1942 			continue;
1943 
1944 		if (flags & GFR_REQUEST_REGION) {
1945 			if (__request_region_locked(res, &iomem_resource, addr,
1946 						    size, name, 0))
1947 				break;
1948 
1949 			if (dev) {
1950 				dr->parent = &iomem_resource;
1951 				dr->start = addr;
1952 				dr->n = size;
1953 				devres_add(dev, dr);
1954 			}
1955 
1956 			res->desc = desc;
1957 			write_unlock(&resource_lock);
1958 
1959 			/*
1960 			 * A driver is claiming this region, so revoke any
1961 			 * existing userspace mappings (e.g. via /dev/mem).
1962 			 */
1964 			revoke_iomem(res);
1965 		} else {
1966 			res->start = addr;
1967 			res->end = addr + size - 1;
1968 			res->name = name;
1969 			res->desc = desc;
1970 			res->flags = IORESOURCE_MEM;
1971 
1972 			/*
1973 			 * Only succeed if the resource hosts an exclusive
1974 			 * range (no children) after the insert.
1975 			 */
1976 			if (__insert_resource(base, res) || res->child)
1977 				break;
1978 
1979 			write_unlock(&resource_lock);
1980 		}
1981 
1982 		return res;
1983 	}
1984 	write_unlock(&resource_lock);
1985 
1986 	if (flags & GFR_REQUEST_REGION) {
1987 		free_resource(res);
1988 		devres_free(dr);
1989 	} else if (dev)
1990 		devm_release_action(dev, remove_free_mem_region, res);
1991 
1992 	return ERR_PTR(-ERANGE);
1993 }
1994 
1995 /**
1996  * devm_request_free_mem_region - find free region for device private memory
1997  *
1998  * @dev: device struct to bind the resource to
1999  * @base: resource tree to look in
2000  * @size: size in bytes of the device memory to add
2001  *
2002  * This function tries to find an empty range of physical address space
2003  * big enough to contain the new resource, so that it can later be
2004  * hotplugged as ZONE_DEVICE memory, which in turn allocates struct pages.
2005  */
2006 struct resource *devm_request_free_mem_region(struct device *dev,
2007 		struct resource *base, unsigned long size)
2008 {
2009 	unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION;
2010 
2011 	return get_free_mem_region(dev, base, size, GFR_DEFAULT_ALIGN,
2012 				   dev_name(dev),
2013 				   IORES_DESC_DEVICE_PRIVATE_MEMORY, flags);
2014 }
2015 EXPORT_SYMBOL_GPL(devm_request_free_mem_region);
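
/*
 * Usage sketch, in the spirit of device-private-memory users such as
 * lib/test_hmm.c (the size constant is an arbitrary example):
 *
 *	struct resource *res;
 *
 *	res = devm_request_free_mem_region(dev, &iomem_resource, SZ_64M);
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *
 * The returned range typically seeds a struct dev_pagemap for
 * memremap_pages(); the region is released automatically through devres
 * on device teardown.
 */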
2016 
2017 struct resource *request_free_mem_region(struct resource *base,
2018 		unsigned long size, const char *name)
2019 {
2020 	unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION;
2021 
2022 	return get_free_mem_region(NULL, base, size, GFR_DEFAULT_ALIGN, name,
2023 				   IORES_DESC_DEVICE_PRIVATE_MEMORY, flags);
2024 }
2025 EXPORT_SYMBOL_GPL(request_free_mem_region);
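
/*
 * Lifetime sketch for the non-devm variant (caller-managed): with no
 * struct device to hang a devres action off, the caller gives the
 * region back itself, e.g.:
 *
 *	res = request_free_mem_region(&iomem_resource, SZ_64M, "sketch");
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *	...
 *	release_mem_region(res->start, resource_size(res));
 */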
2026 
2027 /**
2028  * alloc_free_mem_region - find a free region relative to @base
2029  * @base: resource that will parent the new resource
2030  * @size: size in bytes of memory to allocate from @base
2031  * @align: alignment requirements for the allocation
2032  * @name: resource name
2033  *
2034  * Buses like CXL, which can dynamically instantiate new memory regions,
2035  * need a method to allocate physical address space for those regions.
2036  * Allocate and insert a new resource covering a free range in the span
2037  * of @base that is not already claimed by any descendant of @base.
2038  */
2039 struct resource *alloc_free_mem_region(struct resource *base,
2040 				       unsigned long size, unsigned long align,
2041 				       const char *name)
2042 {
2043 	/* Default: ascending search order, insert rather than request */
2044 	unsigned long flags = 0;
2045 
2046 	return get_free_mem_region(NULL, base, size, align, name,
2047 				   IORES_DESC_NONE, flags);
2048 }
2049 EXPORT_SYMBOL_NS_GPL(alloc_free_mem_region, CXL);
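
/*
 * Usage sketch (hypothetical CXL-flavored caller): carve a 256MiB,
 * 256MiB-aligned range out of a host bridge window resource:
 *
 *	res = alloc_free_mem_region(cxl_window, SZ_256M, SZ_256M,
 *				    "region0");
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *
 * Unlike the request_*() variants above, the result is inserted but not
 * marked IORESOURCE_BUSY, so child resources can still be requested
 * within it later.
 */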
2050 #endif /* CONFIG_GET_FREE_REGION */
2051 
2052 static int __init strict_iomem(char *str)
2053 {
2054 	if (strstr(str, "relaxed"))
2055 		strict_iomem_checks = 0;
2056 	if (strstr(str, "strict"))
2057 		strict_iomem_checks = 1;
2058 	return 1;
2059 }
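
/*
 * Example: booting with "iomem=relaxed" on the kernel command line
 * clears strict_iomem_checks and relaxes the busy-resource test in
 * resource_is_exclusive() above; "iomem=strict" forces the check on,
 * matching the CONFIG_STRICT_DEVMEM default.
 */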
2060 
2061 static int iomem_fs_init_fs_context(struct fs_context *fc)
2062 {
2063 	return init_pseudo(fc, DEVMEM_MAGIC) ? 0 : -ENOMEM;
2064 }
2065 
2066 static struct file_system_type iomem_fs_type = {
2067 	.name		= "iomem",
2068 	.owner		= THIS_MODULE,
2069 	.init_fs_context = iomem_fs_init_fs_context,
2070 	.kill_sb	= kill_anon_super,
2071 };
2072 
2073 static int __init iomem_init_inode(void)
2074 {
2075 	static struct vfsmount *iomem_vfs_mount;
2076 	static int iomem_fs_cnt;
2077 	struct inode *inode;
2078 	int rc;
2079 
2080 	rc = simple_pin_fs(&iomem_fs_type, &iomem_vfs_mount, &iomem_fs_cnt);
2081 	if (rc < 0) {
2082 		pr_err("Cannot mount iomem pseudo filesystem: %d\n", rc);
2083 		return rc;
2084 	}
2085 
2086 	inode = alloc_anon_inode(iomem_vfs_mount->mnt_sb);
2087 	if (IS_ERR(inode)) {
2088 		rc = PTR_ERR(inode);
2089 		pr_err("Cannot allocate inode for iomem: %d\n", rc);
2090 		simple_release_fs(&iomem_vfs_mount, &iomem_fs_cnt);
2091 		return rc;
2092 	}
2093 
2094 	/*
2095 	 * Publish the now-initialized iomem revocation inode.
2096 	 * Pairs with smp_load_acquire() in revoke_iomem().
2097 	 */
2098 	smp_store_release(&iomem_inode, inode);
2099 
2100 	return 0;
2101 }
2102 
2103 fs_initcall(iomem_init_inode);
2104 
2105 __setup("iomem=", strict_iomem);
2106