xref: /linux/kernel/resource.c (revision 2942242dde896ea8544f321617c86f941899c544)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *	linux/kernel/resource.c
4  *
5  * Copyright (C) 1999	Linus Torvalds
6  * Copyright (C) 1999	Martin Mares <mj@ucw.cz>
7  *
8  * Arbitrary resource management.
9  */
10 
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 
13 #include <linux/export.h>
14 #include <linux/errno.h>
15 #include <linux/ioport.h>
16 #include <linux/init.h>
17 #include <linux/slab.h>
18 #include <linux/spinlock.h>
19 #include <linux/fs.h>
20 #include <linux/proc_fs.h>
21 #include <linux/pseudo_fs.h>
22 #include <linux/sched.h>
23 #include <linux/seq_file.h>
24 #include <linux/device.h>
25 #include <linux/pfn.h>
26 #include <linux/mm.h>
27 #include <linux/mount.h>
28 #include <linux/resource_ext.h>
29 #include <uapi/linux/magic.h>
30 #include <linux/string.h>
31 #include <linux/vmalloc.h>
32 #include <asm/io.h>
33 
34 
35 struct resource ioport_resource = {
36 	.name	= "PCI IO",
37 	.start	= 0,
38 	.end	= IO_SPACE_LIMIT,
39 	.flags	= IORESOURCE_IO,
40 };
41 EXPORT_SYMBOL(ioport_resource);
42 
43 struct resource iomem_resource = {
44 	.name	= "PCI mem",
45 	.start	= 0,
46 	.end	= -1,
47 	.flags	= IORESOURCE_MEM,
48 };
49 EXPORT_SYMBOL(iomem_resource);
50 
51 static DEFINE_RWLOCK(resource_lock);
52 
53 /*
54  * Return the next node of @p in pre-order tree traversal.  If
55  * @skip_children is true, skip the descendant nodes of @p in
56  * traversal.  If @p is a descendant of @subtree_root, only traverse
57  * the subtree under @subtree_root.
58  */
59 static struct resource *next_resource(struct resource *p, bool skip_children,
60 				      struct resource *subtree_root)
61 {
62 	if (!skip_children && p->child)
63 		return p->child;
64 	while (!p->sibling && p->parent) {
65 		p = p->parent;
66 		if (p == subtree_root)
67 			return NULL;
68 	}
69 	return p->sibling;
70 }
71 
72 /*
73  * Traverse the resource subtree under @_root in pre-order, excluding
74  * @_root itself.
75  *
76  * NOTE: '__p' is introduced to avoid shadowing '_p' outside of the loop;
77  * it is referenced in the loop condition to avoid an unused-variable warning.
78  */
79 #define for_each_resource(_root, _p, _skip_children) \
80 	for (typeof(_root) __root = (_root), __p = _p = __root->child;	\
81 	     __p && _p; _p = next_resource(_p, _skip_children, __root))
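/*
 * Example (editor's sketch, not part of the original file): walking the
 * whole iomem tree with the macro above.  The traversal follows raw
 * child/sibling pointers, so resource_lock must be held, as the procfs
 * code below does.
 *
 *	struct resource *p;
 *
 *	read_lock(&resource_lock);
 *	for_each_resource(&iomem_resource, p, false)
 *		pr_info("%pR\n", p);
 *	read_unlock(&resource_lock);
 */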
82 
83 #ifdef CONFIG_PROC_FS
84 
85 enum { MAX_IORES_LEVEL = 5 };
86 
87 static void *r_start(struct seq_file *m, loff_t *pos)
88 	__acquires(resource_lock)
89 {
90 	struct resource *root = pde_data(file_inode(m->file));
91 	struct resource *p;
92 	loff_t l = *pos;
93 
94 	read_lock(&resource_lock);
95 	for_each_resource(root, p, false) {
96 		if (l-- == 0)
97 			break;
98 	}
99 
100 	return p;
101 }
102 
103 static void *r_next(struct seq_file *m, void *v, loff_t *pos)
104 {
105 	struct resource *p = v;
106 
107 	(*pos)++;
108 
109 	return (void *)next_resource(p, false, NULL);
110 }
111 
112 static void r_stop(struct seq_file *m, void *v)
113 	__releases(resource_lock)
114 {
115 	read_unlock(&resource_lock);
116 }
117 
118 static int r_show(struct seq_file *m, void *v)
119 {
120 	struct resource *root = pde_data(file_inode(m->file));
121 	struct resource *r = v, *p;
122 	unsigned long long start, end;
123 	int width = root->end < 0x10000 ? 4 : 8;
124 	int depth;
125 
126 	for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
127 		if (p->parent == root)
128 			break;
129 
130 	if (file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN)) {
131 		start = r->start;
132 		end = r->end;
133 	} else {
134 		start = end = 0;
135 	}
136 
137 	seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
138 			depth * 2, "",
139 			width, start,
140 			width, end,
141 			r->name ? r->name : "<BAD>");
142 	return 0;
143 }
144 
145 static const struct seq_operations resource_op = {
146 	.start	= r_start,
147 	.next	= r_next,
148 	.stop	= r_stop,
149 	.show	= r_show,
150 };
151 
152 static int __init ioresources_init(void)
153 {
154 	proc_create_seq_data("ioports", 0, NULL, &resource_op,
155 			&ioport_resource);
156 	proc_create_seq_data("iomem", 0, NULL, &resource_op, &iomem_resource);
157 	return 0;
158 }
159 __initcall(ioresources_init);
160 
161 #endif /* CONFIG_PROC_FS */
162 
163 static void free_resource(struct resource *res)
164 {
165 	/*
166 	 * If the resource was allocated using memblock early during boot
167 	 * we'll leak it here: we can only return full pages back to the
168 	 * buddy and trying to be smart and reusing them eventually in
169 	 * alloc_resource() overcomplicates resource handling.
170 	 */
171 	if (res && PageSlab(virt_to_head_page(res)))
172 		kfree(res);
173 }
174 
175 static struct resource *alloc_resource(gfp_t flags)
176 {
177 	return kzalloc(sizeof(struct resource), flags);
178 }
179 
180 /* Return the conflict entry if you can't request it */
181 static struct resource * __request_resource(struct resource *root, struct resource *new)
182 {
183 	resource_size_t start = new->start;
184 	resource_size_t end = new->end;
185 	struct resource *tmp, **p;
186 
187 	if (end < start)
188 		return root;
189 	if (start < root->start)
190 		return root;
191 	if (end > root->end)
192 		return root;
193 	p = &root->child;
194 	for (;;) {
195 		tmp = *p;
196 		if (!tmp || tmp->start > end) {
197 			new->sibling = tmp;
198 			*p = new;
199 			new->parent = root;
200 			return NULL;
201 		}
202 		p = &tmp->sibling;
203 		if (tmp->end < start)
204 			continue;
205 		return tmp;
206 	}
207 }
208 
209 static int __release_resource(struct resource *old, bool release_child)
210 {
211 	struct resource *tmp, **p, *chd;
212 
213 	p = &old->parent->child;
214 	for (;;) {
215 		tmp = *p;
216 		if (!tmp)
217 			break;
218 		if (tmp == old) {
219 			if (release_child || !(tmp->child)) {
220 				*p = tmp->sibling;
221 			} else {
222 				for (chd = tmp->child;; chd = chd->sibling) {
223 					chd->parent = tmp->parent;
224 					if (!(chd->sibling))
225 						break;
226 				}
227 				*p = tmp->child;
228 				chd->sibling = tmp->sibling;
229 			}
230 			old->parent = NULL;
231 			return 0;
232 		}
233 		p = &tmp->sibling;
234 	}
235 	return -EINVAL;
236 }
237 
238 static void __release_child_resources(struct resource *r)
239 {
240 	struct resource *tmp, *p;
241 	resource_size_t size;
242 
243 	p = r->child;
244 	r->child = NULL;
245 	while (p) {
246 		tmp = p;
247 		p = p->sibling;
248 
249 		tmp->parent = NULL;
250 		tmp->sibling = NULL;
251 		__release_child_resources(tmp);
252 
253 		printk(KERN_DEBUG "release child resource %pR\n", tmp);
254 		/* need to restore size, and keep flags */
255 		size = resource_size(tmp);
256 		tmp->start = 0;
257 		tmp->end = size - 1;
258 	}
259 }
260 
261 void release_child_resources(struct resource *r)
262 {
263 	write_lock(&resource_lock);
264 	__release_child_resources(r);
265 	write_unlock(&resource_lock);
266 }
267 
268 /**
269  * request_resource_conflict - request and reserve an I/O or memory resource
270  * @root: root resource descriptor
271  * @new: resource descriptor desired by caller
272  *
273  * Returns NULL for success, conflict resource on error.
274  */
275 struct resource *request_resource_conflict(struct resource *root, struct resource *new)
276 {
277 	struct resource *conflict;
278 
279 	write_lock(&resource_lock);
280 	conflict = __request_resource(root, new);
281 	write_unlock(&resource_lock);
282 	return conflict;
283 }
284 
285 /**
286  * request_resource - request and reserve an I/O or memory resource
287  * @root: root resource descriptor
288  * @new: resource descriptor desired by caller
289  *
290  * Returns 0 for success, negative error code on error.
291  */
292 int request_resource(struct resource *root, struct resource *new)
293 {
294 	struct resource *conflict;
295 
296 	conflict = request_resource_conflict(root, new);
297 	return conflict ? -EBUSY : 0;
298 }
299 
300 EXPORT_SYMBOL(request_resource);
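/*
 * Usage sketch (illustrative; address, size, and name are made up): a
 * driver that owns a fixed MMIO window claims it with a statically
 * defined resource and releases it on teardown.
 *
 *	static struct resource foo_mmio =
 *		DEFINE_RES_MEM_NAMED(0xfed40000, 0x80, "foo-mmio");
 *
 *	if (request_resource(&iomem_resource, &foo_mmio))
 *		pr_err("foo-mmio: range is busy\n");
 *	...
 *	release_resource(&foo_mmio);
 */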
301 
302 /**
303  * release_resource - release a previously reserved resource
304  * @old: resource pointer
305  */
306 int release_resource(struct resource *old)
307 {
308 	int retval;
309 
310 	write_lock(&resource_lock);
311 	retval = __release_resource(old, true);
312 	write_unlock(&resource_lock);
313 	return retval;
314 }
315 
316 EXPORT_SYMBOL(release_resource);
317 
318 static bool is_type_match(struct resource *p, unsigned long flags, unsigned long desc)
319 {
320 	return (p->flags & flags) == flags && (desc == IORES_DESC_NONE || desc == p->desc);
321 }
322 
323 /**
324  * find_next_iomem_res - Finds the lowest iomem resource that covers part of
325  *			 [@start..@end].
326  *
327  * If a resource is found, returns 0 and @*res is overwritten with the part
328  * of the resource that's within [@start..@end]; if none is found, returns
329  * -ENODEV.  Returns -EINVAL for invalid parameters.
330  *
331  * @start:	start address of the resource searched for
332  * @end:	end address of same resource
333  * @flags:	flags which the resource must have
334  * @desc:	descriptor the resource must have
335  * @res:	return ptr, if resource found
336  *
337  * The caller must specify @start, @end, @flags, and @desc
338  * (which may be IORES_DESC_NONE).
339  */
340 static int find_next_iomem_res(resource_size_t start, resource_size_t end,
341 			       unsigned long flags, unsigned long desc,
342 			       struct resource *res)
343 {
344 	struct resource *p;
345 
346 	if (!res)
347 		return -EINVAL;
348 
349 	if (start >= end)
350 		return -EINVAL;
351 
352 	read_lock(&resource_lock);
353 
354 	for_each_resource(&iomem_resource, p, false) {
355 		/* If we passed the resource we are looking for, stop */
356 		if (p->start > end) {
357 			p = NULL;
358 			break;
359 		}
360 
361 		/* Skip until we find a range that matches what we look for */
362 		if (p->end < start)
363 			continue;
364 
365 		/* Found a match, break */
366 		if (is_type_match(p, flags, desc))
367 			break;
368 	}
369 
370 	if (p) {
371 		/* copy data */
372 		*res = (struct resource) {
373 			.start = max(start, p->start),
374 			.end = min(end, p->end),
375 			.flags = p->flags,
376 			.desc = p->desc,
377 			.parent = p->parent,
378 		};
379 	}
380 
381 	read_unlock(&resource_lock);
382 	return p ? 0 : -ENODEV;
383 }
384 
385 static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
386 				 unsigned long flags, unsigned long desc,
387 				 void *arg,
388 				 int (*func)(struct resource *, void *))
389 {
390 	struct resource res;
391 	int ret = -EINVAL;
392 
393 	while (start < end &&
394 	       !find_next_iomem_res(start, end, flags, desc, &res)) {
395 		ret = (*func)(&res, arg);
396 		if (ret)
397 			break;
398 
399 		start = res.end + 1;
400 	}
401 
402 	return ret;
403 }
404 
405 /**
406  * walk_iomem_res_desc - Walks through iomem resources and calls func()
407  *			 with matching resource ranges.
408  *
409  * @desc: I/O resource descriptor. Use IORES_DESC_NONE to skip @desc check.
410  * @flags: I/O resource flags
411  * @start: start addr
412  * @end: end addr
413  * @arg: function argument for the callback @func
414  * @func: callback function that is called for each qualifying resource area
415  *
416  * All the memory ranges which overlap [@start..@end] and also match @flags
417  * and @desc are valid candidates.
418  *
419  * NOTE: For a new descriptor search, define a new IORES_DESC in
420  * <linux/ioport.h> and set it in 'desc' of a target resource entry.
421  */
422 int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
423 		u64 end, void *arg, int (*func)(struct resource *, void *))
424 {
425 	return __walk_iomem_res_desc(start, end, flags, desc, arg, func);
426 }
427 EXPORT_SYMBOL_GPL(walk_iomem_res_desc);
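/*
 * Sketch (illustrative, not from this file): counting resources that
 * carry a given descriptor.  The callback and counter are hypothetical;
 * returning non-zero from the callback stops the walk.
 *
 *	static int count_res(struct resource *res, void *arg)
 *	{
 *		unsigned int *count = arg;
 *
 *		(*count)++;
 *		return 0;
 *	}
 *
 *	unsigned int n = 0;
 *
 *	walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, IORESOURCE_MEM,
 *			    0, -1, &n, count_res);
 */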
428 
429 /*
430  * This function calls the @func callback against all memory ranges of type
431  * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
432  * This function is only for System RAM; it deals with full ranges rather
433  * than PFNs, since dealing with PFNs can truncate ranges that are not
434  * PFN-aligned.
435  */
436 int walk_system_ram_res(u64 start, u64 end, void *arg,
437 			int (*func)(struct resource *, void *))
438 {
439 	unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
440 
441 	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg,
442 				     func);
443 }
444 
445 /*
446  * This function, being a variant of walk_system_ram_res(), calls the @func
447  * callback against all memory ranges of type System RAM which are marked as
448  * IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY, in reverse order, i.e., from
449  * higher to lower addresses.
450  */
451 int walk_system_ram_res_rev(u64 start, u64 end, void *arg,
452 				int (*func)(struct resource *, void *))
453 {
454 	struct resource res, *rams;
455 	int rams_size = 16, i;
456 	unsigned long flags;
457 	int ret = -1;
458 
459 	/* create a list */
460 	rams = kvcalloc(rams_size, sizeof(struct resource), GFP_KERNEL);
461 	if (!rams)
462 		return ret;
463 
464 	flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
465 	i = 0;
466 	while ((start < end) &&
467 		(!find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res))) {
468 		if (i >= rams_size) {
469 			/* re-alloc */
470 			struct resource *rams_new;
471 
472 			rams_new = kvrealloc(rams, (rams_size + 16) * sizeof(struct resource),
473 					     GFP_KERNEL);
474 			if (!rams_new)
475 				goto out;
476 
477 			rams = rams_new;
478 			rams_size += 16;
479 		}
480 
481 		rams[i++] = res;
482 		start = res.end + 1;
483 	}
484 
485 	/* go reverse */
486 	for (i--; i >= 0; i--) {
487 		ret = (*func)(&rams[i], arg);
488 		if (ret)
489 			break;
490 	}
491 
492 out:
493 	kvfree(rams);
494 	return ret;
495 }
496 
497 /*
498  * This function calls the @func callback against all memory ranges, which
499  * are ranges marked as IORESOURCE_MEM and IORESOURCE_BUSY.
500  */
501 int walk_mem_res(u64 start, u64 end, void *arg,
502 		 int (*func)(struct resource *, void *))
503 {
504 	unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;
505 
506 	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg,
507 				     func);
508 }
509 
510 /*
511  * This function calls the @func callback against all memory ranges of type
512  * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
513  * It is to be used only for System RAM.
514  */
515 int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
516 			  void *arg, int (*func)(unsigned long, unsigned long, void *))
517 {
518 	resource_size_t start, end;
519 	unsigned long flags;
520 	struct resource res;
521 	unsigned long pfn, end_pfn;
522 	int ret = -EINVAL;
523 
524 	start = (u64) start_pfn << PAGE_SHIFT;
525 	end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
526 	flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
527 	while (start < end &&
528 	       !find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res)) {
529 		pfn = PFN_UP(res.start);
530 		end_pfn = PFN_DOWN(res.end + 1);
531 		if (end_pfn > pfn)
532 			ret = (*func)(pfn, end_pfn - pfn, arg);
533 		if (ret)
534 			break;
535 		start = res.end + 1;
536 	}
537 	return ret;
538 }
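/*
 * Sketch (illustrative): totalling System RAM pages with the PFN-based
 * walker.  The callback name and the use of max_pfn are assumptions.
 *
 *	static int add_pages(unsigned long pfn, unsigned long nr_pages,
 *			     void *arg)
 *	{
 *		unsigned long *total = arg;
 *
 *		*total += nr_pages;
 *		return 0;
 *	}
 *
 *	unsigned long total = 0;
 *
 *	walk_system_ram_range(0, max_pfn, &total, add_pages);
 */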
539 
540 static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
541 {
542 	return 1;
543 }
544 
545 /*
546  * This generic page_is_ram() returns true if specified address is
547  * registered as System RAM in iomem_resource list.
548  */
549 int __weak page_is_ram(unsigned long pfn)
550 {
551 	return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
552 }
553 EXPORT_SYMBOL_GPL(page_is_ram);
554 
555 static int __region_intersects(struct resource *parent, resource_size_t start,
556 			       size_t size, unsigned long flags,
557 			       unsigned long desc)
558 {
559 	int type = 0, other = 0;
560 	struct resource *p, *dp;
561 	struct resource res, o;
562 	bool covered;
563 
564 	res = DEFINE_RES(start, size, 0);
565 
566 	for (p = parent->child; p ; p = p->sibling) {
567 		if (!resource_intersection(p, &res, &o))
568 			continue;
569 		if (is_type_match(p, flags, desc)) {
570 			type++;
571 			continue;
572 		}
573 		/*
574 		 * Continue to search in descendant resources as if the
575 		 * matched descendant resources cover some ranges of 'p'.
576 		 *
577 		 * |------------- "CXL Window 0" ------------|
578 		 * |-- "System RAM" --|
579 		 *
580 		 * will behave similar as the following fake resource
581 		 * tree when searching "System RAM".
582 		 *
583 		 * |-- "System RAM" --||-- "CXL Window 0a" --|
584 		 */
585 		covered = false;
586 		for_each_resource(p, dp, false) {
587 			if (!resource_overlaps(dp, &res))
588 				continue;
589 			if (is_type_match(dp, flags, desc)) {
590 				type++;
591 				/*
592 				 * Range from 'o.start' to 'dp->start'
593 				 * isn't covered by matched resource.
594 				 */
595 				if (dp->start > o.start)
596 					break;
597 				if (dp->end >= o.end) {
598 					covered = true;
599 					break;
600 				}
601 				/* Remove covered range */
602 				o.start = max(o.start, dp->end + 1);
603 			}
604 		}
605 		if (!covered)
606 			other++;
607 	}
608 
609 	if (type == 0)
610 		return REGION_DISJOINT;
611 
612 	if (other == 0)
613 		return REGION_INTERSECTS;
614 
615 	return REGION_MIXED;
616 }
617 
618 /**
619  * region_intersects() - determine intersection of region with known resources
620  * @start: region start address
621  * @size: size of region
622  * @flags: flags of resource (in iomem_resource)
623  * @desc: descriptor of resource (in iomem_resource) or IORES_DESC_NONE
624  *
625  * Check if the specified region partially overlaps or fully eclipses a
626  * resource identified by @flags and @desc (optional with IORES_DESC_NONE).
627  * Return REGION_DISJOINT if the region does not overlap @flags/@desc,
628  * return REGION_MIXED if the region overlaps @flags/@desc and another
629  * resource, and return REGION_INTERSECTS if the region overlaps @flags/@desc
630  * and no other defined resource. Note that REGION_INTERSECTS is also
631  * returned in the case when the specified region overlaps RAM and undefined
632  * memory holes.
633  *
634  * region_intersects() is used by memory remapping functions to ensure
635  * the user is not remapping RAM; it is a vast speed-up over walking
636  * through the resource table page by page.
637  */
638 int region_intersects(resource_size_t start, size_t size, unsigned long flags,
639 		      unsigned long desc)
640 {
641 	int ret;
642 
643 	read_lock(&resource_lock);
644 	ret = __region_intersects(&iomem_resource, start, size, flags, desc);
645 	read_unlock(&resource_lock);
646 
647 	return ret;
648 }
649 EXPORT_SYMBOL_GPL(region_intersects);
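/*
 * Example (editor's sketch): the canonical use is a remapping helper
 * refusing to map System RAM; @offset and @size are assumed to come
 * from the caller.
 *
 *	if (region_intersects(offset, size, IORESOURCE_SYSTEM_RAM,
 *			      IORES_DESC_NONE) != REGION_DISJOINT)
 *		return ERR_PTR(-EINVAL);
 */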
650 
651 void __weak arch_remove_reservations(struct resource *avail)
652 {
653 }
654 
655 static void resource_clip(struct resource *res, resource_size_t min,
656 			  resource_size_t max)
657 {
658 	if (res->start < min)
659 		res->start = min;
660 	if (res->end > max)
661 		res->end = max;
662 }
663 
664 /*
665  * Find empty space in the resource tree with the given range and
666  * alignment constraints
667  */
668 static int __find_resource_space(struct resource *root, struct resource *old,
669 				 struct resource *new, resource_size_t size,
670 				 struct resource_constraint *constraint)
671 {
672 	struct resource *this = root->child;
673 	struct resource tmp = *new, avail, alloc;
674 	resource_alignf alignf = constraint->alignf;
675 
676 	tmp.start = root->start;
677 	/*
678 	 * Skip past an allocated resource that starts at 0, since the assignment
679 	 * of this->start - 1 to tmp->end below would cause an underflow.
680 	 */
681 	if (this && this->start == root->start) {
682 		tmp.start = (this == old) ? old->start : this->end + 1;
683 		this = this->sibling;
684 	}
685 	for (;;) {
686 		if (this)
687 			tmp.end = (this == old) ?  this->end : this->start - 1;
688 		else
689 			tmp.end = root->end;
690 
691 		if (tmp.end < tmp.start)
692 			goto next;
693 
694 		resource_clip(&tmp, constraint->min, constraint->max);
695 		arch_remove_reservations(&tmp);
696 
697 		/* Check for overflow after ALIGN() */
698 		avail.start = ALIGN(tmp.start, constraint->align);
699 		avail.end = tmp.end;
700 		avail.flags = new->flags & ~IORESOURCE_UNSET;
701 		if (avail.start >= tmp.start) {
702 			alloc.flags = avail.flags;
703 			if (alignf) {
704 				alloc.start = alignf(constraint->alignf_data,
705 						     &avail, size, constraint->align);
706 			} else {
707 				alloc.start = avail.start;
708 			}
709 			alloc.end = alloc.start + size - 1;
710 			if (alloc.start <= alloc.end &&
711 			    resource_contains(&avail, &alloc)) {
712 				new->start = alloc.start;
713 				new->end = alloc.end;
714 				return 0;
715 			}
716 		}
717 
718 next:		if (!this || this->end == root->end)
719 			break;
720 
721 		if (this != old)
722 			tmp.start = this->end + 1;
723 		this = this->sibling;
724 	}
725 	return -EBUSY;
726 }
727 
728 /**
729  * find_resource_space - Find empty space in the resource tree
730  * @root:	Root resource descriptor
731  * @new:	Resource descriptor awaiting an empty resource space
732  * @size:	The minimum size of the empty space
733  * @constraint:	The range and alignment constraints to be met
734  *
735  * Finds an empty space under @root in the resource tree satisfying range and
736  * alignment @constraints.
737  *
738  * Return:
739  * * %0		- if successful, @new members start, end, and flags are altered.
740  * * %-EBUSY	- if no empty space was found.
741  */
742 int find_resource_space(struct resource *root, struct resource *new,
743 			resource_size_t size,
744 			struct resource_constraint *constraint)
745 {
746 	return  __find_resource_space(root, NULL, new, size, constraint);
747 }
748 EXPORT_SYMBOL_GPL(find_resource_space);
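/*
 * Sketch (illustrative): asking for a 4 KiB, 4 KiB-aligned hole below
 * 4 GiB.  On success @new describes the hole but is not inserted into
 * the tree; real callers serialize against the tree the way
 * allocate_resource() below does.
 *
 *	struct resource new = { .flags = IORESOURCE_MEM };
 *	struct resource_constraint c = {
 *		.min	= 0,
 *		.max	= 0xffffffff,
 *		.align	= SZ_4K,
 *	};
 *
 *	if (!find_resource_space(&iomem_resource, &new, SZ_4K, &c))
 *		pr_info("found %pR\n", &new);
 */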
749 
750 /**
751  * reallocate_resource - allocate a slot in the resource tree given range & alignment.
752  *	The resource will be relocated if the new size cannot be accommodated at
753  *	the current location.
754  *
755  * @root: root resource descriptor
756  * @old:  resource descriptor desired by caller
757  * @newsize: new size of the resource descriptor
758  * @constraint: the memory range and alignment constraints to be met.
759  */
760 static int reallocate_resource(struct resource *root, struct resource *old,
761 			       resource_size_t newsize,
762 			       struct resource_constraint *constraint)
763 {
764 	int err = 0;
765 	struct resource new = *old;
766 	struct resource *conflict;
767 
768 	write_lock(&resource_lock);
769 
770 	if ((err = __find_resource_space(root, old, &new, newsize, constraint)))
771 		goto out;
772 
773 	if (resource_contains(&new, old)) {
774 		old->start = new.start;
775 		old->end = new.end;
776 		goto out;
777 	}
778 
779 	if (old->child) {
780 		err = -EBUSY;
781 		goto out;
782 	}
783 
784 	if (resource_contains(old, &new)) {
785 		old->start = new.start;
786 		old->end = new.end;
787 	} else {
788 		__release_resource(old, true);
789 		*old = new;
790 		conflict = __request_resource(root, old);
791 		BUG_ON(conflict);
792 	}
793 out:
794 	write_unlock(&resource_lock);
795 	return err;
796 }
797 
798 
799 /**
800  * allocate_resource - allocate an empty slot in the resource tree given range & alignment.
801  * 	The resource will be reallocated with a new size if it was already allocated.
802  * @root: root resource descriptor
803  * @new: resource descriptor desired by caller
804  * @size: requested resource region size
805  * @min: minimum boundary to allocate
806  * @max: maximum boundary to allocate
807  * @align: alignment requested, in bytes
808  * @alignf: alignment function, optional, called if not NULL
809  * @alignf_data: arbitrary data to pass to the @alignf function
810  */
811 int allocate_resource(struct resource *root, struct resource *new,
812 		      resource_size_t size, resource_size_t min,
813 		      resource_size_t max, resource_size_t align,
814 		      resource_alignf alignf,
815 		      void *alignf_data)
816 {
817 	int err;
818 	struct resource_constraint constraint;
819 
820 	constraint.min = min;
821 	constraint.max = max;
822 	constraint.align = align;
823 	constraint.alignf = alignf;
824 	constraint.alignf_data = alignf_data;
825 
826 	if (new->parent) {
827 		/* resource is already allocated, try reallocating with
828 		   the new constraints */
829 		return reallocate_resource(root, new, size, &constraint);
830 	}
831 
832 	write_lock(&resource_lock);
833 	err = find_resource_space(root, new, size, &constraint);
834 	if (err >= 0 && __request_resource(root, new))
835 		err = -EBUSY;
836 	write_unlock(&resource_lock);
837 	return err;
838 }
839 
840 EXPORT_SYMBOL(allocate_resource);
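/*
 * Example (illustrative; the device and name are hypothetical): letting
 * the core pick a 1 MiB, 1 MiB-aligned slot anywhere in 32-bit space
 * and insert it in one step.
 *
 *	static struct resource bar = {
 *		.name	= "foo-aperture",
 *		.flags	= IORESOURCE_MEM,
 *	};
 *
 *	if (allocate_resource(&iomem_resource, &bar, SZ_1M, 0, 0xffffffff,
 *			      SZ_1M, NULL, NULL))
 *		pr_err("no space for foo-aperture\n");
 */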
841 
842 /**
843  * lookup_resource - find an existing resource by a resource start address
844  * @root: root resource descriptor
845  * @start: resource start address
846  *
847  * Returns a pointer to the resource if found, NULL otherwise
848  */
849 struct resource *lookup_resource(struct resource *root, resource_size_t start)
850 {
851 	struct resource *res;
852 
853 	read_lock(&resource_lock);
854 	for (res = root->child; res; res = res->sibling) {
855 		if (res->start == start)
856 			break;
857 	}
858 	read_unlock(&resource_lock);
859 
860 	return res;
861 }
862 
863 /*
864  * Insert a resource into the resource tree. If successful, return NULL,
865  * otherwise return the conflicting resource (compare to __request_resource())
866  */
867 static struct resource * __insert_resource(struct resource *parent, struct resource *new)
868 {
869 	struct resource *first, *next;
870 
871 	for (;; parent = first) {
872 		first = __request_resource(parent, new);
873 		if (!first)
874 			return first;
875 
876 		if (first == parent)
877 			return first;
878 		if (WARN_ON(first == new))	/* duplicated insertion */
879 			return first;
880 
881 		if ((first->start > new->start) || (first->end < new->end))
882 			break;
883 		if ((first->start == new->start) && (first->end == new->end))
884 			break;
885 	}
886 
887 	for (next = first; ; next = next->sibling) {
888 		/* Partial overlap? Bad, and unfixable */
889 		if (next->start < new->start || next->end > new->end)
890 			return next;
891 		if (!next->sibling)
892 			break;
893 		if (next->sibling->start > new->end)
894 			break;
895 	}
896 
897 	new->parent = parent;
898 	new->sibling = next->sibling;
899 	new->child = first;
900 
901 	next->sibling = NULL;
902 	for (next = first; next; next = next->sibling)
903 		next->parent = new;
904 
905 	if (parent->child == first) {
906 		parent->child = new;
907 	} else {
908 		next = parent->child;
909 		while (next->sibling != first)
910 			next = next->sibling;
911 		next->sibling = new;
912 	}
913 	return NULL;
914 }
915 
916 /**
917  * insert_resource_conflict - Inserts resource in the resource tree
918  * @parent: parent of the new resource
919  * @new: new resource to insert
920  *
921  * Returns NULL on success, or the conflicting resource if the resource can't be inserted.
922  *
923  * This function is equivalent to request_resource_conflict when no conflict
924  * happens. If a conflict happens, and the conflicting resources
925  * entirely fit within the range of the new resource, then the new
926  * resource is inserted and the conflicting resources become children of
927  * the new resource.
928  *
929  * This function is intended for producers of resources, such as FW modules
930  * and bus drivers.
931  */
932 struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
933 {
934 	struct resource *conflict;
935 
936 	write_lock(&resource_lock);
937 	conflict = __insert_resource(parent, new);
938 	write_unlock(&resource_lock);
939 	return conflict;
940 }
941 
942 /**
943  * insert_resource - Inserts a resource in the resource tree
944  * @parent: parent of the new resource
945  * @new: new resource to insert
946  *
947  * Returns 0 on success, -EBUSY if the resource can't be inserted.
948  *
949  * This function is intended for producers of resources, such as FW modules
950  * and bus drivers.
951  */
952 int insert_resource(struct resource *parent, struct resource *new)
953 {
954 	struct resource *conflict;
955 
956 	conflict = insert_resource_conflict(parent, new);
957 	return conflict ? -EBUSY : 0;
958 }
959 EXPORT_SYMBOL_GPL(insert_resource);
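/*
 * Sketch (illustrative; the window is made up): a firmware driver
 * publishing a region that may already contain driver-claimed ranges.
 * Conflicts that fit entirely inside the new resource become its
 * children rather than causing a failure.
 *
 *	static struct resource fw_win =
 *		DEFINE_RES_MEM_NAMED(0xe0000000, SZ_256M, "fw-window");
 *
 *	if (insert_resource(&iomem_resource, &fw_win))
 *		pr_warn("fw-window overlaps partially, not inserted\n");
 */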
960 
961 /**
962  * insert_resource_expand_to_fit - Insert a resource into the resource tree
963  * @root: root resource descriptor
964  * @new: new resource to insert
965  *
966  * Insert a resource into the resource tree, possibly expanding it in order
967  * to make it encompass any conflicting resources.
968  */
969 void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
970 {
971 	if (new->parent)
972 		return;
973 
974 	write_lock(&resource_lock);
975 	for (;;) {
976 		struct resource *conflict;
977 
978 		conflict = __insert_resource(root, new);
979 		if (!conflict)
980 			break;
981 		if (conflict == root)
982 			break;
983 
984 		/* Ok, expand resource to cover the conflict, then try again .. */
985 		if (conflict->start < new->start)
986 			new->start = conflict->start;
987 		if (conflict->end > new->end)
988 			new->end = conflict->end;
989 
990 		pr_info("Expanded resource %s due to conflict with %s\n", new->name, conflict->name);
991 	}
992 	write_unlock(&resource_lock);
993 }
994 /*
995  * Not for general consumption: only early boot memory map parsing, PCI
996  * resource discovery, and late discovery of CXL resources are expected
997  * to use this interface. The former are built-in and only the latter,
998  * CXL, is a module.
999  */
1000 EXPORT_SYMBOL_NS_GPL(insert_resource_expand_to_fit, "CXL");
1001 
1002 /**
1003  * remove_resource - Remove a resource in the resource tree
1004  * @old: resource to remove
1005  *
1006  * Returns 0 on success, -EINVAL if the resource is not valid.
1007  *
1008  * This function removes a resource previously inserted by insert_resource()
1009  * or insert_resource_conflict(), and moves the children (if any) up to
1010  * where they were before.  insert_resource() and insert_resource_conflict()
1011  * insert a new resource, and move any conflicting resources down to the
1012  * children of the new resource.
1013  *
1014  * insert_resource(), insert_resource_conflict() and remove_resource() are
1015  * intended for producers of resources, such as FW modules and bus drivers.
1016  */
1017 int remove_resource(struct resource *old)
1018 {
1019 	int retval;
1020 
1021 	write_lock(&resource_lock);
1022 	retval = __release_resource(old, false);
1023 	write_unlock(&resource_lock);
1024 	return retval;
1025 }
1026 EXPORT_SYMBOL_GPL(remove_resource);
1027 
1028 static int __adjust_resource(struct resource *res, resource_size_t start,
1029 				resource_size_t size)
1030 {
1031 	struct resource *tmp, *parent = res->parent;
1032 	resource_size_t end = start + size - 1;
1033 	int result = -EBUSY;
1034 
1035 	if (!parent)
1036 		goto skip;
1037 
1038 	if ((start < parent->start) || (end > parent->end))
1039 		goto out;
1040 
1041 	if (res->sibling && (res->sibling->start <= end))
1042 		goto out;
1043 
1044 	tmp = parent->child;
1045 	if (tmp != res) {
1046 		while (tmp->sibling != res)
1047 			tmp = tmp->sibling;
1048 		if (start <= tmp->end)
1049 			goto out;
1050 	}
1051 
1052 skip:
1053 	for (tmp = res->child; tmp; tmp = tmp->sibling)
1054 		if ((tmp->start < start) || (tmp->end > end))
1055 			goto out;
1056 
1057 	res->start = start;
1058 	res->end = end;
1059 	result = 0;
1060 
1061  out:
1062 	return result;
1063 }
1064 
1065 /**
1066  * adjust_resource - modify a resource's start and size
1067  * @res: resource to modify
1068  * @start: new start value
1069  * @size: new size
1070  *
1071  * Given an existing resource, change its start and size to match the
1072  * arguments.  Returns 0 on success, -EBUSY if it can't fit.
1073  * Existing children of the resource are assumed to be immutable.
1074  */
1075 int adjust_resource(struct resource *res, resource_size_t start,
1076 		    resource_size_t size)
1077 {
1078 	int result;
1079 
1080 	write_lock(&resource_lock);
1081 	result = __adjust_resource(res, start, size);
1082 	write_unlock(&resource_lock);
1083 	return result;
1084 }
1085 EXPORT_SYMBOL(adjust_resource);
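/*
 * Example (editor's sketch; @res, @dev, and new_size are assumed from
 * the caller): growing a region in place, e.g. after firmware reports a
 * larger aperture.  The call fails with -EBUSY if the new extent would
 * collide with a sibling or no longer cover a child.
 *
 *	if (adjust_resource(res, res->start, new_size))
 *		dev_warn(dev, "cannot grow %pR\n", res);
 */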
1086 
1087 static void __init
1088 __reserve_region_with_split(struct resource *root, resource_size_t start,
1089 			    resource_size_t end, const char *name)
1090 {
1091 	struct resource *parent = root;
1092 	struct resource *conflict;
1093 	struct resource *res = alloc_resource(GFP_ATOMIC);
1094 	struct resource *next_res = NULL;
1095 	int type = resource_type(root);
1096 
1097 	if (!res)
1098 		return;
1099 
1100 	res->name = name;
1101 	res->start = start;
1102 	res->end = end;
1103 	res->flags = type | IORESOURCE_BUSY;
1104 	res->desc = IORES_DESC_NONE;
1105 
1106 	while (1) {
1107 
1108 		conflict = __request_resource(parent, res);
1109 		if (!conflict) {
1110 			if (!next_res)
1111 				break;
1112 			res = next_res;
1113 			next_res = NULL;
1114 			continue;
1115 		}
1116 
1117 		/* conflict covered whole area */
1118 		if (conflict->start <= res->start &&
1119 				conflict->end >= res->end) {
1120 			free_resource(res);
1121 			WARN_ON(next_res);
1122 			break;
1123 		}
1124 
1125 		/* failed, split and try again */
1126 		if (conflict->start > res->start) {
1127 			end = res->end;
1128 			res->end = conflict->start - 1;
1129 			if (conflict->end < end) {
1130 				next_res = alloc_resource(GFP_ATOMIC);
1131 				if (!next_res) {
1132 					free_resource(res);
1133 					break;
1134 				}
1135 				next_res->name = name;
1136 				next_res->start = conflict->end + 1;
1137 				next_res->end = end;
1138 				next_res->flags = type | IORESOURCE_BUSY;
1139 				next_res->desc = IORES_DESC_NONE;
1140 			}
1141 		} else {
1142 			res->start = conflict->end + 1;
1143 		}
1144 	}
1145 
1146 }
1147 
1148 void __init
1149 reserve_region_with_split(struct resource *root, resource_size_t start,
1150 			  resource_size_t end, const char *name)
1151 {
1152 	int abort = 0;
1153 
1154 	write_lock(&resource_lock);
1155 	if (root->start > start || root->end < end) {
1156 		pr_err("requested range [0x%llx-0x%llx] not in root %pr\n",
1157 		       (unsigned long long)start, (unsigned long long)end,
1158 		       root);
1159 		if (start > root->end || end < root->start)
1160 			abort = 1;
1161 		else {
1162 			if (end > root->end)
1163 				end = root->end;
1164 			if (start < root->start)
1165 				start = root->start;
1166 			pr_err("fixing request to [0x%llx-0x%llx]\n",
1167 			       (unsigned long long)start,
1168 			       (unsigned long long)end);
1169 		}
1170 		dump_stack();
1171 	}
1172 	if (!abort)
1173 		__reserve_region_with_split(root, start, end, name);
1174 	write_unlock(&resource_lock);
1175 }
1176 
1177 /**
1178  * resource_alignment - calculate resource's alignment
1179  * @res: resource pointer
1180  *
1181  * Returns alignment on success, 0 (invalid alignment) on failure.
1182  */
1183 resource_size_t resource_alignment(struct resource *res)
1184 {
1185 	switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
1186 	case IORESOURCE_SIZEALIGN:
1187 		return resource_size(res);
1188 	case IORESOURCE_STARTALIGN:
1189 		return res->start;
1190 	default:
1191 		return 0;
1192 	}
1193 }
1194 
1195 /*
1196  * This is compatibility stuff for IO resources.
1197  *
1198  * Note how this, unlike the above, knows about
1199  * the IO flag meanings (busy etc).
1200  *
1201  * request_region creates a new busy region.
1202  *
1203  * release_region releases a matching busy region.
1204  */
1205 
1206 static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait);
1207 
1208 static struct inode *iomem_inode;
1209 
1210 #ifdef CONFIG_IO_STRICT_DEVMEM
1211 static void revoke_iomem(struct resource *res)
1212 {
1213 	/* pairs with smp_store_release() in iomem_init_inode() */
1214 	struct inode *inode = smp_load_acquire(&iomem_inode);
1215 
1216 	/*
1217 	 * Check that the initialization has completed. Losing the race
1218 	 * is ok because it means drivers are claiming resources before
1219 	 * the fs_initcall level of init, which prevents iomem_get_mapping()
1220 	 * users from establishing mappings.
1221 	 */
1222 	if (!inode)
1223 		return;
1224 
1225 	/*
1226 	 * The expectation is that the driver has successfully marked
1227 	 * the resource busy by this point, so devmem_is_allowed()
1228 	 * should start returning false; however, for performance this
1229 	 * does not iterate the entire resource range.
1230 	 */
1231 	if (devmem_is_allowed(PHYS_PFN(res->start)) &&
1232 	    devmem_is_allowed(PHYS_PFN(res->end))) {
1233 		/*
1234 		 * *cringe* iomem=relaxed says "go ahead, what's the
1235 		 * worst that can happen?"
1236 		 */
1237 		return;
1238 	}
1239 
1240 	unmap_mapping_range(inode->i_mapping, res->start, resource_size(res), 1);
1241 }
1242 #else
1243 static void revoke_iomem(struct resource *res) {}
1244 #endif
1245 
1246 struct address_space *iomem_get_mapping(void)
1247 {
1248 	/*
1249 	 * This function is only called from file open paths, hence guaranteed
1250 	 * that fs_initcalls have completed and no need to check for NULL. But
1251 	 * since revoke_iomem can be called before the initcall we still need
1252 	 * the barrier to appease checkers.
1253 	 */
1254 	return smp_load_acquire(&iomem_inode)->i_mapping;
1255 }
1256 
1257 static int __request_region_locked(struct resource *res, struct resource *parent,
1258 				   resource_size_t start, resource_size_t n,
1259 				   const char *name, int flags)
1260 {
1261 	DECLARE_WAITQUEUE(wait, current);
1262 
1263 	res->name = name;
1264 	res->start = start;
1265 	res->end = start + n - 1;
1266 
1267 	for (;;) {
1268 		struct resource *conflict;
1269 
1270 		res->flags = resource_type(parent) | resource_ext_type(parent);
1271 		res->flags |= IORESOURCE_BUSY | flags;
1272 		res->desc = parent->desc;
1273 
1274 		conflict = __request_resource(parent, res);
1275 		if (!conflict)
1276 			break;
1277 		/*
1278 		 * mm/hmm.c reserves physical addresses which then
1279 		 * become unavailable to other users.  Conflicts are
1280 		 * not expected.  Warn to aid debugging if encountered.
1281 		 */
1282 		if (parent == &iomem_resource &&
1283 		    conflict->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY) {
1284 			pr_warn("Unaddressable device %s %pR conflicts with %pR\n",
1285 				conflict->name, conflict, res);
1286 		}
1287 		if (conflict != parent) {
1288 			if (!(conflict->flags & IORESOURCE_BUSY)) {
1289 				parent = conflict;
1290 				continue;
1291 			}
1292 		}
1293 		if (conflict->flags & flags & IORESOURCE_MUXED) {
1294 			add_wait_queue(&muxed_resource_wait, &wait);
1295 			write_unlock(&resource_lock);
1296 			set_current_state(TASK_UNINTERRUPTIBLE);
1297 			schedule();
1298 			remove_wait_queue(&muxed_resource_wait, &wait);
1299 			write_lock(&resource_lock);
1300 			continue;
1301 		}
1302 		/* Uhhuh, that didn't work out.. */
1303 		return -EBUSY;
1304 	}
1305 
1306 	return 0;
1307 }
1308 
1309 /**
1310  * __request_region - create a new busy resource region
1311  * @parent: parent resource descriptor
1312  * @start: resource start address
1313  * @n: resource region size
1314  * @name: reserving caller's ID string
1315  * @flags: IO resource flags
1316  */
1317 struct resource *__request_region(struct resource *parent,
1318 				  resource_size_t start, resource_size_t n,
1319 				  const char *name, int flags)
1320 {
1321 	struct resource *res = alloc_resource(GFP_KERNEL);
1322 	int ret;
1323 
1324 	if (!res)
1325 		return NULL;
1326 
1327 	write_lock(&resource_lock);
1328 	ret = __request_region_locked(res, parent, start, n, name, flags);
1329 	write_unlock(&resource_lock);
1330 
1331 	if (ret) {
1332 		free_resource(res);
1333 		return NULL;
1334 	}
1335 
1336 	if (parent == &iomem_resource)
1337 		revoke_iomem(res);
1338 
1339 	return res;
1340 }
1341 EXPORT_SYMBOL(__request_region);
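/*
 * Typical consumer-side usage (sketch): the request_region() and
 * request_mem_region() wrappers in <linux/ioport.h> funnel into
 * __request_region() with the matching root.  @res is assumed to be a
 * hardware resource handed to a driver.
 *
 *	if (!request_mem_region(res->start, resource_size(res), "foo"))
 *		return -EBUSY;
 *	...
 *	release_mem_region(res->start, resource_size(res));
 */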
1342 
1343 /**
1344  * __release_region - release a previously reserved resource region
1345  * @parent: parent resource descriptor
1346  * @start: resource start address
1347  * @n: resource region size
1348  *
1349  * The described resource region must match a currently busy region.
1350  */
1351 void __release_region(struct resource *parent, resource_size_t start,
1352 		      resource_size_t n)
1353 {
1354 	struct resource **p;
1355 	resource_size_t end;
1356 
1357 	p = &parent->child;
1358 	end = start + n - 1;
1359 
1360 	write_lock(&resource_lock);
1361 
1362 	for (;;) {
1363 		struct resource *res = *p;
1364 
1365 		if (!res)
1366 			break;
1367 		if (res->start <= start && res->end >= end) {
1368 			if (!(res->flags & IORESOURCE_BUSY)) {
1369 				p = &res->child;
1370 				continue;
1371 			}
1372 			if (res->start != start || res->end != end)
1373 				break;
1374 			*p = res->sibling;
1375 			write_unlock(&resource_lock);
1376 			if (res->flags & IORESOURCE_MUXED)
1377 				wake_up(&muxed_resource_wait);
1378 			free_resource(res);
1379 			return;
1380 		}
1381 		p = &res->sibling;
1382 	}
1383 
1384 	write_unlock(&resource_lock);
1385 
1386 	pr_warn("Trying to free nonexistent resource <%pa-%pa>\n", &start, &end);
1387 }
1388 EXPORT_SYMBOL(__release_region);
1389 
1390 #ifdef CONFIG_MEMORY_HOTREMOVE
1391 /**
1392  * release_mem_region_adjustable - release a previously reserved memory region
1393  * @start: resource start address
1394  * @size: resource region size
1395  *
1396  * This interface is intended for memory hot-delete.  The requested region
1397  * is released from a currently busy memory resource.  The requested region
1398  * must either match exactly or fit into a single busy resource entry.  In
1399  * the latter case, the remaining resource is adjusted accordingly.
1400  * Existing children of the busy memory resource must be immutable in the
1401  * request.
1402  *
1403  * Note:
1404  * - Additional release conditions, such as overlapping region, can be
1405  *   supported after they are confirmed as valid cases.
1406  * - When a busy memory resource gets split into two entries, the code
1407  *   assumes that all children remain in the lower address entry for
1408  *   simplicity.  Enhance this logic when necessary.
1409  */
1410 void release_mem_region_adjustable(resource_size_t start, resource_size_t size)
1411 {
1412 	struct resource *parent = &iomem_resource;
1413 	struct resource *new_res = NULL;
1414 	bool alloc_nofail = false;
1415 	struct resource **p;
1416 	struct resource *res;
1417 	resource_size_t end;
1418 
1419 	end = start + size - 1;
1420 	if (WARN_ON_ONCE((start < parent->start) || (end > parent->end)))
1421 		return;
1422 
1423 	/*
1424 	 * We free up quite a lot of memory on memory hotunplug (especially the
1425 	 * memmap), just before releasing the region. This is highly unlikely to
1426 	 * fail - let's play safe and make it never fail, as the caller cannot
1427 	 * perform any error handling (e.g., trying to re-add memory would fail
1428 	 * similarly).
1429 	 */
1430 retry:
1431 	new_res = alloc_resource(GFP_KERNEL | (alloc_nofail ? __GFP_NOFAIL : 0));
1432 
1433 	p = &parent->child;
1434 	write_lock(&resource_lock);
1435 
1436 	while ((res = *p)) {
1437 		if (res->start >= end)
1438 			break;
1439 
1440 		/* look for the next resource if the region does not fit into this one */
1441 		if (res->start > start || res->end < end) {
1442 			p = &res->sibling;
1443 			continue;
1444 		}
1445 
1446 		if (!(res->flags & IORESOURCE_MEM))
1447 			break;
1448 
1449 		if (!(res->flags & IORESOURCE_BUSY)) {
1450 			p = &res->child;
1451 			continue;
1452 		}
1453 
1454 		/* found the target resource; let's adjust accordingly */
1455 		if (res->start == start && res->end == end) {
1456 			/* free the whole entry */
1457 			*p = res->sibling;
1458 			free_resource(res);
1459 		} else if (res->start == start && res->end != end) {
1460 			/* adjust the start */
1461 			WARN_ON_ONCE(__adjust_resource(res, end + 1,
1462 						       res->end - end));
1463 		} else if (res->start != start && res->end == end) {
1464 			/* adjust the end */
1465 			WARN_ON_ONCE(__adjust_resource(res, res->start,
1466 						       start - res->start));
1467 		} else {
1468 			/* split into two entries - we need a new resource */
1469 			if (!new_res) {
1470 				new_res = alloc_resource(GFP_ATOMIC);
1471 				if (!new_res) {
1472 					alloc_nofail = true;
1473 					write_unlock(&resource_lock);
1474 					goto retry;
1475 				}
1476 			}
1477 			new_res->name = res->name;
1478 			new_res->start = end + 1;
1479 			new_res->end = res->end;
1480 			new_res->flags = res->flags;
1481 			new_res->desc = res->desc;
1482 			new_res->parent = res->parent;
1483 			new_res->sibling = res->sibling;
1484 			new_res->child = NULL;
1485 
1486 			if (WARN_ON_ONCE(__adjust_resource(res, res->start,
1487 							   start - res->start)))
1488 				break;
1489 			res->sibling = new_res;
1490 			new_res = NULL;
1491 		}
1492 
1493 		break;
1494 	}
1495 
1496 	write_unlock(&resource_lock);
1497 	free_resource(new_res);
1498 }
1499 #endif	/* CONFIG_MEMORY_HOTREMOVE */
1500 
1501 #ifdef CONFIG_MEMORY_HOTPLUG
1502 static bool system_ram_resources_mergeable(struct resource *r1,
1503 					   struct resource *r2)
1504 {
1505 	/* We assume either r1 or r2 is IORESOURCE_SYSRAM_MERGEABLE. */
1506 	return r1->flags == r2->flags && r1->end + 1 == r2->start &&
1507 	       r1->name == r2->name && r1->desc == r2->desc &&
1508 	       !r1->child && !r2->child;
1509 }
1510 
1511 /**
1512  * merge_system_ram_resource - mark the System RAM resource mergeable and try to
1513  *	merge it with adjacent, mergeable resources
1514  * @res: resource descriptor
1515  *
1516  * This interface is intended for memory hotplug, whereby lots of contiguous
1517  * system ram resources are added (e.g., via add_memory*()) by a driver, and
1518  * the actual resource boundaries are not of interest (they might be, e.g.,
1519  * for DIMMs). Only resources that are marked mergeable, that have the
1520  * same parent, and that don't have any children are considered. All mergeable
1521  * resources must be immutable during the request.
1522  *
1523  * Note:
1524  * - The caller has to make sure that no pointers to resources that are
1525  *   marked mergeable are used anymore after this call - the resource might
1526  *   be freed and the pointer might be stale!
1527  * - release_mem_region_adjustable() will split on demand on memory hotunplug
1528  */
1529 void merge_system_ram_resource(struct resource *res)
1530 {
1531 	const unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
1532 	struct resource *cur;
1533 
1534 	if (WARN_ON_ONCE((res->flags & flags) != flags))
1535 		return;
1536 
1537 	write_lock(&resource_lock);
1538 	res->flags |= IORESOURCE_SYSRAM_MERGEABLE;
1539 
1540 	/* Try to merge with next item in the list. */
1541 	cur = res->sibling;
1542 	if (cur && system_ram_resources_mergeable(res, cur)) {
1543 		res->end = cur->end;
1544 		res->sibling = cur->sibling;
1545 		free_resource(cur);
1546 	}
1547 
1548 	/* Try to merge with previous item in the list. */
1549 	cur = res->parent->child;
1550 	while (cur && cur->sibling != res)
1551 		cur = cur->sibling;
1552 	if (cur && system_ram_resources_mergeable(cur, res)) {
1553 		cur->end = res->end;
1554 		cur->sibling = res->sibling;
1555 		free_resource(res);
1556 	}
1557 	write_unlock(&resource_lock);
1558 }
1559 #endif	/* CONFIG_MEMORY_HOTPLUG */
1560 
1561 /*
1562  * Managed region resource
1563  */
1564 static void devm_resource_release(struct device *dev, void *ptr)
1565 {
1566 	struct resource **r = ptr;
1567 
1568 	release_resource(*r);
1569 }
1570 
1571 /**
1572  * devm_request_resource() - request and reserve an I/O or memory resource
1573  * @dev: device for which to request the resource
1574  * @root: root of the resource tree from which to request the resource
1575  * @new: descriptor of the resource to request
1576  *
1577  * This is a device-managed version of request_resource(). There is usually
1578  * no need to release resources requested by this function explicitly since
1579  * that will be taken care of when the device is unbound from its driver.
1580  * If for some reason the resource needs to be released explicitly, because
1581  * of ordering issues for example, drivers must call devm_release_resource()
1582  * rather than the regular release_resource().
1583  *
1584  * When a conflict is detected between any existing resources and the newly
1585  * requested resource, an error message will be printed.
1586  *
1587  * Returns 0 on success or a negative error code on failure.
1588  */
1589 int devm_request_resource(struct device *dev, struct resource *root,
1590 			  struct resource *new)
1591 {
1592 	struct resource *conflict, **ptr;
1593 
1594 	ptr = devres_alloc(devm_resource_release, sizeof(*ptr), GFP_KERNEL);
1595 	if (!ptr)
1596 		return -ENOMEM;
1597 
1598 	*ptr = new;
1599 
1600 	conflict = request_resource_conflict(root, new);
1601 	if (conflict) {
1602 		dev_err(dev, "resource collision: %pR conflicts with %s %pR\n",
1603 			new, conflict->name, conflict);
1604 		devres_free(ptr);
1605 		return -EBUSY;
1606 	}
1607 
1608 	devres_add(dev, ptr);
1609 	return 0;
1610 }
1611 EXPORT_SYMBOL(devm_request_resource);
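/*
 * Probe-path sketch (illustrative; driver, address, and name are made
 * up).  No remove-side call is needed: devres releases the resource
 * when the device is unbound.
 *
 *	static struct resource foo_regs =
 *		DEFINE_RES_MEM_NAMED(0xfe000000, SZ_4K, "foo-regs");
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		return devm_request_resource(&pdev->dev, &iomem_resource,
 *					     &foo_regs);
 *	}
 */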
1612 
1613 static int devm_resource_match(struct device *dev, void *res, void *data)
1614 {
1615 	struct resource **ptr = res;
1616 
1617 	return *ptr == data;
1618 }
1619 
1620 /**
1621  * devm_release_resource() - release a previously requested resource
1622  * @dev: device for which to release the resource
1623  * @new: descriptor of the resource to release
1624  *
1625  * Releases a resource previously requested using devm_request_resource().
1626  */
1627 void devm_release_resource(struct device *dev, struct resource *new)
1628 {
1629 	WARN_ON(devres_release(dev, devm_resource_release, devm_resource_match,
1630 			       new));
1631 }
1632 EXPORT_SYMBOL(devm_release_resource);
1633 
1634 struct region_devres {
1635 	struct resource *parent;
1636 	resource_size_t start;
1637 	resource_size_t n;
1638 };
1639 
1640 static void devm_region_release(struct device *dev, void *res)
1641 {
1642 	struct region_devres *this = res;
1643 
1644 	__release_region(this->parent, this->start, this->n);
1645 }
1646 
1647 static int devm_region_match(struct device *dev, void *res, void *match_data)
1648 {
1649 	struct region_devres *this = res, *match = match_data;
1650 
1651 	return this->parent == match->parent &&
1652 		this->start == match->start && this->n == match->n;
1653 }
1654 
1655 struct resource *
1656 __devm_request_region(struct device *dev, struct resource *parent,
1657 		      resource_size_t start, resource_size_t n, const char *name)
1658 {
1659 	struct region_devres *dr = NULL;
1660 	struct resource *res;
1661 
1662 	dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
1663 			  GFP_KERNEL);
1664 	if (!dr)
1665 		return NULL;
1666 
1667 	dr->parent = parent;
1668 	dr->start = start;
1669 	dr->n = n;
1670 
1671 	res = __request_region(parent, start, n, name, 0);
1672 	if (res)
1673 		devres_add(dev, dr);
1674 	else
1675 		devres_free(dr);
1676 
1677 	return res;
1678 }
1679 EXPORT_SYMBOL(__devm_request_region);
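/*
 * Example (illustrative; port range and name are hypothetical): the
 * devm_request_region() wrapper in <linux/ioport.h> resolves to this
 * helper with &ioport_resource as the parent.
 *
 *	if (!devm_request_region(&pdev->dev, 0x3f8, 8, "foo-uart"))
 *		return -EBUSY;
 */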
1680 
1681 void __devm_release_region(struct device *dev, struct resource *parent,
1682 			   resource_size_t start, resource_size_t n)
1683 {
1684 	struct region_devres match_data = { parent, start, n };
1685 
1686 	WARN_ON(devres_release(dev, devm_region_release, devm_region_match,
1687 			       &match_data));
1688 }
1689 EXPORT_SYMBOL(__devm_release_region);
1690 
1691 /*
1692  * Reserve I/O ports or memory based on "reserve=" kernel parameter.
1693  */
1694 #define MAXRESERVE 4
1695 static int __init reserve_setup(char *str)
1696 {
1697 	static int reserved;
1698 	static struct resource reserve[MAXRESERVE];
1699 
1700 	for (;;) {
1701 		unsigned int io_start, io_num;
1702 		int x = reserved;
1703 		struct resource *parent;
1704 
1705 		if (get_option(&str, &io_start) != 2)
1706 			break;
1707 		if (get_option(&str, &io_num) == 0)
1708 			break;
1709 		if (x < MAXRESERVE) {
1710 			struct resource *res = reserve + x;
1711 
1712 			/*
1713 			 * If the region starts below 0x10000, we assume it's
1714 			 * I/O port space; otherwise assume it's memory.
1715 			 */
1716 			if (io_start < 0x10000) {
1717 				*res = DEFINE_RES_IO_NAMED(io_start, io_num, "reserved");
1718 				parent = &ioport_resource;
1719 			} else {
1720 				*res = DEFINE_RES_MEM_NAMED(io_start, io_num, "reserved");
1721 				parent = &iomem_resource;
1722 			}
1723 			res->flags |= IORESOURCE_BUSY;
1724 			if (request_resource(parent, res) == 0)
1725 				reserved = x+1;
1726 		}
1727 	}
1728 	return 1;
1729 }
1730 __setup("reserve=", reserve_setup);
1731 
1732 /*
1733  * Check whether the requested addr/size range spans more than any single
1734  * slot in the iomem resource tree.
1735  */
1736 int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
1737 {
1738 	resource_size_t end = addr + size - 1;
1739 	struct resource *p;
1740 	int err = 0;
1741 
1742 	read_lock(&resource_lock);
1743 	for_each_resource(&iomem_resource, p, false) {
1744 		/*
1745 		 * We could probably skip resources lacking the
1746 		 * IORESOURCE_MEM attribute here (this walks the iomem tree).
1747 		 */
1748 		if (p->start > end)
1749 			continue;
1750 		if (p->end < addr)
1751 			continue;
1752 		if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
1753 		    PFN_DOWN(p->end) >= PFN_DOWN(end))
1754 			continue;
1755 		/*
1756 		 * If a resource is "BUSY", it's not a hardware resource
1757 		 * but a driver mapping of one; don't warn for those, since
1758 		 * some drivers legitimately map only partial hardware
1759 		 * resources (example: vesafb).
1760 		 */
1761 		if (p->flags & IORESOURCE_BUSY)
1762 			continue;
1763 
1764 		pr_warn("resource sanity check: requesting [mem %pa-%pa], which spans more than %s %pR\n",
1765 			&addr, &end, p->name, p);
1766 		err = -1;
1767 		break;
1768 	}
1769 	read_unlock(&resource_lock);
1770 
1771 	return err;
1772 }
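
/*
 * Example (editorial sketch): architectures call this from their
 * ioremap paths to flag mappings that straddle distinct resources,
 * along the lines of this hypothetical caller:
 *
 *	if (iomem_map_sanity_check(phys_addr, size))
 *		pr_warn("mapping spans multiple iomem resources\n");
 */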
1773 
1774 #ifdef CONFIG_STRICT_DEVMEM
1775 static int strict_iomem_checks = 1;
1776 #else
1777 static int strict_iomem_checks;
1778 #endif
1779 
1780 /*
1781  * Check if an address is exclusive to the kernel and must not be mapped to
1782  * user space, for example, via /dev/mem.
1783  *
1784  * Returns true if exclusive to the kernel, otherwise returns false.
1785  */
1786 bool resource_is_exclusive(struct resource *root, u64 addr, resource_size_t size)
1787 {
1788 	const unsigned int exclusive_system_ram = IORESOURCE_SYSTEM_RAM |
1789 						  IORESOURCE_EXCLUSIVE;
1790 	bool skip_children = false, err = false;
1791 	struct resource *p;
1792 
1793 	read_lock(&resource_lock);
1794 	for_each_resource(root, p, skip_children) {
1795 		if (p->start >= addr + size)
1796 			break;
1797 		if (p->end < addr) {
1798 			skip_children = true;
1799 			continue;
1800 		}
1801 		skip_children = false;
1802 
1803 		/*
1804 		 * IORESOURCE_SYSTEM_RAM resources are exclusive if
1805 		 * IORESOURCE_EXCLUSIVE is set, even if they
1806 		 * are not busy and even if "iomem=relaxed" is set. The
1807 		 * responsible driver dynamically adds/removes system RAM within
1808 		 * such an area and uncontrolled access is dangerous.
1809 		 */
1810 		if ((p->flags & exclusive_system_ram) == exclusive_system_ram) {
1811 			err = true;
1812 			break;
1813 		}
1814 
1815 		/*
1816 		 * Otherwise, a busy resource is exclusive (given that
1817 		 * strict checks are enabled) if IORESOURCE_EXCLUSIVE is
1818 		 * set or if CONFIG_IO_STRICT_DEVMEM is enabled.
1819 		 */
1820 		if (!strict_iomem_checks || !(p->flags & IORESOURCE_BUSY))
1821 			continue;
1822 		if (IS_ENABLED(CONFIG_IO_STRICT_DEVMEM)
1823 				|| p->flags & IORESOURCE_EXCLUSIVE) {
1824 			err = true;
1825 			break;
1826 		}
1827 	}
1828 	read_unlock(&resource_lock);
1829 
1830 	return err;
1831 }
1832 
1833 bool iomem_is_exclusive(u64 addr)
1834 {
1835 	return resource_is_exclusive(&iomem_resource, addr & PAGE_MASK,
1836 				     PAGE_SIZE);
1837 }
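
/*
 * Example (editorial sketch): callers such as the PCI sysfs mmap path
 * use this to refuse user-space mappings of kernel-exclusive ranges:
 *
 *	if (iomem_is_exclusive(res->start))
 *		return -EINVAL;
 */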
1838 
1839 struct resource_entry *resource_list_create_entry(struct resource *res,
1840 						  size_t extra_size)
1841 {
1842 	struct resource_entry *entry;
1843 
1844 	entry = kzalloc(sizeof(*entry) + extra_size, GFP_KERNEL);
1845 	if (entry) {
1846 		INIT_LIST_HEAD(&entry->node);
1847 		entry->res = res ? res : &entry->__res;
1848 	}
1849 
1850 	return entry;
1851 }
1852 EXPORT_SYMBOL(resource_list_create_entry);
1853 
1854 void resource_list_free(struct list_head *head)
1855 {
1856 	struct resource_entry *entry, *tmp;
1857 
1858 	list_for_each_entry_safe(entry, tmp, head, node)
1859 		resource_list_destroy_entry(entry);
1860 }
1861 EXPORT_SYMBOL(resource_list_free);
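
/*
 * Example (editorial sketch): host-bridge code typically builds a list
 * of resource_entry nodes and frees the whole list in one call;
 * resource_list_add_tail() comes from <linux/resource_ext.h>, and the
 * range below is hypothetical:
 *
 *	LIST_HEAD(resources);
 *	struct resource_entry *entry;
 *
 *	entry = resource_list_create_entry(NULL, 0);
 *	if (!entry)
 *		return -ENOMEM;
 *	*entry->res = DEFINE_RES_MEM(0xc0000000, 0x10000000);
 *	resource_list_add_tail(entry, &resources);
 *	...
 *	resource_list_free(&resources);
 */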
1862 
1863 #ifdef CONFIG_GET_FREE_REGION
1864 #define GFR_DESCENDING		(1UL << 0)
1865 #define GFR_REQUEST_REGION	(1UL << 1)
1866 #ifdef PA_SECTION_SHIFT
1867 #define GFR_DEFAULT_ALIGN	(1UL << PA_SECTION_SHIFT)
1868 #else
1869 #define GFR_DEFAULT_ALIGN	PAGE_SIZE
1870 #endif
1871 
1872 static resource_size_t gfr_start(struct resource *base, resource_size_t size,
1873 				 resource_size_t align, unsigned long flags)
1874 {
1875 	if (flags & GFR_DESCENDING) {
1876 		resource_size_t end;
1877 
1878 		end = min_t(resource_size_t, base->end, DIRECT_MAP_PHYSMEM_END);
1879 		return end - size + 1;
1880 	}
1881 
1882 	return ALIGN(max(base->start, align), align);
1883 }
1884 
1885 static bool gfr_continue(struct resource *base, resource_size_t addr,
1886 			 resource_size_t size, unsigned long flags)
1887 {
1888 	if (flags & GFR_DESCENDING)
1889 		return addr > size && addr >= base->start;
1890 	/*
1891 	 * In the ascending case, be careful that the last increment by
1892 	 * @size did not wrap past 0.
1893 	 */
1894 	return addr > addr - size &&
1895 	       addr <= min_t(resource_size_t, base->end, DIRECT_MAP_PHYSMEM_END);
1896 }
1897 
1898 static resource_size_t gfr_next(resource_size_t addr, resource_size_t size,
1899 				unsigned long flags)
1900 {
1901 	if (flags & GFR_DESCENDING)
1902 		return addr - size;
1903 	return addr + size;
1904 }
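
/*
 * Editorial note: gfr_start()/gfr_continue()/gfr_next() together walk
 * candidate addresses in fixed-size steps, upward from the aligned
 * start of @base or downward from its capped end.  For instance, an
 * ascending walk with base->start = 0x1000 and a step of 0x1000 visits
 * 0x1000, 0x2000, 0x3000, ... until the window would pass
 * min(base->end, DIRECT_MAP_PHYSMEM_END) or the address would wrap 0.
 */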
1905 
1906 static void remove_free_mem_region(void *_res)
1907 {
1908 	struct resource *res = _res;
1909 
1910 	if (res->parent)
1911 		remove_resource(res);
1912 	free_resource(res);
1913 }
1914 
1915 static struct resource *
1916 get_free_mem_region(struct device *dev, struct resource *base,
1917 		    resource_size_t size, const unsigned long align,
1918 		    const char *name, const unsigned long desc,
1919 		    const unsigned long flags)
1920 {
1921 	resource_size_t addr;
1922 	struct resource *res;
1923 	struct region_devres *dr = NULL;
1924 
1925 	size = ALIGN(size, align);
1926 
1927 	res = alloc_resource(GFP_KERNEL);
1928 	if (!res)
1929 		return ERR_PTR(-ENOMEM);
1930 
1931 	if (dev && (flags & GFR_REQUEST_REGION)) {
1932 		dr = devres_alloc(devm_region_release,
1933 				sizeof(struct region_devres), GFP_KERNEL);
1934 		if (!dr) {
1935 			free_resource(res);
1936 			return ERR_PTR(-ENOMEM);
1937 		}
1938 	} else if (dev) {
1939 		if (devm_add_action_or_reset(dev, remove_free_mem_region, res))
1940 			return ERR_PTR(-ENOMEM);
1941 	}
1942 
1943 	write_lock(&resource_lock);
1944 	for (addr = gfr_start(base, size, align, flags);
1945 	     gfr_continue(base, addr, align, flags);
1946 	     addr = gfr_next(addr, align, flags)) {
1947 		if (__region_intersects(base, addr, size, 0, IORES_DESC_NONE) !=
1948 		    REGION_DISJOINT)
1949 			continue;
1950 
1951 		if (flags & GFR_REQUEST_REGION) {
1952 			if (__request_region_locked(res, &iomem_resource, addr,
1953 						    size, name, 0))
1954 				break;
1955 
1956 			if (dev) {
1957 				dr->parent = &iomem_resource;
1958 				dr->start = addr;
1959 				dr->n = size;
1960 				devres_add(dev, dr);
1961 			}
1962 
1963 			res->desc = desc;
1964 			write_unlock(&resource_lock);
1965 
1966 
1967 			/*
1968 			 * A driver is claiming this region so revoke any
1969 			 * mappings.
1970 			 */
1971 			revoke_iomem(res);
1972 		} else {
1973 			*res = DEFINE_RES_NAMED_DESC(addr, size, name, IORESOURCE_MEM, desc);
1974 
1975 			/*
1976 			 * Only succeed if the resource hosts an exclusive
1977 			 * range after the insert
1978 			 */
1979 			if (__insert_resource(base, res) || res->child)
1980 				break;
1981 
1982 			write_unlock(&resource_lock);
1983 		}
1984 
1985 		return res;
1986 	}
1987 	write_unlock(&resource_lock);
1988 
1989 	if (flags & GFR_REQUEST_REGION) {
1990 		free_resource(res);
1991 		devres_free(dr);
1992 	} else if (dev)
1993 		devm_release_action(dev, remove_free_mem_region, res);
1994 
1995 	return ERR_PTR(-ERANGE);
1996 }
1997 
1998 /**
1999  * devm_request_free_mem_region - find free region for device private memory
2000  *
2001  * @dev: device struct to bind the resource to
2002  * @size: size in bytes of the device memory to add
2003  * @base: resource tree to look in
2004  *
2005  * This function tries to find an empty range of physical address big enough to
2006  * contain the new resource, so that it can later be hotplugged as ZONE_DEVICE
2007  * memory, which in turn allocates struct pages.
2008  */
2009 struct resource *devm_request_free_mem_region(struct device *dev,
2010 		struct resource *base, unsigned long size)
2011 {
2012 	unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION;
2013 
2014 	return get_free_mem_region(dev, base, size, GFR_DEFAULT_ALIGN,
2015 				   dev_name(dev),
2016 				   IORES_DESC_DEVICE_PRIVATE_MEMORY, flags);
2017 }
2018 EXPORT_SYMBOL_GPL(devm_request_free_mem_region);
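
/*
 * Example (editorial sketch): device-private memory users (e.g. HMM
 * test code) grab a free range and feed it to memremap_pages();
 * "dev", "pgmap" and the size are hypothetical here:
 *
 *	res = devm_request_free_mem_region(dev, &iomem_resource, SZ_256M);
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *	pgmap->range.start = res->start;
 *	pgmap->range.end = res->end;
 */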
2019 
2020 struct resource *request_free_mem_region(struct resource *base,
2021 		unsigned long size, const char *name)
2022 {
2023 	unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION;
2024 
2025 	return get_free_mem_region(NULL, base, size, GFR_DEFAULT_ALIGN, name,
2026 				   IORES_DESC_DEVICE_PRIVATE_MEMORY, flags);
2027 }
2028 EXPORT_SYMBOL_GPL(request_free_mem_region);
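
/*
 * Example (editorial sketch): the non-devm variant leaves cleanup to
 * the caller, which must undo the request explicitly:
 *
 *	res = request_free_mem_region(&iomem_resource, SZ_16M, "foo");
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *	...
 *	release_mem_region(res->start, resource_size(res));
 */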
2029 
2030 /**
2031  * alloc_free_mem_region - find a free region relative to @base
2032  * @base: resource that will parent the new resource
2033  * @size: size in bytes of memory to allocate from @base
2034  * @align: alignment requirements for the allocation
2035  * @name: resource name
2036  *
2037  * Buses like CXL, which can dynamically instantiate new memory regions,
2038  * need a method to allocate physical address space for those regions.
2039  * Allocate and insert a new resource to cover a free range within the
2040  * span of @base that is not claimed by any descendant of @base.
2041  */
2042 struct resource *alloc_free_mem_region(struct resource *base,
2043 				       unsigned long size, unsigned long align,
2044 				       const char *name)
2045 {
2046 	/* Default of ascending direction and insert resource */
2047 	unsigned long flags = 0;
2048 
2049 	return get_free_mem_region(NULL, base, size, align, name,
2050 				   IORES_DESC_NONE, flags);
2051 }
2052 EXPORT_SYMBOL_GPL(alloc_free_mem_region);
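
/*
 * Example (editorial sketch): CXL region creation allocates host
 * physical address space from a root-decoder resource roughly like
 * this (names and alignment hypothetical):
 *
 *	res = alloc_free_mem_region(parent_res, size, SZ_256M,
 *				    dev_name(&region_dev));
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 */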
2053 #endif /* CONFIG_GET_FREE_REGION */
2054 
2055 static int __init strict_iomem(char *str)
2056 {
2057 	if (strstr(str, "relaxed"))
2058 		strict_iomem_checks = 0;
2059 	if (strstr(str, "strict"))
2060 		strict_iomem_checks = 1;
2061 	return 1;
2062 }
2063 
2064 static int iomem_fs_init_fs_context(struct fs_context *fc)
2065 {
2066 	return init_pseudo(fc, DEVMEM_MAGIC) ? 0 : -ENOMEM;
2067 }
2068 
2069 static struct file_system_type iomem_fs_type = {
2070 	.name		= "iomem",
2071 	.owner		= THIS_MODULE,
2072 	.init_fs_context = iomem_fs_init_fs_context,
2073 	.kill_sb	= kill_anon_super,
2074 };
2075 
2076 static int __init iomem_init_inode(void)
2077 {
2078 	static struct vfsmount *iomem_vfs_mount;
2079 	static int iomem_fs_cnt;
2080 	struct inode *inode;
2081 	int rc;
2082 
2083 	rc = simple_pin_fs(&iomem_fs_type, &iomem_vfs_mount, &iomem_fs_cnt);
2084 	if (rc < 0) {
2085 		pr_err("Cannot mount iomem pseudo filesystem: %d\n", rc);
2086 		return rc;
2087 	}
2088 
2089 	inode = alloc_anon_inode(iomem_vfs_mount->mnt_sb);
2090 	if (IS_ERR(inode)) {
2091 		rc = PTR_ERR(inode);
2092 		pr_err("Cannot allocate inode for iomem: %d\n", rc);
2093 		simple_release_fs(&iomem_vfs_mount, &iomem_fs_cnt);
2094 		return rc;
2095 	}
2096 
2097 	/*
2098 	 * Publish the iomem revocation inode once it is fully initialized.
2099 	 * Pairs with smp_load_acquire() in revoke_iomem().
2100 	 */
2101 	smp_store_release(&iomem_inode, inode);
2102 
2103 	return 0;
2104 }
2105 
2106 fs_initcall(iomem_init_inode);
2107 
2108 __setup("iomem=", strict_iomem);
2109