xref: /linux/drivers/of/of_reserved_mem.c (revision 34e0e2a8ea9e9e4f4dceb33072103dffaa1366b3)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Device tree based initialization code for reserved memory.
4  *
5  * Copyright (c) 2013, 2015 The Linux Foundation. All Rights Reserved.
6  * Copyright (c) 2013,2014 Samsung Electronics Co., Ltd.
7  *		http://www.samsung.com
8  * Author: Marek Szyprowski <m.szyprowski@samsung.com>
9  * Author: Josh Cartwright <joshc@codeaurora.org>
10  */
11 
12 #define pr_fmt(fmt)	"OF: reserved mem: " fmt
13 
14 #include <linux/err.h>
15 #include <linux/ioport.h>
16 #include <linux/libfdt.h>
17 #include <linux/of.h>
18 #include <linux/of_fdt.h>
19 #include <linux/of_platform.h>
20 #include <linux/mm.h>
21 #include <linux/sizes.h>
22 #include <linux/of_reserved_mem.h>
23 #include <linux/sort.h>
24 #include <linux/slab.h>
25 #include <linux/memblock.h>
26 #include <linux/kmemleak.h>
27 
28 #include "of_private.h"
29 
/* Static bootstrap storage for region records, used until a dynamic array
 * can be allocated by alloc_reserved_mem_array(). */
static struct reserved_mem reserved_mem_array[MAX_RESERVED_REGIONS] __initdata;
/* Points at reserved_mem_array until alloc_reserved_mem_array() swaps in a
 * memblock-allocated replacement. */
static struct reserved_mem *reserved_mem __refdata = reserved_mem_array;
/* Capacity of the current reserved_mem array; updated to the real region
 * count at the end of fdt_scan_reserved_mem(). */
static int total_reserved_mem_cnt = MAX_RESERVED_REGIONS;
/* Number of slots in reserved_mem that are currently populated. */
static int reserved_mem_count;
34 
/*
 * early_init_dt_alloc_reserved_memory_arch() - allocate a physical range via
 * memblock and optionally mark it nomap.
 *
 * A zero @end means "no upper bound" and a zero @align falls back to the
 * cache-line default. On success the chosen base is stored in @res_base.
 *
 * Return: 0 on success, -ENOMEM if no range could be allocated, or the
 * error from memblock_mark_nomap() (in which case the range is freed again).
 */
static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
	phys_addr_t *res_base)
{
	phys_addr_t base;
	int err = 0;

	end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end;
	align = !align ? SMP_CACHE_BYTES : align;
	base = memblock_phys_alloc_range(size, align, start, end);
	if (!base)
		return -ENOMEM;

	*res_base = base;
	if (nomap) {
		/* Roll the allocation back if the nomap marking fails. */
		err = memblock_mark_nomap(base, size);
		if (err)
			memblock_phys_free(base, size);
	}

	/* Reserved regions are handed out by base address, never by pointer,
	 * so tell kmemleak not to treat them as leaked. */
	if (!err)
		kmemleak_ignore_phys(base);

	return err;
}
60 
/*
 * alloc_reserved_mem_array() - allocate memory for the reserved_mem
 * array using memblock
 *
 * This function is used to allocate memory for the reserved_mem
 * array according to the total number of reserved memory regions
 * defined in the DT.
 * After the new array is allocated, the information stored in
 * the initial static array is copied over to this new array and
 * the new array is used from this point on.
 *
 * On any failure the static bootstrap array simply stays in use.
 */
static void __init alloc_reserved_mem_array(void)
{
	struct reserved_mem *new_array;
	size_t alloc_size, copy_size, memset_size;

	/* array_size() returns SIZE_MAX on multiplication overflow. */
	alloc_size = array_size(total_reserved_mem_cnt, sizeof(*new_array));
	if (alloc_size == SIZE_MAX) {
		pr_err("Failed to allocate memory for reserved_mem array with err: %d", -EOVERFLOW);
		return;
	}

	new_array = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
	if (!new_array) {
		pr_err("Failed to allocate memory for reserved_mem array with err: %d", -ENOMEM);
		return;
	}

	copy_size = array_size(reserved_mem_count, sizeof(*new_array));
	if (copy_size == SIZE_MAX) {
		/* Roll back: release the new array and restore the static
		 * array's capacity so later bounds checks stay correct. */
		memblock_free(new_array, alloc_size);
		total_reserved_mem_cnt = MAX_RESERVED_REGIONS;
		pr_err("Failed to allocate memory for reserved_mem array with err: %d", -EOVERFLOW);
		return;
	}

	/* Zero only the tail beyond the entries copied below. */
	memset_size = alloc_size - copy_size;

	memcpy(new_array, reserved_mem, copy_size);
	memset(new_array + reserved_mem_count, 0, memset_size);

	/* All future accesses go through the dynamic array. */
	reserved_mem = new_array;
}
104 
/* Forward declarations: these helpers are shared by the early reservation
 * path and the late initialization path but defined further below. */
static void fdt_init_reserved_mem_node(unsigned long node, const char *uname,
				       phys_addr_t base, phys_addr_t size);
static int fdt_validate_reserved_mem_node(unsigned long node,
					  phys_addr_t *align);
static int fdt_fixup_reserved_mem_node(unsigned long node,
				       phys_addr_t base, phys_addr_t size);
111 
112 static int __init early_init_dt_reserve_memory(phys_addr_t base,
113 					       phys_addr_t size, bool nomap)
114 {
115 	if (nomap) {
116 		/*
117 		 * If the memory is already reserved (by another region), we
118 		 * should not allow it to be marked nomap, but don't worry
119 		 * if the region isn't memory as it won't be mapped.
120 		 */
121 		if (memblock_overlaps_region(&memblock.memory, base, size) &&
122 		    memblock_is_region_reserved(base, size))
123 			return -EBUSY;
124 
125 		return memblock_mark_nomap(base, size);
126 	}
127 	return memblock_reserve(base, size);
128 }
129 
/*
 * __reserved_mem_reserve_reg() - reserve all memory described in 'reg' property
 *
 * Return: 0 once every tuple has been processed (individual failures are
 * only logged), -ENOENT if the node has no 'reg' property, or a negative
 * error from the node-validate callback.
 */
static int __init __reserved_mem_reserve_reg(unsigned long node,
					     const char *uname)
{
	phys_addr_t base, size;
	int i, len, err;
	const __be32 *prop;
	bool nomap;

	/* len is the number of (address, size) tuples encoded in 'reg'. */
	prop = of_flat_dt_get_addr_size_prop(node, "reg", &len);
	if (!prop)
		return -ENOENT;

	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

	/* -ENODEV just means no driver claimed the node; not fatal here. */
	err = fdt_validate_reserved_mem_node(node, NULL);
	if (err && err != -ENODEV)
		return err;

	for (i = 0; i < len; i++) {
		u64 b, s;

		of_flat_dt_read_addr_size(prop, i, &b, &s);

		base = b;
		size = s;

		/* Zero-sized tuples are reported as failures, not skipped. */
		if (size && early_init_dt_reserve_memory(base, size, nomap) == 0) {
			fdt_fixup_reserved_mem_node(node, base, size);
			pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
				uname, &base, (unsigned long)(size / SZ_1M));
		} else {
			pr_err("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
			       uname, &base, (unsigned long)(size / SZ_1M));
		}
	}
	return 0;
}
170 
171 /*
172  * __reserved_mem_check_root() - check if #size-cells, #address-cells provided
173  * in /reserved-memory matches the values supported by the current implementation,
174  * also check if ranges property has been provided
175  */
176 static int __init __reserved_mem_check_root(unsigned long node)
177 {
178 	const __be32 *prop;
179 
180 	prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
181 	if (!prop || be32_to_cpup(prop) != dt_root_size_cells)
182 		return -EINVAL;
183 
184 	prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
185 	if (!prop || be32_to_cpup(prop) != dt_root_addr_cells)
186 		return -EINVAL;
187 
188 	prop = of_get_flat_dt_prop(node, "ranges", NULL);
189 	if (!prop)
190 		return -EINVAL;
191 	return 0;
192 }
193 
194 static int __init __rmem_cmp(const void *a, const void *b)
195 {
196 	const struct reserved_mem *ra = a, *rb = b;
197 
198 	if (ra->base < rb->base)
199 		return -1;
200 
201 	if (ra->base > rb->base)
202 		return 1;
203 
204 	/*
205 	 * Put the dynamic allocations (address == 0, size == 0) before static
206 	 * allocations at address 0x0 so that overlap detection works
207 	 * correctly.
208 	 */
209 	if (ra->size < rb->size)
210 		return -1;
211 	if (ra->size > rb->size)
212 		return 1;
213 
214 	return 0;
215 }
216 
217 static void __init __rmem_check_for_overlap(void)
218 {
219 	int i;
220 
221 	if (reserved_mem_count < 2)
222 		return;
223 
224 	sort(reserved_mem, reserved_mem_count, sizeof(reserved_mem[0]),
225 	     __rmem_cmp, NULL);
226 	for (i = 0; i < reserved_mem_count - 1; i++) {
227 		struct reserved_mem *this, *next;
228 
229 		this = &reserved_mem[i];
230 		next = &reserved_mem[i + 1];
231 
232 		if (this->base + this->size > next->base) {
233 			phys_addr_t this_end, next_end;
234 
235 			this_end = this->base + this->size;
236 			next_end = next->base + next->size;
237 			pr_err("OVERLAP DETECTED!\n%s (%pa--%pa) overlaps with %s (%pa--%pa)\n",
238 			       this->name, &this->base, &this_end,
239 			       next->name, &next->base, &next_end);
240 		}
241 	}
242 }
243 
/**
 * fdt_scan_reserved_mem_late() - Scan FDT and initialize remaining reserved
 * memory regions.
 *
 * This function is used to scan again through the DT and initialize the
 * "static" reserved memory regions, that are defined using the "reg"
 * property. Each such region is then initialized with its specific init
 * function and stored in the global reserved_mem array.
 */
void __init fdt_scan_reserved_mem_late(void)
{
	const void *fdt = initial_boot_params;
	phys_addr_t base, size;
	int node, child;

	if (!fdt)
		return;

	node = fdt_path_offset(fdt, "/reserved-memory");
	if (node < 0) {
		pr_info("Reserved memory: No reserved-memory node in the DT\n");
		return;
	}

	/* Attempt dynamic allocation of a new reserved_mem array */
	alloc_reserved_mem_array();

	if (__reserved_mem_check_root(node)) {
		pr_err("Reserved memory: unsupported node format, ignoring\n");
		return;
	}

	fdt_for_each_subnode(child, fdt, node) {
		const char *uname;
		u64 b, s;
		int ret;

		if (!of_fdt_device_is_available(fdt, child))
			continue;

		/* Only statically-placed regions carry a 'reg' property;
		 * dynamic regions were already handled by the early scan. */
		if (!of_flat_dt_get_addr_size(child, "reg", &b, &s))
			continue;

		/* -ENODEV means no driver claimed the node; still usable. */
		ret = fdt_validate_reserved_mem_node(child, NULL);
		if (ret && ret != -ENODEV)
			continue;

		base = b;
		size = s;

		if (size) {
			uname = fdt_get_name(fdt, child, NULL);
			fdt_init_reserved_mem_node(child, uname, base, size);
		}
	}

	/* check for overlapping reserved regions */
	__rmem_check_for_overlap();
}
303 
304 static int __init __reserved_mem_alloc_size(unsigned long node, const char *uname);
305 
306 /*
307  * fdt_scan_reserved_mem() - reserve and allocate memory occupied by
308  * reserved memory regions.
309  *
310  * This function is used to scan through the FDT and mark memory occupied
311  * by all static (defined by the "reg" property) reserved memory regions.
312  * Then memory for all dynamic regions (defined by size & alignment) is
313  * allocated, a region specific init function is called and region information
314  * is stored in the reserved_mem array.
315  */
316 int __init fdt_scan_reserved_mem(void)
317 {
318 	int node, child;
319 	int dynamic_nodes_cnt = 0, count = 0;
320 	int dynamic_nodes[MAX_RESERVED_REGIONS];
321 	const void *fdt = initial_boot_params;
322 
323 	node = fdt_path_offset(fdt, "/reserved-memory");
324 	if (node < 0)
325 		return -ENODEV;
326 
327 	if (__reserved_mem_check_root(node) != 0) {
328 		pr_err("Reserved memory: unsupported node format, ignoring\n");
329 		return -EINVAL;
330 	}
331 
332 	fdt_for_each_subnode(child, fdt, node) {
333 		const char *uname;
334 		int err;
335 
336 		if (!of_fdt_device_is_available(fdt, child))
337 			continue;
338 
339 		uname = fdt_get_name(fdt, child, NULL);
340 
341 		err = __reserved_mem_reserve_reg(child, uname);
342 		if (!err)
343 			count++;
344 		/*
345 		 * Save the nodes for the dynamically-placed regions
346 		 * into an array which will be used for allocation right
347 		 * after all the statically-placed regions are reserved
348 		 * or marked as no-map. This is done to avoid dynamically
349 		 * allocating from one of the statically-placed regions.
350 		 */
351 		if (err == -ENOENT && of_get_flat_dt_prop(child, "size", NULL)) {
352 			dynamic_nodes[dynamic_nodes_cnt] = child;
353 			dynamic_nodes_cnt++;
354 		}
355 	}
356 	for (int i = 0; i < dynamic_nodes_cnt; i++) {
357 		const char *uname;
358 		int err;
359 
360 		child = dynamic_nodes[i];
361 		uname = fdt_get_name(fdt, child, NULL);
362 		err = __reserved_mem_alloc_size(child, uname);
363 		if (!err)
364 			count++;
365 	}
366 	total_reserved_mem_cnt = count;
367 	return 0;
368 }
369 
/*
 * __reserved_mem_alloc_in_range() - allocate reserved memory described with
 *	'alloc-ranges'. Choose bottom-up/top-down depending on nearby existing
 *	reserved regions to keep the reserved memory contiguous if possible.
 */
static int __init __reserved_mem_alloc_in_range(phys_addr_t size,
	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
	phys_addr_t *res_base)
{
	/* Remember the global direction so it can be restored afterwards. */
	bool prev_bottom_up = memblock_bottom_up();
	bool bottom_up = false, top_down = false;
	int ret, i;

	for (i = 0; i < reserved_mem_count; i++) {
		struct reserved_mem *rmem = &reserved_mem[i];

		/* Skip regions that were not reserved yet */
		if (rmem->size == 0)
			continue;

		/*
		 * If range starts next to an existing reservation, use bottom-up:
		 *	|....RRRR................RRRRRRRR..............|
		 *	       --RRRR------
		 */
		if (start >= rmem->base && start <= (rmem->base + rmem->size))
			bottom_up = true;

		/*
		 * If range ends next to an existing reservation, use top-down:
		 *	|....RRRR................RRRRRRRR..............|
		 *	              -------RRRR-----
		 */
		if (end >= rmem->base && end <= (rmem->base + rmem->size))
			top_down = true;
	}

	/*
	 * Change setting only if either bottom-up or top-down was selected;
	 * if both or neither were hinted, leave the direction untouched.
	 */
	if (bottom_up != top_down)
		memblock_set_bottom_up(bottom_up);

	ret = early_init_dt_alloc_reserved_memory_arch(size, align,
			start, end, nomap, res_base);

	/* Restore old setting if needed */
	if (bottom_up != top_down)
		memblock_set_bottom_up(prev_bottom_up);

	return ret;
}
420 
/*
 * __reserved_mem_alloc_size() - allocate reserved memory described by
 *	'size', 'alignment'  and 'alloc-ranges' properties.
 *
 * Return: 0 on success, -EINVAL for a malformed node, -ENOMEM if no range
 * could be allocated, or a negative error from the validate callback.
 */
static int __init __reserved_mem_alloc_size(unsigned long node, const char *uname)
{
	phys_addr_t start = 0, end = 0;
	phys_addr_t base = 0, align = 0, size;
	int i, len;
	const __be32 *prop;
	bool nomap;
	int ret;

	/* 'size' is mandatory for dynamically-placed regions. */
	prop = of_get_flat_dt_prop(node, "size", &len);
	if (!prop)
		return -EINVAL;

	if (len != dt_root_size_cells * sizeof(__be32)) {
		pr_err("invalid size property in '%s' node.\n", uname);
		return -EINVAL;
	}
	size = dt_mem_next_cell(dt_root_size_cells, &prop);

	/* 'alignment' is optional; zero lets the allocator pick a default. */
	prop = of_get_flat_dt_prop(node, "alignment", &len);
	if (prop) {
		if (len != dt_root_addr_cells * sizeof(__be32)) {
			pr_err("invalid alignment property in '%s' node.\n",
				uname);
			return -EINVAL;
		}
		align = dt_mem_next_cell(dt_root_addr_cells, &prop);
	}

	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

	/* The callback may tighten 'align'; -ENODEV means no driver match. */
	ret = fdt_validate_reserved_mem_node(node, &align);
	if (ret && ret != -ENODEV)
		return ret;

	prop = of_flat_dt_get_addr_size_prop(node, "alloc-ranges", &len);
	if (prop) {
		/* Try each permitted range in order until one succeeds. */
		for (i = 0; i < len; i++) {
			u64 b, s;

			of_flat_dt_read_addr_size(prop, i, &b, &s);

			start = b;
			end = b + s;

			base = 0;
			ret = __reserved_mem_alloc_in_range(size, align,
					start, end, nomap, &base);
			if (ret == 0) {
				pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
					uname, &base,
					(unsigned long)(size / SZ_1M));
				break;
			}
		}
	} else {
		/* No placement constraints: allocate anywhere. */
		ret = early_init_dt_alloc_reserved_memory_arch(size, align,
							0, 0, nomap, &base);
		if (ret == 0)
			pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
				uname, &base, (unsigned long)(size / SZ_1M));
	}

	/* base stays 0 whenever every allocation attempt above failed. */
	if (base == 0) {
		pr_err("failed to allocate memory for node '%s': size %lu MiB\n",
		       uname, (unsigned long)(size / SZ_1M));
		return -ENOMEM;
	}

	fdt_fixup_reserved_mem_node(node, base, size);
	fdt_init_reserved_mem_node(node, uname, base, size);

	return 0;
}
499 
/* Linker-assembled table of reserved-memory driver entries (presumably
 * populated by RESERVEDMEM_OF_DECLARE() — confirm against of_reserved_mem.h).
 * The sentinel sits in the end-section so the match loops know where to stop. */
extern const struct of_device_id __reservedmem_of_table[];
static const struct of_device_id __rmem_of_table_sentinel
	__used __section("__reservedmem_of_table_end");
503 
504 /**
505  * fdt_fixup_reserved_mem_node() - call fixup function for a reserved memory node
506  * @node: FDT node to fixup
507  * @base: base address of the reserved memory region
508  * @size: size of the reserved memory region
509  *
510  * This function iterates through the reserved memory drivers and calls
511  * the node_fixup callback for the compatible entry matching the node.
512  *
513  * Return: 0 on success, -ENODEV if no compatible match found
514  */
515 static int __init fdt_fixup_reserved_mem_node(unsigned long node,
516 					phys_addr_t base, phys_addr_t size)
517 {
518 	const struct of_device_id *i;
519 	int ret = -ENODEV;
520 
521 	for (i = __reservedmem_of_table; ret == -ENODEV &&
522 	     i < &__rmem_of_table_sentinel; i++) {
523 		const struct reserved_mem_ops *ops = i->data;
524 
525 		if (!of_flat_dt_is_compatible(node, i->compatible))
526 			continue;
527 
528 		if (ops->node_fixup)
529 			ret = ops->node_fixup(node, base, size);
530 	}
531 	return ret;
532 }
533 
534 /**
535  * fdt_validate_reserved_mem_node() - validate a reserved memory node
536  * @node: FDT node to validate
537  * @align: pointer to store the validated alignment (may be modified by callback)
538  *
539  * This function iterates through the reserved memory drivers and calls
540  * the node_validate callback for the compatible entry matching the node.
541  *
542  * Return: 0 on success, -ENODEV if no compatible match found
543  */
544 static int __init fdt_validate_reserved_mem_node(unsigned long node, phys_addr_t *align)
545 {
546 	const struct of_device_id *i;
547 	int ret = -ENODEV;
548 
549 	for (i = __reservedmem_of_table; ret == -ENODEV &&
550 	     i < &__rmem_of_table_sentinel; i++) {
551 		const struct reserved_mem_ops *ops = i->data;
552 
553 		if (!of_flat_dt_is_compatible(node, i->compatible))
554 			continue;
555 
556 		if (ops->node_validate)
557 			ret = ops->node_validate(node, align);
558 	}
559 	return ret;
560 }
561 
562 /**
563  * __reserved_mem_init_node() - initialize a reserved memory region
564  * @rmem: reserved_mem structure to initialize
565  * @node: FDT node describing the reserved memory region
566  *
567  * This function iterates through the reserved memory drivers and calls the
568  * node_init callback for the compatible entry matching the node. On success,
569  * the operations pointer is stored in the reserved_mem structure.
570  *
571  * Return: 0 on success, -ENODEV if no compatible match found
572  */
573 static int __init __reserved_mem_init_node(struct reserved_mem *rmem,
574 					   unsigned long node)
575 {
576 	const struct of_device_id *i;
577 	int ret = -ENODEV;
578 
579 	for (i = __reservedmem_of_table; ret == -ENODEV &&
580 	     i < &__rmem_of_table_sentinel; i++) {
581 		const struct reserved_mem_ops *ops = i->data;
582 		const char *compat = i->compatible;
583 
584 		if (!of_flat_dt_is_compatible(node, compat))
585 			continue;
586 
587 		ret = ops->node_init(node, rmem);
588 		if (ret == 0) {
589 			rmem->ops = ops;
590 			pr_info("initialized node %s, compatible id %s\n",
591 				rmem->name, compat);
592 			return ret;
593 		}
594 	}
595 	return ret;
596 }
597 
598 /**
599  * fdt_init_reserved_mem_node() - Initialize a reserved memory region
600  * @node: fdt node of the initialized region
601  * @uname: name of the reserved memory node
602  * @base: base address of the reserved memory region
603  * @size: size of the reserved memory region
604  *
605  * This function calls the region-specific initialization function for a
606  * reserved memory region and saves all region-specific data to the
607  * reserved_mem array to allow of_reserved_mem_lookup() to find it.
608  */
609 static void __init fdt_init_reserved_mem_node(unsigned long node, const char *uname,
610 					      phys_addr_t base, phys_addr_t size)
611 {
612 	int err = 0;
613 	bool nomap;
614 
615 	struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];
616 
617 	if (reserved_mem_count == total_reserved_mem_cnt) {
618 		pr_err("not enough space for all defined regions.\n");
619 		return;
620 	}
621 
622 	rmem->name = uname;
623 	rmem->base = base;
624 	rmem->size = size;
625 
626 	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
627 
628 	err = __reserved_mem_init_node(rmem, node);
629 	if (err != 0 && err != -ENODEV) {
630 		pr_info("node %s compatible matching fail\n", rmem->name);
631 		rmem->name = NULL;
632 
633 		if (nomap)
634 			memblock_clear_nomap(rmem->base, rmem->size);
635 		else
636 			memblock_phys_free(rmem->base, rmem->size);
637 		return;
638 	} else {
639 		phys_addr_t end = rmem->base + rmem->size - 1;
640 		bool reusable =
641 			(of_get_flat_dt_prop(node, "reusable", NULL)) != NULL;
642 
643 		pr_info("%pa..%pa (%lu KiB) %s %s %s\n",
644 			&rmem->base, &end, (unsigned long)(rmem->size / SZ_1K),
645 			nomap ? "nomap" : "map",
646 			reusable ? "reusable" : "non-reusable",
647 			rmem->name ? rmem->name : "unknown");
648 	}
649 
650 	reserved_mem_count++;
651 }
652 
/*
 * Records that @rmem has been assigned to @dev via
 * of_reserved_mem_device_init_by_idx(). Entries live on
 * of_rmem_assigned_device_list, guarded by of_rmem_assigned_device_mutex,
 * and are removed/freed by of_reserved_mem_device_release().
 */
struct rmem_assigned_device {
	struct device *dev;
	struct reserved_mem *rmem;
	struct list_head list;
};

static LIST_HEAD(of_rmem_assigned_device_list);
static DEFINE_MUTEX(of_rmem_assigned_device_mutex);
661 
/**
 * of_reserved_mem_device_init_by_idx() - assign reserved memory region to
 *					  given device
 * @dev:	Pointer to the device to configure
 * @np:		Pointer to the device_node with 'reserved-memory' property
 * @idx:	Index of selected region
 *
 * This function assigns respective DMA-mapping operations based on reserved
 * memory region specified by 'memory-region' property in @np node to the @dev
 * device. When driver needs to use more than one reserved memory region, it
 * should allocate child devices and initialize regions by name for each of
 * child device.
 *
 * Returns error code or zero on success.
 */
int of_reserved_mem_device_init_by_idx(struct device *dev,
				       struct device_node *np, int idx)
{
	struct rmem_assigned_device *rd;
	struct device_node *target;
	struct reserved_mem *rmem;
	int ret;

	if (!np || !dev)
		return -EINVAL;

	target = of_parse_phandle(np, "memory-region", idx);
	if (!target)
		return -ENODEV;

	/* A disabled region is not an error: report success, assign nothing. */
	if (!of_device_is_available(target)) {
		of_node_put(target);
		return 0;
	}

	rmem = of_reserved_mem_lookup(target);
	of_node_put(target);

	/* The region must have been claimed by a driver providing device_init. */
	if (!rmem || !rmem->ops || !rmem->ops->device_init)
		return -EINVAL;

	rd = kmalloc_obj(struct rmem_assigned_device);
	if (!rd)
		return -ENOMEM;

	ret = rmem->ops->device_init(rmem, dev);
	if (ret == 0) {
		rd->dev = dev;
		rd->rmem = rmem;

		/* Record the assignment so device_release() can undo it. */
		mutex_lock(&of_rmem_assigned_device_mutex);
		list_add(&rd->list, &of_rmem_assigned_device_list);
		mutex_unlock(&of_rmem_assigned_device_mutex);

		dev_info(dev, "assigned reserved memory node %s\n", rmem->name);
	} else {
		kfree(rd);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_idx);
724 
/**
 * of_reserved_mem_device_init_by_name() - assign named reserved memory region
 *					   to given device
 * @dev: pointer to the device to configure
 * @np: pointer to the device node with 'memory-region' property
 * @name: name of the selected memory region
 *
 * Resolves @name to an index through 'memory-region-names' and delegates to
 * of_reserved_mem_device_init_by_idx().
 *
 * Returns: 0 on success or a negative error-code on failure.
 */
int of_reserved_mem_device_init_by_name(struct device *dev,
					struct device_node *np,
					const char *name)
{
	int idx;

	idx = of_property_match_string(np, "memory-region-names", name);
	return of_reserved_mem_device_init_by_idx(dev, np, idx);
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_name);
743 
744 /**
745  * of_reserved_mem_device_release() - release reserved memory device structures
746  * @dev:	Pointer to the device to deconfigure
747  *
748  * This function releases structures allocated for memory region handling for
749  * the given device.
750  */
751 void of_reserved_mem_device_release(struct device *dev)
752 {
753 	struct rmem_assigned_device *rd, *tmp;
754 	LIST_HEAD(release_list);
755 
756 	mutex_lock(&of_rmem_assigned_device_mutex);
757 	list_for_each_entry_safe(rd, tmp, &of_rmem_assigned_device_list, list) {
758 		if (rd->dev == dev)
759 			list_move_tail(&rd->list, &release_list);
760 	}
761 	mutex_unlock(&of_rmem_assigned_device_mutex);
762 
763 	list_for_each_entry_safe(rd, tmp, &release_list, list) {
764 		if (rd->rmem && rd->rmem->ops && rd->rmem->ops->device_release)
765 			rd->rmem->ops->device_release(rd->rmem, dev);
766 
767 		kfree(rd);
768 	}
769 }
770 EXPORT_SYMBOL_GPL(of_reserved_mem_device_release);
771 
772 /**
773  * of_reserved_mem_lookup() - acquire reserved_mem from a device node
774  * @np:		node pointer of the desired reserved-memory region
775  *
776  * This function allows drivers to acquire a reference to the reserved_mem
777  * struct based on a device node handle.
778  *
779  * Returns a reserved_mem reference, or NULL on error.
780  */
781 struct reserved_mem *of_reserved_mem_lookup(struct device_node *np)
782 {
783 	const char *name;
784 	int i;
785 
786 	if (!np->full_name)
787 		return NULL;
788 
789 	name = kbasename(np->full_name);
790 	for (i = 0; i < reserved_mem_count; i++)
791 		if (!strcmp(reserved_mem[i].name, name))
792 			return &reserved_mem[i];
793 
794 	return NULL;
795 }
796 EXPORT_SYMBOL_GPL(of_reserved_mem_lookup);
797 
/**
 * of_reserved_mem_region_to_resource() - Get a reserved memory region as a resource
 * @np:		node containing 'memory-region' property
 * @idx:	index of 'memory-region' property to lookup
 * @res:	Pointer to a struct resource to fill in with reserved region
 *
 * This function allows drivers to lookup a node's 'memory-region' property
 * entries by index and return a struct resource for the entry.
 *
 * Returns 0 on success with @res filled in. Returns -ENODEV if 'memory-region'
 * is missing or unavailable, -EINVAL for any other error.
 */
int of_reserved_mem_region_to_resource(const struct device_node *np,
				       unsigned int idx, struct resource *res)
{
	struct reserved_mem *rmem;

	if (!np)
		return -EINVAL;

	/* __free(device_node) drops the node reference automatically when
	 * 'target' goes out of scope, on every return path below. */
	struct device_node *target __free(device_node) = of_parse_phandle(np, "memory-region", idx);
	if (!target || !of_device_is_available(target))
		return -ENODEV;

	rmem = of_reserved_mem_lookup(target);
	if (!rmem)
		return -EINVAL;

	resource_set_range(res, rmem->base, rmem->size);
	res->flags = IORESOURCE_MEM;
	res->name = rmem->name;
	return 0;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_region_to_resource);
832 
833 /**
834  * of_reserved_mem_region_to_resource_byname() - Get a reserved memory region as a resource
835  * @np:		node containing 'memory-region' property
836  * @name:	name of 'memory-region' property entry to lookup
837  * @res:	Pointer to a struct resource to fill in with reserved region
838  *
839  * This function allows drivers to lookup a node's 'memory-region' property
840  * entries by name and return a struct resource for the entry.
841  *
842  * Returns 0 on success with @res filled in, or a negative error-code on
843  * failure.
844  */
845 int of_reserved_mem_region_to_resource_byname(const struct device_node *np,
846 					      const char *name,
847 					      struct resource *res)
848 {
849 	int idx;
850 
851 	if (!name)
852 		return -EINVAL;
853 
854 	idx = of_property_match_string(np, "memory-region-names", name);
855 	if (idx < 0)
856 		return idx;
857 
858 	return of_reserved_mem_region_to_resource(np, idx, res);
859 }
860 EXPORT_SYMBOL_GPL(of_reserved_mem_region_to_resource_byname);
861 
/**
 * of_reserved_mem_region_count() - Return the number of 'memory-region' entries
 * @np:		node containing 'memory-region' property
 *
 * This function allows drivers to retrieve the number of entries for a node's
 * 'memory-region' property.
 *
 * Returns the number of entries on success, or negative error code on a
 * malformed property.
 */
int of_reserved_mem_region_count(const struct device_node *np)
{
	/* Thin wrapper: the phandle count is the entry count, and any
	 * negative error from the helper is passed through unchanged. */
	return of_count_phandle_with_args(np, "memory-region", NULL);
}
EXPORT_SYMBOL_GPL(of_reserved_mem_region_count);
877