xref: /linux/drivers/of/of_reserved_mem.c (revision 2b54ac9e0cf8986e138736840f20d537db22ce79)
// SPDX-License-Identifier: GPL-2.0+
/*
 * Device tree based initialization code for reserved memory.
 *
 * Copyright (c) 2013, 2015 The Linux Foundation. All Rights Reserved.
 * Copyright (c) 2013,2014 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 * Author: Marek Szyprowski <m.szyprowski@samsung.com>
 * Author: Josh Cartwright <joshc@codeaurora.org>
 */

#define pr_fmt(fmt)	"OF: reserved mem: " fmt

#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/libfdt.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/of_reserved_mem.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/kmemleak.h>
#include <linux/cma.h>
#include <linux/dma-map-ops.h>

#include "of_private.h"

static struct reserved_mem reserved_mem_array[MAX_RESERVED_REGIONS] __initdata;
static struct reserved_mem *reserved_mem __refdata = reserved_mem_array;
static int total_reserved_mem_cnt = MAX_RESERVED_REGIONS;
static int reserved_mem_count;
static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
	phys_addr_t *res_base)
{
	phys_addr_t base;
	int err = 0;

	end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end;
	align = !align ? SMP_CACHE_BYTES : align;
	base = memblock_phys_alloc_range(size, align, start, end);
	if (!base)
		return -ENOMEM;

	*res_base = base;
	if (nomap) {
		err = memblock_mark_nomap(base, size);
		if (err)
			memblock_phys_free(base, size);
	}

	if (!err)
		kmemleak_ignore_phys(base);

	return err;
}
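
/*
 * Illustrative call (a sketch, not part of this file): allocate a 4 MiB,
 * 1 MiB-aligned no-map region with no placement constraint. Passing
 * start = 0 and end = 0 lets the helper fall back to
 * MEMBLOCK_ALLOC_ANYWHERE:
 *
 *	phys_addr_t base;
 *	int err = early_init_dt_alloc_reserved_memory_arch(SZ_4M, SZ_1M,
 *							   0, 0, true, &base);
 */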

/*
 * alloc_reserved_mem_array() - allocate memory for the reserved_mem
 * array using memblock
 *
 * This function allocates memory for the reserved_mem array according
 * to the total number of reserved memory regions defined in the DT.
 * After the new array is allocated, the information stored in the
 * initial static array is copied over to the new array, which is used
 * from that point on.
 */
static void __init alloc_reserved_mem_array(void)
{
	struct reserved_mem *new_array;
	size_t alloc_size, copy_size, memset_size;

	alloc_size = array_size(total_reserved_mem_cnt, sizeof(*new_array));
	if (alloc_size == SIZE_MAX) {
		pr_err("Failed to allocate memory for reserved_mem array with err: %d\n", -EOVERFLOW);
		return;
	}

	new_array = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
	if (!new_array) {
		pr_err("Failed to allocate memory for reserved_mem array with err: %d\n", -ENOMEM);
		return;
	}

	copy_size = array_size(reserved_mem_count, sizeof(*new_array));
	if (copy_size == SIZE_MAX) {
		memblock_free(new_array, alloc_size);
		total_reserved_mem_cnt = MAX_RESERVED_REGIONS;
		pr_err("Failed to allocate memory for reserved_mem array with err: %d\n", -EOVERFLOW);
		return;
	}

	memset_size = alloc_size - copy_size;

	memcpy(new_array, reserved_mem, copy_size);
	memset(new_array + reserved_mem_count, 0, memset_size);

	reserved_mem = new_array;
}

static void __init fdt_init_reserved_mem_node(struct reserved_mem *rmem);
/*
 * fdt_reserved_mem_save_node() - save fdt node for second pass initialization
 */
static void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
					      phys_addr_t base, phys_addr_t size)
{
	struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];

	if (reserved_mem_count == total_reserved_mem_cnt) {
		pr_err("not enough space for all defined regions.\n");
		return;
	}

	rmem->fdt_node = node;
	rmem->name = uname;
	rmem->base = base;
	rmem->size = size;

	/* Call the region specific initialization function */
	fdt_init_reserved_mem_node(rmem);

	reserved_mem_count++;
}

static int __init early_init_dt_reserve_memory(phys_addr_t base,
					       phys_addr_t size, bool nomap)
{
	if (nomap) {
		/*
		 * If the memory is already reserved (by another region), we
		 * should not allow it to be marked nomap, but don't worry
		 * if the region isn't memory as it won't be mapped.
		 */
		if (memblock_overlaps_region(&memblock.memory, base, size) &&
		    memblock_is_region_reserved(base, size))
			return -EBUSY;

		return memblock_mark_nomap(base, size);
	}
	return memblock_reserve(base, size);
}

/*
 * __reserved_mem_reserve_reg() - reserve all memory described in 'reg' property
 */
static int __init __reserved_mem_reserve_reg(unsigned long node,
					     const char *uname)
{
	phys_addr_t base, size;
	int i, len;
	const __be32 *prop;
	bool nomap, default_cma;

	prop = of_flat_dt_get_addr_size_prop(node, "reg", &len);
	if (!prop)
		return -ENOENT;

	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
	default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);

	if (default_cma && cma_skip_dt_default_reserved_mem()) {
		pr_err("Skipping dt linux,cma-default for \"cma=\" kernel param.\n");
		return -EINVAL;
	}

	for (i = 0; i < len; i++) {
		u64 b, s;

		of_flat_dt_read_addr_size(prop, i, &b, &s);

		base = b;
		size = s;

		if (size && early_init_dt_reserve_memory(base, size, nomap) == 0) {
			/* Architecture specific contiguous memory fixup. */
			if (of_flat_dt_is_compatible(node, "shared-dma-pool") &&
			    of_get_flat_dt_prop(node, "reusable", NULL))
				dma_contiguous_early_fixup(base, size);
			pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
				uname, &base, (unsigned long)(size / SZ_1M));
		} else {
			pr_err("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
			       uname, &base, (unsigned long)(size / SZ_1M));
		}
	}
	return 0;
}
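
/*
 * Example of a statically-placed region consumed by the function above
 * (an illustrative devicetree fragment; addresses and node names are
 * made up):
 *
 *	reserved-memory {
 *		#address-cells = <2>;
 *		#size-cells = <2>;
 *		ranges;
 *
 *		secure_fw@80000000 {
 *			reg = <0x0 0x80000000 0x0 0x100000>;
 *			no-map;
 *		};
 *	};
 */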

/*
 * __reserved_mem_check_root() - check that the #size-cells and
 * #address-cells provided in /reserved-memory match the values used by
 * the root node, and that a 'ranges' property is present
 */
static int __init __reserved_mem_check_root(unsigned long node)
{
	const __be32 *prop;

	prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
	if (!prop || be32_to_cpup(prop) != dt_root_size_cells)
		return -EINVAL;

	prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
	if (!prop || be32_to_cpup(prop) != dt_root_addr_cells)
		return -EINVAL;

	prop = of_get_flat_dt_prop(node, "ranges", NULL);
	if (!prop)
		return -EINVAL;
	return 0;
}

static void __init __rmem_check_for_overlap(void);

/**
 * fdt_scan_reserved_mem_reg_nodes() - Store info for the "reg" defined
 * reserved memory regions.
 *
 * This function scans the DT and stores the information for the
 * reserved memory regions that are defined using the "reg" property.
 * The region node number, name, base address, and size are all stored
 * in the reserved_mem array by calling the fdt_reserved_mem_save_node()
 * function.
 */
void __init fdt_scan_reserved_mem_reg_nodes(void)
{
	const void *fdt = initial_boot_params;
	phys_addr_t base, size;
	int node, child;

	if (!fdt)
		return;

	node = fdt_path_offset(fdt, "/reserved-memory");
	if (node < 0) {
		pr_info("Reserved memory: No reserved-memory node in the DT\n");
		return;
	}

	/* Attempt dynamic allocation of a new reserved_mem array */
	alloc_reserved_mem_array();

	if (__reserved_mem_check_root(node)) {
		pr_err("Reserved memory: unsupported node format, ignoring\n");
		return;
	}

	fdt_for_each_subnode(child, fdt, node) {
		const char *uname;
		bool default_cma = of_get_flat_dt_prop(child, "linux,cma-default", NULL);
		u64 b, s;

		if (!of_fdt_device_is_available(fdt, child))
			continue;
		if (default_cma && cma_skip_dt_default_reserved_mem())
			continue;

		if (!of_flat_dt_get_addr_size(child, "reg", &b, &s))
			continue;

		base = b;
		size = s;

		if (size) {
			uname = fdt_get_name(fdt, child, NULL);
			fdt_reserved_mem_save_node(child, uname, base, size);
		}
	}

	/* check for overlapping reserved regions */
	__rmem_check_for_overlap();
}

static int __init __reserved_mem_alloc_size(unsigned long node, const char *uname);

/*
 * fdt_scan_reserved_mem() - scan the /reserved-memory node and reserve or
 * allocate all of its child regions
 */
int __init fdt_scan_reserved_mem(void)
{
	int node, child;
	int dynamic_nodes_cnt = 0, count = 0;
	int dynamic_nodes[MAX_RESERVED_REGIONS];
	const void *fdt = initial_boot_params;

	node = fdt_path_offset(fdt, "/reserved-memory");
	if (node < 0)
		return -ENODEV;

	if (__reserved_mem_check_root(node) != 0) {
		pr_err("Reserved memory: unsupported node format, ignoring\n");
		return -EINVAL;
	}

	fdt_for_each_subnode(child, fdt, node) {
		const char *uname;
		int err;

		if (!of_fdt_device_is_available(fdt, child))
			continue;

		uname = fdt_get_name(fdt, child, NULL);

		err = __reserved_mem_reserve_reg(child, uname);
		if (!err)
			count++;
		/*
		 * Save the nodes for the dynamically-placed regions
		 * into an array which will be used for allocation right
		 * after all the statically-placed regions are reserved
		 * or marked as no-map. This is done to avoid dynamically
		 * allocating from one of the statically-placed regions.
		 * The fixed-size array is bounds-checked to avoid overflow.
		 */
		if (err == -ENOENT && of_get_flat_dt_prop(child, "size", NULL) &&
		    dynamic_nodes_cnt < MAX_RESERVED_REGIONS) {
			dynamic_nodes[dynamic_nodes_cnt] = child;
			dynamic_nodes_cnt++;
		}
	}
	for (int i = 0; i < dynamic_nodes_cnt; i++) {
		const char *uname;
		int err;

		child = dynamic_nodes[i];
		uname = fdt_get_name(fdt, child, NULL);
		err = __reserved_mem_alloc_size(child, uname);
		if (!err)
			count++;
	}
	total_reserved_mem_cnt = count;
	return 0;
}
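
/*
 * Boot-time ordering sketch (illustrative; see drivers/of/fdt.c for the
 * actual call sites):
 *
 *	early_init_fdt_scan_reserved_mem()
 *	  -> fdt_scan_reserved_mem()		first pass: reserve the
 *						statically-placed regions,
 *						then allocate the
 *						dynamically-placed ones
 *	...
 *	  -> fdt_scan_reserved_mem_reg_nodes()	second pass: record the
 *						"reg" regions and run the
 *						per-region init handlers
 */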

/*
 * __reserved_mem_alloc_in_range() - allocate reserved memory described with
 *	'alloc-ranges'. Choose bottom-up/top-down depending on nearby existing
 *	reserved regions to keep the reserved memory contiguous if possible.
 */
static int __init __reserved_mem_alloc_in_range(phys_addr_t size,
	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
	phys_addr_t *res_base)
{
	bool prev_bottom_up = memblock_bottom_up();
	bool bottom_up = false, top_down = false;
	int ret, i;

	for (i = 0; i < reserved_mem_count; i++) {
		struct reserved_mem *rmem = &reserved_mem[i];

		/* Skip regions that were not reserved yet */
		if (rmem->size == 0)
			continue;

		/*
		 * If range starts next to an existing reservation, use bottom-up:
		 *	|....RRRR................RRRRRRRR..............|
		 *	       --RRRR------
		 */
		if (start >= rmem->base && start <= (rmem->base + rmem->size))
			bottom_up = true;

		/*
		 * If range ends next to an existing reservation, use top-down:
		 *	|....RRRR................RRRRRRRR..............|
		 *	              -------RRRR-----
		 */
		if (end >= rmem->base && end <= (rmem->base + rmem->size))
			top_down = true;
	}

	/* Change setting only if either bottom-up or top-down was selected */
	if (bottom_up != top_down)
		memblock_set_bottom_up(bottom_up);

	ret = early_init_dt_alloc_reserved_memory_arch(size, align,
			start, end, nomap, res_base);

	/* Restore old setting if needed */
	if (bottom_up != top_down)
		memblock_set_bottom_up(prev_bottom_up);

	return ret;
}

/*
 * __reserved_mem_alloc_size() - allocate reserved memory described by
 *	'size', 'alignment' and 'alloc-ranges' properties.
 */
static int __init __reserved_mem_alloc_size(unsigned long node, const char *uname)
{
	phys_addr_t start = 0, end = 0;
	phys_addr_t base = 0, align = 0, size;
	int i, len;
	const __be32 *prop;
	bool nomap, default_cma;
	int ret;

	prop = of_get_flat_dt_prop(node, "size", &len);
	if (!prop)
		return -EINVAL;

	if (len != dt_root_size_cells * sizeof(__be32)) {
		pr_err("invalid size property in '%s' node.\n", uname);
		return -EINVAL;
	}
	size = dt_mem_next_cell(dt_root_size_cells, &prop);

	prop = of_get_flat_dt_prop(node, "alignment", &len);
	if (prop) {
		if (len != dt_root_addr_cells * sizeof(__be32)) {
			pr_err("invalid alignment property in '%s' node.\n",
				uname);
			return -EINVAL;
		}
		align = dt_mem_next_cell(dt_root_addr_cells, &prop);
	}

	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
	default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);

	if (default_cma && cma_skip_dt_default_reserved_mem()) {
		pr_err("Skipping dt linux,cma-default for \"cma=\" kernel param.\n");
		return -EINVAL;
	}

	/* Adjust the alignment to satisfy the CMA requirement, if needed */
	if (IS_ENABLED(CONFIG_CMA)
	    && of_flat_dt_is_compatible(node, "shared-dma-pool")
	    && of_get_flat_dt_prop(node, "reusable", NULL)
	    && !nomap)
		align = max_t(phys_addr_t, align, CMA_MIN_ALIGNMENT_BYTES);

	prop = of_flat_dt_get_addr_size_prop(node, "alloc-ranges", &len);
	if (prop) {
		for (i = 0; i < len; i++) {
			u64 b, s;

			of_flat_dt_read_addr_size(prop, i, &b, &s);

			start = b;
			end = b + s;

			base = 0;
			ret = __reserved_mem_alloc_in_range(size, align,
					start, end, nomap, &base);
			if (ret == 0) {
				pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
					uname, &base,
					(unsigned long)(size / SZ_1M));
				break;
			}
		}
	} else {
		ret = early_init_dt_alloc_reserved_memory_arch(size, align,
							0, 0, nomap, &base);
		if (ret == 0)
			pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
				uname, &base, (unsigned long)(size / SZ_1M));
	}

	if (base == 0) {
		pr_err("failed to allocate memory for node '%s': size %lu MiB\n",
		       uname, (unsigned long)(size / SZ_1M));
		return -ENOMEM;
	}
	/* Architecture specific contiguous memory fixup. */
	if (of_flat_dt_is_compatible(node, "shared-dma-pool") &&
	    of_get_flat_dt_prop(node, "reusable", NULL))
		dma_contiguous_early_fixup(base, size);
	/* Save region in the reserved_mem array */
	fdt_reserved_mem_save_node(node, uname, base, size);
	return 0;
}
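
/*
 * Example of a dynamically-placed region consumed by the function above
 * (an illustrative devicetree fragment; sizes and ranges are made up):
 *
 *	linux,cma {
 *		compatible = "shared-dma-pool";
 *		reusable;
 *		size = <0x0 0x4000000>;
 *		alignment = <0x0 0x400000>;
 *		alloc-ranges = <0x0 0x40000000 0x0 0x40000000>;
 *		linux,cma-default;
 *	};
 */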

static const struct of_device_id __rmem_of_table_sentinel
	__used __section("__reservedmem_of_table_end");

/*
 * __reserved_mem_init_node() - call region specific reserved memory init code
 */
static int __init __reserved_mem_init_node(struct reserved_mem *rmem)
{
	extern const struct of_device_id __reservedmem_of_table[];
	const struct of_device_id *i;
	int ret = -ENOENT;

	for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) {
		reservedmem_of_init_fn initfn = i->data;
		const char *compat = i->compatible;

		if (!of_flat_dt_is_compatible(rmem->fdt_node, compat))
			continue;

		ret = initfn(rmem);
		if (ret == 0) {
			pr_info("initialized node %s, compatible id %s\n",
				rmem->name, compat);
			break;
		}
	}
	return ret;
}
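
/*
 * Handlers land in the table walked above by registering with
 * RESERVEDMEM_OF_DECLARE() from <linux/of_reserved_mem.h>. A minimal
 * sketch (the name, compatible string, and setup logic are hypothetical):
 *
 *	static int __init foo_mem_setup(struct reserved_mem *rmem)
 *	{
 *		pr_info("foo: using region at %pa\n", &rmem->base);
 *		return 0;
 *	}
 *	RESERVEDMEM_OF_DECLARE(foo, "vendor,foo-mem", foo_mem_setup);
 */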

static int __init __rmem_cmp(const void *a, const void *b)
{
	const struct reserved_mem *ra = a, *rb = b;

	if (ra->base < rb->base)
		return -1;

	if (ra->base > rb->base)
		return 1;

	/*
	 * Put the dynamic allocations (address == 0, size == 0) before static
	 * allocations at address 0x0 so that overlap detection works
	 * correctly.
	 */
	if (ra->size < rb->size)
		return -1;
	if (ra->size > rb->size)
		return 1;

	if (ra->fdt_node < rb->fdt_node)
		return -1;
	if (ra->fdt_node > rb->fdt_node)
		return 1;

	return 0;
}

static void __init __rmem_check_for_overlap(void)
{
	int i;

	if (reserved_mem_count < 2)
		return;

	sort(reserved_mem, reserved_mem_count, sizeof(reserved_mem[0]),
	     __rmem_cmp, NULL);
	for (i = 0; i < reserved_mem_count - 1; i++) {
		struct reserved_mem *this, *next;

		this = &reserved_mem[i];
		next = &reserved_mem[i + 1];

		if (this->base + this->size > next->base) {
			phys_addr_t this_end, next_end;

			this_end = this->base + this->size;
			next_end = next->base + next->size;
			pr_err("OVERLAP DETECTED!\n%s (%pa--%pa) overlaps with %s (%pa--%pa)\n",
			       this->name, &this->base, &this_end,
			       next->name, &next->base, &next_end);
		}
	}
}

/**
 * fdt_init_reserved_mem_node() - Initialize a reserved memory region
 * @rmem: reserved_mem struct of the memory region to be initialized.
 *
 * This function calls the region specific initialization function for
 * a reserved memory region.
 */
static void __init fdt_init_reserved_mem_node(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;
	int err = 0;
	bool nomap;

	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

	err = __reserved_mem_init_node(rmem);
	if (err != 0 && err != -ENOENT) {
		pr_info("node %s: compatible matching failed\n", rmem->name);
		if (nomap)
			memblock_clear_nomap(rmem->base, rmem->size);
		else
			memblock_phys_free(rmem->base, rmem->size);
	} else {
		phys_addr_t end = rmem->base + rmem->size - 1;
		bool reusable =
			(of_get_flat_dt_prop(node, "reusable", NULL)) != NULL;

		pr_info("%pa..%pa (%lu KiB) %s %s %s\n",
			&rmem->base, &end, (unsigned long)(rmem->size / SZ_1K),
			nomap ? "nomap" : "map",
			reusable ? "reusable" : "non-reusable",
			rmem->name ? rmem->name : "unknown");
	}
}

struct rmem_assigned_device {
	struct device *dev;
	struct reserved_mem *rmem;
	struct list_head list;
};

static LIST_HEAD(of_rmem_assigned_device_list);
static DEFINE_MUTEX(of_rmem_assigned_device_mutex);

/**
 * of_reserved_mem_device_init_by_idx() - assign reserved memory region to
 *					  given device
 * @dev:	Pointer to the device to configure
 * @np:		Pointer to the device_node with 'reserved-memory' property
 * @idx:	Index of selected region
 *
 * This function assigns the respective DMA-mapping operations based on the
 * reserved memory region specified by the 'memory-region' property in the
 * @np node to the @dev device. When a driver needs to use more than one
 * reserved memory region, it should allocate child devices and initialize
 * a region by name for each child device.
 *
 * Returns error code or zero on success.
 */
int of_reserved_mem_device_init_by_idx(struct device *dev,
				       struct device_node *np, int idx)
{
	struct rmem_assigned_device *rd;
	struct device_node *target;
	struct reserved_mem *rmem;
	int ret;

	if (!np || !dev)
		return -EINVAL;

	target = of_parse_phandle(np, "memory-region", idx);
	if (!target)
		return -ENODEV;

	if (!of_device_is_available(target)) {
		of_node_put(target);
		return 0;
	}

	rmem = of_reserved_mem_lookup(target);
	of_node_put(target);

	if (!rmem || !rmem->ops || !rmem->ops->device_init)
		return -EINVAL;

	rd = kmalloc(sizeof(struct rmem_assigned_device), GFP_KERNEL);
	if (!rd)
		return -ENOMEM;

	ret = rmem->ops->device_init(rmem, dev);
	if (ret == 0) {
		rd->dev = dev;
		rd->rmem = rmem;

		mutex_lock(&of_rmem_assigned_device_mutex);
		list_add(&rd->list, &of_rmem_assigned_device_list);
		mutex_unlock(&of_rmem_assigned_device_mutex);

		dev_info(dev, "assigned reserved memory node %s\n", rmem->name);
	} else {
		kfree(rd);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_idx);
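
/*
 * Illustrative driver usage (a sketch; foo_probe and its device are
 * hypothetical):
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		ret = of_reserved_mem_device_init_by_idx(&pdev->dev,
 *							 pdev->dev.of_node, 0);
 *		if (ret && ret != -ENODEV)
 *			return ret;
 *		...
 *	}
 */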

/**
 * of_reserved_mem_device_init_by_name() - assign named reserved memory region
 *					   to given device
 * @dev: pointer to the device to configure
 * @np: pointer to the device node with 'memory-region' property
 * @name: name of the selected memory region
 *
 * Returns: 0 on success or a negative error-code on failure.
 */
int of_reserved_mem_device_init_by_name(struct device *dev,
					struct device_node *np,
					const char *name)
{
	int idx = of_property_match_string(np, "memory-region-names", name);

	return of_reserved_mem_device_init_by_idx(dev, np, idx);
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_name);
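
/*
 * Example consumer-node properties looked up by name above (an
 * illustrative devicetree fragment; labels are made up):
 *
 *	memory-region = <&rx_pool>, <&tx_pool>;
 *	memory-region-names = "rx", "tx";
 */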

/**
 * of_reserved_mem_device_release() - release reserved memory device structures
 * @dev:	Pointer to the device to deconfigure
 *
 * This function releases structures allocated for memory region handling for
 * the given device.
 */
void of_reserved_mem_device_release(struct device *dev)
{
	struct rmem_assigned_device *rd, *tmp;
	LIST_HEAD(release_list);

	mutex_lock(&of_rmem_assigned_device_mutex);
	list_for_each_entry_safe(rd, tmp, &of_rmem_assigned_device_list, list) {
		if (rd->dev == dev)
			list_move_tail(&rd->list, &release_list);
	}
	mutex_unlock(&of_rmem_assigned_device_mutex);

	list_for_each_entry_safe(rd, tmp, &release_list, list) {
		if (rd->rmem && rd->rmem->ops && rd->rmem->ops->device_release)
			rd->rmem->ops->device_release(rd->rmem, dev);

		kfree(rd);
	}
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_release);

/**
 * of_reserved_mem_lookup() - acquire reserved_mem from a device node
 * @np:		node pointer of the desired reserved-memory region
 *
 * This function allows drivers to acquire a reference to the reserved_mem
 * struct based on a device node handle.
 *
 * Returns a reserved_mem reference, or NULL on error.
 */
struct reserved_mem *of_reserved_mem_lookup(struct device_node *np)
{
	const char *name;
	int i;

	if (!np->full_name)
		return NULL;

	name = kbasename(np->full_name);
	for (i = 0; i < reserved_mem_count; i++)
		if (!strcmp(reserved_mem[i].name, name))
			return &reserved_mem[i];

	return NULL;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_lookup);
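
/*
 * Illustrative use (a sketch; np is a handle to a reserved-memory
 * child node):
 *
 *	struct reserved_mem *rmem = of_reserved_mem_lookup(np);
 *	if (rmem)
 *		pr_debug("region %s at %pa\n", rmem->name, &rmem->base);
 */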

/**
 * of_reserved_mem_region_to_resource() - Get a reserved memory region as a resource
 * @np:		node containing 'memory-region' property
 * @idx:	index of 'memory-region' property to lookup
 * @res:	Pointer to a struct resource to fill in with reserved region
 *
 * This function allows drivers to lookup a node's 'memory-region' property
 * entries by index and return a struct resource for the entry.
 *
 * Returns 0 on success with @res filled in. Returns -ENODEV if 'memory-region'
 * is missing or unavailable, -EINVAL for any other error.
 */
int of_reserved_mem_region_to_resource(const struct device_node *np,
				       unsigned int idx, struct resource *res)
{
	struct reserved_mem *rmem;

	if (!np)
		return -EINVAL;

	struct device_node __free(device_node) *target = of_parse_phandle(np, "memory-region", idx);
	if (!target || !of_device_is_available(target))
		return -ENODEV;

	rmem = of_reserved_mem_lookup(target);
	if (!rmem)
		return -EINVAL;

	resource_set_range(res, rmem->base, rmem->size);
	res->flags = IORESOURCE_MEM;
	res->name = rmem->name;
	return 0;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_region_to_resource);
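
/*
 * Illustrative use (a sketch; the mapping step assumes a normal,
 * map-able region):
 *
 *	struct resource res;
 *	void *va;
 *
 *	if (!of_reserved_mem_region_to_resource(dev->of_node, 0, &res))
 *		va = memremap(res.start, resource_size(&res), MEMREMAP_WB);
 */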

/**
 * of_reserved_mem_region_to_resource_byname() - Get a reserved memory region as a resource
 * @np:		node containing 'memory-region' property
 * @name:	name of 'memory-region' property entry to lookup
 * @res:	Pointer to a struct resource to fill in with reserved region
 *
 * This function allows drivers to lookup a node's 'memory-region' property
 * entries by name and return a struct resource for the entry.
 *
 * Returns 0 on success with @res filled in, or a negative error-code on
 * failure.
 */
int of_reserved_mem_region_to_resource_byname(const struct device_node *np,
					      const char *name,
					      struct resource *res)
{
	int idx;

	if (!name)
		return -EINVAL;

	idx = of_property_match_string(np, "memory-region-names", name);
	if (idx < 0)
		return idx;

	return of_reserved_mem_region_to_resource(np, idx, res);
}
EXPORT_SYMBOL_GPL(of_reserved_mem_region_to_resource_byname);

/**
 * of_reserved_mem_region_count() - Return the number of 'memory-region' entries
 * @np:		node containing 'memory-region' property
 *
 * This function allows drivers to retrieve the number of entries for a node's
 * 'memory-region' property.
 *
 * Returns the number of entries on success, or negative error code on a
 * malformed property.
 */
int of_reserved_mem_region_count(const struct device_node *np)
{
	return of_count_phandle_with_args(np, "memory-region", NULL);
}
EXPORT_SYMBOL_GPL(of_reserved_mem_region_count);
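
/*
 * Illustrative use (a sketch): size a per-region array in a driver.
 * The 'regions' variable is hypothetical:
 *
 *	int n = of_reserved_mem_region_count(dev->of_node);
 *	if (n > 0)
 *		regions = kcalloc(n, sizeof(*regions), GFP_KERNEL);
 */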
824