// SPDX-License-Identifier: GPL-2.0+
/*
 * Device tree based initialization code for reserved memory.
 *
 * Copyright (c) 2013, 2015 The Linux Foundation. All Rights Reserved.
 * Copyright (c) 2013,2014 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 * Author: Marek Szyprowski <m.szyprowski@samsung.com>
 * Author: Josh Cartwright <joshc@codeaurora.org>
 */

#define pr_fmt(fmt)	"OF: reserved mem: " fmt

#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/libfdt.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/of_reserved_mem.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/kmemleak.h>
#include <linux/cma.h>

#include "of_private.h"

static struct reserved_mem reserved_mem_array[MAX_RESERVED_REGIONS] __initdata;
static struct reserved_mem *reserved_mem __refdata = reserved_mem_array;
static int total_reserved_mem_cnt = MAX_RESERVED_REGIONS;
static int reserved_mem_count;

static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
	phys_addr_t *res_base)
{
	phys_addr_t base;
	int err = 0;

	end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end;
	align = !align ? SMP_CACHE_BYTES : align;
	base = memblock_phys_alloc_range(size, align, start, end);
	if (!base)
		return -ENOMEM;

	*res_base = base;
	if (nomap) {
		err = memblock_mark_nomap(base, size);
		if (err)
			memblock_phys_free(base, size);
	}

	if (!err)
		kmemleak_ignore_phys(base);

	return err;
}

/*
 * alloc_reserved_mem_array() - allocate memory for the reserved_mem
 * array using memblock
 *
 * This function is used to allocate memory for the reserved_mem
 * array according to the total number of reserved memory regions
 * defined in the DT.
 * After the new array is allocated, the information stored in
 * the initial static array is copied over to this new array and
 * the new array is used from this point on.
 */
static void __init alloc_reserved_mem_array(void)
{
	struct reserved_mem *new_array;
	size_t alloc_size, copy_size, memset_size;

	alloc_size = array_size(total_reserved_mem_cnt, sizeof(*new_array));
	if (alloc_size == SIZE_MAX) {
		pr_err("Failed to allocate memory for reserved_mem array with err: %d\n", -EOVERFLOW);
		return;
	}

	new_array = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
	if (!new_array) {
		pr_err("Failed to allocate memory for reserved_mem array with err: %d\n", -ENOMEM);
		return;
	}

	copy_size = array_size(reserved_mem_count, sizeof(*new_array));
	if (copy_size == SIZE_MAX) {
		memblock_free(new_array, alloc_size);
		total_reserved_mem_cnt = MAX_RESERVED_REGIONS;
		pr_err("Failed to allocate memory for reserved_mem array with err: %d\n", -EOVERFLOW);
		return;
	}

	memset_size = alloc_size - copy_size;

	memcpy(new_array, reserved_mem, copy_size);
	memset(new_array + reserved_mem_count, 0, memset_size);

	reserved_mem = new_array;
}

static void __init fdt_init_reserved_mem_node(struct reserved_mem *rmem);
/*
 * fdt_reserved_mem_save_node() - save fdt node for second pass initialization
 */
static void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
					      phys_addr_t base, phys_addr_t size)
{
	struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];

	if (reserved_mem_count == total_reserved_mem_cnt) {
		pr_err("not enough space for all defined regions.\n");
		return;
	}

	rmem->fdt_node = node;
	rmem->name = uname;
	rmem->base = base;
	rmem->size = size;

	/* Call the region specific initialization function */
	fdt_init_reserved_mem_node(rmem);

	reserved_mem_count++;
}

static int __init early_init_dt_reserve_memory(phys_addr_t base,
					       phys_addr_t size, bool nomap)
{
	if (nomap) {
		/*
		 * If the memory is already reserved (by another region), we
		 * should not allow it to be marked nomap, but don't worry
		 * if the region isn't memory as it won't be mapped.
		 */
		if (memblock_overlaps_region(&memblock.memory, base, size) &&
		    memblock_is_region_reserved(base, size))
			return -EBUSY;

		return memblock_mark_nomap(base, size);
	}
	return memblock_reserve(base, size);
}

/*
 * __reserved_mem_reserve_reg() - reserve all memory described in 'reg' property
 */
static int __init __reserved_mem_reserve_reg(unsigned long node,
					     const char *uname)
{
	int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
	phys_addr_t base, size;
	int len;
	const __be32 *prop;
	bool nomap;

	prop = of_get_flat_dt_prop(node, "reg", &len);
	if (!prop)
		return -ENOENT;

	if (len && len % t_len != 0) {
		pr_err("Reserved memory: invalid reg property in '%s', skipping node.\n",
		       uname);
		return -EINVAL;
	}

	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

	while (len >= t_len) {
		base = dt_mem_next_cell(dt_root_addr_cells, &prop);
		size = dt_mem_next_cell(dt_root_size_cells, &prop);

		if (size &&
		    early_init_dt_reserve_memory(base, size, nomap) == 0)
			pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
				 uname, &base, (unsigned long)(size / SZ_1M));
		else
			pr_err("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
			       uname, &base, (unsigned long)(size / SZ_1M));

		len -= t_len;
	}
	return 0;
}
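
/*
 * For reference, a minimal sketch of a statically placed region that the
 * parser above would handle (illustrative DT fragment, not taken from this
 * file; the node name and addresses are made up):
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		framebuffer@78000000 {
 *			reg = <0x78000000 0x800000>;
 *			no-map;
 *		};
 *	};
 */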

/*
 * __reserved_mem_check_root() - check that the #size-cells and #address-cells
 * values provided in /reserved-memory match the values used by the root node,
 * and that a 'ranges' property has been provided
 */
static int __init __reserved_mem_check_root(unsigned long node)
{
	const __be32 *prop;

	prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
	if (!prop || be32_to_cpup(prop) != dt_root_size_cells)
		return -EINVAL;

	prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
	if (!prop || be32_to_cpup(prop) != dt_root_addr_cells)
		return -EINVAL;

	prop = of_get_flat_dt_prop(node, "ranges", NULL);
	if (!prop)
		return -EINVAL;
	return 0;
}

static void __init __rmem_check_for_overlap(void);

/**
 * fdt_scan_reserved_mem_reg_nodes() - Store info for the "reg" defined
 * reserved memory regions.
 *
 * This function is used to scan through the DT and store the
 * information for the reserved memory regions that are defined using
 * the "reg" property. The region node number, name, base address, and
 * size are all stored in the reserved_mem array by calling the
 * fdt_reserved_mem_save_node() function.
 */
void __init fdt_scan_reserved_mem_reg_nodes(void)
{
	int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
	const void *fdt = initial_boot_params;
	phys_addr_t base, size;
	const __be32 *prop;
	int node, child;
	int len;

	if (!fdt)
		return;

	node = fdt_path_offset(fdt, "/reserved-memory");
	if (node < 0) {
		pr_info("Reserved memory: No reserved-memory node in the DT\n");
		return;
	}

	/* Attempt dynamic allocation of a new reserved_mem array */
	alloc_reserved_mem_array();

	if (__reserved_mem_check_root(node)) {
		pr_err("Reserved memory: unsupported node format, ignoring\n");
		return;
	}

	fdt_for_each_subnode(child, fdt, node) {
		const char *uname;

		prop = of_get_flat_dt_prop(child, "reg", &len);
		if (!prop)
			continue;
		if (!of_fdt_device_is_available(fdt, child))
			continue;

		uname = fdt_get_name(fdt, child, NULL);
		if (len && len % t_len != 0) {
			pr_err("Reserved memory: invalid reg property in '%s', skipping node.\n",
			       uname);
			continue;
		}

		if (len > t_len)
			pr_warn("%s() ignores %d regions in node '%s'\n",
				__func__, len / t_len - 1, uname);

		base = dt_mem_next_cell(dt_root_addr_cells, &prop);
		size = dt_mem_next_cell(dt_root_size_cells, &prop);

		if (size)
			fdt_reserved_mem_save_node(child, uname, base, size);
	}

	/* check for overlapping reserved regions */
	__rmem_check_for_overlap();
}

static int __init __reserved_mem_alloc_size(unsigned long node, const char *uname);

/*
 * fdt_scan_reserved_mem() - scan the /reserved-memory node and reserve or
 * allocate all memory regions it describes
 */
int __init fdt_scan_reserved_mem(void)
{
	int node, child;
	int dynamic_nodes_cnt = 0, count = 0;
	int dynamic_nodes[MAX_RESERVED_REGIONS];
	const void *fdt = initial_boot_params;

	node = fdt_path_offset(fdt, "/reserved-memory");
	if (node < 0)
		return -ENODEV;

	if (__reserved_mem_check_root(node) != 0) {
		pr_err("Reserved memory: unsupported node format, ignoring\n");
		return -EINVAL;
	}

	fdt_for_each_subnode(child, fdt, node) {
		const char *uname;
		int err;

		if (!of_fdt_device_is_available(fdt, child))
			continue;

		uname = fdt_get_name(fdt, child, NULL);

		err = __reserved_mem_reserve_reg(child, uname);
		if (!err)
			count++;
		/*
		 * Save the nodes for the dynamically-placed regions
		 * into an array which will be used for allocation right
		 * after all the statically-placed regions are reserved
		 * or marked as no-map. This is done to avoid dynamically
		 * allocating from one of the statically-placed regions.
		 */
		if (err == -ENOENT && of_get_flat_dt_prop(child, "size", NULL)) {
			dynamic_nodes[dynamic_nodes_cnt] = child;
			dynamic_nodes_cnt++;
		}
	}
	for (int i = 0; i < dynamic_nodes_cnt; i++) {
		const char *uname;
		int err;

		child = dynamic_nodes[i];
		uname = fdt_get_name(fdt, child, NULL);
		err = __reserved_mem_alloc_size(child, uname);
		if (!err)
			count++;
	}
	total_reserved_mem_cnt = count;
	return 0;
}

/*
 * __reserved_mem_alloc_in_range() - allocate reserved memory described with
 *	'alloc-ranges'. Choose bottom-up/top-down depending on nearby existing
 *	reserved regions to keep the reserved memory contiguous if possible.
 */
static int __init __reserved_mem_alloc_in_range(phys_addr_t size,
	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
	phys_addr_t *res_base)
{
	bool prev_bottom_up = memblock_bottom_up();
	bool bottom_up = false, top_down = false;
	int ret, i;

	for (i = 0; i < reserved_mem_count; i++) {
		struct reserved_mem *rmem = &reserved_mem[i];

		/* Skip regions that were not reserved yet */
		if (rmem->size == 0)
			continue;

		/*
		 * If range starts next to an existing reservation, use bottom-up:
		 *	|....RRRR................RRRRRRRR..............|
		 *	       --RRRR------
		 */
		if (start >= rmem->base && start <= (rmem->base + rmem->size))
			bottom_up = true;

		/*
		 * If range ends next to an existing reservation, use top-down:
		 *	|....RRRR................RRRRRRRR..............|
		 *	              -------RRRR-----
		 */
		if (end >= rmem->base && end <= (rmem->base + rmem->size))
			top_down = true;
	}

	/* Change setting only if either bottom-up or top-down was selected */
	if (bottom_up != top_down)
		memblock_set_bottom_up(bottom_up);

	ret = early_init_dt_alloc_reserved_memory_arch(size, align,
			start, end, nomap, res_base);

	/* Restore old setting if needed */
	if (bottom_up != top_down)
		memblock_set_bottom_up(prev_bottom_up);

	return ret;
}

/*
 * __reserved_mem_alloc_size() - allocate reserved memory described by
 *	'size', 'alignment' and 'alloc-ranges' properties.
 */
static int __init __reserved_mem_alloc_size(unsigned long node, const char *uname)
{
	int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
	phys_addr_t start = 0, end = 0;
	phys_addr_t base = 0, align = 0, size;
	int len;
	const __be32 *prop;
	bool nomap;
	int ret;

	prop = of_get_flat_dt_prop(node, "size", &len);
	if (!prop)
		return -EINVAL;

	if (len != dt_root_size_cells * sizeof(__be32)) {
		pr_err("invalid size property in '%s' node.\n", uname);
		return -EINVAL;
	}
	size = dt_mem_next_cell(dt_root_size_cells, &prop);

	prop = of_get_flat_dt_prop(node, "alignment", &len);
	if (prop) {
		if (len != dt_root_addr_cells * sizeof(__be32)) {
			pr_err("invalid alignment property in '%s' node.\n",
			       uname);
			return -EINVAL;
		}
		align = dt_mem_next_cell(dt_root_addr_cells, &prop);
	}

	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

	/* Need to adjust the alignment to satisfy the CMA requirement */
	if (IS_ENABLED(CONFIG_CMA)
	    && of_flat_dt_is_compatible(node, "shared-dma-pool")
	    && of_get_flat_dt_prop(node, "reusable", NULL)
	    && !nomap)
		align = max_t(phys_addr_t, align, CMA_MIN_ALIGNMENT_BYTES);

	prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
	if (prop) {
		if (len % t_len != 0) {
			pr_err("invalid alloc-ranges property in '%s', skipping node.\n",
			       uname);
			return -EINVAL;
		}

		while (len > 0) {
			start = dt_mem_next_cell(dt_root_addr_cells, &prop);
			end = start + dt_mem_next_cell(dt_root_size_cells,
						       &prop);

			base = 0;
			ret = __reserved_mem_alloc_in_range(size, align,
					start, end, nomap, &base);
			if (ret == 0) {
				pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
					 uname, &base,
					 (unsigned long)(size / SZ_1M));
				break;
			}
			len -= t_len;
		}
	} else {
		ret = early_init_dt_alloc_reserved_memory_arch(size, align,
				0, 0, nomap, &base);
		if (ret == 0)
			pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
				 uname, &base, (unsigned long)(size / SZ_1M));
	}

	if (base == 0) {
		pr_err("failed to allocate memory for node '%s': size %lu MiB\n",
		       uname, (unsigned long)(size / SZ_1M));
		return -ENOMEM;
	}

	/* Save region in the reserved_mem array */
	fdt_reserved_mem_save_node(node, uname, base, size);
	return 0;
}
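
/*
 * For reference, a minimal sketch of a dynamically placed region that
 * __reserved_mem_alloc_size() would handle (illustrative DT fragment, not
 * taken from this file; the sizes and ranges are made up):
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		linux,cma {
 *			compatible = "shared-dma-pool";
 *			reusable;
 *			size = <0x4000000>;
 *			alignment = <0x400000>;
 *			alloc-ranges = <0x40000000 0x20000000>;
 *		};
 *	};
 */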

static const struct of_device_id __rmem_of_table_sentinel
	__used __section("__reservedmem_of_table_end");

/*
 * __reserved_mem_init_node() - call region specific reserved memory init code
 */
static int __init __reserved_mem_init_node(struct reserved_mem *rmem)
{
	extern const struct of_device_id __reservedmem_of_table[];
	const struct of_device_id *i;
	int ret = -ENOENT;

	for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) {
		reservedmem_of_init_fn initfn = i->data;
		const char *compat = i->compatible;

		if (!of_flat_dt_is_compatible(rmem->fdt_node, compat))
			continue;

		ret = initfn(rmem);
		if (ret == 0) {
			pr_info("initialized node %s, compatible id %s\n",
				rmem->name, compat);
			break;
		}
	}
	return ret;
}
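
/*
 * Entries in __reservedmem_of_table are registered elsewhere with
 * RESERVEDMEM_OF_DECLARE(). A minimal sketch of such a hook (illustrative
 * code, not part of this file; the name and compatible string are made up):
 *
 *	static int __init my_rmem_init(struct reserved_mem *rmem)
 *	{
 *		pr_info("my-region at %pa, size %pa\n",
 *			&rmem->base, &rmem->size);
 *		return 0;
 *	}
 *	RESERVEDMEM_OF_DECLARE(my_region, "vendor,my-region", my_rmem_init);
 */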

static int __init __rmem_cmp(const void *a, const void *b)
{
	const struct reserved_mem *ra = a, *rb = b;

	if (ra->base < rb->base)
		return -1;

	if (ra->base > rb->base)
		return 1;

	/*
	 * Put the dynamic allocations (address == 0, size == 0) before static
	 * allocations at address 0x0 so that overlap detection works
	 * correctly.
	 */
	if (ra->size < rb->size)
		return -1;
	if (ra->size > rb->size)
		return 1;

	if (ra->fdt_node < rb->fdt_node)
		return -1;
	if (ra->fdt_node > rb->fdt_node)
		return 1;

	return 0;
}

static void __init __rmem_check_for_overlap(void)
{
	int i;

	if (reserved_mem_count < 2)
		return;

	sort(reserved_mem, reserved_mem_count, sizeof(reserved_mem[0]),
	     __rmem_cmp, NULL);
	for (i = 0; i < reserved_mem_count - 1; i++) {
		struct reserved_mem *this, *next;

		this = &reserved_mem[i];
		next = &reserved_mem[i + 1];

		if (this->base + this->size > next->base) {
			phys_addr_t this_end, next_end;

			this_end = this->base + this->size;
			next_end = next->base + next->size;
			pr_err("OVERLAP DETECTED!\n%s (%pa--%pa) overlaps with %s (%pa--%pa)\n",
			       this->name, &this->base, &this_end,
			       next->name, &next->base, &next_end);
		}
	}
}

/**
 * fdt_init_reserved_mem_node() - Initialize a reserved memory region
 * @rmem: reserved_mem struct of the memory region to be initialized.
 *
 * This function is used to call the region specific initialization
 * function for a reserved memory region.
 */
static void __init fdt_init_reserved_mem_node(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;
	int err = 0;
	bool nomap;

	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

	err = __reserved_mem_init_node(rmem);
	if (err != 0 && err != -ENOENT) {
		pr_info("node %s compatible matching failed\n", rmem->name);
		if (nomap)
			memblock_clear_nomap(rmem->base, rmem->size);
		else
			memblock_phys_free(rmem->base, rmem->size);
	} else {
		phys_addr_t end = rmem->base + rmem->size - 1;
		bool reusable =
			(of_get_flat_dt_prop(node, "reusable", NULL)) != NULL;

		pr_info("%pa..%pa (%lu KiB) %s %s %s\n",
			&rmem->base, &end, (unsigned long)(rmem->size / SZ_1K),
			nomap ? "nomap" : "map",
			reusable ? "reusable" : "non-reusable",
			rmem->name ? rmem->name : "unknown");
	}
}

struct rmem_assigned_device {
	struct device *dev;
	struct reserved_mem *rmem;
	struct list_head list;
};

static LIST_HEAD(of_rmem_assigned_device_list);
static DEFINE_MUTEX(of_rmem_assigned_device_mutex);

/**
 * of_reserved_mem_device_init_by_idx() - assign reserved memory region to
 *	given device
 * @dev:	Pointer to the device to configure
 * @np:		Pointer to the device_node with 'reserved-memory' property
 * @idx:	Index of selected region
 *
 * This function assigns respective DMA-mapping operations based on the
 * reserved memory region specified by the 'memory-region' property in the
 * @np node to the @dev device. When a driver needs to use more than one
 * reserved memory region, it should allocate child devices and initialize
 * the regions by name for each child device.
 *
 * Returns error code or zero on success.
 */
int of_reserved_mem_device_init_by_idx(struct device *dev,
				       struct device_node *np, int idx)
{
	struct rmem_assigned_device *rd;
	struct device_node *target;
	struct reserved_mem *rmem;
	int ret;

	if (!np || !dev)
		return -EINVAL;

	target = of_parse_phandle(np, "memory-region", idx);
	if (!target)
		return -ENODEV;

	if (!of_device_is_available(target)) {
		of_node_put(target);
		return 0;
	}

	rmem = of_reserved_mem_lookup(target);
	of_node_put(target);

	if (!rmem || !rmem->ops || !rmem->ops->device_init)
		return -EINVAL;

	rd = kmalloc(sizeof(struct rmem_assigned_device), GFP_KERNEL);
	if (!rd)
		return -ENOMEM;

	ret = rmem->ops->device_init(rmem, dev);
	if (ret == 0) {
		rd->dev = dev;
		rd->rmem = rmem;

		mutex_lock(&of_rmem_assigned_device_mutex);
		list_add(&rd->list, &of_rmem_assigned_device_list);
		mutex_unlock(&of_rmem_assigned_device_mutex);

		dev_info(dev, "assigned reserved memory node %s\n", rmem->name);
	} else {
		kfree(rd);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_idx);
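
/*
 * A minimal sketch of the intended driver-side usage (illustrative code,
 * not part of this file; the probe function is made up):
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		ret = of_reserved_mem_device_init_by_idx(&pdev->dev,
 *							 pdev->dev.of_node, 0);
 *		if (ret)
 *			return ret;
 *		...
 *	}
 */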

/**
 * of_reserved_mem_device_init_by_name() - assign named reserved memory region
 *	to given device
 * @dev:	pointer to the device to configure
 * @np:		pointer to the device node with 'memory-region' property
 * @name:	name of the selected memory region
 *
 * Returns: 0 on success or a negative error-code on failure.
 */
int of_reserved_mem_device_init_by_name(struct device *dev,
					struct device_node *np,
					const char *name)
{
	int idx = of_property_match_string(np, "memory-region-names", name);

	return of_reserved_mem_device_init_by_idx(dev, np, idx);
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_name);
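
/*
 * For reference, a consumer node shaped for the by-name variant
 * (illustrative DT fragment; the labels and names are made up):
 *
 *	my-device {
 *		memory-region = <&fb_region>, <&dma_pool>;
 *		memory-region-names = "framebuffer", "dma-pool";
 *	};
 *
 * of_reserved_mem_device_init_by_name(dev, np, "dma-pool") then resolves
 * to index 1 and behaves like of_reserved_mem_device_init_by_idx().
 */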

/**
 * of_reserved_mem_device_release() - release reserved memory device structures
 * @dev:	Pointer to the device to deconfigure
 *
 * This function releases structures allocated for memory region handling for
 * the given device.
 */
void of_reserved_mem_device_release(struct device *dev)
{
	struct rmem_assigned_device *rd, *tmp;
	LIST_HEAD(release_list);

	mutex_lock(&of_rmem_assigned_device_mutex);
	list_for_each_entry_safe(rd, tmp, &of_rmem_assigned_device_list, list) {
		if (rd->dev == dev)
			list_move_tail(&rd->list, &release_list);
	}
	mutex_unlock(&of_rmem_assigned_device_mutex);

	list_for_each_entry_safe(rd, tmp, &release_list, list) {
		if (rd->rmem && rd->rmem->ops && rd->rmem->ops->device_release)
			rd->rmem->ops->device_release(rd->rmem, dev);

		kfree(rd);
	}
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_release);

/**
 * of_reserved_mem_lookup() - acquire reserved_mem from a device node
 * @np:		node pointer of the desired reserved-memory region
 *
 * This function allows drivers to acquire a reference to the reserved_mem
 * struct based on a device node handle.
 *
 * Returns a reserved_mem reference, or NULL on error.
 */
struct reserved_mem *of_reserved_mem_lookup(struct device_node *np)
{
	const char *name;
	int i;

	if (!np->full_name)
		return NULL;

	name = kbasename(np->full_name);
	for (i = 0; i < reserved_mem_count; i++)
		if (!strcmp(reserved_mem[i].name, name))
			return &reserved_mem[i];

	return NULL;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_lookup);
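
/*
 * A minimal sketch of looking up a region directly from a 'memory-region'
 * phandle (illustrative code; the surrounding context is made up):
 *
 *	struct device_node *target;
 *	struct reserved_mem *rmem;
 *
 *	target = of_parse_phandle(np, "memory-region", 0);
 *	if (target) {
 *		rmem = of_reserved_mem_lookup(target);
 *		of_node_put(target);
 *		if (rmem)
 *			pr_debug("region %s at %pa\n", rmem->name, &rmem->base);
 *	}
 */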

/**
 * of_reserved_mem_region_to_resource() - Get a reserved memory region as a resource
 * @np:		node containing 'memory-region' property
 * @idx:	index of 'memory-region' property to lookup
 * @res:	Pointer to a struct resource to fill in with reserved region
 *
 * This function allows drivers to lookup a node's 'memory-region' property
 * entries by index and return a struct resource for the entry.
 *
 * Returns 0 on success with @res filled in. Returns -ENODEV if 'memory-region'
 * is missing or unavailable, -EINVAL for any other error.
 */
int of_reserved_mem_region_to_resource(const struct device_node *np,
				       unsigned int idx, struct resource *res)
{
	struct reserved_mem *rmem;

	if (!np)
		return -EINVAL;

	struct device_node __free(device_node) *target = of_parse_phandle(np, "memory-region", idx);

	if (!target || !of_device_is_available(target))
		return -ENODEV;

	rmem = of_reserved_mem_lookup(target);
	if (!rmem)
		return -EINVAL;

	resource_set_range(res, rmem->base, rmem->size);
	res->name = rmem->name;
	return 0;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_region_to_resource);
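
/*
 * A minimal sketch of the intended usage (illustrative code; the device
 * context is made up):
 *
 *	struct resource res;
 *
 *	if (!of_reserved_mem_region_to_resource(dev->of_node, 0, &res))
 *		dev_dbg(dev, "reserved region %pR\n", &res);
 */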

/**
 * of_reserved_mem_region_to_resource_byname() - Get a reserved memory region as a resource
 * @np:		node containing 'memory-region' property
 * @name:	name of 'memory-region' property entry to lookup
 * @res:	Pointer to a struct resource to fill in with reserved region
 *
 * This function allows drivers to lookup a node's 'memory-region' property
 * entries by name and return a struct resource for the entry.
 *
 * Returns 0 on success with @res filled in, or a negative error-code on
 * failure.
 */
int of_reserved_mem_region_to_resource_byname(const struct device_node *np,
					      const char *name,
					      struct resource *res)
{
	int idx;

	if (!name)
		return -EINVAL;

	idx = of_property_match_string(np, "memory-region-names", name);
	if (idx < 0)
		return idx;

	return of_reserved_mem_region_to_resource(np, idx, res);
}
EXPORT_SYMBOL_GPL(of_reserved_mem_region_to_resource_byname);

/**
 * of_reserved_mem_region_count() - Return the number of 'memory-region' entries
 * @np:		node containing 'memory-region' property
 *
 * This function allows drivers to retrieve the number of entries for a node's
 * 'memory-region' property.
 *
 * Returns the number of entries on success, or negative error code on a
 * malformed property.
 */
int of_reserved_mem_region_count(const struct device_node *np)
{
	return of_count_phandle_with_args(np, "memory-region", NULL);
}
EXPORT_SYMBOL_GPL(of_reserved_mem_region_count);
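
/*
 * A minimal sketch combining the count and to_resource helpers
 * (illustrative code; the surrounding context is made up):
 *
 *	int i, count = of_reserved_mem_region_count(np);
 *
 *	for (i = 0; i < count; i++) {
 *		struct resource res;
 *
 *		if (!of_reserved_mem_region_to_resource(np, i, &res))
 *			pr_debug("region %d: %pR\n", i, &res);
 *	}
 */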