// SPDX-License-Identifier: GPL-2.0+
/*
 * Device tree based initialization code for reserved memory.
 *
 * Copyright (c) 2013, 2015 The Linux Foundation. All Rights Reserved.
 * Copyright (c) 2013,2014 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 * Author: Marek Szyprowski <m.szyprowski@samsung.com>
 * Author: Josh Cartwright <joshc@codeaurora.org>
 */

#define pr_fmt(fmt)	"OF: reserved mem: " fmt

#include <linux/err.h>
#include <linux/libfdt.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/of_reserved_mem.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/kmemleak.h>
#include <linux/cma.h>

#include "of_private.h"

static struct reserved_mem reserved_mem_array[MAX_RESERVED_REGIONS] __initdata;
static struct reserved_mem *reserved_mem __refdata = reserved_mem_array;
static int total_reserved_mem_cnt = MAX_RESERVED_REGIONS;
static int reserved_mem_count;

static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
	phys_addr_t *res_base)
{
	phys_addr_t base;
	int err = 0;

	end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end;
	align = !align ? SMP_CACHE_BYTES : align;
	base = memblock_phys_alloc_range(size, align, start, end);
	if (!base)
		return -ENOMEM;

	*res_base = base;
	if (nomap) {
		err = memblock_mark_nomap(base, size);
		if (err)
			memblock_phys_free(base, size);
	}

	kmemleak_ignore_phys(base);

	return err;
}

/*
 * alloc_reserved_mem_array() - allocate memory for the reserved_mem
 * array using memblock
 *
 * This function is used to allocate memory for the reserved_mem
 * array according to the total number of reserved memory regions
 * defined in the DT.
 * After the new array is allocated, the information stored in
 * the initial static array is copied over to this new array and
 * the new array is used from this point on.
 */
static void __init alloc_reserved_mem_array(void)
{
	struct reserved_mem *new_array;
	size_t alloc_size, copy_size, memset_size;

	alloc_size = array_size(total_reserved_mem_cnt, sizeof(*new_array));
	if (alloc_size == SIZE_MAX) {
		pr_err("Failed to allocate memory for reserved_mem array with err: %d", -EOVERFLOW);
		return;
	}

	new_array = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
	if (!new_array) {
		pr_err("Failed to allocate memory for reserved_mem array with err: %d", -ENOMEM);
		return;
	}

	copy_size = array_size(reserved_mem_count, sizeof(*new_array));
	if (copy_size == SIZE_MAX) {
		memblock_free(new_array, alloc_size);
		total_reserved_mem_cnt = MAX_RESERVED_REGIONS;
		pr_err("Failed to allocate memory for reserved_mem array with err: %d", -EOVERFLOW);
		return;
	}

	memset_size = alloc_size - copy_size;

	memcpy(new_array, reserved_mem, copy_size);
	memset(new_array + reserved_mem_count, 0, memset_size);

	reserved_mem = new_array;
}

static void __init fdt_init_reserved_mem_node(struct reserved_mem *rmem);
/*
 * fdt_reserved_mem_save_node() - save fdt node for second pass initialization
 */
static void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
					      phys_addr_t base, phys_addr_t size)
{
	struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];

	if (reserved_mem_count == total_reserved_mem_cnt) {
		pr_err("not enough space for all defined regions.\n");
		return;
	}

	rmem->fdt_node = node;
	rmem->name = uname;
	rmem->base = base;
	rmem->size = size;

	/* Call the region specific initialization function */
	fdt_init_reserved_mem_node(rmem);

	reserved_mem_count++;
	return;
}

static int __init early_init_dt_reserve_memory(phys_addr_t base,
					       phys_addr_t size, bool nomap)
{
	if (nomap) {
		/*
		 * If the memory is already reserved (by another region), we
		 * should not allow it to be marked nomap, but don't worry
		 * if the region isn't memory as it won't be mapped.
		 */
		if (memblock_overlaps_region(&memblock.memory, base, size) &&
		    memblock_is_region_reserved(base, size))
			return -EBUSY;

		return memblock_mark_nomap(base, size);
	}
	return memblock_reserve(base, size);
}
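
/*
 * Illustrative sketch only (not taken from this file or any particular DT):
 * a statically placed child of /reserved-memory of the kind handled by the
 * 'reg' parsing below. The label, node name and addresses are made up.
 *
 *	reserved-memory {
 *		#address-cells = <2>;
 *		#size-cells = <2>;
 *		ranges;
 *
 *		fw_region: firmware@80000000 {
 *			reg = <0x0 0x80000000 0x0 0x100000>;
 *			no-map;
 *		};
 *	};
 */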

/*
 * __reserved_mem_reserve_reg() - reserve all memory described in 'reg' property
 */
static int __init __reserved_mem_reserve_reg(unsigned long node,
					     const char *uname)
{
	int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
	phys_addr_t base, size;
	int len;
	const __be32 *prop;
	bool nomap;

	prop = of_get_flat_dt_prop(node, "reg", &len);
	if (!prop)
		return -ENOENT;

	if (len && len % t_len != 0) {
		pr_err("Reserved memory: invalid reg property in '%s', skipping node.\n",
		       uname);
		return -EINVAL;
	}

	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

	while (len >= t_len) {
		base = dt_mem_next_cell(dt_root_addr_cells, &prop);
		size = dt_mem_next_cell(dt_root_size_cells, &prop);

		if (size &&
		    early_init_dt_reserve_memory(base, size, nomap) == 0)
			pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
				 uname, &base, (unsigned long)(size / SZ_1M));
		else
			pr_err("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
			       uname, &base, (unsigned long)(size / SZ_1M));

		len -= t_len;
	}
	return 0;
}

/*
 * __reserved_mem_check_root() - check that the #size-cells and #address-cells
 * provided in /reserved-memory match the values supported by the current
 * implementation, and also check that the ranges property has been provided
 */
static int __init __reserved_mem_check_root(unsigned long node)
{
	const __be32 *prop;

	prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
	if (!prop || be32_to_cpup(prop) != dt_root_size_cells)
		return -EINVAL;

	prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
	if (!prop || be32_to_cpup(prop) != dt_root_addr_cells)
		return -EINVAL;

	prop = of_get_flat_dt_prop(node, "ranges", NULL);
	if (!prop)
		return -EINVAL;
	return 0;
}

static void __init __rmem_check_for_overlap(void);

/**
 * fdt_scan_reserved_mem_reg_nodes() - Store info for the "reg" defined
 * reserved memory regions.
 *
 * This function is used to scan through the DT and store the
 * information for the reserved memory regions that are defined using
 * the "reg" property. The region node number, name, base address, and
 * size are all stored in the reserved_mem array by calling the
 * fdt_reserved_mem_save_node() function.
 */
void __init fdt_scan_reserved_mem_reg_nodes(void)
{
	int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
	const void *fdt = initial_boot_params;
	phys_addr_t base, size;
	const __be32 *prop;
	int node, child;
	int len;

	if (!fdt)
		return;

	node = fdt_path_offset(fdt, "/reserved-memory");
	if (node < 0) {
		pr_info("Reserved memory: No reserved-memory node in the DT\n");
		return;
	}

	/* Attempt dynamic allocation of a new reserved_mem array */
	alloc_reserved_mem_array();

	if (__reserved_mem_check_root(node)) {
		pr_err("Reserved memory: unsupported node format, ignoring\n");
		return;
	}

	fdt_for_each_subnode(child, fdt, node) {
		const char *uname;

		prop = of_get_flat_dt_prop(child, "reg", &len);
		if (!prop)
			continue;
		if (!of_fdt_device_is_available(fdt, child))
			continue;

		uname = fdt_get_name(fdt, child, NULL);
		if (len && len % t_len != 0) {
			pr_err("Reserved memory: invalid reg property in '%s', skipping node.\n",
			       uname);
			continue;
		}
		base = dt_mem_next_cell(dt_root_addr_cells, &prop);
		size = dt_mem_next_cell(dt_root_size_cells, &prop);

		if (size)
			fdt_reserved_mem_save_node(child, uname, base, size);
	}

	/* check for overlapping reserved regions */
	__rmem_check_for_overlap();
}

static int __init __reserved_mem_alloc_size(unsigned long node, const char *uname);

/*
 * fdt_scan_reserved_mem() - scan a single FDT node for reserved memory
 */
int __init fdt_scan_reserved_mem(void)
{
	int node, child;
	int dynamic_nodes_cnt = 0, count = 0;
	int dynamic_nodes[MAX_RESERVED_REGIONS];
	const void *fdt = initial_boot_params;

	node = fdt_path_offset(fdt, "/reserved-memory");
	if (node < 0)
		return -ENODEV;

	if (__reserved_mem_check_root(node) != 0) {
		pr_err("Reserved memory: unsupported node format, ignoring\n");
		return -EINVAL;
	}

	fdt_for_each_subnode(child, fdt, node) {
		const char *uname;
		int err;

		if (!of_fdt_device_is_available(fdt, child))
			continue;

		uname = fdt_get_name(fdt, child, NULL);

		err = __reserved_mem_reserve_reg(child, uname);
		if (!err)
			count++;
		/*
		 * Save the nodes for the dynamically-placed regions
		 * into an array which will be used for allocation right
		 * after all the statically-placed regions are reserved
		 * or marked as no-map. This is done to avoid dynamically
		 * allocating from one of the statically-placed regions.
		 */
		if (err == -ENOENT && of_get_flat_dt_prop(child, "size", NULL)) {
			dynamic_nodes[dynamic_nodes_cnt] = child;
			dynamic_nodes_cnt++;
		}
	}
	for (int i = 0; i < dynamic_nodes_cnt; i++) {
		const char *uname;
		int err;

		child = dynamic_nodes[i];
		uname = fdt_get_name(fdt, child, NULL);
		err = __reserved_mem_alloc_size(child, uname);
		if (!err)
			count++;
	}
	total_reserved_mem_cnt = count;
	return 0;
}
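
/*
 * Illustrative sketch only: a dynamically placed child of /reserved-memory
 * of the kind handled by the 'size'/'alignment'/'alloc-ranges' code below.
 * The label and the size/range values are made up.
 *
 *	multimedia_pool: multimedia-pool {
 *		compatible = "shared-dma-pool";
 *		reusable;
 *		size = <0x0 0x4000000>;
 *		alignment = <0x0 0x400000>;
 *		alloc-ranges = <0x0 0x40000000 0x0 0x20000000>;
 *	};
 */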

/*
 * __reserved_mem_alloc_in_range() - allocate reserved memory described with
 *	'alloc-ranges'. Choose bottom-up/top-down depending on nearby existing
 *	reserved regions to keep the reserved memory contiguous if possible.
 */
static int __init __reserved_mem_alloc_in_range(phys_addr_t size,
	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
	phys_addr_t *res_base)
{
	bool prev_bottom_up = memblock_bottom_up();
	bool bottom_up = false, top_down = false;
	int ret, i;

	for (i = 0; i < reserved_mem_count; i++) {
		struct reserved_mem *rmem = &reserved_mem[i];

		/* Skip regions that were not reserved yet */
		if (rmem->size == 0)
			continue;

		/*
		 * If range starts next to an existing reservation, use bottom-up:
		 *	|....RRRR................RRRRRRRR..............|
		 *	       --RRRR------
		 */
		if (start >= rmem->base && start <= (rmem->base + rmem->size))
			bottom_up = true;

		/*
		 * If range ends next to an existing reservation, use top-down:
		 *	|....RRRR................RRRRRRRR..............|
		 *	              -------RRRR-----
		 */
		if (end >= rmem->base && end <= (rmem->base + rmem->size))
			top_down = true;
	}

	/* Change setting only if either bottom-up or top-down was selected */
	if (bottom_up != top_down)
		memblock_set_bottom_up(bottom_up);

	ret = early_init_dt_alloc_reserved_memory_arch(size, align,
			start, end, nomap, res_base);

	/* Restore old setting if needed */
	if (bottom_up != top_down)
		memblock_set_bottom_up(prev_bottom_up);

	return ret;
}

/*
 * __reserved_mem_alloc_size() - allocate reserved memory described by
 *	'size', 'alignment' and 'alloc-ranges' properties.
 */
static int __init __reserved_mem_alloc_size(unsigned long node, const char *uname)
{
	int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
	phys_addr_t start = 0, end = 0;
	phys_addr_t base = 0, align = 0, size;
	int len;
	const __be32 *prop;
	bool nomap;
	int ret;

	prop = of_get_flat_dt_prop(node, "size", &len);
	if (!prop)
		return -EINVAL;

	if (len != dt_root_size_cells * sizeof(__be32)) {
		pr_err("invalid size property in '%s' node.\n", uname);
		return -EINVAL;
	}
	size = dt_mem_next_cell(dt_root_size_cells, &prop);

	prop = of_get_flat_dt_prop(node, "alignment", &len);
	if (prop) {
		if (len != dt_root_addr_cells * sizeof(__be32)) {
			pr_err("invalid alignment property in '%s' node.\n",
			       uname);
			return -EINVAL;
		}
		align = dt_mem_next_cell(dt_root_addr_cells, &prop);
	}

	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

	/* Need to adjust the alignment to satisfy the CMA requirement */
	if (IS_ENABLED(CONFIG_CMA)
	    && of_flat_dt_is_compatible(node, "shared-dma-pool")
	    && of_get_flat_dt_prop(node, "reusable", NULL)
	    && !nomap)
		align = max_t(phys_addr_t, align, CMA_MIN_ALIGNMENT_BYTES);

	prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
	if (prop) {

		if (len % t_len != 0) {
			pr_err("invalid alloc-ranges property in '%s', skipping node.\n",
			       uname);
			return -EINVAL;
		}

		base = 0;

		while (len > 0) {
			start = dt_mem_next_cell(dt_root_addr_cells, &prop);
			end = start + dt_mem_next_cell(dt_root_size_cells,
						       &prop);

			ret = __reserved_mem_alloc_in_range(size, align,
					start, end, nomap, &base);
			if (ret == 0) {
				pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
					 uname, &base,
					 (unsigned long)(size / SZ_1M));
				break;
			}
			len -= t_len;
		}

	} else {
		ret = early_init_dt_alloc_reserved_memory_arch(size, align,
				0, 0, nomap, &base);
		if (ret == 0)
			pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
				 uname, &base, (unsigned long)(size / SZ_1M));
	}

	if (base == 0) {
		pr_err("failed to allocate memory for node '%s': size %lu MiB\n",
		       uname, (unsigned long)(size / SZ_1M));
		return -ENOMEM;
	}

	/* Save region in the reserved_mem array */
	fdt_reserved_mem_save_node(node, uname, base, size);
	return 0;
}

static const struct of_device_id __rmem_of_table_sentinel
	__used __section("__reservedmem_of_table_end");
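
/*
 * Illustrative sketch only: region-specific setup code elsewhere in the
 * kernel hooks into the table scanned by __reserved_mem_init_node() below
 * via RESERVEDMEM_OF_DECLARE(). The compatible string, ops structure and
 * function names here are hypothetical:
 *
 *	static int __init my_pool_setup(struct reserved_mem *rmem)
 *	{
 *		rmem->ops = &my_pool_ops;  (provides device_init/device_release)
 *		return 0;
 *	}
 *	RESERVEDMEM_OF_DECLARE(my_pool, "vendor,my-pool", my_pool_setup);
 */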

/*
 * __reserved_mem_init_node() - call region specific reserved memory init code
 */
static int __init __reserved_mem_init_node(struct reserved_mem *rmem)
{
	extern const struct of_device_id __reservedmem_of_table[];
	const struct of_device_id *i;
	int ret = -ENOENT;

	for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) {
		reservedmem_of_init_fn initfn = i->data;
		const char *compat = i->compatible;

		if (!of_flat_dt_is_compatible(rmem->fdt_node, compat))
			continue;

		ret = initfn(rmem);
		if (ret == 0) {
			pr_info("initialized node %s, compatible id %s\n",
				rmem->name, compat);
			break;
		}
	}
	return ret;
}

static int __init __rmem_cmp(const void *a, const void *b)
{
	const struct reserved_mem *ra = a, *rb = b;

	if (ra->base < rb->base)
		return -1;

	if (ra->base > rb->base)
		return 1;

	/*
	 * Put the dynamic allocations (address == 0, size == 0) before static
	 * allocations at address 0x0 so that overlap detection works
	 * correctly.
	 */
	if (ra->size < rb->size)
		return -1;
	if (ra->size > rb->size)
		return 1;

	if (ra->fdt_node < rb->fdt_node)
		return -1;
	if (ra->fdt_node > rb->fdt_node)
		return 1;

	return 0;
}

static void __init __rmem_check_for_overlap(void)
{
	int i;

	if (reserved_mem_count < 2)
		return;

	sort(reserved_mem, reserved_mem_count, sizeof(reserved_mem[0]),
	     __rmem_cmp, NULL);
	for (i = 0; i < reserved_mem_count - 1; i++) {
		struct reserved_mem *this, *next;

		this = &reserved_mem[i];
		next = &reserved_mem[i + 1];

		if (this->base + this->size > next->base) {
			phys_addr_t this_end, next_end;

			this_end = this->base + this->size;
			next_end = next->base + next->size;
			pr_err("OVERLAP DETECTED!\n%s (%pa--%pa) overlaps with %s (%pa--%pa)\n",
			       this->name, &this->base, &this_end,
			       next->name, &next->base, &next_end);
		}
	}
}

/**
 * fdt_init_reserved_mem_node() - Initialize a reserved memory region
 * @rmem: reserved_mem struct of the memory region to be initialized.
 *
 * This function is used to call the region specific initialization
 * function for a reserved memory region.
 */
static void __init fdt_init_reserved_mem_node(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;
	int err = 0;
	bool nomap;

	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

	err = __reserved_mem_init_node(rmem);
	if (err != 0 && err != -ENOENT) {
		pr_info("node %s compatible matching fail\n", rmem->name);
		if (nomap)
			memblock_clear_nomap(rmem->base, rmem->size);
		else
			memblock_phys_free(rmem->base, rmem->size);
	} else {
		phys_addr_t end = rmem->base + rmem->size - 1;
		bool reusable =
			(of_get_flat_dt_prop(node, "reusable", NULL)) != NULL;

		pr_info("%pa..%pa (%lu KiB) %s %s %s\n",
			&rmem->base, &end, (unsigned long)(rmem->size / SZ_1K),
			nomap ? "nomap" : "map",
			reusable ? "reusable" : "non-reusable",
			rmem->name ? rmem->name : "unknown");
	}
}

struct rmem_assigned_device {
	struct device *dev;
	struct reserved_mem *rmem;
	struct list_head list;
};

static LIST_HEAD(of_rmem_assigned_device_list);
static DEFINE_MUTEX(of_rmem_assigned_device_mutex);

/**
 * of_reserved_mem_device_init_by_idx() - assign reserved memory region to
 *	given device
 * @dev:	Pointer to the device to configure
 * @np:		Pointer to the device_node with 'memory-region' property
 * @idx:	Index of selected region
 *
 * This function assigns respective DMA-mapping operations based on reserved
 * memory region specified by 'memory-region' property in @np node to the @dev
 * device. When driver needs to use more than one reserved memory region, it
 * should allocate child devices and initialize regions by name for each of
 * child device.
 *
 * Returns error code or zero on success.
 */
int of_reserved_mem_device_init_by_idx(struct device *dev,
				       struct device_node *np, int idx)
{
	struct rmem_assigned_device *rd;
	struct device_node *target;
	struct reserved_mem *rmem;
	int ret;

	if (!np || !dev)
		return -EINVAL;

	target = of_parse_phandle(np, "memory-region", idx);
	if (!target)
		return -ENODEV;

	if (!of_device_is_available(target)) {
		of_node_put(target);
		return 0;
	}

	rmem = of_reserved_mem_lookup(target);
	of_node_put(target);

	if (!rmem || !rmem->ops || !rmem->ops->device_init)
		return -EINVAL;

	rd = kmalloc(sizeof(struct rmem_assigned_device), GFP_KERNEL);
	if (!rd)
		return -ENOMEM;

	ret = rmem->ops->device_init(rmem, dev);
	if (ret == 0) {
		rd->dev = dev;
		rd->rmem = rmem;

		mutex_lock(&of_rmem_assigned_device_mutex);
		list_add(&rd->list, &of_rmem_assigned_device_list);
		mutex_unlock(&of_rmem_assigned_device_mutex);

		dev_info(dev, "assigned reserved memory node %s\n", rmem->name);
	} else {
		kfree(rd);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_idx);
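
/*
 * Illustrative sketch only (hypothetical consumer, not part of this file):
 * a driver whose node carries, e.g.,
 *	memory-region = <&multimedia_pool>;
 *	memory-region-names = "framebuffer";
 * would typically call from its probe path:
 *
 *	ret = of_reserved_mem_device_init_by_name(dev, dev->of_node,
 *						  "framebuffer");
 *	if (ret)
 *		return ret;
 *
 * and undo the assignment on remove with of_reserved_mem_device_release(dev).
 */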

/**
 * of_reserved_mem_device_init_by_name() - assign named reserved memory region
 *					   to given device
 * @dev: pointer to the device to configure
 * @np: pointer to the device node with 'memory-region' property
 * @name: name of the selected memory region
 *
 * Returns: 0 on success or a negative error-code on failure.
 */
int of_reserved_mem_device_init_by_name(struct device *dev,
					struct device_node *np,
					const char *name)
{
	int idx = of_property_match_string(np, "memory-region-names", name);

	return of_reserved_mem_device_init_by_idx(dev, np, idx);
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_name);

/**
 * of_reserved_mem_device_release() - release reserved memory device structures
 * @dev:	Pointer to the device to deconfigure
 *
 * This function releases structures allocated for memory region handling for
 * the given device.
 */
void of_reserved_mem_device_release(struct device *dev)
{
	struct rmem_assigned_device *rd, *tmp;
	LIST_HEAD(release_list);

	mutex_lock(&of_rmem_assigned_device_mutex);
	list_for_each_entry_safe(rd, tmp, &of_rmem_assigned_device_list, list) {
		if (rd->dev == dev)
			list_move_tail(&rd->list, &release_list);
	}
	mutex_unlock(&of_rmem_assigned_device_mutex);

	list_for_each_entry_safe(rd, tmp, &release_list, list) {
		if (rd->rmem && rd->rmem->ops && rd->rmem->ops->device_release)
			rd->rmem->ops->device_release(rd->rmem, dev);

		kfree(rd);
	}
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_release);

/**
 * of_reserved_mem_lookup() - acquire reserved_mem from a device node
 * @np:		node pointer of the desired reserved-memory region
 *
 * This function allows drivers to acquire a reference to the reserved_mem
 * struct based on a device node handle.
 *
 * Returns a reserved_mem reference, or NULL on error.
 */
struct reserved_mem *of_reserved_mem_lookup(struct device_node *np)
{
	const char *name;
	int i;

	if (!np->full_name)
		return NULL;

	name = kbasename(np->full_name);
	for (i = 0; i < reserved_mem_count; i++)
		if (!strcmp(reserved_mem[i].name, name))
			return &reserved_mem[i];

	return NULL;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_lookup);
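
/*
 * Illustrative sketch only (hypothetical caller): drivers that manage the
 * region themselves commonly resolve the reserved_mem entry like this:
 *
 *	struct device_node *target;
 *	struct reserved_mem *rmem;
 *
 *	target = of_parse_phandle(dev->of_node, "memory-region", 0);
 *	if (!target)
 *		return -ENODEV;
 *	rmem = of_reserved_mem_lookup(target);
 *	of_node_put(target);
 *	if (!rmem)
 *		return -EINVAL;
 */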