// SPDX-License-Identifier: GPL-2.0+
/*
 * Device tree based initialization code for reserved memory.
 *
 * Copyright (c) 2013, 2015 The Linux Foundation. All Rights Reserved.
 * Copyright (c) 2013,2014 Samsung Electronics Co., Ltd.
 *              http://www.samsung.com
 * Author: Marek Szyprowski <m.szyprowski@samsung.com>
 * Author: Josh Cartwright <joshc@codeaurora.org>
 */

#define pr_fmt(fmt)     "OF: reserved mem: " fmt

#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/libfdt.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/of_reserved_mem.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/kmemleak.h>
#include <linux/cma.h>
#include <linux/dma-map-ops.h>

#include "of_private.h"

static struct reserved_mem reserved_mem_array[MAX_RESERVED_REGIONS] __initdata;
static struct reserved_mem *reserved_mem __refdata = reserved_mem_array;
static int total_reserved_mem_cnt = MAX_RESERVED_REGIONS;
static int reserved_mem_count;

static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
        phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
        phys_addr_t *res_base)
{
        phys_addr_t base;
        int err = 0;

        end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end;
        align = !align ? SMP_CACHE_BYTES : align;
        base = memblock_phys_alloc_range(size, align, start, end);
        if (!base)
                return -ENOMEM;

        *res_base = base;
        if (nomap) {
                err = memblock_mark_nomap(base, size);
                if (err)
                        memblock_phys_free(base, size);
        }

        if (!err)
                kmemleak_ignore_phys(base);

        return err;
}
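
/*
 * Illustrative sketch (not part of the code above): from a caller's
 * perspective, the helper falls back to sane defaults when the range and
 * alignment are left as zero. All values below are hypothetical.
 *
 *      phys_addr_t base;
 *      int err;
 *
 *      // end == 0 selects MEMBLOCK_ALLOC_ANYWHERE and align == 0 falls
 *      // back to SMP_CACHE_BYTES, so this asks for 1 MiB anywhere:
 *      err = early_init_dt_alloc_reserved_memory_arch(SZ_1M, 0, 0, 0,
 *                                                     false, &base);
 */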

/*
 * alloc_reserved_mem_array() - allocate memory for the reserved_mem
 * array using memblock
 *
 * This function is used to allocate memory for the reserved_mem
 * array according to the total number of reserved memory regions
 * defined in the DT.
 * After the new array is allocated, the information stored in
 * the initial static array is copied over to this new array and
 * the new array is used from this point on.
 */
static void __init alloc_reserved_mem_array(void)
{
        struct reserved_mem *new_array;
        size_t alloc_size, copy_size, memset_size;

        alloc_size = array_size(total_reserved_mem_cnt, sizeof(*new_array));
        if (alloc_size == SIZE_MAX) {
                pr_err("Failed to allocate memory for reserved_mem array with err: %d", -EOVERFLOW);
                return;
        }

        new_array = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
        if (!new_array) {
                pr_err("Failed to allocate memory for reserved_mem array with err: %d", -ENOMEM);
                return;
        }

        copy_size = array_size(reserved_mem_count, sizeof(*new_array));
        if (copy_size == SIZE_MAX) {
                memblock_free(new_array, alloc_size);
                total_reserved_mem_cnt = MAX_RESERVED_REGIONS;
                pr_err("Failed to allocate memory for reserved_mem array with err: %d", -EOVERFLOW);
                return;
        }

        memset_size = alloc_size - copy_size;

        memcpy(new_array, reserved_mem, copy_size);
        memset(new_array + reserved_mem_count, 0, memset_size);

        reserved_mem = new_array;
}

static void __init fdt_init_reserved_mem_node(struct reserved_mem *rmem);
/*
 * fdt_reserved_mem_save_node() - save fdt node for second pass initialization
 */
static void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
                                              phys_addr_t base, phys_addr_t size)
{
        struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];

        if (reserved_mem_count == total_reserved_mem_cnt) {
                pr_err("not enough space for all defined regions.\n");
                return;
        }

        rmem->fdt_node = node;
        rmem->name = uname;
        rmem->base = base;
        rmem->size = size;

        /* Call the region specific initialization function */
        fdt_init_reserved_mem_node(rmem);

        reserved_mem_count++;
}

static int __init early_init_dt_reserve_memory(phys_addr_t base,
                                               phys_addr_t size, bool nomap)
{
        if (nomap) {
                /*
                 * If the memory is already reserved (by another region), we
                 * should not allow it to be marked nomap, but don't worry
                 * if the region isn't memory as it won't be mapped.
                 */
                if (memblock_overlaps_region(&memblock.memory, base, size) &&
                    memblock_is_region_reserved(base, size))
                        return -EBUSY;

                return memblock_mark_nomap(base, size);
        }
        return memblock_reserve(base, size);
}

/*
 * __reserved_mem_reserve_reg() - reserve all memory described in 'reg' property
 */
static int __init __reserved_mem_reserve_reg(unsigned long node,
                                             const char *uname)
{
        int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
        phys_addr_t base, size;
        int len;
        const __be32 *prop;
        bool nomap;

        prop = of_get_flat_dt_prop(node, "reg", &len);
        if (!prop)
                return -ENOENT;

        if (len && len % t_len != 0) {
                pr_err("Reserved memory: invalid reg property in '%s', skipping node.\n",
                       uname);
                return -EINVAL;
        }

        nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

        while (len >= t_len) {
                base = dt_mem_next_cell(dt_root_addr_cells, &prop);
                size = dt_mem_next_cell(dt_root_size_cells, &prop);

                if (size && early_init_dt_reserve_memory(base, size, nomap) == 0) {
                        /* Architecture specific contiguous memory fixup. */
                        if (of_flat_dt_is_compatible(node, "shared-dma-pool") &&
                            of_get_flat_dt_prop(node, "reusable", NULL))
                                dma_contiguous_early_fixup(base, size);
                        pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
                                 uname, &base, (unsigned long)(size / SZ_1M));
                } else {
                        pr_err("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
                               uname, &base, (unsigned long)(size / SZ_1M));
                }

                len -= t_len;
        }
        return 0;
}
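
/*
 * For reference, a statically placed region consumed by
 * __reserved_mem_reserve_reg() is described in the DT with a "reg"
 * property. Illustrative sketch only; the node name and addresses are
 * hypothetical:
 *
 *      reserved-memory {
 *              #address-cells = <1>;
 *              #size-cells = <1>;
 *              ranges;
 *
 *              framebuffer@78000000 {
 *                      reg = <0x78000000 0x800000>;
 *                      no-map;
 *              };
 *      };
 */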

/*
 * __reserved_mem_check_root() - check that the #size-cells and
 * #address-cells provided in /reserved-memory match the values supported
 * by the current implementation, and that a ranges property is present
 */
static int __init __reserved_mem_check_root(unsigned long node)
{
        const __be32 *prop;

        prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
        if (!prop || be32_to_cpup(prop) != dt_root_size_cells)
                return -EINVAL;

        prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
        if (!prop || be32_to_cpup(prop) != dt_root_addr_cells)
                return -EINVAL;

        prop = of_get_flat_dt_prop(node, "ranges", NULL);
        if (!prop)
                return -EINVAL;
        return 0;
}

static void __init __rmem_check_for_overlap(void);

/**
 * fdt_scan_reserved_mem_reg_nodes() - Store info for the "reg" defined
 * reserved memory regions.
 *
 * This function is used to scan through the DT and store the
 * information for the reserved memory regions that are defined using
 * the "reg" property. The region node number, name, base address, and
 * size are all stored in the reserved_mem array by calling the
 * fdt_reserved_mem_save_node() function.
 */
void __init fdt_scan_reserved_mem_reg_nodes(void)
{
        int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
        const void *fdt = initial_boot_params;
        phys_addr_t base, size;
        const __be32 *prop;
        int node, child;
        int len;

        if (!fdt)
                return;

        node = fdt_path_offset(fdt, "/reserved-memory");
        if (node < 0) {
                pr_info("Reserved memory: No reserved-memory node in the DT\n");
                return;
        }

        /* Attempt dynamic allocation of a new reserved_mem array */
        alloc_reserved_mem_array();

        if (__reserved_mem_check_root(node)) {
                pr_err("Reserved memory: unsupported node format, ignoring\n");
                return;
        }

        fdt_for_each_subnode(child, fdt, node) {
                const char *uname;

                prop = of_get_flat_dt_prop(child, "reg", &len);
                if (!prop)
                        continue;
                if (!of_fdt_device_is_available(fdt, child))
                        continue;

                uname = fdt_get_name(fdt, child, NULL);
                if (len && len % t_len != 0) {
                        pr_err("Reserved memory: invalid reg property in '%s', skipping node.\n",
                               uname);
                        continue;
                }

                if (len > t_len)
                        pr_warn("%s() ignores %d regions in node '%s'\n",
                                __func__, len / t_len - 1, uname);

                base = dt_mem_next_cell(dt_root_addr_cells, &prop);
                size = dt_mem_next_cell(dt_root_size_cells, &prop);

                if (size)
                        fdt_reserved_mem_save_node(child, uname, base, size);
        }

        /* check for overlapping reserved regions */
        __rmem_check_for_overlap();
}

static int __init __reserved_mem_alloc_size(unsigned long node, const char *uname);

/*
 * fdt_scan_reserved_mem() - scan a single FDT node for reserved memory
 */
int __init fdt_scan_reserved_mem(void)
{
        int node, child;
        int dynamic_nodes_cnt = 0, count = 0;
        int dynamic_nodes[MAX_RESERVED_REGIONS];
        const void *fdt = initial_boot_params;

        node = fdt_path_offset(fdt, "/reserved-memory");
        if (node < 0)
                return -ENODEV;

        if (__reserved_mem_check_root(node) != 0) {
                pr_err("Reserved memory: unsupported node format, ignoring\n");
                return -EINVAL;
        }

        fdt_for_each_subnode(child, fdt, node) {
                const char *uname;
                int err;

                if (!of_fdt_device_is_available(fdt, child))
                        continue;

                uname = fdt_get_name(fdt, child, NULL);

                err = __reserved_mem_reserve_reg(child, uname);
                if (!err)
                        count++;
                /*
                 * Save the nodes for the dynamically-placed regions
                 * into an array which will be used for allocation right
                 * after all the statically-placed regions are reserved
                 * or marked as no-map. This is done to avoid dynamically
                 * allocating from one of the statically-placed regions.
                 */
                if (err == -ENOENT && of_get_flat_dt_prop(child, "size", NULL)) {
                        dynamic_nodes[dynamic_nodes_cnt] = child;
                        dynamic_nodes_cnt++;
                }
        }
        for (int i = 0; i < dynamic_nodes_cnt; i++) {
                const char *uname;
                int err;

                child = dynamic_nodes[i];
                uname = fdt_get_name(fdt, child, NULL);
                err = __reserved_mem_alloc_size(child, uname);
                if (!err)
                        count++;
        }
        total_reserved_mem_cnt = count;
        return 0;
}
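
/*
 * Hypothetical example of why the deferred second pass above matters:
 * with a static region at 0x40000000 and a dynamic region whose
 * alloc-ranges cover the same area, reserving all static regions first
 * guarantees that memblock cannot place the dynamic region on top of
 * the static one. All names and addresses below are made up.
 *
 *      static-region@40000000 {
 *              reg = <0x40000000 0x100000>;
 *      };
 *
 *      dynamic-pool {
 *              size = <0x100000>;
 *              alloc-ranges = <0x40000000 0x10000000>;
 *      };
 */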

/*
 * __reserved_mem_alloc_in_range() - allocate reserved memory described with
 *      'alloc-ranges'. Choose bottom-up/top-down depending on nearby existing
 *      reserved regions to keep the reserved memory contiguous if possible.
 */
static int __init __reserved_mem_alloc_in_range(phys_addr_t size,
        phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
        phys_addr_t *res_base)
{
        bool prev_bottom_up = memblock_bottom_up();
        bool bottom_up = false, top_down = false;
        int ret, i;

        for (i = 0; i < reserved_mem_count; i++) {
                struct reserved_mem *rmem = &reserved_mem[i];

                /* Skip regions that were not reserved yet */
                if (rmem->size == 0)
                        continue;

                /*
                 * If range starts next to an existing reservation, use bottom-up:
                 *      |....RRRR................RRRRRRRR..............|
                 *             --RRRR------
                 */
                if (start >= rmem->base && start <= (rmem->base + rmem->size))
                        bottom_up = true;

                /*
                 * If range ends next to an existing reservation, use top-down:
                 *      |....RRRR................RRRRRRRR..............|
                 *                    -------RRRR-----
                 */
                if (end >= rmem->base && end <= (rmem->base + rmem->size))
                        top_down = true;
        }

        /* Change setting only if either bottom-up or top-down was selected */
        if (bottom_up != top_down)
                memblock_set_bottom_up(bottom_up);

        ret = early_init_dt_alloc_reserved_memory_arch(size, align,
                        start, end, nomap, res_base);

        /* Restore old setting if needed */
        if (bottom_up != top_down)
                memblock_set_bottom_up(prev_bottom_up);

        return ret;
}

/*
 * __reserved_mem_alloc_size() - allocate reserved memory described by
 *      'size', 'alignment' and 'alloc-ranges' properties.
 */
static int __init __reserved_mem_alloc_size(unsigned long node, const char *uname)
{
        int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
        phys_addr_t start = 0, end = 0;
        phys_addr_t base = 0, align = 0, size;
        int len;
        const __be32 *prop;
        bool nomap;
        int ret;

        prop = of_get_flat_dt_prop(node, "size", &len);
        if (!prop)
                return -EINVAL;

        if (len != dt_root_size_cells * sizeof(__be32)) {
                pr_err("invalid size property in '%s' node.\n", uname);
                return -EINVAL;
        }
        size = dt_mem_next_cell(dt_root_size_cells, &prop);

        prop = of_get_flat_dt_prop(node, "alignment", &len);
        if (prop) {
                if (len != dt_root_addr_cells * sizeof(__be32)) {
                        pr_err("invalid alignment property in '%s' node.\n",
                               uname);
                        return -EINVAL;
                }
                align = dt_mem_next_cell(dt_root_addr_cells, &prop);
        }

        nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

        /* Need to adjust the alignment to satisfy the CMA requirement */
        if (IS_ENABLED(CONFIG_CMA)
            && of_flat_dt_is_compatible(node, "shared-dma-pool")
            && of_get_flat_dt_prop(node, "reusable", NULL)
            && !nomap)
                align = max_t(phys_addr_t, align, CMA_MIN_ALIGNMENT_BYTES);

        prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
        if (prop) {
                if (len % t_len != 0) {
                        pr_err("invalid alloc-ranges property in '%s', skipping node.\n",
                               uname);
                        return -EINVAL;
                }

                while (len > 0) {
                        start = dt_mem_next_cell(dt_root_addr_cells, &prop);
                        end = start + dt_mem_next_cell(dt_root_size_cells,
                                                       &prop);

                        base = 0;
                        ret = __reserved_mem_alloc_in_range(size, align,
                                        start, end, nomap, &base);
                        if (ret == 0) {
                                pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
                                         uname, &base,
                                         (unsigned long)(size / SZ_1M));
                                break;
                        }
                        len -= t_len;
                }
        } else {
                ret = early_init_dt_alloc_reserved_memory_arch(size, align,
                                                               0, 0, nomap, &base);
                if (ret == 0)
                        pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
                                 uname, &base, (unsigned long)(size / SZ_1M));
        }

        if (base == 0) {
                pr_err("failed to allocate memory for node '%s': size %lu MiB\n",
                       uname, (unsigned long)(size / SZ_1M));
                return -ENOMEM;
        }
        /* Architecture specific contiguous memory fixup. */
        if (of_flat_dt_is_compatible(node, "shared-dma-pool") &&
            of_get_flat_dt_prop(node, "reusable", NULL))
                dma_contiguous_early_fixup(base, size);
        /* Save region in the reserved_mem array */
        fdt_reserved_mem_save_node(node, uname, base, size);
        return 0;
}
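
/*
 * A dynamically placed region handled by __reserved_mem_alloc_size()
 * typically looks like this in the DT. Illustrative sketch only; all
 * names and values are hypothetical:
 *
 *      multimedia_pool: multimedia-pool {
 *              compatible = "shared-dma-pool";
 *              reusable;
 *              size = <0x4000000>;
 *              alignment = <0x400000>;
 *              alloc-ranges = <0x40000000 0x40000000>;
 *      };
 */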

static const struct of_device_id __rmem_of_table_sentinel
        __used __section("__reservedmem_of_table_end");

/*
 * __reserved_mem_init_node() - call region specific reserved memory init code
 */
static int __init __reserved_mem_init_node(struct reserved_mem *rmem)
{
        extern const struct of_device_id __reservedmem_of_table[];
        const struct of_device_id *i;
        int ret = -ENOENT;

        for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) {
                reservedmem_of_init_fn initfn = i->data;
                const char *compat = i->compatible;

                if (!of_flat_dt_is_compatible(rmem->fdt_node, compat))
                        continue;

                ret = initfn(rmem);
                if (ret == 0) {
                        pr_info("initialized node %s, compatible id %s\n",
                                rmem->name, compat);
                        break;
                }
        }
        return ret;
}

static int __init __rmem_cmp(const void *a, const void *b)
{
        const struct reserved_mem *ra = a, *rb = b;

        if (ra->base < rb->base)
                return -1;

        if (ra->base > rb->base)
                return 1;

        /*
         * Put the dynamic allocations (address == 0, size == 0) before static
         * allocations at address 0x0 so that overlap detection works
         * correctly.
         */
        if (ra->size < rb->size)
                return -1;
        if (ra->size > rb->size)
                return 1;

        if (ra->fdt_node < rb->fdt_node)
                return -1;
        if (ra->fdt_node > rb->fdt_node)
                return 1;

        return 0;
}

static void __init __rmem_check_for_overlap(void)
{
        int i;

        if (reserved_mem_count < 2)
                return;

        sort(reserved_mem, reserved_mem_count, sizeof(reserved_mem[0]),
             __rmem_cmp, NULL);
        for (i = 0; i < reserved_mem_count - 1; i++) {
                struct reserved_mem *this, *next;

                this = &reserved_mem[i];
                next = &reserved_mem[i + 1];

                if (this->base + this->size > next->base) {
                        phys_addr_t this_end, next_end;

                        this_end = this->base + this->size;
                        next_end = next->base + next->size;
                        pr_err("OVERLAP DETECTED!\n%s (%pa--%pa) overlaps with %s (%pa--%pa)\n",
                               this->name, &this->base, &this_end,
                               next->name, &next->base, &next_end);
                }
        }
}

/**
 * fdt_init_reserved_mem_node() - Initialize a reserved memory region
 * @rmem: reserved_mem struct of the memory region to be initialized.
 *
 * This function is used to call the region specific initialization
 * function for a reserved memory region.
 */
static void __init fdt_init_reserved_mem_node(struct reserved_mem *rmem)
{
        unsigned long node = rmem->fdt_node;
        int err = 0;
        bool nomap;

        nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

        err = __reserved_mem_init_node(rmem);
        if (err != 0 && err != -ENOENT) {
                pr_info("node %s compatible matching failed\n", rmem->name);
                if (nomap)
                        memblock_clear_nomap(rmem->base, rmem->size);
                else
                        memblock_phys_free(rmem->base, rmem->size);
        } else {
                phys_addr_t end = rmem->base + rmem->size - 1;
                bool reusable =
                        of_get_flat_dt_prop(node, "reusable", NULL) != NULL;

                pr_info("%pa..%pa (%lu KiB) %s %s %s\n",
                        &rmem->base, &end, (unsigned long)(rmem->size / SZ_1K),
                        nomap ? "nomap" : "map",
                        reusable ? "reusable" : "non-reusable",
                        rmem->name ? rmem->name : "unknown");
        }
}
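
/*
 * Region specific handlers are registered with the RESERVEDMEM_OF_DECLARE()
 * macro from <linux/of_reserved_mem.h>, which places an of_device_id entry
 * into the table walked by __reserved_mem_init_node() above. A minimal
 * sketch follows; the handler and compatible names are hypothetical (the
 * kernel's CMA setup registers itself in a very similar way for
 * "shared-dma-pool"):
 *
 *      static int __init my_rmem_setup(struct reserved_mem *rmem)
 *      {
 *              // validate properties, set rmem->ops, etc.
 *              return 0;
 *      }
 *      RESERVEDMEM_OF_DECLARE(my_rmem, "vendor,my-pool", my_rmem_setup);
 */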
"reusable" : "non-reusable", 604 rmem->name ? rmem->name : "unknown"); 605 } 606 } 607 608 struct rmem_assigned_device { 609 struct device *dev; 610 struct reserved_mem *rmem; 611 struct list_head list; 612 }; 613 614 static LIST_HEAD(of_rmem_assigned_device_list); 615 static DEFINE_MUTEX(of_rmem_assigned_device_mutex); 616 617 /** 618 * of_reserved_mem_device_init_by_idx() - assign reserved memory region to 619 * given device 620 * @dev: Pointer to the device to configure 621 * @np: Pointer to the device_node with 'reserved-memory' property 622 * @idx: Index of selected region 623 * 624 * This function assigns respective DMA-mapping operations based on reserved 625 * memory region specified by 'memory-region' property in @np node to the @dev 626 * device. When driver needs to use more than one reserved memory region, it 627 * should allocate child devices and initialize regions by name for each of 628 * child device. 629 * 630 * Returns error code or zero on success. 631 */ 632 int of_reserved_mem_device_init_by_idx(struct device *dev, 633 struct device_node *np, int idx) 634 { 635 struct rmem_assigned_device *rd; 636 struct device_node *target; 637 struct reserved_mem *rmem; 638 int ret; 639 640 if (!np || !dev) 641 return -EINVAL; 642 643 target = of_parse_phandle(np, "memory-region", idx); 644 if (!target) 645 return -ENODEV; 646 647 if (!of_device_is_available(target)) { 648 of_node_put(target); 649 return 0; 650 } 651 652 rmem = of_reserved_mem_lookup(target); 653 of_node_put(target); 654 655 if (!rmem || !rmem->ops || !rmem->ops->device_init) 656 return -EINVAL; 657 658 rd = kmalloc(sizeof(struct rmem_assigned_device), GFP_KERNEL); 659 if (!rd) 660 return -ENOMEM; 661 662 ret = rmem->ops->device_init(rmem, dev); 663 if (ret == 0) { 664 rd->dev = dev; 665 rd->rmem = rmem; 666 667 mutex_lock(&of_rmem_assigned_device_mutex); 668 list_add(&rd->list, &of_rmem_assigned_device_list); 669 mutex_unlock(&of_rmem_assigned_device_mutex); 670 671 dev_info(dev, "assigned reserved memory node %s\n", rmem->name); 672 } else { 673 kfree(rd); 674 } 675 676 return ret; 677 } 678 EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_idx); 679 680 /** 681 * of_reserved_mem_device_init_by_name() - assign named reserved memory region 682 * to given device 683 * @dev: pointer to the device to configure 684 * @np: pointer to the device node with 'memory-region' property 685 * @name: name of the selected memory region 686 * 687 * Returns: 0 on success or a negative error-code on failure. 688 */ 689 int of_reserved_mem_device_init_by_name(struct device *dev, 690 struct device_node *np, 691 const char *name) 692 { 693 int idx = of_property_match_string(np, "memory-region-names", name); 694 695 return of_reserved_mem_device_init_by_idx(dev, np, idx); 696 } 697 EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_name); 698 699 /** 700 * of_reserved_mem_device_release() - release reserved memory device structures 701 * @dev: Pointer to the device to deconfigure 702 * 703 * This function releases structures allocated for memory region handling for 704 * the given device. 

/**
 * of_reserved_mem_device_release() - release reserved memory device structures
 * @dev: Pointer to the device to deconfigure
 *
 * This function releases structures allocated for memory region handling for
 * the given device.
 */
void of_reserved_mem_device_release(struct device *dev)
{
        struct rmem_assigned_device *rd, *tmp;
        LIST_HEAD(release_list);

        mutex_lock(&of_rmem_assigned_device_mutex);
        list_for_each_entry_safe(rd, tmp, &of_rmem_assigned_device_list, list) {
                if (rd->dev == dev)
                        list_move_tail(&rd->list, &release_list);
        }
        mutex_unlock(&of_rmem_assigned_device_mutex);

        list_for_each_entry_safe(rd, tmp, &release_list, list) {
                if (rd->rmem && rd->rmem->ops && rd->rmem->ops->device_release)
                        rd->rmem->ops->device_release(rd->rmem, dev);

                kfree(rd);
        }
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_release);

/**
 * of_reserved_mem_lookup() - acquire reserved_mem from a device node
 * @np: node pointer of the desired reserved-memory region
 *
 * This function allows drivers to acquire a reference to the reserved_mem
 * struct based on a device node handle.
 *
 * Returns a reserved_mem reference, or NULL on error.
 */
struct reserved_mem *of_reserved_mem_lookup(struct device_node *np)
{
        const char *name;
        int i;

        if (!np->full_name)
                return NULL;

        name = kbasename(np->full_name);
        for (i = 0; i < reserved_mem_count; i++)
                if (!strcmp(reserved_mem[i].name, name))
                        return &reserved_mem[i];

        return NULL;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_lookup);

/**
 * of_reserved_mem_region_to_resource() - Get a reserved memory region as a resource
 * @np: node containing 'memory-region' property
 * @idx: index of 'memory-region' property to lookup
 * @res: Pointer to a struct resource to fill in with reserved region
 *
 * This function allows drivers to lookup a node's 'memory-region' property
 * entries by index and return a struct resource for the entry.
 *
 * Returns 0 on success with @res filled in. Returns -ENODEV if 'memory-region'
 * is missing or unavailable, -EINVAL for any other error.
 */
int of_reserved_mem_region_to_resource(const struct device_node *np,
                                       unsigned int idx, struct resource *res)
{
        struct reserved_mem *rmem;

        if (!np)
                return -EINVAL;

        struct device_node __free(device_node) *target = of_parse_phandle(np, "memory-region", idx);
        if (!target || !of_device_is_available(target))
                return -ENODEV;

        rmem = of_reserved_mem_lookup(target);
        if (!rmem)
                return -EINVAL;

        resource_set_range(res, rmem->base, rmem->size);
        res->flags = IORESOURCE_MEM;
        res->name = rmem->name;
        return 0;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_region_to_resource);
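
/*
 * Illustrative use of of_reserved_mem_region_to_resource() from a driver
 * (hypothetical device node with at least one 'memory-region' entry):
 *
 *      struct resource res;
 *      int err;
 *
 *      err = of_reserved_mem_region_to_resource(dev->of_node, 0, &res);
 *      if (err)
 *              return err;
 *      // res now spans the reserved region: resource_size(&res) bytes
 *      // starting at res.start.
 */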

/**
 * of_reserved_mem_region_to_resource_byname() - Get a reserved memory region as a resource
 * @np: node containing 'memory-region' property
 * @name: name of 'memory-region' property entry to lookup
 * @res: Pointer to a struct resource to fill in with reserved region
 *
 * This function allows drivers to lookup a node's 'memory-region' property
 * entries by name and return a struct resource for the entry.
 *
 * Returns 0 on success with @res filled in, or a negative error-code on
 * failure.
 */
int of_reserved_mem_region_to_resource_byname(const struct device_node *np,
                                              const char *name,
                                              struct resource *res)
{
        int idx;

        if (!name)
                return -EINVAL;

        idx = of_property_match_string(np, "memory-region-names", name);
        if (idx < 0)
                return idx;

        return of_reserved_mem_region_to_resource(np, idx, res);
}
EXPORT_SYMBOL_GPL(of_reserved_mem_region_to_resource_byname);

/**
 * of_reserved_mem_region_count() - Return the number of 'memory-region' entries
 * @np: node containing 'memory-region' property
 *
 * This function allows drivers to retrieve the number of entries for a node's
 * 'memory-region' property.
 *
 * Returns the number of entries on success, or negative error code on a
 * malformed property.
 */
int of_reserved_mem_region_count(const struct device_node *np)
{
        return of_count_phandle_with_args(np, "memory-region", NULL);
}
EXPORT_SYMBOL_GPL(of_reserved_mem_region_count);
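
/*
 * The count helper above combines naturally with the by-index lookup,
 * e.g. to walk every 'memory-region' entry of a node. Illustrative
 * sketch only, error handling elided:
 *
 *      int i, count = of_reserved_mem_region_count(np);
 *
 *      for (i = 0; i < count; i++) {
 *              struct resource res;
 *
 *              if (!of_reserved_mem_region_to_resource(np, i, &res))
 *                      dev_dbg(dev, "region %d: %pR\n", i, &res);
 *      }
 */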