// SPDX-License-Identifier: GPL-2.0+
/*
 * Device tree based initialization code for reserved memory.
 *
 * Copyright (c) 2013, 2015 The Linux Foundation. All Rights Reserved.
 * Copyright (c) 2013,2014 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 * Author: Marek Szyprowski <m.szyprowski@samsung.com>
 * Author: Josh Cartwright <joshc@codeaurora.org>
 */

#define pr_fmt(fmt)	"OF: reserved mem: " fmt

#include <linux/err.h>
#include <linux/libfdt.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/of_reserved_mem.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/kmemleak.h>
#include <linux/cma.h>

#include "of_private.h"

#define MAX_RESERVED_REGIONS	64
static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
static int reserved_mem_count;

static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
	phys_addr_t *res_base)
{
	phys_addr_t base;
	int err = 0;

	end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end;
	align = !align ? SMP_CACHE_BYTES : align;
	base = memblock_phys_alloc_range(size, align, start, end);
	if (!base)
		return -ENOMEM;

	*res_base = base;
	if (nomap) {
		err = memblock_mark_nomap(base, size);
		if (err)
			memblock_phys_free(base, size);
	}

	kmemleak_ignore_phys(base);

	return err;
}

/*
 * fdt_reserved_mem_save_node() - save fdt node for second pass initialization
 */
static void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
					      phys_addr_t base, phys_addr_t size)
{
	struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];

	if (reserved_mem_count == ARRAY_SIZE(reserved_mem)) {
		pr_err("not enough space for all defined regions.\n");
		return;
	}

	rmem->fdt_node = node;
	rmem->name = uname;
	rmem->base = base;
	rmem->size = size;

	reserved_mem_count++;
	return;
}

static int __init early_init_dt_reserve_memory(phys_addr_t base,
					       phys_addr_t size, bool nomap)
{
	if (nomap) {
		/*
		 * If the memory is already reserved (by another region), we
		 * should not allow it to be marked nomap, but don't worry
		 * if the region isn't memory as it won't be mapped.
		 */
		if (memblock_overlaps_region(&memblock.memory, base, size) &&
		    memblock_is_region_reserved(base, size))
			return -EBUSY;

		return memblock_mark_nomap(base, size);
	}
	return memblock_reserve(base, size);
}

/*
 * __reserved_mem_reserve_reg() - reserve all memory described in 'reg' property
 */
static int __init __reserved_mem_reserve_reg(unsigned long node,
					     const char *uname)
{
	int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
	phys_addr_t base, size;
	int len;
	const __be32 *prop;
	int first = 1;
	bool nomap;

	prop = of_get_flat_dt_prop(node, "reg", &len);
	if (!prop)
		return -ENOENT;

	if (len && len % t_len != 0) {
		pr_err("Reserved memory: invalid reg property in '%s', skipping node.\n",
		       uname);
		return -EINVAL;
	}

	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

	while (len >= t_len) {
		base = dt_mem_next_cell(dt_root_addr_cells, &prop);
		size = dt_mem_next_cell(dt_root_size_cells, &prop);

		if (size &&
		    early_init_dt_reserve_memory(base, size, nomap) == 0)
			pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
				 uname, &base, (unsigned long)(size / SZ_1M));
		else
			pr_err("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
			       uname, &base, (unsigned long)(size / SZ_1M));

		len -= t_len;
		if (first) {
			fdt_reserved_mem_save_node(node, uname, base, size);
			first = 0;
		}
	}
	return 0;
}

/*
 * __reserved_mem_check_root() - check that the #size-cells and #address-cells
 * values provided in /reserved-memory match the values supported by the
 * current implementation, and that a 'ranges' property is present
 */
static int __init __reserved_mem_check_root(unsigned long node)
{
	const __be32 *prop;

	prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
	if (!prop || be32_to_cpup(prop) != dt_root_size_cells)
		return -EINVAL;

	prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
	if (!prop || be32_to_cpup(prop) != dt_root_addr_cells)
		return -EINVAL;

	prop = of_get_flat_dt_prop(node, "ranges", NULL);
	if (!prop)
		return -EINVAL;
	return 0;
}

/*
 * fdt_scan_reserved_mem() - scan the '/reserved-memory' FDT node and its
 * children for reserved memory
 */
int __init fdt_scan_reserved_mem(void)
{
	int node, child;
	const void *fdt = initial_boot_params;

	node = fdt_path_offset(fdt, "/reserved-memory");
	if (node < 0)
		return -ENODEV;

	if (__reserved_mem_check_root(node) != 0) {
		pr_err("Reserved memory: unsupported node format, ignoring\n");
		return -EINVAL;
	}

	fdt_for_each_subnode(child, fdt, node) {
		const char *uname;
		int err;

		if (!of_fdt_device_is_available(fdt, child))
			continue;

		uname = fdt_get_name(fdt, child, NULL);

		err = __reserved_mem_reserve_reg(child, uname);
		if (err == -ENOENT && of_get_flat_dt_prop(child, "size", NULL))
			fdt_reserved_mem_save_node(child, uname, 0, 0);
	}
	return 0;
}

/*
 * __reserved_mem_alloc_in_range() - allocate reserved memory described with
 *	'alloc-ranges'. Choose bottom-up/top-down depending on nearby existing
 *	reserved regions to keep the reserved memory contiguous if possible.
 */
static int __init __reserved_mem_alloc_in_range(phys_addr_t size,
	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
	phys_addr_t *res_base)
{
	bool prev_bottom_up = memblock_bottom_up();
	bool bottom_up = false, top_down = false;
	int ret, i;

	for (i = 0; i < reserved_mem_count; i++) {
		struct reserved_mem *rmem = &reserved_mem[i];

		/* Skip regions that were not reserved yet */
		if (rmem->size == 0)
			continue;

		/*
		 * If range starts next to an existing reservation, use bottom-up:
		 *	|....RRRR................RRRRRRRR..............|
		 *	       --RRRR------
		 */
		if (start >= rmem->base && start <= (rmem->base + rmem->size))
			bottom_up = true;

		/*
		 * If range ends next to an existing reservation, use top-down:
		 *	|....RRRR................RRRRRRRR..............|
		 *	              -------RRRR-----
		 */
		if (end >= rmem->base && end <= (rmem->base + rmem->size))
			top_down = true;
	}

	/* Change setting only if either bottom-up or top-down was selected */
	if (bottom_up != top_down)
		memblock_set_bottom_up(bottom_up);

	ret = early_init_dt_alloc_reserved_memory_arch(size, align,
			start, end, nomap, res_base);

	/* Restore old setting if needed */
	if (bottom_up != top_down)
		memblock_set_bottom_up(prev_bottom_up);

	return ret;
}

/*
 * __reserved_mem_alloc_size() - allocate reserved memory described by
 *	'size', 'alignment' and 'alloc-ranges' properties.
 */
static int __init __reserved_mem_alloc_size(unsigned long node,
	const char *uname, phys_addr_t *res_base, phys_addr_t *res_size)
{
	int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
	phys_addr_t start = 0, end = 0;
	phys_addr_t base = 0, align = 0, size;
	int len;
	const __be32 *prop;
	bool nomap;
	int ret;

	prop = of_get_flat_dt_prop(node, "size", &len);
	if (!prop)
		return -EINVAL;

	if (len != dt_root_size_cells * sizeof(__be32)) {
		pr_err("invalid size property in '%s' node.\n", uname);
		return -EINVAL;
	}
	size = dt_mem_next_cell(dt_root_size_cells, &prop);

	prop = of_get_flat_dt_prop(node, "alignment", &len);
	if (prop) {
		if (len != dt_root_addr_cells * sizeof(__be32)) {
			pr_err("invalid alignment property in '%s' node.\n",
			       uname);
			return -EINVAL;
		}
		align = dt_mem_next_cell(dt_root_addr_cells, &prop);
	}

	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

	/* Need to adjust the alignment to satisfy the CMA requirement */
	if (IS_ENABLED(CONFIG_CMA)
	    && of_flat_dt_is_compatible(node, "shared-dma-pool")
	    && of_get_flat_dt_prop(node, "reusable", NULL)
	    && !nomap)
		align = max_t(phys_addr_t, align, CMA_MIN_ALIGNMENT_BYTES);

	prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
	if (prop) {

		if (len % t_len != 0) {
			pr_err("invalid alloc-ranges property in '%s', skipping node.\n",
			       uname);
			return -EINVAL;
		}

		base = 0;

		while (len > 0) {
			start = dt_mem_next_cell(dt_root_addr_cells, &prop);
			end = start + dt_mem_next_cell(dt_root_size_cells,
						       &prop);

			ret = __reserved_mem_alloc_in_range(size, align,
					start, end, nomap, &base);
			if (ret == 0) {
				pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
					 uname, &base,
					 (unsigned long)(size / SZ_1M));
				break;
			}
			len -= t_len;
		}

	} else {
		ret = early_init_dt_alloc_reserved_memory_arch(size, align,
							0, 0, nomap, &base);
		if (ret == 0)
			pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
				 uname, &base, (unsigned long)(size / SZ_1M));
	}

	if (base == 0) {
		pr_err("failed to allocate memory for node '%s': size %lu MiB\n",
		       uname, (unsigned long)(size / SZ_1M));
		return -ENOMEM;
	}

	*res_base = base;
	*res_size = size;

	return 0;
}

static const struct of_device_id __rmem_of_table_sentinel
	__used __section("__reservedmem_of_table_end");

/*
 * __reserved_mem_init_node() - call region specific reserved memory init code
 */
static int __init __reserved_mem_init_node(struct reserved_mem *rmem)
{
	extern const struct of_device_id __reservedmem_of_table[];
	const struct of_device_id *i;
	int ret = -ENOENT;

	for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) {
		reservedmem_of_init_fn initfn = i->data;
		const char *compat = i->compatible;

		if (!of_flat_dt_is_compatible(rmem->fdt_node, compat))
			continue;

		ret = initfn(rmem);
		if (ret == 0) {
			pr_info("initialized node %s, compatible id %s\n",
				rmem->name, compat);
			break;
		}
	}
	return ret;
}
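
/*
 * Illustrative sketch (not part of the original file): entries in
 * __reservedmem_of_table, which __reserved_mem_init_node() walks above, are
 * normally registered with the RESERVEDMEM_OF_DECLARE() macro from
 * <linux/of_reserved_mem.h>. The compatible string and init function below
 * are made-up names showing how such a region-specific hook could look.
 */
#if 0	/* example only, not built */
static int __init example_pool_init(struct reserved_mem *rmem)
{
	/* Validate the region and stash any per-region state here. */
	pr_info("example pool at %pa, size %pa\n", &rmem->base, &rmem->size);
	return 0;
}
RESERVEDMEM_OF_DECLARE(example_pool, "vendor,example-pool", example_pool_init);
#endif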

static int __init __rmem_cmp(const void *a, const void *b)
{
	const struct reserved_mem *ra = a, *rb = b;

	if (ra->base < rb->base)
		return -1;

	if (ra->base > rb->base)
		return 1;

	/*
	 * Put the dynamic allocations (address == 0, size == 0) before static
	 * allocations at address 0x0 so that overlap detection works
	 * correctly.
	 */
	if (ra->size < rb->size)
		return -1;
	if (ra->size > rb->size)
		return 1;

	if (ra->fdt_node < rb->fdt_node)
		return -1;
	if (ra->fdt_node > rb->fdt_node)
		return 1;

	return 0;
}

static void __init __rmem_check_for_overlap(void)
{
	int i;

	if (reserved_mem_count < 2)
		return;

	sort(reserved_mem, reserved_mem_count, sizeof(reserved_mem[0]),
	     __rmem_cmp, NULL);
	for (i = 0; i < reserved_mem_count - 1; i++) {
		struct reserved_mem *this, *next;

		this = &reserved_mem[i];
		next = &reserved_mem[i + 1];

		if (this->base + this->size > next->base) {
			phys_addr_t this_end, next_end;

			this_end = this->base + this->size;
			next_end = next->base + next->size;
			pr_err("OVERLAP DETECTED!\n%s (%pa--%pa) overlaps with %s (%pa--%pa)\n",
			       this->name, &this->base, &this_end,
			       next->name, &next->base, &next_end);
		}
	}
}

/**
 * fdt_init_reserved_mem() - allocate and init all saved reserved memory regions
 */
void __init fdt_init_reserved_mem(void)
{
	int i;

	/* check for overlapping reserved regions */
	__rmem_check_for_overlap();

	for (i = 0; i < reserved_mem_count; i++) {
		struct reserved_mem *rmem = &reserved_mem[i];
		unsigned long node = rmem->fdt_node;
		int err = 0;
		bool nomap;

		nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

		if (rmem->size == 0)
			err = __reserved_mem_alloc_size(node, rmem->name,
						&rmem->base, &rmem->size);
		if (err == 0) {
			err = __reserved_mem_init_node(rmem);
			if (err != 0 && err != -ENOENT) {
				pr_info("node %s compatible matching failed\n",
					rmem->name);
				if (nomap)
					memblock_clear_nomap(rmem->base, rmem->size);
				else
					memblock_phys_free(rmem->base,
							   rmem->size);
			} else {
				phys_addr_t end = rmem->base + rmem->size - 1;
				bool reusable =
					(of_get_flat_dt_prop(node, "reusable", NULL)) != NULL;

				pr_info("%pa..%pa (%lu KiB) %s %s %s\n",
					&rmem->base, &end, (unsigned long)(rmem->size / SZ_1K),
					nomap ? "nomap" : "map",
					reusable ? "reusable" : "non-reusable",
					rmem->name ? rmem->name : "unknown");
			}
		}
	}
}

struct rmem_assigned_device {
	struct device *dev;
	struct reserved_mem *rmem;
	struct list_head list;
};

static LIST_HEAD(of_rmem_assigned_device_list);
static DEFINE_MUTEX(of_rmem_assigned_device_mutex);

/**
 * of_reserved_mem_device_init_by_idx() - assign reserved memory region to
 *					  given device
 * @dev:	Pointer to the device to configure
 * @np:		Pointer to the device_node with 'memory-region' property
 * @idx:	Index of selected region
 *
 * This function assigns the respective DMA-mapping operations, based on the
 * reserved memory region specified by the 'memory-region' property in the
 * @np node, to the @dev device. When a driver needs to use more than one
 * reserved memory region, it should allocate child devices and initialize a
 * region by name for each child device.
 *
 * Returns error code or zero on success.
 */
int of_reserved_mem_device_init_by_idx(struct device *dev,
				       struct device_node *np, int idx)
{
	struct rmem_assigned_device *rd;
	struct device_node *target;
	struct reserved_mem *rmem;
	int ret;

	if (!np || !dev)
		return -EINVAL;

	target = of_parse_phandle(np, "memory-region", idx);
	if (!target)
		return -ENODEV;

	if (!of_device_is_available(target)) {
		of_node_put(target);
		return 0;
	}

	rmem = of_reserved_mem_lookup(target);
	of_node_put(target);

	if (!rmem || !rmem->ops || !rmem->ops->device_init)
		return -EINVAL;

	rd = kmalloc(sizeof(struct rmem_assigned_device), GFP_KERNEL);
	if (!rd)
		return -ENOMEM;

	ret = rmem->ops->device_init(rmem, dev);
	if (ret == 0) {
		rd->dev = dev;
		rd->rmem = rmem;

		mutex_lock(&of_rmem_assigned_device_mutex);
		list_add(&rd->list, &of_rmem_assigned_device_list);
		mutex_unlock(&of_rmem_assigned_device_mutex);

		dev_info(dev, "assigned reserved memory node %s\n", rmem->name);
	} else {
		kfree(rd);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_idx);

/**
 * of_reserved_mem_device_init_by_name() - assign named reserved memory region
 *					   to given device
 * @dev:	pointer to the device to configure
 * @np:		pointer to the device node with 'memory-region' property
 * @name:	name of the selected memory region
 *
 * Returns: 0 on success or a negative error-code on failure.
 */
int of_reserved_mem_device_init_by_name(struct device *dev,
					struct device_node *np,
					const char *name)
{
	int idx = of_property_match_string(np, "memory-region-names", name);

	return of_reserved_mem_device_init_by_idx(dev, np, idx);
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_name);

/**
 * of_reserved_mem_device_release() - release reserved memory device structures
 * @dev:	Pointer to the device to deconfigure
 *
 * This function releases structures allocated for memory region handling for
 * the given device.
 */
void of_reserved_mem_device_release(struct device *dev)
{
	struct rmem_assigned_device *rd, *tmp;
	LIST_HEAD(release_list);

	mutex_lock(&of_rmem_assigned_device_mutex);
	list_for_each_entry_safe(rd, tmp, &of_rmem_assigned_device_list, list) {
		if (rd->dev == dev)
			list_move_tail(&rd->list, &release_list);
	}
	mutex_unlock(&of_rmem_assigned_device_mutex);

	list_for_each_entry_safe(rd, tmp, &release_list, list) {
		if (rd->rmem && rd->rmem->ops && rd->rmem->ops->device_release)
			rd->rmem->ops->device_release(rd->rmem, dev);

		kfree(rd);
	}
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_release);

/**
 * of_reserved_mem_lookup() - acquire reserved_mem from a device node
 * @np:		node pointer of the desired reserved-memory region
 *
 * This function allows drivers to acquire a reference to the reserved_mem
 * struct based on a device node handle.
 *
 * Returns a reserved_mem reference, or NULL on error.
 */
struct reserved_mem *of_reserved_mem_lookup(struct device_node *np)
{
	const char *name;
	int i;

	if (!np->full_name)
		return NULL;

	name = kbasename(np->full_name);
	for (i = 0; i < reserved_mem_count; i++)
		if (!strcmp(reserved_mem[i].name, name))
			return &reserved_mem[i];

	return NULL;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_lookup);
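
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * platform driver showing how a consumer attaches and releases a reserved
 * region using the helpers exported above. The driver functions and the
 * region name "pool" are assumptions for the example; the region itself
 * would be referenced by the consumer's "memory-region" and
 * "memory-region-names" properties.
 */
#if 0	/* example only, not built */
#include <linux/platform_device.h>

static int example_consumer_probe(struct platform_device *pdev)
{
	int ret;

	/* Attach the region listed as "pool" in memory-region-names */
	ret = of_reserved_mem_device_init_by_name(&pdev->dev,
						  pdev->dev.of_node, "pool");
	if (ret)
		return ret;

	/* ... set up the device; allocations may now target the region ... */
	return 0;
}

static int example_consumer_remove(struct platform_device *pdev)
{
	/* Drop the per-device assignment created in probe */
	of_reserved_mem_device_release(&pdev->dev);
	return 0;
}
#endif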