1 /* 2 * Device tree based initialization code for reserved memory. 3 * 4 * Copyright (c) 2013, 2015 The Linux Foundation. All Rights Reserved. 5 * Copyright (c) 2013,2014 Samsung Electronics Co., Ltd. 6 * http://www.samsung.com 7 * Author: Marek Szyprowski <m.szyprowski@samsung.com> 8 * Author: Josh Cartwright <joshc@codeaurora.org> 9 * 10 * This program is free software; you can redistribute it and/or 11 * modify it under the terms of the GNU General Public License as 12 * published by the Free Software Foundation; either version 2 of the 13 * License or (at your optional) any later version of the license. 14 */ 15 16 #include <linux/err.h> 17 #include <linux/of.h> 18 #include <linux/of_fdt.h> 19 #include <linux/of_platform.h> 20 #include <linux/mm.h> 21 #include <linux/sizes.h> 22 #include <linux/of_reserved_mem.h> 23 #include <linux/sort.h> 24 25 #define MAX_RESERVED_REGIONS 16 26 static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS]; 27 static int reserved_mem_count; 28 29 #if defined(CONFIG_HAVE_MEMBLOCK) 30 #include <linux/memblock.h> 31 int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size, 32 phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap, 33 phys_addr_t *res_base) 34 { 35 /* 36 * We use __memblock_alloc_base() because memblock_alloc_base() 37 * panic()s on allocation failure. 38 */ 39 phys_addr_t base = __memblock_alloc_base(size, align, end); 40 if (!base) 41 return -ENOMEM; 42 43 /* 44 * Check if the allocated region fits in to start..end window 45 */ 46 if (base < start) { 47 memblock_free(base, size); 48 return -ENOMEM; 49 } 50 51 *res_base = base; 52 if (nomap) 53 return memblock_remove(base, size); 54 return 0; 55 } 56 #else 57 int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size, 58 phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap, 59 phys_addr_t *res_base) 60 { 61 pr_err("Reserved memory not supported, ignoring region 0x%llx%s\n", 62 size, nomap ? " (nomap)" : ""); 63 return -ENOSYS; 64 } 65 #endif 66 67 /** 68 * res_mem_save_node() - save fdt node for second pass initialization 69 */ 70 void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname, 71 phys_addr_t base, phys_addr_t size) 72 { 73 struct reserved_mem *rmem = &reserved_mem[reserved_mem_count]; 74 75 if (reserved_mem_count == ARRAY_SIZE(reserved_mem)) { 76 pr_err("Reserved memory: not enough space all defined regions.\n"); 77 return; 78 } 79 80 rmem->fdt_node = node; 81 rmem->name = uname; 82 rmem->base = base; 83 rmem->size = size; 84 85 reserved_mem_count++; 86 return; 87 } 88 89 /** 90 * res_mem_alloc_size() - allocate reserved memory described by 'size', 'align' 91 * and 'alloc-ranges' properties 92 */ 93 static int __init __reserved_mem_alloc_size(unsigned long node, 94 const char *uname, phys_addr_t *res_base, phys_addr_t *res_size) 95 { 96 int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32); 97 phys_addr_t start = 0, end = 0; 98 phys_addr_t base = 0, align = 0, size; 99 int len; 100 const __be32 *prop; 101 int nomap; 102 int ret; 103 104 prop = of_get_flat_dt_prop(node, "size", &len); 105 if (!prop) 106 return -EINVAL; 107 108 if (len != dt_root_size_cells * sizeof(__be32)) { 109 pr_err("Reserved memory: invalid size property in '%s' node.\n", 110 uname); 111 return -EINVAL; 112 } 113 size = dt_mem_next_cell(dt_root_size_cells, &prop); 114 115 nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL; 116 117 prop = of_get_flat_dt_prop(node, "alignment", &len); 118 if (prop) { 119 if (len != dt_root_addr_cells * sizeof(__be32)) { 120 pr_err("Reserved memory: invalid alignment property in '%s' node.\n", 121 uname); 122 return -EINVAL; 123 } 124 align = dt_mem_next_cell(dt_root_addr_cells, &prop); 125 } 126 127 prop = of_get_flat_dt_prop(node, "alloc-ranges", &len); 128 if (prop) { 129 130 if (len % t_len != 0) { 131 pr_err("Reserved memory: invalid alloc-ranges property in '%s', skipping node.\n", 132 uname); 133 return -EINVAL; 134 } 135 136 base = 0; 137 138 while (len > 0) { 139 start = dt_mem_next_cell(dt_root_addr_cells, &prop); 140 end = start + dt_mem_next_cell(dt_root_size_cells, 141 &prop); 142 143 ret = early_init_dt_alloc_reserved_memory_arch(size, 144 align, start, end, nomap, &base); 145 if (ret == 0) { 146 pr_debug("Reserved memory: allocated memory for '%s' node: base %pa, size %ld MiB\n", 147 uname, &base, 148 (unsigned long)size / SZ_1M); 149 break; 150 } 151 len -= t_len; 152 } 153 154 } else { 155 ret = early_init_dt_alloc_reserved_memory_arch(size, align, 156 0, 0, nomap, &base); 157 if (ret == 0) 158 pr_debug("Reserved memory: allocated memory for '%s' node: base %pa, size %ld MiB\n", 159 uname, &base, (unsigned long)size / SZ_1M); 160 } 161 162 if (base == 0) { 163 pr_info("Reserved memory: failed to allocate memory for node '%s'\n", 164 uname); 165 return -ENOMEM; 166 } 167 168 *res_base = base; 169 *res_size = size; 170 171 return 0; 172 } 173 174 static const struct of_device_id __rmem_of_table_sentinel 175 __used __section(__reservedmem_of_table_end); 176 177 /** 178 * res_mem_init_node() - call region specific reserved memory init code 179 */ 180 static int __init __reserved_mem_init_node(struct reserved_mem *rmem) 181 { 182 extern const struct of_device_id __reservedmem_of_table[]; 183 const struct of_device_id *i; 184 185 for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) { 186 reservedmem_of_init_fn initfn = i->data; 187 const char *compat = i->compatible; 188 189 if (!of_flat_dt_is_compatible(rmem->fdt_node, compat)) 190 continue; 191 192 if (initfn(rmem) == 0) { 193 pr_info("Reserved memory: initialized node %s, compatible id %s\n", 194 rmem->name, compat); 195 return 0; 196 } 197 } 198 return -ENOENT; 199 } 200 201 static int __init __rmem_cmp(const void *a, const void *b) 202 { 203 const struct reserved_mem *ra = a, *rb = b; 204 205 return ra->base - rb->base; 206 } 207 208 static void __init __rmem_check_for_overlap(void) 209 { 210 int i; 211 212 if (reserved_mem_count < 2) 213 return; 214 215 sort(reserved_mem, reserved_mem_count, sizeof(reserved_mem[0]), 216 __rmem_cmp, NULL); 217 for (i = 0; i < reserved_mem_count - 1; i++) { 218 struct reserved_mem *this, *next; 219 220 this = &reserved_mem[i]; 221 next = &reserved_mem[i + 1]; 222 if (!(this->base && next->base)) 223 continue; 224 if (this->base + this->size > next->base) { 225 phys_addr_t this_end, next_end; 226 227 this_end = this->base + this->size; 228 next_end = next->base + next->size; 229 WARN(1, 230 "Reserved memory: OVERLAP DETECTED!\n%s (%pa--%pa) overlaps with %s (%pa--%pa)\n", 231 this->name, &this->base, &this_end, 232 next->name, &next->base, &next_end); 233 } 234 } 235 } 236 237 /** 238 * fdt_init_reserved_mem - allocate and init all saved reserved memory regions 239 */ 240 void __init fdt_init_reserved_mem(void) 241 { 242 int i; 243 244 /* check for overlapping reserved regions */ 245 __rmem_check_for_overlap(); 246 247 for (i = 0; i < reserved_mem_count; i++) { 248 struct reserved_mem *rmem = &reserved_mem[i]; 249 unsigned long node = rmem->fdt_node; 250 int len; 251 const __be32 *prop; 252 int err = 0; 253 254 prop = of_get_flat_dt_prop(node, "phandle", &len); 255 if (!prop) 256 prop = of_get_flat_dt_prop(node, "linux,phandle", &len); 257 if (prop) 258 rmem->phandle = of_read_number(prop, len/4); 259 260 if (rmem->size == 0) 261 err = __reserved_mem_alloc_size(node, rmem->name, 262 &rmem->base, &rmem->size); 263 if (err == 0) 264 __reserved_mem_init_node(rmem); 265 } 266 } 267 268 static inline struct reserved_mem *__find_rmem(struct device_node *node) 269 { 270 unsigned int i; 271 272 if (!node->phandle) 273 return NULL; 274 275 for (i = 0; i < reserved_mem_count; i++) 276 if (reserved_mem[i].phandle == node->phandle) 277 return &reserved_mem[i]; 278 return NULL; 279 } 280 281 /** 282 * of_reserved_mem_device_init() - assign reserved memory region to given device 283 * 284 * This function assign memory region pointed by "memory-region" device tree 285 * property to the given device. 286 */ 287 int of_reserved_mem_device_init(struct device *dev) 288 { 289 struct reserved_mem *rmem; 290 struct device_node *np; 291 int ret; 292 293 np = of_parse_phandle(dev->of_node, "memory-region", 0); 294 if (!np) 295 return -ENODEV; 296 297 rmem = __find_rmem(np); 298 of_node_put(np); 299 300 if (!rmem || !rmem->ops || !rmem->ops->device_init) 301 return -EINVAL; 302 303 ret = rmem->ops->device_init(rmem, dev); 304 if (ret == 0) 305 dev_info(dev, "assigned reserved memory node %s\n", rmem->name); 306 307 return ret; 308 } 309 EXPORT_SYMBOL_GPL(of_reserved_mem_device_init); 310 311 /** 312 * of_reserved_mem_device_release() - release reserved memory device structures 313 * 314 * This function releases structures allocated for memory region handling for 315 * the given device. 316 */ 317 void of_reserved_mem_device_release(struct device *dev) 318 { 319 struct reserved_mem *rmem; 320 struct device_node *np; 321 322 np = of_parse_phandle(dev->of_node, "memory-region", 0); 323 if (!np) 324 return; 325 326 rmem = __find_rmem(np); 327 of_node_put(np); 328 329 if (!rmem || !rmem->ops || !rmem->ops->device_release) 330 return; 331 332 rmem->ops->device_release(rmem, dev); 333 } 334 EXPORT_SYMBOL_GPL(of_reserved_mem_device_release); 335