// SPDX-License-Identifier: GPL-2.0
/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

struct dma_coherent_mem {
        void            *virt_base;     /* kernel mapping of the pool */
        dma_addr_t      device_base;    /* pool base as seen by the device */
        unsigned long   pfn_base;       /* first PFN of the backing memory */
        int             size;           /* pool size in pages */
        int             flags;          /* DMA_MEMORY_* flags */
        unsigned long   *bitmap;        /* one bit per page, set when in use */
        spinlock_t      spinlock;       /* protects the bitmap */
        bool            use_dev_dma_pfn_offset;
};

static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;

static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
{
        if (dev && dev->dma_mem)
                return dev->dma_mem;
        return NULL;
}

static inline dma_addr_t dma_get_device_base(struct device *dev,
                                             struct dma_coherent_mem *mem)
{
        if (mem->use_dev_dma_pfn_offset)
                return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT;
        else
                return mem->device_base;
}

/* Map the backing memory write-combined and allocate the page bitmap. */
static int dma_init_coherent_memory(
        phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
        struct dma_coherent_mem **mem)
{
        struct dma_coherent_mem *dma_mem = NULL;
        void *mem_base = NULL;
        int pages = size >> PAGE_SHIFT;
        int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
        int ret;

        if (!size) {
                ret = -EINVAL;
                goto out;
        }

        mem_base = memremap(phys_addr, size, MEMREMAP_WC);
        if (!mem_base) {
                ret = -EINVAL;
                goto out;
        }
        dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
        if (!dma_mem) {
                ret = -ENOMEM;
                goto out;
        }
        dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
        if (!dma_mem->bitmap) {
                ret = -ENOMEM;
                goto out;
        }

        dma_mem->virt_base = mem_base;
        dma_mem->device_base = device_addr;
        dma_mem->pfn_base = PFN_DOWN(phys_addr);
        dma_mem->size = pages;
        dma_mem->flags = flags;
        spin_lock_init(&dma_mem->spinlock);

        *mem = dma_mem;
        return 0;

out:
        kfree(dma_mem);
        if (mem_base)
                memunmap(mem_base);
        return ret;
}

static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
        if (!mem)
                return;

        memunmap(mem->virt_base);
        kfree(mem->bitmap);
        kfree(mem);
}

static int dma_assign_coherent_memory(struct device *dev,
                                      struct dma_coherent_mem *mem)
{
        if (!dev)
                return -ENODEV;

        if (dev->dma_mem)
                return -EBUSY;

        dev->dma_mem = mem;
        return 0;
}

int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
                                dma_addr_t device_addr, size_t size, int flags)
{
        struct dma_coherent_mem *mem;
        int ret;

        ret = dma_init_coherent_memory(phys_addr, device_addr, size, flags,
                                       &mem);
        if (ret)
                return ret;

        ret = dma_assign_coherent_memory(dev, mem);
        if (ret)
                dma_release_coherent_memory(mem);
        return ret;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);

void dma_release_declared_memory(struct device *dev)
{
        struct dma_coherent_mem *mem = dev->dma_mem;

        if (!mem)
                return;
        dma_release_coherent_memory(mem);
        dev->dma_mem = NULL;
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
                                        dma_addr_t device_addr, size_t size)
{
        struct dma_coherent_mem *mem = dev->dma_mem;
        unsigned long flags;
        int pos, err;

        /* Grow the request to cover the offset within the first page. */
        size += device_addr & ~PAGE_MASK;

        if (!mem)
                return ERR_PTR(-EINVAL);

        spin_lock_irqsave(&mem->spinlock, flags);
        pos = PFN_DOWN(device_addr - dma_get_device_base(dev, mem));
        err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
        spin_unlock_irqrestore(&mem->spinlock, flags);

        if (err != 0)
                return ERR_PTR(err);
        return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
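
/*
 * Hedged usage sketch (not part of this file): a platform driver might
 * declare a dedicated SRAM bank as its coherent pool and then pin down a
 * firmware-owned region inside it. The device, addresses and sizes below
 * are hypothetical.
 *
 *	ret = dma_declare_coherent_memory(&pdev->dev, 0x58000000,
 *					  0x58000000, SZ_1M,
 *					  DMA_MEMORY_EXCLUSIVE);
 *	if (ret)
 *		return ret;
 *
 *	fw_buf = dma_mark_declared_memory_occupied(&pdev->dev,
 *						   0x58000000, SZ_64K);
 *	if (IS_ERR(fw_buf))
 *		return PTR_ERR(fw_buf);
 */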

static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
                                       ssize_t size, dma_addr_t *dma_handle)
{
        int order = get_order(size);
        unsigned long flags;
        int pageno;
        void *ret;

        spin_lock_irqsave(&mem->spinlock, flags);

        if (unlikely(size > (mem->size << PAGE_SHIFT)))
                goto err;

        pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
        if (unlikely(pageno < 0))
                goto err;

        /*
         * Memory was found in the coherent area.
         */
        *dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
        ret = mem->virt_base + (pageno << PAGE_SHIFT);
        spin_unlock_irqrestore(&mem->spinlock, flags);
        memset(ret, 0, size);
        return ret;
err:
        spin_unlock_irqrestore(&mem->spinlock, flags);
        return NULL;
}

/**
 * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
 * @dev: device from which we allocate memory
 * @size: size of requested memory area
 * @dma_handle: this will be filled with the correct DMA handle
 * @ret: this pointer will be filled with the virtual address of the
 *	 allocated area
 *
 * This function should only be called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or non-zero if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
                                dma_addr_t *dma_handle, void **ret)
{
        struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

        if (!mem)
                return 0;

        *ret = __dma_alloc_from_coherent(mem, size, dma_handle);
        if (*ret)
                return 1;

        /*
         * In the case where the allocation cannot be satisfied from the
         * per-device area, try to fall back to generic memory if the
         * constraints allow it.
         */
        return mem->flags & DMA_MEMORY_EXCLUSIVE;
}

void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
{
        if (!dma_coherent_default_memory)
                return NULL;

        return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
                                         dma_handle);
}
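
/*
 * Hedged calling-convention sketch for the hooks above (hypothetical
 * arch code, not part of this file): dma_alloc_from_dev_coherent() is
 * consulted first, and the generic allocator is used only when it says so.
 *
 *	void *vaddr;
 *
 *	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &vaddr))
 *		return vaddr;	// pool hit, or an exclusive pool refused
 *				// the request (vaddr is then NULL)
 *	// otherwise fall through to the generic page allocator
 */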

static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
                                       int order, void *vaddr)
{
        if (mem && vaddr >= mem->virt_base && vaddr <
                   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
                int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
                unsigned long flags;

                spin_lock_irqsave(&mem->spinlock, flags);
                bitmap_release_region(mem->bitmap, page, order);
                spin_unlock_irqrestore(&mem->spinlock, flags);
                return 1;
        }
        return 0;
}

/**
 * dma_release_from_dev_coherent() - free memory to device coherent memory pool
 * @dev: device from which the memory was allocated
 * @order: the order of pages allocated
 * @vaddr: virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if the caller should
 * proceed with releasing memory from generic pools.
 */
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
{
        struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

        return __dma_release_from_coherent(mem, order, vaddr);
}

int dma_release_from_global_coherent(int order, void *vaddr)
{
        if (!dma_coherent_default_memory)
                return 0;

        return __dma_release_from_coherent(dma_coherent_default_memory, order,
                                           vaddr);
}
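
/*
 * Matching hedged free-path sketch (hypothetical arch code): the
 * per-device pool is tried first, mirroring the allocation side.
 *
 *	if (dma_release_from_dev_coherent(dev, get_order(size), vaddr))
 *		return;
 *	// not from the pool: release through the generic allocator
 */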

static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
                struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
{
        if (mem && vaddr >= mem->virt_base && vaddr + size <=
                   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
                unsigned long off = vma->vm_pgoff;
                int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
                int user_count = vma_pages(vma);
                int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

                *ret = -ENXIO;
                if (off < count && user_count <= count - off) {
                        unsigned long pfn = mem->pfn_base + start + off;
                        *ret = remap_pfn_range(vma, vma->vm_start, pfn,
                                               user_count << PAGE_SHIFT,
                                               vma->vm_page_prot);
                }
                return 1;
        }
        return 0;
}

/**
 * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
 * @dev: device from which the memory was allocated
 * @vma: vm_area for the userspace memory
 * @vaddr: CPU address returned by dma_alloc_from_dev_coherent
 * @size: size of the memory buffer allocated
 * @ret: result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if @vaddr belongs to the device coherent pool and the caller
 * should return @ret, or 0 if they should proceed with mapping memory from
 * generic areas.
 */
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
                               void *vaddr, size_t size, int *ret)
{
        struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

        return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
}
EXPORT_SYMBOL(dma_mmap_from_dev_coherent);

int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
                                  size_t size, int *ret)
{
        if (!dma_coherent_default_memory)
                return 0;

        return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
                                        vaddr, size, ret);
}

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

static struct reserved_mem *dma_reserved_default_memory __initdata;

static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
        struct dma_coherent_mem *mem = rmem->priv;
        int ret;

        if (!mem) {
                ret = dma_init_coherent_memory(rmem->base, rmem->base,
                                               rmem->size,
                                               DMA_MEMORY_EXCLUSIVE, &mem);
                if (ret) {
                        pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
                               &rmem->base, (unsigned long)rmem->size / SZ_1M);
                        return ret;
                }
        }
        mem->use_dev_dma_pfn_offset = true;
        rmem->priv = mem;
        dma_assign_coherent_memory(dev, mem);
        return 0;
}

static void rmem_dma_device_release(struct reserved_mem *rmem,
                                    struct device *dev)
{
        if (dev)
                dev->dma_mem = NULL;
}

static const struct reserved_mem_ops rmem_dma_ops = {
        .device_init    = rmem_dma_device_init,
        .device_release = rmem_dma_device_release,
};

static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
        unsigned long node = rmem->fdt_node;

        if (of_get_flat_dt_prop(node, "reusable", NULL))
                return -EINVAL;

#ifdef CONFIG_ARM
        if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
                pr_err("Reserved memory: regions without no-map are not yet supported\n");
                return -EINVAL;
        }

        if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
                WARN(dma_reserved_default_memory,
                     "Reserved memory: region for default DMA coherent area is redefined\n");
                dma_reserved_default_memory = rmem;
        }
#endif

        rmem->ops = &rmem_dma_ops;
        pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
                &rmem->base, (unsigned long)rmem->size / SZ_1M);
        return 0;
}

static int __init dma_init_reserved_memory(void)
{
        const struct reserved_mem_ops *ops;
        int ret;

        if (!dma_reserved_default_memory)
                return -ENOMEM;

        ops = dma_reserved_default_memory->ops;

        /*
         * We rely on rmem_dma_device_init() not propagating the error from
         * dma_assign_coherent_memory() when it is called with a NULL device.
         */
        ret = ops->device_init(dma_reserved_default_memory, NULL);

        if (!ret) {
                dma_coherent_default_memory = dma_reserved_default_memory->priv;
                pr_info("DMA: default coherent area is set\n");
        }

        return ret;
}
core_initcall(dma_init_reserved_memory);

RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
#endif
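
/*
 * Hedged device-tree sketch of a region this code consumes (node name,
 * label, addresses and sizes are illustrative only). "shared-dma-pool",
 * "no-map" and "linux,dma-default" match the properties handled in
 * rmem_dma_setup() above; a device binds the pool to itself via the
 * standard "memory-region" phandle.
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		coherent_pool: dma-pool@58000000 {
 *			compatible = "shared-dma-pool";
 *			reg = <0x58000000 0x100000>;
 *			no-map;
 *		};
 *	};
 */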