Lines Matching +full:page +full:- +full:size

1 // SPDX-License-Identifier: GPL-2.0+
4 * Copyright (c) 2010-2011 by Samsung Electronics.
17 * Various devices on embedded systems have no scatter-gather and/or
30 * inaccessible to the page allocator even if device drivers don't use it.
40 #include <asm/page.h>
45 #include <linux/dma-map-ops.h>
58 * The default global CMA area size can be defined in the kernel's .config.
61 * The size can be set in bytes or as a percentage of the total memory
64 * Users who want to set the size of the global CMA area for their system
69 static phys_addr_t size_cmdline __initdata = -1;
77 return -EINVAL; in early_cma()
84 if (*p != '-') { in early_cma()
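
For context, the cma= early parameter follows the grammar nn[KMG][@start[KMG][-end[KMG]]] (see Documentation/admin-guide/kernel-parameters.txt); when the end is omitted, the limit is derived as base + size. Below is a minimal userspace sketch of that parsing flow; memparse_sketch() is a hypothetical, simplified stand-in for the kernel's memparse() (number plus K/M/G suffix), for illustration only:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* hypothetical stand-in for the kernel's memparse() */
static uint64_t memparse_sketch(const char *p, char **retp)
{
	uint64_t v = strtoull(p, retp, 0);

	switch (**retp) {
	case 'G': v <<= 10;	/* fall through */
	case 'M': v <<= 10;	/* fall through */
	case 'K': v <<= 10; (*retp)++; break;
	}
	return v;
}

int main(void)
{
	char *p = "64M@0x10000000-0x30000000";
	uint64_t size, base = 0, limit = 0;

	size = memparse_sketch(p, &p);		/* 64 MiB */
	if (*p == '@') {
		base = memparse_sketch(p + 1, &p);
		if (*p == '-')
			limit = memparse_sketch(p + 1, &p);
		else
			limit = base + size;	/* as early_cma() derives it */
	}
	printf("size=%#llx base=%#llx limit=%#llx\n",
	       (unsigned long long)size, (unsigned long long)base,
	       (unsigned long long)limit);
	return 0;
}
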
204 * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
223 if (size_cmdline != -1) { in dma_contiguous_reserve()
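
dma_contiguous_reserve() is invoked once from early architecture setup, after memblock is ready but before the buddy allocator takes over; a cma= size on the command line (size_cmdline != -1) overrides the .config default. A hedged sketch of such a call site; the hook name and the DMA limit below are illustrative assumptions, not from the source:

/* illustrative early setup hook; real architectures derive the limit
 * from their DMA zone layout (e.g. a 32-bit addressable boundary) */
void __init example_arch_mem_init(void)
{
	dma_contiguous_reserve(SZ_4G - 1);
}
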
253 dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) in dma_contiguous_early_fixup() argument
258 * dma_contiguous_reserve_area() - reserve custom contiguous area
259 * @size: Size of the reserved area (in bytes),
274 int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base, in dma_contiguous_reserve_area() argument
280 ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed, in dma_contiguous_reserve_area()
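
dma_contiguous_reserve_area() wraps cma_declare_contiguous() and then applies the architecture-specific early fixup. A hedged usage sketch; the holder variable, function name, and base address are illustrative:

static struct cma *example_cma_area;

static void __init example_reserve_cma(void)
{
	int ret;

	/* fixed == true: fail rather than fall back to another range */
	ret = dma_contiguous_reserve_area(SZ_16M, 0x20000000, 0,
					  &example_cma_area, true);
	if (ret)
		pr_err("CMA: fixed reservation failed (%d)\n", ret);
}
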
293 * dma_alloc_from_contiguous() - allocate pages from contiguous area
304 struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, in dma_alloc_from_contiguous()
314 * dma_release_from_contiguous() - release allocated pages
323 bool dma_release_from_contiguous(struct device *dev, struct page *pages, in dma_release_from_contiguous()
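
These two helpers are the page-granular interface: the allocator takes a page count and an alignment order, and the release side returns true only if the pages actually came from the device's CMA area. A hedged pairing sketch; the function name is illustrative:

static int example_use_cma(struct device *dev)
{
	struct page *pages;

	/* four contiguous pages, naturally aligned to their order */
	pages = dma_alloc_from_contiguous(dev, 4, get_order(4 * PAGE_SIZE),
					  false);
	if (!pages)
		return -ENOMEM;

	/* ... map and use the buffer ... */

	dma_release_from_contiguous(dev, pages, 4);
	return 0;
}
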
329 static struct page *cma_alloc_aligned(struct cma *cma, size_t size, gfp_t gfp) in cma_alloc_aligned() argument
331 unsigned int align = min(get_order(size), CONFIG_CMA_ALIGNMENT); in cma_alloc_aligned()
333 return cma_alloc(cma, size >> PAGE_SHIFT, align, gfp & __GFP_NOWARN); in cma_alloc_aligned()
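
The clamp keeps large requests from demanding equally large alignment. A worked example, assuming 4 KiB pages and the default CONFIG_CMA_ALIGNMENT of 8 (a 1 MiB boundary):

/*
 *   size = SZ_256K -> get_order() == 6 -> align = 6 (256 KiB boundary)
 *   size = SZ_2M   -> get_order() == 9 -> align = 8 (clamped to 1 MiB)
 */
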
337 * dma_alloc_contiguous() - allocate contiguous pages
339 * @size: Requested allocation size.
343 * tries to use the per-numa cma; if the allocation fails, it falls back to
346 * Note that it bypasses one-page size allocations from the per-numa and in dma_alloc_contiguous()
347 * global areas, as the addresses within one page are always contiguous, so in dma_alloc_contiguous()
351 struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp) in dma_alloc_contiguous() argument
360 if (dev->cma_area) in dma_alloc_contiguous()
361 return cma_alloc_aligned(dev->cma_area, size, gfp); in dma_alloc_contiguous()
362 if (size <= PAGE_SIZE) in dma_alloc_contiguous()
368 struct page *page; in dma_alloc_contiguous() local
371 page = cma_alloc_aligned(cma, size, gfp); in dma_alloc_contiguous()
372 if (page) in dma_alloc_contiguous()
373 return page; in dma_alloc_contiguous()
378 page = cma_alloc_aligned(cma, size, gfp); in dma_alloc_contiguous()
379 if (page) in dma_alloc_contiguous()
380 return page; in dma_alloc_contiguous()
387 return cma_alloc_aligned(dma_contiguous_default_area, size, gfp); in dma_alloc_contiguous()
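
The lookup order is therefore: the device's own area, then (for multi-page requests) the per-node areas, then the default global area, with a NULL return telling the caller to fall back to the normal page allocator. A hedged sketch of that caller-side pattern (cf. the dma-direct code in kernel/dma/direct.c); the wrapper name is illustrative:

static struct page *example_alloc(struct device *dev, size_t size, gfp_t gfp)
{
	struct page *page;

	page = dma_alloc_contiguous(dev, size, gfp);
	if (!page)
		page = alloc_pages(gfp, get_order(size));
	return page;
}
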
391 * dma_free_contiguous() - release allocated pages
393 * @page: Pointer to the allocated pages.
394 * @size: Size of allocated pages.
399 * upon a false return.
401 void dma_free_contiguous(struct device *dev, struct page *page, size_t size) in dma_free_contiguous() argument
403 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; in dma_free_contiguous()
405 /* if dev has its own cma, free page from there */ in dma_free_contiguous()
406 if (dev->cma_area) { in dma_free_contiguous()
407 if (cma_release(dev->cma_area, page, count)) in dma_free_contiguous()
411 * otherwise, page is from either per-numa cma or default cma in dma_free_contiguous()
414 if (cma_release(dma_contiguous_pernuma_area[page_to_nid(page)], in dma_free_contiguous()
415 page, count)) in dma_free_contiguous()
417 if (cma_release(dma_contiguous_numa_area[page_to_nid(page)], in dma_free_contiguous()
418 page, count)) in dma_free_contiguous()
421 if (cma_release(dma_contiguous_default_area, page, count)) in dma_free_contiguous()
426 __free_pages(page, get_order(size)); in dma_free_contiguous()
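
Because cma_release() returns false for pages it does not own, the free path can simply try each area in turn and only then hand the pages back to the buddy allocator via __free_pages(). A caller thus never needs to remember where the pages came from; a hedged counterpart to the allocation sketch above:

static void example_free(struct device *dev, struct page *page, size_t size)
{
	/* handles device, per-node, default-CMA and plain pages alike */
	dma_free_contiguous(dev, page, size);
}
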
442 dev->cma_area = rmem->priv; in rmem_cma_device_init()
449 dev->cma_area = NULL; in rmem_cma_device_release()
459 unsigned long node = rmem->fdt_node; in rmem_cma_setup()
460 bool default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL); in rmem_cma_setup()
464 if (size_cmdline != -1 && default_cma) { in rmem_cma_setup()
466 rmem->name); in rmem_cma_setup()
467 return -EBUSY; in rmem_cma_setup()
471 of_get_flat_dt_prop(node, "no-map", NULL)) in rmem_cma_setup()
472 return -EINVAL; in rmem_cma_setup()
474 if (!IS_ALIGNED(rmem->base | rmem->size, CMA_MIN_ALIGNMENT_BYTES)) { in rmem_cma_setup()
476 return -EINVAL; in rmem_cma_setup()
479 err = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name, &cma); in rmem_cma_setup()
485 dma_contiguous_early_fixup(rmem->base, rmem->size); in rmem_cma_setup()
490 rmem->ops = &rmem_cma_ops; in rmem_cma_setup()
491 rmem->priv = cma; in rmem_cma_setup()
493 pr_info("Reserved memory: created CMA memory pool at %pa, size %lu MiB\n", in rmem_cma_setup()
494 &rmem->base, (unsigned long)rmem->size / SZ_1M); in rmem_cma_setup()
498 RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup);
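
On the devicetree side, a "shared-dma-pool" node becomes a CMA pool only if it carries the reusable property and is not marked no-map (hence the -EINVAL above); a device then binds to it through a memory-region phandle. A hedged driver-side sketch: the probe function is illustrative, while of_reserved_mem_device_init() is the real API whose ops path ends in rmem_cma_device_init() setting dev->cma_area:

#include <linux/platform_device.h>
#include <linux/of_reserved_mem.h>

static int example_probe(struct platform_device *pdev)
{
	/* attaches the node's first memory-region pool, if it has one */
	if (of_reserved_mem_device_init(&pdev->dev))
		dev_info(&pdev->dev, "no private CMA pool, using default\n");

	return 0;
}
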