xref: /linux/include/linux/cma.h (revision 3a39d672e7f48b8d6b91a09afa4b55352773b4b5)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __CMA_H__
3 #define __CMA_H__
4 
5 #include <linux/init.h>
6 #include <linux/types.h>
7 #include <linux/numa.h>
8 
9 #ifdef CONFIG_CMA_AREAS
10 #define MAX_CMA_AREAS	CONFIG_CMA_AREAS
11 #endif
12 
13 #define CMA_MAX_NAME 64
14 
15 /*
16  * Since the buddy allocator -- especially pageblock merging and
17  * alloc_contig_range() -- can deal with only some pageblocks of a
18  * higher-order page being MIGRATE_CMA, pageblock_nr_pages is the
19  * minimum CMA alignment.
20  */
20 #define CMA_MIN_ALIGNMENT_PAGES pageblock_nr_pages
21 #define CMA_MIN_ALIGNMENT_BYTES (PAGE_SIZE * CMA_MIN_ALIGNMENT_PAGES)
22 
23 struct cma;
24 
25 extern unsigned long totalcma_pages;
26 extern phys_addr_t cma_get_base(const struct cma *cma);
27 extern unsigned long cma_get_size(const struct cma *cma);
28 extern const char *cma_get_name(const struct cma *cma);
29 
30 extern int __init cma_declare_contiguous_nid(phys_addr_t base,
31 			phys_addr_t size, phys_addr_t limit,
32 			phys_addr_t alignment, unsigned int order_per_bit,
33 			bool fixed, const char *name, struct cma **res_cma,
34 			int nid);
cma_declare_contiguous(phys_addr_t base,phys_addr_t size,phys_addr_t limit,phys_addr_t alignment,unsigned int order_per_bit,bool fixed,const char * name,struct cma ** res_cma)35 static inline int __init cma_declare_contiguous(phys_addr_t base,
36 			phys_addr_t size, phys_addr_t limit,
37 			phys_addr_t alignment, unsigned int order_per_bit,
38 			bool fixed, const char *name, struct cma **res_cma)
39 {
40 	return cma_declare_contiguous_nid(base, size, limit, alignment,
41 			order_per_bit, fixed, name, res_cma, NUMA_NO_NODE);
42 }
43 extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
44 					unsigned int order_per_bit,
45 					const char *name,
46 					struct cma **res_cma);
47 extern struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int align,
48 			      bool no_warn);
49 extern bool cma_pages_valid(struct cma *cma, const struct page *pages, unsigned long count);
50 extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count);
51 
52 extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
53 
54 extern void cma_reserve_pages_on_error(struct cma *cma);
55 
56 #ifdef CONFIG_CMA
57 struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp);
58 bool cma_free_folio(struct cma *cma, const struct folio *folio);
59 #else
/* !CONFIG_CMA stub: CMA folio allocation is unavailable, always fails. */
static inline struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp)
{
	return NULL;	/* no CMA support compiled in */
}
64 
cma_free_folio(struct cma * cma,const struct folio * folio)65 static inline bool cma_free_folio(struct cma *cma, const struct folio *folio)
66 {
67 	return false;
68 }
69 #endif
70 
71 #endif
72