/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface.  Uses for this include on-device special
 * memory, uncached memory, etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks.  This
 * is implemented by using atomic operations and retries on any
 * conflicts.  The disadvantage is that there may be livelocks in
 * extreme cases.  For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available.  If new memory is added to the pool, a lock still has to
 * be taken.  So any user relying on locklessness has to ensure that
 * sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg implementation,
 * the allocator can NOT be used in NMI handlers.  So code using the
 * allocator in NMI handlers should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 */


#ifndef __GENALLOC_H__
#define __GENALLOC_H__

#include <linux/types.h>
#include <linux/spinlock_types.h>
#include <linux/atomic.h>

struct device;
struct device_node;
struct gen_pool;
/**
 * typedef genpool_algo_t: Allocation callback function type definition
 * @map: Pointer to bitmap
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: optional additional data used by the callback
 * @pool: the pool being allocated from
 * @start_addr: start address of memory chunk
 */
typedef unsigned long (*genpool_algo_t)(unsigned long *map,
			unsigned long size,
			unsigned long start,
			unsigned int nr,
			void *data, struct gen_pool *pool,
			unsigned long start_addr);

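/*
 * Illustrative sketch (not part of this header; the helper name
 * my_first_fit is hypothetical): a minimal allocation callback matching
 * genpool_algo_t.  It scans for the first free run of @nr bits, much
 * like gen_pool_first_fit() does, ignoring @data, @pool and @start_addr.
 *
 *	static unsigned long my_first_fit(unsigned long *map,
 *			unsigned long size, unsigned long start,
 *			unsigned int nr, void *data,
 *			struct gen_pool *pool, unsigned long start_addr)
 *	{
 *		return bitmap_find_next_zero_area(map, size, start, nr, 0);
 *	}
 *
 * The callback returns the starting bit index of the chosen run; a
 * value >= @size tells the caller that no suitable run was found.
 */
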
/*
 *  General purpose special memory pool descriptor.
 */
struct gen_pool {
	spinlock_t lock;
	struct list_head chunks;	/* list of chunks in this pool */
	int min_alloc_order;		/* minimum allocation order */

	genpool_algo_t algo;		/* allocation function */
	void *data;

	const char *name;
};

/*
 *  General purpose special memory pool chunk descriptor.
 */
struct gen_pool_chunk {
	struct list_head next_chunk;	/* next chunk in pool */
	atomic_long_t avail;
	phys_addr_t phys_addr;		/* physical starting address of memory chunk */
	void *owner;			/* private data to retrieve at alloc time */
	unsigned long start_addr;	/* start address of memory chunk */
	unsigned long end_addr;		/* end address of memory chunk (inclusive) */
	unsigned long bits[];		/* bitmap for allocating memory chunk */
};

/*
 *  gen_pool data descriptor for gen_pool_first_fit_align.
 */
struct genpool_data_align {
	int align;		/* alignment by bytes for starting address */
};

/*
 *  gen_pool data descriptor for gen_pool_fixed_alloc.
 */
struct genpool_data_fixed {
	unsigned long offset;		/* The offset of the specific region */
};

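/*
 * Illustrative sketch (not part of this header): requesting a 64-byte
 * aligned block from an existing pool with gen_pool_alloc_algo() and a
 * struct genpool_data_align descriptor.  The pool pointer and the
 * 256-byte request size are hypothetical.
 *
 *	struct genpool_data_align align_data = { .align = 64 };
 *	unsigned long addr;
 *
 *	addr = gen_pool_alloc_algo(pool, 256, gen_pool_first_fit_align,
 *				   &align_data);
 *	if (!addr)
 *		return -ENOMEM;
 *
 * gen_pool_fixed_alloc() is used the same way, with a struct
 * genpool_data_fixed carrying the offset of the region to carve out.
 */
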
extern struct gen_pool *gen_pool_create(int, int);
extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long);
extern int gen_pool_add_owner(struct gen_pool *, unsigned long, phys_addr_t,
			     size_t, int, void *);

static inline int gen_pool_add_virt(struct gen_pool *pool, unsigned long addr,
		phys_addr_t phys, size_t size, int nid)
{
	return gen_pool_add_owner(pool, addr, phys, size, nid, NULL);
}

/**
 * gen_pool_add - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @addr: starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a -ve errno on failure.
 */
static inline int gen_pool_add(struct gen_pool *pool, unsigned long addr,
			       size_t size, int nid)
{
	return gen_pool_add_virt(pool, addr, -1, size, nid);
}
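
/*
 * Illustrative sketch (not part of this header): setting up a pool over
 * a driver-owned region.  The region address vaddr, its size and the
 * 32-byte minimum allocation granule (order 5) are hypothetical.
 *
 *	struct gen_pool *pool;
 *	int ret;
 *
 *	pool = gen_pool_create(5, -1);
 *	if (!pool)
 *		return -ENOMEM;
 *
 *	ret = gen_pool_add(pool, (unsigned long)vaddr, region_size, -1);
 *	if (ret) {
 *		gen_pool_destroy(pool);
 *		return ret;
 *	}
 */
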
extern void gen_pool_destroy(struct gen_pool *);
unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size,
		genpool_algo_t algo, void *data, void **owner);

static inline unsigned long gen_pool_alloc_owner(struct gen_pool *pool,
		size_t size, void **owner)
{
	return gen_pool_alloc_algo_owner(pool, size, pool->algo, pool->data,
			owner);
}

static inline unsigned long gen_pool_alloc_algo(struct gen_pool *pool,
		size_t size, genpool_algo_t algo, void *data)
{
	return gen_pool_alloc_algo_owner(pool, size, algo, data, NULL);
}

/**
 * gen_pool_alloc - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in NMI handlers on architectures without an
 * NMI-safe cmpxchg implementation.
 */
static inline unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
	return gen_pool_alloc_algo(pool, size, pool->algo, pool->data);
}

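/*
 * Illustrative sketch (not part of this header): allocating from and
 * returning memory to a pool set up as above.  The 128-byte request
 * size is hypothetical; note that 0 (not NULL) signals failure because
 * the allocator returns an unsigned long address.
 *
 *	unsigned long addr;
 *
 *	addr = gen_pool_alloc(pool, 128);
 *	if (!addr)
 *		return -ENOMEM;
 *	...
 *	gen_pool_free(pool, addr, 128);
 */
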
extern void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size,
		dma_addr_t *dma);
extern void *gen_pool_dma_alloc_algo(struct gen_pool *pool, size_t size,
		dma_addr_t *dma, genpool_algo_t algo, void *data);
extern void *gen_pool_dma_alloc_align(struct gen_pool *pool, size_t size,
		dma_addr_t *dma, int align);
extern void *gen_pool_dma_zalloc(struct gen_pool *pool, size_t size, dma_addr_t *dma);
extern void *gen_pool_dma_zalloc_algo(struct gen_pool *pool, size_t size,
		dma_addr_t *dma, genpool_algo_t algo, void *data);
extern void *gen_pool_dma_zalloc_align(struct gen_pool *pool, size_t size,
		dma_addr_t *dma, int align);
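
/*
 * Illustrative sketch (not part of this header): carving a DMA buffer
 * out of a pool whose chunks carry valid physical addresses (added with
 * gen_pool_add_virt() rather than gen_pool_add()).  The pool pointer
 * and buf_size are hypothetical.
 *
 *	dma_addr_t dma;
 *	void *vaddr;
 *
 *	vaddr = gen_pool_dma_alloc(pool, buf_size, &dma);
 *	if (!vaddr)
 *		return -ENOMEM;
 */
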
extern void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr,
		size_t size, void **owner);
static inline void gen_pool_free(struct gen_pool *pool, unsigned long addr,
		size_t size)
{
	gen_pool_free_owner(pool, addr, size, NULL);
}

extern void gen_pool_for_each_chunk(struct gen_pool *,
	void (*)(struct gen_pool *, struct gen_pool_chunk *, void *), void *);
extern size_t gen_pool_avail(struct gen_pool *);
extern size_t gen_pool_size(struct gen_pool *);

extern void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo,
		void *data);

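/*
 * Illustrative sketch (not part of this header): making 32-byte aligned
 * first-fit the pool's default algorithm so that plain gen_pool_alloc()
 * picks it up.  The descriptor must outlive the pool's use of it, since
 * only the pointer is stored; the pool pointer is hypothetical.
 *
 *	static struct genpool_data_align pool_align = { .align = 32 };
 *
 *	gen_pool_set_algo(pool, gen_pool_first_fit_align, &pool_align);
 */
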
extern unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr);

extern unsigned long gen_pool_fixed_alloc(unsigned long *map,
		unsigned long size, unsigned long start, unsigned int nr,
		void *data, struct gen_pool *pool, unsigned long start_addr);

extern unsigned long gen_pool_first_fit_align(unsigned long *map,
		unsigned long size, unsigned long start, unsigned int nr,
		void *data, struct gen_pool *pool, unsigned long start_addr);

extern unsigned long gen_pool_first_fit_order_align(unsigned long *map,
		unsigned long size, unsigned long start, unsigned int nr,
		void *data, struct gen_pool *pool, unsigned long start_addr);

extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr);

extern struct gen_pool *devm_gen_pool_create(struct device *dev,
		int min_alloc_order, int nid, const char *name);
extern struct gen_pool *gen_pool_get(struct device *dev, const char *name);

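/*
 * Illustrative sketch (not part of this header): creating a device-
 * managed pool in a driver's probe path and looking it up again later.
 * The device pointer, the order-5 granule and the "sram" name are
 * hypothetical.
 *
 *	struct gen_pool *pool;
 *
 *	pool = devm_gen_pool_create(dev, 5, dev_to_node(dev), "sram");
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 *
 * Later, gen_pool_get(dev, "sram") returns the same pool, or NULL if it
 * has not been created yet.
 */
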
extern bool gen_pool_has_addr(struct gen_pool *pool, unsigned long start,
			size_t size);

#ifdef CONFIG_OF
extern struct gen_pool *of_gen_pool_get(struct device_node *np,
	const char *propname, int index);
#else
static inline struct gen_pool *of_gen_pool_get(struct device_node *np,
	const char *propname, int index)
{
	return NULL;
}
#endif
#endif /* __GENALLOC_H__ */