xref: /linux/include/linux/vmalloc.h (revision 7203ca412fc8e8a0588e9adc0f777d3163f8dff3)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMALLOC_H
#define _LINUX_VMALLOC_H

#include <linux/alloc_tag.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <asm/page.h>		/* pgprot_t */
#include <linux/rbtree.h>
#include <linux/overflow.h>

#include <asm/vmalloc.h>

struct vm_area_struct;		/* vma defining user mapping in mm_types.h */
struct notifier_block;		/* in notifier.h */
struct iov_iter;		/* in uio.h */

/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP		0x00000001	/* ioremap() and friends */
#define VM_ALLOC		0x00000002	/* vmalloc() */
#define VM_MAP			0x00000004	/* vmap()ed pages */
#define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
#define VM_DMA_COHERENT		0x00000010	/* dma_alloc_coherent */
#define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
#define VM_NO_GUARD		0x00000040	/* ***DANGEROUS*** don't add guard page */
#define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */
#define VM_FLUSH_RESET_PERMS	0x00000100	/* reset direct map and flush TLB on unmap, can't be freed in atomic context */
#define VM_MAP_PUT_PAGES	0x00000200	/* put pages and free array in vfree */
#define VM_ALLOW_HUGE_VMAP	0x00000400	/* Allow for huge pages on archs with HAVE_ARCH_HUGE_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
	!defined(CONFIG_KASAN_VMALLOC)
#define VM_DEFER_KMEMLEAK	0x00000800	/* defer kmemleak object creation */
#else
#define VM_DEFER_KMEMLEAK	0
#endif
#define VM_SPARSE		0x00001000	/* sparse vm_area. not all pages are present. */

/* bits [20..32] reserved for arch specific ioremap internals */

/*
 * Maximum alignment for ioremap() regions.
 * Can be overridden by arch-specific value.
 */
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
#endif
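
/*
 * A worked example of the default: with 4 KiB pages (PAGE_SHIFT == 12),
 * IOREMAP_MAX_ORDER is 7 + 12 = 19, i.e. ioremap() regions are aligned
 * to at most 1 << 19 = 512 KiB, which is 2^7 = 128 pages.
 */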

struct vm_struct {
	union {
		struct vm_struct *next;	  /* Early registration of vm_areas. */
		struct llist_node llnode; /* Asynchronous freeing on error paths. */
	};

	void			*addr;
	unsigned long		size;
	unsigned long		flags;
	struct page		**pages;
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
	unsigned int		page_order;
#endif
	unsigned int		nr_pages;
	phys_addr_t		phys_addr;
	const void		*caller;
	unsigned long		requested_size;
};
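
/*
 * Illustrative sketch (not part of this header): given a vmalloc()'ed
 * address, the backing vm_struct and its flags can be inspected with
 * find_vm_area(), declared further below:
 *
 *	struct vm_struct *vm = find_vm_area(addr);
 *
 *	if (vm && (vm->flags & VM_ALLOC))
 *		pr_info("%px: %lu bytes in %u pages\n",
 *			vm->addr, vm->size, vm->nr_pages);
 */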

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;

	struct rb_node rb_node;         /* address sorted rbtree */
	struct list_head list;          /* address sorted list */

	/*
	 * The following two fields can share storage, because a vmap_area
	 * object is always in exactly one of two trees:
	 *    1) the "free" tree (root is free_vmap_area_root)
	 *    2) the "busy" tree (root is vmap_area_root)
	 */
	union {
		unsigned long subtree_max_size; /* in "free" tree */
		struct vm_struct *vm;           /* in "busy" tree */
	};
	unsigned long flags; /* mark type of vm_map_ram area */
};

/* archs that select HAVE_ARCH_HUGE_VMAP should override one or more of these */
#ifndef arch_vmap_p4d_supported
static inline bool arch_vmap_p4d_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pud_supported
static inline bool arch_vmap_pud_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pmd_supported
static inline bool arch_vmap_pmd_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pte_range_map_size
static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
							 u64 pfn, unsigned int max_page_shift)
{
	return PAGE_SIZE;
}
#endif

#ifndef arch_vmap_pte_range_unmap_size
static inline unsigned long arch_vmap_pte_range_unmap_size(unsigned long addr,
							   pte_t *ptep)
{
	return PAGE_SIZE;
}
#endif

#ifndef arch_vmap_pte_supported_shift
static inline int arch_vmap_pte_supported_shift(unsigned long size)
{
	return PAGE_SHIFT;
}
#endif

#ifndef arch_vmap_pgprot_tagged
static inline pgprot_t arch_vmap_pgprot_tagged(pgprot_t prot)
{
	return prot;
}
#endif
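
/*
 * A minimal sketch of how an architecture can override one of the hooks
 * above: define the macro to the function name in its own asm/vmalloc.h
 * so the generic fallback here is skipped. The body is illustrative only;
 * a real implementation would gate this on the architecture's actual
 * huge-mapping support:
 *
 *	#define arch_vmap_pmd_supported arch_vmap_pmd_supported
 *	static inline bool arch_vmap_pmd_supported(pgprot_t prot)
 *	{
 *		return true;	// PMD-sized vmalloc mappings are usable
 *	}
 */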

/*
 *	High-level APIs for driver use
 */
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
extern void vm_unmap_aliases(void);
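
/*
 * Illustrative sketch: a transient mapping of a caller-provided page
 * array (identifiers hypothetical). vm_unmap_ram() must be passed the
 * same count that was used to map:
 *
 *	void *va = vm_map_ram(pages, nr_pages, NUMA_NO_NODE);
 *
 *	if (va) {
 *		memcpy(dst, va, nr_pages * PAGE_SIZE);
 *		vm_unmap_ram(va, nr_pages);
 *	}
 */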

extern void *vmalloc_noprof(unsigned long size) __alloc_size(1);
#define vmalloc(...)		alloc_hooks(vmalloc_noprof(__VA_ARGS__))

extern void *vzalloc_noprof(unsigned long size) __alloc_size(1);
#define vzalloc(...)		alloc_hooks(vzalloc_noprof(__VA_ARGS__))

extern void *vmalloc_user_noprof(unsigned long size) __alloc_size(1);
#define vmalloc_user(...)	alloc_hooks(vmalloc_user_noprof(__VA_ARGS__))
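
/*
 * Illustrative sketch of the common allocate/free pattern. These
 * allocations may sleep and so must not be made from atomic context;
 * the names below are hypothetical:
 *
 *	struct foo *tbl = vzalloc(array_size(nents, sizeof(*tbl)));
 *
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	vfree(tbl);
 */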

extern void *vmalloc_node_noprof(unsigned long size, int node) __alloc_size(1);
#define vmalloc_node(...)	alloc_hooks(vmalloc_node_noprof(__VA_ARGS__))

extern void *vzalloc_node_noprof(unsigned long size, int node) __alloc_size(1);
#define vzalloc_node(...)	alloc_hooks(vzalloc_node_noprof(__VA_ARGS__))

extern void *vmalloc_32_noprof(unsigned long size) __alloc_size(1);
#define vmalloc_32(...)		alloc_hooks(vmalloc_32_noprof(__VA_ARGS__))

extern void *vmalloc_32_user_noprof(unsigned long size) __alloc_size(1);
#define vmalloc_32_user(...)	alloc_hooks(vmalloc_32_user_noprof(__VA_ARGS__))

extern void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
#define __vmalloc(...)		alloc_hooks(__vmalloc_noprof(__VA_ARGS__))

extern void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller) __alloc_size(1);
#define __vmalloc_node_range(...)	alloc_hooks(__vmalloc_node_range_noprof(__VA_ARGS__))

void *__vmalloc_node_noprof(unsigned long size, unsigned long align, gfp_t gfp_mask,
		int node, const void *caller) __alloc_size(1);
#define __vmalloc_node(...)	alloc_hooks(__vmalloc_node_noprof(__VA_ARGS__))

void *vmalloc_huge_node_noprof(unsigned long size, gfp_t gfp_mask, int node) __alloc_size(1);
#define vmalloc_huge_node(...)	alloc_hooks(vmalloc_huge_node_noprof(__VA_ARGS__))

static inline void *vmalloc_huge(unsigned long size, gfp_t gfp_mask)
{
	return vmalloc_huge_node(size, gfp_mask, NUMA_NO_NODE);
}
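
/*
 * Illustrative sketch: vmalloc_huge() may back large allocations with
 * huge pages (VM_ALLOW_HUGE_VMAP) where the architecture supports it,
 * falling back to base pages otherwise; the result is freed with vfree()
 * as usual. The size is only an example:
 *
 *	void *buf = vmalloc_huge(16 * 1024 * 1024, GFP_KERNEL);
 */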

extern void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
#define __vmalloc_array(...)	alloc_hooks(__vmalloc_array_noprof(__VA_ARGS__))

extern void *vmalloc_array_noprof(size_t n, size_t size) __alloc_size(1, 2);
#define vmalloc_array(...)	alloc_hooks(vmalloc_array_noprof(__VA_ARGS__))

extern void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
#define __vcalloc(...)		alloc_hooks(__vcalloc_noprof(__VA_ARGS__))

extern void *vcalloc_noprof(size_t n, size_t size) __alloc_size(1, 2);
#define vcalloc(...)		alloc_hooks(vcalloc_noprof(__VA_ARGS__))
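
/*
 * Illustrative sketch: vcalloc() allocates a zeroed array and checks the
 * n * size multiplication for overflow, so it is preferred over an
 * open-coded vzalloc(n * size). Names are hypothetical:
 *
 *	struct entry *arr = vcalloc(nr_entries, sizeof(*arr));
 *
 *	if (!arr)
 *		return -ENOMEM;
 */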

void *__must_check vrealloc_node_align_noprof(const void *p, size_t size,
		unsigned long align, gfp_t flags, int nid) __realloc_size(2);
#define vrealloc_node_noprof(_p, _s, _f, _nid)	\
	vrealloc_node_align_noprof(_p, _s, 1, _f, _nid)
#define vrealloc_noprof(_p, _s, _f)		\
	vrealloc_node_align_noprof(_p, _s, 1, _f, NUMA_NO_NODE)
#define vrealloc_node_align(...)		alloc_hooks(vrealloc_node_align_noprof(__VA_ARGS__))
#define vrealloc_node(...)			alloc_hooks(vrealloc_node_noprof(__VA_ARGS__))
#define vrealloc(...)				alloc_hooks(vrealloc_noprof(__VA_ARGS__))
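
/*
 * Illustrative sketch: growing a buffer with vrealloc(). As with
 * krealloc(), a temporary holds the result so the old buffer is not
 * leaked on failure:
 *
 *	void *bigger = vrealloc(buf, new_size, GFP_KERNEL);
 *
 *	if (!bigger)
 *		return -ENOMEM;
 *	buf = bigger;
 */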

extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);

extern void *vmap(struct page **pages, unsigned int count,
			unsigned long flags, pgprot_t prot);
void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot);
extern void vunmap(const void *addr);
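
/*
 * Illustrative sketch: giving an existing page array a virtually
 * contiguous kernel mapping (identifiers hypothetical). vunmap() only
 * removes the mapping; the pages themselves are not released unless
 * VM_MAP_PUT_PAGES was passed in flags:
 *
 *	void *va = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	...
 *	vunmap(va);
 */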

extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
				       unsigned long uaddr, void *kaddr,
				       unsigned long pgoff, unsigned long size);

extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
							unsigned long pgoff);
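
/*
 * Illustrative sketch: a driver ->mmap() handler exposing a buffer that
 * was allocated with vmalloc_user() (which marks the area VM_USERMAP).
 * All names are hypothetical:
 *
 *	static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, my_dev_buf, vma->vm_pgoff);
 *	}
 */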

int vmap_pages_range(unsigned long addr, unsigned long end, pgprot_t prot,
		     struct page **pages, unsigned int page_shift);

/*
 *	Low-level APIs (not for driver use!)
 */

static inline size_t get_vm_area_size(const struct vm_struct *area)
{
	if (!(area->flags & VM_NO_GUARD))
		/* return actual size without guard page */
		return area->size - PAGE_SIZE;
	else
		return area->size;
}
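
/*
 * A worked example, assuming 4 KiB pages: a vmalloc(4096) area has
 * area->size == 8192 (one page of memory plus one guard page), so
 * get_vm_area_size() returns the usable 4096. With VM_NO_GUARD there is
 * no guard page and area->size is already the usable size.
 */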

extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
					unsigned long flags, const void *caller);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
					unsigned long flags,
					unsigned long start, unsigned long end,
					const void *caller);
void free_vm_area(struct vm_struct *area);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);
struct vmap_area *find_vmap_area(unsigned long addr);

static inline bool is_vm_area_hugepages(const void *addr)
{
	/*
	 * This may not tell with 100% certainty whether the area is mapped
	 * with page table entries larger than PAGE_SIZE: even if the
	 * architecture indicates that larger sizes are available, nothing
	 * prevents it from deciding not to use them. This only reflects the
	 * size of the physical pages allocated by the vmalloc layer.
	 */
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
	return find_vm_area(addr)->page_order > 0;
#else
	return false;
#endif
}

/* for /proc/kcore */
long vread_iter(struct iov_iter *iter, const char *addr, size_t count);

/*
 *	Internals. Don't use.
 */
__init void vm_area_add_early(struct vm_struct *vm);
__init void vm_area_register_early(struct vm_struct *vm, size_t align);

int register_vmap_purge_notifier(struct notifier_block *nb);
int unregister_vmap_purge_notifier(struct notifier_block *nb);

#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)

unsigned long vmalloc_nr_pages(void);

int vm_area_map_pages(struct vm_struct *area, unsigned long start,
		      unsigned long end, struct page **pages);
void vm_area_unmap_pages(struct vm_struct *area, unsigned long start,
			 unsigned long end);
void vunmap_range(unsigned long addr, unsigned long end);

static inline void set_vm_flush_reset_perms(void *addr)
{
	struct vm_struct *vm = find_vm_area(addr);

	if (vm)
		vm->flags |= VM_FLUSH_RESET_PERMS;
}
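
/*
 * Illustrative sketch: the intended pattern for vmalloc'ed memory whose
 * permissions are changed after allocation (e.g. made read-only or
 * executable). The flag must be set before any permission change so that
 * vfree() knows to reset the direct map and flush the TLB; the permission
 * helpers come from <linux/set_memory.h>:
 *
 *	void *p = __vmalloc(size, GFP_KERNEL);
 *
 *	set_vm_flush_reset_perms(p);
 *	set_memory_ro((unsigned long)p, size >> PAGE_SHIFT);
 *	...
 *	vfree(p);
 */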
#else  /* !CONFIG_MMU */
#define VMALLOC_TOTAL 0UL

static inline unsigned long vmalloc_nr_pages(void) { return 0; }
static inline void set_vm_flush_reset_perms(void *addr) {}
#endif /* CONFIG_MMU */

#if defined(CONFIG_MMU) && defined(CONFIG_SMP)
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align);

void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
#else
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
		const size_t *sizes, int nr_vms,
		size_t align)
{
	return NULL;
}

static inline void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) {}
#endif

#if defined(CONFIG_MMU) && defined(CONFIG_PRINTK)
bool vmalloc_dump_obj(void *object);
#else
static inline bool vmalloc_dump_obj(void *object) { return false; }
#endif

unsigned int memalloc_apply_gfp_scope(gfp_t gfp_mask);
void memalloc_restore_scope(unsigned int flags);
#endif /* _LINUX_VMALLOC_H */