xref: /linux/mm/vmalloc.c (revision 8804d970fab45726b3c7cd7f240b31122aa94219)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  Copyright (C) 1993  Linus Torvalds
4  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
5  *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
6  *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
7  *  Numa awareness, Christoph Lameter, SGI, June 2005
8  *  Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019
9  */
10 
11 #include <linux/vmalloc.h>
12 #include <linux/mm.h>
13 #include <linux/module.h>
14 #include <linux/highmem.h>
15 #include <linux/sched/signal.h>
16 #include <linux/slab.h>
17 #include <linux/spinlock.h>
18 #include <linux/interrupt.h>
19 #include <linux/proc_fs.h>
20 #include <linux/seq_file.h>
21 #include <linux/set_memory.h>
22 #include <linux/debugobjects.h>
23 #include <linux/kallsyms.h>
24 #include <linux/list.h>
25 #include <linux/notifier.h>
26 #include <linux/rbtree.h>
27 #include <linux/xarray.h>
28 #include <linux/io.h>
29 #include <linux/rcupdate.h>
30 #include <linux/pfn.h>
31 #include <linux/kmemleak.h>
32 #include <linux/atomic.h>
33 #include <linux/compiler.h>
34 #include <linux/memcontrol.h>
35 #include <linux/llist.h>
36 #include <linux/uio.h>
37 #include <linux/bitops.h>
38 #include <linux/rbtree_augmented.h>
39 #include <linux/overflow.h>
40 #include <linux/pgtable.h>
41 #include <linux/hugetlb.h>
42 #include <linux/sched/mm.h>
43 #include <asm/tlbflush.h>
44 #include <asm/shmparam.h>
45 #include <linux/page_owner.h>
46 
47 #define CREATE_TRACE_POINTS
48 #include <trace/events/vmalloc.h>
49 
50 #include "internal.h"
51 #include "pgalloc-track.h"
52 
53 #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
54 static unsigned int __ro_after_init ioremap_max_page_shift = BITS_PER_LONG - 1;
55 
set_nohugeiomap(char * str)56 static int __init set_nohugeiomap(char *str)
57 {
58 	ioremap_max_page_shift = PAGE_SHIFT;
59 	return 0;
60 }
61 early_param("nohugeiomap", set_nohugeiomap);
62 #else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
63 static const unsigned int ioremap_max_page_shift = PAGE_SHIFT;
64 #endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */
65 
66 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
67 static bool __ro_after_init vmap_allow_huge = true;
68 
set_nohugevmalloc(char * str)69 static int __init set_nohugevmalloc(char *str)
70 {
71 	vmap_allow_huge = false;
72 	return 0;
73 }
74 early_param("nohugevmalloc", set_nohugevmalloc);
75 #else /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
76 static const bool vmap_allow_huge = false;
77 #endif	/* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
78 
is_vmalloc_addr(const void * x)79 bool is_vmalloc_addr(const void *x)
80 {
81 	unsigned long addr = (unsigned long)kasan_reset_tag(x);
82 
83 	return addr >= VMALLOC_START && addr < VMALLOC_END;
84 }
85 EXPORT_SYMBOL(is_vmalloc_addr);
86 
87 struct vfree_deferred {
88 	struct llist_head list;
89 	struct work_struct wq;
90 };
91 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
92 
93 /*** Page table manipulation functions ***/
vmap_pte_range(pmd_t * pmd,unsigned long addr,unsigned long end,phys_addr_t phys_addr,pgprot_t prot,unsigned int max_page_shift,pgtbl_mod_mask * mask)94 static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
95 			phys_addr_t phys_addr, pgprot_t prot,
96 			unsigned int max_page_shift, pgtbl_mod_mask *mask)
97 {
98 	pte_t *pte;
99 	u64 pfn;
100 	struct page *page;
101 	unsigned long size = PAGE_SIZE;
102 
103 	pfn = phys_addr >> PAGE_SHIFT;
104 	pte = pte_alloc_kernel_track(pmd, addr, mask);
105 	if (!pte)
106 		return -ENOMEM;
107 
108 	arch_enter_lazy_mmu_mode();
109 
110 	do {
111 		if (unlikely(!pte_none(ptep_get(pte)))) {
112 			if (pfn_valid(pfn)) {
113 				page = pfn_to_page(pfn);
114 				dump_page(page, "remapping already mapped page");
115 			}
116 			BUG();
117 		}
118 
119 #ifdef CONFIG_HUGETLB_PAGE
120 		size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift);
121 		if (size != PAGE_SIZE) {
122 			pte_t entry = pfn_pte(pfn, prot);
123 
124 			entry = arch_make_huge_pte(entry, ilog2(size), 0);
125 			set_huge_pte_at(&init_mm, addr, pte, entry, size);
126 			pfn += PFN_DOWN(size);
127 			continue;
128 		}
129 #endif
130 		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
131 		pfn++;
132 	} while (pte += PFN_DOWN(size), addr += size, addr != end);
133 
134 	arch_leave_lazy_mmu_mode();
135 	*mask |= PGTBL_PTE_MODIFIED;
136 	return 0;
137 }
138 
vmap_try_huge_pmd(pmd_t * pmd,unsigned long addr,unsigned long end,phys_addr_t phys_addr,pgprot_t prot,unsigned int max_page_shift)139 static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
140 			phys_addr_t phys_addr, pgprot_t prot,
141 			unsigned int max_page_shift)
142 {
143 	if (max_page_shift < PMD_SHIFT)
144 		return 0;
145 
146 	if (!arch_vmap_pmd_supported(prot))
147 		return 0;
148 
149 	if ((end - addr) != PMD_SIZE)
150 		return 0;
151 
152 	if (!IS_ALIGNED(addr, PMD_SIZE))
153 		return 0;
154 
155 	if (!IS_ALIGNED(phys_addr, PMD_SIZE))
156 		return 0;
157 
158 	if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
159 		return 0;
160 
161 	return pmd_set_huge(pmd, phys_addr, prot);
162 }
163 
vmap_pmd_range(pud_t * pud,unsigned long addr,unsigned long end,phys_addr_t phys_addr,pgprot_t prot,unsigned int max_page_shift,pgtbl_mod_mask * mask)164 static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
165 			phys_addr_t phys_addr, pgprot_t prot,
166 			unsigned int max_page_shift, pgtbl_mod_mask *mask)
167 {
168 	pmd_t *pmd;
169 	unsigned long next;
170 
171 	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
172 	if (!pmd)
173 		return -ENOMEM;
174 	do {
175 		next = pmd_addr_end(addr, end);
176 
177 		if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot,
178 					max_page_shift)) {
179 			*mask |= PGTBL_PMD_MODIFIED;
180 			continue;
181 		}
182 
183 		if (vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask))
184 			return -ENOMEM;
185 	} while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
186 	return 0;
187 }
188 
vmap_try_huge_pud(pud_t * pud,unsigned long addr,unsigned long end,phys_addr_t phys_addr,pgprot_t prot,unsigned int max_page_shift)189 static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
190 			phys_addr_t phys_addr, pgprot_t prot,
191 			unsigned int max_page_shift)
192 {
193 	if (max_page_shift < PUD_SHIFT)
194 		return 0;
195 
196 	if (!arch_vmap_pud_supported(prot))
197 		return 0;
198 
199 	if ((end - addr) != PUD_SIZE)
200 		return 0;
201 
202 	if (!IS_ALIGNED(addr, PUD_SIZE))
203 		return 0;
204 
205 	if (!IS_ALIGNED(phys_addr, PUD_SIZE))
206 		return 0;
207 
208 	if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
209 		return 0;
210 
211 	return pud_set_huge(pud, phys_addr, prot);
212 }
213 
vmap_pud_range(p4d_t * p4d,unsigned long addr,unsigned long end,phys_addr_t phys_addr,pgprot_t prot,unsigned int max_page_shift,pgtbl_mod_mask * mask)214 static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
215 			phys_addr_t phys_addr, pgprot_t prot,
216 			unsigned int max_page_shift, pgtbl_mod_mask *mask)
217 {
218 	pud_t *pud;
219 	unsigned long next;
220 
221 	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
222 	if (!pud)
223 		return -ENOMEM;
224 	do {
225 		next = pud_addr_end(addr, end);
226 
227 		if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot,
228 					max_page_shift)) {
229 			*mask |= PGTBL_PUD_MODIFIED;
230 			continue;
231 		}
232 
233 		if (vmap_pmd_range(pud, addr, next, phys_addr, prot,
234 					max_page_shift, mask))
235 			return -ENOMEM;
236 	} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
237 	return 0;
238 }
239 
vmap_try_huge_p4d(p4d_t * p4d,unsigned long addr,unsigned long end,phys_addr_t phys_addr,pgprot_t prot,unsigned int max_page_shift)240 static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
241 			phys_addr_t phys_addr, pgprot_t prot,
242 			unsigned int max_page_shift)
243 {
244 	if (max_page_shift < P4D_SHIFT)
245 		return 0;
246 
247 	if (!arch_vmap_p4d_supported(prot))
248 		return 0;
249 
250 	if ((end - addr) != P4D_SIZE)
251 		return 0;
252 
253 	if (!IS_ALIGNED(addr, P4D_SIZE))
254 		return 0;
255 
256 	if (!IS_ALIGNED(phys_addr, P4D_SIZE))
257 		return 0;
258 
259 	if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
260 		return 0;
261 
262 	return p4d_set_huge(p4d, phys_addr, prot);
263 }
264 
vmap_p4d_range(pgd_t * pgd,unsigned long addr,unsigned long end,phys_addr_t phys_addr,pgprot_t prot,unsigned int max_page_shift,pgtbl_mod_mask * mask)265 static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
266 			phys_addr_t phys_addr, pgprot_t prot,
267 			unsigned int max_page_shift, pgtbl_mod_mask *mask)
268 {
269 	p4d_t *p4d;
270 	unsigned long next;
271 
272 	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
273 	if (!p4d)
274 		return -ENOMEM;
275 	do {
276 		next = p4d_addr_end(addr, end);
277 
278 		if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot,
279 					max_page_shift)) {
280 			*mask |= PGTBL_P4D_MODIFIED;
281 			continue;
282 		}
283 
284 		if (vmap_pud_range(p4d, addr, next, phys_addr, prot,
285 					max_page_shift, mask))
286 			return -ENOMEM;
287 	} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
288 	return 0;
289 }
290 
vmap_range_noflush(unsigned long addr,unsigned long end,phys_addr_t phys_addr,pgprot_t prot,unsigned int max_page_shift)291 static int vmap_range_noflush(unsigned long addr, unsigned long end,
292 			phys_addr_t phys_addr, pgprot_t prot,
293 			unsigned int max_page_shift)
294 {
295 	pgd_t *pgd;
296 	unsigned long start;
297 	unsigned long next;
298 	int err;
299 	pgtbl_mod_mask mask = 0;
300 
301 	might_sleep();
302 	BUG_ON(addr >= end);
303 
304 	start = addr;
305 	pgd = pgd_offset_k(addr);
306 	do {
307 		next = pgd_addr_end(addr, end);
308 		err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
309 					max_page_shift, &mask);
310 		if (err)
311 			break;
312 	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);
313 
314 	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
315 		arch_sync_kernel_mappings(start, end);
316 
317 	return err;
318 }
319 
vmap_page_range(unsigned long addr,unsigned long end,phys_addr_t phys_addr,pgprot_t prot)320 int vmap_page_range(unsigned long addr, unsigned long end,
321 		    phys_addr_t phys_addr, pgprot_t prot)
322 {
323 	int err;
324 
325 	err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
326 				 ioremap_max_page_shift);
327 	flush_cache_vmap(addr, end);
328 	if (!err)
329 		err = kmsan_ioremap_page_range(addr, end, phys_addr, prot,
330 					       ioremap_max_page_shift);
331 	return err;
332 }
333 
ioremap_page_range(unsigned long addr,unsigned long end,phys_addr_t phys_addr,pgprot_t prot)334 int ioremap_page_range(unsigned long addr, unsigned long end,
335 		phys_addr_t phys_addr, pgprot_t prot)
336 {
337 	struct vm_struct *area;
338 
339 	area = find_vm_area((void *)addr);
340 	if (!area || !(area->flags & VM_IOREMAP)) {
341 		WARN_ONCE(1, "vm_area at addr %lx is not marked as VM_IOREMAP\n", addr);
342 		return -EINVAL;
343 	}
344 	if (addr != (unsigned long)area->addr ||
345 	    (void *)end != area->addr + get_vm_area_size(area)) {
346 		WARN_ONCE(1, "ioremap request [%lx,%lx) doesn't match vm_area [%lx, %lx)\n",
347 			  addr, end, (long)area->addr,
348 			  (long)area->addr + get_vm_area_size(area));
349 		return -ERANGE;
350 	}
351 	return vmap_page_range(addr, end, phys_addr, prot);
352 }
353 
vunmap_pte_range(pmd_t * pmd,unsigned long addr,unsigned long end,pgtbl_mod_mask * mask)354 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
355 			     pgtbl_mod_mask *mask)
356 {
357 	pte_t *pte;
358 	pte_t ptent;
359 	unsigned long size = PAGE_SIZE;
360 
361 	pte = pte_offset_kernel(pmd, addr);
362 	arch_enter_lazy_mmu_mode();
363 
364 	do {
365 #ifdef CONFIG_HUGETLB_PAGE
366 		size = arch_vmap_pte_range_unmap_size(addr, pte);
367 		if (size != PAGE_SIZE) {
368 			if (WARN_ON(!IS_ALIGNED(addr, size))) {
369 				addr = ALIGN_DOWN(addr, size);
370 				pte = PTR_ALIGN_DOWN(pte, sizeof(*pte) * (size >> PAGE_SHIFT));
371 			}
372 			ptent = huge_ptep_get_and_clear(&init_mm, addr, pte, size);
373 			if (WARN_ON(end - addr < size))
374 				size = end - addr;
375 		} else
376 #endif
377 			ptent = ptep_get_and_clear(&init_mm, addr, pte);
378 		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
379 	} while (pte += (size >> PAGE_SHIFT), addr += size, addr != end);
380 
381 	arch_leave_lazy_mmu_mode();
382 	*mask |= PGTBL_PTE_MODIFIED;
383 }
384 
vunmap_pmd_range(pud_t * pud,unsigned long addr,unsigned long end,pgtbl_mod_mask * mask)385 static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
386 			     pgtbl_mod_mask *mask)
387 {
388 	pmd_t *pmd;
389 	unsigned long next;
390 	int cleared;
391 
392 	pmd = pmd_offset(pud, addr);
393 	do {
394 		next = pmd_addr_end(addr, end);
395 
396 		cleared = pmd_clear_huge(pmd);
397 		if (cleared || pmd_bad(*pmd))
398 			*mask |= PGTBL_PMD_MODIFIED;
399 
400 		if (cleared) {
401 			WARN_ON(next - addr < PMD_SIZE);
402 			continue;
403 		}
404 		if (pmd_none_or_clear_bad(pmd))
405 			continue;
406 		vunmap_pte_range(pmd, addr, next, mask);
407 
408 		cond_resched();
409 	} while (pmd++, addr = next, addr != end);
410 }
411 
vunmap_pud_range(p4d_t * p4d,unsigned long addr,unsigned long end,pgtbl_mod_mask * mask)412 static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
413 			     pgtbl_mod_mask *mask)
414 {
415 	pud_t *pud;
416 	unsigned long next;
417 	int cleared;
418 
419 	pud = pud_offset(p4d, addr);
420 	do {
421 		next = pud_addr_end(addr, end);
422 
423 		cleared = pud_clear_huge(pud);
424 		if (cleared || pud_bad(*pud))
425 			*mask |= PGTBL_PUD_MODIFIED;
426 
427 		if (cleared) {
428 			WARN_ON(next - addr < PUD_SIZE);
429 			continue;
430 		}
431 		if (pud_none_or_clear_bad(pud))
432 			continue;
433 		vunmap_pmd_range(pud, addr, next, mask);
434 	} while (pud++, addr = next, addr != end);
435 }
436 
vunmap_p4d_range(pgd_t * pgd,unsigned long addr,unsigned long end,pgtbl_mod_mask * mask)437 static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
438 			     pgtbl_mod_mask *mask)
439 {
440 	p4d_t *p4d;
441 	unsigned long next;
442 
443 	p4d = p4d_offset(pgd, addr);
444 	do {
445 		next = p4d_addr_end(addr, end);
446 
447 		p4d_clear_huge(p4d);
448 		if (p4d_bad(*p4d))
449 			*mask |= PGTBL_P4D_MODIFIED;
450 
451 		if (p4d_none_or_clear_bad(p4d))
452 			continue;
453 		vunmap_pud_range(p4d, addr, next, mask);
454 	} while (p4d++, addr = next, addr != end);
455 }
456 
457 /*
458  * vunmap_range_noflush is similar to vunmap_range, but does not
459  * flush caches or TLBs.
460  *
461  * The caller is responsible for calling flush_cache_vmap() before calling
462  * this function, and flush_tlb_kernel_range after it has returned
463  * successfully (and before the addresses are expected to cause a page fault
464  * or be re-mapped for something else, if TLB flushes are being delayed or
465  * coalesced).
466  *
467  * This is an internal function only. Do not use outside mm/.
468  */
__vunmap_range_noflush(unsigned long start,unsigned long end)469 void __vunmap_range_noflush(unsigned long start, unsigned long end)
470 {
471 	unsigned long next;
472 	pgd_t *pgd;
473 	unsigned long addr = start;
474 	pgtbl_mod_mask mask = 0;
475 
476 	BUG_ON(addr >= end);
477 	pgd = pgd_offset_k(addr);
478 	do {
479 		next = pgd_addr_end(addr, end);
480 		if (pgd_bad(*pgd))
481 			mask |= PGTBL_PGD_MODIFIED;
482 		if (pgd_none_or_clear_bad(pgd))
483 			continue;
484 		vunmap_p4d_range(pgd, addr, next, &mask);
485 	} while (pgd++, addr = next, addr != end);
486 
487 	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
488 		arch_sync_kernel_mappings(start, end);
489 }
490 
vunmap_range_noflush(unsigned long start,unsigned long end)491 void vunmap_range_noflush(unsigned long start, unsigned long end)
492 {
493 	kmsan_vunmap_range_noflush(start, end);
494 	__vunmap_range_noflush(start, end);
495 }
496 
497 /**
498  * vunmap_range - unmap kernel virtual addresses
499  * @addr: start of the VM area to unmap
500  * @end: end of the VM area to unmap (non-inclusive)
501  *
502  * Clears any present PTEs in the virtual address range, flushes TLBs and
503  * caches. Any subsequent access to the address before it has been re-mapped
504  * is a kernel bug.
505  */
vunmap_range(unsigned long addr,unsigned long end)506 void vunmap_range(unsigned long addr, unsigned long end)
507 {
508 	flush_cache_vunmap(addr, end);
509 	vunmap_range_noflush(addr, end);
510 	flush_tlb_kernel_range(addr, end);
511 }
512 
vmap_pages_pte_range(pmd_t * pmd,unsigned long addr,unsigned long end,pgprot_t prot,struct page ** pages,int * nr,pgtbl_mod_mask * mask)513 static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
514 		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
515 		pgtbl_mod_mask *mask)
516 {
517 	int err = 0;
518 	pte_t *pte;
519 
520 	/*
521 	 * nr is a running index into the array which helps higher level
522 	 * callers keep track of where we're up to.
523 	 */
524 
525 	pte = pte_alloc_kernel_track(pmd, addr, mask);
526 	if (!pte)
527 		return -ENOMEM;
528 
529 	arch_enter_lazy_mmu_mode();
530 
531 	do {
532 		struct page *page = pages[*nr];
533 
534 		if (WARN_ON(!pte_none(ptep_get(pte)))) {
535 			err = -EBUSY;
536 			break;
537 		}
538 		if (WARN_ON(!page)) {
539 			err = -ENOMEM;
540 			break;
541 		}
542 		if (WARN_ON(!pfn_valid(page_to_pfn(page)))) {
543 			err = -EINVAL;
544 			break;
545 		}
546 
547 		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
548 		(*nr)++;
549 	} while (pte++, addr += PAGE_SIZE, addr != end);
550 
551 	arch_leave_lazy_mmu_mode();
552 	*mask |= PGTBL_PTE_MODIFIED;
553 
554 	return err;
555 }
556 
vmap_pages_pmd_range(pud_t * pud,unsigned long addr,unsigned long end,pgprot_t prot,struct page ** pages,int * nr,pgtbl_mod_mask * mask)557 static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
558 		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
559 		pgtbl_mod_mask *mask)
560 {
561 	pmd_t *pmd;
562 	unsigned long next;
563 
564 	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
565 	if (!pmd)
566 		return -ENOMEM;
567 	do {
568 		next = pmd_addr_end(addr, end);
569 		if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask))
570 			return -ENOMEM;
571 	} while (pmd++, addr = next, addr != end);
572 	return 0;
573 }
574 
vmap_pages_pud_range(p4d_t * p4d,unsigned long addr,unsigned long end,pgprot_t prot,struct page ** pages,int * nr,pgtbl_mod_mask * mask)575 static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr,
576 		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
577 		pgtbl_mod_mask *mask)
578 {
579 	pud_t *pud;
580 	unsigned long next;
581 
582 	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
583 	if (!pud)
584 		return -ENOMEM;
585 	do {
586 		next = pud_addr_end(addr, end);
587 		if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask))
588 			return -ENOMEM;
589 	} while (pud++, addr = next, addr != end);
590 	return 0;
591 }
592 
vmap_pages_p4d_range(pgd_t * pgd,unsigned long addr,unsigned long end,pgprot_t prot,struct page ** pages,int * nr,pgtbl_mod_mask * mask)593 static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr,
594 		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
595 		pgtbl_mod_mask *mask)
596 {
597 	p4d_t *p4d;
598 	unsigned long next;
599 
600 	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
601 	if (!p4d)
602 		return -ENOMEM;
603 	do {
604 		next = p4d_addr_end(addr, end);
605 		if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask))
606 			return -ENOMEM;
607 	} while (p4d++, addr = next, addr != end);
608 	return 0;
609 }
610 
vmap_small_pages_range_noflush(unsigned long addr,unsigned long end,pgprot_t prot,struct page ** pages)611 static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
612 		pgprot_t prot, struct page **pages)
613 {
614 	unsigned long start = addr;
615 	pgd_t *pgd;
616 	unsigned long next;
617 	int err = 0;
618 	int nr = 0;
619 	pgtbl_mod_mask mask = 0;
620 
621 	BUG_ON(addr >= end);
622 	pgd = pgd_offset_k(addr);
623 	do {
624 		next = pgd_addr_end(addr, end);
625 		if (pgd_bad(*pgd))
626 			mask |= PGTBL_PGD_MODIFIED;
627 		err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
628 		if (err)
629 			break;
630 	} while (pgd++, addr = next, addr != end);
631 
632 	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
633 		arch_sync_kernel_mappings(start, end);
634 
635 	return err;
636 }
637 
638 /*
639  * vmap_pages_range_noflush is similar to vmap_pages_range, but does not
640  * flush caches.
641  *
642  * The caller is responsible for calling flush_cache_vmap() after this
643  * function returns successfully and before the addresses are accessed.
644  *
645  * This is an internal function only. Do not use outside mm/.
646  */
__vmap_pages_range_noflush(unsigned long addr,unsigned long end,pgprot_t prot,struct page ** pages,unsigned int page_shift)647 int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
648 		pgprot_t prot, struct page **pages, unsigned int page_shift)
649 {
650 	unsigned int i, nr = (end - addr) >> PAGE_SHIFT;
651 
652 	WARN_ON(page_shift < PAGE_SHIFT);
653 
654 	if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) ||
655 			page_shift == PAGE_SHIFT)
656 		return vmap_small_pages_range_noflush(addr, end, prot, pages);
657 
658 	for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) {
659 		int err;
660 
661 		err = vmap_range_noflush(addr, addr + (1UL << page_shift),
662 					page_to_phys(pages[i]), prot,
663 					page_shift);
664 		if (err)
665 			return err;
666 
667 		addr += 1UL << page_shift;
668 	}
669 
670 	return 0;
671 }
672 
vmap_pages_range_noflush(unsigned long addr,unsigned long end,pgprot_t prot,struct page ** pages,unsigned int page_shift)673 int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
674 		pgprot_t prot, struct page **pages, unsigned int page_shift)
675 {
676 	int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages,
677 						 page_shift);
678 
679 	if (ret)
680 		return ret;
681 	return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
682 }
683 
684 /**
685  * vmap_pages_range - map pages to a kernel virtual address
686  * @addr: start of the VM area to map
687  * @end: end of the VM area to map (non-inclusive)
688  * @prot: page protection flags to use
689  * @pages: pages to map (always PAGE_SIZE pages)
690  * @page_shift: maximum shift that the pages may be mapped with, @pages must
691  * be aligned and contiguous up to at least this shift.
692  *
693  * RETURNS:
694  * 0 on success, -errno on failure.
695  */
vmap_pages_range(unsigned long addr,unsigned long end,pgprot_t prot,struct page ** pages,unsigned int page_shift)696 int vmap_pages_range(unsigned long addr, unsigned long end,
697 		pgprot_t prot, struct page **pages, unsigned int page_shift)
698 {
699 	int err;
700 
701 	err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
702 	flush_cache_vmap(addr, end);
703 	return err;
704 }
705 
check_sparse_vm_area(struct vm_struct * area,unsigned long start,unsigned long end)706 static int check_sparse_vm_area(struct vm_struct *area, unsigned long start,
707 				unsigned long end)
708 {
709 	might_sleep();
710 	if (WARN_ON_ONCE(area->flags & VM_FLUSH_RESET_PERMS))
711 		return -EINVAL;
712 	if (WARN_ON_ONCE(area->flags & VM_NO_GUARD))
713 		return -EINVAL;
714 	if (WARN_ON_ONCE(!(area->flags & VM_SPARSE)))
715 		return -EINVAL;
716 	if ((end - start) >> PAGE_SHIFT > totalram_pages())
717 		return -E2BIG;
718 	if (start < (unsigned long)area->addr ||
719 	    (void *)end > area->addr + get_vm_area_size(area))
720 		return -ERANGE;
721 	return 0;
722 }
723 
724 /**
725  * vm_area_map_pages - map pages inside given sparse vm_area
726  * @area: vm_area
727  * @start: start address inside vm_area
728  * @end: end address inside vm_area
729  * @pages: pages to map (always PAGE_SIZE pages)
730  */
vm_area_map_pages(struct vm_struct * area,unsigned long start,unsigned long end,struct page ** pages)731 int vm_area_map_pages(struct vm_struct *area, unsigned long start,
732 		      unsigned long end, struct page **pages)
733 {
734 	int err;
735 
736 	err = check_sparse_vm_area(area, start, end);
737 	if (err)
738 		return err;
739 
740 	return vmap_pages_range(start, end, PAGE_KERNEL, pages, PAGE_SHIFT);
741 }
742 
743 /**
744  * vm_area_unmap_pages - unmap pages inside given sparse vm_area
745  * @area: vm_area
746  * @start: start address inside vm_area
747  * @end: end address inside vm_area
748  */
vm_area_unmap_pages(struct vm_struct * area,unsigned long start,unsigned long end)749 void vm_area_unmap_pages(struct vm_struct *area, unsigned long start,
750 			 unsigned long end)
751 {
752 	if (check_sparse_vm_area(area, start, end))
753 		return;
754 
755 	vunmap_range(start, end);
756 }
757 
is_vmalloc_or_module_addr(const void * x)758 int is_vmalloc_or_module_addr(const void *x)
759 {
760 	/*
761 	 * ARM, x86-64 and sparc64 put modules in a special place,
762 	 * and fall back on vmalloc() if that fails. Others
763 	 * just put it in the vmalloc space.
764 	 */
765 #if defined(CONFIG_EXECMEM) && defined(MODULES_VADDR)
766 	unsigned long addr = (unsigned long)kasan_reset_tag(x);
767 	if (addr >= MODULES_VADDR && addr < MODULES_END)
768 		return 1;
769 #endif
770 	return is_vmalloc_addr(x);
771 }
772 EXPORT_SYMBOL_GPL(is_vmalloc_or_module_addr);
773 
774 /*
775  * Walk a vmap address to the struct page it maps. Huge vmap mappings will
776  * return the tail page that corresponds to the base page address, which
777  * matches small vmap mappings.
778  */
vmalloc_to_page(const void * vmalloc_addr)779 struct page *vmalloc_to_page(const void *vmalloc_addr)
780 {
781 	unsigned long addr = (unsigned long) vmalloc_addr;
782 	struct page *page = NULL;
783 	pgd_t *pgd = pgd_offset_k(addr);
784 	p4d_t *p4d;
785 	pud_t *pud;
786 	pmd_t *pmd;
787 	pte_t *ptep, pte;
788 
789 	/*
790 	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
791 	 * architectures that do not vmalloc module space
792 	 */
793 	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
794 
795 	if (pgd_none(*pgd))
796 		return NULL;
797 	if (WARN_ON_ONCE(pgd_leaf(*pgd)))
798 		return NULL; /* XXX: no allowance for huge pgd */
799 	if (WARN_ON_ONCE(pgd_bad(*pgd)))
800 		return NULL;
801 
802 	p4d = p4d_offset(pgd, addr);
803 	if (p4d_none(*p4d))
804 		return NULL;
805 	if (p4d_leaf(*p4d))
806 		return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
807 	if (WARN_ON_ONCE(p4d_bad(*p4d)))
808 		return NULL;
809 
810 	pud = pud_offset(p4d, addr);
811 	if (pud_none(*pud))
812 		return NULL;
813 	if (pud_leaf(*pud))
814 		return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
815 	if (WARN_ON_ONCE(pud_bad(*pud)))
816 		return NULL;
817 
818 	pmd = pmd_offset(pud, addr);
819 	if (pmd_none(*pmd))
820 		return NULL;
821 	if (pmd_leaf(*pmd))
822 		return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
823 	if (WARN_ON_ONCE(pmd_bad(*pmd)))
824 		return NULL;
825 
826 	ptep = pte_offset_kernel(pmd, addr);
827 	pte = ptep_get(ptep);
828 	if (pte_present(pte))
829 		page = pte_page(pte);
830 
831 	return page;
832 }
833 EXPORT_SYMBOL(vmalloc_to_page);
834 
835 /*
836  * Map a vmalloc()-space virtual address to the physical page frame number.
837  */
vmalloc_to_pfn(const void * vmalloc_addr)838 unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
839 {
840 	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
841 }
842 EXPORT_SYMBOL(vmalloc_to_pfn);
843 
844 
845 /*** Global kva allocator ***/
846 
847 #define DEBUG_AUGMENT_PROPAGATE_CHECK 0
848 #define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0
849 
850 
851 static DEFINE_SPINLOCK(free_vmap_area_lock);
852 static bool vmap_initialized __read_mostly;
853 
854 /*
855  * This kmem_cache is used for vmap_area objects. Instead of
856  * allocating from slab we reuse an object from this cache to
857  * make things faster. Especially in "no edge" splitting of
858  * free block.
859  */
860 static struct kmem_cache *vmap_area_cachep;
861 
862 /*
863  * This linked list is used in pair with free_vmap_area_root.
864  * It gives O(1) access to prev/next to perform fast coalescing.
865  */
866 static LIST_HEAD(free_vmap_area_list);
867 
868 /*
869  * This augment red-black tree represents the free vmap space.
870  * All vmap_area objects in this tree are sorted by va->va_start
871  * address. It is used for allocation and merging when a vmap
872  * object is released.
873  *
874  * Each vmap_area node contains a maximum available free block
875  * of its sub-tree, right or left. Therefore it is possible to
876  * find a lowest match of free area.
877  */
878 static struct rb_root free_vmap_area_root = RB_ROOT;
879 
880 /*
881  * Preload a CPU with one object for "no edge" split case. The
882  * aim is to get rid of allocations from the atomic context, thus
883  * to use more permissive allocation masks.
884  */
885 static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);
886 
887 /*
888  * This structure defines a single, solid model where a list and
889  * rb-tree are part of one entity protected by the lock. Nodes are
890  * sorted in ascending order, thus for O(1) access to left/right
891  * neighbors a list is used as well as for sequential traversal.
892  */
893 struct rb_list {
894 	struct rb_root root;
895 	struct list_head head;
896 	spinlock_t lock;
897 };
898 
899 /*
900  * A fast size storage contains VAs up to 1M size. A pool consists
901  * of linked between each other ready to go VAs of certain sizes.
902  * An index in the pool-array corresponds to number of pages + 1.
903  */
904 #define MAX_VA_SIZE_PAGES 256
905 
906 struct vmap_pool {
907 	struct list_head head;
908 	unsigned long len;
909 };
910 
911 /*
912  * An effective vmap-node logic. Users make use of nodes instead
913  * of a global heap. It allows to balance an access and mitigate
914  * contention.
915  */
916 static struct vmap_node {
917 	/* Simple size segregated storage. */
918 	struct vmap_pool pool[MAX_VA_SIZE_PAGES];
919 	spinlock_t pool_lock;
920 	bool skip_populate;
921 
922 	/* Bookkeeping data of this node. */
923 	struct rb_list busy;
924 	struct rb_list lazy;
925 
926 	/*
927 	 * Ready-to-free areas.
928 	 */
929 	struct list_head purge_list;
930 	struct work_struct purge_work;
931 	unsigned long nr_purged;
932 } single;
933 
934 /*
935  * Initial setup consists of one single node, i.e. a balancing
936  * is fully disabled. Later on, after vmap is initialized these
937  * parameters are updated based on a system capacity.
938  */
939 static struct vmap_node *vmap_nodes = &single;
940 static __read_mostly unsigned int nr_vmap_nodes = 1;
941 static __read_mostly unsigned int vmap_zone_size = 1;
942 
943 /* A simple iterator over all vmap-nodes. */
944 #define for_each_vmap_node(vn)	\
945 	for ((vn) = &vmap_nodes[0];	\
946 		(vn) < &vmap_nodes[nr_vmap_nodes]; (vn)++)
947 
948 static inline unsigned int
addr_to_node_id(unsigned long addr)949 addr_to_node_id(unsigned long addr)
950 {
951 	return (addr / vmap_zone_size) % nr_vmap_nodes;
952 }
953 
954 static inline struct vmap_node *
addr_to_node(unsigned long addr)955 addr_to_node(unsigned long addr)
956 {
957 	return &vmap_nodes[addr_to_node_id(addr)];
958 }
959 
960 static inline struct vmap_node *
id_to_node(unsigned int id)961 id_to_node(unsigned int id)
962 {
963 	return &vmap_nodes[id % nr_vmap_nodes];
964 }
965 
966 static inline unsigned int
node_to_id(struct vmap_node * node)967 node_to_id(struct vmap_node *node)
968 {
969 	/* Pointer arithmetic. */
970 	unsigned int id = node - vmap_nodes;
971 
972 	if (likely(id < nr_vmap_nodes))
973 		return id;
974 
975 	WARN_ONCE(1, "An address 0x%p is out-of-bounds.\n", node);
976 	return 0;
977 }
978 
979 /*
980  * We use the value 0 to represent "no node", that is why
981  * an encoded value will be the node-id incremented by 1.
982  * It is always greater then 0. A valid node_id which can
983  * be encoded is [0:nr_vmap_nodes - 1]. If a passed node_id
984  * is not valid 0 is returned.
985  */
986 static unsigned int
encode_vn_id(unsigned int node_id)987 encode_vn_id(unsigned int node_id)
988 {
989 	/* Can store U8_MAX [0:254] nodes. */
990 	if (node_id < nr_vmap_nodes)
991 		return (node_id + 1) << BITS_PER_BYTE;
992 
993 	/* Warn and no node encoded. */
994 	WARN_ONCE(1, "Encode wrong node id (%u)\n", node_id);
995 	return 0;
996 }
997 
998 /*
999  * Returns an encoded node-id, the valid range is within
1000  * [0:nr_vmap_nodes-1] values. Otherwise nr_vmap_nodes is
1001  * returned if extracted data is wrong.
1002  */
1003 static unsigned int
decode_vn_id(unsigned int val)1004 decode_vn_id(unsigned int val)
1005 {
1006 	unsigned int node_id = (val >> BITS_PER_BYTE) - 1;
1007 
1008 	/* Can store U8_MAX [0:254] nodes. */
1009 	if (node_id < nr_vmap_nodes)
1010 		return node_id;
1011 
1012 	/* If it was _not_ zero, warn. */
1013 	WARN_ONCE(node_id != UINT_MAX,
1014 		"Decode wrong node id (%d)\n", node_id);
1015 
1016 	return nr_vmap_nodes;
1017 }
1018 
1019 static bool
is_vn_id_valid(unsigned int node_id)1020 is_vn_id_valid(unsigned int node_id)
1021 {
1022 	if (node_id < nr_vmap_nodes)
1023 		return true;
1024 
1025 	return false;
1026 }
1027 
1028 static __always_inline unsigned long
va_size(struct vmap_area * va)1029 va_size(struct vmap_area *va)
1030 {
1031 	return (va->va_end - va->va_start);
1032 }
1033 
1034 static __always_inline unsigned long
get_subtree_max_size(struct rb_node * node)1035 get_subtree_max_size(struct rb_node *node)
1036 {
1037 	struct vmap_area *va;
1038 
1039 	va = rb_entry_safe(node, struct vmap_area, rb_node);
1040 	return va ? va->subtree_max_size : 0;
1041 }
1042 
1043 RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
1044 	struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)
1045 
1046 static void reclaim_and_purge_vmap_areas(void);
1047 static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
1048 static void drain_vmap_area_work(struct work_struct *work);
1049 static DECLARE_WORK(drain_vmap_work, drain_vmap_area_work);
1050 
1051 static __cacheline_aligned_in_smp atomic_long_t nr_vmalloc_pages;
1052 static __cacheline_aligned_in_smp atomic_long_t vmap_lazy_nr;
1053 
vmalloc_nr_pages(void)1054 unsigned long vmalloc_nr_pages(void)
1055 {
1056 	return atomic_long_read(&nr_vmalloc_pages);
1057 }
1058 
__find_vmap_area(unsigned long addr,struct rb_root * root)1059 static struct vmap_area *__find_vmap_area(unsigned long addr, struct rb_root *root)
1060 {
1061 	struct rb_node *n = root->rb_node;
1062 
1063 	addr = (unsigned long)kasan_reset_tag((void *)addr);
1064 
1065 	while (n) {
1066 		struct vmap_area *va;
1067 
1068 		va = rb_entry(n, struct vmap_area, rb_node);
1069 		if (addr < va->va_start)
1070 			n = n->rb_left;
1071 		else if (addr >= va->va_end)
1072 			n = n->rb_right;
1073 		else
1074 			return va;
1075 	}
1076 
1077 	return NULL;
1078 }
1079 
1080 /* Look up the first VA which satisfies addr < va_end, NULL if none. */
1081 static struct vmap_area *
__find_vmap_area_exceed_addr(unsigned long addr,struct rb_root * root)1082 __find_vmap_area_exceed_addr(unsigned long addr, struct rb_root *root)
1083 {
1084 	struct vmap_area *va = NULL;
1085 	struct rb_node *n = root->rb_node;
1086 
1087 	addr = (unsigned long)kasan_reset_tag((void *)addr);
1088 
1089 	while (n) {
1090 		struct vmap_area *tmp;
1091 
1092 		tmp = rb_entry(n, struct vmap_area, rb_node);
1093 		if (tmp->va_end > addr) {
1094 			va = tmp;
1095 			if (tmp->va_start <= addr)
1096 				break;
1097 
1098 			n = n->rb_left;
1099 		} else
1100 			n = n->rb_right;
1101 	}
1102 
1103 	return va;
1104 }
1105 
1106 /*
1107  * Returns a node where a first VA, that satisfies addr < va_end, resides.
1108  * If success, a node is locked. A user is responsible to unlock it when a
1109  * VA is no longer needed to be accessed.
1110  *
1111  * Returns NULL if nothing found.
1112  */
1113 static struct vmap_node *
find_vmap_area_exceed_addr_lock(unsigned long addr,struct vmap_area ** va)1114 find_vmap_area_exceed_addr_lock(unsigned long addr, struct vmap_area **va)
1115 {
1116 	unsigned long va_start_lowest;
1117 	struct vmap_node *vn;
1118 
1119 repeat:
1120 	va_start_lowest = 0;
1121 
1122 	for_each_vmap_node(vn) {
1123 		spin_lock(&vn->busy.lock);
1124 		*va = __find_vmap_area_exceed_addr(addr, &vn->busy.root);
1125 
1126 		if (*va)
1127 			if (!va_start_lowest || (*va)->va_start < va_start_lowest)
1128 				va_start_lowest = (*va)->va_start;
1129 		spin_unlock(&vn->busy.lock);
1130 	}
1131 
1132 	/*
1133 	 * Check if found VA exists, it might have gone away.  In this case we
1134 	 * repeat the search because a VA has been removed concurrently and we
1135 	 * need to proceed to the next one, which is a rare case.
1136 	 */
1137 	if (va_start_lowest) {
1138 		vn = addr_to_node(va_start_lowest);
1139 
1140 		spin_lock(&vn->busy.lock);
1141 		*va = __find_vmap_area(va_start_lowest, &vn->busy.root);
1142 
1143 		if (*va)
1144 			return vn;
1145 
1146 		spin_unlock(&vn->busy.lock);
1147 		goto repeat;
1148 	}
1149 
1150 	return NULL;
1151 }
1152 
1153 /*
1154  * This function returns back addresses of parent node
1155  * and its left or right link for further processing.
1156  *
1157  * Otherwise NULL is returned. In that case all further
1158  * steps regarding inserting of conflicting overlap range
1159  * have to be declined and actually considered as a bug.
1160  */
1161 static __always_inline struct rb_node **
find_va_links(struct vmap_area * va,struct rb_root * root,struct rb_node * from,struct rb_node ** parent)1162 find_va_links(struct vmap_area *va,
1163 	struct rb_root *root, struct rb_node *from,
1164 	struct rb_node **parent)
1165 {
1166 	struct vmap_area *tmp_va;
1167 	struct rb_node **link;
1168 
1169 	if (root) {
1170 		link = &root->rb_node;
1171 		if (unlikely(!*link)) {
1172 			*parent = NULL;
1173 			return link;
1174 		}
1175 	} else {
1176 		link = &from;
1177 	}
1178 
1179 	/*
1180 	 * Go to the bottom of the tree. When we hit the last point
1181 	 * we end up with parent rb_node and correct direction, i name
1182 	 * it link, where the new va->rb_node will be attached to.
1183 	 */
1184 	do {
1185 		tmp_va = rb_entry(*link, struct vmap_area, rb_node);
1186 
1187 		/*
1188 		 * During the traversal we also do some sanity check.
1189 		 * Trigger the BUG() if there are sides(left/right)
1190 		 * or full overlaps.
1191 		 */
1192 		if (va->va_end <= tmp_va->va_start)
1193 			link = &(*link)->rb_left;
1194 		else if (va->va_start >= tmp_va->va_end)
1195 			link = &(*link)->rb_right;
1196 		else {
1197 			WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n",
1198 				va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);
1199 
1200 			return NULL;
1201 		}
1202 	} while (*link);
1203 
1204 	*parent = &tmp_va->rb_node;
1205 	return link;
1206 }
1207 
1208 static __always_inline struct list_head *
get_va_next_sibling(struct rb_node * parent,struct rb_node ** link)1209 get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
1210 {
1211 	struct list_head *list;
1212 
1213 	if (unlikely(!parent))
1214 		/*
1215 		 * The red-black tree where we try to find VA neighbors
1216 		 * before merging or inserting is empty, i.e. it means
1217 		 * there is no free vmap space. Normally it does not
1218 		 * happen but we handle this case anyway.
1219 		 */
1220 		return NULL;
1221 
1222 	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
1223 	return (&parent->rb_right == link ? list->next : list);
1224 }
1225 
1226 static __always_inline void
__link_va(struct vmap_area * va,struct rb_root * root,struct rb_node * parent,struct rb_node ** link,struct list_head * head,bool augment)1227 __link_va(struct vmap_area *va, struct rb_root *root,
1228 	struct rb_node *parent, struct rb_node **link,
1229 	struct list_head *head, bool augment)
1230 {
1231 	/*
1232 	 * VA is still not in the list, but we can
1233 	 * identify its future previous list_head node.
1234 	 */
1235 	if (likely(parent)) {
1236 		head = &rb_entry(parent, struct vmap_area, rb_node)->list;
1237 		if (&parent->rb_right != link)
1238 			head = head->prev;
1239 	}
1240 
1241 	/* Insert to the rb-tree */
1242 	rb_link_node(&va->rb_node, parent, link);
1243 	if (augment) {
1244 		/*
1245 		 * Some explanation here. Just perform simple insertion
1246 		 * to the tree. We do not set va->subtree_max_size to
1247 		 * its current size before calling rb_insert_augmented().
1248 		 * It is because we populate the tree from the bottom
1249 		 * to parent levels when the node _is_ in the tree.
1250 		 *
1251 		 * Therefore we set subtree_max_size to zero after insertion,
1252 		 * to let __augment_tree_propagate_from() puts everything to
1253 		 * the correct order later on.
1254 		 */
1255 		rb_insert_augmented(&va->rb_node,
1256 			root, &free_vmap_area_rb_augment_cb);
1257 		va->subtree_max_size = 0;
1258 	} else {
1259 		rb_insert_color(&va->rb_node, root);
1260 	}
1261 
1262 	/* Address-sort this list */
1263 	list_add(&va->list, head);
1264 }
1265 
1266 static __always_inline void
link_va(struct vmap_area * va,struct rb_root * root,struct rb_node * parent,struct rb_node ** link,struct list_head * head)1267 link_va(struct vmap_area *va, struct rb_root *root,
1268 	struct rb_node *parent, struct rb_node **link,
1269 	struct list_head *head)
1270 {
1271 	__link_va(va, root, parent, link, head, false);
1272 }
1273 
1274 static __always_inline void
link_va_augment(struct vmap_area * va,struct rb_root * root,struct rb_node * parent,struct rb_node ** link,struct list_head * head)1275 link_va_augment(struct vmap_area *va, struct rb_root *root,
1276 	struct rb_node *parent, struct rb_node **link,
1277 	struct list_head *head)
1278 {
1279 	__link_va(va, root, parent, link, head, true);
1280 }
1281 
1282 static __always_inline void
__unlink_va(struct vmap_area * va,struct rb_root * root,bool augment)1283 __unlink_va(struct vmap_area *va, struct rb_root *root, bool augment)
1284 {
1285 	if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
1286 		return;
1287 
1288 	if (augment)
1289 		rb_erase_augmented(&va->rb_node,
1290 			root, &free_vmap_area_rb_augment_cb);
1291 	else
1292 		rb_erase(&va->rb_node, root);
1293 
1294 	list_del_init(&va->list);
1295 	RB_CLEAR_NODE(&va->rb_node);
1296 }
1297 
1298 static __always_inline void
unlink_va(struct vmap_area * va,struct rb_root * root)1299 unlink_va(struct vmap_area *va, struct rb_root *root)
1300 {
1301 	__unlink_va(va, root, false);
1302 }
1303 
1304 static __always_inline void
unlink_va_augment(struct vmap_area * va,struct rb_root * root)1305 unlink_va_augment(struct vmap_area *va, struct rb_root *root)
1306 {
1307 	__unlink_va(va, root, true);
1308 }
1309 
1310 #if DEBUG_AUGMENT_PROPAGATE_CHECK
1311 /*
1312  * Gets called when remove the node and rotate.
1313  */
1314 static __always_inline unsigned long
compute_subtree_max_size(struct vmap_area * va)1315 compute_subtree_max_size(struct vmap_area *va)
1316 {
1317 	return max3(va_size(va),
1318 		get_subtree_max_size(va->rb_node.rb_left),
1319 		get_subtree_max_size(va->rb_node.rb_right));
1320 }
1321 
1322 static void
augment_tree_propagate_check(void)1323 augment_tree_propagate_check(void)
1324 {
1325 	struct vmap_area *va;
1326 	unsigned long computed_size;
1327 
1328 	list_for_each_entry(va, &free_vmap_area_list, list) {
1329 		computed_size = compute_subtree_max_size(va);
1330 		if (computed_size != va->subtree_max_size)
1331 			pr_emerg("tree is corrupted: %lu, %lu\n",
1332 				va_size(va), va->subtree_max_size);
1333 	}
1334 }
1335 #endif
1336 
1337 /*
1338  * This function populates subtree_max_size from bottom to upper
1339  * levels starting from VA point. The propagation must be done
1340  * when VA size is modified by changing its va_start/va_end. Or
1341  * in case of newly inserting of VA to the tree.
1342  *
1343  * It means that __augment_tree_propagate_from() must be called:
1344  * - After VA has been inserted to the tree(free path);
1345  * - After VA has been shrunk(allocation path);
1346  * - After VA has been increased(merging path).
1347  *
1348  * Please note that, it does not mean that upper parent nodes
1349  * and their subtree_max_size are recalculated all the time up
1350  * to the root node.
1351  *
1352  *       4--8
1353  *        /\
1354  *       /  \
1355  *      /    \
1356  *    2--2  8--8
1357  *
1358  * For example if we modify the node 4, shrinking it to 2, then
1359  * no any modification is required. If we shrink the node 2 to 1
1360  * its subtree_max_size is updated only, and set to 1. If we shrink
1361  * the node 8 to 6, then its subtree_max_size is set to 6 and parent
1362  * node becomes 4--6.
1363  */
1364 static __always_inline void
augment_tree_propagate_from(struct vmap_area * va)1365 augment_tree_propagate_from(struct vmap_area *va)
1366 {
1367 	/*
1368 	 * Populate the tree from bottom towards the root until
1369 	 * the calculated maximum available size of checked node
1370 	 * is equal to its current one.
1371 	 */
1372 	free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL);
1373 
1374 #if DEBUG_AUGMENT_PROPAGATE_CHECK
1375 	augment_tree_propagate_check();
1376 #endif
1377 }
1378 
1379 static void
insert_vmap_area(struct vmap_area * va,struct rb_root * root,struct list_head * head)1380 insert_vmap_area(struct vmap_area *va,
1381 	struct rb_root *root, struct list_head *head)
1382 {
1383 	struct rb_node **link;
1384 	struct rb_node *parent;
1385 
1386 	link = find_va_links(va, root, NULL, &parent);
1387 	if (link)
1388 		link_va(va, root, parent, link, head);
1389 }
1390 
1391 static void
insert_vmap_area_augment(struct vmap_area * va,struct rb_node * from,struct rb_root * root,struct list_head * head)1392 insert_vmap_area_augment(struct vmap_area *va,
1393 	struct rb_node *from, struct rb_root *root,
1394 	struct list_head *head)
1395 {
1396 	struct rb_node **link;
1397 	struct rb_node *parent;
1398 
1399 	if (from)
1400 		link = find_va_links(va, NULL, from, &parent);
1401 	else
1402 		link = find_va_links(va, root, NULL, &parent);
1403 
1404 	if (link) {
1405 		link_va_augment(va, root, parent, link, head);
1406 		augment_tree_propagate_from(va);
1407 	}
1408 }
1409 
1410 /*
1411  * Merge de-allocated chunk of VA memory with previous
1412  * and next free blocks. If coalesce is not done a new
1413  * free area is inserted. If VA has been merged, it is
1414  * freed.
1415  *
1416  * Please note, it can return NULL in case of overlap
1417  * ranges, followed by WARN() report. Despite it is a
1418  * buggy behaviour, a system can be alive and keep
1419  * ongoing.
1420  */
1421 static __always_inline struct vmap_area *
__merge_or_add_vmap_area(struct vmap_area * va,struct rb_root * root,struct list_head * head,bool augment)1422 __merge_or_add_vmap_area(struct vmap_area *va,
1423 	struct rb_root *root, struct list_head *head, bool augment)
1424 {
1425 	struct vmap_area *sibling;
1426 	struct list_head *next;
1427 	struct rb_node **link;
1428 	struct rb_node *parent;
1429 	bool merged = false;
1430 
1431 	/*
1432 	 * Find a place in the tree where VA potentially will be
1433 	 * inserted, unless it is merged with its sibling/siblings.
1434 	 */
1435 	link = find_va_links(va, root, NULL, &parent);
1436 	if (!link)
1437 		return NULL;
1438 
1439 	/*
1440 	 * Get next node of VA to check if merging can be done.
1441 	 */
1442 	next = get_va_next_sibling(parent, link);
1443 	if (unlikely(next == NULL))
1444 		goto insert;
1445 
1446 	/*
1447 	 * start            end
1448 	 * |                |
1449 	 * |<------VA------>|<-----Next----->|
1450 	 *                  |                |
1451 	 *                  start            end
1452 	 */
1453 	if (next != head) {
1454 		sibling = list_entry(next, struct vmap_area, list);
1455 		if (sibling->va_start == va->va_end) {
1456 			sibling->va_start = va->va_start;
1457 
1458 			/* Free vmap_area object. */
1459 			kmem_cache_free(vmap_area_cachep, va);
1460 
1461 			/* Point to the new merged area. */
1462 			va = sibling;
1463 			merged = true;
1464 		}
1465 	}
1466 
1467 	/*
1468 	 * start            end
1469 	 * |                |
1470 	 * |<-----Prev----->|<------VA------>|
1471 	 *                  |                |
1472 	 *                  start            end
1473 	 */
1474 	if (next->prev != head) {
1475 		sibling = list_entry(next->prev, struct vmap_area, list);
1476 		if (sibling->va_end == va->va_start) {
1477 			/*
1478 			 * If both neighbors are coalesced, it is important
1479 			 * to unlink the "next" node first, followed by merging
1480 			 * with "previous" one. Otherwise the tree might not be
1481 			 * fully populated if a sibling's augmented value is
1482 			 * "normalized" because of rotation operations.
1483 			 */
1484 			if (merged)
1485 				__unlink_va(va, root, augment);
1486 
1487 			sibling->va_end = va->va_end;
1488 
1489 			/* Free vmap_area object. */
1490 			kmem_cache_free(vmap_area_cachep, va);
1491 
1492 			/* Point to the new merged area. */
1493 			va = sibling;
1494 			merged = true;
1495 		}
1496 	}
1497 
1498 insert:
1499 	if (!merged)
1500 		__link_va(va, root, parent, link, head, augment);
1501 
1502 	return va;
1503 }
1504 
1505 static __always_inline struct vmap_area *
merge_or_add_vmap_area(struct vmap_area * va,struct rb_root * root,struct list_head * head)1506 merge_or_add_vmap_area(struct vmap_area *va,
1507 	struct rb_root *root, struct list_head *head)
1508 {
1509 	return __merge_or_add_vmap_area(va, root, head, false);
1510 }
1511 
1512 static __always_inline struct vmap_area *
merge_or_add_vmap_area_augment(struct vmap_area * va,struct rb_root * root,struct list_head * head)1513 merge_or_add_vmap_area_augment(struct vmap_area *va,
1514 	struct rb_root *root, struct list_head *head)
1515 {
1516 	va = __merge_or_add_vmap_area(va, root, head, true);
1517 	if (va)
1518 		augment_tree_propagate_from(va);
1519 
1520 	return va;
1521 }
1522 
1523 static __always_inline bool
is_within_this_va(struct vmap_area * va,unsigned long size,unsigned long align,unsigned long vstart)1524 is_within_this_va(struct vmap_area *va, unsigned long size,
1525 	unsigned long align, unsigned long vstart)
1526 {
1527 	unsigned long nva_start_addr;
1528 
1529 	if (va->va_start > vstart)
1530 		nva_start_addr = ALIGN(va->va_start, align);
1531 	else
1532 		nva_start_addr = ALIGN(vstart, align);
1533 
1534 	/* Can be overflowed due to big size or alignment. */
1535 	if (nva_start_addr + size < nva_start_addr ||
1536 			nva_start_addr < vstart)
1537 		return false;
1538 
1539 	return (nva_start_addr + size <= va->va_end);
1540 }
1541 
1542 /*
1543  * Find the first free block(lowest start address) in the tree,
1544  * that will accomplish the request corresponding to passing
1545  * parameters. Please note, with an alignment bigger than PAGE_SIZE,
1546  * a search length is adjusted to account for worst case alignment
1547  * overhead.
1548  */
1549 static __always_inline struct vmap_area *
find_vmap_lowest_match(struct rb_root * root,unsigned long size,unsigned long align,unsigned long vstart,bool adjust_search_size)1550 find_vmap_lowest_match(struct rb_root *root, unsigned long size,
1551 	unsigned long align, unsigned long vstart, bool adjust_search_size)
1552 {
1553 	struct vmap_area *va;
1554 	struct rb_node *node;
1555 	unsigned long length;
1556 
1557 	/* Start from the root. */
1558 	node = root->rb_node;
1559 
1560 	/* Adjust the search size for alignment overhead. */
1561 	length = adjust_search_size ? size + align - 1 : size;
1562 
1563 	while (node) {
1564 		va = rb_entry(node, struct vmap_area, rb_node);
1565 
1566 		if (get_subtree_max_size(node->rb_left) >= length &&
1567 				vstart < va->va_start) {
1568 			node = node->rb_left;
1569 		} else {
1570 			if (is_within_this_va(va, size, align, vstart))
1571 				return va;
1572 
1573 			/*
1574 			 * Does not make sense to go deeper towards the right
1575 			 * sub-tree if it does not have a free block that is
1576 			 * equal or bigger to the requested search length.
1577 			 */
1578 			if (get_subtree_max_size(node->rb_right) >= length) {
1579 				node = node->rb_right;
1580 				continue;
1581 			}
1582 
1583 			/*
1584 			 * OK. We roll back and find the first right sub-tree,
1585 			 * that will satisfy the search criteria. It can happen
1586 			 * due to "vstart" restriction or an alignment overhead
1587 			 * that is bigger then PAGE_SIZE.
1588 			 */
1589 			while ((node = rb_parent(node))) {
1590 				va = rb_entry(node, struct vmap_area, rb_node);
1591 				if (is_within_this_va(va, size, align, vstart))
1592 					return va;
1593 
1594 				if (get_subtree_max_size(node->rb_right) >= length &&
1595 						vstart <= va->va_start) {
1596 					/*
1597 					 * Shift the vstart forward. Please note, we update it with
1598 					 * parent's start address adding "1" because we do not want
1599 					 * to enter same sub-tree after it has already been checked
1600 					 * and no suitable free block found there.
1601 					 */
1602 					vstart = va->va_start + 1;
1603 					node = node->rb_right;
1604 					break;
1605 				}
1606 			}
1607 		}
1608 	}
1609 
1610 	return NULL;
1611 }
1612 
1613 #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
1614 #include <linux/random.h>
1615 
1616 static struct vmap_area *
find_vmap_lowest_linear_match(struct list_head * head,unsigned long size,unsigned long align,unsigned long vstart)1617 find_vmap_lowest_linear_match(struct list_head *head, unsigned long size,
1618 	unsigned long align, unsigned long vstart)
1619 {
1620 	struct vmap_area *va;
1621 
1622 	list_for_each_entry(va, head, list) {
1623 		if (!is_within_this_va(va, size, align, vstart))
1624 			continue;
1625 
1626 		return va;
1627 	}
1628 
1629 	return NULL;
1630 }
1631 
1632 static void
find_vmap_lowest_match_check(struct rb_root * root,struct list_head * head,unsigned long size,unsigned long align)1633 find_vmap_lowest_match_check(struct rb_root *root, struct list_head *head,
1634 			     unsigned long size, unsigned long align)
1635 {
1636 	struct vmap_area *va_1, *va_2;
1637 	unsigned long vstart;
1638 	unsigned int rnd;
1639 
1640 	get_random_bytes(&rnd, sizeof(rnd));
1641 	vstart = VMALLOC_START + rnd;
1642 
1643 	va_1 = find_vmap_lowest_match(root, size, align, vstart, false);
1644 	va_2 = find_vmap_lowest_linear_match(head, size, align, vstart);
1645 
1646 	if (va_1 != va_2)
1647 		pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
1648 			va_1, va_2, vstart);
1649 }
1650 #endif
1651 
1652 enum fit_type {
1653 	NOTHING_FIT = 0,
1654 	FL_FIT_TYPE = 1,	/* full fit */
1655 	LE_FIT_TYPE = 2,	/* left edge fit */
1656 	RE_FIT_TYPE = 3,	/* right edge fit */
1657 	NE_FIT_TYPE = 4		/* no edge fit */
1658 };
1659 
1660 static __always_inline enum fit_type
classify_va_fit_type(struct vmap_area * va,unsigned long nva_start_addr,unsigned long size)1661 classify_va_fit_type(struct vmap_area *va,
1662 	unsigned long nva_start_addr, unsigned long size)
1663 {
1664 	enum fit_type type;
1665 
1666 	/* Check if it is within VA. */
1667 	if (nva_start_addr < va->va_start ||
1668 			nva_start_addr + size > va->va_end)
1669 		return NOTHING_FIT;
1670 
1671 	/* Now classify. */
1672 	if (va->va_start == nva_start_addr) {
1673 		if (va->va_end == nva_start_addr + size)
1674 			type = FL_FIT_TYPE;
1675 		else
1676 			type = LE_FIT_TYPE;
1677 	} else if (va->va_end == nva_start_addr + size) {
1678 		type = RE_FIT_TYPE;
1679 	} else {
1680 		type = NE_FIT_TYPE;
1681 	}
1682 
1683 	return type;
1684 }
1685 
1686 static __always_inline int
va_clip(struct rb_root * root,struct list_head * head,struct vmap_area * va,unsigned long nva_start_addr,unsigned long size)1687 va_clip(struct rb_root *root, struct list_head *head,
1688 		struct vmap_area *va, unsigned long nva_start_addr,
1689 		unsigned long size)
1690 {
1691 	struct vmap_area *lva = NULL;
1692 	enum fit_type type = classify_va_fit_type(va, nva_start_addr, size);
1693 
1694 	if (type == FL_FIT_TYPE) {
1695 		/*
1696 		 * No need to split VA, it fully fits.
1697 		 *
1698 		 * |               |
1699 		 * V      NVA      V
1700 		 * |---------------|
1701 		 */
1702 		unlink_va_augment(va, root);
1703 		kmem_cache_free(vmap_area_cachep, va);
1704 	} else if (type == LE_FIT_TYPE) {
1705 		/*
1706 		 * Split left edge of fit VA.
1707 		 *
1708 		 * |       |
1709 		 * V  NVA  V   R
1710 		 * |-------|-------|
1711 		 */
1712 		va->va_start += size;
1713 	} else if (type == RE_FIT_TYPE) {
1714 		/*
1715 		 * Split right edge of fit VA.
1716 		 *
1717 		 *         |       |
1718 		 *     L   V  NVA  V
1719 		 * |-------|-------|
1720 		 */
1721 		va->va_end = nva_start_addr;
1722 	} else if (type == NE_FIT_TYPE) {
1723 		/*
1724 		 * Split no edge of fit VA.
1725 		 *
1726 		 *     |       |
1727 		 *   L V  NVA  V R
1728 		 * |---|-------|---|
1729 		 */
1730 		lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
1731 		if (unlikely(!lva)) {
1732 			/*
1733 			 * For percpu allocator we do not do any pre-allocation
1734 			 * and leave it as it is. The reason is it most likely
1735 			 * never ends up with NE_FIT_TYPE splitting. In case of
1736 			 * percpu allocations offsets and sizes are aligned to
1737 			 * fixed align request, i.e. RE_FIT_TYPE and FL_FIT_TYPE
1738 			 * are its main fitting cases.
1739 			 *
1740 			 * There are a few exceptions though, as an example it is
1741 			 * a first allocation (early boot up) when we have "one"
1742 			 * big free space that has to be split.
1743 			 *
1744 			 * Also we can hit this path in case of regular "vmap"
1745 			 * allocations, if "this" current CPU was not preloaded.
1746 			 * See the comment in alloc_vmap_area() why. If so, then
1747 			 * GFP_NOWAIT is used instead to get an extra object for
1748 			 * split purpose. That is rare and most time does not
1749 			 * occur.
1750 			 *
1751 			 * What happens if an allocation gets failed. Basically,
1752 			 * an "overflow" path is triggered to purge lazily freed
1753 			 * areas to free some memory, then, the "retry" path is
1754 			 * triggered to repeat one more time. See more details
1755 			 * in alloc_vmap_area() function.
1756 			 */
1757 			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
1758 			if (!lva)
1759 				return -ENOMEM;
1760 		}
1761 
1762 		/*
1763 		 * Build the remainder.
1764 		 */
1765 		lva->va_start = va->va_start;
1766 		lva->va_end = nva_start_addr;
1767 
1768 		/*
1769 		 * Shrink this VA to remaining size.
1770 		 */
1771 		va->va_start = nva_start_addr + size;
1772 	} else {
1773 		return -EINVAL;
1774 	}
1775 
1776 	if (type != FL_FIT_TYPE) {
1777 		augment_tree_propagate_from(va);
1778 
1779 		if (lva)	/* type == NE_FIT_TYPE */
1780 			insert_vmap_area_augment(lva, &va->rb_node, root, head);
1781 	}
1782 
1783 	return 0;
1784 }
1785 
1786 static unsigned long
va_alloc(struct vmap_area * va,struct rb_root * root,struct list_head * head,unsigned long size,unsigned long align,unsigned long vstart,unsigned long vend)1787 va_alloc(struct vmap_area *va,
1788 		struct rb_root *root, struct list_head *head,
1789 		unsigned long size, unsigned long align,
1790 		unsigned long vstart, unsigned long vend)
1791 {
1792 	unsigned long nva_start_addr;
1793 	int ret;
1794 
1795 	if (va->va_start > vstart)
1796 		nva_start_addr = ALIGN(va->va_start, align);
1797 	else
1798 		nva_start_addr = ALIGN(vstart, align);
1799 
1800 	/* Check the "vend" restriction. */
1801 	if (nva_start_addr + size > vend)
1802 		return -ERANGE;
1803 
1804 	/* Update the free vmap_area. */
1805 	ret = va_clip(root, head, va, nva_start_addr, size);
1806 	if (WARN_ON_ONCE(ret))
1807 		return ret;
1808 
1809 	return nva_start_addr;
1810 }
1811 
1812 /*
1813  * Returns a start address of the newly allocated area, if success.
1814  * Otherwise an error value is returned that indicates failure.
1815  */
1816 static __always_inline unsigned long
__alloc_vmap_area(struct rb_root * root,struct list_head * head,unsigned long size,unsigned long align,unsigned long vstart,unsigned long vend)1817 __alloc_vmap_area(struct rb_root *root, struct list_head *head,
1818 	unsigned long size, unsigned long align,
1819 	unsigned long vstart, unsigned long vend)
1820 {
1821 	bool adjust_search_size = true;
1822 	unsigned long nva_start_addr;
1823 	struct vmap_area *va;
1824 
1825 	/*
1826 	 * Do not adjust when:
1827 	 *   a) align <= PAGE_SIZE, because it does not make any sense.
1828 	 *      All blocks(their start addresses) are at least PAGE_SIZE
1829 	 *      aligned anyway;
1830 	 *   b) a short range where a requested size corresponds to exactly
1831 	 *      specified [vstart:vend] interval and an alignment > PAGE_SIZE.
1832 	 *      With adjusted search length an allocation would not succeed.
1833 	 */
1834 	if (align <= PAGE_SIZE || (align > PAGE_SIZE && (vend - vstart) == size))
1835 		adjust_search_size = false;
1836 
1837 	va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
1838 	if (unlikely(!va))
1839 		return -ENOENT;
1840 
1841 	nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend);
1842 
1843 #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
1844 	if (!IS_ERR_VALUE(nva_start_addr))
1845 		find_vmap_lowest_match_check(root, head, size, align);
1846 #endif
1847 
1848 	return nva_start_addr;
1849 }
1850 
1851 /*
1852  * Free a region of KVA allocated by alloc_vmap_area
1853  */
free_vmap_area(struct vmap_area * va)1854 static void free_vmap_area(struct vmap_area *va)
1855 {
1856 	struct vmap_node *vn = addr_to_node(va->va_start);
1857 
1858 	/*
1859 	 * Remove from the busy tree/list.
1860 	 */
1861 	spin_lock(&vn->busy.lock);
1862 	unlink_va(va, &vn->busy.root);
1863 	spin_unlock(&vn->busy.lock);
1864 
1865 	/*
1866 	 * Insert/Merge it back to the free tree/list.
1867 	 */
1868 	spin_lock(&free_vmap_area_lock);
1869 	merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list);
1870 	spin_unlock(&free_vmap_area_lock);
1871 }
1872 
1873 static inline void
preload_this_cpu_lock(spinlock_t * lock,gfp_t gfp_mask,int node)1874 preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node)
1875 {
1876 	struct vmap_area *va = NULL, *tmp;
1877 
1878 	/*
1879 	 * Preload this CPU with one extra vmap_area object. It is used
1880 	 * when fit type of free area is NE_FIT_TYPE. It guarantees that
1881 	 * a CPU that does an allocation is preloaded.
1882 	 *
1883 	 * We do it in non-atomic context, thus it allows us to use more
1884 	 * permissive allocation masks to be more stable under low memory
1885 	 * condition and high memory pressure.
1886 	 */
1887 	if (!this_cpu_read(ne_fit_preload_node))
1888 		va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1889 
1890 	spin_lock(lock);
1891 
1892 	tmp = NULL;
1893 	if (va && !__this_cpu_try_cmpxchg(ne_fit_preload_node, &tmp, va))
1894 		kmem_cache_free(vmap_area_cachep, va);
1895 }
1896 
1897 static struct vmap_pool *
size_to_va_pool(struct vmap_node * vn,unsigned long size)1898 size_to_va_pool(struct vmap_node *vn, unsigned long size)
1899 {
1900 	unsigned int idx = (size - 1) / PAGE_SIZE;
1901 
1902 	if (idx < MAX_VA_SIZE_PAGES)
1903 		return &vn->pool[idx];
1904 
1905 	return NULL;
1906 }
1907 
1908 static bool
node_pool_add_va(struct vmap_node * n,struct vmap_area * va)1909 node_pool_add_va(struct vmap_node *n, struct vmap_area *va)
1910 {
1911 	struct vmap_pool *vp;
1912 
1913 	vp = size_to_va_pool(n, va_size(va));
1914 	if (!vp)
1915 		return false;
1916 
1917 	spin_lock(&n->pool_lock);
1918 	list_add(&va->list, &vp->head);
1919 	WRITE_ONCE(vp->len, vp->len + 1);
1920 	spin_unlock(&n->pool_lock);
1921 
1922 	return true;
1923 }
1924 
1925 static struct vmap_area *
node_pool_del_va(struct vmap_node * vn,unsigned long size,unsigned long align,unsigned long vstart,unsigned long vend)1926 node_pool_del_va(struct vmap_node *vn, unsigned long size,
1927 		unsigned long align, unsigned long vstart,
1928 		unsigned long vend)
1929 {
1930 	struct vmap_area *va = NULL;
1931 	struct vmap_pool *vp;
1932 	int err = 0;
1933 
1934 	vp = size_to_va_pool(vn, size);
1935 	if (!vp || list_empty(&vp->head))
1936 		return NULL;
1937 
1938 	spin_lock(&vn->pool_lock);
1939 	if (!list_empty(&vp->head)) {
1940 		va = list_first_entry(&vp->head, struct vmap_area, list);
1941 
1942 		if (IS_ALIGNED(va->va_start, align)) {
1943 			/*
1944 			 * Do some sanity check and emit a warning
1945 			 * if one of below checks detects an error.
1946 			 */
1947 			err |= (va_size(va) != size);
1948 			err |= (va->va_start < vstart);
1949 			err |= (va->va_end > vend);
1950 
1951 			if (!WARN_ON_ONCE(err)) {
1952 				list_del_init(&va->list);
1953 				WRITE_ONCE(vp->len, vp->len - 1);
1954 			} else {
1955 				va = NULL;
1956 			}
1957 		} else {
1958 			list_move_tail(&va->list, &vp->head);
1959 			va = NULL;
1960 		}
1961 	}
1962 	spin_unlock(&vn->pool_lock);
1963 
1964 	return va;
1965 }
1966 
1967 static struct vmap_area *
node_alloc(unsigned long size,unsigned long align,unsigned long vstart,unsigned long vend,unsigned long * addr,unsigned int * vn_id)1968 node_alloc(unsigned long size, unsigned long align,
1969 		unsigned long vstart, unsigned long vend,
1970 		unsigned long *addr, unsigned int *vn_id)
1971 {
1972 	struct vmap_area *va;
1973 
1974 	*vn_id = 0;
1975 	*addr = -EINVAL;
1976 
1977 	/*
1978 	 * Fallback to a global heap if not vmalloc or there
1979 	 * is only one node.
1980 	 */
1981 	if (vstart != VMALLOC_START || vend != VMALLOC_END ||
1982 			nr_vmap_nodes == 1)
1983 		return NULL;
1984 
1985 	*vn_id = raw_smp_processor_id() % nr_vmap_nodes;
1986 	va = node_pool_del_va(id_to_node(*vn_id), size, align, vstart, vend);
1987 	*vn_id = encode_vn_id(*vn_id);
1988 
1989 	if (va)
1990 		*addr = va->va_start;
1991 
1992 	return va;
1993 }
1994 
setup_vmalloc_vm(struct vm_struct * vm,struct vmap_area * va,unsigned long flags,const void * caller)1995 static inline void setup_vmalloc_vm(struct vm_struct *vm,
1996 	struct vmap_area *va, unsigned long flags, const void *caller)
1997 {
1998 	vm->flags = flags;
1999 	vm->addr = (void *)va->va_start;
2000 	vm->size = vm->requested_size = va_size(va);
2001 	vm->caller = caller;
2002 	va->vm = vm;
2003 }
2004 
2005 /*
2006  * Allocate a region of KVA of the specified size and alignment, within the
2007  * vstart and vend. If vm is passed in, the two will also be bound.
2008  */
alloc_vmap_area(unsigned long size,unsigned long align,unsigned long vstart,unsigned long vend,int node,gfp_t gfp_mask,unsigned long va_flags,struct vm_struct * vm)2009 static struct vmap_area *alloc_vmap_area(unsigned long size,
2010 				unsigned long align,
2011 				unsigned long vstart, unsigned long vend,
2012 				int node, gfp_t gfp_mask,
2013 				unsigned long va_flags, struct vm_struct *vm)
2014 {
2015 	struct vmap_node *vn;
2016 	struct vmap_area *va;
2017 	unsigned long freed;
2018 	unsigned long addr;
2019 	unsigned int vn_id;
2020 	int purged = 0;
2021 	int ret;
2022 
2023 	if (unlikely(!size || offset_in_page(size) || !is_power_of_2(align)))
2024 		return ERR_PTR(-EINVAL);
2025 
2026 	if (unlikely(!vmap_initialized))
2027 		return ERR_PTR(-EBUSY);
2028 
2029 	/* Only reclaim behaviour flags are relevant. */
2030 	gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
2031 	might_sleep();
2032 
2033 	/*
2034 	 * If a VA is obtained from a global heap(if it fails here)
2035 	 * it is anyway marked with this "vn_id" so it is returned
2036 	 * to this pool's node later. Such way gives a possibility
2037 	 * to populate pools based on users demand.
2038 	 *
2039 	 * On success a ready to go VA is returned.
2040 	 */
2041 	va = node_alloc(size, align, vstart, vend, &addr, &vn_id);
2042 	if (!va) {
2043 		va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
2044 		if (unlikely(!va))
2045 			return ERR_PTR(-ENOMEM);
2046 
2047 		/*
2048 		 * Only scan the relevant parts containing pointers to other objects
2049 		 * to avoid false negatives.
2050 		 */
2051 		kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);
2052 	}
2053 
2054 retry:
2055 	if (IS_ERR_VALUE(addr)) {
2056 		preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
2057 		addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
2058 			size, align, vstart, vend);
2059 		spin_unlock(&free_vmap_area_lock);
2060 
2061 		/*
2062 		 * This is not a fast path.  Check if yielding is needed. This
2063 		 * is the only reschedule point in the vmalloc() path.
2064 		 */
2065 		cond_resched();
2066 	}
2067 
2068 	trace_alloc_vmap_area(addr, size, align, vstart, vend, IS_ERR_VALUE(addr));
2069 
2070 	/*
2071 	 * If an allocation fails, the error value is
2072 	 * returned. Therefore trigger the overflow path.
2073 	 */
2074 	if (IS_ERR_VALUE(addr))
2075 		goto overflow;
2076 
2077 	va->va_start = addr;
2078 	va->va_end = addr + size;
2079 	va->vm = NULL;
2080 	va->flags = (va_flags | vn_id);
2081 
2082 	if (vm) {
2083 		vm->addr = (void *)va->va_start;
2084 		vm->size = va_size(va);
2085 		va->vm = vm;
2086 	}
2087 
2088 	vn = addr_to_node(va->va_start);
2089 
2090 	spin_lock(&vn->busy.lock);
2091 	insert_vmap_area(va, &vn->busy.root, &vn->busy.head);
2092 	spin_unlock(&vn->busy.lock);
2093 
2094 	BUG_ON(!IS_ALIGNED(va->va_start, align));
2095 	BUG_ON(va->va_start < vstart);
2096 	BUG_ON(va->va_end > vend);
2097 
2098 	ret = kasan_populate_vmalloc(addr, size, gfp_mask);
2099 	if (ret) {
2100 		free_vmap_area(va);
2101 		return ERR_PTR(ret);
2102 	}
2103 
2104 	return va;
2105 
2106 overflow:
2107 	if (!purged) {
2108 		reclaim_and_purge_vmap_areas();
2109 		purged = 1;
2110 		goto retry;
2111 	}
2112 
2113 	freed = 0;
2114 	blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
2115 
2116 	if (freed > 0) {
2117 		purged = 0;
2118 		goto retry;
2119 	}
2120 
2121 	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
2122 		pr_warn("vmalloc_node_range for size %lu failed: Address range restricted to %#lx - %#lx\n",
2123 				size, vstart, vend);
2124 
2125 	kmem_cache_free(vmap_area_cachep, va);
2126 	return ERR_PTR(-EBUSY);
2127 }
2128 
register_vmap_purge_notifier(struct notifier_block * nb)2129 int register_vmap_purge_notifier(struct notifier_block *nb)
2130 {
2131 	return blocking_notifier_chain_register(&vmap_notify_list, nb);
2132 }
2133 EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);
2134 
unregister_vmap_purge_notifier(struct notifier_block * nb)2135 int unregister_vmap_purge_notifier(struct notifier_block *nb)
2136 {
2137 	return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
2138 }
2139 EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
2140 
2141 /*
2142  * lazy_max_pages is the maximum amount of virtual address space we gather up
2143  * before attempting to purge with a TLB flush.
2144  *
2145  * There is a tradeoff here: a larger number will cover more kernel page tables
2146  * and take slightly longer to purge, but it will linearly reduce the number of
2147  * global TLB flushes that must be performed. It would seem natural to scale
2148  * this number up linearly with the number of CPUs (because vmapping activity
2149  * could also scale linearly with the number of CPUs), however it is likely
2150  * that in practice, workloads might be constrained in other ways that mean
2151  * vmap activity will not scale linearly with CPUs. Also, I want to be
2152  * conservative and not introduce a big latency on huge systems, so go with
2153  * a less aggressive log scale. It will still be an improvement over the old
2154  * code, and it will be simple to change the scale factor if we find that it
2155  * becomes a problem on bigger systems.
2156  */
lazy_max_pages(void)2157 static unsigned long lazy_max_pages(void)
2158 {
2159 	unsigned int log;
2160 
2161 	log = fls(num_online_cpus());
2162 
2163 	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
2164 }
2165 
2166 /*
2167  * Serialize vmap purging.  There is no actual critical section protected
2168  * by this lock, but we want to avoid concurrent calls for performance
2169  * reasons and to make the pcpu_get_vm_areas more deterministic.
2170  */
2171 static DEFINE_MUTEX(vmap_purge_lock);
2172 
2173 /* for per-CPU blocks */
2174 static void purge_fragmented_blocks_allcpus(void);
2175 
2176 static void
reclaim_list_global(struct list_head * head)2177 reclaim_list_global(struct list_head *head)
2178 {
2179 	struct vmap_area *va, *n;
2180 
2181 	if (list_empty(head))
2182 		return;
2183 
2184 	spin_lock(&free_vmap_area_lock);
2185 	list_for_each_entry_safe(va, n, head, list)
2186 		merge_or_add_vmap_area_augment(va,
2187 			&free_vmap_area_root, &free_vmap_area_list);
2188 	spin_unlock(&free_vmap_area_lock);
2189 }
2190 
2191 static void
decay_va_pool_node(struct vmap_node * vn,bool full_decay)2192 decay_va_pool_node(struct vmap_node *vn, bool full_decay)
2193 {
2194 	LIST_HEAD(decay_list);
2195 	struct rb_root decay_root = RB_ROOT;
2196 	struct vmap_area *va, *nva;
2197 	unsigned long n_decay, pool_len;
2198 	int i;
2199 
2200 	for (i = 0; i < MAX_VA_SIZE_PAGES; i++) {
2201 		LIST_HEAD(tmp_list);
2202 
2203 		if (list_empty(&vn->pool[i].head))
2204 			continue;
2205 
2206 		/* Detach the pool, so no-one can access it. */
2207 		spin_lock(&vn->pool_lock);
2208 		list_replace_init(&vn->pool[i].head, &tmp_list);
2209 		spin_unlock(&vn->pool_lock);
2210 
2211 		pool_len = n_decay = vn->pool[i].len;
2212 		WRITE_ONCE(vn->pool[i].len, 0);
2213 
2214 		/* Decay a pool by ~25% out of left objects. */
2215 		if (!full_decay)
2216 			n_decay >>= 2;
2217 		pool_len -= n_decay;
2218 
2219 		list_for_each_entry_safe(va, nva, &tmp_list, list) {
2220 			if (!n_decay--)
2221 				break;
2222 
2223 			list_del_init(&va->list);
2224 			merge_or_add_vmap_area(va, &decay_root, &decay_list);
2225 		}
2226 
2227 		/*
2228 		 * Attach the pool back if it has been partly decayed.
2229 		 * Please note, it is supposed that nobody(other contexts)
2230 		 * can populate the pool therefore a simple list replace
2231 		 * operation takes place here.
2232 		 */
2233 		if (!list_empty(&tmp_list)) {
2234 			spin_lock(&vn->pool_lock);
2235 			list_replace_init(&tmp_list, &vn->pool[i].head);
2236 			WRITE_ONCE(vn->pool[i].len, pool_len);
2237 			spin_unlock(&vn->pool_lock);
2238 		}
2239 	}
2240 
2241 	reclaim_list_global(&decay_list);
2242 }
2243 
2244 static void
kasan_release_vmalloc_node(struct vmap_node * vn)2245 kasan_release_vmalloc_node(struct vmap_node *vn)
2246 {
2247 	struct vmap_area *va;
2248 	unsigned long start, end;
2249 
2250 	start = list_first_entry(&vn->purge_list, struct vmap_area, list)->va_start;
2251 	end = list_last_entry(&vn->purge_list, struct vmap_area, list)->va_end;
2252 
2253 	list_for_each_entry(va, &vn->purge_list, list) {
2254 		if (is_vmalloc_or_module_addr((void *) va->va_start))
2255 			kasan_release_vmalloc(va->va_start, va->va_end,
2256 				va->va_start, va->va_end,
2257 				KASAN_VMALLOC_PAGE_RANGE);
2258 	}
2259 
2260 	kasan_release_vmalloc(start, end, start, end, KASAN_VMALLOC_TLB_FLUSH);
2261 }
2262 
purge_vmap_node(struct work_struct * work)2263 static void purge_vmap_node(struct work_struct *work)
2264 {
2265 	struct vmap_node *vn = container_of(work,
2266 		struct vmap_node, purge_work);
2267 	unsigned long nr_purged_pages = 0;
2268 	struct vmap_area *va, *n_va;
2269 	LIST_HEAD(local_list);
2270 
2271 	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
2272 		kasan_release_vmalloc_node(vn);
2273 
2274 	vn->nr_purged = 0;
2275 
2276 	list_for_each_entry_safe(va, n_va, &vn->purge_list, list) {
2277 		unsigned long nr = va_size(va) >> PAGE_SHIFT;
2278 		unsigned int vn_id = decode_vn_id(va->flags);
2279 
2280 		list_del_init(&va->list);
2281 
2282 		nr_purged_pages += nr;
2283 		vn->nr_purged++;
2284 
2285 		if (is_vn_id_valid(vn_id) && !vn->skip_populate)
2286 			if (node_pool_add_va(vn, va))
2287 				continue;
2288 
2289 		/* Go back to global. */
2290 		list_add(&va->list, &local_list);
2291 	}
2292 
2293 	atomic_long_sub(nr_purged_pages, &vmap_lazy_nr);
2294 
2295 	reclaim_list_global(&local_list);
2296 }
2297 
2298 /*
2299  * Purges all lazily-freed vmap areas.
2300  */
__purge_vmap_area_lazy(unsigned long start,unsigned long end,bool full_pool_decay)2301 static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end,
2302 		bool full_pool_decay)
2303 {
2304 	unsigned long nr_purged_areas = 0;
2305 	unsigned int nr_purge_helpers;
2306 	static cpumask_t purge_nodes;
2307 	unsigned int nr_purge_nodes;
2308 	struct vmap_node *vn;
2309 	int i;
2310 
2311 	lockdep_assert_held(&vmap_purge_lock);
2312 
2313 	/*
2314 	 * Use cpumask to mark which node has to be processed.
2315 	 */
2316 	purge_nodes = CPU_MASK_NONE;
2317 
2318 	for_each_vmap_node(vn) {
2319 		INIT_LIST_HEAD(&vn->purge_list);
2320 		vn->skip_populate = full_pool_decay;
2321 		decay_va_pool_node(vn, full_pool_decay);
2322 
2323 		if (RB_EMPTY_ROOT(&vn->lazy.root))
2324 			continue;
2325 
2326 		spin_lock(&vn->lazy.lock);
2327 		WRITE_ONCE(vn->lazy.root.rb_node, NULL);
2328 		list_replace_init(&vn->lazy.head, &vn->purge_list);
2329 		spin_unlock(&vn->lazy.lock);
2330 
2331 		start = min(start, list_first_entry(&vn->purge_list,
2332 			struct vmap_area, list)->va_start);
2333 
2334 		end = max(end, list_last_entry(&vn->purge_list,
2335 			struct vmap_area, list)->va_end);
2336 
2337 		cpumask_set_cpu(node_to_id(vn), &purge_nodes);
2338 	}
2339 
2340 	nr_purge_nodes = cpumask_weight(&purge_nodes);
2341 	if (nr_purge_nodes > 0) {
2342 		flush_tlb_kernel_range(start, end);
2343 
2344 		/* One extra worker is per a lazy_max_pages() full set minus one. */
2345 		nr_purge_helpers = atomic_long_read(&vmap_lazy_nr) / lazy_max_pages();
2346 		nr_purge_helpers = clamp(nr_purge_helpers, 1U, nr_purge_nodes) - 1;
2347 
2348 		for_each_cpu(i, &purge_nodes) {
2349 			vn = &vmap_nodes[i];
2350 
2351 			if (nr_purge_helpers > 0) {
2352 				INIT_WORK(&vn->purge_work, purge_vmap_node);
2353 
2354 				if (cpumask_test_cpu(i, cpu_online_mask))
2355 					schedule_work_on(i, &vn->purge_work);
2356 				else
2357 					schedule_work(&vn->purge_work);
2358 
2359 				nr_purge_helpers--;
2360 			} else {
2361 				vn->purge_work.func = NULL;
2362 				purge_vmap_node(&vn->purge_work);
2363 				nr_purged_areas += vn->nr_purged;
2364 			}
2365 		}
2366 
2367 		for_each_cpu(i, &purge_nodes) {
2368 			vn = &vmap_nodes[i];
2369 
2370 			if (vn->purge_work.func) {
2371 				flush_work(&vn->purge_work);
2372 				nr_purged_areas += vn->nr_purged;
2373 			}
2374 		}
2375 	}
2376 
2377 	trace_purge_vmap_area_lazy(start, end, nr_purged_areas);
2378 	return nr_purged_areas > 0;
2379 }
2380 
2381 /*
2382  * Reclaim vmap areas by purging fragmented blocks and purge_vmap_area_list.
2383  */
reclaim_and_purge_vmap_areas(void)2384 static void reclaim_and_purge_vmap_areas(void)
2385 
2386 {
2387 	mutex_lock(&vmap_purge_lock);
2388 	purge_fragmented_blocks_allcpus();
2389 	__purge_vmap_area_lazy(ULONG_MAX, 0, true);
2390 	mutex_unlock(&vmap_purge_lock);
2391 }
2392 
drain_vmap_area_work(struct work_struct * work)2393 static void drain_vmap_area_work(struct work_struct *work)
2394 {
2395 	mutex_lock(&vmap_purge_lock);
2396 	__purge_vmap_area_lazy(ULONG_MAX, 0, false);
2397 	mutex_unlock(&vmap_purge_lock);
2398 }
2399 
2400 /*
2401  * Free a vmap area, caller ensuring that the area has been unmapped,
2402  * unlinked and flush_cache_vunmap had been called for the correct
2403  * range previously.
2404  */
free_vmap_area_noflush(struct vmap_area * va)2405 static void free_vmap_area_noflush(struct vmap_area *va)
2406 {
2407 	unsigned long nr_lazy_max = lazy_max_pages();
2408 	unsigned long va_start = va->va_start;
2409 	unsigned int vn_id = decode_vn_id(va->flags);
2410 	struct vmap_node *vn;
2411 	unsigned long nr_lazy;
2412 
2413 	if (WARN_ON_ONCE(!list_empty(&va->list)))
2414 		return;
2415 
2416 	nr_lazy = atomic_long_add_return_relaxed(va_size(va) >> PAGE_SHIFT,
2417 					 &vmap_lazy_nr);
2418 
2419 	/*
2420 	 * If it was request by a certain node we would like to
2421 	 * return it to that node, i.e. its pool for later reuse.
2422 	 */
2423 	vn = is_vn_id_valid(vn_id) ?
2424 		id_to_node(vn_id):addr_to_node(va->va_start);
2425 
2426 	spin_lock(&vn->lazy.lock);
2427 	insert_vmap_area(va, &vn->lazy.root, &vn->lazy.head);
2428 	spin_unlock(&vn->lazy.lock);
2429 
2430 	trace_free_vmap_area_noflush(va_start, nr_lazy, nr_lazy_max);
2431 
2432 	/* After this point, we may free va at any time */
2433 	if (unlikely(nr_lazy > nr_lazy_max))
2434 		schedule_work(&drain_vmap_work);
2435 }
2436 
2437 /*
2438  * Free and unmap a vmap area
2439  */
free_unmap_vmap_area(struct vmap_area * va)2440 static void free_unmap_vmap_area(struct vmap_area *va)
2441 {
2442 	flush_cache_vunmap(va->va_start, va->va_end);
2443 	vunmap_range_noflush(va->va_start, va->va_end);
2444 	if (debug_pagealloc_enabled_static())
2445 		flush_tlb_kernel_range(va->va_start, va->va_end);
2446 
2447 	free_vmap_area_noflush(va);
2448 }
2449 
find_vmap_area(unsigned long addr)2450 struct vmap_area *find_vmap_area(unsigned long addr)
2451 {
2452 	struct vmap_node *vn;
2453 	struct vmap_area *va;
2454 	int i, j;
2455 
2456 	if (unlikely(!vmap_initialized))
2457 		return NULL;
2458 
2459 	/*
2460 	 * An addr_to_node_id(addr) converts an address to a node index
2461 	 * where a VA is located. If VA spans several zones and passed
2462 	 * addr is not the same as va->va_start, what is not common, we
2463 	 * may need to scan extra nodes. See an example:
2464 	 *
2465 	 *      <----va---->
2466 	 * -|-----|-----|-----|-----|-
2467 	 *     1     2     0     1
2468 	 *
2469 	 * VA resides in node 1 whereas it spans 1, 2 an 0. If passed
2470 	 * addr is within 2 or 0 nodes we should do extra work.
2471 	 */
2472 	i = j = addr_to_node_id(addr);
2473 	do {
2474 		vn = &vmap_nodes[i];
2475 
2476 		spin_lock(&vn->busy.lock);
2477 		va = __find_vmap_area(addr, &vn->busy.root);
2478 		spin_unlock(&vn->busy.lock);
2479 
2480 		if (va)
2481 			return va;
2482 	} while ((i = (i + nr_vmap_nodes - 1) % nr_vmap_nodes) != j);
2483 
2484 	return NULL;
2485 }
2486 
find_unlink_vmap_area(unsigned long addr)2487 static struct vmap_area *find_unlink_vmap_area(unsigned long addr)
2488 {
2489 	struct vmap_node *vn;
2490 	struct vmap_area *va;
2491 	int i, j;
2492 
2493 	/*
2494 	 * Check the comment in the find_vmap_area() about the loop.
2495 	 */
2496 	i = j = addr_to_node_id(addr);
2497 	do {
2498 		vn = &vmap_nodes[i];
2499 
2500 		spin_lock(&vn->busy.lock);
2501 		va = __find_vmap_area(addr, &vn->busy.root);
2502 		if (va)
2503 			unlink_va(va, &vn->busy.root);
2504 		spin_unlock(&vn->busy.lock);
2505 
2506 		if (va)
2507 			return va;
2508 	} while ((i = (i + nr_vmap_nodes - 1) % nr_vmap_nodes) != j);
2509 
2510 	return NULL;
2511 }
2512 
2513 /*** Per cpu kva allocator ***/
2514 
2515 /*
2516  * vmap space is limited especially on 32 bit architectures. Ensure there is
2517  * room for at least 16 percpu vmap blocks per CPU.
2518  */
2519 /*
2520  * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
2521  * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
2522  * instead (we just need a rough idea)
2523  */
2524 #if BITS_PER_LONG == 32
2525 #define VMALLOC_SPACE		(128UL*1024*1024)
2526 #else
2527 #define VMALLOC_SPACE		(128UL*1024*1024*1024)
2528 #endif
2529 
2530 #define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
2531 #define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
2532 #define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
2533 #define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
2534 #define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
2535 #define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
2536 #define VMAP_BBMAP_BITS		\
2537 		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
2538 		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
2539 			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
2540 
2541 #define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
2542 
2543 /*
2544  * Purge threshold to prevent overeager purging of fragmented blocks for
2545  * regular operations: Purge if vb->free is less than 1/4 of the capacity.
2546  */
2547 #define VMAP_PURGE_THRESHOLD	(VMAP_BBMAP_BITS / 4)
2548 
2549 #define VMAP_RAM		0x1 /* indicates vm_map_ram area*/
2550 #define VMAP_BLOCK		0x2 /* mark out the vmap_block sub-type*/
2551 #define VMAP_FLAGS_MASK		0x3
2552 
2553 struct vmap_block_queue {
2554 	spinlock_t lock;
2555 	struct list_head free;
2556 
2557 	/*
2558 	 * An xarray requires an extra memory dynamically to
2559 	 * be allocated. If it is an issue, we can use rb-tree
2560 	 * instead.
2561 	 */
2562 	struct xarray vmap_blocks;
2563 };
2564 
2565 struct vmap_block {
2566 	spinlock_t lock;
2567 	struct vmap_area *va;
2568 	unsigned long free, dirty;
2569 	DECLARE_BITMAP(used_map, VMAP_BBMAP_BITS);
2570 	unsigned long dirty_min, dirty_max; /*< dirty range */
2571 	struct list_head free_list;
2572 	struct rcu_head rcu_head;
2573 	struct list_head purge;
2574 	unsigned int cpu;
2575 };
2576 
2577 /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
2578 static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
2579 
2580 /*
2581  * In order to fast access to any "vmap_block" associated with a
2582  * specific address, we use a hash.
2583  *
2584  * A per-cpu vmap_block_queue is used in both ways, to serialize
2585  * an access to free block chains among CPUs(alloc path) and it
2586  * also acts as a vmap_block hash(alloc/free paths). It means we
2587  * overload it, since we already have the per-cpu array which is
2588  * used as a hash table. When used as a hash a 'cpu' passed to
2589  * per_cpu() is not actually a CPU but rather a hash index.
2590  *
2591  * A hash function is addr_to_vb_xa() which hashes any address
2592  * to a specific index(in a hash) it belongs to. This then uses a
2593  * per_cpu() macro to access an array with generated index.
2594  *
2595  * An example:
2596  *
2597  *  CPU_1  CPU_2  CPU_0
2598  *    |      |      |
2599  *    V      V      V
2600  * 0     10     20     30     40     50     60
2601  * |------|------|------|------|------|------|...<vmap address space>
2602  *   CPU0   CPU1   CPU2   CPU0   CPU1   CPU2
2603  *
2604  * - CPU_1 invokes vm_unmap_ram(6), 6 belongs to CPU0 zone, thus
2605  *   it access: CPU0/INDEX0 -> vmap_blocks -> xa_lock;
2606  *
2607  * - CPU_2 invokes vm_unmap_ram(11), 11 belongs to CPU1 zone, thus
2608  *   it access: CPU1/INDEX1 -> vmap_blocks -> xa_lock;
2609  *
2610  * - CPU_0 invokes vm_unmap_ram(20), 20 belongs to CPU2 zone, thus
2611  *   it access: CPU2/INDEX2 -> vmap_blocks -> xa_lock.
2612  *
2613  * This technique almost always avoids lock contention on insert/remove,
2614  * however xarray spinlocks protect against any contention that remains.
2615  */
2616 static struct xarray *
addr_to_vb_xa(unsigned long addr)2617 addr_to_vb_xa(unsigned long addr)
2618 {
2619 	int index = (addr / VMAP_BLOCK_SIZE) % nr_cpu_ids;
2620 
2621 	/*
2622 	 * Please note, nr_cpu_ids points on a highest set
2623 	 * possible bit, i.e. we never invoke cpumask_next()
2624 	 * if an index points on it which is nr_cpu_ids - 1.
2625 	 */
2626 	if (!cpu_possible(index))
2627 		index = cpumask_next(index, cpu_possible_mask);
2628 
2629 	return &per_cpu(vmap_block_queue, index).vmap_blocks;
2630 }
2631 
2632 /*
2633  * We should probably have a fallback mechanism to allocate virtual memory
2634  * out of partially filled vmap blocks. However vmap block sizing should be
2635  * fairly reasonable according to the vmalloc size, so it shouldn't be a
2636  * big problem.
2637  */
2638 
addr_to_vb_idx(unsigned long addr)2639 static unsigned long addr_to_vb_idx(unsigned long addr)
2640 {
2641 	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
2642 	addr /= VMAP_BLOCK_SIZE;
2643 	return addr;
2644 }
2645 
vmap_block_vaddr(unsigned long va_start,unsigned long pages_off)2646 static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
2647 {
2648 	unsigned long addr;
2649 
2650 	addr = va_start + (pages_off << PAGE_SHIFT);
2651 	BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
2652 	return (void *)addr;
2653 }
2654 
2655 /**
2656  * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this
2657  *                  block. Of course pages number can't exceed VMAP_BBMAP_BITS
2658  * @order:    how many 2^order pages should be occupied in newly allocated block
2659  * @gfp_mask: flags for the page level allocator
2660  *
2661  * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
2662  */
new_vmap_block(unsigned int order,gfp_t gfp_mask)2663 static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
2664 {
2665 	struct vmap_block_queue *vbq;
2666 	struct vmap_block *vb;
2667 	struct vmap_area *va;
2668 	struct xarray *xa;
2669 	unsigned long vb_idx;
2670 	int node, err;
2671 	void *vaddr;
2672 
2673 	node = numa_node_id();
2674 
2675 	vb = kmalloc_node(sizeof(struct vmap_block),
2676 			gfp_mask & GFP_RECLAIM_MASK, node);
2677 	if (unlikely(!vb))
2678 		return ERR_PTR(-ENOMEM);
2679 
2680 	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
2681 					VMALLOC_START, VMALLOC_END,
2682 					node, gfp_mask,
2683 					VMAP_RAM|VMAP_BLOCK, NULL);
2684 	if (IS_ERR(va)) {
2685 		kfree(vb);
2686 		return ERR_CAST(va);
2687 	}
2688 
2689 	vaddr = vmap_block_vaddr(va->va_start, 0);
2690 	spin_lock_init(&vb->lock);
2691 	vb->va = va;
2692 	/* At least something should be left free */
2693 	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
2694 	bitmap_zero(vb->used_map, VMAP_BBMAP_BITS);
2695 	vb->free = VMAP_BBMAP_BITS - (1UL << order);
2696 	vb->dirty = 0;
2697 	vb->dirty_min = VMAP_BBMAP_BITS;
2698 	vb->dirty_max = 0;
2699 	bitmap_set(vb->used_map, 0, (1UL << order));
2700 	INIT_LIST_HEAD(&vb->free_list);
2701 	vb->cpu = raw_smp_processor_id();
2702 
2703 	xa = addr_to_vb_xa(va->va_start);
2704 	vb_idx = addr_to_vb_idx(va->va_start);
2705 	err = xa_insert(xa, vb_idx, vb, gfp_mask);
2706 	if (err) {
2707 		kfree(vb);
2708 		free_vmap_area(va);
2709 		return ERR_PTR(err);
2710 	}
2711 	/*
2712 	 * list_add_tail_rcu could happened in another core
2713 	 * rather than vb->cpu due to task migration, which
2714 	 * is safe as list_add_tail_rcu will ensure the list's
2715 	 * integrity together with list_for_each_rcu from read
2716 	 * side.
2717 	 */
2718 	vbq = per_cpu_ptr(&vmap_block_queue, vb->cpu);
2719 	spin_lock(&vbq->lock);
2720 	list_add_tail_rcu(&vb->free_list, &vbq->free);
2721 	spin_unlock(&vbq->lock);
2722 
2723 	return vaddr;
2724 }
2725 
free_vmap_block(struct vmap_block * vb)2726 static void free_vmap_block(struct vmap_block *vb)
2727 {
2728 	struct vmap_node *vn;
2729 	struct vmap_block *tmp;
2730 	struct xarray *xa;
2731 
2732 	xa = addr_to_vb_xa(vb->va->va_start);
2733 	tmp = xa_erase(xa, addr_to_vb_idx(vb->va->va_start));
2734 	BUG_ON(tmp != vb);
2735 
2736 	vn = addr_to_node(vb->va->va_start);
2737 	spin_lock(&vn->busy.lock);
2738 	unlink_va(vb->va, &vn->busy.root);
2739 	spin_unlock(&vn->busy.lock);
2740 
2741 	free_vmap_area_noflush(vb->va);
2742 	kfree_rcu(vb, rcu_head);
2743 }
2744 
purge_fragmented_block(struct vmap_block * vb,struct list_head * purge_list,bool force_purge)2745 static bool purge_fragmented_block(struct vmap_block *vb,
2746 		struct list_head *purge_list, bool force_purge)
2747 {
2748 	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, vb->cpu);
2749 
2750 	if (vb->free + vb->dirty != VMAP_BBMAP_BITS ||
2751 	    vb->dirty == VMAP_BBMAP_BITS)
2752 		return false;
2753 
2754 	/* Don't overeagerly purge usable blocks unless requested */
2755 	if (!(force_purge || vb->free < VMAP_PURGE_THRESHOLD))
2756 		return false;
2757 
2758 	/* prevent further allocs after releasing lock */
2759 	WRITE_ONCE(vb->free, 0);
2760 	/* prevent purging it again */
2761 	WRITE_ONCE(vb->dirty, VMAP_BBMAP_BITS);
2762 	vb->dirty_min = 0;
2763 	vb->dirty_max = VMAP_BBMAP_BITS;
2764 	spin_lock(&vbq->lock);
2765 	list_del_rcu(&vb->free_list);
2766 	spin_unlock(&vbq->lock);
2767 	list_add_tail(&vb->purge, purge_list);
2768 	return true;
2769 }
2770 
free_purged_blocks(struct list_head * purge_list)2771 static void free_purged_blocks(struct list_head *purge_list)
2772 {
2773 	struct vmap_block *vb, *n_vb;
2774 
2775 	list_for_each_entry_safe(vb, n_vb, purge_list, purge) {
2776 		list_del(&vb->purge);
2777 		free_vmap_block(vb);
2778 	}
2779 }
2780 
purge_fragmented_blocks(int cpu)2781 static void purge_fragmented_blocks(int cpu)
2782 {
2783 	LIST_HEAD(purge);
2784 	struct vmap_block *vb;
2785 	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
2786 
2787 	rcu_read_lock();
2788 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
2789 		unsigned long free = READ_ONCE(vb->free);
2790 		unsigned long dirty = READ_ONCE(vb->dirty);
2791 
2792 		if (free + dirty != VMAP_BBMAP_BITS ||
2793 		    dirty == VMAP_BBMAP_BITS)
2794 			continue;
2795 
2796 		spin_lock(&vb->lock);
2797 		purge_fragmented_block(vb, &purge, true);
2798 		spin_unlock(&vb->lock);
2799 	}
2800 	rcu_read_unlock();
2801 	free_purged_blocks(&purge);
2802 }
2803 
purge_fragmented_blocks_allcpus(void)2804 static void purge_fragmented_blocks_allcpus(void)
2805 {
2806 	int cpu;
2807 
2808 	for_each_possible_cpu(cpu)
2809 		purge_fragmented_blocks(cpu);
2810 }
2811 
vb_alloc(unsigned long size,gfp_t gfp_mask)2812 static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
2813 {
2814 	struct vmap_block_queue *vbq;
2815 	struct vmap_block *vb;
2816 	void *vaddr = NULL;
2817 	unsigned int order;
2818 
2819 	BUG_ON(offset_in_page(size));
2820 	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
2821 	if (WARN_ON(size == 0)) {
2822 		/*
2823 		 * Allocating 0 bytes isn't what caller wants since
2824 		 * get_order(0) returns funny result. Just warn and terminate
2825 		 * early.
2826 		 */
2827 		return ERR_PTR(-EINVAL);
2828 	}
2829 	order = get_order(size);
2830 
2831 	rcu_read_lock();
2832 	vbq = raw_cpu_ptr(&vmap_block_queue);
2833 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
2834 		unsigned long pages_off;
2835 
2836 		if (READ_ONCE(vb->free) < (1UL << order))
2837 			continue;
2838 
2839 		spin_lock(&vb->lock);
2840 		if (vb->free < (1UL << order)) {
2841 			spin_unlock(&vb->lock);
2842 			continue;
2843 		}
2844 
2845 		pages_off = VMAP_BBMAP_BITS - vb->free;
2846 		vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
2847 		WRITE_ONCE(vb->free, vb->free - (1UL << order));
2848 		bitmap_set(vb->used_map, pages_off, (1UL << order));
2849 		if (vb->free == 0) {
2850 			spin_lock(&vbq->lock);
2851 			list_del_rcu(&vb->free_list);
2852 			spin_unlock(&vbq->lock);
2853 		}
2854 
2855 		spin_unlock(&vb->lock);
2856 		break;
2857 	}
2858 
2859 	rcu_read_unlock();
2860 
2861 	/* Allocate new block if nothing was found */
2862 	if (!vaddr)
2863 		vaddr = new_vmap_block(order, gfp_mask);
2864 
2865 	return vaddr;
2866 }
2867 
vb_free(unsigned long addr,unsigned long size)2868 static void vb_free(unsigned long addr, unsigned long size)
2869 {
2870 	unsigned long offset;
2871 	unsigned int order;
2872 	struct vmap_block *vb;
2873 	struct xarray *xa;
2874 
2875 	BUG_ON(offset_in_page(size));
2876 	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
2877 
2878 	flush_cache_vunmap(addr, addr + size);
2879 
2880 	order = get_order(size);
2881 	offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;
2882 
2883 	xa = addr_to_vb_xa(addr);
2884 	vb = xa_load(xa, addr_to_vb_idx(addr));
2885 
2886 	spin_lock(&vb->lock);
2887 	bitmap_clear(vb->used_map, offset, (1UL << order));
2888 	spin_unlock(&vb->lock);
2889 
2890 	vunmap_range_noflush(addr, addr + size);
2891 
2892 	if (debug_pagealloc_enabled_static())
2893 		flush_tlb_kernel_range(addr, addr + size);
2894 
2895 	spin_lock(&vb->lock);
2896 
2897 	/* Expand the not yet TLB flushed dirty range */
2898 	vb->dirty_min = min(vb->dirty_min, offset);
2899 	vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
2900 
2901 	WRITE_ONCE(vb->dirty, vb->dirty + (1UL << order));
2902 	if (vb->dirty == VMAP_BBMAP_BITS) {
2903 		BUG_ON(vb->free);
2904 		spin_unlock(&vb->lock);
2905 		free_vmap_block(vb);
2906 	} else
2907 		spin_unlock(&vb->lock);
2908 }
2909 
_vm_unmap_aliases(unsigned long start,unsigned long end,int flush)2910 static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
2911 {
2912 	LIST_HEAD(purge_list);
2913 	int cpu;
2914 
2915 	if (unlikely(!vmap_initialized))
2916 		return;
2917 
2918 	mutex_lock(&vmap_purge_lock);
2919 
2920 	for_each_possible_cpu(cpu) {
2921 		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
2922 		struct vmap_block *vb;
2923 		unsigned long idx;
2924 
2925 		rcu_read_lock();
2926 		xa_for_each(&vbq->vmap_blocks, idx, vb) {
2927 			spin_lock(&vb->lock);
2928 
2929 			/*
2930 			 * Try to purge a fragmented block first. If it's
2931 			 * not purgeable, check whether there is dirty
2932 			 * space to be flushed.
2933 			 */
2934 			if (!purge_fragmented_block(vb, &purge_list, false) &&
2935 			    vb->dirty_max && vb->dirty != VMAP_BBMAP_BITS) {
2936 				unsigned long va_start = vb->va->va_start;
2937 				unsigned long s, e;
2938 
2939 				s = va_start + (vb->dirty_min << PAGE_SHIFT);
2940 				e = va_start + (vb->dirty_max << PAGE_SHIFT);
2941 
2942 				start = min(s, start);
2943 				end   = max(e, end);
2944 
2945 				/* Prevent that this is flushed again */
2946 				vb->dirty_min = VMAP_BBMAP_BITS;
2947 				vb->dirty_max = 0;
2948 
2949 				flush = 1;
2950 			}
2951 			spin_unlock(&vb->lock);
2952 		}
2953 		rcu_read_unlock();
2954 	}
2955 	free_purged_blocks(&purge_list);
2956 
2957 	if (!__purge_vmap_area_lazy(start, end, false) && flush)
2958 		flush_tlb_kernel_range(start, end);
2959 	mutex_unlock(&vmap_purge_lock);
2960 }
2961 
2962 /**
2963  * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
2964  *
2965  * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
2966  * to amortize TLB flushing overheads. What this means is that any page you
2967  * have now, may, in a former life, have been mapped into kernel virtual
2968  * address by the vmap layer and so there might be some CPUs with TLB entries
2969  * still referencing that page (additional to the regular 1:1 kernel mapping).
2970  *
2971  * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
2972  * be sure that none of the pages we have control over will have any aliases
2973  * from the vmap layer.
2974  */
vm_unmap_aliases(void)2975 void vm_unmap_aliases(void)
2976 {
2977 	_vm_unmap_aliases(ULONG_MAX, 0, 0);
2978 }
2979 EXPORT_SYMBOL_GPL(vm_unmap_aliases);
2980 
2981 /**
2982  * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
2983  * @mem: the pointer returned by vm_map_ram
2984  * @count: the count passed to that vm_map_ram call (cannot unmap partial)
2985  */
vm_unmap_ram(const void * mem,unsigned int count)2986 void vm_unmap_ram(const void *mem, unsigned int count)
2987 {
2988 	unsigned long size = (unsigned long)count << PAGE_SHIFT;
2989 	unsigned long addr = (unsigned long)kasan_reset_tag(mem);
2990 	struct vmap_area *va;
2991 
2992 	might_sleep();
2993 	BUG_ON(!addr);
2994 	BUG_ON(addr < VMALLOC_START);
2995 	BUG_ON(addr > VMALLOC_END);
2996 	BUG_ON(!PAGE_ALIGNED(addr));
2997 
2998 	kasan_poison_vmalloc(mem, size);
2999 
3000 	if (likely(count <= VMAP_MAX_ALLOC)) {
3001 		debug_check_no_locks_freed(mem, size);
3002 		vb_free(addr, size);
3003 		return;
3004 	}
3005 
3006 	va = find_unlink_vmap_area(addr);
3007 	if (WARN_ON_ONCE(!va))
3008 		return;
3009 
3010 	debug_check_no_locks_freed((void *)va->va_start, va_size(va));
3011 	free_unmap_vmap_area(va);
3012 }
3013 EXPORT_SYMBOL(vm_unmap_ram);
3014 
3015 /**
3016  * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
3017  * @pages: an array of pointers to the pages to be mapped
3018  * @count: number of pages
3019  * @node: prefer to allocate data structures on this node
3020  *
3021  * If you use this function for less than VMAP_MAX_ALLOC pages, it could be
3022  * faster than vmap so it's good.  But if you mix long-life and short-life
3023  * objects with vm_map_ram(), it could consume lots of address space through
3024  * fragmentation (especially on a 32bit machine).  You could see failures in
3025  * the end.  Please use this function for short-lived objects.
3026  *
3027  * Returns: a pointer to the address that has been mapped, or %NULL on failure
3028  */
vm_map_ram(struct page ** pages,unsigned int count,int node)3029 void *vm_map_ram(struct page **pages, unsigned int count, int node)
3030 {
3031 	unsigned long size = (unsigned long)count << PAGE_SHIFT;
3032 	unsigned long addr;
3033 	void *mem;
3034 
3035 	if (likely(count <= VMAP_MAX_ALLOC)) {
3036 		mem = vb_alloc(size, GFP_KERNEL);
3037 		if (IS_ERR(mem))
3038 			return NULL;
3039 		addr = (unsigned long)mem;
3040 	} else {
3041 		struct vmap_area *va;
3042 		va = alloc_vmap_area(size, PAGE_SIZE,
3043 				VMALLOC_START, VMALLOC_END,
3044 				node, GFP_KERNEL, VMAP_RAM,
3045 				NULL);
3046 		if (IS_ERR(va))
3047 			return NULL;
3048 
3049 		addr = va->va_start;
3050 		mem = (void *)addr;
3051 	}
3052 
3053 	if (vmap_pages_range(addr, addr + size, PAGE_KERNEL,
3054 				pages, PAGE_SHIFT) < 0) {
3055 		vm_unmap_ram(mem, count);
3056 		return NULL;
3057 	}
3058 
3059 	/*
3060 	 * Mark the pages as accessible, now that they are mapped.
3061 	 * With hardware tag-based KASAN, marking is skipped for
3062 	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
3063 	 */
3064 	mem = kasan_unpoison_vmalloc(mem, size, KASAN_VMALLOC_PROT_NORMAL);
3065 
3066 	return mem;
3067 }
3068 EXPORT_SYMBOL(vm_map_ram);
3069 
3070 static struct vm_struct *vmlist __initdata;
3071 
vm_area_page_order(struct vm_struct * vm)3072 static inline unsigned int vm_area_page_order(struct vm_struct *vm)
3073 {
3074 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
3075 	return vm->page_order;
3076 #else
3077 	return 0;
3078 #endif
3079 }
3080 
get_vm_area_page_order(struct vm_struct * vm)3081 unsigned int get_vm_area_page_order(struct vm_struct *vm)
3082 {
3083 	return vm_area_page_order(vm);
3084 }
3085 
set_vm_area_page_order(struct vm_struct * vm,unsigned int order)3086 static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order)
3087 {
3088 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
3089 	vm->page_order = order;
3090 #else
3091 	BUG_ON(order != 0);
3092 #endif
3093 }
3094 
3095 /**
3096  * vm_area_add_early - add vmap area early during boot
3097  * @vm: vm_struct to add
3098  *
3099  * This function is used to add fixed kernel vm area to vmlist before
3100  * vmalloc_init() is called.  @vm->addr, @vm->size, and @vm->flags
3101  * should contain proper values and the other fields should be zero.
3102  *
3103  * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
3104  */
vm_area_add_early(struct vm_struct * vm)3105 void __init vm_area_add_early(struct vm_struct *vm)
3106 {
3107 	struct vm_struct *tmp, **p;
3108 
3109 	BUG_ON(vmap_initialized);
3110 	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
3111 		if (tmp->addr >= vm->addr) {
3112 			BUG_ON(tmp->addr < vm->addr + vm->size);
3113 			break;
3114 		} else
3115 			BUG_ON(tmp->addr + tmp->size > vm->addr);
3116 	}
3117 	vm->next = *p;
3118 	*p = vm;
3119 }
3120 
3121 /**
3122  * vm_area_register_early - register vmap area early during boot
3123  * @vm: vm_struct to register
3124  * @align: requested alignment
3125  *
3126  * This function is used to register kernel vm area before
3127  * vmalloc_init() is called.  @vm->size and @vm->flags should contain
3128  * proper values on entry and other fields should be zero.  On return,
3129  * vm->addr contains the allocated address.
3130  *
3131  * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
3132  */
vm_area_register_early(struct vm_struct * vm,size_t align)3133 void __init vm_area_register_early(struct vm_struct *vm, size_t align)
3134 {
3135 	unsigned long addr = ALIGN(VMALLOC_START, align);
3136 	struct vm_struct *cur, **p;
3137 
3138 	BUG_ON(vmap_initialized);
3139 
3140 	for (p = &vmlist; (cur = *p) != NULL; p = &cur->next) {
3141 		if ((unsigned long)cur->addr - addr >= vm->size)
3142 			break;
3143 		addr = ALIGN((unsigned long)cur->addr + cur->size, align);
3144 	}
3145 
3146 	BUG_ON(addr > VMALLOC_END - vm->size);
3147 	vm->addr = (void *)addr;
3148 	vm->next = *p;
3149 	*p = vm;
3150 	kasan_populate_early_vm_area_shadow(vm->addr, vm->size);
3151 }
3152 
clear_vm_uninitialized_flag(struct vm_struct * vm)3153 static void clear_vm_uninitialized_flag(struct vm_struct *vm)
3154 {
3155 	/*
3156 	 * Before removing VM_UNINITIALIZED,
3157 	 * we should make sure that vm has proper values.
3158 	 * Pair with smp_rmb() in vread_iter() and vmalloc_info_show().
3159 	 */
3160 	smp_wmb();
3161 	vm->flags &= ~VM_UNINITIALIZED;
3162 }
3163 
__get_vm_area_node(unsigned long size,unsigned long align,unsigned long shift,unsigned long flags,unsigned long start,unsigned long end,int node,gfp_t gfp_mask,const void * caller)3164 struct vm_struct *__get_vm_area_node(unsigned long size,
3165 		unsigned long align, unsigned long shift, unsigned long flags,
3166 		unsigned long start, unsigned long end, int node,
3167 		gfp_t gfp_mask, const void *caller)
3168 {
3169 	struct vmap_area *va;
3170 	struct vm_struct *area;
3171 	unsigned long requested_size = size;
3172 
3173 	BUG_ON(in_interrupt());
3174 	size = ALIGN(size, 1ul << shift);
3175 	if (unlikely(!size))
3176 		return NULL;
3177 
3178 	if (flags & VM_IOREMAP)
3179 		align = 1ul << clamp_t(int, get_count_order_long(size),
3180 				       PAGE_SHIFT, IOREMAP_MAX_ORDER);
3181 
3182 	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
3183 	if (unlikely(!area))
3184 		return NULL;
3185 
3186 	if (!(flags & VM_NO_GUARD))
3187 		size += PAGE_SIZE;
3188 
3189 	area->flags = flags;
3190 	area->caller = caller;
3191 	area->requested_size = requested_size;
3192 
3193 	va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area);
3194 	if (IS_ERR(va)) {
3195 		kfree(area);
3196 		return NULL;
3197 	}
3198 
3199 	/*
3200 	 * Mark pages for non-VM_ALLOC mappings as accessible. Do it now as a
3201 	 * best-effort approach, as they can be mapped outside of vmalloc code.
3202 	 * For VM_ALLOC mappings, the pages are marked as accessible after
3203 	 * getting mapped in __vmalloc_node_range().
3204 	 * With hardware tag-based KASAN, marking is skipped for
3205 	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
3206 	 */
3207 	if (!(flags & VM_ALLOC))
3208 		area->addr = kasan_unpoison_vmalloc(area->addr, requested_size,
3209 						    KASAN_VMALLOC_PROT_NORMAL);
3210 
3211 	return area;
3212 }
3213 
__get_vm_area_caller(unsigned long size,unsigned long flags,unsigned long start,unsigned long end,const void * caller)3214 struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
3215 				       unsigned long start, unsigned long end,
3216 				       const void *caller)
3217 {
3218 	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end,
3219 				  NUMA_NO_NODE, GFP_KERNEL, caller);
3220 }
3221 
3222 /**
3223  * get_vm_area - reserve a contiguous kernel virtual area
3224  * @size:	 size of the area
3225  * @flags:	 %VM_IOREMAP for I/O mappings or VM_ALLOC
3226  *
3227  * Search an area of @size in the kernel virtual mapping area,
3228  * and reserved it for out purposes.  Returns the area descriptor
3229  * on success or %NULL on failure.
3230  *
3231  * Return: the area descriptor on success or %NULL on failure.
3232  */
get_vm_area(unsigned long size,unsigned long flags)3233 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
3234 {
3235 	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
3236 				  VMALLOC_START, VMALLOC_END,
3237 				  NUMA_NO_NODE, GFP_KERNEL,
3238 				  __builtin_return_address(0));
3239 }
3240 
get_vm_area_caller(unsigned long size,unsigned long flags,const void * caller)3241 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
3242 				const void *caller)
3243 {
3244 	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
3245 				  VMALLOC_START, VMALLOC_END,
3246 				  NUMA_NO_NODE, GFP_KERNEL, caller);
3247 }
3248 
3249 /**
3250  * find_vm_area - find a continuous kernel virtual area
3251  * @addr:	  base address
3252  *
3253  * Search for the kernel VM area starting at @addr, and return it.
3254  * It is up to the caller to do all required locking to keep the returned
3255  * pointer valid.
3256  *
3257  * Return: the area descriptor on success or %NULL on failure.
3258  */
find_vm_area(const void * addr)3259 struct vm_struct *find_vm_area(const void *addr)
3260 {
3261 	struct vmap_area *va;
3262 
3263 	va = find_vmap_area((unsigned long)addr);
3264 	if (!va)
3265 		return NULL;
3266 
3267 	return va->vm;
3268 }
3269 
3270 /**
3271  * remove_vm_area - find and remove a continuous kernel virtual area
3272  * @addr:	    base address
3273  *
3274  * Search for the kernel VM area starting at @addr, and remove it.
3275  * This function returns the found VM area, but using it is NOT safe
3276  * on SMP machines, except for its size or flags.
3277  *
3278  * Return: the area descriptor on success or %NULL on failure.
3279  */
remove_vm_area(const void * addr)3280 struct vm_struct *remove_vm_area(const void *addr)
3281 {
3282 	struct vmap_area *va;
3283 	struct vm_struct *vm;
3284 
3285 	might_sleep();
3286 
3287 	if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
3288 			addr))
3289 		return NULL;
3290 
3291 	va = find_unlink_vmap_area((unsigned long)addr);
3292 	if (!va || !va->vm)
3293 		return NULL;
3294 	vm = va->vm;
3295 
3296 	debug_check_no_locks_freed(vm->addr, get_vm_area_size(vm));
3297 	debug_check_no_obj_freed(vm->addr, get_vm_area_size(vm));
3298 	kasan_free_module_shadow(vm);
3299 	kasan_poison_vmalloc(vm->addr, get_vm_area_size(vm));
3300 
3301 	free_unmap_vmap_area(va);
3302 	return vm;
3303 }
3304 
set_area_direct_map(const struct vm_struct * area,int (* set_direct_map)(struct page * page))3305 static inline void set_area_direct_map(const struct vm_struct *area,
3306 				       int (*set_direct_map)(struct page *page))
3307 {
3308 	int i;
3309 
3310 	/* HUGE_VMALLOC passes small pages to set_direct_map */
3311 	for (i = 0; i < area->nr_pages; i++)
3312 		if (page_address(area->pages[i]))
3313 			set_direct_map(area->pages[i]);
3314 }
3315 
3316 /*
3317  * Flush the vm mapping and reset the direct map.
3318  */
vm_reset_perms(struct vm_struct * area)3319 static void vm_reset_perms(struct vm_struct *area)
3320 {
3321 	unsigned long start = ULONG_MAX, end = 0;
3322 	unsigned int page_order = vm_area_page_order(area);
3323 	int flush_dmap = 0;
3324 	int i;
3325 
3326 	/*
3327 	 * Find the start and end range of the direct mappings to make sure that
3328 	 * the vm_unmap_aliases() flush includes the direct map.
3329 	 */
3330 	for (i = 0; i < area->nr_pages; i += 1U << page_order) {
3331 		unsigned long addr = (unsigned long)page_address(area->pages[i]);
3332 
3333 		if (addr) {
3334 			unsigned long page_size;
3335 
3336 			page_size = PAGE_SIZE << page_order;
3337 			start = min(addr, start);
3338 			end = max(addr + page_size, end);
3339 			flush_dmap = 1;
3340 		}
3341 	}
3342 
3343 	/*
3344 	 * Set direct map to something invalid so that it won't be cached if
3345 	 * there are any accesses after the TLB flush, then flush the TLB and
3346 	 * reset the direct map permissions to the default.
3347 	 */
3348 	set_area_direct_map(area, set_direct_map_invalid_noflush);
3349 	_vm_unmap_aliases(start, end, flush_dmap);
3350 	set_area_direct_map(area, set_direct_map_default_noflush);
3351 }
3352 
delayed_vfree_work(struct work_struct * w)3353 static void delayed_vfree_work(struct work_struct *w)
3354 {
3355 	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
3356 	struct llist_node *t, *llnode;
3357 
3358 	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
3359 		vfree(llnode);
3360 }
3361 
3362 /**
3363  * vfree_atomic - release memory allocated by vmalloc()
3364  * @addr:	  memory base address
3365  *
3366  * This one is just like vfree() but can be called in any atomic context
3367  * except NMIs.
3368  */
vfree_atomic(const void * addr)3369 void vfree_atomic(const void *addr)
3370 {
3371 	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
3372 
3373 	BUG_ON(in_nmi());
3374 	kmemleak_free(addr);
3375 
3376 	/*
3377 	 * Use raw_cpu_ptr() because this can be called from preemptible
3378 	 * context. Preemption is absolutely fine here, because the llist_add()
3379 	 * implementation is lockless, so it works even if we are adding to
3380 	 * another cpu's list. schedule_work() should be fine with this too.
3381 	 */
3382 	if (addr && llist_add((struct llist_node *)addr, &p->list))
3383 		schedule_work(&p->wq);
3384 }
3385 
3386 /**
3387  * vfree - Release memory allocated by vmalloc()
3388  * @addr:  Memory base address
3389  *
3390  * Free the virtually continuous memory area starting at @addr, as obtained
3391  * from one of the vmalloc() family of APIs.  This will usually also free the
3392  * physical memory underlying the virtual allocation, but that memory is
3393  * reference counted, so it will not be freed until the last user goes away.
3394  *
3395  * If @addr is NULL, no operation is performed.
3396  *
3397  * Context:
3398  * May sleep if called *not* from interrupt context.
3399  * Must not be called in NMI context (strictly speaking, it could be
3400  * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
3401  * conventions for vfree() arch-dependent would be a really bad idea).
3402  */
vfree(const void * addr)3403 void vfree(const void *addr)
3404 {
3405 	struct vm_struct *vm;
3406 	int i;
3407 
3408 	if (unlikely(in_interrupt())) {
3409 		vfree_atomic(addr);
3410 		return;
3411 	}
3412 
3413 	BUG_ON(in_nmi());
3414 	kmemleak_free(addr);
3415 	might_sleep();
3416 
3417 	if (!addr)
3418 		return;
3419 
3420 	vm = remove_vm_area(addr);
3421 	if (unlikely(!vm)) {
3422 		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
3423 				addr);
3424 		return;
3425 	}
3426 
3427 	if (unlikely(vm->flags & VM_FLUSH_RESET_PERMS))
3428 		vm_reset_perms(vm);
3429 	/* All pages of vm should be charged to same memcg, so use first one. */
3430 	if (vm->nr_pages && !(vm->flags & VM_MAP_PUT_PAGES))
3431 		mod_memcg_page_state(vm->pages[0], MEMCG_VMALLOC, -vm->nr_pages);
3432 	for (i = 0; i < vm->nr_pages; i++) {
3433 		struct page *page = vm->pages[i];
3434 
3435 		BUG_ON(!page);
3436 		/*
3437 		 * High-order allocs for huge vmallocs are split, so
3438 		 * can be freed as an array of order-0 allocations
3439 		 */
3440 		__free_page(page);
3441 		cond_resched();
3442 	}
3443 	if (!(vm->flags & VM_MAP_PUT_PAGES))
3444 		atomic_long_sub(vm->nr_pages, &nr_vmalloc_pages);
3445 	kvfree(vm->pages);
3446 	kfree(vm);
3447 }
3448 EXPORT_SYMBOL(vfree);
3449 
3450 /**
3451  * vunmap - release virtual mapping obtained by vmap()
3452  * @addr:   memory base address
3453  *
3454  * Free the virtually contiguous memory area starting at @addr,
3455  * which was created from the page array passed to vmap().
3456  *
3457  * Must not be called in interrupt context.
3458  */
vunmap(const void * addr)3459 void vunmap(const void *addr)
3460 {
3461 	struct vm_struct *vm;
3462 
3463 	BUG_ON(in_interrupt());
3464 	might_sleep();
3465 
3466 	if (!addr)
3467 		return;
3468 	vm = remove_vm_area(addr);
3469 	if (unlikely(!vm)) {
3470 		WARN(1, KERN_ERR "Trying to vunmap() nonexistent vm area (%p)\n",
3471 				addr);
3472 		return;
3473 	}
3474 	kfree(vm);
3475 }
3476 EXPORT_SYMBOL(vunmap);
3477 
3478 /**
3479  * vmap - map an array of pages into virtually contiguous space
3480  * @pages: array of page pointers
3481  * @count: number of pages to map
3482  * @flags: vm_area->flags
3483  * @prot: page protection for the mapping
3484  *
3485  * Maps @count pages from @pages into contiguous kernel virtual space.
3486  * If @flags contains %VM_MAP_PUT_PAGES the ownership of the pages array itself
3487  * (which must be kmalloc or vmalloc memory) and one reference per pages in it
3488  * are transferred from the caller to vmap(), and will be freed / dropped when
3489  * vfree() is called on the return value.
3490  *
3491  * Return: the address of the area or %NULL on failure
3492  */
vmap(struct page ** pages,unsigned int count,unsigned long flags,pgprot_t prot)3493 void *vmap(struct page **pages, unsigned int count,
3494 	   unsigned long flags, pgprot_t prot)
3495 {
3496 	struct vm_struct *area;
3497 	unsigned long addr;
3498 	unsigned long size;		/* In bytes */
3499 
3500 	might_sleep();
3501 
3502 	if (WARN_ON_ONCE(flags & VM_FLUSH_RESET_PERMS))
3503 		return NULL;
3504 
3505 	/*
3506 	 * Your top guard is someone else's bottom guard. Not having a top
3507 	 * guard compromises someone else's mappings too.
3508 	 */
3509 	if (WARN_ON_ONCE(flags & VM_NO_GUARD))
3510 		flags &= ~VM_NO_GUARD;
3511 
3512 	if (count > totalram_pages())
3513 		return NULL;
3514 
3515 	size = (unsigned long)count << PAGE_SHIFT;
3516 	area = get_vm_area_caller(size, flags, __builtin_return_address(0));
3517 	if (!area)
3518 		return NULL;
3519 
3520 	addr = (unsigned long)area->addr;
3521 	if (vmap_pages_range(addr, addr + size, pgprot_nx(prot),
3522 				pages, PAGE_SHIFT) < 0) {
3523 		vunmap(area->addr);
3524 		return NULL;
3525 	}
3526 
3527 	if (flags & VM_MAP_PUT_PAGES) {
3528 		area->pages = pages;
3529 		area->nr_pages = count;
3530 	}
3531 	return area->addr;
3532 }
3533 EXPORT_SYMBOL(vmap);
3534 
3535 #ifdef CONFIG_VMAP_PFN
3536 struct vmap_pfn_data {
3537 	unsigned long	*pfns;
3538 	pgprot_t	prot;
3539 	unsigned int	idx;
3540 };
3541 
vmap_pfn_apply(pte_t * pte,unsigned long addr,void * private)3542 static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private)
3543 {
3544 	struct vmap_pfn_data *data = private;
3545 	unsigned long pfn = data->pfns[data->idx];
3546 	pte_t ptent;
3547 
3548 	if (WARN_ON_ONCE(pfn_valid(pfn)))
3549 		return -EINVAL;
3550 
3551 	ptent = pte_mkspecial(pfn_pte(pfn, data->prot));
3552 	set_pte_at(&init_mm, addr, pte, ptent);
3553 
3554 	data->idx++;
3555 	return 0;
3556 }
3557 
3558 /**
3559  * vmap_pfn - map an array of PFNs into virtually contiguous space
3560  * @pfns: array of PFNs
3561  * @count: number of pages to map
3562  * @prot: page protection for the mapping
3563  *
3564  * Maps @count PFNs from @pfns into contiguous kernel virtual space and returns
3565  * the start address of the mapping.
3566  */
vmap_pfn(unsigned long * pfns,unsigned int count,pgprot_t prot)3567 void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot)
3568 {
3569 	struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) };
3570 	struct vm_struct *area;
3571 
3572 	area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP,
3573 			__builtin_return_address(0));
3574 	if (!area)
3575 		return NULL;
3576 	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
3577 			count * PAGE_SIZE, vmap_pfn_apply, &data)) {
3578 		free_vm_area(area);
3579 		return NULL;
3580 	}
3581 
3582 	flush_cache_vmap((unsigned long)area->addr,
3583 			 (unsigned long)area->addr + count * PAGE_SIZE);
3584 
3585 	return area->addr;
3586 }
3587 EXPORT_SYMBOL_GPL(vmap_pfn);
3588 #endif /* CONFIG_VMAP_PFN */
3589 
3590 static inline unsigned int
vm_area_alloc_pages(gfp_t gfp,int nid,unsigned int order,unsigned int nr_pages,struct page ** pages)3591 vm_area_alloc_pages(gfp_t gfp, int nid,
3592 		unsigned int order, unsigned int nr_pages, struct page **pages)
3593 {
3594 	unsigned int nr_allocated = 0;
3595 	struct page *page;
3596 	int i;
3597 
3598 	/*
3599 	 * For order-0 pages we make use of bulk allocator, if
3600 	 * the page array is partly or not at all populated due
3601 	 * to fails, fallback to a single page allocator that is
3602 	 * more permissive.
3603 	 */
3604 	if (!order) {
3605 		while (nr_allocated < nr_pages) {
3606 			unsigned int nr, nr_pages_request;
3607 
3608 			/*
3609 			 * A maximum allowed request is hard-coded and is 100
3610 			 * pages per call. That is done in order to prevent a
3611 			 * long preemption off scenario in the bulk-allocator
3612 			 * so the range is [1:100].
3613 			 */
3614 			nr_pages_request = min(100U, nr_pages - nr_allocated);
3615 
3616 			/* memory allocation should consider mempolicy, we can't
3617 			 * wrongly use nearest node when nid == NUMA_NO_NODE,
3618 			 * otherwise memory may be allocated in only one node,
3619 			 * but mempolicy wants to alloc memory by interleaving.
3620 			 */
3621 			if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE)
3622 				nr = alloc_pages_bulk_mempolicy_noprof(gfp,
3623 							nr_pages_request,
3624 							pages + nr_allocated);
3625 			else
3626 				nr = alloc_pages_bulk_node_noprof(gfp, nid,
3627 							nr_pages_request,
3628 							pages + nr_allocated);
3629 
3630 			nr_allocated += nr;
3631 
3632 			/*
3633 			 * If zero or pages were obtained partly,
3634 			 * fallback to a single page allocator.
3635 			 */
3636 			if (nr != nr_pages_request)
3637 				break;
3638 		}
3639 	}
3640 
3641 	/* High-order pages or fallback path if "bulk" fails. */
3642 	while (nr_allocated < nr_pages) {
3643 		if (!(gfp & __GFP_NOFAIL) && fatal_signal_pending(current))
3644 			break;
3645 
3646 		if (nid == NUMA_NO_NODE)
3647 			page = alloc_pages_noprof(gfp, order);
3648 		else
3649 			page = alloc_pages_node_noprof(nid, gfp, order);
3650 
3651 		if (unlikely(!page))
3652 			break;
3653 
3654 		/*
3655 		 * High-order allocations must be able to be treated as
3656 		 * independent small pages by callers (as they can with
3657 		 * small-page vmallocs). Some drivers do their own refcounting
3658 		 * on vmalloc_to_page() pages, some use page->mapping,
3659 		 * page->lru, etc.
3660 		 */
3661 		if (order)
3662 			split_page(page, order);
3663 
3664 		/*
3665 		 * Careful, we allocate and map page-order pages, but
3666 		 * tracking is done per PAGE_SIZE page so as to keep the
3667 		 * vm_struct APIs independent of the physical/mapped size.
3668 		 */
3669 		for (i = 0; i < (1U << order); i++)
3670 			pages[nr_allocated + i] = page + i;
3671 
3672 		nr_allocated += 1U << order;
3673 	}
3674 
3675 	return nr_allocated;
3676 }
3677 
__vmalloc_area_node(struct vm_struct * area,gfp_t gfp_mask,pgprot_t prot,unsigned int page_shift,int node)3678 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
3679 				 pgprot_t prot, unsigned int page_shift,
3680 				 int node)
3681 {
3682 	const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
3683 	bool nofail = gfp_mask & __GFP_NOFAIL;
3684 	unsigned long addr = (unsigned long)area->addr;
3685 	unsigned long size = get_vm_area_size(area);
3686 	unsigned long array_size;
3687 	unsigned int nr_small_pages = size >> PAGE_SHIFT;
3688 	unsigned int page_order;
3689 	unsigned int flags;
3690 	int ret;
3691 
3692 	array_size = (unsigned long)nr_small_pages * sizeof(struct page *);
3693 
3694 	if (!(gfp_mask & (GFP_DMA | GFP_DMA32)))
3695 		gfp_mask |= __GFP_HIGHMEM;
3696 
3697 	/* Please note that the recursion is strictly bounded. */
3698 	if (array_size > PAGE_SIZE) {
3699 		area->pages = __vmalloc_node_noprof(array_size, 1, nested_gfp, node,
3700 					area->caller);
3701 	} else {
3702 		area->pages = kmalloc_node_noprof(array_size, nested_gfp, node);
3703 	}
3704 
3705 	if (!area->pages) {
3706 		warn_alloc(gfp_mask, NULL,
3707 			"vmalloc error: size %lu, failed to allocated page array size %lu",
3708 			nr_small_pages * PAGE_SIZE, array_size);
3709 		free_vm_area(area);
3710 		return NULL;
3711 	}
3712 
3713 	set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
3714 	page_order = vm_area_page_order(area);
3715 
3716 	/*
3717 	 * High-order nofail allocations are really expensive and
3718 	 * potentially dangerous (pre-mature OOM, disruptive reclaim
3719 	 * and compaction etc.
3720 	 *
3721 	 * Please note, the __vmalloc_node_range_noprof() falls-back
3722 	 * to order-0 pages if high-order attempt is unsuccessful.
3723 	 */
3724 	area->nr_pages = vm_area_alloc_pages((page_order ?
3725 		gfp_mask & ~__GFP_NOFAIL : gfp_mask) | __GFP_NOWARN,
3726 		node, page_order, nr_small_pages, area->pages);
3727 
3728 	atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
3729 	/* All pages of vm should be charged to same memcg, so use first one. */
3730 	if (gfp_mask & __GFP_ACCOUNT && area->nr_pages)
3731 		mod_memcg_page_state(area->pages[0], MEMCG_VMALLOC,
3732 				     area->nr_pages);
3733 
3734 	/*
3735 	 * If not enough pages were obtained to accomplish an
3736 	 * allocation request, free them via vfree() if any.
3737 	 */
3738 	if (area->nr_pages != nr_small_pages) {
3739 		/*
3740 		 * vm_area_alloc_pages() can fail due to insufficient memory but
3741 		 * also:-
3742 		 *
3743 		 * - a pending fatal signal
3744 		 * - insufficient huge page-order pages
3745 		 *
3746 		 * Since we always retry allocations at order-0 in the huge page
3747 		 * case a warning for either is spurious.
3748 		 */
3749 		if (!fatal_signal_pending(current) && page_order == 0)
3750 			warn_alloc(gfp_mask, NULL,
3751 				"vmalloc error: size %lu, failed to allocate pages",
3752 				area->nr_pages * PAGE_SIZE);
3753 		goto fail;
3754 	}
3755 
3756 	/*
3757 	 * page tables allocations ignore external gfp mask, enforce it
3758 	 * by the scope API
3759 	 */
3760 	if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
3761 		flags = memalloc_nofs_save();
3762 	else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
3763 		flags = memalloc_noio_save();
3764 
3765 	do {
3766 		ret = vmap_pages_range(addr, addr + size, prot, area->pages,
3767 			page_shift);
3768 		if (nofail && (ret < 0))
3769 			schedule_timeout_uninterruptible(1);
3770 	} while (nofail && (ret < 0));
3771 
3772 	if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
3773 		memalloc_nofs_restore(flags);
3774 	else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
3775 		memalloc_noio_restore(flags);
3776 
3777 	if (ret < 0) {
3778 		warn_alloc(gfp_mask, NULL,
3779 			"vmalloc error: size %lu, failed to map pages",
3780 			area->nr_pages * PAGE_SIZE);
3781 		goto fail;
3782 	}
3783 
3784 	return area->addr;
3785 
3786 fail:
3787 	vfree(area->addr);
3788 	return NULL;
3789 }
3790 
3791 /**
3792  * __vmalloc_node_range - allocate virtually contiguous memory
3793  * @size:		  allocation size
3794  * @align:		  desired alignment
3795  * @start:		  vm area range start
3796  * @end:		  vm area range end
3797  * @gfp_mask:		  flags for the page level allocator
3798  * @prot:		  protection mask for the allocated pages
3799  * @vm_flags:		  additional vm area flags (e.g. %VM_NO_GUARD)
3800  * @node:		  node to use for allocation or NUMA_NO_NODE
3801  * @caller:		  caller's return address
3802  *
3803  * Allocate enough pages to cover @size from the page level
3804  * allocator with @gfp_mask flags. Please note that the full set of gfp
3805  * flags are not supported. GFP_KERNEL, GFP_NOFS and GFP_NOIO are all
3806  * supported.
3807  * Zone modifiers are not supported. From the reclaim modifiers
3808  * __GFP_DIRECT_RECLAIM is required (aka GFP_NOWAIT is not supported)
3809  * and only __GFP_NOFAIL is supported (i.e. __GFP_NORETRY and
3810  * __GFP_RETRY_MAYFAIL are not supported).
3811  *
3812  * __GFP_NOWARN can be used to suppress failures messages.
3813  *
3814  * Map them into contiguous kernel virtual space, using a pagetable
3815  * protection of @prot.
3816  *
3817  * Return: the address of the area or %NULL on failure
3818  */
__vmalloc_node_range_noprof(unsigned long size,unsigned long align,unsigned long start,unsigned long end,gfp_t gfp_mask,pgprot_t prot,unsigned long vm_flags,int node,const void * caller)3819 void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
3820 			unsigned long start, unsigned long end, gfp_t gfp_mask,
3821 			pgprot_t prot, unsigned long vm_flags, int node,
3822 			const void *caller)
3823 {
3824 	struct vm_struct *area;
3825 	void *ret;
3826 	kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE;
3827 	unsigned long original_align = align;
3828 	unsigned int shift = PAGE_SHIFT;
3829 
3830 	if (WARN_ON_ONCE(!size))
3831 		return NULL;
3832 
3833 	if ((size >> PAGE_SHIFT) > totalram_pages()) {
3834 		warn_alloc(gfp_mask, NULL,
3835 			"vmalloc error: size %lu, exceeds total pages",
3836 			size);
3837 		return NULL;
3838 	}
3839 
3840 	if (vmap_allow_huge && (vm_flags & VM_ALLOW_HUGE_VMAP)) {
3841 		/*
3842 		 * Try huge pages. Only try for PAGE_KERNEL allocations,
3843 		 * others like modules don't yet expect huge pages in
3844 		 * their allocations due to apply_to_page_range not
3845 		 * supporting them.
3846 		 */
3847 
3848 		if (arch_vmap_pmd_supported(prot) && size >= PMD_SIZE)
3849 			shift = PMD_SHIFT;
3850 		else
3851 			shift = arch_vmap_pte_supported_shift(size);
3852 
3853 		align = max(original_align, 1UL << shift);
3854 	}
3855 
3856 again:
3857 	area = __get_vm_area_node(size, align, shift, VM_ALLOC |
3858 				  VM_UNINITIALIZED | vm_flags, start, end, node,
3859 				  gfp_mask, caller);
3860 	if (!area) {
3861 		bool nofail = gfp_mask & __GFP_NOFAIL;
3862 		warn_alloc(gfp_mask, NULL,
3863 			"vmalloc error: size %lu, vm_struct allocation failed%s",
3864 			size, (nofail) ? ". Retrying." : "");
3865 		if (nofail) {
3866 			schedule_timeout_uninterruptible(1);
3867 			goto again;
3868 		}
3869 		goto fail;
3870 	}
3871 
3872 	/*
3873 	 * Prepare arguments for __vmalloc_area_node() and
3874 	 * kasan_unpoison_vmalloc().
3875 	 */
3876 	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) {
3877 		if (kasan_hw_tags_enabled()) {
3878 			/*
3879 			 * Modify protection bits to allow tagging.
3880 			 * This must be done before mapping.
3881 			 */
3882 			prot = arch_vmap_pgprot_tagged(prot);
3883 
3884 			/*
3885 			 * Skip page_alloc poisoning and zeroing for physical
3886 			 * pages backing VM_ALLOC mapping. Memory is instead
3887 			 * poisoned and zeroed by kasan_unpoison_vmalloc().
3888 			 */
3889 			gfp_mask |= __GFP_SKIP_KASAN | __GFP_SKIP_ZERO;
3890 		}
3891 
3892 		/* Take note that the mapping is PAGE_KERNEL. */
3893 		kasan_flags |= KASAN_VMALLOC_PROT_NORMAL;
3894 	}
3895 
3896 	/* Allocate physical pages and map them into vmalloc space. */
3897 	ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node);
3898 	if (!ret)
3899 		goto fail;
3900 
3901 	/*
3902 	 * Mark the pages as accessible, now that they are mapped.
3903 	 * The condition for setting KASAN_VMALLOC_INIT should complement the
3904 	 * one in post_alloc_hook() with regards to the __GFP_SKIP_ZERO check
3905 	 * to make sure that memory is initialized under the same conditions.
3906 	 * Tag-based KASAN modes only assign tags to normal non-executable
3907 	 * allocations, see __kasan_unpoison_vmalloc().
3908 	 */
3909 	kasan_flags |= KASAN_VMALLOC_VM_ALLOC;
3910 	if (!want_init_on_free() && want_init_on_alloc(gfp_mask) &&
3911 	    (gfp_mask & __GFP_SKIP_ZERO))
3912 		kasan_flags |= KASAN_VMALLOC_INIT;
3913 	/* KASAN_VMALLOC_PROT_NORMAL already set if required. */
3914 	area->addr = kasan_unpoison_vmalloc(area->addr, size, kasan_flags);
3915 
3916 	/*
3917 	 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
3918 	 * flag. It means that vm_struct is not fully initialized.
3919 	 * Now, it is fully initialized, so remove this flag here.
3920 	 */
3921 	clear_vm_uninitialized_flag(area);
3922 
3923 	if (!(vm_flags & VM_DEFER_KMEMLEAK))
3924 		kmemleak_vmalloc(area, PAGE_ALIGN(size), gfp_mask);
3925 
3926 	return area->addr;
3927 
3928 fail:
3929 	if (shift > PAGE_SHIFT) {
3930 		shift = PAGE_SHIFT;
3931 		align = original_align;
3932 		goto again;
3933 	}
3934 
3935 	return NULL;
3936 }
3937 
3938 /**
3939  * __vmalloc_node - allocate virtually contiguous memory
3940  * @size:	    allocation size
3941  * @align:	    desired alignment
3942  * @gfp_mask:	    flags for the page level allocator
3943  * @node:	    node to use for allocation or NUMA_NO_NODE
3944  * @caller:	    caller's return address
3945  *
3946  * Allocate enough pages to cover @size from the page level allocator with
3947  * @gfp_mask flags.  Map them into contiguous kernel virtual space.
3948  *
3949  * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
3950  * and __GFP_NOFAIL are not supported
3951  *
3952  * Any use of gfp flags outside of GFP_KERNEL should be consulted
3953  * with mm people.
3954  *
3955  * Return: pointer to the allocated memory or %NULL on error
3956  */
__vmalloc_node_noprof(unsigned long size,unsigned long align,gfp_t gfp_mask,int node,const void * caller)3957 void *__vmalloc_node_noprof(unsigned long size, unsigned long align,
3958 			    gfp_t gfp_mask, int node, const void *caller)
3959 {
3960 	return __vmalloc_node_range_noprof(size, align, VMALLOC_START, VMALLOC_END,
3961 				gfp_mask, PAGE_KERNEL, 0, node, caller);
3962 }
3963 /*
3964  * This is only for performance analysis of vmalloc and stress purpose.
3965  * It is required by vmalloc test module, therefore do not use it other
3966  * than that.
3967  */
3968 #ifdef CONFIG_TEST_VMALLOC_MODULE
3969 EXPORT_SYMBOL_GPL(__vmalloc_node_noprof);
3970 #endif
3971 
__vmalloc_noprof(unsigned long size,gfp_t gfp_mask)3972 void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask)
3973 {
3974 	return __vmalloc_node_noprof(size, 1, gfp_mask, NUMA_NO_NODE,
3975 				__builtin_return_address(0));
3976 }
3977 EXPORT_SYMBOL(__vmalloc_noprof);
3978 
3979 /**
3980  * vmalloc - allocate virtually contiguous memory
3981  * @size:    allocation size
3982  *
3983  * Allocate enough pages to cover @size from the page level
3984  * allocator and map them into contiguous kernel virtual space.
3985  *
3986  * For tight control over page level allocator and protection flags
3987  * use __vmalloc() instead.
3988  *
3989  * Return: pointer to the allocated memory or %NULL on error
3990  */
vmalloc_noprof(unsigned long size)3991 void *vmalloc_noprof(unsigned long size)
3992 {
3993 	return __vmalloc_node_noprof(size, 1, GFP_KERNEL, NUMA_NO_NODE,
3994 				__builtin_return_address(0));
3995 }
3996 EXPORT_SYMBOL(vmalloc_noprof);
3997 
3998 /**
3999  * vmalloc_huge_node - allocate virtually contiguous memory, allow huge pages
4000  * @size:      allocation size
4001  * @gfp_mask:  flags for the page level allocator
4002  * @node:	    node to use for allocation or NUMA_NO_NODE
4003  *
4004  * Allocate enough pages to cover @size from the page level
4005  * allocator and map them into contiguous kernel virtual space.
4006  * If @size is greater than or equal to PMD_SIZE, allow using
4007  * huge pages for the memory
4008  *
4009  * Return: pointer to the allocated memory or %NULL on error
4010  */
vmalloc_huge_node_noprof(unsigned long size,gfp_t gfp_mask,int node)4011 void *vmalloc_huge_node_noprof(unsigned long size, gfp_t gfp_mask, int node)
4012 {
4013 	return __vmalloc_node_range_noprof(size, 1, VMALLOC_START, VMALLOC_END,
4014 					   gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
4015 					   node, __builtin_return_address(0));
4016 }
4017 EXPORT_SYMBOL_GPL(vmalloc_huge_node_noprof);
4018 
4019 /**
4020  * vzalloc - allocate virtually contiguous memory with zero fill
4021  * @size:    allocation size
4022  *
4023  * Allocate enough pages to cover @size from the page level
4024  * allocator and map them into contiguous kernel virtual space.
4025  * The memory allocated is set to zero.
4026  *
4027  * For tight control over page level allocator and protection flags
4028  * use __vmalloc() instead.
4029  *
4030  * Return: pointer to the allocated memory or %NULL on error
4031  */
vzalloc_noprof(unsigned long size)4032 void *vzalloc_noprof(unsigned long size)
4033 {
4034 	return __vmalloc_node_noprof(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE,
4035 				__builtin_return_address(0));
4036 }
4037 EXPORT_SYMBOL(vzalloc_noprof);
4038 
4039 /**
4040  * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
4041  * @size: allocation size
4042  *
4043  * The resulting memory area is zeroed so it can be mapped to userspace
4044  * without leaking data.
4045  *
4046  * Return: pointer to the allocated memory or %NULL on error
4047  */
vmalloc_user_noprof(unsigned long size)4048 void *vmalloc_user_noprof(unsigned long size)
4049 {
4050 	return __vmalloc_node_range_noprof(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
4051 				    GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
4052 				    VM_USERMAP, NUMA_NO_NODE,
4053 				    __builtin_return_address(0));
4054 }
4055 EXPORT_SYMBOL(vmalloc_user_noprof);
4056 
4057 /**
4058  * vmalloc_node - allocate memory on a specific node
4059  * @size:	  allocation size
4060  * @node:	  numa node
4061  *
4062  * Allocate enough pages to cover @size from the page level
4063  * allocator and map them into contiguous kernel virtual space.
4064  *
4065  * For tight control over page level allocator and protection flags
4066  * use __vmalloc() instead.
4067  *
4068  * Return: pointer to the allocated memory or %NULL on error
4069  */
vmalloc_node_noprof(unsigned long size,int node)4070 void *vmalloc_node_noprof(unsigned long size, int node)
4071 {
4072 	return __vmalloc_node_noprof(size, 1, GFP_KERNEL, node,
4073 			__builtin_return_address(0));
4074 }
4075 EXPORT_SYMBOL(vmalloc_node_noprof);
4076 
4077 /**
4078  * vzalloc_node - allocate memory on a specific node with zero fill
4079  * @size:	allocation size
4080  * @node:	numa node
4081  *
4082  * Allocate enough pages to cover @size from the page level
4083  * allocator and map them into contiguous kernel virtual space.
4084  * The memory allocated is set to zero.
4085  *
4086  * Return: pointer to the allocated memory or %NULL on error
4087  */
vzalloc_node_noprof(unsigned long size,int node)4088 void *vzalloc_node_noprof(unsigned long size, int node)
4089 {
4090 	return __vmalloc_node_noprof(size, 1, GFP_KERNEL | __GFP_ZERO, node,
4091 				__builtin_return_address(0));
4092 }
4093 EXPORT_SYMBOL(vzalloc_node_noprof);
4094 
4095 /**
4096  * vrealloc_node_align_noprof - reallocate virtually contiguous memory; contents
4097  * remain unchanged
4098  * @p: object to reallocate memory for
4099  * @size: the size to reallocate
4100  * @align: requested alignment
4101  * @flags: the flags for the page level allocator
4102  * @nid: node number of the target node
4103  *
4104  * If @p is %NULL, vrealloc_XXX() behaves exactly like vmalloc_XXX(). If @size
4105  * is 0 and @p is not a %NULL pointer, the object pointed to is freed.
4106  *
4107  * If the caller wants the new memory to be on specific node *only*,
4108  * __GFP_THISNODE flag should be set, otherwise the function will try to avoid
4109  * reallocation and possibly disregard the specified @nid.
4110  *
4111  * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
4112  * initial memory allocation, every subsequent call to this API for the same
4113  * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
4114  * __GFP_ZERO is not fully honored by this API.
4115  *
4116  * Requesting an alignment that is bigger than the alignment of the existing
4117  * allocation will fail.
4118  *
4119  * In any case, the contents of the object pointed to are preserved up to the
4120  * lesser of the new and old sizes.
4121  *
4122  * This function must not be called concurrently with itself or vfree() for the
4123  * same memory allocation.
4124  *
4125  * Return: pointer to the allocated memory; %NULL if @size is zero or in case of
4126  *         failure
4127  */
vrealloc_node_align_noprof(const void * p,size_t size,unsigned long align,gfp_t flags,int nid)4128 void *vrealloc_node_align_noprof(const void *p, size_t size, unsigned long align,
4129 				 gfp_t flags, int nid)
4130 {
4131 	struct vm_struct *vm = NULL;
4132 	size_t alloced_size = 0;
4133 	size_t old_size = 0;
4134 	void *n;
4135 
4136 	if (!size) {
4137 		vfree(p);
4138 		return NULL;
4139 	}
4140 
4141 	if (p) {
4142 		vm = find_vm_area(p);
4143 		if (unlikely(!vm)) {
4144 			WARN(1, "Trying to vrealloc() nonexistent vm area (%p)\n", p);
4145 			return NULL;
4146 		}
4147 
4148 		alloced_size = get_vm_area_size(vm);
4149 		old_size = vm->requested_size;
4150 		if (WARN(alloced_size < old_size,
4151 			 "vrealloc() has mismatched area vs requested sizes (%p)\n", p))
4152 			return NULL;
4153 		if (WARN(!IS_ALIGNED((unsigned long)p, align),
4154 			 "will not reallocate with a bigger alignment (0x%lx)\n", align))
4155 			return NULL;
4156 		if (unlikely(flags & __GFP_THISNODE) && nid != NUMA_NO_NODE &&
4157 			     nid != page_to_nid(vmalloc_to_page(p)))
4158 			goto need_realloc;
4159 	}
4160 
4161 	/*
4162 	 * TODO: Shrink the vm_area, i.e. unmap and free unused pages. What
4163 	 * would be a good heuristic for when to shrink the vm_area?
4164 	 */
4165 	if (size <= old_size) {
4166 		/* Zero out "freed" memory, potentially for future realloc. */
4167 		if (want_init_on_free() || want_init_on_alloc(flags))
4168 			memset((void *)p + size, 0, old_size - size);
4169 		vm->requested_size = size;
4170 		kasan_poison_vmalloc(p + size, old_size - size);
4171 		return (void *)p;
4172 	}
4173 
4174 	/*
4175 	 * We already have the bytes available in the allocation; use them.
4176 	 */
4177 	if (size <= alloced_size) {
4178 		kasan_unpoison_vmalloc(p + old_size, size - old_size,
4179 				       KASAN_VMALLOC_PROT_NORMAL);
4180 		/*
4181 		 * No need to zero memory here, as unused memory will have
4182 		 * already been zeroed at initial allocation time or during
4183 		 * realloc shrink time.
4184 		 */
4185 		vm->requested_size = size;
4186 		return (void *)p;
4187 	}
4188 
4189 need_realloc:
4190 	/* TODO: Grow the vm_area, i.e. allocate and map additional pages. */
4191 	n = __vmalloc_node_noprof(size, align, flags, nid, __builtin_return_address(0));
4192 
4193 	if (!n)
4194 		return NULL;
4195 
4196 	if (p) {
4197 		memcpy(n, p, old_size);
4198 		vfree(p);
4199 	}
4200 
4201 	return n;
4202 }
4203 
4204 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
4205 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
4206 #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
4207 #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
4208 #else
4209 /*
4210  * 64b systems should always have either DMA or DMA32 zones. For others
4211  * GFP_DMA32 should do the right thing and use the normal zone.
4212  */
4213 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
4214 #endif
4215 
4216 /**
4217  * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
4218  * @size:	allocation size
4219  *
4220  * Allocate enough 32bit PA addressable pages to cover @size from the
4221  * page level allocator and map them into contiguous kernel virtual space.
4222  *
4223  * Return: pointer to the allocated memory or %NULL on error
4224  */
vmalloc_32_noprof(unsigned long size)4225 void *vmalloc_32_noprof(unsigned long size)
4226 {
4227 	return __vmalloc_node_noprof(size, 1, GFP_VMALLOC32, NUMA_NO_NODE,
4228 			__builtin_return_address(0));
4229 }
4230 EXPORT_SYMBOL(vmalloc_32_noprof);
4231 
4232 /**
4233  * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
4234  * @size:	     allocation size
4235  *
4236  * The resulting memory area is 32bit addressable and zeroed so it can be
4237  * mapped to userspace without leaking data.
4238  *
4239  * Return: pointer to the allocated memory or %NULL on error
4240  */
vmalloc_32_user_noprof(unsigned long size)4241 void *vmalloc_32_user_noprof(unsigned long size)
4242 {
4243 	return __vmalloc_node_range_noprof(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
4244 				    GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
4245 				    VM_USERMAP, NUMA_NO_NODE,
4246 				    __builtin_return_address(0));
4247 }
4248 EXPORT_SYMBOL(vmalloc_32_user_noprof);
4249 
4250 /*
4251  * Atomically zero bytes in the iterator.
4252  *
4253  * Returns the number of zeroed bytes.
4254  */
zero_iter(struct iov_iter * iter,size_t count)4255 static size_t zero_iter(struct iov_iter *iter, size_t count)
4256 {
4257 	size_t remains = count;
4258 
4259 	while (remains > 0) {
4260 		size_t num, copied;
4261 
4262 		num = min_t(size_t, remains, PAGE_SIZE);
4263 		copied = copy_page_to_iter_nofault(ZERO_PAGE(0), 0, num, iter);
4264 		remains -= copied;
4265 
4266 		if (copied < num)
4267 			break;
4268 	}
4269 
4270 	return count - remains;
4271 }
4272 
4273 /*
4274  * small helper routine, copy contents to iter from addr.
4275  * If the page is not present, fill zero.
4276  *
4277  * Returns the number of copied bytes.
4278  */
aligned_vread_iter(struct iov_iter * iter,const char * addr,size_t count)4279 static size_t aligned_vread_iter(struct iov_iter *iter,
4280 				 const char *addr, size_t count)
4281 {
4282 	size_t remains = count;
4283 	struct page *page;
4284 
4285 	while (remains > 0) {
4286 		unsigned long offset, length;
4287 		size_t copied = 0;
4288 
4289 		offset = offset_in_page(addr);
4290 		length = PAGE_SIZE - offset;
4291 		if (length > remains)
4292 			length = remains;
4293 		page = vmalloc_to_page(addr);
4294 		/*
4295 		 * To do safe access to this _mapped_ area, we need lock. But
4296 		 * adding lock here means that we need to add overhead of
4297 		 * vmalloc()/vfree() calls for this _debug_ interface, rarely
4298 		 * used. Instead of that, we'll use an local mapping via
4299 		 * copy_page_to_iter_nofault() and accept a small overhead in
4300 		 * this access function.
4301 		 */
4302 		if (page)
4303 			copied = copy_page_to_iter_nofault(page, offset,
4304 							   length, iter);
4305 		else
4306 			copied = zero_iter(iter, length);
4307 
4308 		addr += copied;
4309 		remains -= copied;
4310 
4311 		if (copied != length)
4312 			break;
4313 	}
4314 
4315 	return count - remains;
4316 }
4317 
4318 /*
4319  * Read from a vm_map_ram region of memory.
4320  *
4321  * Returns the number of copied bytes.
4322  */
vmap_ram_vread_iter(struct iov_iter * iter,const char * addr,size_t count,unsigned long flags)4323 static size_t vmap_ram_vread_iter(struct iov_iter *iter, const char *addr,
4324 				  size_t count, unsigned long flags)
4325 {
4326 	char *start;
4327 	struct vmap_block *vb;
4328 	struct xarray *xa;
4329 	unsigned long offset;
4330 	unsigned int rs, re;
4331 	size_t remains, n;
4332 
4333 	/*
4334 	 * If it's area created by vm_map_ram() interface directly, but
4335 	 * not further subdividing and delegating management to vmap_block,
4336 	 * handle it here.
4337 	 */
4338 	if (!(flags & VMAP_BLOCK))
4339 		return aligned_vread_iter(iter, addr, count);
4340 
4341 	remains = count;
4342 
4343 	/*
4344 	 * Area is split into regions and tracked with vmap_block, read out
4345 	 * each region and zero fill the hole between regions.
4346 	 */
4347 	xa = addr_to_vb_xa((unsigned long) addr);
4348 	vb = xa_load(xa, addr_to_vb_idx((unsigned long)addr));
4349 	if (!vb)
4350 		goto finished_zero;
4351 
4352 	spin_lock(&vb->lock);
4353 	if (bitmap_empty(vb->used_map, VMAP_BBMAP_BITS)) {
4354 		spin_unlock(&vb->lock);
4355 		goto finished_zero;
4356 	}
4357 
4358 	for_each_set_bitrange(rs, re, vb->used_map, VMAP_BBMAP_BITS) {
4359 		size_t copied;
4360 
4361 		if (remains == 0)
4362 			goto finished;
4363 
4364 		start = vmap_block_vaddr(vb->va->va_start, rs);
4365 
4366 		if (addr < start) {
4367 			size_t to_zero = min_t(size_t, start - addr, remains);
4368 			size_t zeroed = zero_iter(iter, to_zero);
4369 
4370 			addr += zeroed;
4371 			remains -= zeroed;
4372 
4373 			if (remains == 0 || zeroed != to_zero)
4374 				goto finished;
4375 		}
4376 
4377 		/*it could start reading from the middle of used region*/
4378 		offset = offset_in_page(addr);
4379 		n = ((re - rs + 1) << PAGE_SHIFT) - offset;
4380 		if (n > remains)
4381 			n = remains;
4382 
4383 		copied = aligned_vread_iter(iter, start + offset, n);
4384 
4385 		addr += copied;
4386 		remains -= copied;
4387 
4388 		if (copied != n)
4389 			goto finished;
4390 	}
4391 
4392 	spin_unlock(&vb->lock);
4393 
4394 finished_zero:
4395 	/* zero-fill the left dirty or free regions */
4396 	return count - remains + zero_iter(iter, remains);
4397 finished:
4398 	/* We couldn't copy/zero everything */
4399 	spin_unlock(&vb->lock);
4400 	return count - remains;
4401 }
4402 
4403 /**
4404  * vread_iter() - read vmalloc area in a safe way to an iterator.
4405  * @iter:         the iterator to which data should be written.
4406  * @addr:         vm address.
4407  * @count:        number of bytes to be read.
4408  *
4409  * This function checks that addr is a valid vmalloc'ed area, and
4410  * copy data from that area to a given buffer. If the given memory range
4411  * of [addr...addr+count) includes some valid address, data is copied to
4412  * proper area of @buf. If there are memory holes, they'll be zero-filled.
4413  * IOREMAP area is treated as memory hole and no copy is done.
4414  *
4415  * If [addr...addr+count) doesn't includes any intersects with alive
4416  * vm_struct area, returns 0. @buf should be kernel's buffer.
4417  *
4418  * Note: In usual ops, vread() is never necessary because the caller
4419  * should know vmalloc() area is valid and can use memcpy().
4420  * This is for routines which have to access vmalloc area without
4421  * any information, as /proc/kcore.
4422  *
4423  * Return: number of bytes for which addr and buf should be increased
4424  * (same number as @count) or %0 if [addr...addr+count) doesn't
4425  * include any intersection with valid vmalloc area
4426  */
vread_iter(struct iov_iter * iter,const char * addr,size_t count)4427 long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
4428 {
4429 	struct vmap_node *vn;
4430 	struct vmap_area *va;
4431 	struct vm_struct *vm;
4432 	char *vaddr;
4433 	size_t n, size, flags, remains;
4434 	unsigned long next;
4435 
4436 	addr = kasan_reset_tag(addr);
4437 
4438 	/* Don't allow overflow */
4439 	if ((unsigned long) addr + count < count)
4440 		count = -(unsigned long) addr;
4441 
4442 	remains = count;
4443 
4444 	vn = find_vmap_area_exceed_addr_lock((unsigned long) addr, &va);
4445 	if (!vn)
4446 		goto finished_zero;
4447 
4448 	/* no intersects with alive vmap_area */
4449 	if ((unsigned long)addr + remains <= va->va_start)
4450 		goto finished_zero;
4451 
4452 	do {
4453 		size_t copied;
4454 
4455 		if (remains == 0)
4456 			goto finished;
4457 
4458 		vm = va->vm;
4459 		flags = va->flags & VMAP_FLAGS_MASK;
4460 		/*
4461 		 * VMAP_BLOCK indicates a sub-type of vm_map_ram area, need
4462 		 * be set together with VMAP_RAM.
4463 		 */
4464 		WARN_ON(flags == VMAP_BLOCK);
4465 
4466 		if (!vm && !flags)
4467 			goto next_va;
4468 
4469 		if (vm && (vm->flags & VM_UNINITIALIZED))
4470 			goto next_va;
4471 
4472 		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
4473 		smp_rmb();
4474 
4475 		vaddr = (char *) va->va_start;
4476 		size = vm ? get_vm_area_size(vm) : va_size(va);
4477 
4478 		if (addr >= vaddr + size)
4479 			goto next_va;
4480 
4481 		if (addr < vaddr) {
4482 			size_t to_zero = min_t(size_t, vaddr - addr, remains);
4483 			size_t zeroed = zero_iter(iter, to_zero);
4484 
4485 			addr += zeroed;
4486 			remains -= zeroed;
4487 
4488 			if (remains == 0 || zeroed != to_zero)
4489 				goto finished;
4490 		}
4491 
4492 		n = vaddr + size - addr;
4493 		if (n > remains)
4494 			n = remains;
4495 
4496 		if (flags & VMAP_RAM)
4497 			copied = vmap_ram_vread_iter(iter, addr, n, flags);
4498 		else if (!(vm && (vm->flags & (VM_IOREMAP | VM_SPARSE))))
4499 			copied = aligned_vread_iter(iter, addr, n);
4500 		else /* IOREMAP | SPARSE area is treated as memory hole */
4501 			copied = zero_iter(iter, n);
4502 
4503 		addr += copied;
4504 		remains -= copied;
4505 
4506 		if (copied != n)
4507 			goto finished;
4508 
4509 	next_va:
4510 		next = va->va_end;
4511 		spin_unlock(&vn->busy.lock);
4512 	} while ((vn = find_vmap_area_exceed_addr_lock(next, &va)));
4513 
4514 finished_zero:
4515 	if (vn)
4516 		spin_unlock(&vn->busy.lock);
4517 
4518 	/* zero-fill memory holes */
4519 	return count - remains + zero_iter(iter, remains);
4520 finished:
4521 	/* Nothing remains, or We couldn't copy/zero everything. */
4522 	if (vn)
4523 		spin_unlock(&vn->busy.lock);
4524 
4525 	return count - remains;
4526 }
4527 
4528 /**
4529  * remap_vmalloc_range_partial - map vmalloc pages to userspace
4530  * @vma:		vma to cover
4531  * @uaddr:		target user address to start at
4532  * @kaddr:		virtual address of vmalloc kernel memory
4533  * @pgoff:		offset from @kaddr to start at
4534  * @size:		size of map area
4535  *
4536  * Returns:	0 for success, -Exxx on failure
4537  *
4538  * This function checks that @kaddr is a valid vmalloc'ed area,
4539  * and that it is big enough to cover the range starting at
4540  * @uaddr in @vma. Will return failure if that criteria isn't
4541  * met.
4542  *
4543  * Similar to remap_pfn_range() (see mm/memory.c)
4544  */
remap_vmalloc_range_partial(struct vm_area_struct * vma,unsigned long uaddr,void * kaddr,unsigned long pgoff,unsigned long size)4545 int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
4546 				void *kaddr, unsigned long pgoff,
4547 				unsigned long size)
4548 {
4549 	struct vm_struct *area;
4550 	unsigned long off;
4551 	unsigned long end_index;
4552 
4553 	if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
4554 		return -EINVAL;
4555 
4556 	size = PAGE_ALIGN(size);
4557 
4558 	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
4559 		return -EINVAL;
4560 
4561 	area = find_vm_area(kaddr);
4562 	if (!area)
4563 		return -EINVAL;
4564 
4565 	if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
4566 		return -EINVAL;
4567 
4568 	if (check_add_overflow(size, off, &end_index) ||
4569 	    end_index > get_vm_area_size(area))
4570 		return -EINVAL;
4571 	kaddr += off;
4572 
4573 	do {
4574 		struct page *page = vmalloc_to_page(kaddr);
4575 		int ret;
4576 
4577 		ret = vm_insert_page(vma, uaddr, page);
4578 		if (ret)
4579 			return ret;
4580 
4581 		uaddr += PAGE_SIZE;
4582 		kaddr += PAGE_SIZE;
4583 		size -= PAGE_SIZE;
4584 	} while (size > 0);
4585 
4586 	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
4587 
4588 	return 0;
4589 }
4590 
4591 /**
4592  * remap_vmalloc_range - map vmalloc pages to userspace
4593  * @vma:		vma to cover (map full range of vma)
4594  * @addr:		vmalloc memory
4595  * @pgoff:		number of pages into addr before first page to map
4596  *
4597  * Returns:	0 for success, -Exxx on failure
4598  *
4599  * This function checks that addr is a valid vmalloc'ed area, and
4600  * that it is big enough to cover the vma. Will return failure if
4601  * that criteria isn't met.
4602  *
4603  * Similar to remap_pfn_range() (see mm/memory.c)
4604  */
remap_vmalloc_range(struct vm_area_struct * vma,void * addr,unsigned long pgoff)4605 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
4606 						unsigned long pgoff)
4607 {
4608 	return remap_vmalloc_range_partial(vma, vma->vm_start,
4609 					   addr, pgoff,
4610 					   vma->vm_end - vma->vm_start);
4611 }
4612 EXPORT_SYMBOL(remap_vmalloc_range);
4613 
free_vm_area(struct vm_struct * area)4614 void free_vm_area(struct vm_struct *area)
4615 {
4616 	struct vm_struct *ret;
4617 	ret = remove_vm_area(area->addr);
4618 	BUG_ON(ret != area);
4619 	kfree(area);
4620 }
4621 EXPORT_SYMBOL_GPL(free_vm_area);
4622 
4623 #ifdef CONFIG_SMP
node_to_va(struct rb_node * n)4624 static struct vmap_area *node_to_va(struct rb_node *n)
4625 {
4626 	return rb_entry_safe(n, struct vmap_area, rb_node);
4627 }
4628 
4629 /**
4630  * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
4631  * @addr: target address
4632  *
4633  * Returns: vmap_area if it is found. If there is no such area
4634  *   the first highest(reverse order) vmap_area is returned
4635  *   i.e. va->va_start < addr && va->va_end < addr or NULL
4636  *   if there are no any areas before @addr.
4637  */
4638 static struct vmap_area *
pvm_find_va_enclose_addr(unsigned long addr)4639 pvm_find_va_enclose_addr(unsigned long addr)
4640 {
4641 	struct vmap_area *va, *tmp;
4642 	struct rb_node *n;
4643 
4644 	n = free_vmap_area_root.rb_node;
4645 	va = NULL;
4646 
4647 	while (n) {
4648 		tmp = rb_entry(n, struct vmap_area, rb_node);
4649 		if (tmp->va_start <= addr) {
4650 			va = tmp;
4651 			if (tmp->va_end >= addr)
4652 				break;
4653 
4654 			n = n->rb_right;
4655 		} else {
4656 			n = n->rb_left;
4657 		}
4658 	}
4659 
4660 	return va;
4661 }
4662 
4663 /**
4664  * pvm_determine_end_from_reverse - find the highest aligned address
4665  * of free block below VMALLOC_END
4666  * @va:
4667  *   in - the VA we start the search(reverse order);
4668  *   out - the VA with the highest aligned end address.
4669  * @align: alignment for required highest address
4670  *
4671  * Returns: determined end address within vmap_area
4672  */
4673 static unsigned long
pvm_determine_end_from_reverse(struct vmap_area ** va,unsigned long align)4674 pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
4675 {
4676 	unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
4677 	unsigned long addr;
4678 
4679 	if (likely(*va)) {
4680 		list_for_each_entry_from_reverse((*va),
4681 				&free_vmap_area_list, list) {
4682 			addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
4683 			if ((*va)->va_start < addr)
4684 				return addr;
4685 		}
4686 	}
4687 
4688 	return 0;
4689 }
4690 
4691 /**
4692  * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
4693  * @offsets: array containing offset of each area
4694  * @sizes: array containing size of each area
4695  * @nr_vms: the number of areas to allocate
4696  * @align: alignment, all entries in @offsets and @sizes must be aligned to this
4697  *
4698  * Returns: kmalloc'd vm_struct pointer array pointing to allocated
4699  *	    vm_structs on success, %NULL on failure
4700  *
4701  * Percpu allocator wants to use congruent vm areas so that it can
4702  * maintain the offsets among percpu areas.  This function allocates
4703  * congruent vmalloc areas for it with GFP_KERNEL.  These areas tend to
4704  * be scattered pretty far, distance between two areas easily going up
4705  * to gigabytes.  To avoid interacting with regular vmallocs, these
4706  * areas are allocated from top.
4707  *
4708  * Despite its complicated look, this allocator is rather simple. It
4709  * does everything top-down and scans free blocks from the end looking
4710  * for matching base. While scanning, if any of the areas do not fit the
4711  * base address is pulled down to fit the area. Scanning is repeated till
4712  * all the areas fit and then all necessary data structures are inserted
4713  * and the result is returned.
4714  */
pcpu_get_vm_areas(const unsigned long * offsets,const size_t * sizes,int nr_vms,size_t align)4715 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
4716 				     const size_t *sizes, int nr_vms,
4717 				     size_t align)
4718 {
4719 	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
4720 	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
4721 	struct vmap_area **vas, *va;
4722 	struct vm_struct **vms;
4723 	int area, area2, last_area, term_area;
4724 	unsigned long base, start, size, end, last_end, orig_start, orig_end;
4725 	bool purged = false;
4726 
4727 	/* verify parameters and allocate data structures */
4728 	BUG_ON(offset_in_page(align) || !is_power_of_2(align));
4729 	for (last_area = 0, area = 0; area < nr_vms; area++) {
4730 		start = offsets[area];
4731 		end = start + sizes[area];
4732 
4733 		/* is everything aligned properly? */
4734 		BUG_ON(!IS_ALIGNED(offsets[area], align));
4735 		BUG_ON(!IS_ALIGNED(sizes[area], align));
4736 
4737 		/* detect the area with the highest address */
4738 		if (start > offsets[last_area])
4739 			last_area = area;
4740 
4741 		for (area2 = area + 1; area2 < nr_vms; area2++) {
4742 			unsigned long start2 = offsets[area2];
4743 			unsigned long end2 = start2 + sizes[area2];
4744 
4745 			BUG_ON(start2 < end && start < end2);
4746 		}
4747 	}
4748 	last_end = offsets[last_area] + sizes[last_area];
4749 
4750 	if (vmalloc_end - vmalloc_start < last_end) {
4751 		WARN_ON(true);
4752 		return NULL;
4753 	}
4754 
4755 	vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
4756 	vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
4757 	if (!vas || !vms)
4758 		goto err_free2;
4759 
4760 	for (area = 0; area < nr_vms; area++) {
4761 		vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL);
4762 		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
4763 		if (!vas[area] || !vms[area])
4764 			goto err_free;
4765 	}
4766 retry:
4767 	spin_lock(&free_vmap_area_lock);
4768 
4769 	/* start scanning - we scan from the top, begin with the last area */
4770 	area = term_area = last_area;
4771 	start = offsets[area];
4772 	end = start + sizes[area];
4773 
4774 	va = pvm_find_va_enclose_addr(vmalloc_end);
4775 	base = pvm_determine_end_from_reverse(&va, align) - end;
4776 
4777 	while (true) {
4778 		/*
4779 		 * base might have underflowed, add last_end before
4780 		 * comparing.
4781 		 */
4782 		if (base + last_end < vmalloc_start + last_end)
4783 			goto overflow;
4784 
4785 		/*
4786 		 * Fitting base has not been found.
4787 		 */
4788 		if (va == NULL)
4789 			goto overflow;
4790 
4791 		/*
4792 		 * If required width exceeds current VA block, move
4793 		 * base downwards and then recheck.
4794 		 */
4795 		if (base + end > va->va_end) {
4796 			base = pvm_determine_end_from_reverse(&va, align) - end;
4797 			term_area = area;
4798 			continue;
4799 		}
4800 
4801 		/*
4802 		 * If this VA does not fit, move base downwards and recheck.
4803 		 */
4804 		if (base + start < va->va_start) {
4805 			va = node_to_va(rb_prev(&va->rb_node));
4806 			base = pvm_determine_end_from_reverse(&va, align) - end;
4807 			term_area = area;
4808 			continue;
4809 		}
4810 
4811 		/*
4812 		 * This area fits, move on to the previous one.  If
4813 		 * the previous one is the terminal one, we're done.
4814 		 */
4815 		area = (area + nr_vms - 1) % nr_vms;
4816 		if (area == term_area)
4817 			break;
4818 
4819 		start = offsets[area];
4820 		end = start + sizes[area];
4821 		va = pvm_find_va_enclose_addr(base + end);
4822 	}
4823 
4824 	/* we've found a fitting base, insert all va's */
4825 	for (area = 0; area < nr_vms; area++) {
4826 		int ret;
4827 
4828 		start = base + offsets[area];
4829 		size = sizes[area];
4830 
4831 		va = pvm_find_va_enclose_addr(start);
4832 		if (WARN_ON_ONCE(va == NULL))
4833 			/* It is a BUG(), but trigger recovery instead. */
4834 			goto recovery;
4835 
4836 		ret = va_clip(&free_vmap_area_root,
4837 			&free_vmap_area_list, va, start, size);
4838 		if (WARN_ON_ONCE(unlikely(ret)))
4839 			/* It is a BUG(), but trigger recovery instead. */
4840 			goto recovery;
4841 
4842 		/* Allocated area. */
4843 		va = vas[area];
4844 		va->va_start = start;
4845 		va->va_end = start + size;
4846 	}
4847 
4848 	spin_unlock(&free_vmap_area_lock);
4849 
4850 	/* populate the kasan shadow space */
4851 	for (area = 0; area < nr_vms; area++) {
4852 		if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area], GFP_KERNEL))
4853 			goto err_free_shadow;
4854 	}
4855 
4856 	/* insert all vm's */
4857 	for (area = 0; area < nr_vms; area++) {
4858 		struct vmap_node *vn = addr_to_node(vas[area]->va_start);
4859 
4860 		spin_lock(&vn->busy.lock);
4861 		insert_vmap_area(vas[area], &vn->busy.root, &vn->busy.head);
4862 		setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
4863 				 pcpu_get_vm_areas);
4864 		spin_unlock(&vn->busy.lock);
4865 	}
4866 
4867 	/*
4868 	 * Mark allocated areas as accessible. Do it now as a best-effort
4869 	 * approach, as they can be mapped outside of vmalloc code.
4870 	 * With hardware tag-based KASAN, marking is skipped for
4871 	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
4872 	 */
4873 	for (area = 0; area < nr_vms; area++)
4874 		vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr,
4875 				vms[area]->size, KASAN_VMALLOC_PROT_NORMAL);
4876 
4877 	kfree(vas);
4878 	return vms;
4879 
4880 recovery:
4881 	/*
4882 	 * Remove previously allocated areas. There is no
4883 	 * need in removing these areas from the busy tree,
4884 	 * because they are inserted only on the final step
4885 	 * and when pcpu_get_vm_areas() is success.
4886 	 */
4887 	while (area--) {
4888 		orig_start = vas[area]->va_start;
4889 		orig_end = vas[area]->va_end;
4890 		va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
4891 				&free_vmap_area_list);
4892 		if (va)
4893 			kasan_release_vmalloc(orig_start, orig_end,
4894 				va->va_start, va->va_end,
4895 				KASAN_VMALLOC_PAGE_RANGE | KASAN_VMALLOC_TLB_FLUSH);
4896 		vas[area] = NULL;
4897 	}
4898 
4899 overflow:
4900 	spin_unlock(&free_vmap_area_lock);
4901 	if (!purged) {
4902 		reclaim_and_purge_vmap_areas();
4903 		purged = true;
4904 
4905 		/* Before "retry", check if we recover. */
4906 		for (area = 0; area < nr_vms; area++) {
4907 			if (vas[area])
4908 				continue;
4909 
4910 			vas[area] = kmem_cache_zalloc(
4911 				vmap_area_cachep, GFP_KERNEL);
4912 			if (!vas[area])
4913 				goto err_free;
4914 		}
4915 
4916 		goto retry;
4917 	}
4918 
4919 err_free:
4920 	for (area = 0; area < nr_vms; area++) {
4921 		if (vas[area])
4922 			kmem_cache_free(vmap_area_cachep, vas[area]);
4923 
4924 		kfree(vms[area]);
4925 	}
4926 err_free2:
4927 	kfree(vas);
4928 	kfree(vms);
4929 	return NULL;
4930 
4931 err_free_shadow:
4932 	spin_lock(&free_vmap_area_lock);
4933 	/*
4934 	 * We release all the vmalloc shadows, even the ones for regions that
4935 	 * hadn't been successfully added. This relies on kasan_release_vmalloc
4936 	 * being able to tolerate this case.
4937 	 */
4938 	for (area = 0; area < nr_vms; area++) {
4939 		orig_start = vas[area]->va_start;
4940 		orig_end = vas[area]->va_end;
4941 		va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
4942 				&free_vmap_area_list);
4943 		if (va)
4944 			kasan_release_vmalloc(orig_start, orig_end,
4945 				va->va_start, va->va_end,
4946 				KASAN_VMALLOC_PAGE_RANGE | KASAN_VMALLOC_TLB_FLUSH);
4947 		vas[area] = NULL;
4948 		kfree(vms[area]);
4949 	}
4950 	spin_unlock(&free_vmap_area_lock);
4951 	kfree(vas);
4952 	kfree(vms);
4953 	return NULL;
4954 }
4955 
4956 /**
4957  * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
4958  * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
4959  * @nr_vms: the number of allocated areas
4960  *
4961  * Free vm_structs and the array allocated by pcpu_get_vm_areas().
4962  */
pcpu_free_vm_areas(struct vm_struct ** vms,int nr_vms)4963 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
4964 {
4965 	int i;
4966 
4967 	for (i = 0; i < nr_vms; i++)
4968 		free_vm_area(vms[i]);
4969 	kfree(vms);
4970 }
4971 #endif	/* CONFIG_SMP */
4972 
4973 #ifdef CONFIG_PRINTK
vmalloc_dump_obj(void * object)4974 bool vmalloc_dump_obj(void *object)
4975 {
4976 	const void *caller;
4977 	struct vm_struct *vm;
4978 	struct vmap_area *va;
4979 	struct vmap_node *vn;
4980 	unsigned long addr;
4981 	unsigned int nr_pages;
4982 
4983 	addr = PAGE_ALIGN((unsigned long) object);
4984 	vn = addr_to_node(addr);
4985 
4986 	if (!spin_trylock(&vn->busy.lock))
4987 		return false;
4988 
4989 	va = __find_vmap_area(addr, &vn->busy.root);
4990 	if (!va || !va->vm) {
4991 		spin_unlock(&vn->busy.lock);
4992 		return false;
4993 	}
4994 
4995 	vm = va->vm;
4996 	addr = (unsigned long) vm->addr;
4997 	caller = vm->caller;
4998 	nr_pages = vm->nr_pages;
4999 	spin_unlock(&vn->busy.lock);
5000 
5001 	pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
5002 		nr_pages, addr, caller);
5003 
5004 	return true;
5005 }
5006 #endif
5007 
5008 #ifdef CONFIG_PROC_FS
5009 
5010 /*
5011  * Print number of pages allocated on each memory node.
5012  *
5013  * This function can only be called if CONFIG_NUMA is enabled
5014  * and VM_UNINITIALIZED bit in v->flags is disabled.
5015  */
show_numa_info(struct seq_file * m,struct vm_struct * v,unsigned int * counters)5016 static void show_numa_info(struct seq_file *m, struct vm_struct *v,
5017 				 unsigned int *counters)
5018 {
5019 	unsigned int nr;
5020 	unsigned int step = 1U << vm_area_page_order(v);
5021 
5022 	if (!counters)
5023 		return;
5024 
5025 	memset(counters, 0, nr_node_ids * sizeof(unsigned int));
5026 
5027 	for (nr = 0; nr < v->nr_pages; nr += step)
5028 		counters[page_to_nid(v->pages[nr])] += step;
5029 	for_each_node_state(nr, N_HIGH_MEMORY)
5030 		if (counters[nr])
5031 			seq_printf(m, " N%u=%u", nr, counters[nr]);
5032 }
5033 
show_purge_info(struct seq_file * m)5034 static void show_purge_info(struct seq_file *m)
5035 {
5036 	struct vmap_node *vn;
5037 	struct vmap_area *va;
5038 
5039 	for_each_vmap_node(vn) {
5040 		spin_lock(&vn->lazy.lock);
5041 		list_for_each_entry(va, &vn->lazy.head, list) {
5042 			seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
5043 				(void *)va->va_start, (void *)va->va_end,
5044 				va_size(va));
5045 		}
5046 		spin_unlock(&vn->lazy.lock);
5047 	}
5048 }
5049 
vmalloc_info_show(struct seq_file * m,void * p)5050 static int vmalloc_info_show(struct seq_file *m, void *p)
5051 {
5052 	struct vmap_node *vn;
5053 	struct vmap_area *va;
5054 	struct vm_struct *v;
5055 	unsigned int *counters;
5056 
5057 	if (IS_ENABLED(CONFIG_NUMA))
5058 		counters = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
5059 
5060 	for_each_vmap_node(vn) {
5061 		spin_lock(&vn->busy.lock);
5062 		list_for_each_entry(va, &vn->busy.head, list) {
5063 			if (!va->vm) {
5064 				if (va->flags & VMAP_RAM)
5065 					seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
5066 						(void *)va->va_start, (void *)va->va_end,
5067 						va_size(va));
5068 
5069 				continue;
5070 			}
5071 
5072 			v = va->vm;
5073 			if (v->flags & VM_UNINITIALIZED)
5074 				continue;
5075 
5076 			/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
5077 			smp_rmb();
5078 
5079 			seq_printf(m, "0x%pK-0x%pK %7ld",
5080 				v->addr, v->addr + v->size, v->size);
5081 
5082 			if (v->caller)
5083 				seq_printf(m, " %pS", v->caller);
5084 
5085 			if (v->nr_pages)
5086 				seq_printf(m, " pages=%d", v->nr_pages);
5087 
5088 			if (v->phys_addr)
5089 				seq_printf(m, " phys=%pa", &v->phys_addr);
5090 
5091 			if (v->flags & VM_IOREMAP)
5092 				seq_puts(m, " ioremap");
5093 
5094 			if (v->flags & VM_SPARSE)
5095 				seq_puts(m, " sparse");
5096 
5097 			if (v->flags & VM_ALLOC)
5098 				seq_puts(m, " vmalloc");
5099 
5100 			if (v->flags & VM_MAP)
5101 				seq_puts(m, " vmap");
5102 
5103 			if (v->flags & VM_USERMAP)
5104 				seq_puts(m, " user");
5105 
5106 			if (v->flags & VM_DMA_COHERENT)
5107 				seq_puts(m, " dma-coherent");
5108 
5109 			if (is_vmalloc_addr(v->pages))
5110 				seq_puts(m, " vpages");
5111 
5112 			if (IS_ENABLED(CONFIG_NUMA))
5113 				show_numa_info(m, v, counters);
5114 
5115 			seq_putc(m, '\n');
5116 		}
5117 		spin_unlock(&vn->busy.lock);
5118 	}
5119 
5120 	/*
5121 	 * As a final step, dump "unpurged" areas.
5122 	 */
5123 	show_purge_info(m);
5124 	if (IS_ENABLED(CONFIG_NUMA))
5125 		kfree(counters);
5126 	return 0;
5127 }
5128 
proc_vmalloc_init(void)5129 static int __init proc_vmalloc_init(void)
5130 {
5131 	proc_create_single("vmallocinfo", 0400, NULL, vmalloc_info_show);
5132 	return 0;
5133 }
5134 module_init(proc_vmalloc_init);
5135 
5136 #endif
5137 
vmap_init_free_space(void)5138 static void __init vmap_init_free_space(void)
5139 {
5140 	unsigned long vmap_start = 1;
5141 	const unsigned long vmap_end = ULONG_MAX;
5142 	struct vmap_area *free;
5143 	struct vm_struct *busy;
5144 
5145 	/*
5146 	 *     B     F     B     B     B     F
5147 	 * -|-----|.....|-----|-----|-----|.....|-
5148 	 *  |           The KVA space           |
5149 	 *  |<--------------------------------->|
5150 	 */
5151 	for (busy = vmlist; busy; busy = busy->next) {
5152 		if ((unsigned long) busy->addr - vmap_start > 0) {
5153 			free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
5154 			if (!WARN_ON_ONCE(!free)) {
5155 				free->va_start = vmap_start;
5156 				free->va_end = (unsigned long) busy->addr;
5157 
5158 				insert_vmap_area_augment(free, NULL,
5159 					&free_vmap_area_root,
5160 						&free_vmap_area_list);
5161 			}
5162 		}
5163 
5164 		vmap_start = (unsigned long) busy->addr + busy->size;
5165 	}
5166 
5167 	if (vmap_end - vmap_start > 0) {
5168 		free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
5169 		if (!WARN_ON_ONCE(!free)) {
5170 			free->va_start = vmap_start;
5171 			free->va_end = vmap_end;
5172 
5173 			insert_vmap_area_augment(free, NULL,
5174 				&free_vmap_area_root,
5175 					&free_vmap_area_list);
5176 		}
5177 	}
5178 }
5179 
vmap_init_nodes(void)5180 static void vmap_init_nodes(void)
5181 {
5182 	struct vmap_node *vn;
5183 	int i;
5184 
5185 #if BITS_PER_LONG == 64
5186 	/*
5187 	 * A high threshold of max nodes is fixed and bound to 128,
5188 	 * thus a scale factor is 1 for systems where number of cores
5189 	 * are less or equal to specified threshold.
5190 	 *
5191 	 * As for NUMA-aware notes. For bigger systems, for example
5192 	 * NUMA with multi-sockets, where we can end-up with thousands
5193 	 * of cores in total, a "sub-numa-clustering" should be added.
5194 	 *
5195 	 * In this case a NUMA domain is considered as a single entity
5196 	 * with dedicated sub-nodes in it which describe one group or
5197 	 * set of cores. Therefore a per-domain purging is supposed to
5198 	 * be added as well as a per-domain balancing.
5199 	 */
5200 	int n = clamp_t(unsigned int, num_possible_cpus(), 1, 128);
5201 
5202 	if (n > 1) {
5203 		vn = kmalloc_array(n, sizeof(*vn), GFP_NOWAIT);
5204 		if (vn) {
5205 			/* Node partition is 16 pages. */
5206 			vmap_zone_size = (1 << 4) * PAGE_SIZE;
5207 			nr_vmap_nodes = n;
5208 			vmap_nodes = vn;
5209 		} else {
5210 			pr_err("Failed to allocate an array. Disable a node layer\n");
5211 		}
5212 	}
5213 #endif
5214 
5215 	for_each_vmap_node(vn) {
5216 		vn->busy.root = RB_ROOT;
5217 		INIT_LIST_HEAD(&vn->busy.head);
5218 		spin_lock_init(&vn->busy.lock);
5219 
5220 		vn->lazy.root = RB_ROOT;
5221 		INIT_LIST_HEAD(&vn->lazy.head);
5222 		spin_lock_init(&vn->lazy.lock);
5223 
5224 		for (i = 0; i < MAX_VA_SIZE_PAGES; i++) {
5225 			INIT_LIST_HEAD(&vn->pool[i].head);
5226 			WRITE_ONCE(vn->pool[i].len, 0);
5227 		}
5228 
5229 		spin_lock_init(&vn->pool_lock);
5230 	}
5231 }
5232 
5233 static unsigned long
vmap_node_shrink_count(struct shrinker * shrink,struct shrink_control * sc)5234 vmap_node_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
5235 {
5236 	unsigned long count = 0;
5237 	struct vmap_node *vn;
5238 	int i;
5239 
5240 	for_each_vmap_node(vn) {
5241 		for (i = 0; i < MAX_VA_SIZE_PAGES; i++)
5242 			count += READ_ONCE(vn->pool[i].len);
5243 	}
5244 
5245 	return count ? count : SHRINK_EMPTY;
5246 }
5247 
5248 static unsigned long
vmap_node_shrink_scan(struct shrinker * shrink,struct shrink_control * sc)5249 vmap_node_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
5250 {
5251 	struct vmap_node *vn;
5252 
5253 	for_each_vmap_node(vn)
5254 		decay_va_pool_node(vn, true);
5255 
5256 	return SHRINK_STOP;
5257 }
5258 
vmalloc_init(void)5259 void __init vmalloc_init(void)
5260 {
5261 	struct shrinker *vmap_node_shrinker;
5262 	struct vmap_area *va;
5263 	struct vmap_node *vn;
5264 	struct vm_struct *tmp;
5265 	int i;
5266 
5267 	/*
5268 	 * Create the cache for vmap_area objects.
5269 	 */
5270 	vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);
5271 
5272 	for_each_possible_cpu(i) {
5273 		struct vmap_block_queue *vbq;
5274 		struct vfree_deferred *p;
5275 
5276 		vbq = &per_cpu(vmap_block_queue, i);
5277 		spin_lock_init(&vbq->lock);
5278 		INIT_LIST_HEAD(&vbq->free);
5279 		p = &per_cpu(vfree_deferred, i);
5280 		init_llist_head(&p->list);
5281 		INIT_WORK(&p->wq, delayed_vfree_work);
5282 		xa_init(&vbq->vmap_blocks);
5283 	}
5284 
5285 	/*
5286 	 * Setup nodes before importing vmlist.
5287 	 */
5288 	vmap_init_nodes();
5289 
5290 	/* Import existing vmlist entries. */
5291 	for (tmp = vmlist; tmp; tmp = tmp->next) {
5292 		va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
5293 		if (WARN_ON_ONCE(!va))
5294 			continue;
5295 
5296 		va->va_start = (unsigned long)tmp->addr;
5297 		va->va_end = va->va_start + tmp->size;
5298 		va->vm = tmp;
5299 
5300 		vn = addr_to_node(va->va_start);
5301 		insert_vmap_area(va, &vn->busy.root, &vn->busy.head);
5302 	}
5303 
5304 	/*
5305 	 * Now we can initialize a free vmap space.
5306 	 */
5307 	vmap_init_free_space();
5308 	vmap_initialized = true;
5309 
5310 	vmap_node_shrinker = shrinker_alloc(0, "vmap-node");
5311 	if (!vmap_node_shrinker) {
5312 		pr_err("Failed to allocate vmap-node shrinker!\n");
5313 		return;
5314 	}
5315 
5316 	vmap_node_shrinker->count_objects = vmap_node_shrink_count;
5317 	vmap_node_shrinker->scan_objects = vmap_node_shrink_scan;
5318 	shrinker_register(vmap_node_shrinker);
5319 }
5320