xref: /linux/mm/vmalloc.c (revision ae22a94997b8a03dcb3c922857c203246711f9d4)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  Copyright (C) 1993  Linus Torvalds
4  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
5  *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
6  *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
7  *  Numa awareness, Christoph Lameter, SGI, June 2005
8  *  Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019
9  */
10 
11 #include <linux/vmalloc.h>
12 #include <linux/mm.h>
13 #include <linux/module.h>
14 #include <linux/highmem.h>
15 #include <linux/sched/signal.h>
16 #include <linux/slab.h>
17 #include <linux/spinlock.h>
18 #include <linux/interrupt.h>
19 #include <linux/proc_fs.h>
20 #include <linux/seq_file.h>
21 #include <linux/set_memory.h>
22 #include <linux/debugobjects.h>
23 #include <linux/kallsyms.h>
24 #include <linux/list.h>
25 #include <linux/notifier.h>
26 #include <linux/rbtree.h>
27 #include <linux/xarray.h>
28 #include <linux/io.h>
29 #include <linux/rcupdate.h>
30 #include <linux/pfn.h>
31 #include <linux/kmemleak.h>
32 #include <linux/atomic.h>
33 #include <linux/compiler.h>
34 #include <linux/memcontrol.h>
35 #include <linux/llist.h>
36 #include <linux/uio.h>
37 #include <linux/bitops.h>
38 #include <linux/rbtree_augmented.h>
39 #include <linux/overflow.h>
40 #include <linux/pgtable.h>
41 #include <linux/hugetlb.h>
42 #include <linux/sched/mm.h>
43 #include <asm/tlbflush.h>
44 #include <asm/shmparam.h>
45 
46 #define CREATE_TRACE_POINTS
47 #include <trace/events/vmalloc.h>
48 
49 #include "internal.h"
50 #include "pgalloc-track.h"
51 
52 #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
53 static unsigned int __ro_after_init ioremap_max_page_shift = BITS_PER_LONG - 1;
54 
55 static int __init set_nohugeiomap(char *str)
56 {
57 	ioremap_max_page_shift = PAGE_SHIFT;
58 	return 0;
59 }
60 early_param("nohugeiomap", set_nohugeiomap);
61 #else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
62 static const unsigned int ioremap_max_page_shift = PAGE_SHIFT;
63 #endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */
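
/*
 * In other words, ioremap_max_page_shift caps the largest page size that
 * vmap_range_noflush() may use for I/O mappings. The default of
 * BITS_PER_LONG - 1 effectively means "no cap" (the per-level arch checks
 * decide), while booting with "nohugeiomap" lowers it to PAGE_SHIFT so
 * that only base pages are used.
 */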
64 
65 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
66 static bool __ro_after_init vmap_allow_huge = true;
67 
68 static int __init set_nohugevmalloc(char *str)
69 {
70 	vmap_allow_huge = false;
71 	return 0;
72 }
73 early_param("nohugevmalloc", set_nohugevmalloc);
74 #else /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
75 static const bool vmap_allow_huge = false;
76 #endif	/* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
77 
78 bool is_vmalloc_addr(const void *x)
79 {
80 	unsigned long addr = (unsigned long)kasan_reset_tag(x);
81 
82 	return addr >= VMALLOC_START && addr < VMALLOC_END;
83 }
84 EXPORT_SYMBOL(is_vmalloc_addr);
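
/*
 * For example (illustrative only), a caller that may be handed either a
 * vmalloc address or a lowmem (direct-map) address can pick the right
 * page translation based on this helper:
 *
 *	struct page *page = is_vmalloc_addr(buf) ?
 *			vmalloc_to_page(buf) : virt_to_page(buf);
 */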
85 
86 struct vfree_deferred {
87 	struct llist_head list;
88 	struct work_struct wq;
89 };
90 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
91 
92 /*** Page table manipulation functions ***/
93 static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
94 			phys_addr_t phys_addr, pgprot_t prot,
95 			unsigned int max_page_shift, pgtbl_mod_mask *mask)
96 {
97 	pte_t *pte;
98 	u64 pfn;
99 	unsigned long size = PAGE_SIZE;
100 
101 	pfn = phys_addr >> PAGE_SHIFT;
102 	pte = pte_alloc_kernel_track(pmd, addr, mask);
103 	if (!pte)
104 		return -ENOMEM;
105 	do {
106 		BUG_ON(!pte_none(ptep_get(pte)));
107 
108 #ifdef CONFIG_HUGETLB_PAGE
109 		size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift);
110 		if (size != PAGE_SIZE) {
111 			pte_t entry = pfn_pte(pfn, prot);
112 
113 			entry = arch_make_huge_pte(entry, ilog2(size), 0);
114 			set_huge_pte_at(&init_mm, addr, pte, entry, size);
115 			pfn += PFN_DOWN(size);
116 			continue;
117 		}
118 #endif
119 		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
120 		pfn++;
121 	} while (pte += PFN_DOWN(size), addr += size, addr != end);
122 	*mask |= PGTBL_PTE_MODIFIED;
123 	return 0;
124 }
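
/*
 * Note that the CONFIG_HUGETLB_PAGE branch above allows an architecture to
 * map a suitably sized and aligned run of PTEs with one larger entry, via
 * arch_vmap_pte_range_map_size() and arch_make_huge_pte(). Otherwise the
 * loop maps one base page per iteration.
 */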
125 
126 static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
127 			phys_addr_t phys_addr, pgprot_t prot,
128 			unsigned int max_page_shift)
129 {
130 	if (max_page_shift < PMD_SHIFT)
131 		return 0;
132 
133 	if (!arch_vmap_pmd_supported(prot))
134 		return 0;
135 
136 	if ((end - addr) != PMD_SIZE)
137 		return 0;
138 
139 	if (!IS_ALIGNED(addr, PMD_SIZE))
140 		return 0;
141 
142 	if (!IS_ALIGNED(phys_addr, PMD_SIZE))
143 		return 0;
144 
145 	if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
146 		return 0;
147 
148 	return pmd_set_huge(pmd, phys_addr, prot);
149 }
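
/*
 * A PMD-sized huge mapping is attempted only when all of the checks above
 * pass: the requested maximum page size allows it, the architecture
 * supports it for this protection, the remaining range is exactly
 * PMD_SIZE, and both the virtual and physical addresses are PMD-aligned.
 * Otherwise the caller falls back to PTE-level mappings. The PUD and P4D
 * helpers below follow the same pattern one level up.
 */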
150 
151 static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
152 			phys_addr_t phys_addr, pgprot_t prot,
153 			unsigned int max_page_shift, pgtbl_mod_mask *mask)
154 {
155 	pmd_t *pmd;
156 	unsigned long next;
157 
158 	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
159 	if (!pmd)
160 		return -ENOMEM;
161 	do {
162 		next = pmd_addr_end(addr, end);
163 
164 		if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot,
165 					max_page_shift)) {
166 			*mask |= PGTBL_PMD_MODIFIED;
167 			continue;
168 		}
169 
170 		if (vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask))
171 			return -ENOMEM;
172 	} while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
173 	return 0;
174 }
175 
176 static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
177 			phys_addr_t phys_addr, pgprot_t prot,
178 			unsigned int max_page_shift)
179 {
180 	if (max_page_shift < PUD_SHIFT)
181 		return 0;
182 
183 	if (!arch_vmap_pud_supported(prot))
184 		return 0;
185 
186 	if ((end - addr) != PUD_SIZE)
187 		return 0;
188 
189 	if (!IS_ALIGNED(addr, PUD_SIZE))
190 		return 0;
191 
192 	if (!IS_ALIGNED(phys_addr, PUD_SIZE))
193 		return 0;
194 
195 	if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
196 		return 0;
197 
198 	return pud_set_huge(pud, phys_addr, prot);
199 }
200 
201 static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
202 			phys_addr_t phys_addr, pgprot_t prot,
203 			unsigned int max_page_shift, pgtbl_mod_mask *mask)
204 {
205 	pud_t *pud;
206 	unsigned long next;
207 
208 	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
209 	if (!pud)
210 		return -ENOMEM;
211 	do {
212 		next = pud_addr_end(addr, end);
213 
214 		if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot,
215 					max_page_shift)) {
216 			*mask |= PGTBL_PUD_MODIFIED;
217 			continue;
218 		}
219 
220 		if (vmap_pmd_range(pud, addr, next, phys_addr, prot,
221 					max_page_shift, mask))
222 			return -ENOMEM;
223 	} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
224 	return 0;
225 }
226 
227 static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
228 			phys_addr_t phys_addr, pgprot_t prot,
229 			unsigned int max_page_shift)
230 {
231 	if (max_page_shift < P4D_SHIFT)
232 		return 0;
233 
234 	if (!arch_vmap_p4d_supported(prot))
235 		return 0;
236 
237 	if ((end - addr) != P4D_SIZE)
238 		return 0;
239 
240 	if (!IS_ALIGNED(addr, P4D_SIZE))
241 		return 0;
242 
243 	if (!IS_ALIGNED(phys_addr, P4D_SIZE))
244 		return 0;
245 
246 	if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
247 		return 0;
248 
249 	return p4d_set_huge(p4d, phys_addr, prot);
250 }
251 
252 static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
253 			phys_addr_t phys_addr, pgprot_t prot,
254 			unsigned int max_page_shift, pgtbl_mod_mask *mask)
255 {
256 	p4d_t *p4d;
257 	unsigned long next;
258 
259 	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
260 	if (!p4d)
261 		return -ENOMEM;
262 	do {
263 		next = p4d_addr_end(addr, end);
264 
265 		if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot,
266 					max_page_shift)) {
267 			*mask |= PGTBL_P4D_MODIFIED;
268 			continue;
269 		}
270 
271 		if (vmap_pud_range(p4d, addr, next, phys_addr, prot,
272 					max_page_shift, mask))
273 			return -ENOMEM;
274 	} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
275 	return 0;
276 }
277 
278 static int vmap_range_noflush(unsigned long addr, unsigned long end,
279 			phys_addr_t phys_addr, pgprot_t prot,
280 			unsigned int max_page_shift)
281 {
282 	pgd_t *pgd;
283 	unsigned long start;
284 	unsigned long next;
285 	int err;
286 	pgtbl_mod_mask mask = 0;
287 
288 	might_sleep();
289 	BUG_ON(addr >= end);
290 
291 	start = addr;
292 	pgd = pgd_offset_k(addr);
293 	do {
294 		next = pgd_addr_end(addr, end);
295 		err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
296 					max_page_shift, &mask);
297 		if (err)
298 			break;
299 	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);
300 
301 	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
302 		arch_sync_kernel_mappings(start, end);
303 
304 	return err;
305 }
306 
307 int vmap_page_range(unsigned long addr, unsigned long end,
308 		    phys_addr_t phys_addr, pgprot_t prot)
309 {
310 	int err;
311 
312 	err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
313 				 ioremap_max_page_shift);
314 	flush_cache_vmap(addr, end);
315 	if (!err)
316 		err = kmsan_ioremap_page_range(addr, end, phys_addr, prot,
317 					       ioremap_max_page_shift);
318 	return err;
319 }
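
/*
 * vmap_page_range() maps the physically contiguous range starting at
 * @phys_addr into [addr:end). Note that pgprot_nx() strips execute
 * permission, so ranges mapped through this path (e.g. ioremap) are never
 * executable.
 */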
320 
321 int ioremap_page_range(unsigned long addr, unsigned long end,
322 		phys_addr_t phys_addr, pgprot_t prot)
323 {
324 	struct vm_struct *area;
325 
326 	area = find_vm_area((void *)addr);
327 	if (!area || !(area->flags & VM_IOREMAP)) {
328 		WARN_ONCE(1, "vm_area at addr %lx is not marked as VM_IOREMAP\n", addr);
329 		return -EINVAL;
330 	}
331 	if (addr != (unsigned long)area->addr ||
332 	    (void *)end != area->addr + get_vm_area_size(area)) {
333 		WARN_ONCE(1, "ioremap request [%lx,%lx) doesn't match vm_area [%lx, %lx)\n",
334 			  addr, end, (long)area->addr,
335 			  (long)area->addr + get_vm_area_size(area));
336 		return -ERANGE;
337 	}
338 	return vmap_page_range(addr, end, phys_addr, prot);
339 }
340 
341 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
342 			     pgtbl_mod_mask *mask)
343 {
344 	pte_t *pte;
345 
346 	pte = pte_offset_kernel(pmd, addr);
347 	do {
348 		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
349 		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
350 	} while (pte++, addr += PAGE_SIZE, addr != end);
351 	*mask |= PGTBL_PTE_MODIFIED;
352 }
353 
354 static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
355 			     pgtbl_mod_mask *mask)
356 {
357 	pmd_t *pmd;
358 	unsigned long next;
359 	int cleared;
360 
361 	pmd = pmd_offset(pud, addr);
362 	do {
363 		next = pmd_addr_end(addr, end);
364 
365 		cleared = pmd_clear_huge(pmd);
366 		if (cleared || pmd_bad(*pmd))
367 			*mask |= PGTBL_PMD_MODIFIED;
368 
369 		if (cleared)
370 			continue;
371 		if (pmd_none_or_clear_bad(pmd))
372 			continue;
373 		vunmap_pte_range(pmd, addr, next, mask);
374 
375 		cond_resched();
376 	} while (pmd++, addr = next, addr != end);
377 }
378 
379 static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
380 			     pgtbl_mod_mask *mask)
381 {
382 	pud_t *pud;
383 	unsigned long next;
384 	int cleared;
385 
386 	pud = pud_offset(p4d, addr);
387 	do {
388 		next = pud_addr_end(addr, end);
389 
390 		cleared = pud_clear_huge(pud);
391 		if (cleared || pud_bad(*pud))
392 			*mask |= PGTBL_PUD_MODIFIED;
393 
394 		if (cleared)
395 			continue;
396 		if (pud_none_or_clear_bad(pud))
397 			continue;
398 		vunmap_pmd_range(pud, addr, next, mask);
399 	} while (pud++, addr = next, addr != end);
400 }
401 
402 static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
403 			     pgtbl_mod_mask *mask)
404 {
405 	p4d_t *p4d;
406 	unsigned long next;
407 
408 	p4d = p4d_offset(pgd, addr);
409 	do {
410 		next = p4d_addr_end(addr, end);
411 
412 		p4d_clear_huge(p4d);
413 		if (p4d_bad(*p4d))
414 			*mask |= PGTBL_P4D_MODIFIED;
415 
416 		if (p4d_none_or_clear_bad(p4d))
417 			continue;
418 		vunmap_pud_range(p4d, addr, next, mask);
419 	} while (p4d++, addr = next, addr != end);
420 }
421 
422 /*
423  * vunmap_range_noflush is similar to vunmap_range, but does not
424  * flush caches or TLBs.
425  *
426  * The caller is responsible for calling flush_cache_vunmap() before calling
427  * this function, and flush_tlb_kernel_range after it has returned
428  * successfully (and before the addresses are expected to cause a page fault
429  * or be re-mapped for something else, if TLB flushes are being delayed or
430  * coalesced).
431  *
432  * This is an internal function only. Do not use outside mm/.
433  */
434 void __vunmap_range_noflush(unsigned long start, unsigned long end)
435 {
436 	unsigned long next;
437 	pgd_t *pgd;
438 	unsigned long addr = start;
439 	pgtbl_mod_mask mask = 0;
440 
441 	BUG_ON(addr >= end);
442 	pgd = pgd_offset_k(addr);
443 	do {
444 		next = pgd_addr_end(addr, end);
445 		if (pgd_bad(*pgd))
446 			mask |= PGTBL_PGD_MODIFIED;
447 		if (pgd_none_or_clear_bad(pgd))
448 			continue;
449 		vunmap_p4d_range(pgd, addr, next, &mask);
450 	} while (pgd++, addr = next, addr != end);
451 
452 	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
453 		arch_sync_kernel_mappings(start, end);
454 }
455 
456 void vunmap_range_noflush(unsigned long start, unsigned long end)
457 {
458 	kmsan_vunmap_range_noflush(start, end);
459 	__vunmap_range_noflush(start, end);
460 }
461 
462 /**
463  * vunmap_range - unmap kernel virtual addresses
464  * @addr: start of the VM area to unmap
465  * @end: end of the VM area to unmap (non-inclusive)
466  *
467  * Clears any present PTEs in the virtual address range, flushes TLBs and
468  * caches. Any subsequent access to the address before it has been re-mapped
469  * is a kernel bug.
470  */
471 void vunmap_range(unsigned long addr, unsigned long end)
472 {
473 	flush_cache_vunmap(addr, end);
474 	vunmap_range_noflush(addr, end);
475 	flush_tlb_kernel_range(addr, end);
476 }
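
/*
 * The teardown order is therefore: flush_cache_vunmap(), clear the page
 * tables, then flush_tlb_kernel_range(). Callers that batch or delay TLB
 * flushes use vunmap_range_noflush() directly and are responsible for the
 * flushes themselves, as described above __vunmap_range_noflush().
 */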
477 
478 static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
479 		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
480 		pgtbl_mod_mask *mask)
481 {
482 	pte_t *pte;
483 
484 	/*
485 	 * nr is a running index into the array which helps higher level
486 	 * callers keep track of where we're up to.
487 	 */
488 
489 	pte = pte_alloc_kernel_track(pmd, addr, mask);
490 	if (!pte)
491 		return -ENOMEM;
492 	do {
493 		struct page *page = pages[*nr];
494 
495 		if (WARN_ON(!pte_none(ptep_get(pte))))
496 			return -EBUSY;
497 		if (WARN_ON(!page))
498 			return -ENOMEM;
499 		if (WARN_ON(!pfn_valid(page_to_pfn(page))))
500 			return -EINVAL;
501 
502 		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
503 		(*nr)++;
504 	} while (pte++, addr += PAGE_SIZE, addr != end);
505 	*mask |= PGTBL_PTE_MODIFIED;
506 	return 0;
507 }
508 
509 static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
510 		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
511 		pgtbl_mod_mask *mask)
512 {
513 	pmd_t *pmd;
514 	unsigned long next;
515 
516 	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
517 	if (!pmd)
518 		return -ENOMEM;
519 	do {
520 		next = pmd_addr_end(addr, end);
521 		if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask))
522 			return -ENOMEM;
523 	} while (pmd++, addr = next, addr != end);
524 	return 0;
525 }
526 
527 static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr,
528 		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
529 		pgtbl_mod_mask *mask)
530 {
531 	pud_t *pud;
532 	unsigned long next;
533 
534 	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
535 	if (!pud)
536 		return -ENOMEM;
537 	do {
538 		next = pud_addr_end(addr, end);
539 		if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask))
540 			return -ENOMEM;
541 	} while (pud++, addr = next, addr != end);
542 	return 0;
543 }
544 
545 static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr,
546 		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
547 		pgtbl_mod_mask *mask)
548 {
549 	p4d_t *p4d;
550 	unsigned long next;
551 
552 	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
553 	if (!p4d)
554 		return -ENOMEM;
555 	do {
556 		next = p4d_addr_end(addr, end);
557 		if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask))
558 			return -ENOMEM;
559 	} while (p4d++, addr = next, addr != end);
560 	return 0;
561 }
562 
563 static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
564 		pgprot_t prot, struct page **pages)
565 {
566 	unsigned long start = addr;
567 	pgd_t *pgd;
568 	unsigned long next;
569 	int err = 0;
570 	int nr = 0;
571 	pgtbl_mod_mask mask = 0;
572 
573 	BUG_ON(addr >= end);
574 	pgd = pgd_offset_k(addr);
575 	do {
576 		next = pgd_addr_end(addr, end);
577 		if (pgd_bad(*pgd))
578 			mask |= PGTBL_PGD_MODIFIED;
579 		err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
580 		if (err)
581 			return err;
582 	} while (pgd++, addr = next, addr != end);
583 
584 	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
585 		arch_sync_kernel_mappings(start, end);
586 
587 	return 0;
588 }
589 
590 /*
591  * vmap_pages_range_noflush is similar to vmap_pages_range, but does not
592  * flush caches.
593  *
594  * The caller is responsible for calling flush_cache_vmap() after this
595  * function returns successfully and before the addresses are accessed.
596  *
597  * This is an internal function only. Do not use outside mm/.
598  */
599 int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
600 		pgprot_t prot, struct page **pages, unsigned int page_shift)
601 {
602 	unsigned int i, nr = (end - addr) >> PAGE_SHIFT;
603 
604 	WARN_ON(page_shift < PAGE_SHIFT);
605 
606 	if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) ||
607 			page_shift == PAGE_SHIFT)
608 		return vmap_small_pages_range_noflush(addr, end, prot, pages);
609 
610 	for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) {
611 		int err;
612 
613 		err = vmap_range_noflush(addr, addr + (1UL << page_shift),
614 					page_to_phys(pages[i]), prot,
615 					page_shift);
616 		if (err)
617 			return err;
618 
619 		addr += 1UL << page_shift;
620 	}
621 
622 	return 0;
623 }
624 
625 int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
626 		pgprot_t prot, struct page **pages, unsigned int page_shift)
627 {
628 	int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages,
629 						 page_shift);
630 
631 	if (ret)
632 		return ret;
633 	return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
634 }
635 
636 /**
637  * vmap_pages_range - map pages to a kernel virtual address
638  * @addr: start of the VM area to map
639  * @end: end of the VM area to map (non-inclusive)
640  * @prot: page protection flags to use
641  * @pages: pages to map (always PAGE_SIZE pages)
642  * @page_shift: maximum shift that the pages may be mapped with, @pages must
643  * be aligned and contiguous up to at least this shift.
644  *
645  * RETURNS:
646  * 0 on success, -errno on failure.
647  */
648 static int vmap_pages_range(unsigned long addr, unsigned long end,
649 		pgprot_t prot, struct page **pages, unsigned int page_shift)
650 {
651 	int err;
652 
653 	err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
654 	flush_cache_vmap(addr, end);
655 	return err;
656 }
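
/*
 * For example (illustrative, assuming 4K base pages), mapping with
 * page_shift == PMD_SHIFT makes __vmap_pages_range_noflush() consume the
 * @pages array in steps of 512 entries and map each step with a single 2M
 * leaf entry, so pages[i..i + 511] must be physically contiguous and the
 * first page of each step PMD-aligned.
 */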
657 
658 static int check_sparse_vm_area(struct vm_struct *area, unsigned long start,
659 				unsigned long end)
660 {
661 	might_sleep();
662 	if (WARN_ON_ONCE(area->flags & VM_FLUSH_RESET_PERMS))
663 		return -EINVAL;
664 	if (WARN_ON_ONCE(area->flags & VM_NO_GUARD))
665 		return -EINVAL;
666 	if (WARN_ON_ONCE(!(area->flags & VM_SPARSE)))
667 		return -EINVAL;
668 	if ((end - start) >> PAGE_SHIFT > totalram_pages())
669 		return -E2BIG;
670 	if (start < (unsigned long)area->addr ||
671 	    (void *)end > area->addr + get_vm_area_size(area))
672 		return -ERANGE;
673 	return 0;
674 }
675 
676 /**
677  * vm_area_map_pages - map pages inside given sparse vm_area
678  * @area: vm_area
679  * @start: start address inside vm_area
680  * @end: end address inside vm_area
681  * @pages: pages to map (always PAGE_SIZE pages)
682  */
683 int vm_area_map_pages(struct vm_struct *area, unsigned long start,
684 		      unsigned long end, struct page **pages)
685 {
686 	int err;
687 
688 	err = check_sparse_vm_area(area, start, end);
689 	if (err)
690 		return err;
691 
692 	return vmap_pages_range(start, end, PAGE_KERNEL, pages, PAGE_SHIFT);
693 }
694 
695 /**
696  * vm_area_unmap_pages - unmap pages inside given sparse vm_area
697  * @area: vm_area
698  * @start: start address inside vm_area
699  * @end: end address inside vm_area
700  */
701 void vm_area_unmap_pages(struct vm_struct *area, unsigned long start,
702 			 unsigned long end)
703 {
704 	if (check_sparse_vm_area(area, start, end))
705 		return;
706 
707 	vunmap_range(start, end);
708 }
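
/*
 * A typical user of a sparse area (an illustrative sketch, the size and
 * offsets are made up) reserves the KVA once and then populates and
 * depopulates it on demand:
 *
 *	struct vm_struct *area = get_vm_area(SZ_4G, VM_SPARSE);
 *
 *	vm_area_map_pages(area, start, start + nr * PAGE_SIZE, pages);
 *	...
 *	vm_area_unmap_pages(area, start, start + nr * PAGE_SIZE);
 */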
709 
710 int is_vmalloc_or_module_addr(const void *x)
711 {
712 	/*
713 	 * ARM, x86-64 and sparc64 put modules in a special place,
714 	 * and fall back on vmalloc() if that fails. Others
715  * just put them in the vmalloc space.
716 	 */
717 #if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
718 	unsigned long addr = (unsigned long)kasan_reset_tag(x);
719 	if (addr >= MODULES_VADDR && addr < MODULES_END)
720 		return 1;
721 #endif
722 	return is_vmalloc_addr(x);
723 }
724 EXPORT_SYMBOL_GPL(is_vmalloc_or_module_addr);
725 
726 /*
727  * Walk a vmap address to the struct page it maps. Huge vmap mappings will
728  * return the tail page that corresponds to the base page address, which
729  * matches small vmap mappings.
730  */
731 struct page *vmalloc_to_page(const void *vmalloc_addr)
732 {
733 	unsigned long addr = (unsigned long) vmalloc_addr;
734 	struct page *page = NULL;
735 	pgd_t *pgd = pgd_offset_k(addr);
736 	p4d_t *p4d;
737 	pud_t *pud;
738 	pmd_t *pmd;
739 	pte_t *ptep, pte;
740 
741 	/*
742 	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
743 	 * architectures that do not vmalloc module space
744 	 */
745 	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
746 
747 	if (pgd_none(*pgd))
748 		return NULL;
749 	if (WARN_ON_ONCE(pgd_leaf(*pgd)))
750 		return NULL; /* XXX: no allowance for huge pgd */
751 	if (WARN_ON_ONCE(pgd_bad(*pgd)))
752 		return NULL;
753 
754 	p4d = p4d_offset(pgd, addr);
755 	if (p4d_none(*p4d))
756 		return NULL;
757 	if (p4d_leaf(*p4d))
758 		return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
759 	if (WARN_ON_ONCE(p4d_bad(*p4d)))
760 		return NULL;
761 
762 	pud = pud_offset(p4d, addr);
763 	if (pud_none(*pud))
764 		return NULL;
765 	if (pud_leaf(*pud))
766 		return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
767 	if (WARN_ON_ONCE(pud_bad(*pud)))
768 		return NULL;
769 
770 	pmd = pmd_offset(pud, addr);
771 	if (pmd_none(*pmd))
772 		return NULL;
773 	if (pmd_leaf(*pmd))
774 		return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
775 	if (WARN_ON_ONCE(pmd_bad(*pmd)))
776 		return NULL;
777 
778 	ptep = pte_offset_kernel(pmd, addr);
779 	pte = ptep_get(ptep);
780 	if (pte_present(pte))
781 		page = pte_page(pte);
782 
783 	return page;
784 }
785 EXPORT_SYMBOL(vmalloc_to_page);
786 
787 /*
788  * Map a vmalloc()-space virtual address to the physical page frame number.
789  */
790 unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
791 {
792 	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
793 }
794 EXPORT_SYMBOL(vmalloc_to_pfn);
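
/*
 * For example (illustrative), collecting the backing pages of a
 * vmalloc'ed buffer one base page at a time:
 *
 *	for (off = 0; off < size; off += PAGE_SIZE)
 *		pages[off >> PAGE_SHIFT] = vmalloc_to_page(buf + off);
 */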
795 
796 
797 /*** Global kva allocator ***/
798 
799 #define DEBUG_AUGMENT_PROPAGATE_CHECK 0
800 #define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0
801 
802 
803 static DEFINE_SPINLOCK(free_vmap_area_lock);
804 static bool vmap_initialized __read_mostly;
805 
806 /*
807  * This kmem_cache is used for vmap_area objects. Instead of
808  * allocating a fresh object from the slab allocator each time, we
809  * reuse a preloaded object from this cache to make things faster,
810  * especially for the "no edge" split of a free block.
811  */
812 static struct kmem_cache *vmap_area_cachep;
813 
814 /*
815  * This linked list is used in conjunction with free_vmap_area_root.
816  * It gives O(1) access to prev/next to perform fast coalescing.
817  */
818 static LIST_HEAD(free_vmap_area_list);
819 
820 /*
821  * This augmented red-black tree represents the free vmap space.
822  * All vmap_area objects in this tree are sorted by va->va_start
823  * address. It is used for allocation and merging when a vmap
824  * object is released.
825  *
826  * Each vmap_area node caches the maximum available free block size
827  * of its sub-tree, left or right. Therefore it is possible to
828  * find the lowest-address free area that is big enough.
829  */
830 static struct rb_root free_vmap_area_root = RB_ROOT;
831 
832 /*
833  * Preload a CPU with one object for the "no edge" split case. The
834  * aim is to avoid allocating from atomic context, and thus to be
835  * able to use more permissive allocation masks.
836  */
837 static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);
838 
839 /*
840  * This structure defines a single, solid model where a list and
841  * rb-tree are part of one entity protected by the lock. Nodes are
842  * sorted in ascending order, so the list provides O(1) access to
843  * left/right neighbors as well as sequential traversal.
844  */
845 struct rb_list {
846 	struct rb_root root;
847 	struct list_head head;
848 	spinlock_t lock;
849 };
850 
851 /*
852  * A fast size storage contains VAs of up to MAX_VA_SIZE_PAGES pages
853  * (1M with 4K pages). A pool is a list of ready-to-go VAs of one size:
854  * index i of the pool array holds VAs of (i + 1) pages.
855  */
856 #define MAX_VA_SIZE_PAGES 256
857 
858 struct vmap_pool {
859 	struct list_head head;
860 	unsigned long len;
861 };
862 
863 /*
864  * An effective vmap-node logic. Users make use of nodes instead
865  * of a global heap, which balances accesses and mitigates lock
866  * contention.
867  */
868 static struct vmap_node {
869 	/* Simple size segregated storage. */
870 	struct vmap_pool pool[MAX_VA_SIZE_PAGES];
871 	spinlock_t pool_lock;
872 	bool skip_populate;
873 
874 	/* Bookkeeping data of this node. */
875 	struct rb_list busy;
876 	struct rb_list lazy;
877 
878 	/*
879 	 * Ready-to-free areas.
880 	 */
881 	struct list_head purge_list;
882 	struct work_struct purge_work;
883 	unsigned long nr_purged;
884 } single;
885 
886 /*
887  * The initial setup consists of one single node, i.e. balancing
888  * is fully disabled. Later on, after vmap is initialized, these
889  * parameters are updated based on the system's capacity.
890  */
891 static struct vmap_node *vmap_nodes = &single;
892 static __read_mostly unsigned int nr_vmap_nodes = 1;
893 static __read_mostly unsigned int vmap_zone_size = 1;
894 
895 static inline unsigned int
896 addr_to_node_id(unsigned long addr)
897 {
898 	return (addr / vmap_zone_size) % nr_vmap_nodes;
899 }
900 
901 static inline struct vmap_node *
902 addr_to_node(unsigned long addr)
903 {
904 	return &vmap_nodes[addr_to_node_id(addr)];
905 }
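
/*
 * For example (illustrative numbers), with a vmap_zone_size of 4M and four
 * nodes, consecutive 4M stripes of the address space map round-robin to
 * nodes 0, 1, 2, 3, 0, 1, ... so a given address always resolves to the
 * same node while the load is spread across all of them.
 */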
906 
907 static inline struct vmap_node *
908 id_to_node(unsigned int id)
909 {
910 	return &vmap_nodes[id % nr_vmap_nodes];
911 }
912 
913 /*
914  * We use the value 0 to represent "no node", which is why
915  * an encoded value is the node-id incremented by 1. It is
916  * always greater than 0. The range of node_ids that can be
917  * encoded is [0:nr_vmap_nodes - 1]. If the passed node_id
918  * is not valid, 0 is returned.
919  */
920 static unsigned int
921 encode_vn_id(unsigned int node_id)
922 {
923 	/* Can store U8_MAX [0:254] nodes. */
924 	if (node_id < nr_vmap_nodes)
925 		return (node_id + 1) << BITS_PER_BYTE;
926 
927 	/* Warn and no node encoded. */
928 	WARN_ONCE(1, "Encode wrong node id (%u)\n", node_id);
929 	return 0;
930 }
931 
932 /*
933  * Returns the decoded node-id; the valid range is
934  * [0:nr_vmap_nodes-1]. Otherwise nr_vmap_nodes is
935  * returned if the extracted data is wrong.
936  */
937 static unsigned int
938 decode_vn_id(unsigned int val)
939 {
940 	unsigned int node_id = (val >> BITS_PER_BYTE) - 1;
941 
942 	/* Can store U8_MAX [0:254] nodes. */
943 	if (node_id < nr_vmap_nodes)
944 		return node_id;
945 
946 	/* If it was _not_ zero, warn. */
947 	WARN_ONCE(node_id != UINT_MAX,
948 		"Decode wrong node id (%d)\n", node_id);
949 
950 	return nr_vmap_nodes;
951 }
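
/*
 * Worked example: encode_vn_id(3) yields (3 + 1) << 8 == 0x400, and
 * decode_vn_id(0x400) yields (0x400 >> 8) - 1 == 3 again. A raw value of
 * 0 ("no node") decodes to UINT_MAX, which fails the range check, so
 * nr_vmap_nodes is returned without a warning.
 */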
952 
953 static bool
954 is_vn_id_valid(unsigned int node_id)
955 {
956 	if (node_id < nr_vmap_nodes)
957 		return true;
958 
959 	return false;
960 }
961 
962 static __always_inline unsigned long
963 va_size(struct vmap_area *va)
964 {
965 	return (va->va_end - va->va_start);
966 }
967 
968 static __always_inline unsigned long
969 get_subtree_max_size(struct rb_node *node)
970 {
971 	struct vmap_area *va;
972 
973 	va = rb_entry_safe(node, struct vmap_area, rb_node);
974 	return va ? va->subtree_max_size : 0;
975 }
976 
977 RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
978 	struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)
979 
980 static void reclaim_and_purge_vmap_areas(void);
981 static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
982 static void drain_vmap_area_work(struct work_struct *work);
983 static DECLARE_WORK(drain_vmap_work, drain_vmap_area_work);
984 
985 static atomic_long_t nr_vmalloc_pages;
986 
987 unsigned long vmalloc_nr_pages(void)
988 {
989 	return atomic_long_read(&nr_vmalloc_pages);
990 }
991 
992 /* Look up the first VA which satisfies addr < va_end, NULL if none. */
993 static struct vmap_area *
994 __find_vmap_area_exceed_addr(unsigned long addr, struct rb_root *root)
995 {
996 	struct vmap_area *va = NULL;
997 	struct rb_node *n = root->rb_node;
998 
999 	addr = (unsigned long)kasan_reset_tag((void *)addr);
1000 
1001 	while (n) {
1002 		struct vmap_area *tmp;
1003 
1004 		tmp = rb_entry(n, struct vmap_area, rb_node);
1005 		if (tmp->va_end > addr) {
1006 			va = tmp;
1007 			if (tmp->va_start <= addr)
1008 				break;
1009 
1010 			n = n->rb_left;
1011 		} else
1012 			n = n->rb_right;
1013 	}
1014 
1015 	return va;
1016 }
1017 
1018 /*
1019  * Returns the node where the first VA that satisfies addr < va_end resides.
1020  * On success, the node is locked. The user is responsible for unlocking it
1021  * once the VA no longer needs to be accessed.
1022  *
1023  * Returns NULL if nothing found.
1024  */
1025 static struct vmap_node *
1026 find_vmap_area_exceed_addr_lock(unsigned long addr, struct vmap_area **va)
1027 {
1028 	struct vmap_node *vn, *va_node = NULL;
1029 	struct vmap_area *va_lowest;
1030 	int i;
1031 
1032 	for (i = 0; i < nr_vmap_nodes; i++) {
1033 		vn = &vmap_nodes[i];
1034 
1035 		spin_lock(&vn->busy.lock);
1036 		va_lowest = __find_vmap_area_exceed_addr(addr, &vn->busy.root);
1037 		if (va_lowest) {
1038 			if (!va_node || va_lowest->va_start < (*va)->va_start) {
1039 				if (va_node)
1040 					spin_unlock(&va_node->busy.lock);
1041 
1042 				*va = va_lowest;
1043 				va_node = vn;
1044 				continue;
1045 			}
1046 		}
1047 		spin_unlock(&vn->busy.lock);
1048 	}
1049 
1050 	return va_node;
1051 }
1052 
1053 static struct vmap_area *__find_vmap_area(unsigned long addr, struct rb_root *root)
1054 {
1055 	struct rb_node *n = root->rb_node;
1056 
1057 	addr = (unsigned long)kasan_reset_tag((void *)addr);
1058 
1059 	while (n) {
1060 		struct vmap_area *va;
1061 
1062 		va = rb_entry(n, struct vmap_area, rb_node);
1063 		if (addr < va->va_start)
1064 			n = n->rb_left;
1065 		else if (addr >= va->va_end)
1066 			n = n->rb_right;
1067 		else
1068 			return va;
1069 	}
1070 
1071 	return NULL;
1072 }
1073 
1074 /*
1075  * This function returns the address of the parent node
1076  * and of its left or right link for further processing.
1077  *
1078  * Otherwise NULL is returned. In that case all further
1079  * steps regarding insertion of the conflicting, overlapping
1080  * range have to be declined; it is considered a bug.
1081  */
1082 static __always_inline struct rb_node **
1083 find_va_links(struct vmap_area *va,
1084 	struct rb_root *root, struct rb_node *from,
1085 	struct rb_node **parent)
1086 {
1087 	struct vmap_area *tmp_va;
1088 	struct rb_node **link;
1089 
1090 	if (root) {
1091 		link = &root->rb_node;
1092 		if (unlikely(!*link)) {
1093 			*parent = NULL;
1094 			return link;
1095 		}
1096 	} else {
1097 		link = &from;
1098 	}
1099 
1100 	/*
1101 	 * Go to the bottom of the tree. When we hit the last point
1102 	 * we end up with the parent rb_node and the correct direction,
1103 	 * called "link" here, to which the new va->rb_node is attached.
1104 	 */
1105 	do {
1106 		tmp_va = rb_entry(*link, struct vmap_area, rb_node);
1107 
1108 		/*
1109 		 * During the traversal we also do some sanity checking.
1110 		 * Warn and bail out if the new range partially (left/right)
1111 		 * or fully overlaps an existing one.
1112 		 */
1113 		if (va->va_end <= tmp_va->va_start)
1114 			link = &(*link)->rb_left;
1115 		else if (va->va_start >= tmp_va->va_end)
1116 			link = &(*link)->rb_right;
1117 		else {
1118 			WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n",
1119 				va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);
1120 
1121 			return NULL;
1122 		}
1123 	} while (*link);
1124 
1125 	*parent = &tmp_va->rb_node;
1126 	return link;
1127 }
1128 
1129 static __always_inline struct list_head *
1130 get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
1131 {
1132 	struct list_head *list;
1133 
1134 	if (unlikely(!parent))
1135 		/*
1136 		 * The red-black tree where we try to find VA neighbors
1137 		 * before merging or inserting is empty, i.e. there
1138 		 * is no free vmap space. Normally this does not
1139 		 * happen but we handle the case anyway.
1140 		 */
1141 		return NULL;
1142 
1143 	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
1144 	return (&parent->rb_right == link ? list->next : list);
1145 }
1146 
1147 static __always_inline void
1148 __link_va(struct vmap_area *va, struct rb_root *root,
1149 	struct rb_node *parent, struct rb_node **link,
1150 	struct list_head *head, bool augment)
1151 {
1152 	/*
1153 	 * VA is still not in the list, but we can
1154 	 * identify its future previous list_head node.
1155 	 */
1156 	if (likely(parent)) {
1157 		head = &rb_entry(parent, struct vmap_area, rb_node)->list;
1158 		if (&parent->rb_right != link)
1159 			head = head->prev;
1160 	}
1161 
1162 	/* Insert to the rb-tree */
1163 	rb_link_node(&va->rb_node, parent, link);
1164 	if (augment) {
1165 		/*
1166 		 * Some explanation here. Just perform a simple insertion
1167 		 * into the tree. We do not set va->subtree_max_size to
1168 		 * its current size before calling rb_insert_augmented(),
1169 		 * because the tree is populated from the bottom up to the
1170 		 * parent levels only once the node _is_ in the tree.
1171 		 *
1172 		 * Therefore we set subtree_max_size to zero after insertion,
1173 		 * to let augment_tree_propagate_from() put everything into
1174 		 * the correct order later on.
1175 		 */
1176 		rb_insert_augmented(&va->rb_node,
1177 			root, &free_vmap_area_rb_augment_cb);
1178 		va->subtree_max_size = 0;
1179 	} else {
1180 		rb_insert_color(&va->rb_node, root);
1181 	}
1182 
1183 	/* Keep this list address-sorted. */
1184 	list_add(&va->list, head);
1185 }
1186 
1187 static __always_inline void
1188 link_va(struct vmap_area *va, struct rb_root *root,
1189 	struct rb_node *parent, struct rb_node **link,
1190 	struct list_head *head)
1191 {
1192 	__link_va(va, root, parent, link, head, false);
1193 }
1194 
1195 static __always_inline void
1196 link_va_augment(struct vmap_area *va, struct rb_root *root,
1197 	struct rb_node *parent, struct rb_node **link,
1198 	struct list_head *head)
1199 {
1200 	__link_va(va, root, parent, link, head, true);
1201 }
1202 
1203 static __always_inline void
1204 __unlink_va(struct vmap_area *va, struct rb_root *root, bool augment)
1205 {
1206 	if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
1207 		return;
1208 
1209 	if (augment)
1210 		rb_erase_augmented(&va->rb_node,
1211 			root, &free_vmap_area_rb_augment_cb);
1212 	else
1213 		rb_erase(&va->rb_node, root);
1214 
1215 	list_del_init(&va->list);
1216 	RB_CLEAR_NODE(&va->rb_node);
1217 }
1218 
1219 static __always_inline void
1220 unlink_va(struct vmap_area *va, struct rb_root *root)
1221 {
1222 	__unlink_va(va, root, false);
1223 }
1224 
1225 static __always_inline void
1226 unlink_va_augment(struct vmap_area *va, struct rb_root *root)
1227 {
1228 	__unlink_va(va, root, true);
1229 }
1230 
1231 #if DEBUG_AUGMENT_PROPAGATE_CHECK
1232 /*
1233  * Gets called when a node is removed and the tree is rotated.
1234  */
1235 static __always_inline unsigned long
1236 compute_subtree_max_size(struct vmap_area *va)
1237 {
1238 	return max3(va_size(va),
1239 		get_subtree_max_size(va->rb_node.rb_left),
1240 		get_subtree_max_size(va->rb_node.rb_right));
1241 }
1242 
1243 static void
1244 augment_tree_propagate_check(void)
1245 {
1246 	struct vmap_area *va;
1247 	unsigned long computed_size;
1248 
1249 	list_for_each_entry(va, &free_vmap_area_list, list) {
1250 		computed_size = compute_subtree_max_size(va);
1251 		if (computed_size != va->subtree_max_size)
1252 			pr_emerg("tree is corrupted: %lu, %lu\n",
1253 				va_size(va), va->subtree_max_size);
1254 	}
1255 }
1256 #endif
1257 
1258 /*
1259  * This function populates subtree_max_size from the bottom towards
1260  * the upper levels, starting from the VA point. The propagation must
1261  * be done when a VA's size is modified by changing its va_start or
1262  * va_end, or when a new VA is inserted into the tree.
1263  *
1264  * It means that augment_tree_propagate_from() must be called:
1265  * - after a VA has been inserted into the tree (free path);
1266  * - after a VA has been shrunk (allocation path);
1267  * - after a VA has been increased (merging path).
1268  *
1269  * Please note that this does not mean that upper parent nodes
1270  * and their subtree_max_size are recalculated all the way up
1271  * to the root node.
1272  *
1273  *       4--8
1274  *        /\
1275  *       /  \
1276  *      /    \
1277  *    2--2  8--8
1278  *
1279  * For example, if we modify node 4, shrinking it to 2, then no
1280  * modification is required. If we shrink node 2 to 1, only its own
1281  * subtree_max_size is updated, to 1. If we shrink node 8 to 6, then
1282  * its subtree_max_size is set to 6 and the parent node becomes
1283  * 4--6.
1284  */
1285 static __always_inline void
1286 augment_tree_propagate_from(struct vmap_area *va)
1287 {
1288 	/*
1289 	 * Populate the tree from bottom towards the root until
1290 	 * the calculated maximum available size of checked node
1291 	 * is equal to its current one.
1292 	 */
1293 	free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL);
1294 
1295 #if DEBUG_AUGMENT_PROPAGATE_CHECK
1296 	augment_tree_propagate_check();
1297 #endif
1298 }
1299 
1300 static void
1301 insert_vmap_area(struct vmap_area *va,
1302 	struct rb_root *root, struct list_head *head)
1303 {
1304 	struct rb_node **link;
1305 	struct rb_node *parent;
1306 
1307 	link = find_va_links(va, root, NULL, &parent);
1308 	if (link)
1309 		link_va(va, root, parent, link, head);
1310 }
1311 
1312 static void
1313 insert_vmap_area_augment(struct vmap_area *va,
1314 	struct rb_node *from, struct rb_root *root,
1315 	struct list_head *head)
1316 {
1317 	struct rb_node **link;
1318 	struct rb_node *parent;
1319 
1320 	if (from)
1321 		link = find_va_links(va, NULL, from, &parent);
1322 	else
1323 		link = find_va_links(va, root, NULL, &parent);
1324 
1325 	if (link) {
1326 		link_va_augment(va, root, parent, link, head);
1327 		augment_tree_propagate_from(va);
1328 	}
1329 }
1330 
1331 /*
1332  * Merge a de-allocated chunk of VA memory with the previous
1333  * and next free blocks. If no coalescing is done, a new free
1334  * area is inserted. If the VA has been merged, the vmap_area
1335  * object is freed.
1336  *
1337  * Please note, it can return NULL for overlapping ranges,
1338  * together with a WARN() report. Despite this being buggy
1339  * behaviour, the system can stay alive and keep
1340  * going.
1341  */
1342 static __always_inline struct vmap_area *
1343 __merge_or_add_vmap_area(struct vmap_area *va,
1344 	struct rb_root *root, struct list_head *head, bool augment)
1345 {
1346 	struct vmap_area *sibling;
1347 	struct list_head *next;
1348 	struct rb_node **link;
1349 	struct rb_node *parent;
1350 	bool merged = false;
1351 
1352 	/*
1353 	 * Find a place in the tree where VA potentially will be
1354 	 * inserted, unless it is merged with its sibling/siblings.
1355 	 */
1356 	link = find_va_links(va, root, NULL, &parent);
1357 	if (!link)
1358 		return NULL;
1359 
1360 	/*
1361 	 * Get next node of VA to check if merging can be done.
1362 	 */
1363 	next = get_va_next_sibling(parent, link);
1364 	if (unlikely(next == NULL))
1365 		goto insert;
1366 
1367 	/*
1368 	 * start            end
1369 	 * |                |
1370 	 * |<------VA------>|<-----Next----->|
1371 	 *                  |                |
1372 	 *                  start            end
1373 	 */
1374 	if (next != head) {
1375 		sibling = list_entry(next, struct vmap_area, list);
1376 		if (sibling->va_start == va->va_end) {
1377 			sibling->va_start = va->va_start;
1378 
1379 			/* Free vmap_area object. */
1380 			kmem_cache_free(vmap_area_cachep, va);
1381 
1382 			/* Point to the new merged area. */
1383 			va = sibling;
1384 			merged = true;
1385 		}
1386 	}
1387 
1388 	/*
1389 	 * start            end
1390 	 * |                |
1391 	 * |<-----Prev----->|<------VA------>|
1392 	 *                  |                |
1393 	 *                  start            end
1394 	 */
1395 	if (next->prev != head) {
1396 		sibling = list_entry(next->prev, struct vmap_area, list);
1397 		if (sibling->va_end == va->va_start) {
1398 			/*
1399 			 * If both neighbors are coalesced, it is important
1400 			 * to unlink the "next" node first, followed by merging
1401 			 * with "previous" one. Otherwise the tree might not be
1402 			 * fully populated if a sibling's augmented value is
1403 			 * "normalized" because of rotation operations.
1404 			 */
1405 			if (merged)
1406 				__unlink_va(va, root, augment);
1407 
1408 			sibling->va_end = va->va_end;
1409 
1410 			/* Free vmap_area object. */
1411 			kmem_cache_free(vmap_area_cachep, va);
1412 
1413 			/* Point to the new merged area. */
1414 			va = sibling;
1415 			merged = true;
1416 		}
1417 	}
1418 
1419 insert:
1420 	if (!merged)
1421 		__link_va(va, root, parent, link, head, augment);
1422 
1423 	return va;
1424 }
1425 
1426 static __always_inline struct vmap_area *
1427 merge_or_add_vmap_area(struct vmap_area *va,
1428 	struct rb_root *root, struct list_head *head)
1429 {
1430 	return __merge_or_add_vmap_area(va, root, head, false);
1431 }
1432 
1433 static __always_inline struct vmap_area *
1434 merge_or_add_vmap_area_augment(struct vmap_area *va,
1435 	struct rb_root *root, struct list_head *head)
1436 {
1437 	va = __merge_or_add_vmap_area(va, root, head, true);
1438 	if (va)
1439 		augment_tree_propagate_from(va);
1440 
1441 	return va;
1442 }
1443 
1444 static __always_inline bool
1445 is_within_this_va(struct vmap_area *va, unsigned long size,
1446 	unsigned long align, unsigned long vstart)
1447 {
1448 	unsigned long nva_start_addr;
1449 
1450 	if (va->va_start > vstart)
1451 		nva_start_addr = ALIGN(va->va_start, align);
1452 	else
1453 		nva_start_addr = ALIGN(vstart, align);
1454 
1455 	/* Can overflow due to a big size or alignment. */
1456 	if (nva_start_addr + size < nva_start_addr ||
1457 			nva_start_addr < vstart)
1458 		return false;
1459 
1460 	return (nva_start_addr + size <= va->va_end);
1461 }
1462 
1463 /*
1464  * Find the first free block (lowest start address) in the tree
1465  * that satisfies the request described by the passed parameters.
1466  * Please note, with an alignment bigger than PAGE_SIZE, the
1467  * search length is adjusted to account for the worst-case
1468  * alignment overhead.
1469  */
1470 static __always_inline struct vmap_area *
1471 find_vmap_lowest_match(struct rb_root *root, unsigned long size,
1472 	unsigned long align, unsigned long vstart, bool adjust_search_size)
1473 {
1474 	struct vmap_area *va;
1475 	struct rb_node *node;
1476 	unsigned long length;
1477 
1478 	/* Start from the root. */
1479 	node = root->rb_node;
1480 
1481 	/* Adjust the search size for alignment overhead. */
1482 	length = adjust_search_size ? size + align - 1 : size;
1483 
1484 	while (node) {
1485 		va = rb_entry(node, struct vmap_area, rb_node);
1486 
1487 		if (get_subtree_max_size(node->rb_left) >= length &&
1488 				vstart < va->va_start) {
1489 			node = node->rb_left;
1490 		} else {
1491 			if (is_within_this_va(va, size, align, vstart))
1492 				return va;
1493 
1494 			/*
1495 			 * It does not make sense to descend into the right
1496 			 * sub-tree if it does not have a free block that is
1497 			 * at least as big as the requested search length.
1498 			 */
1499 			if (get_subtree_max_size(node->rb_right) >= length) {
1500 				node = node->rb_right;
1501 				continue;
1502 			}
1503 
1504 			/*
1505 			 * OK. We roll back and find the first right sub-tree
1506 			 * that satisfies the search criteria. This can happen
1507 			 * due to the "vstart" restriction or an alignment
1508 			 * overhead that is bigger than PAGE_SIZE.
1509 			 */
1510 			while ((node = rb_parent(node))) {
1511 				va = rb_entry(node, struct vmap_area, rb_node);
1512 				if (is_within_this_va(va, size, align, vstart))
1513 					return va;
1514 
1515 				if (get_subtree_max_size(node->rb_right) >= length &&
1516 						vstart <= va->va_start) {
1517 					/*
1518 					 * Shift vstart forward. Please note, we update it with the
1519 					 * parent's start address plus "1" because we do not want to
1520 					 * enter the same sub-tree after it has already been checked
1521 					 * and no suitable free block was found there.
1522 					 */
1523 					vstart = va->va_start + 1;
1524 					node = node->rb_right;
1525 					break;
1526 				}
1527 			}
1528 		}
1529 	}
1530 
1531 	return NULL;
1532 }
1533 
1534 #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
1535 #include <linux/random.h>
1536 
1537 static struct vmap_area *
1538 find_vmap_lowest_linear_match(struct list_head *head, unsigned long size,
1539 	unsigned long align, unsigned long vstart)
1540 {
1541 	struct vmap_area *va;
1542 
1543 	list_for_each_entry(va, head, list) {
1544 		if (!is_within_this_va(va, size, align, vstart))
1545 			continue;
1546 
1547 		return va;
1548 	}
1549 
1550 	return NULL;
1551 }
1552 
1553 static void
1554 find_vmap_lowest_match_check(struct rb_root *root, struct list_head *head,
1555 			     unsigned long size, unsigned long align)
1556 {
1557 	struct vmap_area *va_1, *va_2;
1558 	unsigned long vstart;
1559 	unsigned int rnd;
1560 
1561 	get_random_bytes(&rnd, sizeof(rnd));
1562 	vstart = VMALLOC_START + rnd;
1563 
1564 	va_1 = find_vmap_lowest_match(root, size, align, vstart, false);
1565 	va_2 = find_vmap_lowest_linear_match(head, size, align, vstart);
1566 
1567 	if (va_1 != va_2)
1568 		pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
1569 			va_1, va_2, vstart);
1570 }
1571 #endif
1572 
1573 enum fit_type {
1574 	NOTHING_FIT = 0,
1575 	FL_FIT_TYPE = 1,	/* full fit */
1576 	LE_FIT_TYPE = 2,	/* left edge fit */
1577 	RE_FIT_TYPE = 3,	/* right edge fit */
1578 	NE_FIT_TYPE = 4		/* no edge fit */
1579 };
1580 
1581 static __always_inline enum fit_type
1582 classify_va_fit_type(struct vmap_area *va,
1583 	unsigned long nva_start_addr, unsigned long size)
1584 {
1585 	enum fit_type type;
1586 
1587 	/* Check if it is within VA. */
1588 	if (nva_start_addr < va->va_start ||
1589 			nva_start_addr + size > va->va_end)
1590 		return NOTHING_FIT;
1591 
1592 	/* Now classify. */
1593 	if (va->va_start == nva_start_addr) {
1594 		if (va->va_end == nva_start_addr + size)
1595 			type = FL_FIT_TYPE;
1596 		else
1597 			type = LE_FIT_TYPE;
1598 	} else if (va->va_end == nva_start_addr + size) {
1599 		type = RE_FIT_TYPE;
1600 	} else {
1601 		type = NE_FIT_TYPE;
1602 	}
1603 
1604 	return type;
1605 }
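
/*
 * Worked example: for a free VA spanning [0x1000:0x9000) and a request of
 * size 0x2000, nva_start_addr == 0x1000 gives LE_FIT_TYPE, 0x7000 gives
 * RE_FIT_TYPE and 0x4000 gives NE_FIT_TYPE; a request of size 0x8000 at
 * 0x1000 consumes the whole VA and gives FL_FIT_TYPE.
 */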
1606 
1607 static __always_inline int
1608 va_clip(struct rb_root *root, struct list_head *head,
1609 		struct vmap_area *va, unsigned long nva_start_addr,
1610 		unsigned long size)
1611 {
1612 	struct vmap_area *lva = NULL;
1613 	enum fit_type type = classify_va_fit_type(va, nva_start_addr, size);
1614 
1615 	if (type == FL_FIT_TYPE) {
1616 		/*
1617 		 * No need to split VA, it fully fits.
1618 		 *
1619 		 * |               |
1620 		 * V      NVA      V
1621 		 * |---------------|
1622 		 */
1623 		unlink_va_augment(va, root);
1624 		kmem_cache_free(vmap_area_cachep, va);
1625 	} else if (type == LE_FIT_TYPE) {
1626 		/*
1627 		 * Split left edge of fit VA.
1628 		 *
1629 		 * |       |
1630 		 * V  NVA  V   R
1631 		 * |-------|-------|
1632 		 */
1633 		va->va_start += size;
1634 	} else if (type == RE_FIT_TYPE) {
1635 		/*
1636 		 * Split right edge of fit VA.
1637 		 *
1638 		 *         |       |
1639 		 *     L   V  NVA  V
1640 		 * |-------|-------|
1641 		 */
1642 		va->va_end = nva_start_addr;
1643 	} else if (type == NE_FIT_TYPE) {
1644 		/*
1645 		 * Split no edge of fit VA.
1646 		 *
1647 		 *     |       |
1648 		 *   L V  NVA  V R
1649 		 * |---|-------|---|
1650 		 */
1651 		lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
1652 		if (unlikely(!lva)) {
1653 			 * For the percpu allocator we do not do any pre-allocation
1654 			 * and leave things as they are. The reason is that it most
1655 			 * likely never ends up with an NE_FIT_TYPE split: percpu
1656 			 * allocation offsets and sizes are aligned to a fixed
1657 			 * alignment request, i.e. RE_FIT_TYPE and FL_FIT_TYPE
1658 			 * are its main fitting cases.
1659 			 *
1660 			 * There are a few exceptions though; one example is the
1661 			 * first allocation (early boot up) when we have "one"
1662 			 * big free space that has to be split.
1663 			 *
1664 			 * We can also hit this path for regular "vmap"
1665 			 * allocations if "this" current CPU was not preloaded.
1666 			 * See the comment in alloc_vmap_area() for why. If so,
1667 			 * GFP_NOWAIT is used instead to get an extra object for
1668 			 * the split. That is rare and most of the time does not
1669 			 * occur.
1670 			 *
1671 			 * What happens if an allocation fails? Basically, an
1672 			 * "overflow" path is triggered to purge lazily freed
1673 			 * areas to free some memory, then the "retry" path is
1674 			 * triggered to repeat one more time. See more details
1675 			 * in the alloc_vmap_area() function.
1676 			 * in alloc_vmap_area() function.
1677 			 */
1678 			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
1679 			if (!lva)
1680 				return -1;
1681 		}
1682 
1683 		/*
1684 		 * Build the remainder.
1685 		 */
1686 		lva->va_start = va->va_start;
1687 		lva->va_end = nva_start_addr;
1688 
1689 		/*
1690 		 * Shrink this VA to remaining size.
1691 		 */
1692 		va->va_start = nva_start_addr + size;
1693 	} else {
1694 		return -1;
1695 	}
1696 
1697 	if (type != FL_FIT_TYPE) {
1698 		augment_tree_propagate_from(va);
1699 
1700 		if (lva)	/* type == NE_FIT_TYPE */
1701 			insert_vmap_area_augment(lva, &va->rb_node, root, head);
1702 	}
1703 
1704 	return 0;
1705 }
1706 
1707 static unsigned long
1708 va_alloc(struct vmap_area *va,
1709 		struct rb_root *root, struct list_head *head,
1710 		unsigned long size, unsigned long align,
1711 		unsigned long vstart, unsigned long vend)
1712 {
1713 	unsigned long nva_start_addr;
1714 	int ret;
1715 
1716 	if (va->va_start > vstart)
1717 		nva_start_addr = ALIGN(va->va_start, align);
1718 	else
1719 		nva_start_addr = ALIGN(vstart, align);
1720 
1721 	/* Check the "vend" restriction. */
1722 	if (nva_start_addr + size > vend)
1723 		return vend;
1724 
1725 	/* Update the free vmap_area. */
1726 	ret = va_clip(root, head, va, nva_start_addr, size);
1727 	if (WARN_ON_ONCE(ret))
1728 		return vend;
1729 
1730 	return nva_start_addr;
1731 }
1732 
1733 /*
1734  * Returns a start address of the newly allocated area, if success.
1735  * Otherwise a vend is returned that indicates failure.
1736  */
1737 static __always_inline unsigned long
1738 __alloc_vmap_area(struct rb_root *root, struct list_head *head,
1739 	unsigned long size, unsigned long align,
1740 	unsigned long vstart, unsigned long vend)
1741 {
1742 	bool adjust_search_size = true;
1743 	unsigned long nva_start_addr;
1744 	struct vmap_area *va;
1745 
1746 	/*
1747 	 * Do not adjust when:
1748 	 *   a) align <= PAGE_SIZE, because it does not make any sense.
1749 	 *      All blocks (their start addresses) are at least PAGE_SIZE
1750 	 *      aligned anyway;
1751 	 *   b) a short range where the requested size exactly matches the
1752 	 *      specified [vstart:vend] interval and the alignment > PAGE_SIZE.
1753 	 *      With an adjusted search length the allocation would not succeed.
1754 	 */
1755 	if (align <= PAGE_SIZE || (align > PAGE_SIZE && (vend - vstart) == size))
1756 		adjust_search_size = false;
1757 
1758 	va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
1759 	if (unlikely(!va))
1760 		return vend;
1761 
1762 	nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend);
1763 	if (nva_start_addr == vend)
1764 		return vend;
1765 
1766 #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
1767 	find_vmap_lowest_match_check(root, head, size, align);
1768 #endif
1769 
1770 	return nva_start_addr;
1771 }
1772 
1773 /*
1774  * Free a region of KVA allocated by alloc_vmap_area
1775  */
1776 static void free_vmap_area(struct vmap_area *va)
1777 {
1778 	struct vmap_node *vn = addr_to_node(va->va_start);
1779 
1780 	/*
1781 	 * Remove from the busy tree/list.
1782 	 */
1783 	spin_lock(&vn->busy.lock);
1784 	unlink_va(va, &vn->busy.root);
1785 	spin_unlock(&vn->busy.lock);
1786 
1787 	/*
1788 	 * Insert/Merge it back to the free tree/list.
1789 	 */
1790 	spin_lock(&free_vmap_area_lock);
1791 	merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list);
1792 	spin_unlock(&free_vmap_area_lock);
1793 }
1794 
1795 static inline void
1796 preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node)
1797 {
1798 	struct vmap_area *va = NULL;
1799 
1800 	/*
1801 	 * Preload this CPU with one extra vmap_area object. It is used
1802 	 * when the fit type of the free area is NE_FIT_TYPE. It guarantees
1803 	 * that the CPU doing the allocation is preloaded.
1804 	 *
1805 	 * We do it in non-atomic context, which allows us to use more
1806 	 * permissive allocation masks and thus be more stable under low
1807 	 * memory conditions and high memory pressure.
1808 	 */
1809 	if (!this_cpu_read(ne_fit_preload_node))
1810 		va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1811 
1812 	spin_lock(lock);
1813 
1814 	if (va && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, va))
1815 		kmem_cache_free(vmap_area_cachep, va);
1816 }
1817 
1818 static struct vmap_pool *
1819 size_to_va_pool(struct vmap_node *vn, unsigned long size)
1820 {
1821 	unsigned int idx = (size - 1) / PAGE_SIZE;
1822 
1823 	if (idx < MAX_VA_SIZE_PAGES)
1824 		return &vn->pool[idx];
1825 
1826 	return NULL;
1827 }
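
/*
 * Worked example: a request of three pages (size == 3 * PAGE_SIZE) gives
 * idx == (3 * PAGE_SIZE - 1) / PAGE_SIZE == 2, i.e. pool[2] holds 3-page
 * areas. Requests larger than MAX_VA_SIZE_PAGES pages bypass the per-node
 * pools and are served from the global free tree instead.
 */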
1828 
1829 static bool
1830 node_pool_add_va(struct vmap_node *n, struct vmap_area *va)
1831 {
1832 	struct vmap_pool *vp;
1833 
1834 	vp = size_to_va_pool(n, va_size(va));
1835 	if (!vp)
1836 		return false;
1837 
1838 	spin_lock(&n->pool_lock);
1839 	list_add(&va->list, &vp->head);
1840 	WRITE_ONCE(vp->len, vp->len + 1);
1841 	spin_unlock(&n->pool_lock);
1842 
1843 	return true;
1844 }
1845 
1846 static struct vmap_area *
1847 node_pool_del_va(struct vmap_node *vn, unsigned long size,
1848 		unsigned long align, unsigned long vstart,
1849 		unsigned long vend)
1850 {
1851 	struct vmap_area *va = NULL;
1852 	struct vmap_pool *vp;
1853 	int err = 0;
1854 
1855 	vp = size_to_va_pool(vn, size);
1856 	if (!vp || list_empty(&vp->head))
1857 		return NULL;
1858 
1859 	spin_lock(&vn->pool_lock);
1860 	if (!list_empty(&vp->head)) {
1861 		va = list_first_entry(&vp->head, struct vmap_area, list);
1862 
1863 		if (IS_ALIGNED(va->va_start, align)) {
1864 			/*
1865 			 * Do some sanity checks and emit a warning
1866 			 * if any of the checks below detects an error.
1867 			 */
1868 			err |= (va_size(va) != size);
1869 			err |= (va->va_start < vstart);
1870 			err |= (va->va_end > vend);
1871 
1872 			if (!WARN_ON_ONCE(err)) {
1873 				list_del_init(&va->list);
1874 				WRITE_ONCE(vp->len, vp->len - 1);
1875 			} else {
1876 				va = NULL;
1877 			}
1878 		} else {
1879 			list_move_tail(&va->list, &vp->head);
1880 			va = NULL;
1881 		}
1882 	}
1883 	spin_unlock(&vn->pool_lock);
1884 
1885 	return va;
1886 }
1887 
1888 static struct vmap_area *
1889 node_alloc(unsigned long size, unsigned long align,
1890 		unsigned long vstart, unsigned long vend,
1891 		unsigned long *addr, unsigned int *vn_id)
1892 {
1893 	struct vmap_area *va;
1894 
1895 	*vn_id = 0;
1896 	*addr = vend;
1897 
1898 	/*
1899 	 * Fall back to the global heap if this is not a vmalloc
1900 	 * range or there is only one node.
1901 	 */
1902 	if (vstart != VMALLOC_START || vend != VMALLOC_END ||
1903 			nr_vmap_nodes == 1)
1904 		return NULL;
1905 
1906 	*vn_id = raw_smp_processor_id() % nr_vmap_nodes;
1907 	va = node_pool_del_va(id_to_node(*vn_id), size, align, vstart, vend);
1908 	*vn_id = encode_vn_id(*vn_id);
1909 
1910 	if (va)
1911 		*addr = va->va_start;
1912 
1913 	return va;
1914 }
1915 
1916 /*
1917  * Allocate a region of KVA of the specified size and alignment, within the
1918  * range [vstart:vend].
1919  */
1920 static struct vmap_area *alloc_vmap_area(unsigned long size,
1921 				unsigned long align,
1922 				unsigned long vstart, unsigned long vend,
1923 				int node, gfp_t gfp_mask,
1924 				unsigned long va_flags)
1925 {
1926 	struct vmap_node *vn;
1927 	struct vmap_area *va;
1928 	unsigned long freed;
1929 	unsigned long addr;
1930 	unsigned int vn_id;
1931 	int purged = 0;
1932 	int ret;
1933 
1934 	if (unlikely(!size || offset_in_page(size) || !is_power_of_2(align)))
1935 		return ERR_PTR(-EINVAL);
1936 
1937 	if (unlikely(!vmap_initialized))
1938 		return ERR_PTR(-EBUSY);
1939 
1940 	might_sleep();
1941 
1942 	/*
1943 	 * If this fails and a VA is obtained from the global heap
1944 	 * instead, it is still marked with this "vn_id" so that it is
1945 	 * returned to this node's pool later. That way the pools get
1946 	 * populated based on user demand.
1947 	 *
1948 	 * On success a ready-to-go VA is returned.
1949 	 */
1950 	va = node_alloc(size, align, vstart, vend, &addr, &vn_id);
1951 	if (!va) {
1952 		gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
1953 
1954 		va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1955 		if (unlikely(!va))
1956 			return ERR_PTR(-ENOMEM);
1957 
1958 		/*
1959 		 * Only scan the relevant parts containing pointers to other objects
1960 		 * to avoid false negatives.
1961 		 */
1962 		kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);
1963 	}
1964 
1965 retry:
1966 	if (addr == vend) {
1967 		preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
1968 		addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
1969 			size, align, vstart, vend);
1970 		spin_unlock(&free_vmap_area_lock);
1971 	}
1972 
1973 	trace_alloc_vmap_area(addr, size, align, vstart, vend, addr == vend);
1974 
1975 	/*
1976 	 * If an allocation fails, the "vend" address is
1977 	 * returned. Therefore trigger the overflow path.
1978 	 */
1979 	if (unlikely(addr == vend))
1980 		goto overflow;
1981 
1982 	va->va_start = addr;
1983 	va->va_end = addr + size;
1984 	va->vm = NULL;
1985 	va->flags = (va_flags | vn_id);
1986 
1987 	vn = addr_to_node(va->va_start);
1988 
1989 	spin_lock(&vn->busy.lock);
1990 	insert_vmap_area(va, &vn->busy.root, &vn->busy.head);
1991 	spin_unlock(&vn->busy.lock);
1992 
1993 	BUG_ON(!IS_ALIGNED(va->va_start, align));
1994 	BUG_ON(va->va_start < vstart);
1995 	BUG_ON(va->va_end > vend);
1996 
1997 	ret = kasan_populate_vmalloc(addr, size);
1998 	if (ret) {
1999 		free_vmap_area(va);
2000 		return ERR_PTR(ret);
2001 	}
2002 
2003 	return va;
2004 
2005 overflow:
2006 	if (!purged) {
2007 		reclaim_and_purge_vmap_areas();
2008 		purged = 1;
2009 		goto retry;
2010 	}
2011 
2012 	freed = 0;
2013 	blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
2014 
2015 	if (freed > 0) {
2016 		purged = 0;
2017 		goto retry;
2018 	}
2019 
2020 	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
2021 		pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
2022 			size);
2023 
2024 	kmem_cache_free(vmap_area_cachep, va);
2025 	return ERR_PTR(-EBUSY);
2026 }
2027 
2028 int register_vmap_purge_notifier(struct notifier_block *nb)
2029 {
2030 	return blocking_notifier_chain_register(&vmap_notify_list, nb);
2031 }
2032 EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);
2033 
2034 int unregister_vmap_purge_notifier(struct notifier_block *nb)
2035 {
2036 	return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
2037 }
2038 EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
2039 
2040 /*
2041  * lazy_max_pages is the maximum amount of virtual address space we gather up
2042  * before attempting to purge with a TLB flush.
2043  *
2044  * There is a tradeoff here: a larger number will cover more kernel page tables
2045  * and take slightly longer to purge, but it will linearly reduce the number of
2046  * global TLB flushes that must be performed. It would seem natural to scale
2047  * this number up linearly with the number of CPUs (because vmapping activity
2048  * could also scale linearly with the number of CPUs), however it is likely
2049  * that in practice, workloads might be constrained in other ways that mean
2050  * vmap activity will not scale linearly with CPUs. Also, I want to be
2051  * conservative and not introduce a big latency on huge systems, so go with
2052  * a less aggressive log scale. It will still be an improvement over the old
2053  * code, and it will be simple to change the scale factor if we find that it
2054  * becomes a problem on bigger systems.
2055  */
2056 static unsigned long lazy_max_pages(void)
2057 {
2058 	unsigned int log;
2059 
2060 	log = fls(num_online_cpus());
2061 
2062 	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
2063 }
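/*
 * A rough worked example (assuming 4 KiB pages): on an 8-CPU system
 * fls(8) == 4, so lazy_max_pages() == 4 * (32 MiB / 4 KiB) == 32768
 * pages, i.e. up to 128 MiB of lazily freed VA may be gathered before
 * a purge is triggered.
 */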
2064 
2065 static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);
2066 
2067 /*
2068  * Serialize vmap purging.  There is no actual critical section protected
2069  * by this lock, but we want to avoid concurrent calls for performance
2070  * reasons and to make pcpu_get_vm_areas() more deterministic.
2071  */
2072 static DEFINE_MUTEX(vmap_purge_lock);
2073 
2074 /* for per-CPU blocks */
2075 static void purge_fragmented_blocks_allcpus(void);
2076 static cpumask_t purge_nodes;
2077 
2078 static void
2079 reclaim_list_global(struct list_head *head)
2080 {
2081 	struct vmap_area *va, *n;
2082 
2083 	if (list_empty(head))
2084 		return;
2085 
2086 	spin_lock(&free_vmap_area_lock);
2087 	list_for_each_entry_safe(va, n, head, list)
2088 		merge_or_add_vmap_area_augment(va,
2089 			&free_vmap_area_root, &free_vmap_area_list);
2090 	spin_unlock(&free_vmap_area_lock);
2091 }
2092 
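/*
 * Shrink the per-size pools of a node. Each non-empty pool is detached
 * under pool_lock, about a quarter of the remaining areas (or all of
 * them for a full decay) are merged back into the global free tree,
 * and whatever is left is reattached to the pool.
 */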
2093 static void
2094 decay_va_pool_node(struct vmap_node *vn, bool full_decay)
2095 {
2096 	struct vmap_area *va, *nva;
2097 	struct list_head decay_list;
2098 	struct rb_root decay_root;
2099 	unsigned long n_decay;
2100 	int i;
2101 
2102 	decay_root = RB_ROOT;
2103 	INIT_LIST_HEAD(&decay_list);
2104 
2105 	for (i = 0; i < MAX_VA_SIZE_PAGES; i++) {
2106 		struct list_head tmp_list;
2107 
2108 		if (list_empty(&vn->pool[i].head))
2109 			continue;
2110 
2111 		INIT_LIST_HEAD(&tmp_list);
2112 
2113 		/* Detach the pool, so no-one can access it. */
2114 		spin_lock(&vn->pool_lock);
2115 		list_replace_init(&vn->pool[i].head, &tmp_list);
2116 		spin_unlock(&vn->pool_lock);
2117 
2118 		if (full_decay)
2119 			WRITE_ONCE(vn->pool[i].len, 0);
2120 
2121 		/* Decay the pool by ~25% of the remaining objects. */
2122 		n_decay = vn->pool[i].len >> 2;
2123 
2124 		list_for_each_entry_safe(va, nva, &tmp_list, list) {
2125 			list_del_init(&va->list);
2126 			merge_or_add_vmap_area(va, &decay_root, &decay_list);
2127 
2128 			if (!full_decay) {
2129 				WRITE_ONCE(vn->pool[i].len, vn->pool[i].len - 1);
2130 
2131 				if (!--n_decay)
2132 					break;
2133 			}
2134 		}
2135 
2136 		/*
2137 		 * Reattach the pool if it has only been partly decayed.
2138 		 * Note that no other context is expected to populate the
2139 		 * pool in the meantime, therefore a simple list replace
2140 		 * operation is sufficient here.
2141 		 */
2142 		if (!full_decay && !list_empty(&tmp_list)) {
2143 			spin_lock(&vn->pool_lock);
2144 			list_replace_init(&tmp_list, &vn->pool[i].head);
2145 			spin_unlock(&vn->pool_lock);
2146 		}
2147 	}
2148 
2149 	reclaim_list_global(&decay_list);
2150 }
2151 
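/*
 * Drain one node's purge_list (run either as a workqueue helper or
 * called directly): each area has its KASAN vmalloc shadow released,
 * is subtracted from vmap_lazy_nr and, when possible, is recycled into
 * the node's pool; anything left over is merged back into the global
 * free tree.
 */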
2152 static void purge_vmap_node(struct work_struct *work)
2153 {
2154 	struct vmap_node *vn = container_of(work,
2155 		struct vmap_node, purge_work);
2156 	struct vmap_area *va, *n_va;
2157 	LIST_HEAD(local_list);
2158 
2159 	vn->nr_purged = 0;
2160 
2161 	list_for_each_entry_safe(va, n_va, &vn->purge_list, list) {
2162 		unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
2163 		unsigned long orig_start = va->va_start;
2164 		unsigned long orig_end = va->va_end;
2165 		unsigned int vn_id = decode_vn_id(va->flags);
2166 
2167 		list_del_init(&va->list);
2168 
2169 		if (is_vmalloc_or_module_addr((void *)orig_start))
2170 			kasan_release_vmalloc(orig_start, orig_end,
2171 					      va->va_start, va->va_end);
2172 
2173 		atomic_long_sub(nr, &vmap_lazy_nr);
2174 		vn->nr_purged++;
2175 
2176 		if (is_vn_id_valid(vn_id) && !vn->skip_populate)
2177 			if (node_pool_add_va(vn, va))
2178 				continue;
2179 
2180 		/* Go back to global. */
2181 		list_add(&va->list, &local_list);
2182 	}
2183 
2184 	reclaim_list_global(&local_list);
2185 }
2186 
2187 /*
2188  * Purges all lazily-freed vmap areas.
2189  */
2190 static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end,
2191 		bool full_pool_decay)
2192 {
2193 	unsigned long nr_purged_areas = 0;
2194 	unsigned int nr_purge_helpers;
2195 	unsigned int nr_purge_nodes;
2196 	struct vmap_node *vn;
2197 	int i;
2198 
2199 	lockdep_assert_held(&vmap_purge_lock);
2200 
2201 	/*
2202 	 * Use a cpumask to mark which nodes have to be processed.
2203 	 */
2204 	purge_nodes = CPU_MASK_NONE;
2205 
2206 	for (i = 0; i < nr_vmap_nodes; i++) {
2207 		vn = &vmap_nodes[i];
2208 
2209 		INIT_LIST_HEAD(&vn->purge_list);
2210 		vn->skip_populate = full_pool_decay;
2211 		decay_va_pool_node(vn, full_pool_decay);
2212 
2213 		if (RB_EMPTY_ROOT(&vn->lazy.root))
2214 			continue;
2215 
2216 		spin_lock(&vn->lazy.lock);
2217 		WRITE_ONCE(vn->lazy.root.rb_node, NULL);
2218 		list_replace_init(&vn->lazy.head, &vn->purge_list);
2219 		spin_unlock(&vn->lazy.lock);
2220 
2221 		start = min(start, list_first_entry(&vn->purge_list,
2222 			struct vmap_area, list)->va_start);
2223 
2224 		end = max(end, list_last_entry(&vn->purge_list,
2225 			struct vmap_area, list)->va_end);
2226 
2227 		cpumask_set_cpu(i, &purge_nodes);
2228 	}
2229 
2230 	nr_purge_nodes = cpumask_weight(&purge_nodes);
2231 	if (nr_purge_nodes > 0) {
2232 		flush_tlb_kernel_range(start, end);
2233 
2234 		/* One extra helper per full lazy_max_pages() set, minus one. */
2235 		nr_purge_helpers = atomic_long_read(&vmap_lazy_nr) / lazy_max_pages();
2236 		nr_purge_helpers = clamp(nr_purge_helpers, 1U, nr_purge_nodes) - 1;
2237 
2238 		for_each_cpu(i, &purge_nodes) {
2239 			vn = &vmap_nodes[i];
2240 
2241 			if (nr_purge_helpers > 0) {
2242 				INIT_WORK(&vn->purge_work, purge_vmap_node);
2243 
2244 				if (cpumask_test_cpu(i, cpu_online_mask))
2245 					schedule_work_on(i, &vn->purge_work);
2246 				else
2247 					schedule_work(&vn->purge_work);
2248 
2249 				nr_purge_helpers--;
2250 			} else {
2251 				vn->purge_work.func = NULL;
2252 				purge_vmap_node(&vn->purge_work);
2253 				nr_purged_areas += vn->nr_purged;
2254 			}
2255 		}
2256 
2257 		for_each_cpu(i, &purge_nodes) {
2258 			vn = &vmap_nodes[i];
2259 
2260 			if (vn->purge_work.func) {
2261 				flush_work(&vn->purge_work);
2262 				nr_purged_areas += vn->nr_purged;
2263 			}
2264 		}
2265 	}
2266 
2267 	trace_purge_vmap_area_lazy(start, end, nr_purged_areas);
2268 	return nr_purged_areas > 0;
2269 }
2270 
2271 /*
2272  * Reclaim vmap areas by purging fragmented blocks and purge_vmap_area_list.
2273  */
2274 static void reclaim_and_purge_vmap_areas(void)
2275 
2276 {
2277 	mutex_lock(&vmap_purge_lock);
2278 	purge_fragmented_blocks_allcpus();
2279 	__purge_vmap_area_lazy(ULONG_MAX, 0, true);
2280 	mutex_unlock(&vmap_purge_lock);
2281 }
2282 
2283 static void drain_vmap_area_work(struct work_struct *work)
2284 {
2285 	mutex_lock(&vmap_purge_lock);
2286 	__purge_vmap_area_lazy(ULONG_MAX, 0, false);
2287 	mutex_unlock(&vmap_purge_lock);
2288 }
2289 
2290 /*
2291  * Free a vmap area, the caller ensuring that the area has been unmapped,
2292  * unlinked and that flush_cache_vunmap() has been called for the correct
2293  * range previously.
2294  */
2295 static void free_vmap_area_noflush(struct vmap_area *va)
2296 {
2297 	unsigned long nr_lazy_max = lazy_max_pages();
2298 	unsigned long va_start = va->va_start;
2299 	unsigned int vn_id = decode_vn_id(va->flags);
2300 	struct vmap_node *vn;
2301 	unsigned long nr_lazy;
2302 
2303 	if (WARN_ON_ONCE(!list_empty(&va->list)))
2304 		return;
2305 
2306 	nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
2307 				PAGE_SHIFT, &vmap_lazy_nr);
2308 
2309 	/*
2310 	 * If it was requested by a certain node, return it to that
2311 	 * node's pool for later reuse.
2312 	 */
2313 	vn = is_vn_id_valid(vn_id) ?
2314 		id_to_node(vn_id):addr_to_node(va->va_start);
2315 
2316 	spin_lock(&vn->lazy.lock);
2317 	insert_vmap_area(va, &vn->lazy.root, &vn->lazy.head);
2318 	spin_unlock(&vn->lazy.lock);
2319 
2320 	trace_free_vmap_area_noflush(va_start, nr_lazy, nr_lazy_max);
2321 
2322 	/* After this point, we may free va at any time */
2323 	if (unlikely(nr_lazy > nr_lazy_max))
2324 		schedule_work(&drain_vmap_work);
2325 }
2326 
2327 /*
2328  * Free and unmap a vmap area
2329  */
2330 static void free_unmap_vmap_area(struct vmap_area *va)
2331 {
2332 	flush_cache_vunmap(va->va_start, va->va_end);
2333 	vunmap_range_noflush(va->va_start, va->va_end);
2334 	if (debug_pagealloc_enabled_static())
2335 		flush_tlb_kernel_range(va->va_start, va->va_end);
2336 
2337 	free_vmap_area_noflush(va);
2338 }
2339 
2340 struct vmap_area *find_vmap_area(unsigned long addr)
2341 {
2342 	struct vmap_node *vn;
2343 	struct vmap_area *va;
2344 	int i, j;
2345 
2346 	/*
2347 	 * addr_to_node_id(addr) converts an address to the index of the
2348 	 * node where a VA is located. If the VA spans several nodes and
2349 	 * the passed addr is not the same as va->va_start, which is not
2350 	 * common, we may need to scan extra nodes. See an example:
2351 	 *
2352 	 *      <----va---->
2353 	 * -|-----|-----|-----|-----|-
2354 	 *     1     2     0     1
2355 	 *
2356 	 * The VA resides in node 1 whereas it spans nodes 1, 2 and 0. If
2357 	 * the passed addr is within node 2 or 0 we must do extra work.
2358 	 */
2359 	i = j = addr_to_node_id(addr);
2360 	do {
2361 		vn = &vmap_nodes[i];
2362 
2363 		spin_lock(&vn->busy.lock);
2364 		va = __find_vmap_area(addr, &vn->busy.root);
2365 		spin_unlock(&vn->busy.lock);
2366 
2367 		if (va)
2368 			return va;
2369 	} while ((i = (i + 1) % nr_vmap_nodes) != j);
2370 
2371 	return NULL;
2372 }
2373 
2374 static struct vmap_area *find_unlink_vmap_area(unsigned long addr)
2375 {
2376 	struct vmap_node *vn;
2377 	struct vmap_area *va;
2378 	int i, j;
2379 
2380 	/*
2381 	 * See the comment in find_vmap_area() about the loop.
2382 	 */
2383 	i = j = addr_to_node_id(addr);
2384 	do {
2385 		vn = &vmap_nodes[i];
2386 
2387 		spin_lock(&vn->busy.lock);
2388 		va = __find_vmap_area(addr, &vn->busy.root);
2389 		if (va)
2390 			unlink_va(va, &vn->busy.root);
2391 		spin_unlock(&vn->busy.lock);
2392 
2393 		if (va)
2394 			return va;
2395 	} while ((i = (i + 1) % nr_vmap_nodes) != j);
2396 
2397 	return NULL;
2398 }
2399 
2400 /*** Per cpu kva allocator ***/
2401 
2402 /*
2403  * vmap space is limited, especially on 32-bit architectures. Ensure there is
2404  * room for at least 16 percpu vmap blocks per CPU.
2405  */
2406 /*
2407  * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
2408  * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
2409  * instead (we just need a rough idea)
2410  */
2411 #if BITS_PER_LONG == 32
2412 #define VMALLOC_SPACE		(128UL*1024*1024)
2413 #else
2414 #define VMALLOC_SPACE		(128UL*1024*1024*1024)
2415 #endif
2416 
2417 #define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
2418 #define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
2419 #define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
2420 #define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
2421 #define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
2422 #define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
2423 #define VMAP_BBMAP_BITS		\
2424 		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
2425 		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
2426 			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
2427 
2428 #define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
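/*
 * A rough sizing example (assuming a 64-bit kernel, 4 KiB pages and
 * NR_CPUS == 64): VMALLOC_PAGES is guessed as 128 GiB / 4 KiB, so
 * VMALLOC_PAGES / 64 / 16 == 32768, which is clamped by
 * VMAP_BBMAP_BITS_MAX to 1024 bits, giving a 4 MiB VMAP_BLOCK_SIZE.
 */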
2429 
2430 /*
2431  * Purge threshold to prevent overeager purging of fragmented blocks for
2432  * regular operations: Purge if vb->free is less than 1/4 of the capacity.
2433  */
2434 #define VMAP_PURGE_THRESHOLD	(VMAP_BBMAP_BITS / 4)
2435 
2436 #define VMAP_RAM		0x1 /* indicates vm_map_ram area*/
2437 #define VMAP_BLOCK		0x2 /* mark out the vmap_block sub-type*/
2438 #define VMAP_FLAGS_MASK		0x3
2439 
2440 struct vmap_block_queue {
2441 	spinlock_t lock;
2442 	struct list_head free;
2443 
2444 	/*
2445 	 * An xarray requires extra memory to be allocated
2446 	 * dynamically. If that becomes an issue, an rb-tree
2447 	 * could be used instead.
2448 	 */
2449 	struct xarray vmap_blocks;
2450 };
2451 
2452 struct vmap_block {
2453 	spinlock_t lock;
2454 	struct vmap_area *va;
2455 	unsigned long free, dirty;
2456 	DECLARE_BITMAP(used_map, VMAP_BBMAP_BITS);
2457 	unsigned long dirty_min, dirty_max; /*< dirty range */
2458 	struct list_head free_list;
2459 	struct rcu_head rcu_head;
2460 	struct list_head purge;
2461 };
2462 
2463 /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
2464 static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
2465 
2466 /*
2467  * In order to have fast access to any "vmap_block" associated with a
2468  * specific address, we use a hash.
2469  *
2470  * A per-cpu vmap_block_queue is used in two ways: it serializes
2471  * access to the free block chains among CPUs (alloc path) and it
2472  * also acts as a vmap_block hash (alloc/free paths). That is, we
2473  * overload it, since we already have the per-cpu array which is
2474  * used as a hash table. When used as a hash, the 'cpu' passed to
2475  * per_cpu() is not actually a CPU but rather a hash index.
2476  *
2477  * The hash function is addr_to_vb_xa(), which hashes any address
2478  * to the specific index (in the hash) it belongs to. It then uses
2479  * the per_cpu() macro to access the array with the generated index.
2480  *
2481  * An example:
2482  *
2483  *  CPU_1  CPU_2  CPU_0
2484  *    |      |      |
2485  *    V      V      V
2486  * 0     10     20     30     40     50     60
2487  * |------|------|------|------|------|------|...<vmap address space>
2488  *   CPU0   CPU1   CPU2   CPU0   CPU1   CPU2
2489  *
2490  * - CPU_1 invokes vm_unmap_ram(6), 6 belongs to CPU0 zone, thus
2491  *   it accesses: CPU0/INDEX0 -> vmap_blocks -> xa_lock;
2492  *
2493  * - CPU_2 invokes vm_unmap_ram(11), 11 belongs to CPU1 zone, thus
2494  *   it accesses: CPU1/INDEX1 -> vmap_blocks -> xa_lock;
2495  *
2496  * - CPU_0 invokes vm_unmap_ram(20), 20 belongs to CPU2 zone, thus
2497  *   it accesses: CPU2/INDEX2 -> vmap_blocks -> xa_lock.
2498  *
2499  * This technique almost always avoids lock contention on insert/remove,
2500  * however xarray spinlocks protect against any contention that remains.
2501  */
2502 static struct xarray *
2503 addr_to_vb_xa(unsigned long addr)
2504 {
2505 	int index = (addr / VMAP_BLOCK_SIZE) % num_possible_cpus();
2506 
2507 	return &per_cpu(vmap_block_queue, index).vmap_blocks;
2508 }
2509 
2510 /*
2511  * We should probably have a fallback mechanism to allocate virtual memory
2512  * out of partially filled vmap blocks. However vmap block sizing should be
2513  * fairly reasonable according to the vmalloc size, so it shouldn't be a
2514  * big problem.
2515  */
2516 
2517 static unsigned long addr_to_vb_idx(unsigned long addr)
2518 {
2519 	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
2520 	addr /= VMAP_BLOCK_SIZE;
2521 	return addr;
2522 }
2523 
2524 static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
2525 {
2526 	unsigned long addr;
2527 
2528 	addr = va_start + (pages_off << PAGE_SHIFT);
2529 	BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
2530 	return (void *)addr;
2531 }
2532 
2533 /**
2534  * new_vmap_block - allocate a new vmap_block and occupy 2^order pages in it.
2535  *                  The number of pages, of course, cannot exceed VMAP_BBMAP_BITS.
2536  * @order:    occupy 2^order pages in the newly allocated block
2537  * @gfp_mask: flags for the page level allocator
2538  *
2539  * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
2540  */
2541 static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
2542 {
2543 	struct vmap_block_queue *vbq;
2544 	struct vmap_block *vb;
2545 	struct vmap_area *va;
2546 	struct xarray *xa;
2547 	unsigned long vb_idx;
2548 	int node, err;
2549 	void *vaddr;
2550 
2551 	node = numa_node_id();
2552 
2553 	vb = kmalloc_node(sizeof(struct vmap_block),
2554 			gfp_mask & GFP_RECLAIM_MASK, node);
2555 	if (unlikely(!vb))
2556 		return ERR_PTR(-ENOMEM);
2557 
2558 	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
2559 					VMALLOC_START, VMALLOC_END,
2560 					node, gfp_mask,
2561 					VMAP_RAM|VMAP_BLOCK);
2562 	if (IS_ERR(va)) {
2563 		kfree(vb);
2564 		return ERR_CAST(va);
2565 	}
2566 
2567 	vaddr = vmap_block_vaddr(va->va_start, 0);
2568 	spin_lock_init(&vb->lock);
2569 	vb->va = va;
2570 	/* At least something should be left free */
2571 	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
2572 	bitmap_zero(vb->used_map, VMAP_BBMAP_BITS);
2573 	vb->free = VMAP_BBMAP_BITS - (1UL << order);
2574 	vb->dirty = 0;
2575 	vb->dirty_min = VMAP_BBMAP_BITS;
2576 	vb->dirty_max = 0;
2577 	bitmap_set(vb->used_map, 0, (1UL << order));
2578 	INIT_LIST_HEAD(&vb->free_list);
2579 
2580 	xa = addr_to_vb_xa(va->va_start);
2581 	vb_idx = addr_to_vb_idx(va->va_start);
2582 	err = xa_insert(xa, vb_idx, vb, gfp_mask);
2583 	if (err) {
2584 		kfree(vb);
2585 		free_vmap_area(va);
2586 		return ERR_PTR(err);
2587 	}
2588 
2589 	vbq = raw_cpu_ptr(&vmap_block_queue);
2590 	spin_lock(&vbq->lock);
2591 	list_add_tail_rcu(&vb->free_list, &vbq->free);
2592 	spin_unlock(&vbq->lock);
2593 
2594 	return vaddr;
2595 }
2596 
2597 static void free_vmap_block(struct vmap_block *vb)
2598 {
2599 	struct vmap_node *vn;
2600 	struct vmap_block *tmp;
2601 	struct xarray *xa;
2602 
2603 	xa = addr_to_vb_xa(vb->va->va_start);
2604 	tmp = xa_erase(xa, addr_to_vb_idx(vb->va->va_start));
2605 	BUG_ON(tmp != vb);
2606 
2607 	vn = addr_to_node(vb->va->va_start);
2608 	spin_lock(&vn->busy.lock);
2609 	unlink_va(vb->va, &vn->busy.root);
2610 	spin_unlock(&vn->busy.lock);
2611 
2612 	free_vmap_area_noflush(vb->va);
2613 	kfree_rcu(vb, rcu_head);
2614 }
2615 
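/*
 * Move a fully-unmapped but fragmented block onto @purge_list. A block
 * qualifies when everything in it is either free or dirty (nothing is
 * still mapped) yet it is not entirely dirty; unless @force_purge is
 * set, blocks with at least VMAP_PURGE_THRESHOLD free space are kept
 * for further allocations. Returns true if the block was queued.
 */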
2616 static bool purge_fragmented_block(struct vmap_block *vb,
2617 		struct vmap_block_queue *vbq, struct list_head *purge_list,
2618 		bool force_purge)
2619 {
2620 	if (vb->free + vb->dirty != VMAP_BBMAP_BITS ||
2621 	    vb->dirty == VMAP_BBMAP_BITS)
2622 		return false;
2623 
2624 	/* Don't overeagerly purge usable blocks unless requested */
2625 	if (!(force_purge || vb->free < VMAP_PURGE_THRESHOLD))
2626 		return false;
2627 
2628 	/* prevent further allocs after releasing lock */
2629 	WRITE_ONCE(vb->free, 0);
2630 	/* prevent purging it again */
2631 	WRITE_ONCE(vb->dirty, VMAP_BBMAP_BITS);
2632 	vb->dirty_min = 0;
2633 	vb->dirty_max = VMAP_BBMAP_BITS;
2634 	spin_lock(&vbq->lock);
2635 	list_del_rcu(&vb->free_list);
2636 	spin_unlock(&vbq->lock);
2637 	list_add_tail(&vb->purge, purge_list);
2638 	return true;
2639 }
2640 
2641 static void free_purged_blocks(struct list_head *purge_list)
2642 {
2643 	struct vmap_block *vb, *n_vb;
2644 
2645 	list_for_each_entry_safe(vb, n_vb, purge_list, purge) {
2646 		list_del(&vb->purge);
2647 		free_vmap_block(vb);
2648 	}
2649 }
2650 
2651 static void purge_fragmented_blocks(int cpu)
2652 {
2653 	LIST_HEAD(purge);
2654 	struct vmap_block *vb;
2655 	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
2656 
2657 	rcu_read_lock();
2658 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
2659 		unsigned long free = READ_ONCE(vb->free);
2660 		unsigned long dirty = READ_ONCE(vb->dirty);
2661 
2662 		if (free + dirty != VMAP_BBMAP_BITS ||
2663 		    dirty == VMAP_BBMAP_BITS)
2664 			continue;
2665 
2666 		spin_lock(&vb->lock);
2667 		purge_fragmented_block(vb, vbq, &purge, true);
2668 		spin_unlock(&vb->lock);
2669 	}
2670 	rcu_read_unlock();
2671 	free_purged_blocks(&purge);
2672 }
2673 
2674 static void purge_fragmented_blocks_allcpus(void)
2675 {
2676 	int cpu;
2677 
2678 	for_each_possible_cpu(cpu)
2679 		purge_fragmented_blocks(cpu);
2680 }
2681 
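/*
 * Allocate @size bytes (at most VMAP_MAX_ALLOC pages) out of a per-cpu
 * vmap block: walk this CPU's free list and carve 2^order pages from
 * the first block with enough room, falling back to new_vmap_block()
 * when none fits.
 */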
2682 static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
2683 {
2684 	struct vmap_block_queue *vbq;
2685 	struct vmap_block *vb;
2686 	void *vaddr = NULL;
2687 	unsigned int order;
2688 
2689 	BUG_ON(offset_in_page(size));
2690 	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
2691 	if (WARN_ON(size == 0)) {
2692 		/*
2693 		 * Allocating 0 bytes isn't what the caller wants, since
2694 		 * get_order(0) returns a funny result. Just warn and terminate
2695 		 * early.
2696 		 */
2697 		return NULL;
2698 	}
2699 	order = get_order(size);
2700 
2701 	rcu_read_lock();
2702 	vbq = raw_cpu_ptr(&vmap_block_queue);
2703 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
2704 		unsigned long pages_off;
2705 
2706 		if (READ_ONCE(vb->free) < (1UL << order))
2707 			continue;
2708 
2709 		spin_lock(&vb->lock);
2710 		if (vb->free < (1UL << order)) {
2711 			spin_unlock(&vb->lock);
2712 			continue;
2713 		}
2714 
2715 		pages_off = VMAP_BBMAP_BITS - vb->free;
2716 		vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
2717 		WRITE_ONCE(vb->free, vb->free - (1UL << order));
2718 		bitmap_set(vb->used_map, pages_off, (1UL << order));
2719 		if (vb->free == 0) {
2720 			spin_lock(&vbq->lock);
2721 			list_del_rcu(&vb->free_list);
2722 			spin_unlock(&vbq->lock);
2723 		}
2724 
2725 		spin_unlock(&vb->lock);
2726 		break;
2727 	}
2728 
2729 	rcu_read_unlock();
2730 
2731 	/* Allocate new block if nothing was found */
2732 	if (!vaddr)
2733 		vaddr = new_vmap_block(order, gfp_mask);
2734 
2735 	return vaddr;
2736 }
2737 
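/*
 * Release a sub-allocation previously handed out by vb_alloc(): clear
 * its bits in the used map, unmap the range, record it in the block's
 * dirty window for a deferred TLB flush and, once the whole block has
 * become dirty, free the block itself.
 */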
2738 static void vb_free(unsigned long addr, unsigned long size)
2739 {
2740 	unsigned long offset;
2741 	unsigned int order;
2742 	struct vmap_block *vb;
2743 	struct xarray *xa;
2744 
2745 	BUG_ON(offset_in_page(size));
2746 	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
2747 
2748 	flush_cache_vunmap(addr, addr + size);
2749 
2750 	order = get_order(size);
2751 	offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;
2752 
2753 	xa = addr_to_vb_xa(addr);
2754 	vb = xa_load(xa, addr_to_vb_idx(addr));
2755 
2756 	spin_lock(&vb->lock);
2757 	bitmap_clear(vb->used_map, offset, (1UL << order));
2758 	spin_unlock(&vb->lock);
2759 
2760 	vunmap_range_noflush(addr, addr + size);
2761 
2762 	if (debug_pagealloc_enabled_static())
2763 		flush_tlb_kernel_range(addr, addr + size);
2764 
2765 	spin_lock(&vb->lock);
2766 
2767 	/* Expand the not yet TLB flushed dirty range */
2768 	vb->dirty_min = min(vb->dirty_min, offset);
2769 	vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
2770 
2771 	WRITE_ONCE(vb->dirty, vb->dirty + (1UL << order));
2772 	if (vb->dirty == VMAP_BBMAP_BITS) {
2773 		BUG_ON(vb->free);
2774 		spin_unlock(&vb->lock);
2775 		free_vmap_block(vb);
2776 	} else
2777 		spin_unlock(&vb->lock);
2778 }
2779 
2780 static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
2781 {
2782 	LIST_HEAD(purge_list);
2783 	int cpu;
2784 
2785 	if (unlikely(!vmap_initialized))
2786 		return;
2787 
2788 	mutex_lock(&vmap_purge_lock);
2789 
2790 	for_each_possible_cpu(cpu) {
2791 		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
2792 		struct vmap_block *vb;
2793 		unsigned long idx;
2794 
2795 		rcu_read_lock();
2796 		xa_for_each(&vbq->vmap_blocks, idx, vb) {
2797 			spin_lock(&vb->lock);
2798 
2799 			/*
2800 			 * Try to purge a fragmented block first. If it's
2801 			 * not purgeable, check whether there is dirty
2802 			 * space to be flushed.
2803 			 */
2804 			if (!purge_fragmented_block(vb, vbq, &purge_list, false) &&
2805 			    vb->dirty_max && vb->dirty != VMAP_BBMAP_BITS) {
2806 				unsigned long va_start = vb->va->va_start;
2807 				unsigned long s, e;
2808 
2809 				s = va_start + (vb->dirty_min << PAGE_SHIFT);
2810 				e = va_start + (vb->dirty_max << PAGE_SHIFT);
2811 
2812 				start = min(s, start);
2813 				end   = max(e, end);
2814 
2815 				/* Prevent this from being flushed again */
2816 				vb->dirty_min = VMAP_BBMAP_BITS;
2817 				vb->dirty_max = 0;
2818 
2819 				flush = 1;
2820 			}
2821 			spin_unlock(&vb->lock);
2822 		}
2823 		rcu_read_unlock();
2824 	}
2825 	free_purged_blocks(&purge_list);
2826 
2827 	if (!__purge_vmap_area_lazy(start, end, false) && flush)
2828 		flush_tlb_kernel_range(start, end);
2829 	mutex_unlock(&vmap_purge_lock);
2830 }
2831 
2832 /**
2833  * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
2834  *
2835  * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
2836  * to amortize TLB flushing overheads. What this means is that any page you
2837  * have now may, in a former life, have been mapped into a kernel virtual
2838  * address by the vmap layer and so there might be some CPUs with TLB entries
2839  * still referencing that page (additional to the regular 1:1 kernel mapping).
2840  *
2841  * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
2842  * be sure that none of the pages we have control over will have any aliases
2843  * from the vmap layer.
2844  */
2845 void vm_unmap_aliases(void)
2846 {
2847 	unsigned long start = ULONG_MAX, end = 0;
2848 	int flush = 0;
2849 
2850 	_vm_unmap_aliases(start, end, flush);
2851 }
2852 EXPORT_SYMBOL_GPL(vm_unmap_aliases);
2853 
2854 /**
2855  * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
2856  * @mem: the pointer returned by vm_map_ram
2857  * @count: the count passed to that vm_map_ram call (cannot unmap partial)
2858  */
2859 void vm_unmap_ram(const void *mem, unsigned int count)
2860 {
2861 	unsigned long size = (unsigned long)count << PAGE_SHIFT;
2862 	unsigned long addr = (unsigned long)kasan_reset_tag(mem);
2863 	struct vmap_area *va;
2864 
2865 	might_sleep();
2866 	BUG_ON(!addr);
2867 	BUG_ON(addr < VMALLOC_START);
2868 	BUG_ON(addr > VMALLOC_END);
2869 	BUG_ON(!PAGE_ALIGNED(addr));
2870 
2871 	kasan_poison_vmalloc(mem, size);
2872 
2873 	if (likely(count <= VMAP_MAX_ALLOC)) {
2874 		debug_check_no_locks_freed(mem, size);
2875 		vb_free(addr, size);
2876 		return;
2877 	}
2878 
2879 	va = find_unlink_vmap_area(addr);
2880 	if (WARN_ON_ONCE(!va))
2881 		return;
2882 
2883 	debug_check_no_locks_freed((void *)va->va_start,
2884 				    (va->va_end - va->va_start));
2885 	free_unmap_vmap_area(va);
2886 }
2887 EXPORT_SYMBOL(vm_unmap_ram);
2888 
2889 /**
2890  * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
2891  * @pages: an array of pointers to the pages to be mapped
2892  * @count: number of pages
2893  * @node: prefer to allocate data structures on this node
2894  *
2895  * If you use this function for fewer than VMAP_MAX_ALLOC pages, it could be
2896  * faster than vmap(), so it is good for that case.  But if you mix long-lived
2897  * and short-lived objects with vm_map_ram(), it could consume lots of address
2898  * space through fragmentation (especially on a 32-bit machine).  You could see
2899  * failures in the end.  Please use this function for short-lived objects.
2900  *
2901  * Returns: a pointer to the address that has been mapped, or %NULL on failure
2902  */
2903 void *vm_map_ram(struct page **pages, unsigned int count, int node)
2904 {
2905 	unsigned long size = (unsigned long)count << PAGE_SHIFT;
2906 	unsigned long addr;
2907 	void *mem;
2908 
2909 	if (likely(count <= VMAP_MAX_ALLOC)) {
2910 		mem = vb_alloc(size, GFP_KERNEL);
2911 		if (IS_ERR(mem))
2912 			return NULL;
2913 		addr = (unsigned long)mem;
2914 	} else {
2915 		struct vmap_area *va;
2916 		va = alloc_vmap_area(size, PAGE_SIZE,
2917 				VMALLOC_START, VMALLOC_END,
2918 				node, GFP_KERNEL, VMAP_RAM);
2919 		if (IS_ERR(va))
2920 			return NULL;
2921 
2922 		addr = va->va_start;
2923 		mem = (void *)addr;
2924 	}
2925 
2926 	if (vmap_pages_range(addr, addr + size, PAGE_KERNEL,
2927 				pages, PAGE_SHIFT) < 0) {
2928 		vm_unmap_ram(mem, count);
2929 		return NULL;
2930 	}
2931 
2932 	/*
2933 	 * Mark the pages as accessible, now that they are mapped.
2934 	 * With hardware tag-based KASAN, marking is skipped for
2935 	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
2936 	 */
2937 	mem = kasan_unpoison_vmalloc(mem, size, KASAN_VMALLOC_PROT_NORMAL);
2938 
2939 	return mem;
2940 }
2941 EXPORT_SYMBOL(vm_map_ram);
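/*
 * A minimal usage sketch (illustrative only; page allocation and error
 * handling are elided):
 *
 *	struct page *pages[4];	// filled with allocated pages beforehand
 *	void *va = vm_map_ram(pages, 4, NUMA_NO_NODE);
 *
 *	if (va) {
 *		// use the linear mapping at va, then:
 *		vm_unmap_ram(va, 4);
 *	}
 */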
2942 
2943 static struct vm_struct *vmlist __initdata;
2944 
2945 static inline unsigned int vm_area_page_order(struct vm_struct *vm)
2946 {
2947 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
2948 	return vm->page_order;
2949 #else
2950 	return 0;
2951 #endif
2952 }
2953 
2954 static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order)
2955 {
2956 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
2957 	vm->page_order = order;
2958 #else
2959 	BUG_ON(order != 0);
2960 #endif
2961 }
2962 
2963 /**
2964  * vm_area_add_early - add vmap area early during boot
2965  * @vm: vm_struct to add
2966  *
2967  * This function is used to add fixed kernel vm area to vmlist before
2968  * vmalloc_init() is called.  @vm->addr, @vm->size, and @vm->flags
2969  * should contain proper values and the other fields should be zero.
2970  *
2971  * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
2972  */
2973 void __init vm_area_add_early(struct vm_struct *vm)
2974 {
2975 	struct vm_struct *tmp, **p;
2976 
2977 	BUG_ON(vmap_initialized);
2978 	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
2979 		if (tmp->addr >= vm->addr) {
2980 			BUG_ON(tmp->addr < vm->addr + vm->size);
2981 			break;
2982 		} else
2983 			BUG_ON(tmp->addr + tmp->size > vm->addr);
2984 	}
2985 	vm->next = *p;
2986 	*p = vm;
2987 }
2988 
2989 /**
2990  * vm_area_register_early - register vmap area early during boot
2991  * @vm: vm_struct to register
2992  * @align: requested alignment
2993  *
2994  * This function is used to register kernel vm area before
2995  * vmalloc_init() is called.  @vm->size and @vm->flags should contain
2996  * proper values on entry and other fields should be zero.  On return,
2997  * vm->addr contains the allocated address.
2998  *
2999  * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
3000  */
3001 void __init vm_area_register_early(struct vm_struct *vm, size_t align)
3002 {
3003 	unsigned long addr = ALIGN(VMALLOC_START, align);
3004 	struct vm_struct *cur, **p;
3005 
3006 	BUG_ON(vmap_initialized);
3007 
3008 	for (p = &vmlist; (cur = *p) != NULL; p = &cur->next) {
3009 		if ((unsigned long)cur->addr - addr >= vm->size)
3010 			break;
3011 		addr = ALIGN((unsigned long)cur->addr + cur->size, align);
3012 	}
3013 
3014 	BUG_ON(addr > VMALLOC_END - vm->size);
3015 	vm->addr = (void *)addr;
3016 	vm->next = *p;
3017 	*p = vm;
3018 	kasan_populate_early_vm_area_shadow(vm->addr, vm->size);
3019 }
3020 
3021 static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
3022 	struct vmap_area *va, unsigned long flags, const void *caller)
3023 {
3024 	vm->flags = flags;
3025 	vm->addr = (void *)va->va_start;
3026 	vm->size = va->va_end - va->va_start;
3027 	vm->caller = caller;
3028 	va->vm = vm;
3029 }
3030 
3031 static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
3032 			      unsigned long flags, const void *caller)
3033 {
3034 	struct vmap_node *vn = addr_to_node(va->va_start);
3035 
3036 	spin_lock(&vn->busy.lock);
3037 	setup_vmalloc_vm_locked(vm, va, flags, caller);
3038 	spin_unlock(&vn->busy.lock);
3039 }
3040 
3041 static void clear_vm_uninitialized_flag(struct vm_struct *vm)
3042 {
3043 	/*
3044 	 * Before removing VM_UNINITIALIZED,
3045 	 * we should make sure that vm has proper values.
3046 	 * Pair with smp_rmb() in show_numa_info().
3047 	 */
3048 	smp_wmb();
3049 	vm->flags &= ~VM_UNINITIALIZED;
3050 }
3051 
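/*
 * Common helper for the get_vm_area* family: reserve a vmap area of
 * @size bytes (plus one guard page unless VM_NO_GUARD is set) between
 * @start and @end, wrap it in a freshly allocated vm_struct and, for
 * non-VM_ALLOC mappings, unpoison it for KASAN.
 */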
3052 static struct vm_struct *__get_vm_area_node(unsigned long size,
3053 		unsigned long align, unsigned long shift, unsigned long flags,
3054 		unsigned long start, unsigned long end, int node,
3055 		gfp_t gfp_mask, const void *caller)
3056 {
3057 	struct vmap_area *va;
3058 	struct vm_struct *area;
3059 	unsigned long requested_size = size;
3060 
3061 	BUG_ON(in_interrupt());
3062 	size = ALIGN(size, 1ul << shift);
3063 	if (unlikely(!size))
3064 		return NULL;
3065 
3066 	if (flags & VM_IOREMAP)
3067 		align = 1ul << clamp_t(int, get_count_order_long(size),
3068 				       PAGE_SHIFT, IOREMAP_MAX_ORDER);
3069 
3070 	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
3071 	if (unlikely(!area))
3072 		return NULL;
3073 
3074 	if (!(flags & VM_NO_GUARD))
3075 		size += PAGE_SIZE;
3076 
3077 	va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0);
3078 	if (IS_ERR(va)) {
3079 		kfree(area);
3080 		return NULL;
3081 	}
3082 
3083 	setup_vmalloc_vm(area, va, flags, caller);
3084 
3085 	/*
3086 	 * Mark pages for non-VM_ALLOC mappings as accessible. Do it now as a
3087 	 * best-effort approach, as they can be mapped outside of vmalloc code.
3088 	 * For VM_ALLOC mappings, the pages are marked as accessible after
3089 	 * getting mapped in __vmalloc_node_range().
3090 	 * With hardware tag-based KASAN, marking is skipped for
3091 	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
3092 	 */
3093 	if (!(flags & VM_ALLOC))
3094 		area->addr = kasan_unpoison_vmalloc(area->addr, requested_size,
3095 						    KASAN_VMALLOC_PROT_NORMAL);
3096 
3097 	return area;
3098 }
3099 
3100 struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
3101 				       unsigned long start, unsigned long end,
3102 				       const void *caller)
3103 {
3104 	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end,
3105 				  NUMA_NO_NODE, GFP_KERNEL, caller);
3106 }
3107 
3108 /**
3109  * get_vm_area - reserve a contiguous kernel virtual area
3110  * @size:	 size of the area
3111  * @flags:	 %VM_IOREMAP for I/O mappings or VM_ALLOC
3112  *
3113  * Search for an area of @size in the kernel virtual mapping area,
3114  * and reserve it for our purposes.  Returns the area descriptor
3115  * on success or %NULL on failure.
3116  *
3117  * Return: the area descriptor on success or %NULL on failure.
3118  */
3119 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
3120 {
3121 	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
3122 				  VMALLOC_START, VMALLOC_END,
3123 				  NUMA_NO_NODE, GFP_KERNEL,
3124 				  __builtin_return_address(0));
3125 }
3126 
3127 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
3128 				const void *caller)
3129 {
3130 	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
3131 				  VMALLOC_START, VMALLOC_END,
3132 				  NUMA_NO_NODE, GFP_KERNEL, caller);
3133 }
3134 
3135 /**
3136  * find_vm_area - find a contiguous kernel virtual area
3137  * @addr:	  base address
3138  *
3139  * Search for the kernel VM area starting at @addr, and return it.
3140  * It is up to the caller to do all required locking to keep the returned
3141  * pointer valid.
3142  *
3143  * Return: the area descriptor on success or %NULL on failure.
3144  */
3145 struct vm_struct *find_vm_area(const void *addr)
3146 {
3147 	struct vmap_area *va;
3148 
3149 	va = find_vmap_area((unsigned long)addr);
3150 	if (!va)
3151 		return NULL;
3152 
3153 	return va->vm;
3154 }
3155 
3156 /**
3157  * remove_vm_area - find and remove a contiguous kernel virtual area
3158  * @addr:	    base address
3159  *
3160  * Search for the kernel VM area starting at @addr, and remove it.
3161  * This function returns the found VM area, but using it is NOT safe
3162  * on SMP machines, except for its size or flags.
3163  *
3164  * Return: the area descriptor on success or %NULL on failure.
3165  */
3166 struct vm_struct *remove_vm_area(const void *addr)
3167 {
3168 	struct vmap_area *va;
3169 	struct vm_struct *vm;
3170 
3171 	might_sleep();
3172 
3173 	if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
3174 			addr))
3175 		return NULL;
3176 
3177 	va = find_unlink_vmap_area((unsigned long)addr);
3178 	if (!va || !va->vm)
3179 		return NULL;
3180 	vm = va->vm;
3181 
3182 	debug_check_no_locks_freed(vm->addr, get_vm_area_size(vm));
3183 	debug_check_no_obj_freed(vm->addr, get_vm_area_size(vm));
3184 	kasan_free_module_shadow(vm);
3185 	kasan_poison_vmalloc(vm->addr, get_vm_area_size(vm));
3186 
3187 	free_unmap_vmap_area(va);
3188 	return vm;
3189 }
3190 
3191 static inline void set_area_direct_map(const struct vm_struct *area,
3192 				       int (*set_direct_map)(struct page *page))
3193 {
3194 	int i;
3195 
3196 	/* HUGE_VMALLOC passes small pages to set_direct_map */
3197 	for (i = 0; i < area->nr_pages; i++)
3198 		if (page_address(area->pages[i]))
3199 			set_direct_map(area->pages[i]);
3200 }
3201 
3202 /*
3203  * Flush the vm mapping and reset the direct map.
3204  */
3205 static void vm_reset_perms(struct vm_struct *area)
3206 {
3207 	unsigned long start = ULONG_MAX, end = 0;
3208 	unsigned int page_order = vm_area_page_order(area);
3209 	int flush_dmap = 0;
3210 	int i;
3211 
3212 	/*
3213 	 * Find the start and end range of the direct mappings to make sure that
3214 	 * the vm_unmap_aliases() flush includes the direct map.
3215 	 */
3216 	for (i = 0; i < area->nr_pages; i += 1U << page_order) {
3217 		unsigned long addr = (unsigned long)page_address(area->pages[i]);
3218 
3219 		if (addr) {
3220 			unsigned long page_size;
3221 
3222 			page_size = PAGE_SIZE << page_order;
3223 			start = min(addr, start);
3224 			end = max(addr + page_size, end);
3225 			flush_dmap = 1;
3226 		}
3227 	}
3228 
3229 	/*
3230 	 * Set direct map to something invalid so that it won't be cached if
3231 	 * there are any accesses after the TLB flush, then flush the TLB and
3232 	 * reset the direct map permissions to the default.
3233 	 */
3234 	set_area_direct_map(area, set_direct_map_invalid_noflush);
3235 	_vm_unmap_aliases(start, end, flush_dmap);
3236 	set_area_direct_map(area, set_direct_map_default_noflush);
3237 }
3238 
3239 static void delayed_vfree_work(struct work_struct *w)
3240 {
3241 	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
3242 	struct llist_node *t, *llnode;
3243 
3244 	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
3245 		vfree(llnode);
3246 }
3247 
3248 /**
3249  * vfree_atomic - release memory allocated by vmalloc()
3250  * @addr:	  memory base address
3251  *
3252  * This one is just like vfree() but can be called in any atomic context
3253  * except NMIs.
3254  */
3255 void vfree_atomic(const void *addr)
3256 {
3257 	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
3258 
3259 	BUG_ON(in_nmi());
3260 	kmemleak_free(addr);
3261 
3262 	/*
3263 	 * Use raw_cpu_ptr() because this can be called from preemptible
3264 	 * context. Preemption is absolutely fine here, because the llist_add()
3265 	 * implementation is lockless, so it works even if we are adding to
3266 	 * another cpu's list. schedule_work() should be fine with this too.
3267 	 */
3268 	if (addr && llist_add((struct llist_node *)addr, &p->list))
3269 		schedule_work(&p->wq);
3270 }
3271 
3272 /**
3273  * vfree - Release memory allocated by vmalloc()
3274  * @addr:  Memory base address
3275  *
3276  * Free the virtually continuous memory area starting at @addr, as obtained
3277  * Free the virtually contiguous memory area starting at @addr, as obtained
3278  * physical memory underlying the virtual allocation, but that memory is
3279  * reference counted, so it will not be freed until the last user goes away.
3280  *
3281  * If @addr is NULL, no operation is performed.
3282  *
3283  * Context:
3284  * May sleep if called *not* from interrupt context.
3285  * Must not be called in NMI context (strictly speaking, it could be
3286  * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
3287  * conventions for vfree() arch-dependent would be a really bad idea).
3288  */
3289 void vfree(const void *addr)
3290 {
3291 	struct vm_struct *vm;
3292 	int i;
3293 
3294 	if (unlikely(in_interrupt())) {
3295 		vfree_atomic(addr);
3296 		return;
3297 	}
3298 
3299 	BUG_ON(in_nmi());
3300 	kmemleak_free(addr);
3301 	might_sleep();
3302 
3303 	if (!addr)
3304 		return;
3305 
3306 	vm = remove_vm_area(addr);
3307 	if (unlikely(!vm)) {
3308 		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
3309 				addr);
3310 		return;
3311 	}
3312 
3313 	if (unlikely(vm->flags & VM_FLUSH_RESET_PERMS))
3314 		vm_reset_perms(vm);
3315 	for (i = 0; i < vm->nr_pages; i++) {
3316 		struct page *page = vm->pages[i];
3317 
3318 		BUG_ON(!page);
3319 		mod_memcg_page_state(page, MEMCG_VMALLOC, -1);
3320 		/*
3321 		 * High-order allocs for huge vmallocs are split, so they
3322 		 * can be freed as an array of order-0 allocations.
3323 		 */
3324 		__free_page(page);
3325 		cond_resched();
3326 	}
3327 	atomic_long_sub(vm->nr_pages, &nr_vmalloc_pages);
3328 	kvfree(vm->pages);
3329 	kfree(vm);
3330 }
3331 EXPORT_SYMBOL(vfree);
3332 
3333 /**
3334  * vunmap - release virtual mapping obtained by vmap()
3335  * @addr:   memory base address
3336  *
3337  * Free the virtually contiguous memory area starting at @addr,
3338  * which was created from the page array passed to vmap().
3339  *
3340  * Must not be called in interrupt context.
3341  */
3342 void vunmap(const void *addr)
3343 {
3344 	struct vm_struct *vm;
3345 
3346 	BUG_ON(in_interrupt());
3347 	might_sleep();
3348 
3349 	if (!addr)
3350 		return;
3351 	vm = remove_vm_area(addr);
3352 	if (unlikely(!vm)) {
3353 		WARN(1, KERN_ERR "Trying to vunmap() nonexistent vm area (%p)\n",
3354 				addr);
3355 		return;
3356 	}
3357 	kfree(vm);
3358 }
3359 EXPORT_SYMBOL(vunmap);
3360 
3361 /**
3362  * vmap - map an array of pages into virtually contiguous space
3363  * @pages: array of page pointers
3364  * @count: number of pages to map
3365  * @flags: vm_area->flags
3366  * @prot: page protection for the mapping
3367  *
3368  * Maps @count pages from @pages into contiguous kernel virtual space.
3369  * If @flags contains %VM_MAP_PUT_PAGES the ownership of the pages array itself
3370  * (which must be kmalloc or vmalloc memory) and one reference per page in it
3371  * are transferred from the caller to vmap(), and will be freed / dropped when
3372  * vfree() is called on the return value.
3373  *
3374  * Return: the address of the area or %NULL on failure
3375  */
3376 void *vmap(struct page **pages, unsigned int count,
3377 	   unsigned long flags, pgprot_t prot)
3378 {
3379 	struct vm_struct *area;
3380 	unsigned long addr;
3381 	unsigned long size;		/* In bytes */
3382 
3383 	might_sleep();
3384 
3385 	if (WARN_ON_ONCE(flags & VM_FLUSH_RESET_PERMS))
3386 		return NULL;
3387 
3388 	/*
3389 	 * Your top guard is someone else's bottom guard. Not having a top
3390 	 * guard compromises someone else's mappings too.
3391 	 */
3392 	if (WARN_ON_ONCE(flags & VM_NO_GUARD))
3393 		flags &= ~VM_NO_GUARD;
3394 
3395 	if (count > totalram_pages())
3396 		return NULL;
3397 
3398 	size = (unsigned long)count << PAGE_SHIFT;
3399 	area = get_vm_area_caller(size, flags, __builtin_return_address(0));
3400 	if (!area)
3401 		return NULL;
3402 
3403 	addr = (unsigned long)area->addr;
3404 	if (vmap_pages_range(addr, addr + size, pgprot_nx(prot),
3405 				pages, PAGE_SHIFT) < 0) {
3406 		vunmap(area->addr);
3407 		return NULL;
3408 	}
3409 
3410 	if (flags & VM_MAP_PUT_PAGES) {
3411 		area->pages = pages;
3412 		area->nr_pages = count;
3413 	}
3414 	return area->addr;
3415 }
3416 EXPORT_SYMBOL(vmap);
3417 
3418 #ifdef CONFIG_VMAP_PFN
3419 struct vmap_pfn_data {
3420 	unsigned long	*pfns;
3421 	pgprot_t	prot;
3422 	unsigned int	idx;
3423 };
3424 
3425 static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private)
3426 {
3427 	struct vmap_pfn_data *data = private;
3428 	unsigned long pfn = data->pfns[data->idx];
3429 	pte_t ptent;
3430 
3431 	if (WARN_ON_ONCE(pfn_valid(pfn)))
3432 		return -EINVAL;
3433 
3434 	ptent = pte_mkspecial(pfn_pte(pfn, data->prot));
3435 	set_pte_at(&init_mm, addr, pte, ptent);
3436 
3437 	data->idx++;
3438 	return 0;
3439 }
3440 
3441 /**
3442  * vmap_pfn - map an array of PFNs into virtually contiguous space
3443  * @pfns: array of PFNs
3444  * @count: number of pages to map
3445  * @prot: page protection for the mapping
3446  *
3447  * Maps @count PFNs from @pfns into contiguous kernel virtual space and returns
3448  * the start address of the mapping.
3449  */
3450 void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot)
3451 {
3452 	struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) };
3453 	struct vm_struct *area;
3454 
3455 	area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP,
3456 			__builtin_return_address(0));
3457 	if (!area)
3458 		return NULL;
3459 	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
3460 			count * PAGE_SIZE, vmap_pfn_apply, &data)) {
3461 		free_vm_area(area);
3462 		return NULL;
3463 	}
3464 
3465 	flush_cache_vmap((unsigned long)area->addr,
3466 			 (unsigned long)area->addr + count * PAGE_SIZE);
3467 
3468 	return area->addr;
3469 }
3470 EXPORT_SYMBOL_GPL(vmap_pfn);
3471 #endif /* CONFIG_VMAP_PFN */
3472 
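/*
 * Populate @pages with @nr_pages pages of the given @order, using the
 * bulk allocator for order-0 requests and single page allocations as
 * the fallback/high-order path. Returns the number of pages actually
 * allocated, which may be fewer than requested.
 */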
3473 static inline unsigned int
3474 vm_area_alloc_pages(gfp_t gfp, int nid,
3475 		unsigned int order, unsigned int nr_pages, struct page **pages)
3476 {
3477 	unsigned int nr_allocated = 0;
3478 	gfp_t alloc_gfp = gfp;
3479 	bool nofail = false;
3480 	struct page *page;
3481 	int i;
3482 
3483 	/*
3484 	 * For order-0 pages we make use of the bulk allocator. If
3485 	 * the page array is only partly populated, or not populated
3486 	 * at all due to failures, fall back to the single-page
3487 	 * allocator, which is more permissive.
3488 	 */
3489 	if (!order) {
3490 		/* bulk allocator doesn't support nofail req. officially */
3491 		gfp_t bulk_gfp = gfp & ~__GFP_NOFAIL;
3492 
3493 		while (nr_allocated < nr_pages) {
3494 			unsigned int nr, nr_pages_request;
3495 
3496 			/*
3497 			 * The maximum allowed request is hard-coded to 100
3498 			 * pages per call. This is done in order to prevent a
3499 			 * long preemption-off scenario in the bulk allocator,
3500 			 * so the range is [1:100].
3501 			 */
3502 			nr_pages_request = min(100U, nr_pages - nr_allocated);
3503 
3504 			/* Memory allocation should honour mempolicy: we must not
3505 			 * blindly use the nearest node when nid == NUMA_NO_NODE,
3506 			 * otherwise memory may be allocated on only one node even
3507 			 * though mempolicy wants to interleave the allocations.
3508 			 */
3509 			if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE)
3510 				nr = alloc_pages_bulk_array_mempolicy(bulk_gfp,
3511 							nr_pages_request,
3512 							pages + nr_allocated);
3513 
3514 			else
3515 				nr = alloc_pages_bulk_array_node(bulk_gfp, nid,
3516 							nr_pages_request,
3517 							pages + nr_allocated);
3518 
3519 			nr_allocated += nr;
3520 			cond_resched();
3521 
3522 			/*
3523 			 * If no pages or only some of them were obtained,
3524 			 * fall back to the single-page allocator.
3525 			 */
3526 			if (nr != nr_pages_request)
3527 				break;
3528 		}
3529 	} else if (gfp & __GFP_NOFAIL) {
3530 		/*
3531 		 * Higher-order nofail allocations are really expensive and
3532 		 * potentially dangerous (premature OOM, disruptive reclaim,
3533 		 * compaction etc.).
3534 		 */
3535 		alloc_gfp &= ~__GFP_NOFAIL;
3536 		nofail = true;
3537 	}
3538 
3539 	/* High-order pages or fallback path if "bulk" fails. */
3540 	while (nr_allocated < nr_pages) {
3541 		if (fatal_signal_pending(current))
3542 			break;
3543 
3544 		if (nid == NUMA_NO_NODE)
3545 			page = alloc_pages(alloc_gfp, order);
3546 		else
3547 			page = alloc_pages_node(nid, alloc_gfp, order);
3548 		if (unlikely(!page)) {
3549 			if (!nofail)
3550 				break;
3551 
3552 			/* fall back to zero-order allocations */
3553 			alloc_gfp |= __GFP_NOFAIL;
3554 			order = 0;
3555 			continue;
3556 		}
3557 
3558 		/*
3559 		 * Higher order allocations must be able to be treated as
3560 		 * independent small pages by callers (as they can with
3561 		 * small-page vmallocs). Some drivers do their own refcounting
3562 		 * on vmalloc_to_page() pages, some use page->mapping,
3563 		 * page->lru, etc.
3564 		 */
3565 		if (order)
3566 			split_page(page, order);
3567 
3568 		/*
3569 		 * Careful, we allocate and map page-order pages, but
3570 		 * tracking is done per PAGE_SIZE page so as to keep the
3571 		 * vm_struct APIs independent of the physical/mapped size.
3572 		 */
3573 		for (i = 0; i < (1U << order); i++)
3574 			pages[nr_allocated + i] = page + i;
3575 
3576 		cond_resched();
3577 		nr_allocated += 1U << order;
3578 	}
3579 
3580 	return nr_allocated;
3581 }
3582 
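/*
 * Allocate the physical pages backing @area, account them and map them
 * at area->addr with @prot using mappings of 1UL << page_shift bytes.
 * On failure the area is torn down again and NULL is returned.
 */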
3583 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
3584 				 pgprot_t prot, unsigned int page_shift,
3585 				 int node)
3586 {
3587 	const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
3588 	bool nofail = gfp_mask & __GFP_NOFAIL;
3589 	unsigned long addr = (unsigned long)area->addr;
3590 	unsigned long size = get_vm_area_size(area);
3591 	unsigned long array_size;
3592 	unsigned int nr_small_pages = size >> PAGE_SHIFT;
3593 	unsigned int page_order;
3594 	unsigned int flags;
3595 	int ret;
3596 
3597 	array_size = (unsigned long)nr_small_pages * sizeof(struct page *);
3598 
3599 	if (!(gfp_mask & (GFP_DMA | GFP_DMA32)))
3600 		gfp_mask |= __GFP_HIGHMEM;
3601 
3602 	/* Please note that the recursion is strictly bounded. */
3603 	if (array_size > PAGE_SIZE) {
3604 		area->pages = __vmalloc_node(array_size, 1, nested_gfp, node,
3605 					area->caller);
3606 	} else {
3607 		area->pages = kmalloc_node(array_size, nested_gfp, node);
3608 	}
3609 
3610 	if (!area->pages) {
3611 		warn_alloc(gfp_mask, NULL,
3612 			"vmalloc error: size %lu, failed to allocate page array size %lu",
3613 			nr_small_pages * PAGE_SIZE, array_size);
3614 		free_vm_area(area);
3615 		return NULL;
3616 	}
3617 
3618 	set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
3619 	page_order = vm_area_page_order(area);
3620 
3621 	area->nr_pages = vm_area_alloc_pages(gfp_mask | __GFP_NOWARN,
3622 		node, page_order, nr_small_pages, area->pages);
3623 
3624 	atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
3625 	if (gfp_mask & __GFP_ACCOUNT) {
3626 		int i;
3627 
3628 		for (i = 0; i < area->nr_pages; i++)
3629 			mod_memcg_page_state(area->pages[i], MEMCG_VMALLOC, 1);
3630 	}
3631 
3632 	/*
3633 	 * If not enough pages were obtained to satisfy the
3634 	 * allocation request, free the ones we did get via vfree().
3635 	 */
3636 	if (area->nr_pages != nr_small_pages) {
3637 		/*
3638 		 * vm_area_alloc_pages() can fail due to insufficient memory but
3639 		 * also due to:
3640 		 *
3641 		 * - a pending fatal signal
3642 		 * - insufficient huge page-order pages
3643 		 *
3644 		 * Since we always retry allocations at order-0 in the huge page
3645 		 * case a warning for either is spurious.
3646 		 */
3647 		if (!fatal_signal_pending(current) && page_order == 0)
3648 			warn_alloc(gfp_mask, NULL,
3649 				"vmalloc error: size %lu, failed to allocate pages",
3650 				area->nr_pages * PAGE_SIZE);
3651 		goto fail;
3652 	}
3653 
3654 	/*
3655 	 * Page table allocations ignore the external gfp mask; enforce it
3656 	 * via the scope API.
3657 	 */
3658 	if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
3659 		flags = memalloc_nofs_save();
3660 	else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
3661 		flags = memalloc_noio_save();
3662 
3663 	do {
3664 		ret = vmap_pages_range(addr, addr + size, prot, area->pages,
3665 			page_shift);
3666 		if (nofail && (ret < 0))
3667 			schedule_timeout_uninterruptible(1);
3668 	} while (nofail && (ret < 0));
3669 
3670 	if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
3671 		memalloc_nofs_restore(flags);
3672 	else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
3673 		memalloc_noio_restore(flags);
3674 
3675 	if (ret < 0) {
3676 		warn_alloc(gfp_mask, NULL,
3677 			"vmalloc error: size %lu, failed to map pages",
3678 			area->nr_pages * PAGE_SIZE);
3679 		goto fail;
3680 	}
3681 
3682 	return area->addr;
3683 
3684 fail:
3685 	vfree(area->addr);
3686 	return NULL;
3687 }
3688 
3689 /**
3690  * __vmalloc_node_range - allocate virtually contiguous memory
3691  * @size:		  allocation size
3692  * @align:		  desired alignment
3693  * @start:		  vm area range start
3694  * @end:		  vm area range end
3695  * @gfp_mask:		  flags for the page level allocator
3696  * @prot:		  protection mask for the allocated pages
3697  * @vm_flags:		  additional vm area flags (e.g. %VM_NO_GUARD)
3698  * @node:		  node to use for allocation or NUMA_NO_NODE
3699  * @caller:		  caller's return address
3700  *
3701  * Allocate enough pages to cover @size from the page level
3702  * allocator with @gfp_mask flags. Please note that the full set of gfp
3703  * flags is not supported; only GFP_KERNEL, GFP_NOFS and GFP_NOIO
3704  * are supported.
3705  * Zone modifiers are not supported. Of the reclaim modifiers,
3706  * __GFP_DIRECT_RECLAIM is required (i.e. GFP_NOWAIT is not supported)
3707  * and only __GFP_NOFAIL is supported (i.e. __GFP_NORETRY and
3708  * __GFP_RETRY_MAYFAIL are not supported).
3709  *
3710  * __GFP_NOWARN can be used to suppress failure messages.
3711  *
3712  * Map them into contiguous kernel virtual space, using a pagetable
3713  * protection of @prot.
3714  *
3715  * Return: the address of the area or %NULL on failure
3716  */
3717 void *__vmalloc_node_range(unsigned long size, unsigned long align,
3718 			unsigned long start, unsigned long end, gfp_t gfp_mask,
3719 			pgprot_t prot, unsigned long vm_flags, int node,
3720 			const void *caller)
3721 {
3722 	struct vm_struct *area;
3723 	void *ret;
3724 	kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE;
3725 	unsigned long real_size = size;
3726 	unsigned long real_align = align;
3727 	unsigned int shift = PAGE_SHIFT;
3728 
3729 	if (WARN_ON_ONCE(!size))
3730 		return NULL;
3731 
3732 	if ((size >> PAGE_SHIFT) > totalram_pages()) {
3733 		warn_alloc(gfp_mask, NULL,
3734 			"vmalloc error: size %lu, exceeds total pages",
3735 			real_size);
3736 		return NULL;
3737 	}
3738 
3739 	if (vmap_allow_huge && (vm_flags & VM_ALLOW_HUGE_VMAP)) {
3740 		unsigned long size_per_node;
3741 
3742 		/*
3743 		 * Try huge pages. Only try for PAGE_KERNEL allocations,
3744 		 * others like modules don't yet expect huge pages in
3745 		 * their allocations due to apply_to_page_range not
3746 		 * supporting them.
3747 		 */
3748 
3749 		size_per_node = size;
3750 		if (node == NUMA_NO_NODE)
3751 			size_per_node /= num_online_nodes();
3752 		if (arch_vmap_pmd_supported(prot) && size_per_node >= PMD_SIZE)
3753 			shift = PMD_SHIFT;
3754 		else
3755 			shift = arch_vmap_pte_supported_shift(size_per_node);
3756 
3757 		align = max(real_align, 1UL << shift);
3758 		size = ALIGN(real_size, 1UL << shift);
3759 	}
3760 
3761 again:
3762 	area = __get_vm_area_node(real_size, align, shift, VM_ALLOC |
3763 				  VM_UNINITIALIZED | vm_flags, start, end, node,
3764 				  gfp_mask, caller);
3765 	if (!area) {
3766 		bool nofail = gfp_mask & __GFP_NOFAIL;
3767 		warn_alloc(gfp_mask, NULL,
3768 			"vmalloc error: size %lu, vm_struct allocation failed%s",
3769 			real_size, (nofail) ? ". Retrying." : "");
3770 		if (nofail) {
3771 			schedule_timeout_uninterruptible(1);
3772 			goto again;
3773 		}
3774 		goto fail;
3775 	}
3776 
3777 	/*
3778 	 * Prepare arguments for __vmalloc_area_node() and
3779 	 * kasan_unpoison_vmalloc().
3780 	 */
3781 	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) {
3782 		if (kasan_hw_tags_enabled()) {
3783 			/*
3784 			 * Modify protection bits to allow tagging.
3785 			 * This must be done before mapping.
3786 			 */
3787 			prot = arch_vmap_pgprot_tagged(prot);
3788 
3789 			/*
3790 			 * Skip page_alloc poisoning and zeroing for physical
3791 			 * pages backing VM_ALLOC mapping. Memory is instead
3792 			 * poisoned and zeroed by kasan_unpoison_vmalloc().
3793 			 */
3794 			gfp_mask |= __GFP_SKIP_KASAN | __GFP_SKIP_ZERO;
3795 		}
3796 
3797 		/* Take note that the mapping is PAGE_KERNEL. */
3798 		kasan_flags |= KASAN_VMALLOC_PROT_NORMAL;
3799 	}
3800 
3801 	/* Allocate physical pages and map them into vmalloc space. */
3802 	ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node);
3803 	if (!ret)
3804 		goto fail;
3805 
3806 	/*
3807 	 * Mark the pages as accessible, now that they are mapped.
3808 	 * The condition for setting KASAN_VMALLOC_INIT should complement the
3809 	 * one in post_alloc_hook() with regards to the __GFP_SKIP_ZERO check
3810 	 * to make sure that memory is initialized under the same conditions.
3811 	 * Tag-based KASAN modes only assign tags to normal non-executable
3812 	 * allocations, see __kasan_unpoison_vmalloc().
3813 	 */
3814 	kasan_flags |= KASAN_VMALLOC_VM_ALLOC;
3815 	if (!want_init_on_free() && want_init_on_alloc(gfp_mask) &&
3816 	    (gfp_mask & __GFP_SKIP_ZERO))
3817 		kasan_flags |= KASAN_VMALLOC_INIT;
3818 	/* KASAN_VMALLOC_PROT_NORMAL already set if required. */
3819 	area->addr = kasan_unpoison_vmalloc(area->addr, real_size, kasan_flags);
3820 
3821 	/*
3822 	 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
3823 	 * flag. It means that vm_struct is not fully initialized.
3824 	 * Now, it is fully initialized, so remove this flag here.
3825 	 */
3826 	clear_vm_uninitialized_flag(area);
3827 
3828 	size = PAGE_ALIGN(size);
3829 	if (!(vm_flags & VM_DEFER_KMEMLEAK))
3830 		kmemleak_vmalloc(area, size, gfp_mask);
3831 
3832 	return area->addr;
3833 
3834 fail:
3835 	if (shift > PAGE_SHIFT) {
3836 		shift = PAGE_SHIFT;
3837 		align = real_align;
3838 		size = real_size;
3839 		goto again;
3840 	}
3841 
3842 	return NULL;
3843 }
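
/*
 * Illustrative sketch (hypothetical caller, not taken from this file):
 * a user that cannot tolerate failure and is happy with the default
 * vmalloc range and huge mappings could call:
 *
 *	p = __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
 *				 GFP_KERNEL | __GFP_NOFAIL, PAGE_KERNEL,
 *				 VM_ALLOW_HUGE_VMAP, NUMA_NO_NODE,
 *				 __builtin_return_address(0));
 *
 * Most users should prefer one of the wrappers below instead.
 */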
3844 
3845 /**
3846  * __vmalloc_node - allocate virtually contiguous memory
3847  * @size:	    allocation size
3848  * @align:	    desired alignment
3849  * @gfp_mask:	    flags for the page level allocator
3850  * @node:	    node to use for allocation or NUMA_NO_NODE
3851  * @caller:	    caller's return address
3852  *
3853  * Allocate enough pages to cover @size from the page level allocator with
3854  * @gfp_mask flags.  Map them into contiguous kernel virtual space.
3855  *
3856  * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
3857  * and __GFP_NOFAIL - are not supported.
3858  *
3859  * Any use of gfp flags outside of GFP_KERNEL should be discussed
3860  * with the mm people first.
3861  *
3862  * Return: pointer to the allocated memory or %NULL on error
3863  */
3864 void *__vmalloc_node(unsigned long size, unsigned long align,
3865 			    gfp_t gfp_mask, int node, const void *caller)
3866 {
3867 	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
3868 				gfp_mask, PAGE_KERNEL, 0, node, caller);
3869 }
3870 /*
3871  * This is only for performance analysis and stress testing of vmalloc.
3872  * It is required by the vmalloc test module; do not use it for anything
3873  * else.
3874  */
3875 #ifdef CONFIG_TEST_VMALLOC_MODULE
3876 EXPORT_SYMBOL_GPL(__vmalloc_node);
3877 #endif
3878 
3879 void *__vmalloc(unsigned long size, gfp_t gfp_mask)
3880 {
3881 	return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE,
3882 				__builtin_return_address(0));
3883 }
3884 EXPORT_SYMBOL(__vmalloc);
3885 
3886 /**
3887  * vmalloc - allocate virtually contiguous memory
3888  * @size:    allocation size
3889  *
3890  * Allocate enough pages to cover @size from the page level
3891  * allocator and map them into contiguous kernel virtual space.
3892  *
3893  * For tight control over page level allocator and protection flags
3894  * use __vmalloc() instead.
3895  *
3896  * Return: pointer to the allocated memory or %NULL on error
3897  */
3898 void *vmalloc(unsigned long size)
3899 {
3900 	return __vmalloc_node(size, 1, GFP_KERNEL, NUMA_NO_NODE,
3901 				__builtin_return_address(0));
3902 }
3903 EXPORT_SYMBOL(vmalloc);
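
/*
 * Illustrative sketch (hypothetical caller; "struct foo" and "nr_entries"
 * are assumptions for the example). array_size() is from
 * <linux/overflow.h> and saturates on overflow:
 *
 *	struct foo *table;
 *
 *	table = vmalloc(array_size(nr_entries, sizeof(*table)));
 *	if (!table)
 *		return -ENOMEM;
 *	...
 *	vfree(table);
 */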
3904 
3905 /**
3906  * vmalloc_huge - allocate virtually contiguous memory, allow huge pages
3907  * @size:      allocation size
3908  * @gfp_mask:  flags for the page level allocator
3909  *
3910  * Allocate enough pages to cover @size from the page level
3911  * allocator and map them into contiguous kernel virtual space.
3912  * If @size is greater than or equal to PMD_SIZE, allow using
3913  * huge pages for the memory.
3914  *
3915  * Return: pointer to the allocated memory or %NULL on error
3916  */
3917 void *vmalloc_huge(unsigned long size, gfp_t gfp_mask)
3918 {
3919 	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
3920 				    gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
3921 				    NUMA_NO_NODE, __builtin_return_address(0));
3922 }
3923 EXPORT_SYMBOL_GPL(vmalloc_huge);
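
/*
 * Illustrative sketch (hypothetical caller): a large table that may be
 * mapped with huge pages when its size is at least PMD_SIZE:
 *
 *	void *ht = vmalloc_huge(64UL << 20, GFP_KERNEL);
 *
 *	if (!ht)
 *		return -ENOMEM;
 *	...
 *	vfree(ht);
 */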
3924 
3925 /**
3926  * vzalloc - allocate virtually contiguous memory with zero fill
3927  * @size:    allocation size
3928  *
3929  * Allocate enough pages to cover @size from the page level
3930  * allocator and map them into contiguous kernel virtual space.
3931  * The memory allocated is set to zero.
3932  *
3933  * For tight control over page level allocator and protection flags
3934  * use __vmalloc() instead.
3935  *
3936  * Return: pointer to the allocated memory or %NULL on error
3937  */
3938 void *vzalloc(unsigned long size)
3939 {
3940 	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE,
3941 				__builtin_return_address(0));
3942 }
3943 EXPORT_SYMBOL(vzalloc);
3944 
3945 /**
3946  * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
3947  * @size: allocation size
3948  *
3949  * The resulting memory area is zeroed so it can be mapped to userspace
3950  * without leaking data.
3951  *
3952  * Return: pointer to the allocated memory or %NULL on error
3953  */
3954 void *vmalloc_user(unsigned long size)
3955 {
3956 	return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
3957 				    GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
3958 				    VM_USERMAP, NUMA_NO_NODE,
3959 				    __builtin_return_address(0));
3960 }
3961 EXPORT_SYMBOL(vmalloc_user);
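
/*
 * Illustrative sketch (hypothetical driver): a buffer meant to be mapped
 * into userspace is allocated here and later handed to
 * remap_vmalloc_range() from the driver's ->mmap() handler; see the
 * example after remap_vmalloc_range() below.
 *
 *	buf = vmalloc_user(buf_size);
 *	if (!buf)
 *		return -ENOMEM;
 */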
3962 
3963 /**
3964  * vmalloc_node - allocate memory on a specific node
3965  * @size:	  allocation size
3966  * @node:	  numa node
3967  *
3968  * Allocate enough pages to cover @size from the page level
3969  * allocator and map them into contiguous kernel virtual space.
3970  *
3971  * For tight control over page level allocator and protection flags
3972  * use __vmalloc() instead.
3973  *
3974  * Return: pointer to the allocated memory or %NULL on error
3975  */
3976 void *vmalloc_node(unsigned long size, int node)
3977 {
3978 	return __vmalloc_node(size, 1, GFP_KERNEL, node,
3979 			__builtin_return_address(0));
3980 }
3981 EXPORT_SYMBOL(vmalloc_node);
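
/*
 * Illustrative sketch (hypothetical caller; "scratch" and "scratch_size"
 * are assumptions): keep a per-node buffer close to the CPUs using it:
 *
 *	scratch[nid] = vmalloc_node(scratch_size, nid);
 *	if (!scratch[nid])
 *		return -ENOMEM;
 */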
3982 
3983 /**
3984  * vzalloc_node - allocate memory on a specific node with zero fill
3985  * @size:	allocation size
3986  * @node:	numa node
3987  *
3988  * Allocate enough pages to cover @size from the page level
3989  * allocator and map them into contiguous kernel virtual space.
3990  * The memory allocated is set to zero.
3991  *
3992  * Return: pointer to the allocated memory or %NULL on error
3993  */
3994 void *vzalloc_node(unsigned long size, int node)
3995 {
3996 	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, node,
3997 				__builtin_return_address(0));
3998 }
3999 EXPORT_SYMBOL(vzalloc_node);
4000 
4001 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
4002 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
4003 #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
4004 #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
4005 #else
4006 /*
4007  * 64-bit systems should always have either DMA or DMA32 zones. For
4008  * others, GFP_DMA32 should do the right thing and use the normal zone.
4009  */
4010 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
4011 #endif
4012 
4013 /**
4014  * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
4015  * @size:	allocation size
4016  *
4017  * Allocate enough 32-bit physically addressable pages to cover @size from the
4018  * page level allocator and map them into contiguous kernel virtual space.
4019  *
4020  * Return: pointer to the allocated memory or %NULL on error
4021  */
4022 void *vmalloc_32(unsigned long size)
4023 {
4024 	return __vmalloc_node(size, 1, GFP_VMALLOC32, NUMA_NO_NODE,
4025 			__builtin_return_address(0));
4026 }
4027 EXPORT_SYMBOL(vmalloc_32);
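
/*
 * Illustrative sketch (hypothetical caller; "buf_size" is an assumption):
 * a buffer whose backing pages must be 32-bit physically addressable:
 *
 *	void *buf = vmalloc_32(buf_size);
 *
 *	if (!buf)
 *		return -ENOMEM;
 */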
4028 
4029 /**
4030  * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
4031  * @size:	     allocation size
4032  *
4033  * The resulting memory area is 32bit addressable and zeroed so it can be
4034  * mapped to userspace without leaking data.
4035  *
4036  * Return: pointer to the allocated memory or %NULL on error
4037  */
4038 void *vmalloc_32_user(unsigned long size)
4039 {
4040 	return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
4041 				    GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
4042 				    VM_USERMAP, NUMA_NO_NODE,
4043 				    __builtin_return_address(0));
4044 }
4045 EXPORT_SYMBOL(vmalloc_32_user);
4046 
4047 /*
4048  * Atomically zero bytes in the iterator.
4049  *
4050  * Returns the number of zeroed bytes.
4051  */
4052 static size_t zero_iter(struct iov_iter *iter, size_t count)
4053 {
4054 	size_t remains = count;
4055 
4056 	while (remains > 0) {
4057 		size_t num, copied;
4058 
4059 		num = min_t(size_t, remains, PAGE_SIZE);
4060 		copied = copy_page_to_iter_nofault(ZERO_PAGE(0), 0, num, iter);
4061 		remains -= copied;
4062 
4063 		if (copied < num)
4064 			break;
4065 	}
4066 
4067 	return count - remains;
4068 }
4069 
4070 /*
4071  * Small helper routine to copy contents from addr to the iterator.
4072  * If a page is not present, zero-fill its part of the range.
4073  *
4074  * Returns the number of copied bytes.
4075  */
4076 static size_t aligned_vread_iter(struct iov_iter *iter,
4077 				 const char *addr, size_t count)
4078 {
4079 	size_t remains = count;
4080 	struct page *page;
4081 
4082 	while (remains > 0) {
4083 		unsigned long offset, length;
4084 		size_t copied = 0;
4085 
4086 		offset = offset_in_page(addr);
4087 		length = PAGE_SIZE - offset;
4088 		if (length > remains)
4089 			length = remains;
4090 		page = vmalloc_to_page(addr);
4091 		/*
4092 		 * To access this _mapped_ area safely we would need a lock, but
4093 		 * taking one here would add overhead to vmalloc()/vfree() calls
4094 		 * for the sake of this rarely used _debug_ interface. Instead
4095 		 * of that, we use a local mapping via
4096 		 * copy_page_to_iter_nofault() and accept a small overhead in
4097 		 * this access function.
4098 		 */
4099 		if (page)
4100 			copied = copy_page_to_iter_nofault(page, offset,
4101 							   length, iter);
4102 		else
4103 			copied = zero_iter(iter, length);
4104 
4105 		addr += copied;
4106 		remains -= copied;
4107 
4108 		if (copied != length)
4109 			break;
4110 	}
4111 
4112 	return count - remains;
4113 }
4114 
4115 /*
4116  * Read from a vm_map_ram region of memory.
4117  *
4118  * Returns the number of copied bytes.
4119  */
4120 static size_t vmap_ram_vread_iter(struct iov_iter *iter, const char *addr,
4121 				  size_t count, unsigned long flags)
4122 {
4123 	char *start;
4124 	struct vmap_block *vb;
4125 	struct xarray *xa;
4126 	unsigned long offset;
4127 	unsigned int rs, re;
4128 	size_t remains, n;
4129 
4130 	/*
4131 	 * If the area was created by the vm_map_ram() interface directly,
4132 	 * without further subdivision and delegation of management to
4133 	 * vmap_block, handle it here.
4134 	 */
4135 	if (!(flags & VMAP_BLOCK))
4136 		return aligned_vread_iter(iter, addr, count);
4137 
4138 	remains = count;
4139 
4140 	/*
4141 	 * The area is split into regions and tracked with vmap_block; read
4142 	 * out each region and zero-fill the holes between regions.
4143 	 */
4144 	xa = addr_to_vb_xa((unsigned long) addr);
4145 	vb = xa_load(xa, addr_to_vb_idx((unsigned long)addr));
4146 	if (!vb)
4147 		goto finished_zero;
4148 
4149 	spin_lock(&vb->lock);
4150 	if (bitmap_empty(vb->used_map, VMAP_BBMAP_BITS)) {
4151 		spin_unlock(&vb->lock);
4152 		goto finished_zero;
4153 	}
4154 
4155 	for_each_set_bitrange(rs, re, vb->used_map, VMAP_BBMAP_BITS) {
4156 		size_t copied;
4157 
4158 		if (remains == 0)
4159 			goto finished;
4160 
4161 		start = vmap_block_vaddr(vb->va->va_start, rs);
4162 
4163 		if (addr < start) {
4164 			size_t to_zero = min_t(size_t, start - addr, remains);
4165 			size_t zeroed = zero_iter(iter, to_zero);
4166 
4167 			addr += zeroed;
4168 			remains -= zeroed;
4169 
4170 			if (remains == 0 || zeroed != to_zero)
4171 				goto finished;
4172 		}
4173 
4174 		/* It could start reading from the middle of a used region. */
4175 		offset = offset_in_page(addr);
4176 		n = ((re - rs + 1) << PAGE_SHIFT) - offset;
4177 		if (n > remains)
4178 			n = remains;
4179 
4180 		copied = aligned_vread_iter(iter, start + offset, n);
4181 
4182 		addr += copied;
4183 		remains -= copied;
4184 
4185 		if (copied != n)
4186 			goto finished;
4187 	}
4188 
4189 	spin_unlock(&vb->lock);
4190 
4191 finished_zero:
4192 	/* zero-fill the left dirty or free regions */
4193 	return count - remains + zero_iter(iter, remains);
4194 finished:
4195 	/* We couldn't copy/zero everything */
4196 	spin_unlock(&vb->lock);
4197 	return count - remains;
4198 }
4199 
4200 /**
4201  * vread_iter() - read vmalloc area in a safe way to an iterator.
4202  * @iter:         the iterator to which data should be written.
4203  * @addr:         vm address.
4204  * @count:        number of bytes to be read.
4205  *
4206  * This function checks that addr is a valid vmalloc'ed area and
4207  * copies data from that area to the given iterator. If the given
4208  * memory range of [addr...addr+count) includes some valid address,
4209  * data is copied to the proper area of @iter. If there are memory
4210  * holes, they are zero-filled. IOREMAP areas are treated as memory
4211  * holes and no copy is done.
4212  *
4213  * If [addr...addr+count) does not intersect any alive vm_struct
4214  * area, 0 is returned.
4215  *
4216  * Note: In usual ops, vread_iter() is never necessary because the
4217  * caller should know the vmalloc() area is valid and can use memcpy().
4218  * This is for routines which have to access the vmalloc area without
4219  * any information, such as /proc/kcore.
4219  *
4220  * Return: number of bytes for which addr should be increased
4221  * (same number as @count) or %0 if [addr...addr+count) doesn't
4222  * include any intersection with a valid vmalloc area
4223  */
4224 long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
4225 {
4226 	struct vmap_node *vn;
4227 	struct vmap_area *va;
4228 	struct vm_struct *vm;
4229 	char *vaddr;
4230 	size_t n, size, flags, remains;
4231 	unsigned long next;
4232 
4233 	addr = kasan_reset_tag(addr);
4234 
4235 	/* Don't allow overflow */
4236 	if ((unsigned long) addr + count < count)
4237 		count = -(unsigned long) addr;
4238 
4239 	remains = count;
4240 
4241 	vn = find_vmap_area_exceed_addr_lock((unsigned long) addr, &va);
4242 	if (!vn)
4243 		goto finished_zero;
4244 
4245 	/* no intersects with alive vmap_area */
4246 	if ((unsigned long)addr + remains <= va->va_start)
4247 		goto finished_zero;
4248 
4249 	do {
4250 		size_t copied;
4251 
4252 		if (remains == 0)
4253 			goto finished;
4254 
4255 		vm = va->vm;
4256 		flags = va->flags & VMAP_FLAGS_MASK;
4257 		/*
4258 		 * VMAP_BLOCK indicates a sub-type of vm_map_ram area, need
4259 		 * be set together with VMAP_RAM.
4260 		 */
4261 		WARN_ON(flags == VMAP_BLOCK);
4262 
4263 		if (!vm && !flags)
4264 			goto next_va;
4265 
4266 		if (vm && (vm->flags & VM_UNINITIALIZED))
4267 			goto next_va;
4268 
4269 		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
4270 		smp_rmb();
4271 
4272 		vaddr = (char *) va->va_start;
4273 		size = vm ? get_vm_area_size(vm) : va_size(va);
4274 
4275 		if (addr >= vaddr + size)
4276 			goto next_va;
4277 
4278 		if (addr < vaddr) {
4279 			size_t to_zero = min_t(size_t, vaddr - addr, remains);
4280 			size_t zeroed = zero_iter(iter, to_zero);
4281 
4282 			addr += zeroed;
4283 			remains -= zeroed;
4284 
4285 			if (remains == 0 || zeroed != to_zero)
4286 				goto finished;
4287 		}
4288 
4289 		n = vaddr + size - addr;
4290 		if (n > remains)
4291 			n = remains;
4292 
4293 		if (flags & VMAP_RAM)
4294 			copied = vmap_ram_vread_iter(iter, addr, n, flags);
4295 		else if (!(vm && (vm->flags & (VM_IOREMAP | VM_SPARSE))))
4296 			copied = aligned_vread_iter(iter, addr, n);
4297 		else /* IOREMAP | SPARSE area is treated as memory hole */
4298 			copied = zero_iter(iter, n);
4299 
4300 		addr += copied;
4301 		remains -= copied;
4302 
4303 		if (copied != n)
4304 			goto finished;
4305 
4306 	next_va:
4307 		next = va->va_end;
4308 		spin_unlock(&vn->busy.lock);
4309 	} while ((vn = find_vmap_area_exceed_addr_lock(next, &va)));
4310 
4311 finished_zero:
4312 	if (vn)
4313 		spin_unlock(&vn->busy.lock);
4314 
4315 	/* zero-fill memory holes */
4316 	return count - remains + zero_iter(iter, remains);
4317 finished:
4318 	/* Nothing remains, or we couldn't copy/zero everything. */
4319 	if (vn)
4320 		spin_unlock(&vn->busy.lock);
4321 
4322 	return count - remains;
4323 }
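
/*
 * Illustrative sketch (hypothetical in-kernel reader; "buf", "len" and
 * "addr" are assumptions): build an iov_iter over a kvec and let
 * vread_iter() copy or zero-fill from the vmalloc area:
 *
 *	struct kvec kv = { .iov_base = buf, .iov_len = len };
 *	struct iov_iter iter;
 *	long copied;
 *
 *	iov_iter_kvec(&iter, ITER_DEST, &kv, 1, len);
 *	copied = vread_iter(&iter, addr, len);
 *
 * The /proc/kcore read path does essentially this with a user-backed
 * iterator instead.
 */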
4324 
4325 /**
4326  * remap_vmalloc_range_partial - map vmalloc pages to userspace
4327  * @vma:		vma to cover
4328  * @uaddr:		target user address to start at
4329  * @kaddr:		virtual address of vmalloc kernel memory
4330  * @pgoff:		offset from @kaddr to start at
4331  * @size:		size of map area
4332  *
4333  * Returns:	0 for success, -Exxx on failure
4334  *
4335  * This function checks that @kaddr is a valid vmalloc'ed area,
4336  * and that it is big enough to cover the range starting at
4337  * @uaddr in @vma. Will return failure if those criteria aren't
4338  * met.
4339  *
4340  * Similar to remap_pfn_range() (see mm/memory.c)
4341  */
4342 int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
4343 				void *kaddr, unsigned long pgoff,
4344 				unsigned long size)
4345 {
4346 	struct vm_struct *area;
4347 	unsigned long off;
4348 	unsigned long end_index;
4349 
4350 	if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
4351 		return -EINVAL;
4352 
4353 	size = PAGE_ALIGN(size);
4354 
4355 	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
4356 		return -EINVAL;
4357 
4358 	area = find_vm_area(kaddr);
4359 	if (!area)
4360 		return -EINVAL;
4361 
4362 	if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
4363 		return -EINVAL;
4364 
4365 	if (check_add_overflow(size, off, &end_index) ||
4366 	    end_index > get_vm_area_size(area))
4367 		return -EINVAL;
4368 	kaddr += off;
4369 
4370 	do {
4371 		struct page *page = vmalloc_to_page(kaddr);
4372 		int ret;
4373 
4374 		ret = vm_insert_page(vma, uaddr, page);
4375 		if (ret)
4376 			return ret;
4377 
4378 		uaddr += PAGE_SIZE;
4379 		kaddr += PAGE_SIZE;
4380 		size -= PAGE_SIZE;
4381 	} while (size > 0);
4382 
4383 	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
4384 
4385 	return 0;
4386 }
4387 
4388 /**
4389  * remap_vmalloc_range - map vmalloc pages to userspace
4390  * @vma:		vma to cover (map full range of vma)
4391  * @addr:		vmalloc memory
4392  * @pgoff:		number of pages into addr before first page to map
4393  *
4394  * Returns:	0 for success, -Exxx on failure
4395  *
4396  * This function checks that addr is a valid vmalloc'ed area, and
4397  * that it is big enough to cover the vma. Will return failure if
4398  * those criteria aren't met.
4399  *
4400  * Similar to remap_pfn_range() (see mm/memory.c)
4401  */
4402 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
4403 						unsigned long pgoff)
4404 {
4405 	return remap_vmalloc_range_partial(vma, vma->vm_start,
4406 					   addr, pgoff,
4407 					   vma->vm_end - vma->vm_start);
4408 }
4409 EXPORT_SYMBOL(remap_vmalloc_range);
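
/*
 * Illustrative sketch (hypothetical driver ->mmap() handler; "buf" is
 * assumed to come from vmalloc_user() and to be large enough for the
 * VMA):
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, buf, vma->vm_pgoff);
 *	}
 */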
4410 
4411 void free_vm_area(struct vm_struct *area)
4412 {
4413 	struct vm_struct *ret;
4414 	ret = remove_vm_area(area->addr);
4415 	BUG_ON(ret != area);
4416 	kfree(area);
4417 }
4418 EXPORT_SYMBOL_GPL(free_vm_area);
4419 
4420 #ifdef CONFIG_SMP
4421 static struct vmap_area *node_to_va(struct rb_node *n)
4422 {
4423 	return rb_entry_safe(n, struct vmap_area, rb_node);
4424 }
4425 
4426 /**
4427  * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
4428  * @addr: target address
4429  *
4430  * Returns: the vmap_area if it is found. If there is no such area,
4431  *   the highest vmap_area below @addr (in reverse order) is returned,
4432  *   i.e. va->va_start < addr && va->va_end < addr, or NULL
4433  *   if there are no areas before @addr.
4434  */
4435 static struct vmap_area *
4436 pvm_find_va_enclose_addr(unsigned long addr)
4437 {
4438 	struct vmap_area *va, *tmp;
4439 	struct rb_node *n;
4440 
4441 	n = free_vmap_area_root.rb_node;
4442 	va = NULL;
4443 
4444 	while (n) {
4445 		tmp = rb_entry(n, struct vmap_area, rb_node);
4446 		if (tmp->va_start <= addr) {
4447 			va = tmp;
4448 			if (tmp->va_end >= addr)
4449 				break;
4450 
4451 			n = n->rb_right;
4452 		} else {
4453 			n = n->rb_left;
4454 		}
4455 	}
4456 
4457 	return va;
4458 }
4459 
4460 /**
4461  * pvm_determine_end_from_reverse - find the highest aligned address
4462  * of a free block below VMALLOC_END
4463  * @va:
4464  *   in - the VA we start the search from (in reverse order);
4465  *   out - the VA with the highest aligned end address.
4466  * @align: alignment for required highest address
4467  *
4468  * Returns: determined end address within vmap_area
4469  */
4470 static unsigned long
4471 pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
4472 {
4473 	unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
4474 	unsigned long addr;
4475 
4476 	if (likely(*va)) {
4477 		list_for_each_entry_from_reverse((*va),
4478 				&free_vmap_area_list, list) {
4479 			addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
4480 			if ((*va)->va_start < addr)
4481 				return addr;
4482 		}
4483 	}
4484 
4485 	return 0;
4486 }
4487 
4488 /**
4489  * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
4490  * @offsets: array containing offset of each area
4491  * @sizes: array containing size of each area
4492  * @nr_vms: the number of areas to allocate
4493  * @align: alignment, all entries in @offsets and @sizes must be aligned to this
4494  *
4495  * Returns: kmalloc'd vm_struct pointer array pointing to allocated
4496  *	    vm_structs on success, %NULL on failure
4497  *
4498  * Percpu allocator wants to use congruent vm areas so that it can
4499  * maintain the offsets among percpu areas.  This function allocates
4500  * congruent vmalloc areas for it with GFP_KERNEL.  These areas tend to
4501  * be scattered pretty far, distance between two areas easily going up
4502  * to gigabytes.  To avoid interacting with regular vmallocs, these
4503  * areas are allocated from top.
4504  *
4505  * Despite its complicated look, this allocator is rather simple. It
4506  * does everything top-down and scans free blocks from the end looking
4507  * for a matching base. While scanning, if any of the areas do not fit,
4508  * the base address is pulled down to fit the area. Scanning is repeated
4509  * until all the areas fit, and then all necessary data structures are
4510  * inserted and the result is returned.
4511  */
4512 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
4513 				     const size_t *sizes, int nr_vms,
4514 				     size_t align)
4515 {
4516 	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
4517 	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
4518 	struct vmap_area **vas, *va;
4519 	struct vm_struct **vms;
4520 	int area, area2, last_area, term_area;
4521 	unsigned long base, start, size, end, last_end, orig_start, orig_end;
4522 	bool purged = false;
4523 
4524 	/* verify parameters and allocate data structures */
4525 	BUG_ON(offset_in_page(align) || !is_power_of_2(align));
4526 	for (last_area = 0, area = 0; area < nr_vms; area++) {
4527 		start = offsets[area];
4528 		end = start + sizes[area];
4529 
4530 		/* is everything aligned properly? */
4531 		BUG_ON(!IS_ALIGNED(offsets[area], align));
4532 		BUG_ON(!IS_ALIGNED(sizes[area], align));
4533 
4534 		/* detect the area with the highest address */
4535 		if (start > offsets[last_area])
4536 			last_area = area;
4537 
4538 		for (area2 = area + 1; area2 < nr_vms; area2++) {
4539 			unsigned long start2 = offsets[area2];
4540 			unsigned long end2 = start2 + sizes[area2];
4541 
4542 			BUG_ON(start2 < end && start < end2);
4543 		}
4544 	}
4545 	last_end = offsets[last_area] + sizes[last_area];
4546 
4547 	if (vmalloc_end - vmalloc_start < last_end) {
4548 		WARN_ON(true);
4549 		return NULL;
4550 	}
4551 
4552 	vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
4553 	vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
4554 	if (!vas || !vms)
4555 		goto err_free2;
4556 
4557 	for (area = 0; area < nr_vms; area++) {
4558 		vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL);
4559 		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
4560 		if (!vas[area] || !vms[area])
4561 			goto err_free;
4562 	}
4563 retry:
4564 	spin_lock(&free_vmap_area_lock);
4565 
4566 	/* start scanning - we scan from the top, begin with the last area */
4567 	area = term_area = last_area;
4568 	start = offsets[area];
4569 	end = start + sizes[area];
4570 
4571 	va = pvm_find_va_enclose_addr(vmalloc_end);
4572 	base = pvm_determine_end_from_reverse(&va, align) - end;
4573 
4574 	while (true) {
4575 		/*
4576 		 * base might have underflowed, add last_end before
4577 		 * comparing.
4578 		 */
4579 		if (base + last_end < vmalloc_start + last_end)
4580 			goto overflow;
4581 
4582 		/*
4583 		 * Fitting base has not been found.
4584 		 */
4585 		if (va == NULL)
4586 			goto overflow;
4587 
4588 		/*
4589 		 * If required width exceeds current VA block, move
4590 		 * base downwards and then recheck.
4591 		 */
4592 		if (base + end > va->va_end) {
4593 			base = pvm_determine_end_from_reverse(&va, align) - end;
4594 			term_area = area;
4595 			continue;
4596 		}
4597 
4598 		/*
4599 		 * If this VA does not fit, move base downwards and recheck.
4600 		 */
4601 		if (base + start < va->va_start) {
4602 			va = node_to_va(rb_prev(&va->rb_node));
4603 			base = pvm_determine_end_from_reverse(&va, align) - end;
4604 			term_area = area;
4605 			continue;
4606 		}
4607 
4608 		/*
4609 		 * This area fits, move on to the previous one.  If
4610 		 * the previous one is the terminal one, we're done.
4611 		 */
4612 		area = (area + nr_vms - 1) % nr_vms;
4613 		if (area == term_area)
4614 			break;
4615 
4616 		start = offsets[area];
4617 		end = start + sizes[area];
4618 		va = pvm_find_va_enclose_addr(base + end);
4619 	}
4620 
4621 	/* we've found a fitting base, insert all va's */
4622 	for (area = 0; area < nr_vms; area++) {
4623 		int ret;
4624 
4625 		start = base + offsets[area];
4626 		size = sizes[area];
4627 
4628 		va = pvm_find_va_enclose_addr(start);
4629 		if (WARN_ON_ONCE(va == NULL))
4630 			/* It is a BUG(), but trigger recovery instead. */
4631 			goto recovery;
4632 
4633 		ret = va_clip(&free_vmap_area_root,
4634 			&free_vmap_area_list, va, start, size);
4635 		if (WARN_ON_ONCE(unlikely(ret)))
4636 			/* It is a BUG(), but trigger recovery instead. */
4637 			goto recovery;
4638 
4639 		/* Allocated area. */
4640 		va = vas[area];
4641 		va->va_start = start;
4642 		va->va_end = start + size;
4643 	}
4644 
4645 	spin_unlock(&free_vmap_area_lock);
4646 
4647 	/* populate the kasan shadow space */
4648 	for (area = 0; area < nr_vms; area++) {
4649 		if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
4650 			goto err_free_shadow;
4651 	}
4652 
4653 	/* insert all vm's */
4654 	for (area = 0; area < nr_vms; area++) {
4655 		struct vmap_node *vn = addr_to_node(vas[area]->va_start);
4656 
4657 		spin_lock(&vn->busy.lock);
4658 		insert_vmap_area(vas[area], &vn->busy.root, &vn->busy.head);
4659 		setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC,
4660 				 pcpu_get_vm_areas);
4661 		spin_unlock(&vn->busy.lock);
4662 	}
4663 
4664 	/*
4665 	 * Mark allocated areas as accessible. Do it now as a best-effort
4666 	 * approach, as they can be mapped outside of vmalloc code.
4667 	 * With hardware tag-based KASAN, marking is skipped for
4668 	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
4669 	 */
4670 	for (area = 0; area < nr_vms; area++)
4671 		vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr,
4672 				vms[area]->size, KASAN_VMALLOC_PROT_NORMAL);
4673 
4674 	kfree(vas);
4675 	return vms;
4676 
4677 recovery:
4678 	/*
4679 	 * Remove previously allocated areas. There is no
4680 	 * need to remove these areas from the busy tree,
4681 	 * because they are inserted only in the final step
4682 	 * and only when pcpu_get_vm_areas() succeeds.
4683 	 */
4684 	while (area--) {
4685 		orig_start = vas[area]->va_start;
4686 		orig_end = vas[area]->va_end;
4687 		va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
4688 				&free_vmap_area_list);
4689 		if (va)
4690 			kasan_release_vmalloc(orig_start, orig_end,
4691 				va->va_start, va->va_end);
4692 		vas[area] = NULL;
4693 	}
4694 
4695 overflow:
4696 	spin_unlock(&free_vmap_area_lock);
4697 	if (!purged) {
4698 		reclaim_and_purge_vmap_areas();
4699 		purged = true;
4700 
4701 		/* Before "retry", check if we recover. */
4702 		for (area = 0; area < nr_vms; area++) {
4703 			if (vas[area])
4704 				continue;
4705 
4706 			vas[area] = kmem_cache_zalloc(
4707 				vmap_area_cachep, GFP_KERNEL);
4708 			if (!vas[area])
4709 				goto err_free;
4710 		}
4711 
4712 		goto retry;
4713 	}
4714 
4715 err_free:
4716 	for (area = 0; area < nr_vms; area++) {
4717 		if (vas[area])
4718 			kmem_cache_free(vmap_area_cachep, vas[area]);
4719 
4720 		kfree(vms[area]);
4721 	}
4722 err_free2:
4723 	kfree(vas);
4724 	kfree(vms);
4725 	return NULL;
4726 
4727 err_free_shadow:
4728 	spin_lock(&free_vmap_area_lock);
4729 	/*
4730 	 * We release all the vmalloc shadows, even the ones for regions that
4731 	 * hadn't been successfully added. This relies on kasan_release_vmalloc
4732 	 * being able to tolerate this case.
4733 	 */
4734 	for (area = 0; area < nr_vms; area++) {
4735 		orig_start = vas[area]->va_start;
4736 		orig_end = vas[area]->va_end;
4737 		va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
4738 				&free_vmap_area_list);
4739 		if (va)
4740 			kasan_release_vmalloc(orig_start, orig_end,
4741 				va->va_start, va->va_end);
4742 		vas[area] = NULL;
4743 		kfree(vms[area]);
4744 	}
4745 	spin_unlock(&free_vmap_area_lock);
4746 	kfree(vas);
4747 	kfree(vms);
4748 	return NULL;
4749 }
4750 
4751 /**
4752  * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
4753  * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
4754  * @nr_vms: the number of allocated areas
4755  *
4756  * Free vm_structs and the array allocated by pcpu_get_vm_areas().
4757  */
4758 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
4759 {
4760 	int i;
4761 
4762 	for (i = 0; i < nr_vms; i++)
4763 		free_vm_area(vms[i]);
4764 	kfree(vms);
4765 }
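
/*
 * Illustrative sketch (the percpu allocator in mm/percpu.c is the real
 * user; "offsets", "sizes", "nr_vms" and "align" are assumptions here):
 *
 *	struct vm_struct **vms;
 *
 *	vms = pcpu_get_vm_areas(offsets, sizes, nr_vms, align);
 *	if (!vms)
 *		return -ENOMEM;
 *	...
 *	pcpu_free_vm_areas(vms, nr_vms);
 *
 * All entries in "offsets" and "sizes" must be aligned to "align".
 */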
4766 #endif	/* CONFIG_SMP */
4767 
4768 #ifdef CONFIG_PRINTK
4769 bool vmalloc_dump_obj(void *object)
4770 {
4771 	const void *caller;
4772 	struct vm_struct *vm;
4773 	struct vmap_area *va;
4774 	struct vmap_node *vn;
4775 	unsigned long addr;
4776 	unsigned int nr_pages;
4777 
4778 	addr = PAGE_ALIGN((unsigned long) object);
4779 	vn = addr_to_node(addr);
4780 
4781 	if (!spin_trylock(&vn->busy.lock))
4782 		return false;
4783 
4784 	va = __find_vmap_area(addr, &vn->busy.root);
4785 	if (!va || !va->vm) {
4786 		spin_unlock(&vn->busy.lock);
4787 		return false;
4788 	}
4789 
4790 	vm = va->vm;
4791 	addr = (unsigned long) vm->addr;
4792 	caller = vm->caller;
4793 	nr_pages = vm->nr_pages;
4794 	spin_unlock(&vn->busy.lock);
4795 
4796 	pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
4797 		nr_pages, addr, caller);
4798 
4799 	return true;
4800 }
4801 #endif
4802 
4803 #ifdef CONFIG_PROC_FS
4804 static void show_numa_info(struct seq_file *m, struct vm_struct *v)
4805 {
4806 	if (IS_ENABLED(CONFIG_NUMA)) {
4807 		unsigned int nr, *counters = m->private;
4808 		unsigned int step = 1U << vm_area_page_order(v);
4809 
4810 		if (!counters)
4811 			return;
4812 
4813 		if (v->flags & VM_UNINITIALIZED)
4814 			return;
4815 		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
4816 		smp_rmb();
4817 
4818 		memset(counters, 0, nr_node_ids * sizeof(unsigned int));
4819 
4820 		for (nr = 0; nr < v->nr_pages; nr += step)
4821 			counters[page_to_nid(v->pages[nr])] += step;
4822 		for_each_node_state(nr, N_HIGH_MEMORY)
4823 			if (counters[nr])
4824 				seq_printf(m, " N%u=%u", nr, counters[nr]);
4825 	}
4826 }
4827 
4828 static void show_purge_info(struct seq_file *m)
4829 {
4830 	struct vmap_node *vn;
4831 	struct vmap_area *va;
4832 	int i;
4833 
4834 	for (i = 0; i < nr_vmap_nodes; i++) {
4835 		vn = &vmap_nodes[i];
4836 
4837 		spin_lock(&vn->lazy.lock);
4838 		list_for_each_entry(va, &vn->lazy.head, list) {
4839 			seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
4840 				(void *)va->va_start, (void *)va->va_end,
4841 				va->va_end - va->va_start);
4842 		}
4843 		spin_unlock(&vn->lazy.lock);
4844 	}
4845 }
4846 
4847 static int vmalloc_info_show(struct seq_file *m, void *p)
4848 {
4849 	struct vmap_node *vn;
4850 	struct vmap_area *va;
4851 	struct vm_struct *v;
4852 	int i;
4853 
4854 	for (i = 0; i < nr_vmap_nodes; i++) {
4855 		vn = &vmap_nodes[i];
4856 
4857 		spin_lock(&vn->busy.lock);
4858 		list_for_each_entry(va, &vn->busy.head, list) {
4859 			if (!va->vm) {
4860 				if (va->flags & VMAP_RAM)
4861 					seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
4862 						(void *)va->va_start, (void *)va->va_end,
4863 						va->va_end - va->va_start);
4864 
4865 				continue;
4866 			}
4867 
4868 			v = va->vm;
4869 
4870 			seq_printf(m, "0x%pK-0x%pK %7ld",
4871 				v->addr, v->addr + v->size, v->size);
4872 
4873 			if (v->caller)
4874 				seq_printf(m, " %pS", v->caller);
4875 
4876 			if (v->nr_pages)
4877 				seq_printf(m, " pages=%d", v->nr_pages);
4878 
4879 			if (v->phys_addr)
4880 				seq_printf(m, " phys=%pa", &v->phys_addr);
4881 
4882 			if (v->flags & VM_IOREMAP)
4883 				seq_puts(m, " ioremap");
4884 
4885 			if (v->flags & VM_SPARSE)
4886 				seq_puts(m, " sparse");
4887 
4888 			if (v->flags & VM_ALLOC)
4889 				seq_puts(m, " vmalloc");
4890 
4891 			if (v->flags & VM_MAP)
4892 				seq_puts(m, " vmap");
4893 
4894 			if (v->flags & VM_USERMAP)
4895 				seq_puts(m, " user");
4896 
4897 			if (v->flags & VM_DMA_COHERENT)
4898 				seq_puts(m, " dma-coherent");
4899 
4900 			if (is_vmalloc_addr(v->pages))
4901 				seq_puts(m, " vpages");
4902 
4903 			show_numa_info(m, v);
4904 			seq_putc(m, '\n');
4905 		}
4906 		spin_unlock(&vn->busy.lock);
4907 	}
4908 
4909 	/*
4910 	 * As a final step, dump "unpurged" areas.
4911 	 */
4912 	show_purge_info(m);
4913 	return 0;
4914 }
4915 
4916 static int __init proc_vmalloc_init(void)
4917 {
4918 	void *priv_data = NULL;
4919 
4920 	if (IS_ENABLED(CONFIG_NUMA))
4921 		priv_data = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
4922 
4923 	proc_create_single_data("vmallocinfo",
4924 		0400, NULL, vmalloc_info_show, priv_data);
4925 
4926 	return 0;
4927 }
4928 module_init(proc_vmalloc_init);
4929 
4930 #endif
4931 
4932 static void __init vmap_init_free_space(void)
4933 {
4934 	unsigned long vmap_start = 1;
4935 	const unsigned long vmap_end = ULONG_MAX;
4936 	struct vmap_area *free;
4937 	struct vm_struct *busy;
4938 
4939 	/*
4940 	 *     B     F     B     B     B     F
4941 	 * -|-----|.....|-----|-----|-----|.....|-
4942 	 *  |           The KVA space           |
4943 	 *  |<--------------------------------->|
4944 	 */
4945 	for (busy = vmlist; busy; busy = busy->next) {
4946 		if ((unsigned long) busy->addr - vmap_start > 0) {
4947 			free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
4948 			if (!WARN_ON_ONCE(!free)) {
4949 				free->va_start = vmap_start;
4950 				free->va_end = (unsigned long) busy->addr;
4951 
4952 				insert_vmap_area_augment(free, NULL,
4953 					&free_vmap_area_root,
4954 						&free_vmap_area_list);
4955 			}
4956 		}
4957 
4958 		vmap_start = (unsigned long) busy->addr + busy->size;
4959 	}
4960 
4961 	if (vmap_end - vmap_start > 0) {
4962 		free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
4963 		if (!WARN_ON_ONCE(!free)) {
4964 			free->va_start = vmap_start;
4965 			free->va_end = vmap_end;
4966 
4967 			insert_vmap_area_augment(free, NULL,
4968 				&free_vmap_area_root,
4969 					&free_vmap_area_list);
4970 		}
4971 	}
4972 }
4973 
4974 static void vmap_init_nodes(void)
4975 {
4976 	struct vmap_node *vn;
4977 	int i, n;
4978 
4979 #if BITS_PER_LONG == 64
4980 	/*
4981 	 * The maximum number of nodes is fixed and bound to 128, so the
4982 	 * scale factor is 1 for systems whose number of cores is less
4983 	 * than or equal to that threshold.
4984 	 *
4985 	 * As for NUMA-aware nodes: for bigger systems, for example
4986 	 * multi-socket NUMA, where we can end up with thousands of
4987 	 * cores in total, "sub-NUMA clustering" should be added.
4988 	 *
4989 	 * In that case a NUMA domain is considered a single entity
4990 	 * with dedicated sub-nodes in it which describe one group or
4991 	 * set of cores. Per-domain purging, as well as per-domain
4992 	 * balancing, is then supposed to be added.
4993 	 */
4994 	n = clamp_t(unsigned int, num_possible_cpus(), 1, 128);
4995 
4996 	if (n > 1) {
4997 		vn = kmalloc_array(n, sizeof(*vn), GFP_NOWAIT | __GFP_NOWARN);
4998 		if (vn) {
4999 			/* Node partition is 16 pages. */
5000 			vmap_zone_size = (1 << 4) * PAGE_SIZE;
5001 			nr_vmap_nodes = n;
5002 			vmap_nodes = vn;
5003 		} else {
5004 			pr_err("Failed to allocate the node array. Disabling the node layer\n");
5005 		}
5006 	}
5007 #endif
5008 
5009 	for (n = 0; n < nr_vmap_nodes; n++) {
5010 		vn = &vmap_nodes[n];
5011 		vn->busy.root = RB_ROOT;
5012 		INIT_LIST_HEAD(&vn->busy.head);
5013 		spin_lock_init(&vn->busy.lock);
5014 
5015 		vn->lazy.root = RB_ROOT;
5016 		INIT_LIST_HEAD(&vn->lazy.head);
5017 		spin_lock_init(&vn->lazy.lock);
5018 
5019 		for (i = 0; i < MAX_VA_SIZE_PAGES; i++) {
5020 			INIT_LIST_HEAD(&vn->pool[i].head);
5021 			WRITE_ONCE(vn->pool[i].len, 0);
5022 		}
5023 
5024 		spin_lock_init(&vn->pool_lock);
5025 	}
5026 }
5027 
5028 static unsigned long
5029 vmap_node_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
5030 {
5031 	unsigned long count;
5032 	struct vmap_node *vn;
5033 	int i, j;
5034 
5035 	for (count = 0, i = 0; i < nr_vmap_nodes; i++) {
5036 		vn = &vmap_nodes[i];
5037 
5038 		for (j = 0; j < MAX_VA_SIZE_PAGES; j++)
5039 			count += READ_ONCE(vn->pool[j].len);
5040 	}
5041 
5042 	return count ? count : SHRINK_EMPTY;
5043 }
5044 
5045 static unsigned long
5046 vmap_node_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
5047 {
5048 	int i;
5049 
5050 	for (i = 0; i < nr_vmap_nodes; i++)
5051 		decay_va_pool_node(&vmap_nodes[i], true);
5052 
5053 	return SHRINK_STOP;
5054 }
5055 
5056 void __init vmalloc_init(void)
5057 {
5058 	struct shrinker *vmap_node_shrinker;
5059 	struct vmap_area *va;
5060 	struct vmap_node *vn;
5061 	struct vm_struct *tmp;
5062 	int i;
5063 
5064 	/*
5065 	 * Create the cache for vmap_area objects.
5066 	 */
5067 	vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);
5068 
5069 	for_each_possible_cpu(i) {
5070 		struct vmap_block_queue *vbq;
5071 		struct vfree_deferred *p;
5072 
5073 		vbq = &per_cpu(vmap_block_queue, i);
5074 		spin_lock_init(&vbq->lock);
5075 		INIT_LIST_HEAD(&vbq->free);
5076 		p = &per_cpu(vfree_deferred, i);
5077 		init_llist_head(&p->list);
5078 		INIT_WORK(&p->wq, delayed_vfree_work);
5079 		xa_init(&vbq->vmap_blocks);
5080 	}
5081 
5082 	/*
5083 	 * Setup nodes before importing vmlist.
5084 	 */
5085 	vmap_init_nodes();
5086 
5087 	/* Import existing vmlist entries. */
5088 	for (tmp = vmlist; tmp; tmp = tmp->next) {
5089 		va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
5090 		if (WARN_ON_ONCE(!va))
5091 			continue;
5092 
5093 		va->va_start = (unsigned long)tmp->addr;
5094 		va->va_end = va->va_start + tmp->size;
5095 		va->vm = tmp;
5096 
5097 		vn = addr_to_node(va->va_start);
5098 		insert_vmap_area(va, &vn->busy.root, &vn->busy.head);
5099 	}
5100 
5101 	/*
5102 	 * Now we can initialize the free vmap space.
5103 	 */
5104 	vmap_init_free_space();
5105 	vmap_initialized = true;
5106 
5107 	vmap_node_shrinker = shrinker_alloc(0, "vmap-node");
5108 	if (!vmap_node_shrinker) {
5109 		pr_err("Failed to allocate vmap-node shrinker!\n");
5110 		return;
5111 	}
5112 
5113 	vmap_node_shrinker->count_objects = vmap_node_shrink_count;
5114 	vmap_node_shrinker->scan_objects = vmap_node_shrink_scan;
5115 	shrinker_register(vmap_node_shrinker);
5116 }
5117