xref: /linux/mm/vmalloc.c (revision 93d0d6f8a654b623addb40f23e5c7696bc93fbd5)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  Copyright (C) 1993  Linus Torvalds
4  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
5  *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
6  *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
7  *  Numa awareness, Christoph Lameter, SGI, June 2005
8  *  Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019
9  */
10 
11 #include <linux/vmalloc.h>
12 #include <linux/mm.h>
13 #include <linux/module.h>
14 #include <linux/highmem.h>
15 #include <linux/sched/signal.h>
16 #include <linux/slab.h>
17 #include <linux/spinlock.h>
18 #include <linux/interrupt.h>
19 #include <linux/proc_fs.h>
20 #include <linux/seq_file.h>
21 #include <linux/set_memory.h>
22 #include <linux/debugobjects.h>
23 #include <linux/kallsyms.h>
24 #include <linux/list.h>
25 #include <linux/notifier.h>
26 #include <linux/rbtree.h>
27 #include <linux/xarray.h>
28 #include <linux/io.h>
29 #include <linux/rcupdate.h>
30 #include <linux/pfn.h>
31 #include <linux/kmemleak.h>
32 #include <linux/atomic.h>
33 #include <linux/compiler.h>
34 #include <linux/memcontrol.h>
35 #include <linux/llist.h>
36 #include <linux/uio.h>
37 #include <linux/bitops.h>
38 #include <linux/rbtree_augmented.h>
39 #include <linux/overflow.h>
40 #include <linux/pgtable.h>
41 #include <linux/hugetlb.h>
42 #include <linux/sched/mm.h>
43 #include <asm/tlbflush.h>
44 #include <asm/shmparam.h>
45 #include <linux/page_owner.h>
46 
47 #define CREATE_TRACE_POINTS
48 #include <trace/events/vmalloc.h>
49 
50 #include "internal.h"
51 #include "pgalloc-track.h"
52 
53 #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
54 static unsigned int __ro_after_init ioremap_max_page_shift = BITS_PER_LONG - 1;
55 
56 static int __init set_nohugeiomap(char *str)
57 {
58 	ioremap_max_page_shift = PAGE_SHIFT;
59 	return 0;
60 }
61 early_param("nohugeiomap", set_nohugeiomap);
62 #else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
63 static const unsigned int ioremap_max_page_shift = PAGE_SHIFT;
64 #endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */
65 
66 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
67 static bool __ro_after_init vmap_allow_huge = true;
68 
69 static int __init set_nohugevmalloc(char *str)
70 {
71 	vmap_allow_huge = false;
72 	return 0;
73 }
74 early_param("nohugevmalloc", set_nohugevmalloc);
75 #else /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
76 static const bool vmap_allow_huge = false;
77 #endif	/* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
78 
79 bool is_vmalloc_addr(const void *x)
80 {
81 	unsigned long addr = (unsigned long)kasan_reset_tag(x);
82 
83 	return addr >= VMALLOC_START && addr < VMALLOC_END;
84 }
85 EXPORT_SYMBOL(is_vmalloc_addr);
86 
87 struct vfree_deferred {
88 	struct llist_head list;
89 	struct work_struct wq;
90 };
91 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
92 
93 /*** Page table manipulation functions ***/
94 static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
95 			phys_addr_t phys_addr, pgprot_t prot,
96 			unsigned int max_page_shift, pgtbl_mod_mask *mask)
97 {
98 	pte_t *pte;
99 	u64 pfn;
100 	struct page *page;
101 	unsigned long size = PAGE_SIZE;
102 
103 	pfn = phys_addr >> PAGE_SHIFT;
104 	pte = pte_alloc_kernel_track(pmd, addr, mask);
105 	if (!pte)
106 		return -ENOMEM;
107 
108 	arch_enter_lazy_mmu_mode();
109 
110 	do {
111 		if (unlikely(!pte_none(ptep_get(pte)))) {
112 			if (pfn_valid(pfn)) {
113 				page = pfn_to_page(pfn);
114 				dump_page(page, "remapping already mapped page");
115 			}
116 			BUG();
117 		}
118 
119 #ifdef CONFIG_HUGETLB_PAGE
120 		size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift);
121 		if (size != PAGE_SIZE) {
122 			pte_t entry = pfn_pte(pfn, prot);
123 
124 			entry = arch_make_huge_pte(entry, ilog2(size), 0);
125 			set_huge_pte_at(&init_mm, addr, pte, entry, size);
126 			pfn += PFN_DOWN(size);
127 			continue;
128 		}
129 #endif
130 		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
131 		pfn++;
132 	} while (pte += PFN_DOWN(size), addr += size, addr != end);
133 
134 	arch_leave_lazy_mmu_mode();
135 	*mask |= PGTBL_PTE_MODIFIED;
136 	return 0;
137 }
138 
139 static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
140 			phys_addr_t phys_addr, pgprot_t prot,
141 			unsigned int max_page_shift)
142 {
143 	if (max_page_shift < PMD_SHIFT)
144 		return 0;
145 
146 	if (!arch_vmap_pmd_supported(prot))
147 		return 0;
148 
149 	if ((end - addr) != PMD_SIZE)
150 		return 0;
151 
152 	if (!IS_ALIGNED(addr, PMD_SIZE))
153 		return 0;
154 
155 	if (!IS_ALIGNED(phys_addr, PMD_SIZE))
156 		return 0;
157 
158 	if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
159 		return 0;
160 
161 	return pmd_set_huge(pmd, phys_addr, prot);
162 }
163 
164 static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
165 			phys_addr_t phys_addr, pgprot_t prot,
166 			unsigned int max_page_shift, pgtbl_mod_mask *mask)
167 {
168 	pmd_t *pmd;
169 	unsigned long next;
170 
171 	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
172 	if (!pmd)
173 		return -ENOMEM;
174 	do {
175 		next = pmd_addr_end(addr, end);
176 
177 		if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot,
178 					max_page_shift)) {
179 			*mask |= PGTBL_PMD_MODIFIED;
180 			continue;
181 		}
182 
183 		if (vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask))
184 			return -ENOMEM;
185 	} while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
186 	return 0;
187 }
188 
189 static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
190 			phys_addr_t phys_addr, pgprot_t prot,
191 			unsigned int max_page_shift)
192 {
193 	if (max_page_shift < PUD_SHIFT)
194 		return 0;
195 
196 	if (!arch_vmap_pud_supported(prot))
197 		return 0;
198 
199 	if ((end - addr) != PUD_SIZE)
200 		return 0;
201 
202 	if (!IS_ALIGNED(addr, PUD_SIZE))
203 		return 0;
204 
205 	if (!IS_ALIGNED(phys_addr, PUD_SIZE))
206 		return 0;
207 
208 	if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
209 		return 0;
210 
211 	return pud_set_huge(pud, phys_addr, prot);
212 }
213 
214 static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
215 			phys_addr_t phys_addr, pgprot_t prot,
216 			unsigned int max_page_shift, pgtbl_mod_mask *mask)
217 {
218 	pud_t *pud;
219 	unsigned long next;
220 
221 	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
222 	if (!pud)
223 		return -ENOMEM;
224 	do {
225 		next = pud_addr_end(addr, end);
226 
227 		if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot,
228 					max_page_shift)) {
229 			*mask |= PGTBL_PUD_MODIFIED;
230 			continue;
231 		}
232 
233 		if (vmap_pmd_range(pud, addr, next, phys_addr, prot,
234 					max_page_shift, mask))
235 			return -ENOMEM;
236 	} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
237 	return 0;
238 }
239 
240 static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
241 			phys_addr_t phys_addr, pgprot_t prot,
242 			unsigned int max_page_shift)
243 {
244 	if (max_page_shift < P4D_SHIFT)
245 		return 0;
246 
247 	if (!arch_vmap_p4d_supported(prot))
248 		return 0;
249 
250 	if ((end - addr) != P4D_SIZE)
251 		return 0;
252 
253 	if (!IS_ALIGNED(addr, P4D_SIZE))
254 		return 0;
255 
256 	if (!IS_ALIGNED(phys_addr, P4D_SIZE))
257 		return 0;
258 
259 	if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
260 		return 0;
261 
262 	return p4d_set_huge(p4d, phys_addr, prot);
263 }
264 
265 static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
266 			phys_addr_t phys_addr, pgprot_t prot,
267 			unsigned int max_page_shift, pgtbl_mod_mask *mask)
268 {
269 	p4d_t *p4d;
270 	unsigned long next;
271 
272 	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
273 	if (!p4d)
274 		return -ENOMEM;
275 	do {
276 		next = p4d_addr_end(addr, end);
277 
278 		if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot,
279 					max_page_shift)) {
280 			*mask |= PGTBL_P4D_MODIFIED;
281 			continue;
282 		}
283 
284 		if (vmap_pud_range(p4d, addr, next, phys_addr, prot,
285 					max_page_shift, mask))
286 			return -ENOMEM;
287 	} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
288 	return 0;
289 }
290 
291 static int vmap_range_noflush(unsigned long addr, unsigned long end,
292 			phys_addr_t phys_addr, pgprot_t prot,
293 			unsigned int max_page_shift)
294 {
295 	pgd_t *pgd;
296 	unsigned long start;
297 	unsigned long next;
298 	int err;
299 	pgtbl_mod_mask mask = 0;
300 
301 	might_sleep();
302 	BUG_ON(addr >= end);
303 
304 	start = addr;
305 	pgd = pgd_offset_k(addr);
306 	do {
307 		next = pgd_addr_end(addr, end);
308 		err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
309 					max_page_shift, &mask);
310 		if (err)
311 			break;
312 	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);
313 
314 	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
315 		arch_sync_kernel_mappings(start, end);
316 
317 	return err;
318 }
319 
320 int vmap_page_range(unsigned long addr, unsigned long end,
321 		    phys_addr_t phys_addr, pgprot_t prot)
322 {
323 	int err;
324 
325 	err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
326 				 ioremap_max_page_shift);
327 	flush_cache_vmap(addr, end);
328 	if (!err)
329 		err = kmsan_ioremap_page_range(addr, end, phys_addr, prot,
330 					       ioremap_max_page_shift);
331 	return err;
332 }
333 
334 int ioremap_page_range(unsigned long addr, unsigned long end,
335 		phys_addr_t phys_addr, pgprot_t prot)
336 {
337 	struct vm_struct *area;
338 
339 	area = find_vm_area((void *)addr);
340 	if (!area || !(area->flags & VM_IOREMAP)) {
341 		WARN_ONCE(1, "vm_area at addr %lx is not marked as VM_IOREMAP\n", addr);
342 		return -EINVAL;
343 	}
344 	if (addr != (unsigned long)area->addr ||
345 	    (void *)end != area->addr + get_vm_area_size(area)) {
346 		WARN_ONCE(1, "ioremap request [%lx,%lx) doesn't match vm_area [%lx, %lx)\n",
347 			  addr, end, (long)area->addr,
348 			  (long)area->addr + get_vm_area_size(area));
349 		return -ERANGE;
350 	}
351 	return vmap_page_range(addr, end, phys_addr, prot);
352 }
353 
354 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
355 			     pgtbl_mod_mask *mask)
356 {
357 	pte_t *pte;
358 	pte_t ptent;
359 	unsigned long size = PAGE_SIZE;
360 
361 	pte = pte_offset_kernel(pmd, addr);
362 	arch_enter_lazy_mmu_mode();
363 
364 	do {
365 #ifdef CONFIG_HUGETLB_PAGE
366 		size = arch_vmap_pte_range_unmap_size(addr, pte);
367 		if (size != PAGE_SIZE) {
368 			if (WARN_ON(!IS_ALIGNED(addr, size))) {
369 				addr = ALIGN_DOWN(addr, size);
370 				pte = PTR_ALIGN_DOWN(pte, sizeof(*pte) * (size >> PAGE_SHIFT));
371 			}
372 			ptent = huge_ptep_get_and_clear(&init_mm, addr, pte, size);
373 			if (WARN_ON(end - addr < size))
374 				size = end - addr;
375 		} else
376 #endif
377 			ptent = ptep_get_and_clear(&init_mm, addr, pte);
378 		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
379 	} while (pte += (size >> PAGE_SHIFT), addr += size, addr != end);
380 
381 	arch_leave_lazy_mmu_mode();
382 	*mask |= PGTBL_PTE_MODIFIED;
383 }
384 
385 static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
386 			     pgtbl_mod_mask *mask)
387 {
388 	pmd_t *pmd;
389 	unsigned long next;
390 	int cleared;
391 
392 	pmd = pmd_offset(pud, addr);
393 	do {
394 		next = pmd_addr_end(addr, end);
395 
396 		cleared = pmd_clear_huge(pmd);
397 		if (cleared || pmd_bad(*pmd))
398 			*mask |= PGTBL_PMD_MODIFIED;
399 
400 		if (cleared) {
401 			WARN_ON(next - addr < PMD_SIZE);
402 			continue;
403 		}
404 		if (pmd_none_or_clear_bad(pmd))
405 			continue;
406 		vunmap_pte_range(pmd, addr, next, mask);
407 
408 		cond_resched();
409 	} while (pmd++, addr = next, addr != end);
410 }
411 
412 static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
413 			     pgtbl_mod_mask *mask)
414 {
415 	pud_t *pud;
416 	unsigned long next;
417 	int cleared;
418 
419 	pud = pud_offset(p4d, addr);
420 	do {
421 		next = pud_addr_end(addr, end);
422 
423 		cleared = pud_clear_huge(pud);
424 		if (cleared || pud_bad(*pud))
425 			*mask |= PGTBL_PUD_MODIFIED;
426 
427 		if (cleared) {
428 			WARN_ON(next - addr < PUD_SIZE);
429 			continue;
430 		}
431 		if (pud_none_or_clear_bad(pud))
432 			continue;
433 		vunmap_pmd_range(pud, addr, next, mask);
434 	} while (pud++, addr = next, addr != end);
435 }
436 
437 static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
438 			     pgtbl_mod_mask *mask)
439 {
440 	p4d_t *p4d;
441 	unsigned long next;
442 
443 	p4d = p4d_offset(pgd, addr);
444 	do {
445 		next = p4d_addr_end(addr, end);
446 
447 		p4d_clear_huge(p4d);
448 		if (p4d_bad(*p4d))
449 			*mask |= PGTBL_P4D_MODIFIED;
450 
451 		if (p4d_none_or_clear_bad(p4d))
452 			continue;
453 		vunmap_pud_range(p4d, addr, next, mask);
454 	} while (p4d++, addr = next, addr != end);
455 }
456 
457 /*
458  * vunmap_range_noflush is similar to vunmap_range, but does not
459  * flush caches or TLBs.
460  *
461  * The caller is responsible for calling flush_cache_vmap() before calling
462  * this function, and flush_tlb_kernel_range after it has returned
463  * successfully (and before the addresses are expected to cause a page fault
464  * or be re-mapped for something else, if TLB flushes are being delayed or
465  * coalesced).
466  *
467  * This is an internal function only. Do not use outside mm/.
468  */
469 void __vunmap_range_noflush(unsigned long start, unsigned long end)
470 {
471 	unsigned long next;
472 	pgd_t *pgd;
473 	unsigned long addr = start;
474 	pgtbl_mod_mask mask = 0;
475 
476 	BUG_ON(addr >= end);
477 	pgd = pgd_offset_k(addr);
478 	do {
479 		next = pgd_addr_end(addr, end);
480 		if (pgd_bad(*pgd))
481 			mask |= PGTBL_PGD_MODIFIED;
482 		if (pgd_none_or_clear_bad(pgd))
483 			continue;
484 		vunmap_p4d_range(pgd, addr, next, &mask);
485 	} while (pgd++, addr = next, addr != end);
486 
487 	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
488 		arch_sync_kernel_mappings(start, end);
489 }
490 
491 void vunmap_range_noflush(unsigned long start, unsigned long end)
492 {
493 	kmsan_vunmap_range_noflush(start, end);
494 	__vunmap_range_noflush(start, end);
495 }
496 
497 /**
498  * vunmap_range - unmap kernel virtual addresses
499  * @addr: start of the VM area to unmap
500  * @end: end of the VM area to unmap (non-inclusive)
501  *
502  * Clears any present PTEs in the virtual address range, flushes TLBs and
503  * caches. Any subsequent access to the address before it has been re-mapped
504  * is a kernel bug.
505  */
506 void vunmap_range(unsigned long addr, unsigned long end)
507 {
508 	flush_cache_vunmap(addr, end);
509 	vunmap_range_noflush(addr, end);
510 	flush_tlb_kernel_range(addr, end);
511 }
512 
513 static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
514 		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
515 		pgtbl_mod_mask *mask)
516 {
517 	pte_t *pte;
518 
519 	/*
520 	 * nr is a running index into the array which helps higher level
521 	 * callers keep track of where we're up to.
522 	 */
523 
524 	pte = pte_alloc_kernel_track(pmd, addr, mask);
525 	if (!pte)
526 		return -ENOMEM;
527 
528 	arch_enter_lazy_mmu_mode();
529 
530 	do {
531 		struct page *page = pages[*nr];
532 
533 		if (WARN_ON(!pte_none(ptep_get(pte))))
534 			return -EBUSY;
535 		if (WARN_ON(!page))
536 			return -ENOMEM;
537 		if (WARN_ON(!pfn_valid(page_to_pfn(page))))
538 			return -EINVAL;
539 
540 		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
541 		(*nr)++;
542 	} while (pte++, addr += PAGE_SIZE, addr != end);
543 
544 	arch_leave_lazy_mmu_mode();
545 	*mask |= PGTBL_PTE_MODIFIED;
546 	return 0;
547 }
548 
549 static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
550 		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
551 		pgtbl_mod_mask *mask)
552 {
553 	pmd_t *pmd;
554 	unsigned long next;
555 
556 	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
557 	if (!pmd)
558 		return -ENOMEM;
559 	do {
560 		next = pmd_addr_end(addr, end);
561 		if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask))
562 			return -ENOMEM;
563 	} while (pmd++, addr = next, addr != end);
564 	return 0;
565 }
566 
567 static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr,
568 		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
569 		pgtbl_mod_mask *mask)
570 {
571 	pud_t *pud;
572 	unsigned long next;
573 
574 	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
575 	if (!pud)
576 		return -ENOMEM;
577 	do {
578 		next = pud_addr_end(addr, end);
579 		if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask))
580 			return -ENOMEM;
581 	} while (pud++, addr = next, addr != end);
582 	return 0;
583 }
584 
585 static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr,
586 		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
587 		pgtbl_mod_mask *mask)
588 {
589 	p4d_t *p4d;
590 	unsigned long next;
591 
592 	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
593 	if (!p4d)
594 		return -ENOMEM;
595 	do {
596 		next = p4d_addr_end(addr, end);
597 		if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask))
598 			return -ENOMEM;
599 	} while (p4d++, addr = next, addr != end);
600 	return 0;
601 }
602 
603 static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
604 		pgprot_t prot, struct page **pages)
605 {
606 	unsigned long start = addr;
607 	pgd_t *pgd;
608 	unsigned long next;
609 	int err = 0;
610 	int nr = 0;
611 	pgtbl_mod_mask mask = 0;
612 
613 	BUG_ON(addr >= end);
614 	pgd = pgd_offset_k(addr);
615 	do {
616 		next = pgd_addr_end(addr, end);
617 		if (pgd_bad(*pgd))
618 			mask |= PGTBL_PGD_MODIFIED;
619 		err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
620 		if (err)
621 			break;
622 	} while (pgd++, addr = next, addr != end);
623 
624 	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
625 		arch_sync_kernel_mappings(start, end);
626 
627 	return err;
628 }
629 
630 /*
631  * vmap_pages_range_noflush is similar to vmap_pages_range, but does not
632  * flush caches.
633  *
634  * The caller is responsible for calling flush_cache_vmap() after this
635  * function returns successfully and before the addresses are accessed.
636  *
637  * This is an internal function only. Do not use outside mm/.
638  */
639 int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
640 		pgprot_t prot, struct page **pages, unsigned int page_shift)
641 {
642 	unsigned int i, nr = (end - addr) >> PAGE_SHIFT;
643 
644 	WARN_ON(page_shift < PAGE_SHIFT);
645 
646 	if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) ||
647 			page_shift == PAGE_SHIFT)
648 		return vmap_small_pages_range_noflush(addr, end, prot, pages);
649 
650 	for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) {
651 		int err;
652 
653 		err = vmap_range_noflush(addr, addr + (1UL << page_shift),
654 					page_to_phys(pages[i]), prot,
655 					page_shift);
656 		if (err)
657 			return err;
658 
659 		addr += 1UL << page_shift;
660 	}
661 
662 	return 0;
663 }
664 
665 int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
666 		pgprot_t prot, struct page **pages, unsigned int page_shift)
667 {
668 	int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages,
669 						 page_shift);
670 
671 	if (ret)
672 		return ret;
673 	return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
674 }
675 
676 /**
677  * vmap_pages_range - map pages to a kernel virtual address
678  * @addr: start of the VM area to map
679  * @end: end of the VM area to map (non-inclusive)
680  * @prot: page protection flags to use
681  * @pages: pages to map (always PAGE_SIZE pages)
682  * @page_shift: maximum shift that the pages may be mapped with, @pages must
683  * be aligned and contiguous up to at least this shift.
684  *
685  * RETURNS:
686  * 0 on success, -errno on failure.
687  */
688 int vmap_pages_range(unsigned long addr, unsigned long end,
689 		pgprot_t prot, struct page **pages, unsigned int page_shift)
690 {
691 	int err;
692 
693 	err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
694 	flush_cache_vmap(addr, end);
695 	return err;
696 }
697 
698 static int check_sparse_vm_area(struct vm_struct *area, unsigned long start,
699 				unsigned long end)
700 {
701 	might_sleep();
702 	if (WARN_ON_ONCE(area->flags & VM_FLUSH_RESET_PERMS))
703 		return -EINVAL;
704 	if (WARN_ON_ONCE(area->flags & VM_NO_GUARD))
705 		return -EINVAL;
706 	if (WARN_ON_ONCE(!(area->flags & VM_SPARSE)))
707 		return -EINVAL;
708 	if ((end - start) >> PAGE_SHIFT > totalram_pages())
709 		return -E2BIG;
710 	if (start < (unsigned long)area->addr ||
711 	    (void *)end > area->addr + get_vm_area_size(area))
712 		return -ERANGE;
713 	return 0;
714 }
715 
716 /**
717  * vm_area_map_pages - map pages inside given sparse vm_area
718  * @area: vm_area
719  * @start: start address inside vm_area
720  * @end: end address inside vm_area
721  * @pages: pages to map (always PAGE_SIZE pages)
722  */
723 int vm_area_map_pages(struct vm_struct *area, unsigned long start,
724 		      unsigned long end, struct page **pages)
725 {
726 	int err;
727 
728 	err = check_sparse_vm_area(area, start, end);
729 	if (err)
730 		return err;
731 
732 	return vmap_pages_range(start, end, PAGE_KERNEL, pages, PAGE_SHIFT);
733 }
734 
735 /**
736  * vm_area_unmap_pages - unmap pages inside given sparse vm_area
737  * @area: vm_area
738  * @start: start address inside vm_area
739  * @end: end address inside vm_area
740  */
741 void vm_area_unmap_pages(struct vm_struct *area, unsigned long start,
742 			 unsigned long end)
743 {
744 	if (check_sparse_vm_area(area, start, end))
745 		return;
746 
747 	vunmap_range(start, end);
748 }
749 
750 int is_vmalloc_or_module_addr(const void *x)
751 {
752 	/*
753 	 * ARM, x86-64 and sparc64 put modules in a special place,
754 	 * and fall back on vmalloc() if that fails. Others
755 	 * just put it in the vmalloc space.
756 	 */
757 #if defined(CONFIG_EXECMEM) && defined(MODULES_VADDR)
758 	unsigned long addr = (unsigned long)kasan_reset_tag(x);
759 	if (addr >= MODULES_VADDR && addr < MODULES_END)
760 		return 1;
761 #endif
762 	return is_vmalloc_addr(x);
763 }
764 EXPORT_SYMBOL_GPL(is_vmalloc_or_module_addr);
765 
766 /*
767  * Walk a vmap address to the struct page it maps. Huge vmap mappings will
768  * return the tail page that corresponds to the base page address, which
769  * matches small vmap mappings.
770  */
771 struct page *vmalloc_to_page(const void *vmalloc_addr)
772 {
773 	unsigned long addr = (unsigned long) vmalloc_addr;
774 	struct page *page = NULL;
775 	pgd_t *pgd = pgd_offset_k(addr);
776 	p4d_t *p4d;
777 	pud_t *pud;
778 	pmd_t *pmd;
779 	pte_t *ptep, pte;
780 
781 	/*
782 	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
783 	 * architectures that do not vmalloc module space
784 	 */
785 	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
786 
787 	if (pgd_none(*pgd))
788 		return NULL;
789 	if (WARN_ON_ONCE(pgd_leaf(*pgd)))
790 		return NULL; /* XXX: no allowance for huge pgd */
791 	if (WARN_ON_ONCE(pgd_bad(*pgd)))
792 		return NULL;
793 
794 	p4d = p4d_offset(pgd, addr);
795 	if (p4d_none(*p4d))
796 		return NULL;
797 	if (p4d_leaf(*p4d))
798 		return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
799 	if (WARN_ON_ONCE(p4d_bad(*p4d)))
800 		return NULL;
801 
802 	pud = pud_offset(p4d, addr);
803 	if (pud_none(*pud))
804 		return NULL;
805 	if (pud_leaf(*pud))
806 		return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
807 	if (WARN_ON_ONCE(pud_bad(*pud)))
808 		return NULL;
809 
810 	pmd = pmd_offset(pud, addr);
811 	if (pmd_none(*pmd))
812 		return NULL;
813 	if (pmd_leaf(*pmd))
814 		return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
815 	if (WARN_ON_ONCE(pmd_bad(*pmd)))
816 		return NULL;
817 
818 	ptep = pte_offset_kernel(pmd, addr);
819 	pte = ptep_get(ptep);
820 	if (pte_present(pte))
821 		page = pte_page(pte);
822 
823 	return page;
824 }
825 EXPORT_SYMBOL(vmalloc_to_page);
826 
827 /*
828  * Map a vmalloc()-space virtual address to the physical page frame number.
829  */
830 unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
831 {
832 	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
833 }
834 EXPORT_SYMBOL(vmalloc_to_pfn);
835 
836 
837 /*** Global kva allocator ***/
838 
839 #define DEBUG_AUGMENT_PROPAGATE_CHECK 0
840 #define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0
841 
842 
843 static DEFINE_SPINLOCK(free_vmap_area_lock);
844 static bool vmap_initialized __read_mostly;
845 
846 /*
847  * This kmem_cache is used for vmap_area objects. Instead of
848  * allocating from slab we reuse an object from this cache to
849  * make things faster. Especially in "no edge" splitting of
850  * free block.
851  */
852 static struct kmem_cache *vmap_area_cachep;
853 
854 /*
855  * This linked list is used in pair with free_vmap_area_root.
856  * It gives O(1) access to prev/next to perform fast coalescing.
857  */
858 static LIST_HEAD(free_vmap_area_list);
859 
860 /*
861  * This augment red-black tree represents the free vmap space.
862  * All vmap_area objects in this tree are sorted by va->va_start
863  * address. It is used for allocation and merging when a vmap
864  * object is released.
865  *
866  * Each vmap_area node contains a maximum available free block
867  * of its sub-tree, right or left. Therefore it is possible to
868  * find a lowest match of free area.
869  */
870 static struct rb_root free_vmap_area_root = RB_ROOT;
871 
872 /*
873  * Preload a CPU with one object for "no edge" split case. The
874  * aim is to get rid of allocations from the atomic context, thus
875  * to use more permissive allocation masks.
876  */
877 static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);
878 
879 /*
880  * This structure defines a single, solid model where a list and
881  * rb-tree are part of one entity protected by the lock. Nodes are
882  * sorted in ascending order, thus for O(1) access to left/right
883  * neighbors a list is used as well as for sequential traversal.
884  */
885 struct rb_list {
886 	struct rb_root root;
887 	struct list_head head;
888 	spinlock_t lock;
889 };
890 
891 /*
892  * A fast size storage contains VAs up to 1M size. A pool consists
893  * of linked between each other ready to go VAs of certain sizes.
894  * An index in the pool-array corresponds to number of pages + 1.
895  */
896 #define MAX_VA_SIZE_PAGES 256
897 
898 struct vmap_pool {
899 	struct list_head head;
900 	unsigned long len;
901 };
902 
903 /*
904  * An effective vmap-node logic. Users make use of nodes instead
905  * of a global heap. It allows to balance an access and mitigate
906  * contention.
907  */
908 static struct vmap_node {
909 	/* Simple size segregated storage. */
910 	struct vmap_pool pool[MAX_VA_SIZE_PAGES];
911 	spinlock_t pool_lock;
912 	bool skip_populate;
913 
914 	/* Bookkeeping data of this node. */
915 	struct rb_list busy;
916 	struct rb_list lazy;
917 
918 	/*
919 	 * Ready-to-free areas.
920 	 */
921 	struct list_head purge_list;
922 	struct work_struct purge_work;
923 	unsigned long nr_purged;
924 } single;
925 
926 /*
927  * Initial setup consists of one single node, i.e. a balancing
928  * is fully disabled. Later on, after vmap is initialized these
929  * parameters are updated based on a system capacity.
930  */
931 static struct vmap_node *vmap_nodes = &single;
932 static __read_mostly unsigned int nr_vmap_nodes = 1;
933 static __read_mostly unsigned int vmap_zone_size = 1;
934 
935 static inline unsigned int
936 addr_to_node_id(unsigned long addr)
937 {
938 	return (addr / vmap_zone_size) % nr_vmap_nodes;
939 }
940 
941 static inline struct vmap_node *
942 addr_to_node(unsigned long addr)
943 {
944 	return &vmap_nodes[addr_to_node_id(addr)];
945 }
946 
947 static inline struct vmap_node *
948 id_to_node(unsigned int id)
949 {
950 	return &vmap_nodes[id % nr_vmap_nodes];
951 }
952 
953 /*
954  * We use the value 0 to represent "no node", that is why
955  * an encoded value will be the node-id incremented by 1.
956  * It is always greater then 0. A valid node_id which can
957  * be encoded is [0:nr_vmap_nodes - 1]. If a passed node_id
958  * is not valid 0 is returned.
959  */
960 static unsigned int
961 encode_vn_id(unsigned int node_id)
962 {
963 	/* Can store U8_MAX [0:254] nodes. */
964 	if (node_id < nr_vmap_nodes)
965 		return (node_id + 1) << BITS_PER_BYTE;
966 
967 	/* Warn and no node encoded. */
968 	WARN_ONCE(1, "Encode wrong node id (%u)\n", node_id);
969 	return 0;
970 }
971 
972 /*
973  * Returns an encoded node-id, the valid range is within
974  * [0:nr_vmap_nodes-1] values. Otherwise nr_vmap_nodes is
975  * returned if extracted data is wrong.
976  */
977 static unsigned int
978 decode_vn_id(unsigned int val)
979 {
980 	unsigned int node_id = (val >> BITS_PER_BYTE) - 1;
981 
982 	/* Can store U8_MAX [0:254] nodes. */
983 	if (node_id < nr_vmap_nodes)
984 		return node_id;
985 
986 	/* If it was _not_ zero, warn. */
987 	WARN_ONCE(node_id != UINT_MAX,
988 		"Decode wrong node id (%d)\n", node_id);
989 
990 	return nr_vmap_nodes;
991 }
992 
993 static bool
994 is_vn_id_valid(unsigned int node_id)
995 {
996 	if (node_id < nr_vmap_nodes)
997 		return true;
998 
999 	return false;
1000 }
1001 
1002 static __always_inline unsigned long
1003 va_size(struct vmap_area *va)
1004 {
1005 	return (va->va_end - va->va_start);
1006 }
1007 
1008 static __always_inline unsigned long
1009 get_subtree_max_size(struct rb_node *node)
1010 {
1011 	struct vmap_area *va;
1012 
1013 	va = rb_entry_safe(node, struct vmap_area, rb_node);
1014 	return va ? va->subtree_max_size : 0;
1015 }
1016 
1017 RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
1018 	struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)
1019 
1020 static void reclaim_and_purge_vmap_areas(void);
1021 static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
1022 static void drain_vmap_area_work(struct work_struct *work);
1023 static DECLARE_WORK(drain_vmap_work, drain_vmap_area_work);
1024 
1025 static atomic_long_t nr_vmalloc_pages;
1026 
1027 unsigned long vmalloc_nr_pages(void)
1028 {
1029 	return atomic_long_read(&nr_vmalloc_pages);
1030 }
1031 
1032 static struct vmap_area *__find_vmap_area(unsigned long addr, struct rb_root *root)
1033 {
1034 	struct rb_node *n = root->rb_node;
1035 
1036 	addr = (unsigned long)kasan_reset_tag((void *)addr);
1037 
1038 	while (n) {
1039 		struct vmap_area *va;
1040 
1041 		va = rb_entry(n, struct vmap_area, rb_node);
1042 		if (addr < va->va_start)
1043 			n = n->rb_left;
1044 		else if (addr >= va->va_end)
1045 			n = n->rb_right;
1046 		else
1047 			return va;
1048 	}
1049 
1050 	return NULL;
1051 }
1052 
1053 /* Look up the first VA which satisfies addr < va_end, NULL if none. */
1054 static struct vmap_area *
1055 __find_vmap_area_exceed_addr(unsigned long addr, struct rb_root *root)
1056 {
1057 	struct vmap_area *va = NULL;
1058 	struct rb_node *n = root->rb_node;
1059 
1060 	addr = (unsigned long)kasan_reset_tag((void *)addr);
1061 
1062 	while (n) {
1063 		struct vmap_area *tmp;
1064 
1065 		tmp = rb_entry(n, struct vmap_area, rb_node);
1066 		if (tmp->va_end > addr) {
1067 			va = tmp;
1068 			if (tmp->va_start <= addr)
1069 				break;
1070 
1071 			n = n->rb_left;
1072 		} else
1073 			n = n->rb_right;
1074 	}
1075 
1076 	return va;
1077 }
1078 
1079 /*
1080  * Returns a node where a first VA, that satisfies addr < va_end, resides.
1081  * If success, a node is locked. A user is responsible to unlock it when a
1082  * VA is no longer needed to be accessed.
1083  *
1084  * Returns NULL if nothing found.
1085  */
1086 static struct vmap_node *
1087 find_vmap_area_exceed_addr_lock(unsigned long addr, struct vmap_area **va)
1088 {
1089 	unsigned long va_start_lowest;
1090 	struct vmap_node *vn;
1091 	int i;
1092 
1093 repeat:
1094 	for (i = 0, va_start_lowest = 0; i < nr_vmap_nodes; i++) {
1095 		vn = &vmap_nodes[i];
1096 
1097 		spin_lock(&vn->busy.lock);
1098 		*va = __find_vmap_area_exceed_addr(addr, &vn->busy.root);
1099 
1100 		if (*va)
1101 			if (!va_start_lowest || (*va)->va_start < va_start_lowest)
1102 				va_start_lowest = (*va)->va_start;
1103 		spin_unlock(&vn->busy.lock);
1104 	}
1105 
1106 	/*
1107 	 * Check if found VA exists, it might have gone away.  In this case we
1108 	 * repeat the search because a VA has been removed concurrently and we
1109 	 * need to proceed to the next one, which is a rare case.
1110 	 */
1111 	if (va_start_lowest) {
1112 		vn = addr_to_node(va_start_lowest);
1113 
1114 		spin_lock(&vn->busy.lock);
1115 		*va = __find_vmap_area(va_start_lowest, &vn->busy.root);
1116 
1117 		if (*va)
1118 			return vn;
1119 
1120 		spin_unlock(&vn->busy.lock);
1121 		goto repeat;
1122 	}
1123 
1124 	return NULL;
1125 }
1126 
1127 /*
1128  * This function returns back addresses of parent node
1129  * and its left or right link for further processing.
1130  *
1131  * Otherwise NULL is returned. In that case all further
1132  * steps regarding inserting of conflicting overlap range
1133  * have to be declined and actually considered as a bug.
1134  */
1135 static __always_inline struct rb_node **
1136 find_va_links(struct vmap_area *va,
1137 	struct rb_root *root, struct rb_node *from,
1138 	struct rb_node **parent)
1139 {
1140 	struct vmap_area *tmp_va;
1141 	struct rb_node **link;
1142 
1143 	if (root) {
1144 		link = &root->rb_node;
1145 		if (unlikely(!*link)) {
1146 			*parent = NULL;
1147 			return link;
1148 		}
1149 	} else {
1150 		link = &from;
1151 	}
1152 
1153 	/*
1154 	 * Go to the bottom of the tree. When we hit the last point
1155 	 * we end up with parent rb_node and correct direction, i name
1156 	 * it link, where the new va->rb_node will be attached to.
1157 	 */
1158 	do {
1159 		tmp_va = rb_entry(*link, struct vmap_area, rb_node);
1160 
1161 		/*
1162 		 * During the traversal we also do some sanity check.
1163 		 * Trigger the BUG() if there are sides(left/right)
1164 		 * or full overlaps.
1165 		 */
1166 		if (va->va_end <= tmp_va->va_start)
1167 			link = &(*link)->rb_left;
1168 		else if (va->va_start >= tmp_va->va_end)
1169 			link = &(*link)->rb_right;
1170 		else {
1171 			WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n",
1172 				va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);
1173 
1174 			return NULL;
1175 		}
1176 	} while (*link);
1177 
1178 	*parent = &tmp_va->rb_node;
1179 	return link;
1180 }
1181 
1182 static __always_inline struct list_head *
1183 get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
1184 {
1185 	struct list_head *list;
1186 
1187 	if (unlikely(!parent))
1188 		/*
1189 		 * The red-black tree where we try to find VA neighbors
1190 		 * before merging or inserting is empty, i.e. it means
1191 		 * there is no free vmap space. Normally it does not
1192 		 * happen but we handle this case anyway.
1193 		 */
1194 		return NULL;
1195 
1196 	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
1197 	return (&parent->rb_right == link ? list->next : list);
1198 }
1199 
1200 static __always_inline void
1201 __link_va(struct vmap_area *va, struct rb_root *root,
1202 	struct rb_node *parent, struct rb_node **link,
1203 	struct list_head *head, bool augment)
1204 {
1205 	/*
1206 	 * VA is still not in the list, but we can
1207 	 * identify its future previous list_head node.
1208 	 */
1209 	if (likely(parent)) {
1210 		head = &rb_entry(parent, struct vmap_area, rb_node)->list;
1211 		if (&parent->rb_right != link)
1212 			head = head->prev;
1213 	}
1214 
1215 	/* Insert to the rb-tree */
1216 	rb_link_node(&va->rb_node, parent, link);
1217 	if (augment) {
1218 		/*
1219 		 * Some explanation here. Just perform simple insertion
1220 		 * to the tree. We do not set va->subtree_max_size to
1221 		 * its current size before calling rb_insert_augmented().
1222 		 * It is because we populate the tree from the bottom
1223 		 * to parent levels when the node _is_ in the tree.
1224 		 *
1225 		 * Therefore we set subtree_max_size to zero after insertion,
1226 		 * to let __augment_tree_propagate_from() puts everything to
1227 		 * the correct order later on.
1228 		 */
1229 		rb_insert_augmented(&va->rb_node,
1230 			root, &free_vmap_area_rb_augment_cb);
1231 		va->subtree_max_size = 0;
1232 	} else {
1233 		rb_insert_color(&va->rb_node, root);
1234 	}
1235 
1236 	/* Address-sort this list */
1237 	list_add(&va->list, head);
1238 }
1239 
1240 static __always_inline void
1241 link_va(struct vmap_area *va, struct rb_root *root,
1242 	struct rb_node *parent, struct rb_node **link,
1243 	struct list_head *head)
1244 {
1245 	__link_va(va, root, parent, link, head, false);
1246 }
1247 
1248 static __always_inline void
1249 link_va_augment(struct vmap_area *va, struct rb_root *root,
1250 	struct rb_node *parent, struct rb_node **link,
1251 	struct list_head *head)
1252 {
1253 	__link_va(va, root, parent, link, head, true);
1254 }
1255 
1256 static __always_inline void
1257 __unlink_va(struct vmap_area *va, struct rb_root *root, bool augment)
1258 {
1259 	if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
1260 		return;
1261 
1262 	if (augment)
1263 		rb_erase_augmented(&va->rb_node,
1264 			root, &free_vmap_area_rb_augment_cb);
1265 	else
1266 		rb_erase(&va->rb_node, root);
1267 
1268 	list_del_init(&va->list);
1269 	RB_CLEAR_NODE(&va->rb_node);
1270 }
1271 
1272 static __always_inline void
1273 unlink_va(struct vmap_area *va, struct rb_root *root)
1274 {
1275 	__unlink_va(va, root, false);
1276 }
1277 
1278 static __always_inline void
1279 unlink_va_augment(struct vmap_area *va, struct rb_root *root)
1280 {
1281 	__unlink_va(va, root, true);
1282 }
1283 
1284 #if DEBUG_AUGMENT_PROPAGATE_CHECK
1285 /*
1286  * Gets called when remove the node and rotate.
1287  */
1288 static __always_inline unsigned long
1289 compute_subtree_max_size(struct vmap_area *va)
1290 {
1291 	return max3(va_size(va),
1292 		get_subtree_max_size(va->rb_node.rb_left),
1293 		get_subtree_max_size(va->rb_node.rb_right));
1294 }
1295 
1296 static void
1297 augment_tree_propagate_check(void)
1298 {
1299 	struct vmap_area *va;
1300 	unsigned long computed_size;
1301 
1302 	list_for_each_entry(va, &free_vmap_area_list, list) {
1303 		computed_size = compute_subtree_max_size(va);
1304 		if (computed_size != va->subtree_max_size)
1305 			pr_emerg("tree is corrupted: %lu, %lu\n",
1306 				va_size(va), va->subtree_max_size);
1307 	}
1308 }
1309 #endif
1310 
1311 /*
1312  * This function populates subtree_max_size from bottom to upper
1313  * levels starting from VA point. The propagation must be done
1314  * when VA size is modified by changing its va_start/va_end. Or
1315  * in case of newly inserting of VA to the tree.
1316  *
1317  * It means that __augment_tree_propagate_from() must be called:
1318  * - After VA has been inserted to the tree(free path);
1319  * - After VA has been shrunk(allocation path);
1320  * - After VA has been increased(merging path).
1321  *
1322  * Please note that, it does not mean that upper parent nodes
1323  * and their subtree_max_size are recalculated all the time up
1324  * to the root node.
1325  *
1326  *       4--8
1327  *        /\
1328  *       /  \
1329  *      /    \
1330  *    2--2  8--8
1331  *
1332  * For example if we modify the node 4, shrinking it to 2, then
1333  * no any modification is required. If we shrink the node 2 to 1
1334  * its subtree_max_size is updated only, and set to 1. If we shrink
1335  * the node 8 to 6, then its subtree_max_size is set to 6 and parent
1336  * node becomes 4--6.
1337  */
1338 static __always_inline void
1339 augment_tree_propagate_from(struct vmap_area *va)
1340 {
1341 	/*
1342 	 * Populate the tree from bottom towards the root until
1343 	 * the calculated maximum available size of checked node
1344 	 * is equal to its current one.
1345 	 */
1346 	free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL);
1347 
1348 #if DEBUG_AUGMENT_PROPAGATE_CHECK
1349 	augment_tree_propagate_check();
1350 #endif
1351 }
1352 
1353 static void
1354 insert_vmap_area(struct vmap_area *va,
1355 	struct rb_root *root, struct list_head *head)
1356 {
1357 	struct rb_node **link;
1358 	struct rb_node *parent;
1359 
1360 	link = find_va_links(va, root, NULL, &parent);
1361 	if (link)
1362 		link_va(va, root, parent, link, head);
1363 }
1364 
1365 static void
1366 insert_vmap_area_augment(struct vmap_area *va,
1367 	struct rb_node *from, struct rb_root *root,
1368 	struct list_head *head)
1369 {
1370 	struct rb_node **link;
1371 	struct rb_node *parent;
1372 
1373 	if (from)
1374 		link = find_va_links(va, NULL, from, &parent);
1375 	else
1376 		link = find_va_links(va, root, NULL, &parent);
1377 
1378 	if (link) {
1379 		link_va_augment(va, root, parent, link, head);
1380 		augment_tree_propagate_from(va);
1381 	}
1382 }
1383 
1384 /*
1385  * Merge de-allocated chunk of VA memory with previous
1386  * and next free blocks. If coalesce is not done a new
1387  * free area is inserted. If VA has been merged, it is
1388  * freed.
1389  *
1390  * Please note, it can return NULL in case of overlap
1391  * ranges, followed by WARN() report. Despite it is a
1392  * buggy behaviour, a system can be alive and keep
1393  * ongoing.
1394  */
1395 static __always_inline struct vmap_area *
1396 __merge_or_add_vmap_area(struct vmap_area *va,
1397 	struct rb_root *root, struct list_head *head, bool augment)
1398 {
1399 	struct vmap_area *sibling;
1400 	struct list_head *next;
1401 	struct rb_node **link;
1402 	struct rb_node *parent;
1403 	bool merged = false;
1404 
1405 	/*
1406 	 * Find a place in the tree where VA potentially will be
1407 	 * inserted, unless it is merged with its sibling/siblings.
1408 	 */
1409 	link = find_va_links(va, root, NULL, &parent);
1410 	if (!link)
1411 		return NULL;
1412 
1413 	/*
1414 	 * Get next node of VA to check if merging can be done.
1415 	 */
1416 	next = get_va_next_sibling(parent, link);
1417 	if (unlikely(next == NULL))
1418 		goto insert;
1419 
1420 	/*
1421 	 * start            end
1422 	 * |                |
1423 	 * |<------VA------>|<-----Next----->|
1424 	 *                  |                |
1425 	 *                  start            end
1426 	 */
1427 	if (next != head) {
1428 		sibling = list_entry(next, struct vmap_area, list);
1429 		if (sibling->va_start == va->va_end) {
1430 			sibling->va_start = va->va_start;
1431 
1432 			/* Free vmap_area object. */
1433 			kmem_cache_free(vmap_area_cachep, va);
1434 
1435 			/* Point to the new merged area. */
1436 			va = sibling;
1437 			merged = true;
1438 		}
1439 	}
1440 
1441 	/*
1442 	 * start            end
1443 	 * |                |
1444 	 * |<-----Prev----->|<------VA------>|
1445 	 *                  |                |
1446 	 *                  start            end
1447 	 */
1448 	if (next->prev != head) {
1449 		sibling = list_entry(next->prev, struct vmap_area, list);
1450 		if (sibling->va_end == va->va_start) {
1451 			/*
1452 			 * If both neighbors are coalesced, it is important
1453 			 * to unlink the "next" node first, followed by merging
1454 			 * with "previous" one. Otherwise the tree might not be
1455 			 * fully populated if a sibling's augmented value is
1456 			 * "normalized" because of rotation operations.
1457 			 */
1458 			if (merged)
1459 				__unlink_va(va, root, augment);
1460 
1461 			sibling->va_end = va->va_end;
1462 
1463 			/* Free vmap_area object. */
1464 			kmem_cache_free(vmap_area_cachep, va);
1465 
1466 			/* Point to the new merged area. */
1467 			va = sibling;
1468 			merged = true;
1469 		}
1470 	}
1471 
1472 insert:
1473 	if (!merged)
1474 		__link_va(va, root, parent, link, head, augment);
1475 
1476 	return va;
1477 }
1478 
1479 static __always_inline struct vmap_area *
1480 merge_or_add_vmap_area(struct vmap_area *va,
1481 	struct rb_root *root, struct list_head *head)
1482 {
1483 	return __merge_or_add_vmap_area(va, root, head, false);
1484 }
1485 
1486 static __always_inline struct vmap_area *
1487 merge_or_add_vmap_area_augment(struct vmap_area *va,
1488 	struct rb_root *root, struct list_head *head)
1489 {
1490 	va = __merge_or_add_vmap_area(va, root, head, true);
1491 	if (va)
1492 		augment_tree_propagate_from(va);
1493 
1494 	return va;
1495 }
1496 
1497 static __always_inline bool
1498 is_within_this_va(struct vmap_area *va, unsigned long size,
1499 	unsigned long align, unsigned long vstart)
1500 {
1501 	unsigned long nva_start_addr;
1502 
1503 	if (va->va_start > vstart)
1504 		nva_start_addr = ALIGN(va->va_start, align);
1505 	else
1506 		nva_start_addr = ALIGN(vstart, align);
1507 
1508 	/* Can be overflowed due to big size or alignment. */
1509 	if (nva_start_addr + size < nva_start_addr ||
1510 			nva_start_addr < vstart)
1511 		return false;
1512 
1513 	return (nva_start_addr + size <= va->va_end);
1514 }
1515 
1516 /*
1517  * Find the first free block(lowest start address) in the tree,
1518  * that will accomplish the request corresponding to passing
1519  * parameters. Please note, with an alignment bigger than PAGE_SIZE,
1520  * a search length is adjusted to account for worst case alignment
1521  * overhead.
1522  */
1523 static __always_inline struct vmap_area *
1524 find_vmap_lowest_match(struct rb_root *root, unsigned long size,
1525 	unsigned long align, unsigned long vstart, bool adjust_search_size)
1526 {
1527 	struct vmap_area *va;
1528 	struct rb_node *node;
1529 	unsigned long length;
1530 
1531 	/* Start from the root. */
1532 	node = root->rb_node;
1533 
1534 	/* Adjust the search size for alignment overhead. */
1535 	length = adjust_search_size ? size + align - 1 : size;
1536 
1537 	while (node) {
1538 		va = rb_entry(node, struct vmap_area, rb_node);
1539 
1540 		if (get_subtree_max_size(node->rb_left) >= length &&
1541 				vstart < va->va_start) {
1542 			node = node->rb_left;
1543 		} else {
1544 			if (is_within_this_va(va, size, align, vstart))
1545 				return va;
1546 
1547 			/*
1548 			 * Does not make sense to go deeper towards the right
1549 			 * sub-tree if it does not have a free block that is
1550 			 * equal or bigger to the requested search length.
1551 			 */
1552 			if (get_subtree_max_size(node->rb_right) >= length) {
1553 				node = node->rb_right;
1554 				continue;
1555 			}
1556 
1557 			/*
1558 			 * OK. We roll back and find the first right sub-tree,
1559 			 * that will satisfy the search criteria. It can happen
1560 			 * due to "vstart" restriction or an alignment overhead
1561 			 * that is bigger then PAGE_SIZE.
1562 			 */
1563 			while ((node = rb_parent(node))) {
1564 				va = rb_entry(node, struct vmap_area, rb_node);
1565 				if (is_within_this_va(va, size, align, vstart))
1566 					return va;
1567 
1568 				if (get_subtree_max_size(node->rb_right) >= length &&
1569 						vstart <= va->va_start) {
1570 					/*
1571 					 * Shift the vstart forward. Please note, we update it with
1572 					 * parent's start address adding "1" because we do not want
1573 					 * to enter same sub-tree after it has already been checked
1574 					 * and no suitable free block found there.
1575 					 */
1576 					vstart = va->va_start + 1;
1577 					node = node->rb_right;
1578 					break;
1579 				}
1580 			}
1581 		}
1582 	}
1583 
1584 	return NULL;
1585 }
1586 
1587 #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
1588 #include <linux/random.h>
1589 
1590 static struct vmap_area *
1591 find_vmap_lowest_linear_match(struct list_head *head, unsigned long size,
1592 	unsigned long align, unsigned long vstart)
1593 {
1594 	struct vmap_area *va;
1595 
1596 	list_for_each_entry(va, head, list) {
1597 		if (!is_within_this_va(va, size, align, vstart))
1598 			continue;
1599 
1600 		return va;
1601 	}
1602 
1603 	return NULL;
1604 }
1605 
1606 static void
1607 find_vmap_lowest_match_check(struct rb_root *root, struct list_head *head,
1608 			     unsigned long size, unsigned long align)
1609 {
1610 	struct vmap_area *va_1, *va_2;
1611 	unsigned long vstart;
1612 	unsigned int rnd;
1613 
1614 	get_random_bytes(&rnd, sizeof(rnd));
1615 	vstart = VMALLOC_START + rnd;
1616 
1617 	va_1 = find_vmap_lowest_match(root, size, align, vstart, false);
1618 	va_2 = find_vmap_lowest_linear_match(head, size, align, vstart);
1619 
1620 	if (va_1 != va_2)
1621 		pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
1622 			va_1, va_2, vstart);
1623 }
1624 #endif
1625 
1626 enum fit_type {
1627 	NOTHING_FIT = 0,
1628 	FL_FIT_TYPE = 1,	/* full fit */
1629 	LE_FIT_TYPE = 2,	/* left edge fit */
1630 	RE_FIT_TYPE = 3,	/* right edge fit */
1631 	NE_FIT_TYPE = 4		/* no edge fit */
1632 };
1633 
1634 static __always_inline enum fit_type
1635 classify_va_fit_type(struct vmap_area *va,
1636 	unsigned long nva_start_addr, unsigned long size)
1637 {
1638 	enum fit_type type;
1639 
1640 	/* Check if it is within VA. */
1641 	if (nva_start_addr < va->va_start ||
1642 			nva_start_addr + size > va->va_end)
1643 		return NOTHING_FIT;
1644 
1645 	/* Now classify. */
1646 	if (va->va_start == nva_start_addr) {
1647 		if (va->va_end == nva_start_addr + size)
1648 			type = FL_FIT_TYPE;
1649 		else
1650 			type = LE_FIT_TYPE;
1651 	} else if (va->va_end == nva_start_addr + size) {
1652 		type = RE_FIT_TYPE;
1653 	} else {
1654 		type = NE_FIT_TYPE;
1655 	}
1656 
1657 	return type;
1658 }
1659 
1660 static __always_inline int
1661 va_clip(struct rb_root *root, struct list_head *head,
1662 		struct vmap_area *va, unsigned long nva_start_addr,
1663 		unsigned long size)
1664 {
1665 	struct vmap_area *lva = NULL;
1666 	enum fit_type type = classify_va_fit_type(va, nva_start_addr, size);
1667 
1668 	if (type == FL_FIT_TYPE) {
1669 		/*
1670 		 * No need to split VA, it fully fits.
1671 		 *
1672 		 * |               |
1673 		 * V      NVA      V
1674 		 * |---------------|
1675 		 */
1676 		unlink_va_augment(va, root);
1677 		kmem_cache_free(vmap_area_cachep, va);
1678 	} else if (type == LE_FIT_TYPE) {
1679 		/*
1680 		 * Split left edge of fit VA.
1681 		 *
1682 		 * |       |
1683 		 * V  NVA  V   R
1684 		 * |-------|-------|
1685 		 */
1686 		va->va_start += size;
1687 	} else if (type == RE_FIT_TYPE) {
1688 		/*
1689 		 * Split right edge of fit VA.
1690 		 *
1691 		 *         |       |
1692 		 *     L   V  NVA  V
1693 		 * |-------|-------|
1694 		 */
1695 		va->va_end = nva_start_addr;
1696 	} else if (type == NE_FIT_TYPE) {
1697 		/*
1698 		 * Split no edge of fit VA.
1699 		 *
1700 		 *     |       |
1701 		 *   L V  NVA  V R
1702 		 * |---|-------|---|
1703 		 */
1704 		lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
1705 		if (unlikely(!lva)) {
1706 			/*
1707 			 * For percpu allocator we do not do any pre-allocation
1708 			 * and leave it as it is. The reason is it most likely
1709 			 * never ends up with NE_FIT_TYPE splitting. In case of
1710 			 * percpu allocations offsets and sizes are aligned to
1711 			 * fixed align request, i.e. RE_FIT_TYPE and FL_FIT_TYPE
1712 			 * are its main fitting cases.
1713 			 *
1714 			 * There are a few exceptions though, as an example it is
1715 			 * a first allocation (early boot up) when we have "one"
1716 			 * big free space that has to be split.
1717 			 *
1718 			 * Also we can hit this path in case of regular "vmap"
1719 			 * allocations, if "this" current CPU was not preloaded.
1720 			 * See the comment in alloc_vmap_area() why. If so, then
1721 			 * GFP_NOWAIT is used instead to get an extra object for
1722 			 * split purpose. That is rare and most time does not
1723 			 * occur.
1724 			 *
1725 			 * What happens if an allocation gets failed. Basically,
1726 			 * an "overflow" path is triggered to purge lazily freed
1727 			 * areas to free some memory, then, the "retry" path is
1728 			 * triggered to repeat one more time. See more details
1729 			 * in alloc_vmap_area() function.
1730 			 */
1731 			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
1732 			if (!lva)
1733 				return -1;
1734 		}
1735 
1736 		/*
1737 		 * Build the remainder.
1738 		 */
1739 		lva->va_start = va->va_start;
1740 		lva->va_end = nva_start_addr;
1741 
1742 		/*
1743 		 * Shrink this VA to remaining size.
1744 		 */
1745 		va->va_start = nva_start_addr + size;
1746 	} else {
1747 		return -1;
1748 	}
1749 
1750 	if (type != FL_FIT_TYPE) {
1751 		augment_tree_propagate_from(va);
1752 
1753 		if (lva)	/* type == NE_FIT_TYPE */
1754 			insert_vmap_area_augment(lva, &va->rb_node, root, head);
1755 	}
1756 
1757 	return 0;
1758 }
1759 
1760 static unsigned long
1761 va_alloc(struct vmap_area *va,
1762 		struct rb_root *root, struct list_head *head,
1763 		unsigned long size, unsigned long align,
1764 		unsigned long vstart, unsigned long vend)
1765 {
1766 	unsigned long nva_start_addr;
1767 	int ret;
1768 
1769 	if (va->va_start > vstart)
1770 		nva_start_addr = ALIGN(va->va_start, align);
1771 	else
1772 		nva_start_addr = ALIGN(vstart, align);
1773 
1774 	/* Check the "vend" restriction. */
1775 	if (nva_start_addr + size > vend)
1776 		return vend;
1777 
1778 	/* Update the free vmap_area. */
1779 	ret = va_clip(root, head, va, nva_start_addr, size);
1780 	if (WARN_ON_ONCE(ret))
1781 		return vend;
1782 
1783 	return nva_start_addr;
1784 }
1785 
1786 /*
1787  * Returns a start address of the newly allocated area, if success.
1788  * Otherwise a vend is returned that indicates failure.
1789  */
1790 static __always_inline unsigned long
1791 __alloc_vmap_area(struct rb_root *root, struct list_head *head,
1792 	unsigned long size, unsigned long align,
1793 	unsigned long vstart, unsigned long vend)
1794 {
1795 	bool adjust_search_size = true;
1796 	unsigned long nva_start_addr;
1797 	struct vmap_area *va;
1798 
1799 	/*
1800 	 * Do not adjust when:
1801 	 *   a) align <= PAGE_SIZE, because it does not make any sense.
1802 	 *      All blocks(their start addresses) are at least PAGE_SIZE
1803 	 *      aligned anyway;
1804 	 *   b) a short range where a requested size corresponds to exactly
1805 	 *      specified [vstart:vend] interval and an alignment > PAGE_SIZE.
1806 	 *      With adjusted search length an allocation would not succeed.
1807 	 */
1808 	if (align <= PAGE_SIZE || (align > PAGE_SIZE && (vend - vstart) == size))
1809 		adjust_search_size = false;
1810 
1811 	va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
1812 	if (unlikely(!va))
1813 		return vend;
1814 
1815 	nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend);
1816 	if (nva_start_addr == vend)
1817 		return vend;
1818 
1819 #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
1820 	find_vmap_lowest_match_check(root, head, size, align);
1821 #endif
1822 
1823 	return nva_start_addr;
1824 }
1825 
1826 /*
1827  * Free a region of KVA allocated by alloc_vmap_area
1828  */
1829 static void free_vmap_area(struct vmap_area *va)
1830 {
1831 	struct vmap_node *vn = addr_to_node(va->va_start);
1832 
1833 	/*
1834 	 * Remove from the busy tree/list.
1835 	 */
1836 	spin_lock(&vn->busy.lock);
1837 	unlink_va(va, &vn->busy.root);
1838 	spin_unlock(&vn->busy.lock);
1839 
1840 	/*
1841 	 * Insert/Merge it back to the free tree/list.
1842 	 */
1843 	spin_lock(&free_vmap_area_lock);
1844 	merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list);
1845 	spin_unlock(&free_vmap_area_lock);
1846 }
1847 
1848 static inline void
1849 preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node)
1850 {
1851 	struct vmap_area *va = NULL, *tmp;
1852 
1853 	/*
1854 	 * Preload this CPU with one extra vmap_area object. It is used
1855 	 * when fit type of free area is NE_FIT_TYPE. It guarantees that
1856 	 * a CPU that does an allocation is preloaded.
1857 	 *
1858 	 * We do it in non-atomic context, thus it allows us to use more
1859 	 * permissive allocation masks to be more stable under low memory
1860 	 * condition and high memory pressure.
1861 	 */
1862 	if (!this_cpu_read(ne_fit_preload_node))
1863 		va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1864 
1865 	spin_lock(lock);
1866 
1867 	tmp = NULL;
1868 	if (va && !__this_cpu_try_cmpxchg(ne_fit_preload_node, &tmp, va))
1869 		kmem_cache_free(vmap_area_cachep, va);
1870 }
1871 
1872 static struct vmap_pool *
1873 size_to_va_pool(struct vmap_node *vn, unsigned long size)
1874 {
1875 	unsigned int idx = (size - 1) / PAGE_SIZE;
1876 
1877 	if (idx < MAX_VA_SIZE_PAGES)
1878 		return &vn->pool[idx];
1879 
1880 	return NULL;
1881 }
1882 
1883 static bool
1884 node_pool_add_va(struct vmap_node *n, struct vmap_area *va)
1885 {
1886 	struct vmap_pool *vp;
1887 
1888 	vp = size_to_va_pool(n, va_size(va));
1889 	if (!vp)
1890 		return false;
1891 
1892 	spin_lock(&n->pool_lock);
1893 	list_add(&va->list, &vp->head);
1894 	WRITE_ONCE(vp->len, vp->len + 1);
1895 	spin_unlock(&n->pool_lock);
1896 
1897 	return true;
1898 }
1899 
1900 static struct vmap_area *
1901 node_pool_del_va(struct vmap_node *vn, unsigned long size,
1902 		unsigned long align, unsigned long vstart,
1903 		unsigned long vend)
1904 {
1905 	struct vmap_area *va = NULL;
1906 	struct vmap_pool *vp;
1907 	int err = 0;
1908 
1909 	vp = size_to_va_pool(vn, size);
1910 	if (!vp || list_empty(&vp->head))
1911 		return NULL;
1912 
1913 	spin_lock(&vn->pool_lock);
1914 	if (!list_empty(&vp->head)) {
1915 		va = list_first_entry(&vp->head, struct vmap_area, list);
1916 
1917 		if (IS_ALIGNED(va->va_start, align)) {
1918 			/*
1919 			 * Do some sanity check and emit a warning
1920 			 * if one of below checks detects an error.
1921 			 */
1922 			err |= (va_size(va) != size);
1923 			err |= (va->va_start < vstart);
1924 			err |= (va->va_end > vend);
1925 
1926 			if (!WARN_ON_ONCE(err)) {
1927 				list_del_init(&va->list);
1928 				WRITE_ONCE(vp->len, vp->len - 1);
1929 			} else {
1930 				va = NULL;
1931 			}
1932 		} else {
1933 			list_move_tail(&va->list, &vp->head);
1934 			va = NULL;
1935 		}
1936 	}
1937 	spin_unlock(&vn->pool_lock);
1938 
1939 	return va;
1940 }
1941 
1942 static struct vmap_area *
1943 node_alloc(unsigned long size, unsigned long align,
1944 		unsigned long vstart, unsigned long vend,
1945 		unsigned long *addr, unsigned int *vn_id)
1946 {
1947 	struct vmap_area *va;
1948 
1949 	*vn_id = 0;
1950 	*addr = vend;
1951 
1952 	/*
1953 	 * Fallback to a global heap if not vmalloc or there
1954 	 * is only one node.
1955 	 */
1956 	if (vstart != VMALLOC_START || vend != VMALLOC_END ||
1957 			nr_vmap_nodes == 1)
1958 		return NULL;
1959 
1960 	*vn_id = raw_smp_processor_id() % nr_vmap_nodes;
1961 	va = node_pool_del_va(id_to_node(*vn_id), size, align, vstart, vend);
1962 	*vn_id = encode_vn_id(*vn_id);
1963 
1964 	if (va)
1965 		*addr = va->va_start;
1966 
1967 	return va;
1968 }
1969 
1970 static inline void setup_vmalloc_vm(struct vm_struct *vm,
1971 	struct vmap_area *va, unsigned long flags, const void *caller)
1972 {
1973 	vm->flags = flags;
1974 	vm->addr = (void *)va->va_start;
1975 	vm->size = va_size(va);
1976 	vm->caller = caller;
1977 	va->vm = vm;
1978 }
1979 
1980 /*
1981  * Allocate a region of KVA of the specified size and alignment, within the
1982  * vstart and vend. If vm is passed in, the two will also be bound.
1983  */
1984 static struct vmap_area *alloc_vmap_area(unsigned long size,
1985 				unsigned long align,
1986 				unsigned long vstart, unsigned long vend,
1987 				int node, gfp_t gfp_mask,
1988 				unsigned long va_flags, struct vm_struct *vm)
1989 {
1990 	struct vmap_node *vn;
1991 	struct vmap_area *va;
1992 	unsigned long freed;
1993 	unsigned long addr;
1994 	unsigned int vn_id;
1995 	int purged = 0;
1996 	int ret;
1997 
1998 	if (unlikely(!size || offset_in_page(size) || !is_power_of_2(align)))
1999 		return ERR_PTR(-EINVAL);
2000 
2001 	if (unlikely(!vmap_initialized))
2002 		return ERR_PTR(-EBUSY);
2003 
2004 	might_sleep();
2005 
2006 	/*
2007 	 * If a VA is obtained from a global heap(if it fails here)
2008 	 * it is anyway marked with this "vn_id" so it is returned
2009 	 * to this pool's node later. Such way gives a possibility
2010 	 * to populate pools based on users demand.
2011 	 *
2012 	 * On success a ready to go VA is returned.
2013 	 */
2014 	va = node_alloc(size, align, vstart, vend, &addr, &vn_id);
2015 	if (!va) {
2016 		gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
2017 
2018 		va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
2019 		if (unlikely(!va))
2020 			return ERR_PTR(-ENOMEM);
2021 
2022 		/*
2023 		 * Only scan the relevant parts containing pointers to other objects
2024 		 * to avoid false negatives.
2025 		 */
2026 		kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);
2027 	}
2028 
2029 retry:
2030 	if (addr == vend) {
2031 		preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
2032 		addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
2033 			size, align, vstart, vend);
2034 		spin_unlock(&free_vmap_area_lock);
2035 	}
2036 
2037 	trace_alloc_vmap_area(addr, size, align, vstart, vend, addr == vend);
2038 
2039 	/*
2040 	 * If an allocation fails, the "vend" address is
2041 	 * returned. Therefore trigger the overflow path.
2042 	 */
2043 	if (unlikely(addr == vend))
2044 		goto overflow;
2045 
2046 	va->va_start = addr;
2047 	va->va_end = addr + size;
2048 	va->vm = NULL;
2049 	va->flags = (va_flags | vn_id);
2050 
2051 	if (vm) {
2052 		vm->addr = (void *)va->va_start;
2053 		vm->size = va_size(va);
2054 		va->vm = vm;
2055 	}
2056 
2057 	vn = addr_to_node(va->va_start);
2058 
2059 	spin_lock(&vn->busy.lock);
2060 	insert_vmap_area(va, &vn->busy.root, &vn->busy.head);
2061 	spin_unlock(&vn->busy.lock);
2062 
2063 	BUG_ON(!IS_ALIGNED(va->va_start, align));
2064 	BUG_ON(va->va_start < vstart);
2065 	BUG_ON(va->va_end > vend);
2066 
2067 	ret = kasan_populate_vmalloc(addr, size);
2068 	if (ret) {
2069 		free_vmap_area(va);
2070 		return ERR_PTR(ret);
2071 	}
2072 
2073 	return va;
2074 
2075 overflow:
2076 	if (!purged) {
2077 		reclaim_and_purge_vmap_areas();
2078 		purged = 1;
2079 		goto retry;
2080 	}
2081 
2082 	freed = 0;
2083 	blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
2084 
2085 	if (freed > 0) {
2086 		purged = 0;
2087 		goto retry;
2088 	}
2089 
2090 	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
2091 		pr_warn("vmalloc_node_range for size %lu failed: Address range restricted to %#lx - %#lx\n",
2092 				size, vstart, vend);
2093 
2094 	kmem_cache_free(vmap_area_cachep, va);
2095 	return ERR_PTR(-EBUSY);
2096 }
2097 
2098 int register_vmap_purge_notifier(struct notifier_block *nb)
2099 {
2100 	return blocking_notifier_chain_register(&vmap_notify_list, nb);
2101 }
2102 EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);
2103 
2104 int unregister_vmap_purge_notifier(struct notifier_block *nb)
2105 {
2106 	return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
2107 }
2108 EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
2109 
2110 /*
2111  * lazy_max_pages is the maximum amount of virtual address space we gather up
2112  * before attempting to purge with a TLB flush.
2113  *
2114  * There is a tradeoff here: a larger number will cover more kernel page tables
2115  * and take slightly longer to purge, but it will linearly reduce the number of
2116  * global TLB flushes that must be performed. It would seem natural to scale
2117  * this number up linearly with the number of CPUs (because vmapping activity
2118  * could also scale linearly with the number of CPUs), however it is likely
2119  * that in practice, workloads might be constrained in other ways that mean
2120  * vmap activity will not scale linearly with CPUs. Also, I want to be
2121  * conservative and not introduce a big latency on huge systems, so go with
2122  * a less aggressive log scale. It will still be an improvement over the old
2123  * code, and it will be simple to change the scale factor if we find that it
2124  * becomes a problem on bigger systems.
2125  */
2126 static unsigned long lazy_max_pages(void)
2127 {
2128 	unsigned int log;
2129 
2130 	log = fls(num_online_cpus());
2131 
2132 	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
2133 }
2134 
2135 static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);
2136 
2137 /*
2138  * Serialize vmap purging.  There is no actual critical section protected
2139  * by this lock, but we want to avoid concurrent calls for performance
2140  * reasons and to make the pcpu_get_vm_areas more deterministic.
2141  */
2142 static DEFINE_MUTEX(vmap_purge_lock);
2143 
2144 /* for per-CPU blocks */
2145 static void purge_fragmented_blocks_allcpus(void);
2146 static cpumask_t purge_nodes;
2147 
2148 static void
2149 reclaim_list_global(struct list_head *head)
2150 {
2151 	struct vmap_area *va, *n;
2152 
2153 	if (list_empty(head))
2154 		return;
2155 
2156 	spin_lock(&free_vmap_area_lock);
2157 	list_for_each_entry_safe(va, n, head, list)
2158 		merge_or_add_vmap_area_augment(va,
2159 			&free_vmap_area_root, &free_vmap_area_list);
2160 	spin_unlock(&free_vmap_area_lock);
2161 }
2162 
2163 static void
2164 decay_va_pool_node(struct vmap_node *vn, bool full_decay)
2165 {
2166 	LIST_HEAD(decay_list);
2167 	struct rb_root decay_root = RB_ROOT;
2168 	struct vmap_area *va, *nva;
2169 	unsigned long n_decay;
2170 	int i;
2171 
2172 	for (i = 0; i < MAX_VA_SIZE_PAGES; i++) {
2173 		LIST_HEAD(tmp_list);
2174 
2175 		if (list_empty(&vn->pool[i].head))
2176 			continue;
2177 
2178 		/* Detach the pool, so no-one can access it. */
2179 		spin_lock(&vn->pool_lock);
2180 		list_replace_init(&vn->pool[i].head, &tmp_list);
2181 		spin_unlock(&vn->pool_lock);
2182 
2183 		if (full_decay)
2184 			WRITE_ONCE(vn->pool[i].len, 0);
2185 
2186 		/* Decay a pool by ~25% out of left objects. */
2187 		n_decay = vn->pool[i].len >> 2;
2188 
2189 		list_for_each_entry_safe(va, nva, &tmp_list, list) {
2190 			list_del_init(&va->list);
2191 			merge_or_add_vmap_area(va, &decay_root, &decay_list);
2192 
2193 			if (!full_decay) {
2194 				WRITE_ONCE(vn->pool[i].len, vn->pool[i].len - 1);
2195 
2196 				if (!--n_decay)
2197 					break;
2198 			}
2199 		}
2200 
2201 		/*
2202 		 * Attach the pool back if it has been partly decayed.
2203 		 * Please note, it is supposed that nobody(other contexts)
2204 		 * can populate the pool therefore a simple list replace
2205 		 * operation takes place here.
2206 		 */
2207 		if (!full_decay && !list_empty(&tmp_list)) {
2208 			spin_lock(&vn->pool_lock);
2209 			list_replace_init(&tmp_list, &vn->pool[i].head);
2210 			spin_unlock(&vn->pool_lock);
2211 		}
2212 	}
2213 
2214 	reclaim_list_global(&decay_list);
2215 }
2216 
2217 static void
2218 kasan_release_vmalloc_node(struct vmap_node *vn)
2219 {
2220 	struct vmap_area *va;
2221 	unsigned long start, end;
2222 
2223 	start = list_first_entry(&vn->purge_list, struct vmap_area, list)->va_start;
2224 	end = list_last_entry(&vn->purge_list, struct vmap_area, list)->va_end;
2225 
2226 	list_for_each_entry(va, &vn->purge_list, list) {
2227 		if (is_vmalloc_or_module_addr((void *) va->va_start))
2228 			kasan_release_vmalloc(va->va_start, va->va_end,
2229 				va->va_start, va->va_end,
2230 				KASAN_VMALLOC_PAGE_RANGE);
2231 	}
2232 
2233 	kasan_release_vmalloc(start, end, start, end, KASAN_VMALLOC_TLB_FLUSH);
2234 }
2235 
2236 static void purge_vmap_node(struct work_struct *work)
2237 {
2238 	struct vmap_node *vn = container_of(work,
2239 		struct vmap_node, purge_work);
2240 	unsigned long nr_purged_pages = 0;
2241 	struct vmap_area *va, *n_va;
2242 	LIST_HEAD(local_list);
2243 
2244 	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
2245 		kasan_release_vmalloc_node(vn);
2246 
2247 	vn->nr_purged = 0;
2248 
2249 	list_for_each_entry_safe(va, n_va, &vn->purge_list, list) {
2250 		unsigned long nr = va_size(va) >> PAGE_SHIFT;
2251 		unsigned int vn_id = decode_vn_id(va->flags);
2252 
2253 		list_del_init(&va->list);
2254 
2255 		nr_purged_pages += nr;
2256 		vn->nr_purged++;
2257 
2258 		if (is_vn_id_valid(vn_id) && !vn->skip_populate)
2259 			if (node_pool_add_va(vn, va))
2260 				continue;
2261 
2262 		/* Go back to global. */
2263 		list_add(&va->list, &local_list);
2264 	}
2265 
2266 	atomic_long_sub(nr_purged_pages, &vmap_lazy_nr);
2267 
2268 	reclaim_list_global(&local_list);
2269 }
2270 
2271 /*
2272  * Purges all lazily-freed vmap areas.
2273  */
2274 static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end,
2275 		bool full_pool_decay)
2276 {
2277 	unsigned long nr_purged_areas = 0;
2278 	unsigned int nr_purge_helpers;
2279 	unsigned int nr_purge_nodes;
2280 	struct vmap_node *vn;
2281 	int i;
2282 
2283 	lockdep_assert_held(&vmap_purge_lock);
2284 
2285 	/*
2286 	 * Use cpumask to mark which node has to be processed.
2287 	 */
2288 	purge_nodes = CPU_MASK_NONE;
2289 
2290 	for (i = 0; i < nr_vmap_nodes; i++) {
2291 		vn = &vmap_nodes[i];
2292 
2293 		INIT_LIST_HEAD(&vn->purge_list);
2294 		vn->skip_populate = full_pool_decay;
2295 		decay_va_pool_node(vn, full_pool_decay);
2296 
2297 		if (RB_EMPTY_ROOT(&vn->lazy.root))
2298 			continue;
2299 
2300 		spin_lock(&vn->lazy.lock);
2301 		WRITE_ONCE(vn->lazy.root.rb_node, NULL);
2302 		list_replace_init(&vn->lazy.head, &vn->purge_list);
2303 		spin_unlock(&vn->lazy.lock);
2304 
2305 		start = min(start, list_first_entry(&vn->purge_list,
2306 			struct vmap_area, list)->va_start);
2307 
2308 		end = max(end, list_last_entry(&vn->purge_list,
2309 			struct vmap_area, list)->va_end);
2310 
2311 		cpumask_set_cpu(i, &purge_nodes);
2312 	}
2313 
2314 	nr_purge_nodes = cpumask_weight(&purge_nodes);
2315 	if (nr_purge_nodes > 0) {
2316 		flush_tlb_kernel_range(start, end);
2317 
2318 		/* One extra worker is per a lazy_max_pages() full set minus one. */
2319 		nr_purge_helpers = atomic_long_read(&vmap_lazy_nr) / lazy_max_pages();
2320 		nr_purge_helpers = clamp(nr_purge_helpers, 1U, nr_purge_nodes) - 1;
2321 
2322 		for_each_cpu(i, &purge_nodes) {
2323 			vn = &vmap_nodes[i];
2324 
2325 			if (nr_purge_helpers > 0) {
2326 				INIT_WORK(&vn->purge_work, purge_vmap_node);
2327 
2328 				if (cpumask_test_cpu(i, cpu_online_mask))
2329 					schedule_work_on(i, &vn->purge_work);
2330 				else
2331 					schedule_work(&vn->purge_work);
2332 
2333 				nr_purge_helpers--;
2334 			} else {
2335 				vn->purge_work.func = NULL;
2336 				purge_vmap_node(&vn->purge_work);
2337 				nr_purged_areas += vn->nr_purged;
2338 			}
2339 		}
2340 
2341 		for_each_cpu(i, &purge_nodes) {
2342 			vn = &vmap_nodes[i];
2343 
2344 			if (vn->purge_work.func) {
2345 				flush_work(&vn->purge_work);
2346 				nr_purged_areas += vn->nr_purged;
2347 			}
2348 		}
2349 	}
2350 
2351 	trace_purge_vmap_area_lazy(start, end, nr_purged_areas);
2352 	return nr_purged_areas > 0;
2353 }
2354 
2355 /*
2356  * Reclaim vmap areas by purging fragmented blocks and purge_vmap_area_list.
2357  */
2358 static void reclaim_and_purge_vmap_areas(void)
2359 
2360 {
2361 	mutex_lock(&vmap_purge_lock);
2362 	purge_fragmented_blocks_allcpus();
2363 	__purge_vmap_area_lazy(ULONG_MAX, 0, true);
2364 	mutex_unlock(&vmap_purge_lock);
2365 }
2366 
2367 static void drain_vmap_area_work(struct work_struct *work)
2368 {
2369 	mutex_lock(&vmap_purge_lock);
2370 	__purge_vmap_area_lazy(ULONG_MAX, 0, false);
2371 	mutex_unlock(&vmap_purge_lock);
2372 }
2373 
2374 /*
2375  * Free a vmap area, caller ensuring that the area has been unmapped,
2376  * unlinked and flush_cache_vunmap had been called for the correct
2377  * range previously.
2378  */
2379 static void free_vmap_area_noflush(struct vmap_area *va)
2380 {
2381 	unsigned long nr_lazy_max = lazy_max_pages();
2382 	unsigned long va_start = va->va_start;
2383 	unsigned int vn_id = decode_vn_id(va->flags);
2384 	struct vmap_node *vn;
2385 	unsigned long nr_lazy;
2386 
2387 	if (WARN_ON_ONCE(!list_empty(&va->list)))
2388 		return;
2389 
2390 	nr_lazy = atomic_long_add_return(va_size(va) >> PAGE_SHIFT,
2391 					 &vmap_lazy_nr);
2392 
2393 	/*
2394 	 * If it was request by a certain node we would like to
2395 	 * return it to that node, i.e. its pool for later reuse.
2396 	 */
2397 	vn = is_vn_id_valid(vn_id) ?
2398 		id_to_node(vn_id):addr_to_node(va->va_start);
2399 
2400 	spin_lock(&vn->lazy.lock);
2401 	insert_vmap_area(va, &vn->lazy.root, &vn->lazy.head);
2402 	spin_unlock(&vn->lazy.lock);
2403 
2404 	trace_free_vmap_area_noflush(va_start, nr_lazy, nr_lazy_max);
2405 
2406 	/* After this point, we may free va at any time */
2407 	if (unlikely(nr_lazy > nr_lazy_max))
2408 		schedule_work(&drain_vmap_work);
2409 }
2410 
2411 /*
2412  * Free and unmap a vmap area
2413  */
2414 static void free_unmap_vmap_area(struct vmap_area *va)
2415 {
2416 	flush_cache_vunmap(va->va_start, va->va_end);
2417 	vunmap_range_noflush(va->va_start, va->va_end);
2418 	if (debug_pagealloc_enabled_static())
2419 		flush_tlb_kernel_range(va->va_start, va->va_end);
2420 
2421 	free_vmap_area_noflush(va);
2422 }
2423 
2424 struct vmap_area *find_vmap_area(unsigned long addr)
2425 {
2426 	struct vmap_node *vn;
2427 	struct vmap_area *va;
2428 	int i, j;
2429 
2430 	if (unlikely(!vmap_initialized))
2431 		return NULL;
2432 
2433 	/*
2434 	 * An addr_to_node_id(addr) converts an address to a node index
2435 	 * where a VA is located. If VA spans several zones and passed
2436 	 * addr is not the same as va->va_start, what is not common, we
2437 	 * may need to scan extra nodes. See an example:
2438 	 *
2439 	 *      <----va---->
2440 	 * -|-----|-----|-----|-----|-
2441 	 *     1     2     0     1
2442 	 *
2443 	 * VA resides in node 1 whereas it spans 1, 2 an 0. If passed
2444 	 * addr is within 2 or 0 nodes we should do extra work.
2445 	 */
2446 	i = j = addr_to_node_id(addr);
2447 	do {
2448 		vn = &vmap_nodes[i];
2449 
2450 		spin_lock(&vn->busy.lock);
2451 		va = __find_vmap_area(addr, &vn->busy.root);
2452 		spin_unlock(&vn->busy.lock);
2453 
2454 		if (va)
2455 			return va;
2456 	} while ((i = (i + 1) % nr_vmap_nodes) != j);
2457 
2458 	return NULL;
2459 }
2460 
2461 static struct vmap_area *find_unlink_vmap_area(unsigned long addr)
2462 {
2463 	struct vmap_node *vn;
2464 	struct vmap_area *va;
2465 	int i, j;
2466 
2467 	/*
2468 	 * Check the comment in the find_vmap_area() about the loop.
2469 	 */
2470 	i = j = addr_to_node_id(addr);
2471 	do {
2472 		vn = &vmap_nodes[i];
2473 
2474 		spin_lock(&vn->busy.lock);
2475 		va = __find_vmap_area(addr, &vn->busy.root);
2476 		if (va)
2477 			unlink_va(va, &vn->busy.root);
2478 		spin_unlock(&vn->busy.lock);
2479 
2480 		if (va)
2481 			return va;
2482 	} while ((i = (i + 1) % nr_vmap_nodes) != j);
2483 
2484 	return NULL;
2485 }
2486 
2487 /*** Per cpu kva allocator ***/
2488 
2489 /*
2490  * vmap space is limited especially on 32 bit architectures. Ensure there is
2491  * room for at least 16 percpu vmap blocks per CPU.
2492  */
2493 /*
2494  * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
2495  * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
2496  * instead (we just need a rough idea)
2497  */
2498 #if BITS_PER_LONG == 32
2499 #define VMALLOC_SPACE		(128UL*1024*1024)
2500 #else
2501 #define VMALLOC_SPACE		(128UL*1024*1024*1024)
2502 #endif
2503 
2504 #define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
2505 #define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
2506 #define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
2507 #define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
2508 #define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
2509 #define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
2510 #define VMAP_BBMAP_BITS		\
2511 		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
2512 		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
2513 			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
2514 
2515 #define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
2516 
2517 /*
2518  * Purge threshold to prevent overeager purging of fragmented blocks for
2519  * regular operations: Purge if vb->free is less than 1/4 of the capacity.
2520  */
2521 #define VMAP_PURGE_THRESHOLD	(VMAP_BBMAP_BITS / 4)
2522 
2523 #define VMAP_RAM		0x1 /* indicates vm_map_ram area*/
2524 #define VMAP_BLOCK		0x2 /* mark out the vmap_block sub-type*/
2525 #define VMAP_FLAGS_MASK		0x3
2526 
2527 struct vmap_block_queue {
2528 	spinlock_t lock;
2529 	struct list_head free;
2530 
2531 	/*
2532 	 * An xarray requires an extra memory dynamically to
2533 	 * be allocated. If it is an issue, we can use rb-tree
2534 	 * instead.
2535 	 */
2536 	struct xarray vmap_blocks;
2537 };
2538 
2539 struct vmap_block {
2540 	spinlock_t lock;
2541 	struct vmap_area *va;
2542 	unsigned long free, dirty;
2543 	DECLARE_BITMAP(used_map, VMAP_BBMAP_BITS);
2544 	unsigned long dirty_min, dirty_max; /*< dirty range */
2545 	struct list_head free_list;
2546 	struct rcu_head rcu_head;
2547 	struct list_head purge;
2548 	unsigned int cpu;
2549 };
2550 
2551 /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
2552 static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
2553 
2554 /*
2555  * In order to fast access to any "vmap_block" associated with a
2556  * specific address, we use a hash.
2557  *
2558  * A per-cpu vmap_block_queue is used in both ways, to serialize
2559  * an access to free block chains among CPUs(alloc path) and it
2560  * also acts as a vmap_block hash(alloc/free paths). It means we
2561  * overload it, since we already have the per-cpu array which is
2562  * used as a hash table. When used as a hash a 'cpu' passed to
2563  * per_cpu() is not actually a CPU but rather a hash index.
2564  *
2565  * A hash function is addr_to_vb_xa() which hashes any address
2566  * to a specific index(in a hash) it belongs to. This then uses a
2567  * per_cpu() macro to access an array with generated index.
2568  *
2569  * An example:
2570  *
2571  *  CPU_1  CPU_2  CPU_0
2572  *    |      |      |
2573  *    V      V      V
2574  * 0     10     20     30     40     50     60
2575  * |------|------|------|------|------|------|...<vmap address space>
2576  *   CPU0   CPU1   CPU2   CPU0   CPU1   CPU2
2577  *
2578  * - CPU_1 invokes vm_unmap_ram(6), 6 belongs to CPU0 zone, thus
2579  *   it access: CPU0/INDEX0 -> vmap_blocks -> xa_lock;
2580  *
2581  * - CPU_2 invokes vm_unmap_ram(11), 11 belongs to CPU1 zone, thus
2582  *   it access: CPU1/INDEX1 -> vmap_blocks -> xa_lock;
2583  *
2584  * - CPU_0 invokes vm_unmap_ram(20), 20 belongs to CPU2 zone, thus
2585  *   it access: CPU2/INDEX2 -> vmap_blocks -> xa_lock.
2586  *
2587  * This technique almost always avoids lock contention on insert/remove,
2588  * however xarray spinlocks protect against any contention that remains.
2589  */
2590 static struct xarray *
2591 addr_to_vb_xa(unsigned long addr)
2592 {
2593 	int index = (addr / VMAP_BLOCK_SIZE) % nr_cpu_ids;
2594 
2595 	/*
2596 	 * Please note, nr_cpu_ids points on a highest set
2597 	 * possible bit, i.e. we never invoke cpumask_next()
2598 	 * if an index points on it which is nr_cpu_ids - 1.
2599 	 */
2600 	if (!cpu_possible(index))
2601 		index = cpumask_next(index, cpu_possible_mask);
2602 
2603 	return &per_cpu(vmap_block_queue, index).vmap_blocks;
2604 }
2605 
2606 /*
2607  * We should probably have a fallback mechanism to allocate virtual memory
2608  * out of partially filled vmap blocks. However vmap block sizing should be
2609  * fairly reasonable according to the vmalloc size, so it shouldn't be a
2610  * big problem.
2611  */
2612 
2613 static unsigned long addr_to_vb_idx(unsigned long addr)
2614 {
2615 	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
2616 	addr /= VMAP_BLOCK_SIZE;
2617 	return addr;
2618 }
2619 
2620 static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
2621 {
2622 	unsigned long addr;
2623 
2624 	addr = va_start + (pages_off << PAGE_SHIFT);
2625 	BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
2626 	return (void *)addr;
2627 }
2628 
2629 /**
2630  * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this
2631  *                  block. Of course pages number can't exceed VMAP_BBMAP_BITS
2632  * @order:    how many 2^order pages should be occupied in newly allocated block
2633  * @gfp_mask: flags for the page level allocator
2634  *
2635  * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
2636  */
2637 static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
2638 {
2639 	struct vmap_block_queue *vbq;
2640 	struct vmap_block *vb;
2641 	struct vmap_area *va;
2642 	struct xarray *xa;
2643 	unsigned long vb_idx;
2644 	int node, err;
2645 	void *vaddr;
2646 
2647 	node = numa_node_id();
2648 
2649 	vb = kmalloc_node(sizeof(struct vmap_block),
2650 			gfp_mask & GFP_RECLAIM_MASK, node);
2651 	if (unlikely(!vb))
2652 		return ERR_PTR(-ENOMEM);
2653 
2654 	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
2655 					VMALLOC_START, VMALLOC_END,
2656 					node, gfp_mask,
2657 					VMAP_RAM|VMAP_BLOCK, NULL);
2658 	if (IS_ERR(va)) {
2659 		kfree(vb);
2660 		return ERR_CAST(va);
2661 	}
2662 
2663 	vaddr = vmap_block_vaddr(va->va_start, 0);
2664 	spin_lock_init(&vb->lock);
2665 	vb->va = va;
2666 	/* At least something should be left free */
2667 	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
2668 	bitmap_zero(vb->used_map, VMAP_BBMAP_BITS);
2669 	vb->free = VMAP_BBMAP_BITS - (1UL << order);
2670 	vb->dirty = 0;
2671 	vb->dirty_min = VMAP_BBMAP_BITS;
2672 	vb->dirty_max = 0;
2673 	bitmap_set(vb->used_map, 0, (1UL << order));
2674 	INIT_LIST_HEAD(&vb->free_list);
2675 	vb->cpu = raw_smp_processor_id();
2676 
2677 	xa = addr_to_vb_xa(va->va_start);
2678 	vb_idx = addr_to_vb_idx(va->va_start);
2679 	err = xa_insert(xa, vb_idx, vb, gfp_mask);
2680 	if (err) {
2681 		kfree(vb);
2682 		free_vmap_area(va);
2683 		return ERR_PTR(err);
2684 	}
2685 	/*
2686 	 * list_add_tail_rcu could happened in another core
2687 	 * rather than vb->cpu due to task migration, which
2688 	 * is safe as list_add_tail_rcu will ensure the list's
2689 	 * integrity together with list_for_each_rcu from read
2690 	 * side.
2691 	 */
2692 	vbq = per_cpu_ptr(&vmap_block_queue, vb->cpu);
2693 	spin_lock(&vbq->lock);
2694 	list_add_tail_rcu(&vb->free_list, &vbq->free);
2695 	spin_unlock(&vbq->lock);
2696 
2697 	return vaddr;
2698 }
2699 
2700 static void free_vmap_block(struct vmap_block *vb)
2701 {
2702 	struct vmap_node *vn;
2703 	struct vmap_block *tmp;
2704 	struct xarray *xa;
2705 
2706 	xa = addr_to_vb_xa(vb->va->va_start);
2707 	tmp = xa_erase(xa, addr_to_vb_idx(vb->va->va_start));
2708 	BUG_ON(tmp != vb);
2709 
2710 	vn = addr_to_node(vb->va->va_start);
2711 	spin_lock(&vn->busy.lock);
2712 	unlink_va(vb->va, &vn->busy.root);
2713 	spin_unlock(&vn->busy.lock);
2714 
2715 	free_vmap_area_noflush(vb->va);
2716 	kfree_rcu(vb, rcu_head);
2717 }
2718 
2719 static bool purge_fragmented_block(struct vmap_block *vb,
2720 		struct list_head *purge_list, bool force_purge)
2721 {
2722 	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, vb->cpu);
2723 
2724 	if (vb->free + vb->dirty != VMAP_BBMAP_BITS ||
2725 	    vb->dirty == VMAP_BBMAP_BITS)
2726 		return false;
2727 
2728 	/* Don't overeagerly purge usable blocks unless requested */
2729 	if (!(force_purge || vb->free < VMAP_PURGE_THRESHOLD))
2730 		return false;
2731 
2732 	/* prevent further allocs after releasing lock */
2733 	WRITE_ONCE(vb->free, 0);
2734 	/* prevent purging it again */
2735 	WRITE_ONCE(vb->dirty, VMAP_BBMAP_BITS);
2736 	vb->dirty_min = 0;
2737 	vb->dirty_max = VMAP_BBMAP_BITS;
2738 	spin_lock(&vbq->lock);
2739 	list_del_rcu(&vb->free_list);
2740 	spin_unlock(&vbq->lock);
2741 	list_add_tail(&vb->purge, purge_list);
2742 	return true;
2743 }
2744 
2745 static void free_purged_blocks(struct list_head *purge_list)
2746 {
2747 	struct vmap_block *vb, *n_vb;
2748 
2749 	list_for_each_entry_safe(vb, n_vb, purge_list, purge) {
2750 		list_del(&vb->purge);
2751 		free_vmap_block(vb);
2752 	}
2753 }
2754 
2755 static void purge_fragmented_blocks(int cpu)
2756 {
2757 	LIST_HEAD(purge);
2758 	struct vmap_block *vb;
2759 	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
2760 
2761 	rcu_read_lock();
2762 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
2763 		unsigned long free = READ_ONCE(vb->free);
2764 		unsigned long dirty = READ_ONCE(vb->dirty);
2765 
2766 		if (free + dirty != VMAP_BBMAP_BITS ||
2767 		    dirty == VMAP_BBMAP_BITS)
2768 			continue;
2769 
2770 		spin_lock(&vb->lock);
2771 		purge_fragmented_block(vb, &purge, true);
2772 		spin_unlock(&vb->lock);
2773 	}
2774 	rcu_read_unlock();
2775 	free_purged_blocks(&purge);
2776 }
2777 
2778 static void purge_fragmented_blocks_allcpus(void)
2779 {
2780 	int cpu;
2781 
2782 	for_each_possible_cpu(cpu)
2783 		purge_fragmented_blocks(cpu);
2784 }
2785 
2786 static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
2787 {
2788 	struct vmap_block_queue *vbq;
2789 	struct vmap_block *vb;
2790 	void *vaddr = NULL;
2791 	unsigned int order;
2792 
2793 	BUG_ON(offset_in_page(size));
2794 	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
2795 	if (WARN_ON(size == 0)) {
2796 		/*
2797 		 * Allocating 0 bytes isn't what caller wants since
2798 		 * get_order(0) returns funny result. Just warn and terminate
2799 		 * early.
2800 		 */
2801 		return ERR_PTR(-EINVAL);
2802 	}
2803 	order = get_order(size);
2804 
2805 	rcu_read_lock();
2806 	vbq = raw_cpu_ptr(&vmap_block_queue);
2807 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
2808 		unsigned long pages_off;
2809 
2810 		if (READ_ONCE(vb->free) < (1UL << order))
2811 			continue;
2812 
2813 		spin_lock(&vb->lock);
2814 		if (vb->free < (1UL << order)) {
2815 			spin_unlock(&vb->lock);
2816 			continue;
2817 		}
2818 
2819 		pages_off = VMAP_BBMAP_BITS - vb->free;
2820 		vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
2821 		WRITE_ONCE(vb->free, vb->free - (1UL << order));
2822 		bitmap_set(vb->used_map, pages_off, (1UL << order));
2823 		if (vb->free == 0) {
2824 			spin_lock(&vbq->lock);
2825 			list_del_rcu(&vb->free_list);
2826 			spin_unlock(&vbq->lock);
2827 		}
2828 
2829 		spin_unlock(&vb->lock);
2830 		break;
2831 	}
2832 
2833 	rcu_read_unlock();
2834 
2835 	/* Allocate new block if nothing was found */
2836 	if (!vaddr)
2837 		vaddr = new_vmap_block(order, gfp_mask);
2838 
2839 	return vaddr;
2840 }
2841 
2842 static void vb_free(unsigned long addr, unsigned long size)
2843 {
2844 	unsigned long offset;
2845 	unsigned int order;
2846 	struct vmap_block *vb;
2847 	struct xarray *xa;
2848 
2849 	BUG_ON(offset_in_page(size));
2850 	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
2851 
2852 	flush_cache_vunmap(addr, addr + size);
2853 
2854 	order = get_order(size);
2855 	offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;
2856 
2857 	xa = addr_to_vb_xa(addr);
2858 	vb = xa_load(xa, addr_to_vb_idx(addr));
2859 
2860 	spin_lock(&vb->lock);
2861 	bitmap_clear(vb->used_map, offset, (1UL << order));
2862 	spin_unlock(&vb->lock);
2863 
2864 	vunmap_range_noflush(addr, addr + size);
2865 
2866 	if (debug_pagealloc_enabled_static())
2867 		flush_tlb_kernel_range(addr, addr + size);
2868 
2869 	spin_lock(&vb->lock);
2870 
2871 	/* Expand the not yet TLB flushed dirty range */
2872 	vb->dirty_min = min(vb->dirty_min, offset);
2873 	vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
2874 
2875 	WRITE_ONCE(vb->dirty, vb->dirty + (1UL << order));
2876 	if (vb->dirty == VMAP_BBMAP_BITS) {
2877 		BUG_ON(vb->free);
2878 		spin_unlock(&vb->lock);
2879 		free_vmap_block(vb);
2880 	} else
2881 		spin_unlock(&vb->lock);
2882 }
2883 
2884 static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
2885 {
2886 	LIST_HEAD(purge_list);
2887 	int cpu;
2888 
2889 	if (unlikely(!vmap_initialized))
2890 		return;
2891 
2892 	mutex_lock(&vmap_purge_lock);
2893 
2894 	for_each_possible_cpu(cpu) {
2895 		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
2896 		struct vmap_block *vb;
2897 		unsigned long idx;
2898 
2899 		rcu_read_lock();
2900 		xa_for_each(&vbq->vmap_blocks, idx, vb) {
2901 			spin_lock(&vb->lock);
2902 
2903 			/*
2904 			 * Try to purge a fragmented block first. If it's
2905 			 * not purgeable, check whether there is dirty
2906 			 * space to be flushed.
2907 			 */
2908 			if (!purge_fragmented_block(vb, &purge_list, false) &&
2909 			    vb->dirty_max && vb->dirty != VMAP_BBMAP_BITS) {
2910 				unsigned long va_start = vb->va->va_start;
2911 				unsigned long s, e;
2912 
2913 				s = va_start + (vb->dirty_min << PAGE_SHIFT);
2914 				e = va_start + (vb->dirty_max << PAGE_SHIFT);
2915 
2916 				start = min(s, start);
2917 				end   = max(e, end);
2918 
2919 				/* Prevent that this is flushed again */
2920 				vb->dirty_min = VMAP_BBMAP_BITS;
2921 				vb->dirty_max = 0;
2922 
2923 				flush = 1;
2924 			}
2925 			spin_unlock(&vb->lock);
2926 		}
2927 		rcu_read_unlock();
2928 	}
2929 	free_purged_blocks(&purge_list);
2930 
2931 	if (!__purge_vmap_area_lazy(start, end, false) && flush)
2932 		flush_tlb_kernel_range(start, end);
2933 	mutex_unlock(&vmap_purge_lock);
2934 }
2935 
2936 /**
2937  * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
2938  *
2939  * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
2940  * to amortize TLB flushing overheads. What this means is that any page you
2941  * have now, may, in a former life, have been mapped into kernel virtual
2942  * address by the vmap layer and so there might be some CPUs with TLB entries
2943  * still referencing that page (additional to the regular 1:1 kernel mapping).
2944  *
2945  * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
2946  * be sure that none of the pages we have control over will have any aliases
2947  * from the vmap layer.
2948  */
2949 void vm_unmap_aliases(void)
2950 {
2951 	unsigned long start = ULONG_MAX, end = 0;
2952 	int flush = 0;
2953 
2954 	_vm_unmap_aliases(start, end, flush);
2955 }
2956 EXPORT_SYMBOL_GPL(vm_unmap_aliases);
2957 
2958 /**
2959  * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
2960  * @mem: the pointer returned by vm_map_ram
2961  * @count: the count passed to that vm_map_ram call (cannot unmap partial)
2962  */
2963 void vm_unmap_ram(const void *mem, unsigned int count)
2964 {
2965 	unsigned long size = (unsigned long)count << PAGE_SHIFT;
2966 	unsigned long addr = (unsigned long)kasan_reset_tag(mem);
2967 	struct vmap_area *va;
2968 
2969 	might_sleep();
2970 	BUG_ON(!addr);
2971 	BUG_ON(addr < VMALLOC_START);
2972 	BUG_ON(addr > VMALLOC_END);
2973 	BUG_ON(!PAGE_ALIGNED(addr));
2974 
2975 	kasan_poison_vmalloc(mem, size);
2976 
2977 	if (likely(count <= VMAP_MAX_ALLOC)) {
2978 		debug_check_no_locks_freed(mem, size);
2979 		vb_free(addr, size);
2980 		return;
2981 	}
2982 
2983 	va = find_unlink_vmap_area(addr);
2984 	if (WARN_ON_ONCE(!va))
2985 		return;
2986 
2987 	debug_check_no_locks_freed((void *)va->va_start, va_size(va));
2988 	free_unmap_vmap_area(va);
2989 }
2990 EXPORT_SYMBOL(vm_unmap_ram);
2991 
2992 /**
2993  * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
2994  * @pages: an array of pointers to the pages to be mapped
2995  * @count: number of pages
2996  * @node: prefer to allocate data structures on this node
2997  *
2998  * If you use this function for less than VMAP_MAX_ALLOC pages, it could be
2999  * faster than vmap so it's good.  But if you mix long-life and short-life
3000  * objects with vm_map_ram(), it could consume lots of address space through
3001  * fragmentation (especially on a 32bit machine).  You could see failures in
3002  * the end.  Please use this function for short-lived objects.
3003  *
3004  * Returns: a pointer to the address that has been mapped, or %NULL on failure
3005  */
3006 void *vm_map_ram(struct page **pages, unsigned int count, int node)
3007 {
3008 	unsigned long size = (unsigned long)count << PAGE_SHIFT;
3009 	unsigned long addr;
3010 	void *mem;
3011 
3012 	if (likely(count <= VMAP_MAX_ALLOC)) {
3013 		mem = vb_alloc(size, GFP_KERNEL);
3014 		if (IS_ERR(mem))
3015 			return NULL;
3016 		addr = (unsigned long)mem;
3017 	} else {
3018 		struct vmap_area *va;
3019 		va = alloc_vmap_area(size, PAGE_SIZE,
3020 				VMALLOC_START, VMALLOC_END,
3021 				node, GFP_KERNEL, VMAP_RAM,
3022 				NULL);
3023 		if (IS_ERR(va))
3024 			return NULL;
3025 
3026 		addr = va->va_start;
3027 		mem = (void *)addr;
3028 	}
3029 
3030 	if (vmap_pages_range(addr, addr + size, PAGE_KERNEL,
3031 				pages, PAGE_SHIFT) < 0) {
3032 		vm_unmap_ram(mem, count);
3033 		return NULL;
3034 	}
3035 
3036 	/*
3037 	 * Mark the pages as accessible, now that they are mapped.
3038 	 * With hardware tag-based KASAN, marking is skipped for
3039 	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
3040 	 */
3041 	mem = kasan_unpoison_vmalloc(mem, size, KASAN_VMALLOC_PROT_NORMAL);
3042 
3043 	return mem;
3044 }
3045 EXPORT_SYMBOL(vm_map_ram);
3046 
3047 static struct vm_struct *vmlist __initdata;
3048 
3049 static inline unsigned int vm_area_page_order(struct vm_struct *vm)
3050 {
3051 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
3052 	return vm->page_order;
3053 #else
3054 	return 0;
3055 #endif
3056 }
3057 
3058 unsigned int get_vm_area_page_order(struct vm_struct *vm)
3059 {
3060 	return vm_area_page_order(vm);
3061 }
3062 
3063 static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order)
3064 {
3065 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
3066 	vm->page_order = order;
3067 #else
3068 	BUG_ON(order != 0);
3069 #endif
3070 }
3071 
3072 /**
3073  * vm_area_add_early - add vmap area early during boot
3074  * @vm: vm_struct to add
3075  *
3076  * This function is used to add fixed kernel vm area to vmlist before
3077  * vmalloc_init() is called.  @vm->addr, @vm->size, and @vm->flags
3078  * should contain proper values and the other fields should be zero.
3079  *
3080  * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
3081  */
3082 void __init vm_area_add_early(struct vm_struct *vm)
3083 {
3084 	struct vm_struct *tmp, **p;
3085 
3086 	BUG_ON(vmap_initialized);
3087 	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
3088 		if (tmp->addr >= vm->addr) {
3089 			BUG_ON(tmp->addr < vm->addr + vm->size);
3090 			break;
3091 		} else
3092 			BUG_ON(tmp->addr + tmp->size > vm->addr);
3093 	}
3094 	vm->next = *p;
3095 	*p = vm;
3096 }
3097 
3098 /**
3099  * vm_area_register_early - register vmap area early during boot
3100  * @vm: vm_struct to register
3101  * @align: requested alignment
3102  *
3103  * This function is used to register kernel vm area before
3104  * vmalloc_init() is called.  @vm->size and @vm->flags should contain
3105  * proper values on entry and other fields should be zero.  On return,
3106  * vm->addr contains the allocated address.
3107  *
3108  * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
3109  */
3110 void __init vm_area_register_early(struct vm_struct *vm, size_t align)
3111 {
3112 	unsigned long addr = ALIGN(VMALLOC_START, align);
3113 	struct vm_struct *cur, **p;
3114 
3115 	BUG_ON(vmap_initialized);
3116 
3117 	for (p = &vmlist; (cur = *p) != NULL; p = &cur->next) {
3118 		if ((unsigned long)cur->addr - addr >= vm->size)
3119 			break;
3120 		addr = ALIGN((unsigned long)cur->addr + cur->size, align);
3121 	}
3122 
3123 	BUG_ON(addr > VMALLOC_END - vm->size);
3124 	vm->addr = (void *)addr;
3125 	vm->next = *p;
3126 	*p = vm;
3127 	kasan_populate_early_vm_area_shadow(vm->addr, vm->size);
3128 }
3129 
3130 static void clear_vm_uninitialized_flag(struct vm_struct *vm)
3131 {
3132 	/*
3133 	 * Before removing VM_UNINITIALIZED,
3134 	 * we should make sure that vm has proper values.
3135 	 * Pair with smp_rmb() in show_numa_info().
3136 	 */
3137 	smp_wmb();
3138 	vm->flags &= ~VM_UNINITIALIZED;
3139 }
3140 
3141 struct vm_struct *__get_vm_area_node(unsigned long size,
3142 		unsigned long align, unsigned long shift, unsigned long flags,
3143 		unsigned long start, unsigned long end, int node,
3144 		gfp_t gfp_mask, const void *caller)
3145 {
3146 	struct vmap_area *va;
3147 	struct vm_struct *area;
3148 	unsigned long requested_size = size;
3149 
3150 	BUG_ON(in_interrupt());
3151 	size = ALIGN(size, 1ul << shift);
3152 	if (unlikely(!size))
3153 		return NULL;
3154 
3155 	if (flags & VM_IOREMAP)
3156 		align = 1ul << clamp_t(int, get_count_order_long(size),
3157 				       PAGE_SHIFT, IOREMAP_MAX_ORDER);
3158 
3159 	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
3160 	if (unlikely(!area))
3161 		return NULL;
3162 
3163 	if (!(flags & VM_NO_GUARD))
3164 		size += PAGE_SIZE;
3165 
3166 	area->flags = flags;
3167 	area->caller = caller;
3168 
3169 	va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area);
3170 	if (IS_ERR(va)) {
3171 		kfree(area);
3172 		return NULL;
3173 	}
3174 
3175 	/*
3176 	 * Mark pages for non-VM_ALLOC mappings as accessible. Do it now as a
3177 	 * best-effort approach, as they can be mapped outside of vmalloc code.
3178 	 * For VM_ALLOC mappings, the pages are marked as accessible after
3179 	 * getting mapped in __vmalloc_node_range().
3180 	 * With hardware tag-based KASAN, marking is skipped for
3181 	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
3182 	 */
3183 	if (!(flags & VM_ALLOC))
3184 		area->addr = kasan_unpoison_vmalloc(area->addr, requested_size,
3185 						    KASAN_VMALLOC_PROT_NORMAL);
3186 
3187 	return area;
3188 }
3189 
3190 struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
3191 				       unsigned long start, unsigned long end,
3192 				       const void *caller)
3193 {
3194 	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end,
3195 				  NUMA_NO_NODE, GFP_KERNEL, caller);
3196 }
3197 
3198 /**
3199  * get_vm_area - reserve a contiguous kernel virtual area
3200  * @size:	 size of the area
3201  * @flags:	 %VM_IOREMAP for I/O mappings or VM_ALLOC
3202  *
3203  * Search an area of @size in the kernel virtual mapping area,
3204  * and reserved it for out purposes.  Returns the area descriptor
3205  * on success or %NULL on failure.
3206  *
3207  * Return: the area descriptor on success or %NULL on failure.
3208  */
3209 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
3210 {
3211 	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
3212 				  VMALLOC_START, VMALLOC_END,
3213 				  NUMA_NO_NODE, GFP_KERNEL,
3214 				  __builtin_return_address(0));
3215 }
3216 
3217 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
3218 				const void *caller)
3219 {
3220 	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
3221 				  VMALLOC_START, VMALLOC_END,
3222 				  NUMA_NO_NODE, GFP_KERNEL, caller);
3223 }
3224 
3225 /**
3226  * find_vm_area - find a continuous kernel virtual area
3227  * @addr:	  base address
3228  *
3229  * Search for the kernel VM area starting at @addr, and return it.
3230  * It is up to the caller to do all required locking to keep the returned
3231  * pointer valid.
3232  *
3233  * Return: the area descriptor on success or %NULL on failure.
3234  */
3235 struct vm_struct *find_vm_area(const void *addr)
3236 {
3237 	struct vmap_area *va;
3238 
3239 	va = find_vmap_area((unsigned long)addr);
3240 	if (!va)
3241 		return NULL;
3242 
3243 	return va->vm;
3244 }
3245 
3246 /**
3247  * remove_vm_area - find and remove a continuous kernel virtual area
3248  * @addr:	    base address
3249  *
3250  * Search for the kernel VM area starting at @addr, and remove it.
3251  * This function returns the found VM area, but using it is NOT safe
3252  * on SMP machines, except for its size or flags.
3253  *
3254  * Return: the area descriptor on success or %NULL on failure.
3255  */
3256 struct vm_struct *remove_vm_area(const void *addr)
3257 {
3258 	struct vmap_area *va;
3259 	struct vm_struct *vm;
3260 
3261 	might_sleep();
3262 
3263 	if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
3264 			addr))
3265 		return NULL;
3266 
3267 	va = find_unlink_vmap_area((unsigned long)addr);
3268 	if (!va || !va->vm)
3269 		return NULL;
3270 	vm = va->vm;
3271 
3272 	debug_check_no_locks_freed(vm->addr, get_vm_area_size(vm));
3273 	debug_check_no_obj_freed(vm->addr, get_vm_area_size(vm));
3274 	kasan_free_module_shadow(vm);
3275 	kasan_poison_vmalloc(vm->addr, get_vm_area_size(vm));
3276 
3277 	free_unmap_vmap_area(va);
3278 	return vm;
3279 }
3280 
3281 static inline void set_area_direct_map(const struct vm_struct *area,
3282 				       int (*set_direct_map)(struct page *page))
3283 {
3284 	int i;
3285 
3286 	/* HUGE_VMALLOC passes small pages to set_direct_map */
3287 	for (i = 0; i < area->nr_pages; i++)
3288 		if (page_address(area->pages[i]))
3289 			set_direct_map(area->pages[i]);
3290 }
3291 
3292 /*
3293  * Flush the vm mapping and reset the direct map.
3294  */
3295 static void vm_reset_perms(struct vm_struct *area)
3296 {
3297 	unsigned long start = ULONG_MAX, end = 0;
3298 	unsigned int page_order = vm_area_page_order(area);
3299 	int flush_dmap = 0;
3300 	int i;
3301 
3302 	/*
3303 	 * Find the start and end range of the direct mappings to make sure that
3304 	 * the vm_unmap_aliases() flush includes the direct map.
3305 	 */
3306 	for (i = 0; i < area->nr_pages; i += 1U << page_order) {
3307 		unsigned long addr = (unsigned long)page_address(area->pages[i]);
3308 
3309 		if (addr) {
3310 			unsigned long page_size;
3311 
3312 			page_size = PAGE_SIZE << page_order;
3313 			start = min(addr, start);
3314 			end = max(addr + page_size, end);
3315 			flush_dmap = 1;
3316 		}
3317 	}
3318 
3319 	/*
3320 	 * Set direct map to something invalid so that it won't be cached if
3321 	 * there are any accesses after the TLB flush, then flush the TLB and
3322 	 * reset the direct map permissions to the default.
3323 	 */
3324 	set_area_direct_map(area, set_direct_map_invalid_noflush);
3325 	_vm_unmap_aliases(start, end, flush_dmap);
3326 	set_area_direct_map(area, set_direct_map_default_noflush);
3327 }
3328 
3329 static void delayed_vfree_work(struct work_struct *w)
3330 {
3331 	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
3332 	struct llist_node *t, *llnode;
3333 
3334 	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
3335 		vfree(llnode);
3336 }
3337 
3338 /**
3339  * vfree_atomic - release memory allocated by vmalloc()
3340  * @addr:	  memory base address
3341  *
3342  * This one is just like vfree() but can be called in any atomic context
3343  * except NMIs.
3344  */
3345 void vfree_atomic(const void *addr)
3346 {
3347 	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
3348 
3349 	BUG_ON(in_nmi());
3350 	kmemleak_free(addr);
3351 
3352 	/*
3353 	 * Use raw_cpu_ptr() because this can be called from preemptible
3354 	 * context. Preemption is absolutely fine here, because the llist_add()
3355 	 * implementation is lockless, so it works even if we are adding to
3356 	 * another cpu's list. schedule_work() should be fine with this too.
3357 	 */
3358 	if (addr && llist_add((struct llist_node *)addr, &p->list))
3359 		schedule_work(&p->wq);
3360 }
3361 
3362 /**
3363  * vfree - Release memory allocated by vmalloc()
3364  * @addr:  Memory base address
3365  *
3366  * Free the virtually continuous memory area starting at @addr, as obtained
3367  * from one of the vmalloc() family of APIs.  This will usually also free the
3368  * physical memory underlying the virtual allocation, but that memory is
3369  * reference counted, so it will not be freed until the last user goes away.
3370  *
3371  * If @addr is NULL, no operation is performed.
3372  *
3373  * Context:
3374  * May sleep if called *not* from interrupt context.
3375  * Must not be called in NMI context (strictly speaking, it could be
3376  * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
3377  * conventions for vfree() arch-dependent would be a really bad idea).
3378  */
3379 void vfree(const void *addr)
3380 {
3381 	struct vm_struct *vm;
3382 	int i;
3383 
3384 	if (unlikely(in_interrupt())) {
3385 		vfree_atomic(addr);
3386 		return;
3387 	}
3388 
3389 	BUG_ON(in_nmi());
3390 	kmemleak_free(addr);
3391 	might_sleep();
3392 
3393 	if (!addr)
3394 		return;
3395 
3396 	vm = remove_vm_area(addr);
3397 	if (unlikely(!vm)) {
3398 		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
3399 				addr);
3400 		return;
3401 	}
3402 
3403 	if (unlikely(vm->flags & VM_FLUSH_RESET_PERMS))
3404 		vm_reset_perms(vm);
3405 	for (i = 0; i < vm->nr_pages; i++) {
3406 		struct page *page = vm->pages[i];
3407 
3408 		BUG_ON(!page);
3409 		if (!(vm->flags & VM_MAP_PUT_PAGES))
3410 			mod_memcg_page_state(page, MEMCG_VMALLOC, -1);
3411 		/*
3412 		 * High-order allocs for huge vmallocs are split, so
3413 		 * can be freed as an array of order-0 allocations
3414 		 */
3415 		__free_page(page);
3416 		cond_resched();
3417 	}
3418 	if (!(vm->flags & VM_MAP_PUT_PAGES))
3419 		atomic_long_sub(vm->nr_pages, &nr_vmalloc_pages);
3420 	kvfree(vm->pages);
3421 	kfree(vm);
3422 }
3423 EXPORT_SYMBOL(vfree);
3424 
3425 /**
3426  * vunmap - release virtual mapping obtained by vmap()
3427  * @addr:   memory base address
3428  *
3429  * Free the virtually contiguous memory area starting at @addr,
3430  * which was created from the page array passed to vmap().
3431  *
3432  * Must not be called in interrupt context.
3433  */
3434 void vunmap(const void *addr)
3435 {
3436 	struct vm_struct *vm;
3437 
3438 	BUG_ON(in_interrupt());
3439 	might_sleep();
3440 
3441 	if (!addr)
3442 		return;
3443 	vm = remove_vm_area(addr);
3444 	if (unlikely(!vm)) {
3445 		WARN(1, KERN_ERR "Trying to vunmap() nonexistent vm area (%p)\n",
3446 				addr);
3447 		return;
3448 	}
3449 	kfree(vm);
3450 }
3451 EXPORT_SYMBOL(vunmap);
3452 
3453 /**
3454  * vmap - map an array of pages into virtually contiguous space
3455  * @pages: array of page pointers
3456  * @count: number of pages to map
3457  * @flags: vm_area->flags
3458  * @prot: page protection for the mapping
3459  *
3460  * Maps @count pages from @pages into contiguous kernel virtual space.
3461  * If @flags contains %VM_MAP_PUT_PAGES the ownership of the pages array itself
3462  * (which must be kmalloc or vmalloc memory) and one reference per pages in it
3463  * are transferred from the caller to vmap(), and will be freed / dropped when
3464  * vfree() is called on the return value.
3465  *
3466  * Return: the address of the area or %NULL on failure
3467  */
3468 void *vmap(struct page **pages, unsigned int count,
3469 	   unsigned long flags, pgprot_t prot)
3470 {
3471 	struct vm_struct *area;
3472 	unsigned long addr;
3473 	unsigned long size;		/* In bytes */
3474 
3475 	might_sleep();
3476 
3477 	if (WARN_ON_ONCE(flags & VM_FLUSH_RESET_PERMS))
3478 		return NULL;
3479 
3480 	/*
3481 	 * Your top guard is someone else's bottom guard. Not having a top
3482 	 * guard compromises someone else's mappings too.
3483 	 */
3484 	if (WARN_ON_ONCE(flags & VM_NO_GUARD))
3485 		flags &= ~VM_NO_GUARD;
3486 
3487 	if (count > totalram_pages())
3488 		return NULL;
3489 
3490 	size = (unsigned long)count << PAGE_SHIFT;
3491 	area = get_vm_area_caller(size, flags, __builtin_return_address(0));
3492 	if (!area)
3493 		return NULL;
3494 
3495 	addr = (unsigned long)area->addr;
3496 	if (vmap_pages_range(addr, addr + size, pgprot_nx(prot),
3497 				pages, PAGE_SHIFT) < 0) {
3498 		vunmap(area->addr);
3499 		return NULL;
3500 	}
3501 
3502 	if (flags & VM_MAP_PUT_PAGES) {
3503 		area->pages = pages;
3504 		area->nr_pages = count;
3505 	}
3506 	return area->addr;
3507 }
3508 EXPORT_SYMBOL(vmap);
3509 
3510 #ifdef CONFIG_VMAP_PFN
3511 struct vmap_pfn_data {
3512 	unsigned long	*pfns;
3513 	pgprot_t	prot;
3514 	unsigned int	idx;
3515 };
3516 
3517 static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private)
3518 {
3519 	struct vmap_pfn_data *data = private;
3520 	unsigned long pfn = data->pfns[data->idx];
3521 	pte_t ptent;
3522 
3523 	if (WARN_ON_ONCE(pfn_valid(pfn)))
3524 		return -EINVAL;
3525 
3526 	ptent = pte_mkspecial(pfn_pte(pfn, data->prot));
3527 	set_pte_at(&init_mm, addr, pte, ptent);
3528 
3529 	data->idx++;
3530 	return 0;
3531 }
3532 
3533 /**
3534  * vmap_pfn - map an array of PFNs into virtually contiguous space
3535  * @pfns: array of PFNs
3536  * @count: number of pages to map
3537  * @prot: page protection for the mapping
3538  *
3539  * Maps @count PFNs from @pfns into contiguous kernel virtual space and returns
3540  * the start address of the mapping.
3541  */
3542 void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot)
3543 {
3544 	struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) };
3545 	struct vm_struct *area;
3546 
3547 	area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP,
3548 			__builtin_return_address(0));
3549 	if (!area)
3550 		return NULL;
3551 	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
3552 			count * PAGE_SIZE, vmap_pfn_apply, &data)) {
3553 		free_vm_area(area);
3554 		return NULL;
3555 	}
3556 
3557 	flush_cache_vmap((unsigned long)area->addr,
3558 			 (unsigned long)area->addr + count * PAGE_SIZE);
3559 
3560 	return area->addr;
3561 }
3562 EXPORT_SYMBOL_GPL(vmap_pfn);
3563 #endif /* CONFIG_VMAP_PFN */
3564 
3565 static inline unsigned int
3566 vm_area_alloc_pages(gfp_t gfp, int nid,
3567 		unsigned int order, unsigned int nr_pages, struct page **pages)
3568 {
3569 	unsigned int nr_allocated = 0;
3570 	struct page *page;
3571 	int i;
3572 
3573 	/*
3574 	 * For order-0 pages we make use of bulk allocator, if
3575 	 * the page array is partly or not at all populated due
3576 	 * to fails, fallback to a single page allocator that is
3577 	 * more permissive.
3578 	 */
3579 	if (!order) {
3580 		while (nr_allocated < nr_pages) {
3581 			unsigned int nr, nr_pages_request;
3582 
3583 			/*
3584 			 * A maximum allowed request is hard-coded and is 100
3585 			 * pages per call. That is done in order to prevent a
3586 			 * long preemption off scenario in the bulk-allocator
3587 			 * so the range is [1:100].
3588 			 */
3589 			nr_pages_request = min(100U, nr_pages - nr_allocated);
3590 
3591 			/* memory allocation should consider mempolicy, we can't
3592 			 * wrongly use nearest node when nid == NUMA_NO_NODE,
3593 			 * otherwise memory may be allocated in only one node,
3594 			 * but mempolicy wants to alloc memory by interleaving.
3595 			 */
3596 			if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE)
3597 				nr = alloc_pages_bulk_mempolicy_noprof(gfp,
3598 							nr_pages_request,
3599 							pages + nr_allocated);
3600 			else
3601 				nr = alloc_pages_bulk_node_noprof(gfp, nid,
3602 							nr_pages_request,
3603 							pages + nr_allocated);
3604 
3605 			nr_allocated += nr;
3606 			cond_resched();
3607 
3608 			/*
3609 			 * If zero or pages were obtained partly,
3610 			 * fallback to a single page allocator.
3611 			 */
3612 			if (nr != nr_pages_request)
3613 				break;
3614 		}
3615 	}
3616 
3617 	/* High-order pages or fallback path if "bulk" fails. */
3618 	while (nr_allocated < nr_pages) {
3619 		if (!(gfp & __GFP_NOFAIL) && fatal_signal_pending(current))
3620 			break;
3621 
3622 		if (nid == NUMA_NO_NODE)
3623 			page = alloc_pages_noprof(gfp, order);
3624 		else
3625 			page = alloc_pages_node_noprof(nid, gfp, order);
3626 
3627 		if (unlikely(!page))
3628 			break;
3629 
3630 		/*
3631 		 * High-order allocations must be able to be treated as
3632 		 * independent small pages by callers (as they can with
3633 		 * small-page vmallocs). Some drivers do their own refcounting
3634 		 * on vmalloc_to_page() pages, some use page->mapping,
3635 		 * page->lru, etc.
3636 		 */
3637 		if (order)
3638 			split_page(page, order);
3639 
3640 		/*
3641 		 * Careful, we allocate and map page-order pages, but
3642 		 * tracking is done per PAGE_SIZE page so as to keep the
3643 		 * vm_struct APIs independent of the physical/mapped size.
3644 		 */
3645 		for (i = 0; i < (1U << order); i++)
3646 			pages[nr_allocated + i] = page + i;
3647 
3648 		cond_resched();
3649 		nr_allocated += 1U << order;
3650 	}
3651 
3652 	return nr_allocated;
3653 }
3654 
3655 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
3656 				 pgprot_t prot, unsigned int page_shift,
3657 				 int node)
3658 {
3659 	const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
3660 	bool nofail = gfp_mask & __GFP_NOFAIL;
3661 	unsigned long addr = (unsigned long)area->addr;
3662 	unsigned long size = get_vm_area_size(area);
3663 	unsigned long array_size;
3664 	unsigned int nr_small_pages = size >> PAGE_SHIFT;
3665 	unsigned int page_order;
3666 	unsigned int flags;
3667 	int ret;
3668 
3669 	array_size = (unsigned long)nr_small_pages * sizeof(struct page *);
3670 
3671 	if (!(gfp_mask & (GFP_DMA | GFP_DMA32)))
3672 		gfp_mask |= __GFP_HIGHMEM;
3673 
3674 	/* Please note that the recursion is strictly bounded. */
3675 	if (array_size > PAGE_SIZE) {
3676 		area->pages = __vmalloc_node_noprof(array_size, 1, nested_gfp, node,
3677 					area->caller);
3678 	} else {
3679 		area->pages = kmalloc_node_noprof(array_size, nested_gfp, node);
3680 	}
3681 
3682 	if (!area->pages) {
3683 		warn_alloc(gfp_mask, NULL,
3684 			"vmalloc error: size %lu, failed to allocated page array size %lu",
3685 			nr_small_pages * PAGE_SIZE, array_size);
3686 		free_vm_area(area);
3687 		return NULL;
3688 	}
3689 
3690 	set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
3691 	page_order = vm_area_page_order(area);
3692 
3693 	/*
3694 	 * High-order nofail allocations are really expensive and
3695 	 * potentially dangerous (pre-mature OOM, disruptive reclaim
3696 	 * and compaction etc.
3697 	 *
3698 	 * Please note, the __vmalloc_node_range_noprof() falls-back
3699 	 * to order-0 pages if high-order attempt is unsuccessful.
3700 	 */
3701 	area->nr_pages = vm_area_alloc_pages((page_order ?
3702 		gfp_mask & ~__GFP_NOFAIL : gfp_mask) | __GFP_NOWARN,
3703 		node, page_order, nr_small_pages, area->pages);
3704 
3705 	atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
3706 	if (gfp_mask & __GFP_ACCOUNT) {
3707 		int i;
3708 
3709 		for (i = 0; i < area->nr_pages; i++)
3710 			mod_memcg_page_state(area->pages[i], MEMCG_VMALLOC, 1);
3711 	}
3712 
3713 	/*
3714 	 * If not enough pages were obtained to accomplish an
3715 	 * allocation request, free them via vfree() if any.
3716 	 */
3717 	if (area->nr_pages != nr_small_pages) {
3718 		/*
3719 		 * vm_area_alloc_pages() can fail due to insufficient memory but
3720 		 * also:-
3721 		 *
3722 		 * - a pending fatal signal
3723 		 * - insufficient huge page-order pages
3724 		 *
3725 		 * Since we always retry allocations at order-0 in the huge page
3726 		 * case a warning for either is spurious.
3727 		 */
3728 		if (!fatal_signal_pending(current) && page_order == 0)
3729 			warn_alloc(gfp_mask, NULL,
3730 				"vmalloc error: size %lu, failed to allocate pages",
3731 				area->nr_pages * PAGE_SIZE);
3732 		goto fail;
3733 	}
3734 
3735 	/*
3736 	 * page tables allocations ignore external gfp mask, enforce it
3737 	 * by the scope API
3738 	 */
3739 	if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
3740 		flags = memalloc_nofs_save();
3741 	else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
3742 		flags = memalloc_noio_save();
3743 
3744 	do {
3745 		ret = vmap_pages_range(addr, addr + size, prot, area->pages,
3746 			page_shift);
3747 		if (nofail && (ret < 0))
3748 			schedule_timeout_uninterruptible(1);
3749 	} while (nofail && (ret < 0));
3750 
3751 	if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
3752 		memalloc_nofs_restore(flags);
3753 	else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
3754 		memalloc_noio_restore(flags);
3755 
3756 	if (ret < 0) {
3757 		warn_alloc(gfp_mask, NULL,
3758 			"vmalloc error: size %lu, failed to map pages",
3759 			area->nr_pages * PAGE_SIZE);
3760 		goto fail;
3761 	}
3762 
3763 	return area->addr;
3764 
3765 fail:
3766 	vfree(area->addr);
3767 	return NULL;
3768 }
3769 
3770 /**
3771  * __vmalloc_node_range - allocate virtually contiguous memory
3772  * @size:		  allocation size
3773  * @align:		  desired alignment
3774  * @start:		  vm area range start
3775  * @end:		  vm area range end
3776  * @gfp_mask:		  flags for the page level allocator
3777  * @prot:		  protection mask for the allocated pages
3778  * @vm_flags:		  additional vm area flags (e.g. %VM_NO_GUARD)
3779  * @node:		  node to use for allocation or NUMA_NO_NODE
3780  * @caller:		  caller's return address
3781  *
3782  * Allocate enough pages to cover @size from the page level
3783  * allocator with @gfp_mask flags. Please note that the full set of gfp
3784  * flags are not supported. GFP_KERNEL, GFP_NOFS and GFP_NOIO are all
3785  * supported.
3786  * Zone modifiers are not supported. From the reclaim modifiers
3787  * __GFP_DIRECT_RECLAIM is required (aka GFP_NOWAIT is not supported)
3788  * and only __GFP_NOFAIL is supported (i.e. __GFP_NORETRY and
3789  * __GFP_RETRY_MAYFAIL are not supported).
3790  *
3791  * __GFP_NOWARN can be used to suppress failures messages.
3792  *
3793  * Map them into contiguous kernel virtual space, using a pagetable
3794  * protection of @prot.
3795  *
3796  * Return: the address of the area or %NULL on failure
3797  */
3798 void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
3799 			unsigned long start, unsigned long end, gfp_t gfp_mask,
3800 			pgprot_t prot, unsigned long vm_flags, int node,
3801 			const void *caller)
3802 {
3803 	struct vm_struct *area;
3804 	void *ret;
3805 	kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE;
3806 	unsigned long original_align = align;
3807 	unsigned int shift = PAGE_SHIFT;
3808 
3809 	if (WARN_ON_ONCE(!size))
3810 		return NULL;
3811 
3812 	if ((size >> PAGE_SHIFT) > totalram_pages()) {
3813 		warn_alloc(gfp_mask, NULL,
3814 			"vmalloc error: size %lu, exceeds total pages",
3815 			size);
3816 		return NULL;
3817 	}
3818 
3819 	if (vmap_allow_huge && (vm_flags & VM_ALLOW_HUGE_VMAP)) {
3820 		/*
3821 		 * Try huge pages. Only try for PAGE_KERNEL allocations,
3822 		 * others like modules don't yet expect huge pages in
3823 		 * their allocations due to apply_to_page_range not
3824 		 * supporting them.
3825 		 */
3826 
3827 		if (arch_vmap_pmd_supported(prot) && size >= PMD_SIZE)
3828 			shift = PMD_SHIFT;
3829 		else
3830 			shift = arch_vmap_pte_supported_shift(size);
3831 
3832 		align = max(original_align, 1UL << shift);
3833 	}
3834 
3835 again:
3836 	area = __get_vm_area_node(size, align, shift, VM_ALLOC |
3837 				  VM_UNINITIALIZED | vm_flags, start, end, node,
3838 				  gfp_mask, caller);
3839 	if (!area) {
3840 		bool nofail = gfp_mask & __GFP_NOFAIL;
3841 		warn_alloc(gfp_mask, NULL,
3842 			"vmalloc error: size %lu, vm_struct allocation failed%s",
3843 			size, (nofail) ? ". Retrying." : "");
3844 		if (nofail) {
3845 			schedule_timeout_uninterruptible(1);
3846 			goto again;
3847 		}
3848 		goto fail;
3849 	}
3850 
3851 	/*
3852 	 * Prepare arguments for __vmalloc_area_node() and
3853 	 * kasan_unpoison_vmalloc().
3854 	 */
3855 	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) {
3856 		if (kasan_hw_tags_enabled()) {
3857 			/*
3858 			 * Modify protection bits to allow tagging.
3859 			 * This must be done before mapping.
3860 			 */
3861 			prot = arch_vmap_pgprot_tagged(prot);
3862 
3863 			/*
3864 			 * Skip page_alloc poisoning and zeroing for physical
3865 			 * pages backing VM_ALLOC mapping. Memory is instead
3866 			 * poisoned and zeroed by kasan_unpoison_vmalloc().
3867 			 */
3868 			gfp_mask |= __GFP_SKIP_KASAN | __GFP_SKIP_ZERO;
3869 		}
3870 
3871 		/* Take note that the mapping is PAGE_KERNEL. */
3872 		kasan_flags |= KASAN_VMALLOC_PROT_NORMAL;
3873 	}
3874 
3875 	/* Allocate physical pages and map them into vmalloc space. */
3876 	ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node);
3877 	if (!ret)
3878 		goto fail;
3879 
3880 	/*
3881 	 * Mark the pages as accessible, now that they are mapped.
3882 	 * The condition for setting KASAN_VMALLOC_INIT should complement the
3883 	 * one in post_alloc_hook() with regards to the __GFP_SKIP_ZERO check
3884 	 * to make sure that memory is initialized under the same conditions.
3885 	 * Tag-based KASAN modes only assign tags to normal non-executable
3886 	 * allocations, see __kasan_unpoison_vmalloc().
3887 	 */
3888 	kasan_flags |= KASAN_VMALLOC_VM_ALLOC;
3889 	if (!want_init_on_free() && want_init_on_alloc(gfp_mask) &&
3890 	    (gfp_mask & __GFP_SKIP_ZERO))
3891 		kasan_flags |= KASAN_VMALLOC_INIT;
3892 	/* KASAN_VMALLOC_PROT_NORMAL already set if required. */
3893 	area->addr = kasan_unpoison_vmalloc(area->addr, size, kasan_flags);
3894 
3895 	/*
3896 	 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
3897 	 * flag. It means that vm_struct is not fully initialized.
3898 	 * Now, it is fully initialized, so remove this flag here.
3899 	 */
3900 	clear_vm_uninitialized_flag(area);
3901 
3902 	if (!(vm_flags & VM_DEFER_KMEMLEAK))
3903 		kmemleak_vmalloc(area, PAGE_ALIGN(size), gfp_mask);
3904 
3905 	return area->addr;
3906 
3907 fail:
3908 	if (shift > PAGE_SHIFT) {
3909 		shift = PAGE_SHIFT;
3910 		align = original_align;
3911 		goto again;
3912 	}
3913 
3914 	return NULL;
3915 }
3916 
3917 /**
3918  * __vmalloc_node - allocate virtually contiguous memory
3919  * @size:	    allocation size
3920  * @align:	    desired alignment
3921  * @gfp_mask:	    flags for the page level allocator
3922  * @node:	    node to use for allocation or NUMA_NO_NODE
3923  * @caller:	    caller's return address
3924  *
3925  * Allocate enough pages to cover @size from the page level allocator with
3926  * @gfp_mask flags.  Map them into contiguous kernel virtual space.
3927  *
3928  * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
3929  * and __GFP_NOFAIL are not supported
3930  *
3931  * Any use of gfp flags outside of GFP_KERNEL should be consulted
3932  * with mm people.
3933  *
3934  * Return: pointer to the allocated memory or %NULL on error
3935  */
3936 void *__vmalloc_node_noprof(unsigned long size, unsigned long align,
3937 			    gfp_t gfp_mask, int node, const void *caller)
3938 {
3939 	return __vmalloc_node_range_noprof(size, align, VMALLOC_START, VMALLOC_END,
3940 				gfp_mask, PAGE_KERNEL, 0, node, caller);
3941 }
3942 /*
3943  * This is only for performance analysis of vmalloc and stress purpose.
3944  * It is required by vmalloc test module, therefore do not use it other
3945  * than that.
3946  */
3947 #ifdef CONFIG_TEST_VMALLOC_MODULE
3948 EXPORT_SYMBOL_GPL(__vmalloc_node_noprof);
3949 #endif
3950 
3951 void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask)
3952 {
3953 	return __vmalloc_node_noprof(size, 1, gfp_mask, NUMA_NO_NODE,
3954 				__builtin_return_address(0));
3955 }
3956 EXPORT_SYMBOL(__vmalloc_noprof);
3957 
3958 /**
3959  * vmalloc - allocate virtually contiguous memory
3960  * @size:    allocation size
3961  *
3962  * Allocate enough pages to cover @size from the page level
3963  * allocator and map them into contiguous kernel virtual space.
3964  *
3965  * For tight control over page level allocator and protection flags
3966  * use __vmalloc() instead.
3967  *
3968  * Return: pointer to the allocated memory or %NULL on error
3969  */
3970 void *vmalloc_noprof(unsigned long size)
3971 {
3972 	return __vmalloc_node_noprof(size, 1, GFP_KERNEL, NUMA_NO_NODE,
3973 				__builtin_return_address(0));
3974 }
3975 EXPORT_SYMBOL(vmalloc_noprof);
3976 
3977 /**
3978  * vmalloc_huge - allocate virtually contiguous memory, allow huge pages
3979  * @size:      allocation size
3980  * @gfp_mask:  flags for the page level allocator
3981  *
3982  * Allocate enough pages to cover @size from the page level
3983  * allocator and map them into contiguous kernel virtual space.
3984  * If @size is greater than or equal to PMD_SIZE, allow using
3985  * huge pages for the memory
3986  *
3987  * Return: pointer to the allocated memory or %NULL on error
3988  */
3989 void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask)
3990 {
3991 	return __vmalloc_node_range_noprof(size, 1, VMALLOC_START, VMALLOC_END,
3992 				    gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
3993 				    NUMA_NO_NODE, __builtin_return_address(0));
3994 }
3995 EXPORT_SYMBOL_GPL(vmalloc_huge_noprof);
3996 
3997 /**
3998  * vzalloc - allocate virtually contiguous memory with zero fill
3999  * @size:    allocation size
4000  *
4001  * Allocate enough pages to cover @size from the page level
4002  * allocator and map them into contiguous kernel virtual space.
4003  * The memory allocated is set to zero.
4004  *
4005  * For tight control over page level allocator and protection flags
4006  * use __vmalloc() instead.
4007  *
4008  * Return: pointer to the allocated memory or %NULL on error
4009  */
4010 void *vzalloc_noprof(unsigned long size)
4011 {
4012 	return __vmalloc_node_noprof(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE,
4013 				__builtin_return_address(0));
4014 }
4015 EXPORT_SYMBOL(vzalloc_noprof);
4016 
4017 /**
4018  * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
4019  * @size: allocation size
4020  *
4021  * The resulting memory area is zeroed so it can be mapped to userspace
4022  * without leaking data.
4023  *
4024  * Return: pointer to the allocated memory or %NULL on error
4025  */
4026 void *vmalloc_user_noprof(unsigned long size)
4027 {
4028 	return __vmalloc_node_range_noprof(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
4029 				    GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
4030 				    VM_USERMAP, NUMA_NO_NODE,
4031 				    __builtin_return_address(0));
4032 }
4033 EXPORT_SYMBOL(vmalloc_user_noprof);
4034 
4035 /**
4036  * vmalloc_node - allocate memory on a specific node
4037  * @size:	  allocation size
4038  * @node:	  numa node
4039  *
4040  * Allocate enough pages to cover @size from the page level
4041  * allocator and map them into contiguous kernel virtual space.
4042  *
4043  * For tight control over page level allocator and protection flags
4044  * use __vmalloc() instead.
4045  *
4046  * Return: pointer to the allocated memory or %NULL on error
4047  */
4048 void *vmalloc_node_noprof(unsigned long size, int node)
4049 {
4050 	return __vmalloc_node_noprof(size, 1, GFP_KERNEL, node,
4051 			__builtin_return_address(0));
4052 }
4053 EXPORT_SYMBOL(vmalloc_node_noprof);
4054 
4055 /**
4056  * vzalloc_node - allocate memory on a specific node with zero fill
4057  * @size:	allocation size
4058  * @node:	numa node
4059  *
4060  * Allocate enough pages to cover @size from the page level
4061  * allocator and map them into contiguous kernel virtual space.
4062  * The memory allocated is set to zero.
4063  *
4064  * Return: pointer to the allocated memory or %NULL on error
4065  */
4066 void *vzalloc_node_noprof(unsigned long size, int node)
4067 {
4068 	return __vmalloc_node_noprof(size, 1, GFP_KERNEL | __GFP_ZERO, node,
4069 				__builtin_return_address(0));
4070 }
4071 EXPORT_SYMBOL(vzalloc_node_noprof);
4072 
4073 /**
4074  * vrealloc - reallocate virtually contiguous memory; contents remain unchanged
4075  * @p: object to reallocate memory for
4076  * @size: the size to reallocate
4077  * @flags: the flags for the page level allocator
4078  *
4079  * If @p is %NULL, vrealloc() behaves exactly like vmalloc(). If @size is 0 and
4080  * @p is not a %NULL pointer, the object pointed to is freed.
4081  *
4082  * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
4083  * initial memory allocation, every subsequent call to this API for the same
4084  * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
4085  * __GFP_ZERO is not fully honored by this API.
4086  *
4087  * In any case, the contents of the object pointed to are preserved up to the
4088  * lesser of the new and old sizes.
4089  *
4090  * This function must not be called concurrently with itself or vfree() for the
4091  * same memory allocation.
4092  *
4093  * Return: pointer to the allocated memory; %NULL if @size is zero or in case of
4094  *         failure
4095  */
4096 void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
4097 {
4098 	size_t old_size = 0;
4099 	void *n;
4100 
4101 	if (!size) {
4102 		vfree(p);
4103 		return NULL;
4104 	}
4105 
4106 	if (p) {
4107 		struct vm_struct *vm;
4108 
4109 		vm = find_vm_area(p);
4110 		if (unlikely(!vm)) {
4111 			WARN(1, "Trying to vrealloc() nonexistent vm area (%p)\n", p);
4112 			return NULL;
4113 		}
4114 
4115 		old_size = get_vm_area_size(vm);
4116 	}
4117 
4118 	/*
4119 	 * TODO: Shrink the vm_area, i.e. unmap and free unused pages. What
4120 	 * would be a good heuristic for when to shrink the vm_area?
4121 	 */
4122 	if (size <= old_size) {
4123 		/* Zero out spare memory. */
4124 		if (want_init_on_alloc(flags))
4125 			memset((void *)p + size, 0, old_size - size);
4126 		kasan_poison_vmalloc(p + size, old_size - size);
4127 		kasan_unpoison_vmalloc(p, size, KASAN_VMALLOC_PROT_NORMAL);
4128 		return (void *)p;
4129 	}
4130 
4131 	/* TODO: Grow the vm_area, i.e. allocate and map additional pages. */
4132 	n = __vmalloc_noprof(size, flags);
4133 	if (!n)
4134 		return NULL;
4135 
4136 	if (p) {
4137 		memcpy(n, p, old_size);
4138 		vfree(p);
4139 	}
4140 
4141 	return n;
4142 }
4143 
4144 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
4145 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
4146 #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
4147 #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
4148 #else
4149 /*
4150  * 64b systems should always have either DMA or DMA32 zones. For others
4151  * GFP_DMA32 should do the right thing and use the normal zone.
4152  */
4153 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
4154 #endif
4155 
4156 /**
4157  * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
4158  * @size:	allocation size
4159  *
4160  * Allocate enough 32bit PA addressable pages to cover @size from the
4161  * page level allocator and map them into contiguous kernel virtual space.
4162  *
4163  * Return: pointer to the allocated memory or %NULL on error
4164  */
4165 void *vmalloc_32_noprof(unsigned long size)
4166 {
4167 	return __vmalloc_node_noprof(size, 1, GFP_VMALLOC32, NUMA_NO_NODE,
4168 			__builtin_return_address(0));
4169 }
4170 EXPORT_SYMBOL(vmalloc_32_noprof);
4171 
4172 /**
4173  * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
4174  * @size:	     allocation size
4175  *
4176  * The resulting memory area is 32bit addressable and zeroed so it can be
4177  * mapped to userspace without leaking data.
4178  *
4179  * Return: pointer to the allocated memory or %NULL on error
4180  */
4181 void *vmalloc_32_user_noprof(unsigned long size)
4182 {
4183 	return __vmalloc_node_range_noprof(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
4184 				    GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
4185 				    VM_USERMAP, NUMA_NO_NODE,
4186 				    __builtin_return_address(0));
4187 }
4188 EXPORT_SYMBOL(vmalloc_32_user_noprof);
4189 
4190 /*
4191  * Atomically zero bytes in the iterator.
4192  *
4193  * Returns the number of zeroed bytes.
4194  */
4195 static size_t zero_iter(struct iov_iter *iter, size_t count)
4196 {
4197 	size_t remains = count;
4198 
4199 	while (remains > 0) {
4200 		size_t num, copied;
4201 
4202 		num = min_t(size_t, remains, PAGE_SIZE);
4203 		copied = copy_page_to_iter_nofault(ZERO_PAGE(0), 0, num, iter);
4204 		remains -= copied;
4205 
4206 		if (copied < num)
4207 			break;
4208 	}
4209 
4210 	return count - remains;
4211 }
4212 
4213 /*
4214  * small helper routine, copy contents to iter from addr.
4215  * If the page is not present, fill zero.
4216  *
4217  * Returns the number of copied bytes.
4218  */
4219 static size_t aligned_vread_iter(struct iov_iter *iter,
4220 				 const char *addr, size_t count)
4221 {
4222 	size_t remains = count;
4223 	struct page *page;
4224 
4225 	while (remains > 0) {
4226 		unsigned long offset, length;
4227 		size_t copied = 0;
4228 
4229 		offset = offset_in_page(addr);
4230 		length = PAGE_SIZE - offset;
4231 		if (length > remains)
4232 			length = remains;
4233 		page = vmalloc_to_page(addr);
4234 		/*
4235 		 * To do safe access to this _mapped_ area, we need lock. But
4236 		 * adding lock here means that we need to add overhead of
4237 		 * vmalloc()/vfree() calls for this _debug_ interface, rarely
4238 		 * used. Instead of that, we'll use an local mapping via
4239 		 * copy_page_to_iter_nofault() and accept a small overhead in
4240 		 * this access function.
4241 		 */
4242 		if (page)
4243 			copied = copy_page_to_iter_nofault(page, offset,
4244 							   length, iter);
4245 		else
4246 			copied = zero_iter(iter, length);
4247 
4248 		addr += copied;
4249 		remains -= copied;
4250 
4251 		if (copied != length)
4252 			break;
4253 	}
4254 
4255 	return count - remains;
4256 }
4257 
4258 /*
4259  * Read from a vm_map_ram region of memory.
4260  *
4261  * Returns the number of copied bytes.
4262  */
4263 static size_t vmap_ram_vread_iter(struct iov_iter *iter, const char *addr,
4264 				  size_t count, unsigned long flags)
4265 {
4266 	char *start;
4267 	struct vmap_block *vb;
4268 	struct xarray *xa;
4269 	unsigned long offset;
4270 	unsigned int rs, re;
4271 	size_t remains, n;
4272 
4273 	/*
4274 	 * If it's area created by vm_map_ram() interface directly, but
4275 	 * not further subdividing and delegating management to vmap_block,
4276 	 * handle it here.
4277 	 */
4278 	if (!(flags & VMAP_BLOCK))
4279 		return aligned_vread_iter(iter, addr, count);
4280 
4281 	remains = count;
4282 
4283 	/*
4284 	 * Area is split into regions and tracked with vmap_block, read out
4285 	 * each region and zero fill the hole between regions.
4286 	 */
4287 	xa = addr_to_vb_xa((unsigned long) addr);
4288 	vb = xa_load(xa, addr_to_vb_idx((unsigned long)addr));
4289 	if (!vb)
4290 		goto finished_zero;
4291 
4292 	spin_lock(&vb->lock);
4293 	if (bitmap_empty(vb->used_map, VMAP_BBMAP_BITS)) {
4294 		spin_unlock(&vb->lock);
4295 		goto finished_zero;
4296 	}
4297 
4298 	for_each_set_bitrange(rs, re, vb->used_map, VMAP_BBMAP_BITS) {
4299 		size_t copied;
4300 
4301 		if (remains == 0)
4302 			goto finished;
4303 
4304 		start = vmap_block_vaddr(vb->va->va_start, rs);
4305 
4306 		if (addr < start) {
4307 			size_t to_zero = min_t(size_t, start - addr, remains);
4308 			size_t zeroed = zero_iter(iter, to_zero);
4309 
4310 			addr += zeroed;
4311 			remains -= zeroed;
4312 
4313 			if (remains == 0 || zeroed != to_zero)
4314 				goto finished;
4315 		}
4316 
4317 		/*it could start reading from the middle of used region*/
4318 		offset = offset_in_page(addr);
4319 		n = ((re - rs + 1) << PAGE_SHIFT) - offset;
4320 		if (n > remains)
4321 			n = remains;
4322 
4323 		copied = aligned_vread_iter(iter, start + offset, n);
4324 
4325 		addr += copied;
4326 		remains -= copied;
4327 
4328 		if (copied != n)
4329 			goto finished;
4330 	}
4331 
4332 	spin_unlock(&vb->lock);
4333 
4334 finished_zero:
4335 	/* zero-fill the left dirty or free regions */
4336 	return count - remains + zero_iter(iter, remains);
4337 finished:
4338 	/* We couldn't copy/zero everything */
4339 	spin_unlock(&vb->lock);
4340 	return count - remains;
4341 }
4342 
4343 /**
4344  * vread_iter() - read vmalloc area in a safe way to an iterator.
4345  * @iter:         the iterator to which data should be written.
4346  * @addr:         vm address.
4347  * @count:        number of bytes to be read.
4348  *
4349  * This function checks that addr is a valid vmalloc'ed area, and
4350  * copy data from that area to a given buffer. If the given memory range
4351  * of [addr...addr+count) includes some valid address, data is copied to
4352  * proper area of @buf. If there are memory holes, they'll be zero-filled.
4353  * IOREMAP area is treated as memory hole and no copy is done.
4354  *
4355  * If [addr...addr+count) doesn't includes any intersects with alive
4356  * vm_struct area, returns 0. @buf should be kernel's buffer.
4357  *
4358  * Note: In usual ops, vread() is never necessary because the caller
4359  * should know vmalloc() area is valid and can use memcpy().
4360  * This is for routines which have to access vmalloc area without
4361  * any information, as /proc/kcore.
4362  *
4363  * Return: number of bytes for which addr and buf should be increased
4364  * (same number as @count) or %0 if [addr...addr+count) doesn't
4365  * include any intersection with valid vmalloc area
4366  */
4367 long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
4368 {
4369 	struct vmap_node *vn;
4370 	struct vmap_area *va;
4371 	struct vm_struct *vm;
4372 	char *vaddr;
4373 	size_t n, size, flags, remains;
4374 	unsigned long next;
4375 
4376 	addr = kasan_reset_tag(addr);
4377 
4378 	/* Don't allow overflow */
4379 	if ((unsigned long) addr + count < count)
4380 		count = -(unsigned long) addr;
4381 
4382 	remains = count;
4383 
4384 	vn = find_vmap_area_exceed_addr_lock((unsigned long) addr, &va);
4385 	if (!vn)
4386 		goto finished_zero;
4387 
4388 	/* no intersects with alive vmap_area */
4389 	if ((unsigned long)addr + remains <= va->va_start)
4390 		goto finished_zero;
4391 
4392 	do {
4393 		size_t copied;
4394 
4395 		if (remains == 0)
4396 			goto finished;
4397 
4398 		vm = va->vm;
4399 		flags = va->flags & VMAP_FLAGS_MASK;
4400 		/*
4401 		 * VMAP_BLOCK indicates a sub-type of vm_map_ram area, need
4402 		 * be set together with VMAP_RAM.
4403 		 */
4404 		WARN_ON(flags == VMAP_BLOCK);
4405 
4406 		if (!vm && !flags)
4407 			goto next_va;
4408 
4409 		if (vm && (vm->flags & VM_UNINITIALIZED))
4410 			goto next_va;
4411 
4412 		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
4413 		smp_rmb();
4414 
4415 		vaddr = (char *) va->va_start;
4416 		size = vm ? get_vm_area_size(vm) : va_size(va);
4417 
4418 		if (addr >= vaddr + size)
4419 			goto next_va;
4420 
4421 		if (addr < vaddr) {
4422 			size_t to_zero = min_t(size_t, vaddr - addr, remains);
4423 			size_t zeroed = zero_iter(iter, to_zero);
4424 
4425 			addr += zeroed;
4426 			remains -= zeroed;
4427 
4428 			if (remains == 0 || zeroed != to_zero)
4429 				goto finished;
4430 		}
4431 
4432 		n = vaddr + size - addr;
4433 		if (n > remains)
4434 			n = remains;
4435 
4436 		if (flags & VMAP_RAM)
4437 			copied = vmap_ram_vread_iter(iter, addr, n, flags);
4438 		else if (!(vm && (vm->flags & (VM_IOREMAP | VM_SPARSE))))
4439 			copied = aligned_vread_iter(iter, addr, n);
4440 		else /* IOREMAP | SPARSE area is treated as memory hole */
4441 			copied = zero_iter(iter, n);
4442 
4443 		addr += copied;
4444 		remains -= copied;
4445 
4446 		if (copied != n)
4447 			goto finished;
4448 
4449 	next_va:
4450 		next = va->va_end;
4451 		spin_unlock(&vn->busy.lock);
4452 	} while ((vn = find_vmap_area_exceed_addr_lock(next, &va)));
4453 
4454 finished_zero:
4455 	if (vn)
4456 		spin_unlock(&vn->busy.lock);
4457 
4458 	/* zero-fill memory holes */
4459 	return count - remains + zero_iter(iter, remains);
4460 finished:
4461 	/* Nothing remains, or We couldn't copy/zero everything. */
4462 	if (vn)
4463 		spin_unlock(&vn->busy.lock);
4464 
4465 	return count - remains;
4466 }
4467 
4468 /**
4469  * remap_vmalloc_range_partial - map vmalloc pages to userspace
4470  * @vma:		vma to cover
4471  * @uaddr:		target user address to start at
4472  * @kaddr:		virtual address of vmalloc kernel memory
4473  * @pgoff:		offset from @kaddr to start at
4474  * @size:		size of map area
4475  *
4476  * Returns:	0 for success, -Exxx on failure
4477  *
4478  * This function checks that @kaddr is a valid vmalloc'ed area,
4479  * and that it is big enough to cover the range starting at
4480  * @uaddr in @vma. Will return failure if that criteria isn't
4481  * met.
4482  *
4483  * Similar to remap_pfn_range() (see mm/memory.c)
4484  */
4485 int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
4486 				void *kaddr, unsigned long pgoff,
4487 				unsigned long size)
4488 {
4489 	struct vm_struct *area;
4490 	unsigned long off;
4491 	unsigned long end_index;
4492 
4493 	if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
4494 		return -EINVAL;
4495 
4496 	size = PAGE_ALIGN(size);
4497 
4498 	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
4499 		return -EINVAL;
4500 
4501 	area = find_vm_area(kaddr);
4502 	if (!area)
4503 		return -EINVAL;
4504 
4505 	if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
4506 		return -EINVAL;
4507 
4508 	if (check_add_overflow(size, off, &end_index) ||
4509 	    end_index > get_vm_area_size(area))
4510 		return -EINVAL;
4511 	kaddr += off;
4512 
4513 	do {
4514 		struct page *page = vmalloc_to_page(kaddr);
4515 		int ret;
4516 
4517 		ret = vm_insert_page(vma, uaddr, page);
4518 		if (ret)
4519 			return ret;
4520 
4521 		uaddr += PAGE_SIZE;
4522 		kaddr += PAGE_SIZE;
4523 		size -= PAGE_SIZE;
4524 	} while (size > 0);
4525 
4526 	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
4527 
4528 	return 0;
4529 }
4530 
4531 /**
4532  * remap_vmalloc_range - map vmalloc pages to userspace
4533  * @vma:		vma to cover (map full range of vma)
4534  * @addr:		vmalloc memory
4535  * @pgoff:		number of pages into addr before first page to map
4536  *
4537  * Returns:	0 for success, -Exxx on failure
4538  *
4539  * This function checks that addr is a valid vmalloc'ed area, and
4540  * that it is big enough to cover the vma. Will return failure if
4541  * that criteria isn't met.
4542  *
4543  * Similar to remap_pfn_range() (see mm/memory.c)
4544  */
4545 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
4546 						unsigned long pgoff)
4547 {
4548 	return remap_vmalloc_range_partial(vma, vma->vm_start,
4549 					   addr, pgoff,
4550 					   vma->vm_end - vma->vm_start);
4551 }
4552 EXPORT_SYMBOL(remap_vmalloc_range);
4553 
4554 void free_vm_area(struct vm_struct *area)
4555 {
4556 	struct vm_struct *ret;
4557 	ret = remove_vm_area(area->addr);
4558 	BUG_ON(ret != area);
4559 	kfree(area);
4560 }
4561 EXPORT_SYMBOL_GPL(free_vm_area);
4562 
4563 #ifdef CONFIG_SMP
4564 static struct vmap_area *node_to_va(struct rb_node *n)
4565 {
4566 	return rb_entry_safe(n, struct vmap_area, rb_node);
4567 }
4568 
4569 /**
4570  * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
4571  * @addr: target address
4572  *
4573  * Returns: vmap_area if it is found. If there is no such area
4574  *   the first highest(reverse order) vmap_area is returned
4575  *   i.e. va->va_start < addr && va->va_end < addr or NULL
4576  *   if there are no any areas before @addr.
4577  */
4578 static struct vmap_area *
4579 pvm_find_va_enclose_addr(unsigned long addr)
4580 {
4581 	struct vmap_area *va, *tmp;
4582 	struct rb_node *n;
4583 
4584 	n = free_vmap_area_root.rb_node;
4585 	va = NULL;
4586 
4587 	while (n) {
4588 		tmp = rb_entry(n, struct vmap_area, rb_node);
4589 		if (tmp->va_start <= addr) {
4590 			va = tmp;
4591 			if (tmp->va_end >= addr)
4592 				break;
4593 
4594 			n = n->rb_right;
4595 		} else {
4596 			n = n->rb_left;
4597 		}
4598 	}
4599 
4600 	return va;
4601 }
4602 
4603 /**
4604  * pvm_determine_end_from_reverse - find the highest aligned address
4605  * of free block below VMALLOC_END
4606  * @va:
4607  *   in - the VA we start the search(reverse order);
4608  *   out - the VA with the highest aligned end address.
4609  * @align: alignment for required highest address
4610  *
4611  * Returns: determined end address within vmap_area
4612  */
4613 static unsigned long
4614 pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
4615 {
4616 	unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
4617 	unsigned long addr;
4618 
4619 	if (likely(*va)) {
4620 		list_for_each_entry_from_reverse((*va),
4621 				&free_vmap_area_list, list) {
4622 			addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
4623 			if ((*va)->va_start < addr)
4624 				return addr;
4625 		}
4626 	}
4627 
4628 	return 0;
4629 }
4630 
4631 /**
4632  * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
4633  * @offsets: array containing offset of each area
4634  * @sizes: array containing size of each area
4635  * @nr_vms: the number of areas to allocate
4636  * @align: alignment, all entries in @offsets and @sizes must be aligned to this
4637  *
4638  * Returns: kmalloc'd vm_struct pointer array pointing to allocated
4639  *	    vm_structs on success, %NULL on failure
4640  *
4641  * Percpu allocator wants to use congruent vm areas so that it can
4642  * maintain the offsets among percpu areas.  This function allocates
4643  * congruent vmalloc areas for it with GFP_KERNEL.  These areas tend to
4644  * be scattered pretty far, distance between two areas easily going up
4645  * to gigabytes.  To avoid interacting with regular vmallocs, these
4646  * areas are allocated from top.
4647  *
4648  * Despite its complicated look, this allocator is rather simple. It
4649  * does everything top-down and scans free blocks from the end looking
4650  * for matching base. While scanning, if any of the areas do not fit the
4651  * base address is pulled down to fit the area. Scanning is repeated till
4652  * all the areas fit and then all necessary data structures are inserted
4653  * and the result is returned.
4654  */
4655 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
4656 				     const size_t *sizes, int nr_vms,
4657 				     size_t align)
4658 {
4659 	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
4660 	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
4661 	struct vmap_area **vas, *va;
4662 	struct vm_struct **vms;
4663 	int area, area2, last_area, term_area;
4664 	unsigned long base, start, size, end, last_end, orig_start, orig_end;
4665 	bool purged = false;
4666 
4667 	/* verify parameters and allocate data structures */
4668 	BUG_ON(offset_in_page(align) || !is_power_of_2(align));
4669 	for (last_area = 0, area = 0; area < nr_vms; area++) {
4670 		start = offsets[area];
4671 		end = start + sizes[area];
4672 
4673 		/* is everything aligned properly? */
4674 		BUG_ON(!IS_ALIGNED(offsets[area], align));
4675 		BUG_ON(!IS_ALIGNED(sizes[area], align));
4676 
4677 		/* detect the area with the highest address */
4678 		if (start > offsets[last_area])
4679 			last_area = area;
4680 
4681 		for (area2 = area + 1; area2 < nr_vms; area2++) {
4682 			unsigned long start2 = offsets[area2];
4683 			unsigned long end2 = start2 + sizes[area2];
4684 
4685 			BUG_ON(start2 < end && start < end2);
4686 		}
4687 	}
4688 	last_end = offsets[last_area] + sizes[last_area];
4689 
4690 	if (vmalloc_end - vmalloc_start < last_end) {
4691 		WARN_ON(true);
4692 		return NULL;
4693 	}
4694 
4695 	vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
4696 	vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
4697 	if (!vas || !vms)
4698 		goto err_free2;
4699 
4700 	for (area = 0; area < nr_vms; area++) {
4701 		vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL);
4702 		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
4703 		if (!vas[area] || !vms[area])
4704 			goto err_free;
4705 	}
4706 retry:
4707 	spin_lock(&free_vmap_area_lock);
4708 
4709 	/* start scanning - we scan from the top, begin with the last area */
4710 	area = term_area = last_area;
4711 	start = offsets[area];
4712 	end = start + sizes[area];
4713 
4714 	va = pvm_find_va_enclose_addr(vmalloc_end);
4715 	base = pvm_determine_end_from_reverse(&va, align) - end;
4716 
4717 	while (true) {
4718 		/*
4719 		 * base might have underflowed, add last_end before
4720 		 * comparing.
4721 		 */
4722 		if (base + last_end < vmalloc_start + last_end)
4723 			goto overflow;
4724 
4725 		/*
4726 		 * Fitting base has not been found.
4727 		 */
4728 		if (va == NULL)
4729 			goto overflow;
4730 
4731 		/*
4732 		 * If required width exceeds current VA block, move
4733 		 * base downwards and then recheck.
4734 		 */
4735 		if (base + end > va->va_end) {
4736 			base = pvm_determine_end_from_reverse(&va, align) - end;
4737 			term_area = area;
4738 			continue;
4739 		}
4740 
4741 		/*
4742 		 * If this VA does not fit, move base downwards and recheck.
4743 		 */
4744 		if (base + start < va->va_start) {
4745 			va = node_to_va(rb_prev(&va->rb_node));
4746 			base = pvm_determine_end_from_reverse(&va, align) - end;
4747 			term_area = area;
4748 			continue;
4749 		}
4750 
4751 		/*
4752 		 * This area fits, move on to the previous one.  If
4753 		 * the previous one is the terminal one, we're done.
4754 		 */
4755 		area = (area + nr_vms - 1) % nr_vms;
4756 		if (area == term_area)
4757 			break;
4758 
4759 		start = offsets[area];
4760 		end = start + sizes[area];
4761 		va = pvm_find_va_enclose_addr(base + end);
4762 	}
4763 
4764 	/* we've found a fitting base, insert all va's */
4765 	for (area = 0; area < nr_vms; area++) {
4766 		int ret;
4767 
4768 		start = base + offsets[area];
4769 		size = sizes[area];
4770 
4771 		va = pvm_find_va_enclose_addr(start);
4772 		if (WARN_ON_ONCE(va == NULL))
4773 			/* It is a BUG(), but trigger recovery instead. */
4774 			goto recovery;
4775 
4776 		ret = va_clip(&free_vmap_area_root,
4777 			&free_vmap_area_list, va, start, size);
4778 		if (WARN_ON_ONCE(unlikely(ret)))
4779 			/* It is a BUG(), but trigger recovery instead. */
4780 			goto recovery;
4781 
4782 		/* Allocated area. */
4783 		va = vas[area];
4784 		va->va_start = start;
4785 		va->va_end = start + size;
4786 	}
4787 
4788 	spin_unlock(&free_vmap_area_lock);
4789 
4790 	/* populate the kasan shadow space */
4791 	for (area = 0; area < nr_vms; area++) {
4792 		if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
4793 			goto err_free_shadow;
4794 	}
4795 
4796 	/* insert all vm's */
4797 	for (area = 0; area < nr_vms; area++) {
4798 		struct vmap_node *vn = addr_to_node(vas[area]->va_start);
4799 
4800 		spin_lock(&vn->busy.lock);
4801 		insert_vmap_area(vas[area], &vn->busy.root, &vn->busy.head);
4802 		setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
4803 				 pcpu_get_vm_areas);
4804 		spin_unlock(&vn->busy.lock);
4805 	}
4806 
4807 	/*
4808 	 * Mark allocated areas as accessible. Do it now as a best-effort
4809 	 * approach, as they can be mapped outside of vmalloc code.
4810 	 * With hardware tag-based KASAN, marking is skipped for
4811 	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
4812 	 */
4813 	for (area = 0; area < nr_vms; area++)
4814 		vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr,
4815 				vms[area]->size, KASAN_VMALLOC_PROT_NORMAL);
4816 
4817 	kfree(vas);
4818 	return vms;
4819 
4820 recovery:
4821 	/*
4822 	 * Remove previously allocated areas. There is no
4823 	 * need in removing these areas from the busy tree,
4824 	 * because they are inserted only on the final step
4825 	 * and when pcpu_get_vm_areas() is success.
4826 	 */
4827 	while (area--) {
4828 		orig_start = vas[area]->va_start;
4829 		orig_end = vas[area]->va_end;
4830 		va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
4831 				&free_vmap_area_list);
4832 		if (va)
4833 			kasan_release_vmalloc(orig_start, orig_end,
4834 				va->va_start, va->va_end,
4835 				KASAN_VMALLOC_PAGE_RANGE | KASAN_VMALLOC_TLB_FLUSH);
4836 		vas[area] = NULL;
4837 	}
4838 
4839 overflow:
4840 	spin_unlock(&free_vmap_area_lock);
4841 	if (!purged) {
4842 		reclaim_and_purge_vmap_areas();
4843 		purged = true;
4844 
4845 		/* Before "retry", check if we recover. */
4846 		for (area = 0; area < nr_vms; area++) {
4847 			if (vas[area])
4848 				continue;
4849 
4850 			vas[area] = kmem_cache_zalloc(
4851 				vmap_area_cachep, GFP_KERNEL);
4852 			if (!vas[area])
4853 				goto err_free;
4854 		}
4855 
4856 		goto retry;
4857 	}
4858 
4859 err_free:
4860 	for (area = 0; area < nr_vms; area++) {
4861 		if (vas[area])
4862 			kmem_cache_free(vmap_area_cachep, vas[area]);
4863 
4864 		kfree(vms[area]);
4865 	}
4866 err_free2:
4867 	kfree(vas);
4868 	kfree(vms);
4869 	return NULL;
4870 
4871 err_free_shadow:
4872 	spin_lock(&free_vmap_area_lock);
4873 	/*
4874 	 * We release all the vmalloc shadows, even the ones for regions that
4875 	 * hadn't been successfully added. This relies on kasan_release_vmalloc
4876 	 * being able to tolerate this case.
4877 	 */
4878 	for (area = 0; area < nr_vms; area++) {
4879 		orig_start = vas[area]->va_start;
4880 		orig_end = vas[area]->va_end;
4881 		va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
4882 				&free_vmap_area_list);
4883 		if (va)
4884 			kasan_release_vmalloc(orig_start, orig_end,
4885 				va->va_start, va->va_end,
4886 				KASAN_VMALLOC_PAGE_RANGE | KASAN_VMALLOC_TLB_FLUSH);
4887 		vas[area] = NULL;
4888 		kfree(vms[area]);
4889 	}
4890 	spin_unlock(&free_vmap_area_lock);
4891 	kfree(vas);
4892 	kfree(vms);
4893 	return NULL;
4894 }
4895 
4896 /**
4897  * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
4898  * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
4899  * @nr_vms: the number of allocated areas
4900  *
4901  * Free vm_structs and the array allocated by pcpu_get_vm_areas().
4902  */
4903 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
4904 {
4905 	int i;
4906 
4907 	for (i = 0; i < nr_vms; i++)
4908 		free_vm_area(vms[i]);
4909 	kfree(vms);
4910 }
4911 #endif	/* CONFIG_SMP */
4912 
4913 #ifdef CONFIG_PRINTK
4914 bool vmalloc_dump_obj(void *object)
4915 {
4916 	const void *caller;
4917 	struct vm_struct *vm;
4918 	struct vmap_area *va;
4919 	struct vmap_node *vn;
4920 	unsigned long addr;
4921 	unsigned int nr_pages;
4922 
4923 	addr = PAGE_ALIGN((unsigned long) object);
4924 	vn = addr_to_node(addr);
4925 
4926 	if (!spin_trylock(&vn->busy.lock))
4927 		return false;
4928 
4929 	va = __find_vmap_area(addr, &vn->busy.root);
4930 	if (!va || !va->vm) {
4931 		spin_unlock(&vn->busy.lock);
4932 		return false;
4933 	}
4934 
4935 	vm = va->vm;
4936 	addr = (unsigned long) vm->addr;
4937 	caller = vm->caller;
4938 	nr_pages = vm->nr_pages;
4939 	spin_unlock(&vn->busy.lock);
4940 
4941 	pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
4942 		nr_pages, addr, caller);
4943 
4944 	return true;
4945 }
4946 #endif
4947 
4948 #ifdef CONFIG_PROC_FS
4949 static void show_numa_info(struct seq_file *m, struct vm_struct *v)
4950 {
4951 	if (IS_ENABLED(CONFIG_NUMA)) {
4952 		unsigned int nr, *counters = m->private;
4953 		unsigned int step = 1U << vm_area_page_order(v);
4954 
4955 		if (!counters)
4956 			return;
4957 
4958 		if (v->flags & VM_UNINITIALIZED)
4959 			return;
4960 		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
4961 		smp_rmb();
4962 
4963 		memset(counters, 0, nr_node_ids * sizeof(unsigned int));
4964 
4965 		for (nr = 0; nr < v->nr_pages; nr += step)
4966 			counters[page_to_nid(v->pages[nr])] += step;
4967 		for_each_node_state(nr, N_HIGH_MEMORY)
4968 			if (counters[nr])
4969 				seq_printf(m, " N%u=%u", nr, counters[nr]);
4970 	}
4971 }
4972 
4973 static void show_purge_info(struct seq_file *m)
4974 {
4975 	struct vmap_node *vn;
4976 	struct vmap_area *va;
4977 	int i;
4978 
4979 	for (i = 0; i < nr_vmap_nodes; i++) {
4980 		vn = &vmap_nodes[i];
4981 
4982 		spin_lock(&vn->lazy.lock);
4983 		list_for_each_entry(va, &vn->lazy.head, list) {
4984 			seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
4985 				(void *)va->va_start, (void *)va->va_end,
4986 				va_size(va));
4987 		}
4988 		spin_unlock(&vn->lazy.lock);
4989 	}
4990 }
4991 
4992 static int vmalloc_info_show(struct seq_file *m, void *p)
4993 {
4994 	struct vmap_node *vn;
4995 	struct vmap_area *va;
4996 	struct vm_struct *v;
4997 	int i;
4998 
4999 	for (i = 0; i < nr_vmap_nodes; i++) {
5000 		vn = &vmap_nodes[i];
5001 
5002 		spin_lock(&vn->busy.lock);
5003 		list_for_each_entry(va, &vn->busy.head, list) {
5004 			if (!va->vm) {
5005 				if (va->flags & VMAP_RAM)
5006 					seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
5007 						(void *)va->va_start, (void *)va->va_end,
5008 						va_size(va));
5009 
5010 				continue;
5011 			}
5012 
5013 			v = va->vm;
5014 
5015 			seq_printf(m, "0x%pK-0x%pK %7ld",
5016 				v->addr, v->addr + v->size, v->size);
5017 
5018 			if (v->caller)
5019 				seq_printf(m, " %pS", v->caller);
5020 
5021 			if (v->nr_pages)
5022 				seq_printf(m, " pages=%d", v->nr_pages);
5023 
5024 			if (v->phys_addr)
5025 				seq_printf(m, " phys=%pa", &v->phys_addr);
5026 
5027 			if (v->flags & VM_IOREMAP)
5028 				seq_puts(m, " ioremap");
5029 
5030 			if (v->flags & VM_SPARSE)
5031 				seq_puts(m, " sparse");
5032 
5033 			if (v->flags & VM_ALLOC)
5034 				seq_puts(m, " vmalloc");
5035 
5036 			if (v->flags & VM_MAP)
5037 				seq_puts(m, " vmap");
5038 
5039 			if (v->flags & VM_USERMAP)
5040 				seq_puts(m, " user");
5041 
5042 			if (v->flags & VM_DMA_COHERENT)
5043 				seq_puts(m, " dma-coherent");
5044 
5045 			if (is_vmalloc_addr(v->pages))
5046 				seq_puts(m, " vpages");
5047 
5048 			show_numa_info(m, v);
5049 			seq_putc(m, '\n');
5050 		}
5051 		spin_unlock(&vn->busy.lock);
5052 	}
5053 
5054 	/*
5055 	 * As a final step, dump "unpurged" areas.
5056 	 */
5057 	show_purge_info(m);
5058 	return 0;
5059 }
5060 
5061 static int __init proc_vmalloc_init(void)
5062 {
5063 	void *priv_data = NULL;
5064 
5065 	if (IS_ENABLED(CONFIG_NUMA))
5066 		priv_data = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
5067 
5068 	proc_create_single_data("vmallocinfo",
5069 		0400, NULL, vmalloc_info_show, priv_data);
5070 
5071 	return 0;
5072 }
5073 module_init(proc_vmalloc_init);
5074 
5075 #endif
5076 
5077 static void __init vmap_init_free_space(void)
5078 {
5079 	unsigned long vmap_start = 1;
5080 	const unsigned long vmap_end = ULONG_MAX;
5081 	struct vmap_area *free;
5082 	struct vm_struct *busy;
5083 
5084 	/*
5085 	 *     B     F     B     B     B     F
5086 	 * -|-----|.....|-----|-----|-----|.....|-
5087 	 *  |           The KVA space           |
5088 	 *  |<--------------------------------->|
5089 	 */
5090 	for (busy = vmlist; busy; busy = busy->next) {
5091 		if ((unsigned long) busy->addr - vmap_start > 0) {
5092 			free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
5093 			if (!WARN_ON_ONCE(!free)) {
5094 				free->va_start = vmap_start;
5095 				free->va_end = (unsigned long) busy->addr;
5096 
5097 				insert_vmap_area_augment(free, NULL,
5098 					&free_vmap_area_root,
5099 						&free_vmap_area_list);
5100 			}
5101 		}
5102 
5103 		vmap_start = (unsigned long) busy->addr + busy->size;
5104 	}
5105 
5106 	if (vmap_end - vmap_start > 0) {
5107 		free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
5108 		if (!WARN_ON_ONCE(!free)) {
5109 			free->va_start = vmap_start;
5110 			free->va_end = vmap_end;
5111 
5112 			insert_vmap_area_augment(free, NULL,
5113 				&free_vmap_area_root,
5114 					&free_vmap_area_list);
5115 		}
5116 	}
5117 }
5118 
5119 static void vmap_init_nodes(void)
5120 {
5121 	struct vmap_node *vn;
5122 	int i, n;
5123 
5124 #if BITS_PER_LONG == 64
5125 	/*
5126 	 * A high threshold of max nodes is fixed and bound to 128,
5127 	 * thus a scale factor is 1 for systems where number of cores
5128 	 * are less or equal to specified threshold.
5129 	 *
5130 	 * As for NUMA-aware notes. For bigger systems, for example
5131 	 * NUMA with multi-sockets, where we can end-up with thousands
5132 	 * of cores in total, a "sub-numa-clustering" should be added.
5133 	 *
5134 	 * In this case a NUMA domain is considered as a single entity
5135 	 * with dedicated sub-nodes in it which describe one group or
5136 	 * set of cores. Therefore a per-domain purging is supposed to
5137 	 * be added as well as a per-domain balancing.
5138 	 */
5139 	n = clamp_t(unsigned int, num_possible_cpus(), 1, 128);
5140 
5141 	if (n > 1) {
5142 		vn = kmalloc_array(n, sizeof(*vn), GFP_NOWAIT | __GFP_NOWARN);
5143 		if (vn) {
5144 			/* Node partition is 16 pages. */
5145 			vmap_zone_size = (1 << 4) * PAGE_SIZE;
5146 			nr_vmap_nodes = n;
5147 			vmap_nodes = vn;
5148 		} else {
5149 			pr_err("Failed to allocate an array. Disable a node layer\n");
5150 		}
5151 	}
5152 #endif
5153 
5154 	for (n = 0; n < nr_vmap_nodes; n++) {
5155 		vn = &vmap_nodes[n];
5156 		vn->busy.root = RB_ROOT;
5157 		INIT_LIST_HEAD(&vn->busy.head);
5158 		spin_lock_init(&vn->busy.lock);
5159 
5160 		vn->lazy.root = RB_ROOT;
5161 		INIT_LIST_HEAD(&vn->lazy.head);
5162 		spin_lock_init(&vn->lazy.lock);
5163 
5164 		for (i = 0; i < MAX_VA_SIZE_PAGES; i++) {
5165 			INIT_LIST_HEAD(&vn->pool[i].head);
5166 			WRITE_ONCE(vn->pool[i].len, 0);
5167 		}
5168 
5169 		spin_lock_init(&vn->pool_lock);
5170 	}
5171 }
5172 
5173 static unsigned long
5174 vmap_node_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
5175 {
5176 	unsigned long count;
5177 	struct vmap_node *vn;
5178 	int i, j;
5179 
5180 	for (count = 0, i = 0; i < nr_vmap_nodes; i++) {
5181 		vn = &vmap_nodes[i];
5182 
5183 		for (j = 0; j < MAX_VA_SIZE_PAGES; j++)
5184 			count += READ_ONCE(vn->pool[j].len);
5185 	}
5186 
5187 	return count ? count : SHRINK_EMPTY;
5188 }
5189 
5190 static unsigned long
5191 vmap_node_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
5192 {
5193 	int i;
5194 
5195 	for (i = 0; i < nr_vmap_nodes; i++)
5196 		decay_va_pool_node(&vmap_nodes[i], true);
5197 
5198 	return SHRINK_STOP;
5199 }
5200 
5201 void __init vmalloc_init(void)
5202 {
5203 	struct shrinker *vmap_node_shrinker;
5204 	struct vmap_area *va;
5205 	struct vmap_node *vn;
5206 	struct vm_struct *tmp;
5207 	int i;
5208 
5209 	/*
5210 	 * Create the cache for vmap_area objects.
5211 	 */
5212 	vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);
5213 
5214 	for_each_possible_cpu(i) {
5215 		struct vmap_block_queue *vbq;
5216 		struct vfree_deferred *p;
5217 
5218 		vbq = &per_cpu(vmap_block_queue, i);
5219 		spin_lock_init(&vbq->lock);
5220 		INIT_LIST_HEAD(&vbq->free);
5221 		p = &per_cpu(vfree_deferred, i);
5222 		init_llist_head(&p->list);
5223 		INIT_WORK(&p->wq, delayed_vfree_work);
5224 		xa_init(&vbq->vmap_blocks);
5225 	}
5226 
5227 	/*
5228 	 * Setup nodes before importing vmlist.
5229 	 */
5230 	vmap_init_nodes();
5231 
5232 	/* Import existing vmlist entries. */
5233 	for (tmp = vmlist; tmp; tmp = tmp->next) {
5234 		va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
5235 		if (WARN_ON_ONCE(!va))
5236 			continue;
5237 
5238 		va->va_start = (unsigned long)tmp->addr;
5239 		va->va_end = va->va_start + tmp->size;
5240 		va->vm = tmp;
5241 
5242 		vn = addr_to_node(va->va_start);
5243 		insert_vmap_area(va, &vn->busy.root, &vn->busy.head);
5244 	}
5245 
5246 	/*
5247 	 * Now we can initialize a free vmap space.
5248 	 */
5249 	vmap_init_free_space();
5250 	vmap_initialized = true;
5251 
5252 	vmap_node_shrinker = shrinker_alloc(0, "vmap-node");
5253 	if (!vmap_node_shrinker) {
5254 		pr_err("Failed to allocate vmap-node shrinker!\n");
5255 		return;
5256 	}
5257 
5258 	vmap_node_shrinker->count_objects = vmap_node_shrink_count;
5259 	vmap_node_shrinker->scan_objects = vmap_node_shrink_scan;
5260 	shrinker_register(vmap_node_shrinker);
5261 }
5262