xref: /linux/mm/vmalloc.c (revision 94e48d6aafef23143f92eadd010c505c49487576)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  Copyright (C) 1993  Linus Torvalds
4  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
5  *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
6  *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
7  *  Numa awareness, Christoph Lameter, SGI, June 2005
8  *  Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019
9  */
10 
11 #include <linux/vmalloc.h>
12 #include <linux/mm.h>
13 #include <linux/module.h>
14 #include <linux/highmem.h>
15 #include <linux/sched/signal.h>
16 #include <linux/slab.h>
17 #include <linux/spinlock.h>
18 #include <linux/interrupt.h>
19 #include <linux/proc_fs.h>
20 #include <linux/seq_file.h>
21 #include <linux/set_memory.h>
22 #include <linux/debugobjects.h>
23 #include <linux/kallsyms.h>
24 #include <linux/list.h>
25 #include <linux/notifier.h>
26 #include <linux/rbtree.h>
27 #include <linux/xarray.h>
28 #include <linux/rcupdate.h>
29 #include <linux/pfn.h>
30 #include <linux/kmemleak.h>
31 #include <linux/atomic.h>
32 #include <linux/compiler.h>
33 #include <linux/llist.h>
34 #include <linux/bitops.h>
35 #include <linux/rbtree_augmented.h>
36 #include <linux/overflow.h>
37 #include <linux/pgtable.h>
38 #include <linux/uaccess.h>
39 #include <asm/tlbflush.h>
40 #include <asm/shmparam.h>
41 
42 #include "internal.h"
43 #include "pgalloc-track.h"
44 
45 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
46 static bool __ro_after_init vmap_allow_huge = true;
47 
48 static int __init set_nohugevmalloc(char *str)
49 {
50 	vmap_allow_huge = false;
51 	return 0;
52 }
53 early_param("nohugevmalloc", set_nohugevmalloc);
54 #else /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
55 static const bool vmap_allow_huge = false;
56 #endif	/* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
57 
58 bool is_vmalloc_addr(const void *x)
59 {
60 	unsigned long addr = (unsigned long)x;
61 
62 	return addr >= VMALLOC_START && addr < VMALLOC_END;
63 }
64 EXPORT_SYMBOL(is_vmalloc_addr);
65 
66 struct vfree_deferred {
67 	struct llist_head list;
68 	struct work_struct wq;
69 };
70 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
71 
72 static void __vunmap(const void *, int);
73 
74 static void free_work(struct work_struct *w)
75 {
76 	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
77 	struct llist_node *t, *llnode;
78 
79 	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
80 		__vunmap((void *)llnode, 1);
81 }
82 
83 /*** Page table manipulation functions ***/
84 static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
85 			phys_addr_t phys_addr, pgprot_t prot,
86 			pgtbl_mod_mask *mask)
87 {
88 	pte_t *pte;
89 	u64 pfn;
90 
91 	pfn = phys_addr >> PAGE_SHIFT;
92 	pte = pte_alloc_kernel_track(pmd, addr, mask);
93 	if (!pte)
94 		return -ENOMEM;
95 	do {
96 		BUG_ON(!pte_none(*pte));
97 		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
98 		pfn++;
99 	} while (pte++, addr += PAGE_SIZE, addr != end);
100 	*mask |= PGTBL_PTE_MODIFIED;
101 	return 0;
102 }
103 
104 static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
105 			phys_addr_t phys_addr, pgprot_t prot,
106 			unsigned int max_page_shift)
107 {
108 	if (max_page_shift < PMD_SHIFT)
109 		return 0;
110 
111 	if (!arch_vmap_pmd_supported(prot))
112 		return 0;
113 
114 	if ((end - addr) != PMD_SIZE)
115 		return 0;
116 
117 	if (!IS_ALIGNED(addr, PMD_SIZE))
118 		return 0;
119 
120 	if (!IS_ALIGNED(phys_addr, PMD_SIZE))
121 		return 0;
122 
123 	if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
124 		return 0;
125 
126 	return pmd_set_huge(pmd, phys_addr, prot);
127 }
128 
129 static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
130 			phys_addr_t phys_addr, pgprot_t prot,
131 			unsigned int max_page_shift, pgtbl_mod_mask *mask)
132 {
133 	pmd_t *pmd;
134 	unsigned long next;
135 
136 	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
137 	if (!pmd)
138 		return -ENOMEM;
139 	do {
140 		next = pmd_addr_end(addr, end);
141 
142 		if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot,
143 					max_page_shift)) {
144 			*mask |= PGTBL_PMD_MODIFIED;
145 			continue;
146 		}
147 
148 		if (vmap_pte_range(pmd, addr, next, phys_addr, prot, mask))
149 			return -ENOMEM;
150 	} while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
151 	return 0;
152 }
153 
154 static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
155 			phys_addr_t phys_addr, pgprot_t prot,
156 			unsigned int max_page_shift)
157 {
158 	if (max_page_shift < PUD_SHIFT)
159 		return 0;
160 
161 	if (!arch_vmap_pud_supported(prot))
162 		return 0;
163 
164 	if ((end - addr) != PUD_SIZE)
165 		return 0;
166 
167 	if (!IS_ALIGNED(addr, PUD_SIZE))
168 		return 0;
169 
170 	if (!IS_ALIGNED(phys_addr, PUD_SIZE))
171 		return 0;
172 
173 	if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
174 		return 0;
175 
176 	return pud_set_huge(pud, phys_addr, prot);
177 }
178 
179 static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
180 			phys_addr_t phys_addr, pgprot_t prot,
181 			unsigned int max_page_shift, pgtbl_mod_mask *mask)
182 {
183 	pud_t *pud;
184 	unsigned long next;
185 
186 	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
187 	if (!pud)
188 		return -ENOMEM;
189 	do {
190 		next = pud_addr_end(addr, end);
191 
192 		if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot,
193 					max_page_shift)) {
194 			*mask |= PGTBL_PUD_MODIFIED;
195 			continue;
196 		}
197 
198 		if (vmap_pmd_range(pud, addr, next, phys_addr, prot,
199 					max_page_shift, mask))
200 			return -ENOMEM;
201 	} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
202 	return 0;
203 }
204 
205 static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
206 			phys_addr_t phys_addr, pgprot_t prot,
207 			unsigned int max_page_shift)
208 {
209 	if (max_page_shift < P4D_SHIFT)
210 		return 0;
211 
212 	if (!arch_vmap_p4d_supported(prot))
213 		return 0;
214 
215 	if ((end - addr) != P4D_SIZE)
216 		return 0;
217 
218 	if (!IS_ALIGNED(addr, P4D_SIZE))
219 		return 0;
220 
221 	if (!IS_ALIGNED(phys_addr, P4D_SIZE))
222 		return 0;
223 
224 	if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
225 		return 0;
226 
227 	return p4d_set_huge(p4d, phys_addr, prot);
228 }
229 
230 static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
231 			phys_addr_t phys_addr, pgprot_t prot,
232 			unsigned int max_page_shift, pgtbl_mod_mask *mask)
233 {
234 	p4d_t *p4d;
235 	unsigned long next;
236 
237 	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
238 	if (!p4d)
239 		return -ENOMEM;
240 	do {
241 		next = p4d_addr_end(addr, end);
242 
243 		if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot,
244 					max_page_shift)) {
245 			*mask |= PGTBL_P4D_MODIFIED;
246 			continue;
247 		}
248 
249 		if (vmap_pud_range(p4d, addr, next, phys_addr, prot,
250 					max_page_shift, mask))
251 			return -ENOMEM;
252 	} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
253 	return 0;
254 }
255 
256 static int vmap_range_noflush(unsigned long addr, unsigned long end,
257 			phys_addr_t phys_addr, pgprot_t prot,
258 			unsigned int max_page_shift)
259 {
260 	pgd_t *pgd;
261 	unsigned long start;
262 	unsigned long next;
263 	int err;
264 	pgtbl_mod_mask mask = 0;
265 
266 	might_sleep();
267 	BUG_ON(addr >= end);
268 
269 	start = addr;
270 	pgd = pgd_offset_k(addr);
271 	do {
272 		next = pgd_addr_end(addr, end);
273 		err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
274 					max_page_shift, &mask);
275 		if (err)
276 			break;
277 	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);
278 
279 	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
280 		arch_sync_kernel_mappings(start, end);
281 
282 	return err;
283 }
284 
285 int vmap_range(unsigned long addr, unsigned long end,
286 			phys_addr_t phys_addr, pgprot_t prot,
287 			unsigned int max_page_shift)
288 {
289 	int err;
290 
291 	err = vmap_range_noflush(addr, end, phys_addr, prot, max_page_shift);
292 	flush_cache_vmap(addr, end);
293 
294 	return err;
295 }
296 
297 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
298 			     pgtbl_mod_mask *mask)
299 {
300 	pte_t *pte;
301 
302 	pte = pte_offset_kernel(pmd, addr);
303 	do {
304 		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
305 		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
306 	} while (pte++, addr += PAGE_SIZE, addr != end);
307 	*mask |= PGTBL_PTE_MODIFIED;
308 }
309 
310 static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
311 			     pgtbl_mod_mask *mask)
312 {
313 	pmd_t *pmd;
314 	unsigned long next;
315 	int cleared;
316 
317 	pmd = pmd_offset(pud, addr);
318 	do {
319 		next = pmd_addr_end(addr, end);
320 
321 		cleared = pmd_clear_huge(pmd);
322 		if (cleared || pmd_bad(*pmd))
323 			*mask |= PGTBL_PMD_MODIFIED;
324 
325 		if (cleared)
326 			continue;
327 		if (pmd_none_or_clear_bad(pmd))
328 			continue;
329 		vunmap_pte_range(pmd, addr, next, mask);
330 
331 		cond_resched();
332 	} while (pmd++, addr = next, addr != end);
333 }
334 
335 static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
336 			     pgtbl_mod_mask *mask)
337 {
338 	pud_t *pud;
339 	unsigned long next;
340 	int cleared;
341 
342 	pud = pud_offset(p4d, addr);
343 	do {
344 		next = pud_addr_end(addr, end);
345 
346 		cleared = pud_clear_huge(pud);
347 		if (cleared || pud_bad(*pud))
348 			*mask |= PGTBL_PUD_MODIFIED;
349 
350 		if (cleared)
351 			continue;
352 		if (pud_none_or_clear_bad(pud))
353 			continue;
354 		vunmap_pmd_range(pud, addr, next, mask);
355 	} while (pud++, addr = next, addr != end);
356 }
357 
358 static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
359 			     pgtbl_mod_mask *mask)
360 {
361 	p4d_t *p4d;
362 	unsigned long next;
363 	int cleared;
364 
365 	p4d = p4d_offset(pgd, addr);
366 	do {
367 		next = p4d_addr_end(addr, end);
368 
369 		cleared = p4d_clear_huge(p4d);
370 		if (cleared || p4d_bad(*p4d))
371 			*mask |= PGTBL_P4D_MODIFIED;
372 
373 		if (cleared)
374 			continue;
375 		if (p4d_none_or_clear_bad(p4d))
376 			continue;
377 		vunmap_pud_range(p4d, addr, next, mask);
378 	} while (p4d++, addr = next, addr != end);
379 }
380 
381 /*
382  * vunmap_range_noflush is similar to vunmap_range, but does not
383  * flush caches or TLBs.
384  *
385  * The caller is responsible for calling flush_cache_vmap() before calling
386  * this function, and flush_tlb_kernel_range after it has returned
387  * successfully (and before the addresses are expected to cause a page fault
388  * or be re-mapped for something else, if TLB flushes are being delayed or
389  * coalesced).
390  *
391  * This is an internal function only. Do not use outside mm/.
392  */
393 void vunmap_range_noflush(unsigned long start, unsigned long end)
394 {
395 	unsigned long next;
396 	pgd_t *pgd;
397 	unsigned long addr = start;
398 	pgtbl_mod_mask mask = 0;
399 
400 	BUG_ON(addr >= end);
401 	pgd = pgd_offset_k(addr);
402 	do {
403 		next = pgd_addr_end(addr, end);
404 		if (pgd_bad(*pgd))
405 			mask |= PGTBL_PGD_MODIFIED;
406 		if (pgd_none_or_clear_bad(pgd))
407 			continue;
408 		vunmap_p4d_range(pgd, addr, next, &mask);
409 	} while (pgd++, addr = next, addr != end);
410 
411 	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
412 		arch_sync_kernel_mappings(start, end);
413 }
414 
415 /**
416  * vunmap_range - unmap kernel virtual addresses
417  * @addr: start of the VM area to unmap
418  * @end: end of the VM area to unmap (non-inclusive)
419  *
420  * Clears any present PTEs in the virtual address range, flushes TLBs and
421  * caches. Any subsequent access to the address before it has been re-mapped
422  * is a kernel bug.
423  */
424 void vunmap_range(unsigned long addr, unsigned long end)
425 {
426 	flush_cache_vunmap(addr, end);
427 	vunmap_range_noflush(addr, end);
428 	flush_tlb_kernel_range(addr, end);
429 }
430 
431 static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
432 		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
433 		pgtbl_mod_mask *mask)
434 {
435 	pte_t *pte;
436 
437 	/*
438 	 * nr is a running index into the array which helps higher level
439 	 * callers keep track of where we're up to.
440 	 */
441 
442 	pte = pte_alloc_kernel_track(pmd, addr, mask);
443 	if (!pte)
444 		return -ENOMEM;
445 	do {
446 		struct page *page = pages[*nr];
447 
448 		if (WARN_ON(!pte_none(*pte)))
449 			return -EBUSY;
450 		if (WARN_ON(!page))
451 			return -ENOMEM;
452 		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
453 		(*nr)++;
454 	} while (pte++, addr += PAGE_SIZE, addr != end);
455 	*mask |= PGTBL_PTE_MODIFIED;
456 	return 0;
457 }
458 
459 static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
460 		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
461 		pgtbl_mod_mask *mask)
462 {
463 	pmd_t *pmd;
464 	unsigned long next;
465 
466 	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
467 	if (!pmd)
468 		return -ENOMEM;
469 	do {
470 		next = pmd_addr_end(addr, end);
471 		if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask))
472 			return -ENOMEM;
473 	} while (pmd++, addr = next, addr != end);
474 	return 0;
475 }
476 
477 static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr,
478 		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
479 		pgtbl_mod_mask *mask)
480 {
481 	pud_t *pud;
482 	unsigned long next;
483 
484 	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
485 	if (!pud)
486 		return -ENOMEM;
487 	do {
488 		next = pud_addr_end(addr, end);
489 		if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask))
490 			return -ENOMEM;
491 	} while (pud++, addr = next, addr != end);
492 	return 0;
493 }
494 
495 static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr,
496 		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
497 		pgtbl_mod_mask *mask)
498 {
499 	p4d_t *p4d;
500 	unsigned long next;
501 
502 	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
503 	if (!p4d)
504 		return -ENOMEM;
505 	do {
506 		next = p4d_addr_end(addr, end);
507 		if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask))
508 			return -ENOMEM;
509 	} while (p4d++, addr = next, addr != end);
510 	return 0;
511 }
512 
513 static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
514 		pgprot_t prot, struct page **pages)
515 {
516 	unsigned long start = addr;
517 	pgd_t *pgd;
518 	unsigned long next;
519 	int err = 0;
520 	int nr = 0;
521 	pgtbl_mod_mask mask = 0;
522 
523 	BUG_ON(addr >= end);
524 	pgd = pgd_offset_k(addr);
525 	do {
526 		next = pgd_addr_end(addr, end);
527 		if (pgd_bad(*pgd))
528 			mask |= PGTBL_PGD_MODIFIED;
529 		err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
530 		if (err)
531 			return err;
532 	} while (pgd++, addr = next, addr != end);
533 
534 	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
535 		arch_sync_kernel_mappings(start, end);
536 
537 	return 0;
538 }
539 
540 /*
541  * vmap_pages_range_noflush is similar to vmap_pages_range, but does not
542  * flush caches.
543  *
544  * The caller is responsible for calling flush_cache_vmap() after this
545  * function returns successfully and before the addresses are accessed.
546  *
547  * This is an internal function only. Do not use outside mm/.
548  */
549 int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
550 		pgprot_t prot, struct page **pages, unsigned int page_shift)
551 {
552 	unsigned int i, nr = (end - addr) >> PAGE_SHIFT;
553 
554 	WARN_ON(page_shift < PAGE_SHIFT);
555 
556 	if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) ||
557 			page_shift == PAGE_SHIFT)
558 		return vmap_small_pages_range_noflush(addr, end, prot, pages);
559 
560 	for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) {
561 		int err;
562 
563 		err = vmap_range_noflush(addr, addr + (1UL << page_shift),
564 					__pa(page_address(pages[i])), prot,
565 					page_shift);
566 		if (err)
567 			return err;
568 
569 		addr += 1UL << page_shift;
570 	}
571 
572 	return 0;
573 }
574 
575 /**
576  * vmap_pages_range - map pages to a kernel virtual address
577  * @addr: start of the VM area to map
578  * @end: end of the VM area to map (non-inclusive)
579  * @prot: page protection flags to use
580  * @pages: pages to map (always PAGE_SIZE pages)
581  * @page_shift: maximum shift that the pages may be mapped with, @pages must
582  * be aligned and contiguous up to at least this shift.
583  *
584  * RETURNS:
585  * 0 on success, -errno on failure.
586  */
587 static int vmap_pages_range(unsigned long addr, unsigned long end,
588 		pgprot_t prot, struct page **pages, unsigned int page_shift)
589 {
590 	int err;
591 
592 	err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
593 	flush_cache_vmap(addr, end);
594 	return err;
595 }
596 
597 int is_vmalloc_or_module_addr(const void *x)
598 {
599 	/*
600 	 * ARM, x86-64 and sparc64 put modules in a special place,
601 	 * and fall back on vmalloc() if that fails. Others
602 	 * just put it in the vmalloc space.
603 	 */
604 #if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
605 	unsigned long addr = (unsigned long)x;
606 	if (addr >= MODULES_VADDR && addr < MODULES_END)
607 		return 1;
608 #endif
609 	return is_vmalloc_addr(x);
610 }
611 
612 /*
613  * Walk a vmap address to the struct page it maps. Huge vmap mappings will
614  * return the tail page that corresponds to the base page address, which
615  * matches small vmap mappings.
616  */
617 struct page *vmalloc_to_page(const void *vmalloc_addr)
618 {
619 	unsigned long addr = (unsigned long) vmalloc_addr;
620 	struct page *page = NULL;
621 	pgd_t *pgd = pgd_offset_k(addr);
622 	p4d_t *p4d;
623 	pud_t *pud;
624 	pmd_t *pmd;
625 	pte_t *ptep, pte;
626 
627 	/*
628 	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
629 	 * architectures that do not vmalloc module space
630 	 */
631 	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
632 
633 	if (pgd_none(*pgd))
634 		return NULL;
635 	if (WARN_ON_ONCE(pgd_leaf(*pgd)))
636 		return NULL; /* XXX: no allowance for huge pgd */
637 	if (WARN_ON_ONCE(pgd_bad(*pgd)))
638 		return NULL;
639 
640 	p4d = p4d_offset(pgd, addr);
641 	if (p4d_none(*p4d))
642 		return NULL;
643 	if (p4d_leaf(*p4d))
644 		return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
645 	if (WARN_ON_ONCE(p4d_bad(*p4d)))
646 		return NULL;
647 
648 	pud = pud_offset(p4d, addr);
649 	if (pud_none(*pud))
650 		return NULL;
651 	if (pud_leaf(*pud))
652 		return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
653 	if (WARN_ON_ONCE(pud_bad(*pud)))
654 		return NULL;
655 
656 	pmd = pmd_offset(pud, addr);
657 	if (pmd_none(*pmd))
658 		return NULL;
659 	if (pmd_leaf(*pmd))
660 		return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
661 	if (WARN_ON_ONCE(pmd_bad(*pmd)))
662 		return NULL;
663 
664 	ptep = pte_offset_map(pmd, addr);
665 	pte = *ptep;
666 	if (pte_present(pte))
667 		page = pte_page(pte);
668 	pte_unmap(ptep);
669 
670 	return page;
671 }
672 EXPORT_SYMBOL(vmalloc_to_page);
673 
674 /*
675  * Map a vmalloc()-space virtual address to the physical page frame number.
676  */
677 unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
678 {
679 	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
680 }
681 EXPORT_SYMBOL(vmalloc_to_pfn);
682 
683 
684 /*** Global kva allocator ***/
685 
686 #define DEBUG_AUGMENT_PROPAGATE_CHECK 0
687 #define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0
688 
689 
690 static DEFINE_SPINLOCK(vmap_area_lock);
691 static DEFINE_SPINLOCK(free_vmap_area_lock);
692 /* Export for kexec only */
693 LIST_HEAD(vmap_area_list);
694 static struct rb_root vmap_area_root = RB_ROOT;
695 static bool vmap_initialized __read_mostly;
696 
697 static struct rb_root purge_vmap_area_root = RB_ROOT;
698 static LIST_HEAD(purge_vmap_area_list);
699 static DEFINE_SPINLOCK(purge_vmap_area_lock);
700 
701 /*
702  * This kmem_cache is used for vmap_area objects. Instead of
703  * allocating from slab we reuse an object from this cache to
704  * make things faster. Especially in "no edge" splitting of
705  * free block.
706  */
707 static struct kmem_cache *vmap_area_cachep;
708 
709 /*
710  * This linked list is used in pair with free_vmap_area_root.
711  * It gives O(1) access to prev/next to perform fast coalescing.
712  */
713 static LIST_HEAD(free_vmap_area_list);
714 
715 /*
716  * This augment red-black tree represents the free vmap space.
717  * All vmap_area objects in this tree are sorted by va->va_start
718  * address. It is used for allocation and merging when a vmap
719  * object is released.
720  *
721  * Each vmap_area node contains a maximum available free block
722  * of its sub-tree, right or left. Therefore it is possible to
723  * find a lowest match of free area.
724  */
725 static struct rb_root free_vmap_area_root = RB_ROOT;
726 
727 /*
728  * Preload a CPU with one object for "no edge" split case. The
729  * aim is to get rid of allocations from the atomic context, thus
730  * to use more permissive allocation masks.
731  */
732 static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);
733 
734 static __always_inline unsigned long
735 va_size(struct vmap_area *va)
736 {
737 	return (va->va_end - va->va_start);
738 }
739 
740 static __always_inline unsigned long
741 get_subtree_max_size(struct rb_node *node)
742 {
743 	struct vmap_area *va;
744 
745 	va = rb_entry_safe(node, struct vmap_area, rb_node);
746 	return va ? va->subtree_max_size : 0;
747 }
748 
749 /*
750  * Gets called when remove the node and rotate.
751  */
752 static __always_inline unsigned long
753 compute_subtree_max_size(struct vmap_area *va)
754 {
755 	return max3(va_size(va),
756 		get_subtree_max_size(va->rb_node.rb_left),
757 		get_subtree_max_size(va->rb_node.rb_right));
758 }
759 
760 RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
761 	struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)
762 
763 static void purge_vmap_area_lazy(void);
764 static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
765 static unsigned long lazy_max_pages(void);
766 
767 static atomic_long_t nr_vmalloc_pages;
768 
769 unsigned long vmalloc_nr_pages(void)
770 {
771 	return atomic_long_read(&nr_vmalloc_pages);
772 }
773 
774 static struct vmap_area *__find_vmap_area(unsigned long addr)
775 {
776 	struct rb_node *n = vmap_area_root.rb_node;
777 
778 	while (n) {
779 		struct vmap_area *va;
780 
781 		va = rb_entry(n, struct vmap_area, rb_node);
782 		if (addr < va->va_start)
783 			n = n->rb_left;
784 		else if (addr >= va->va_end)
785 			n = n->rb_right;
786 		else
787 			return va;
788 	}
789 
790 	return NULL;
791 }
792 
793 /*
794  * This function returns back addresses of parent node
795  * and its left or right link for further processing.
796  *
797  * Otherwise NULL is returned. In that case all further
798  * steps regarding inserting of conflicting overlap range
799  * have to be declined and actually considered as a bug.
800  */
801 static __always_inline struct rb_node **
802 find_va_links(struct vmap_area *va,
803 	struct rb_root *root, struct rb_node *from,
804 	struct rb_node **parent)
805 {
806 	struct vmap_area *tmp_va;
807 	struct rb_node **link;
808 
809 	if (root) {
810 		link = &root->rb_node;
811 		if (unlikely(!*link)) {
812 			*parent = NULL;
813 			return link;
814 		}
815 	} else {
816 		link = &from;
817 	}
818 
819 	/*
820 	 * Go to the bottom of the tree. When we hit the last point
821 	 * we end up with parent rb_node and correct direction, i name
822 	 * it link, where the new va->rb_node will be attached to.
823 	 */
824 	do {
825 		tmp_va = rb_entry(*link, struct vmap_area, rb_node);
826 
827 		/*
828 		 * During the traversal we also do some sanity check.
829 		 * Trigger the BUG() if there are sides(left/right)
830 		 * or full overlaps.
831 		 */
832 		if (va->va_start < tmp_va->va_end &&
833 				va->va_end <= tmp_va->va_start)
834 			link = &(*link)->rb_left;
835 		else if (va->va_end > tmp_va->va_start &&
836 				va->va_start >= tmp_va->va_end)
837 			link = &(*link)->rb_right;
838 		else {
839 			WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n",
840 				va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);
841 
842 			return NULL;
843 		}
844 	} while (*link);
845 
846 	*parent = &tmp_va->rb_node;
847 	return link;
848 }
849 
850 static __always_inline struct list_head *
851 get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
852 {
853 	struct list_head *list;
854 
855 	if (unlikely(!parent))
856 		/*
857 		 * The red-black tree where we try to find VA neighbors
858 		 * before merging or inserting is empty, i.e. it means
859 		 * there is no free vmap space. Normally it does not
860 		 * happen but we handle this case anyway.
861 		 */
862 		return NULL;
863 
864 	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
865 	return (&parent->rb_right == link ? list->next : list);
866 }
867 
868 static __always_inline void
869 link_va(struct vmap_area *va, struct rb_root *root,
870 	struct rb_node *parent, struct rb_node **link, struct list_head *head)
871 {
872 	/*
873 	 * VA is still not in the list, but we can
874 	 * identify its future previous list_head node.
875 	 */
876 	if (likely(parent)) {
877 		head = &rb_entry(parent, struct vmap_area, rb_node)->list;
878 		if (&parent->rb_right != link)
879 			head = head->prev;
880 	}
881 
882 	/* Insert to the rb-tree */
883 	rb_link_node(&va->rb_node, parent, link);
884 	if (root == &free_vmap_area_root) {
885 		/*
886 		 * Some explanation here. Just perform simple insertion
887 		 * to the tree. We do not set va->subtree_max_size to
888 		 * its current size before calling rb_insert_augmented().
889 		 * It is because of we populate the tree from the bottom
890 		 * to parent levels when the node _is_ in the tree.
891 		 *
892 		 * Therefore we set subtree_max_size to zero after insertion,
893 		 * to let __augment_tree_propagate_from() puts everything to
894 		 * the correct order later on.
895 		 */
896 		rb_insert_augmented(&va->rb_node,
897 			root, &free_vmap_area_rb_augment_cb);
898 		va->subtree_max_size = 0;
899 	} else {
900 		rb_insert_color(&va->rb_node, root);
901 	}
902 
903 	/* Address-sort this list */
904 	list_add(&va->list, head);
905 }
906 
907 static __always_inline void
908 unlink_va(struct vmap_area *va, struct rb_root *root)
909 {
910 	if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
911 		return;
912 
913 	if (root == &free_vmap_area_root)
914 		rb_erase_augmented(&va->rb_node,
915 			root, &free_vmap_area_rb_augment_cb);
916 	else
917 		rb_erase(&va->rb_node, root);
918 
919 	list_del(&va->list);
920 	RB_CLEAR_NODE(&va->rb_node);
921 }
922 
923 #if DEBUG_AUGMENT_PROPAGATE_CHECK
924 static void
925 augment_tree_propagate_check(void)
926 {
927 	struct vmap_area *va;
928 	unsigned long computed_size;
929 
930 	list_for_each_entry(va, &free_vmap_area_list, list) {
931 		computed_size = compute_subtree_max_size(va);
932 		if (computed_size != va->subtree_max_size)
933 			pr_emerg("tree is corrupted: %lu, %lu\n",
934 				va_size(va), va->subtree_max_size);
935 	}
936 }
937 #endif
938 
939 /*
940  * This function populates subtree_max_size from bottom to upper
941  * levels starting from VA point. The propagation must be done
942  * when VA size is modified by changing its va_start/va_end. Or
943  * in case of newly inserting of VA to the tree.
944  *
945  * It means that __augment_tree_propagate_from() must be called:
946  * - After VA has been inserted to the tree(free path);
947  * - After VA has been shrunk(allocation path);
948  * - After VA has been increased(merging path).
949  *
950  * Please note that, it does not mean that upper parent nodes
951  * and their subtree_max_size are recalculated all the time up
952  * to the root node.
953  *
954  *       4--8
955  *        /\
956  *       /  \
957  *      /    \
958  *    2--2  8--8
959  *
960  * For example if we modify the node 4, shrinking it to 2, then
961  * no any modification is required. If we shrink the node 2 to 1
962  * its subtree_max_size is updated only, and set to 1. If we shrink
963  * the node 8 to 6, then its subtree_max_size is set to 6 and parent
964  * node becomes 4--6.
965  */
966 static __always_inline void
967 augment_tree_propagate_from(struct vmap_area *va)
968 {
969 	/*
970 	 * Populate the tree from bottom towards the root until
971 	 * the calculated maximum available size of checked node
972 	 * is equal to its current one.
973 	 */
974 	free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL);
975 
976 #if DEBUG_AUGMENT_PROPAGATE_CHECK
977 	augment_tree_propagate_check();
978 #endif
979 }
980 
981 static void
982 insert_vmap_area(struct vmap_area *va,
983 	struct rb_root *root, struct list_head *head)
984 {
985 	struct rb_node **link;
986 	struct rb_node *parent;
987 
988 	link = find_va_links(va, root, NULL, &parent);
989 	if (link)
990 		link_va(va, root, parent, link, head);
991 }
992 
993 static void
994 insert_vmap_area_augment(struct vmap_area *va,
995 	struct rb_node *from, struct rb_root *root,
996 	struct list_head *head)
997 {
998 	struct rb_node **link;
999 	struct rb_node *parent;
1000 
1001 	if (from)
1002 		link = find_va_links(va, NULL, from, &parent);
1003 	else
1004 		link = find_va_links(va, root, NULL, &parent);
1005 
1006 	if (link) {
1007 		link_va(va, root, parent, link, head);
1008 		augment_tree_propagate_from(va);
1009 	}
1010 }
1011 
1012 /*
1013  * Merge de-allocated chunk of VA memory with previous
1014  * and next free blocks. If coalesce is not done a new
1015  * free area is inserted. If VA has been merged, it is
1016  * freed.
1017  *
1018  * Please note, it can return NULL in case of overlap
1019  * ranges, followed by WARN() report. Despite it is a
1020  * buggy behaviour, a system can be alive and keep
1021  * ongoing.
1022  */
1023 static __always_inline struct vmap_area *
1024 merge_or_add_vmap_area(struct vmap_area *va,
1025 	struct rb_root *root, struct list_head *head)
1026 {
1027 	struct vmap_area *sibling;
1028 	struct list_head *next;
1029 	struct rb_node **link;
1030 	struct rb_node *parent;
1031 	bool merged = false;
1032 
1033 	/*
1034 	 * Find a place in the tree where VA potentially will be
1035 	 * inserted, unless it is merged with its sibling/siblings.
1036 	 */
1037 	link = find_va_links(va, root, NULL, &parent);
1038 	if (!link)
1039 		return NULL;
1040 
1041 	/*
1042 	 * Get next node of VA to check if merging can be done.
1043 	 */
1044 	next = get_va_next_sibling(parent, link);
1045 	if (unlikely(next == NULL))
1046 		goto insert;
1047 
1048 	/*
1049 	 * start            end
1050 	 * |                |
1051 	 * |<------VA------>|<-----Next----->|
1052 	 *                  |                |
1053 	 *                  start            end
1054 	 */
1055 	if (next != head) {
1056 		sibling = list_entry(next, struct vmap_area, list);
1057 		if (sibling->va_start == va->va_end) {
1058 			sibling->va_start = va->va_start;
1059 
1060 			/* Free vmap_area object. */
1061 			kmem_cache_free(vmap_area_cachep, va);
1062 
1063 			/* Point to the new merged area. */
1064 			va = sibling;
1065 			merged = true;
1066 		}
1067 	}
1068 
1069 	/*
1070 	 * start            end
1071 	 * |                |
1072 	 * |<-----Prev----->|<------VA------>|
1073 	 *                  |                |
1074 	 *                  start            end
1075 	 */
1076 	if (next->prev != head) {
1077 		sibling = list_entry(next->prev, struct vmap_area, list);
1078 		if (sibling->va_end == va->va_start) {
1079 			/*
1080 			 * If both neighbors are coalesced, it is important
1081 			 * to unlink the "next" node first, followed by merging
1082 			 * with "previous" one. Otherwise the tree might not be
1083 			 * fully populated if a sibling's augmented value is
1084 			 * "normalized" because of rotation operations.
1085 			 */
1086 			if (merged)
1087 				unlink_va(va, root);
1088 
1089 			sibling->va_end = va->va_end;
1090 
1091 			/* Free vmap_area object. */
1092 			kmem_cache_free(vmap_area_cachep, va);
1093 
1094 			/* Point to the new merged area. */
1095 			va = sibling;
1096 			merged = true;
1097 		}
1098 	}
1099 
1100 insert:
1101 	if (!merged)
1102 		link_va(va, root, parent, link, head);
1103 
1104 	return va;
1105 }
1106 
1107 static __always_inline struct vmap_area *
1108 merge_or_add_vmap_area_augment(struct vmap_area *va,
1109 	struct rb_root *root, struct list_head *head)
1110 {
1111 	va = merge_or_add_vmap_area(va, root, head);
1112 	if (va)
1113 		augment_tree_propagate_from(va);
1114 
1115 	return va;
1116 }
1117 
1118 static __always_inline bool
1119 is_within_this_va(struct vmap_area *va, unsigned long size,
1120 	unsigned long align, unsigned long vstart)
1121 {
1122 	unsigned long nva_start_addr;
1123 
1124 	if (va->va_start > vstart)
1125 		nva_start_addr = ALIGN(va->va_start, align);
1126 	else
1127 		nva_start_addr = ALIGN(vstart, align);
1128 
1129 	/* Can be overflowed due to big size or alignment. */
1130 	if (nva_start_addr + size < nva_start_addr ||
1131 			nva_start_addr < vstart)
1132 		return false;
1133 
1134 	return (nva_start_addr + size <= va->va_end);
1135 }
1136 
1137 /*
1138  * Find the first free block(lowest start address) in the tree,
1139  * that will accomplish the request corresponding to passing
1140  * parameters.
1141  */
1142 static __always_inline struct vmap_area *
1143 find_vmap_lowest_match(unsigned long size,
1144 	unsigned long align, unsigned long vstart)
1145 {
1146 	struct vmap_area *va;
1147 	struct rb_node *node;
1148 	unsigned long length;
1149 
1150 	/* Start from the root. */
1151 	node = free_vmap_area_root.rb_node;
1152 
1153 	/* Adjust the search size for alignment overhead. */
1154 	length = size + align - 1;
1155 
1156 	while (node) {
1157 		va = rb_entry(node, struct vmap_area, rb_node);
1158 
1159 		if (get_subtree_max_size(node->rb_left) >= length &&
1160 				vstart < va->va_start) {
1161 			node = node->rb_left;
1162 		} else {
1163 			if (is_within_this_va(va, size, align, vstart))
1164 				return va;
1165 
1166 			/*
1167 			 * Does not make sense to go deeper towards the right
1168 			 * sub-tree if it does not have a free block that is
1169 			 * equal or bigger to the requested search length.
1170 			 */
1171 			if (get_subtree_max_size(node->rb_right) >= length) {
1172 				node = node->rb_right;
1173 				continue;
1174 			}
1175 
1176 			/*
1177 			 * OK. We roll back and find the first right sub-tree,
1178 			 * that will satisfy the search criteria. It can happen
1179 			 * only once due to "vstart" restriction.
1180 			 */
1181 			while ((node = rb_parent(node))) {
1182 				va = rb_entry(node, struct vmap_area, rb_node);
1183 				if (is_within_this_va(va, size, align, vstart))
1184 					return va;
1185 
1186 				if (get_subtree_max_size(node->rb_right) >= length &&
1187 						vstart <= va->va_start) {
1188 					node = node->rb_right;
1189 					break;
1190 				}
1191 			}
1192 		}
1193 	}
1194 
1195 	return NULL;
1196 }
1197 
1198 #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
1199 #include <linux/random.h>
1200 
1201 static struct vmap_area *
1202 find_vmap_lowest_linear_match(unsigned long size,
1203 	unsigned long align, unsigned long vstart)
1204 {
1205 	struct vmap_area *va;
1206 
1207 	list_for_each_entry(va, &free_vmap_area_list, list) {
1208 		if (!is_within_this_va(va, size, align, vstart))
1209 			continue;
1210 
1211 		return va;
1212 	}
1213 
1214 	return NULL;
1215 }
1216 
1217 static void
1218 find_vmap_lowest_match_check(unsigned long size)
1219 {
1220 	struct vmap_area *va_1, *va_2;
1221 	unsigned long vstart;
1222 	unsigned int rnd;
1223 
1224 	get_random_bytes(&rnd, sizeof(rnd));
1225 	vstart = VMALLOC_START + rnd;
1226 
1227 	va_1 = find_vmap_lowest_match(size, 1, vstart);
1228 	va_2 = find_vmap_lowest_linear_match(size, 1, vstart);
1229 
1230 	if (va_1 != va_2)
1231 		pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
1232 			va_1, va_2, vstart);
1233 }
1234 #endif
1235 
1236 enum fit_type {
1237 	NOTHING_FIT = 0,
1238 	FL_FIT_TYPE = 1,	/* full fit */
1239 	LE_FIT_TYPE = 2,	/* left edge fit */
1240 	RE_FIT_TYPE = 3,	/* right edge fit */
1241 	NE_FIT_TYPE = 4		/* no edge fit */
1242 };
1243 
1244 static __always_inline enum fit_type
1245 classify_va_fit_type(struct vmap_area *va,
1246 	unsigned long nva_start_addr, unsigned long size)
1247 {
1248 	enum fit_type type;
1249 
1250 	/* Check if it is within VA. */
1251 	if (nva_start_addr < va->va_start ||
1252 			nva_start_addr + size > va->va_end)
1253 		return NOTHING_FIT;
1254 
1255 	/* Now classify. */
1256 	if (va->va_start == nva_start_addr) {
1257 		if (va->va_end == nva_start_addr + size)
1258 			type = FL_FIT_TYPE;
1259 		else
1260 			type = LE_FIT_TYPE;
1261 	} else if (va->va_end == nva_start_addr + size) {
1262 		type = RE_FIT_TYPE;
1263 	} else {
1264 		type = NE_FIT_TYPE;
1265 	}
1266 
1267 	return type;
1268 }
1269 
1270 static __always_inline int
1271 adjust_va_to_fit_type(struct vmap_area *va,
1272 	unsigned long nva_start_addr, unsigned long size,
1273 	enum fit_type type)
1274 {
1275 	struct vmap_area *lva = NULL;
1276 
1277 	if (type == FL_FIT_TYPE) {
1278 		/*
1279 		 * No need to split VA, it fully fits.
1280 		 *
1281 		 * |               |
1282 		 * V      NVA      V
1283 		 * |---------------|
1284 		 */
1285 		unlink_va(va, &free_vmap_area_root);
1286 		kmem_cache_free(vmap_area_cachep, va);
1287 	} else if (type == LE_FIT_TYPE) {
1288 		/*
1289 		 * Split left edge of fit VA.
1290 		 *
1291 		 * |       |
1292 		 * V  NVA  V   R
1293 		 * |-------|-------|
1294 		 */
1295 		va->va_start += size;
1296 	} else if (type == RE_FIT_TYPE) {
1297 		/*
1298 		 * Split right edge of fit VA.
1299 		 *
1300 		 *         |       |
1301 		 *     L   V  NVA  V
1302 		 * |-------|-------|
1303 		 */
1304 		va->va_end = nva_start_addr;
1305 	} else if (type == NE_FIT_TYPE) {
1306 		/*
1307 		 * Split no edge of fit VA.
1308 		 *
1309 		 *     |       |
1310 		 *   L V  NVA  V R
1311 		 * |---|-------|---|
1312 		 */
1313 		lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
1314 		if (unlikely(!lva)) {
1315 			/*
1316 			 * For percpu allocator we do not do any pre-allocation
1317 			 * and leave it as it is. The reason is it most likely
1318 			 * never ends up with NE_FIT_TYPE splitting. In case of
1319 			 * percpu allocations offsets and sizes are aligned to
1320 			 * fixed align request, i.e. RE_FIT_TYPE and FL_FIT_TYPE
1321 			 * are its main fitting cases.
1322 			 *
1323 			 * There are a few exceptions though, as an example it is
1324 			 * a first allocation (early boot up) when we have "one"
1325 			 * big free space that has to be split.
1326 			 *
1327 			 * Also we can hit this path in case of regular "vmap"
1328 			 * allocations, if "this" current CPU was not preloaded.
1329 			 * See the comment in alloc_vmap_area() why. If so, then
1330 			 * GFP_NOWAIT is used instead to get an extra object for
1331 			 * split purpose. That is rare and most time does not
1332 			 * occur.
1333 			 *
1334 			 * What happens if an allocation gets failed. Basically,
1335 			 * an "overflow" path is triggered to purge lazily freed
1336 			 * areas to free some memory, then, the "retry" path is
1337 			 * triggered to repeat one more time. See more details
1338 			 * in alloc_vmap_area() function.
1339 			 */
1340 			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
1341 			if (!lva)
1342 				return -1;
1343 		}
1344 
1345 		/*
1346 		 * Build the remainder.
1347 		 */
1348 		lva->va_start = va->va_start;
1349 		lva->va_end = nva_start_addr;
1350 
1351 		/*
1352 		 * Shrink this VA to remaining size.
1353 		 */
1354 		va->va_start = nva_start_addr + size;
1355 	} else {
1356 		return -1;
1357 	}
1358 
1359 	if (type != FL_FIT_TYPE) {
1360 		augment_tree_propagate_from(va);
1361 
1362 		if (lva)	/* type == NE_FIT_TYPE */
1363 			insert_vmap_area_augment(lva, &va->rb_node,
1364 				&free_vmap_area_root, &free_vmap_area_list);
1365 	}
1366 
1367 	return 0;
1368 }
1369 
1370 /*
1371  * Returns a start address of the newly allocated area, if success.
1372  * Otherwise a vend is returned that indicates failure.
1373  */
1374 static __always_inline unsigned long
1375 __alloc_vmap_area(unsigned long size, unsigned long align,
1376 	unsigned long vstart, unsigned long vend)
1377 {
1378 	unsigned long nva_start_addr;
1379 	struct vmap_area *va;
1380 	enum fit_type type;
1381 	int ret;
1382 
1383 	va = find_vmap_lowest_match(size, align, vstart);
1384 	if (unlikely(!va))
1385 		return vend;
1386 
1387 	if (va->va_start > vstart)
1388 		nva_start_addr = ALIGN(va->va_start, align);
1389 	else
1390 		nva_start_addr = ALIGN(vstart, align);
1391 
1392 	/* Check the "vend" restriction. */
1393 	if (nva_start_addr + size > vend)
1394 		return vend;
1395 
1396 	/* Classify what we have found. */
1397 	type = classify_va_fit_type(va, nva_start_addr, size);
1398 	if (WARN_ON_ONCE(type == NOTHING_FIT))
1399 		return vend;
1400 
1401 	/* Update the free vmap_area. */
1402 	ret = adjust_va_to_fit_type(va, nva_start_addr, size, type);
1403 	if (ret)
1404 		return vend;
1405 
1406 #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
1407 	find_vmap_lowest_match_check(size);
1408 #endif
1409 
1410 	return nva_start_addr;
1411 }
1412 
1413 /*
1414  * Free a region of KVA allocated by alloc_vmap_area
1415  */
1416 static void free_vmap_area(struct vmap_area *va)
1417 {
1418 	/*
1419 	 * Remove from the busy tree/list.
1420 	 */
1421 	spin_lock(&vmap_area_lock);
1422 	unlink_va(va, &vmap_area_root);
1423 	spin_unlock(&vmap_area_lock);
1424 
1425 	/*
1426 	 * Insert/Merge it back to the free tree/list.
1427 	 */
1428 	spin_lock(&free_vmap_area_lock);
1429 	merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list);
1430 	spin_unlock(&free_vmap_area_lock);
1431 }
1432 
1433 static inline void
1434 preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node)
1435 {
1436 	struct vmap_area *va = NULL;
1437 
1438 	/*
1439 	 * Preload this CPU with one extra vmap_area object. It is used
1440 	 * when fit type of free area is NE_FIT_TYPE. It guarantees that
1441 	 * a CPU that does an allocation is preloaded.
1442 	 *
1443 	 * We do it in non-atomic context, thus it allows us to use more
1444 	 * permissive allocation masks to be more stable under low memory
1445 	 * condition and high memory pressure.
1446 	 */
1447 	if (!this_cpu_read(ne_fit_preload_node))
1448 		va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1449 
1450 	spin_lock(lock);
1451 
1452 	if (va && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, va))
1453 		kmem_cache_free(vmap_area_cachep, va);
1454 }
1455 
1456 /*
1457  * Allocate a region of KVA of the specified size and alignment, within the
1458  * vstart and vend.
1459  */
1460 static struct vmap_area *alloc_vmap_area(unsigned long size,
1461 				unsigned long align,
1462 				unsigned long vstart, unsigned long vend,
1463 				int node, gfp_t gfp_mask)
1464 {
1465 	struct vmap_area *va;
1466 	unsigned long addr;
1467 	int purged = 0;
1468 	int ret;
1469 
1470 	BUG_ON(!size);
1471 	BUG_ON(offset_in_page(size));
1472 	BUG_ON(!is_power_of_2(align));
1473 
1474 	if (unlikely(!vmap_initialized))
1475 		return ERR_PTR(-EBUSY);
1476 
1477 	might_sleep();
1478 	gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
1479 
1480 	va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1481 	if (unlikely(!va))
1482 		return ERR_PTR(-ENOMEM);
1483 
1484 	/*
1485 	 * Only scan the relevant parts containing pointers to other objects
1486 	 * to avoid false negatives.
1487 	 */
1488 	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);
1489 
1490 retry:
1491 	preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
1492 	addr = __alloc_vmap_area(size, align, vstart, vend);
1493 	spin_unlock(&free_vmap_area_lock);
1494 
1495 	/*
1496 	 * If an allocation fails, the "vend" address is
1497 	 * returned. Therefore trigger the overflow path.
1498 	 */
1499 	if (unlikely(addr == vend))
1500 		goto overflow;
1501 
1502 	va->va_start = addr;
1503 	va->va_end = addr + size;
1504 	va->vm = NULL;
1505 
1506 	spin_lock(&vmap_area_lock);
1507 	insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
1508 	spin_unlock(&vmap_area_lock);
1509 
1510 	BUG_ON(!IS_ALIGNED(va->va_start, align));
1511 	BUG_ON(va->va_start < vstart);
1512 	BUG_ON(va->va_end > vend);
1513 
1514 	ret = kasan_populate_vmalloc(addr, size);
1515 	if (ret) {
1516 		free_vmap_area(va);
1517 		return ERR_PTR(ret);
1518 	}
1519 
1520 	return va;
1521 
1522 overflow:
1523 	if (!purged) {
1524 		purge_vmap_area_lazy();
1525 		purged = 1;
1526 		goto retry;
1527 	}
1528 
1529 	if (gfpflags_allow_blocking(gfp_mask)) {
1530 		unsigned long freed = 0;
1531 		blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
1532 		if (freed > 0) {
1533 			purged = 0;
1534 			goto retry;
1535 		}
1536 	}
1537 
1538 	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
1539 		pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
1540 			size);
1541 
1542 	kmem_cache_free(vmap_area_cachep, va);
1543 	return ERR_PTR(-EBUSY);
1544 }
1545 
1546 int register_vmap_purge_notifier(struct notifier_block *nb)
1547 {
1548 	return blocking_notifier_chain_register(&vmap_notify_list, nb);
1549 }
1550 EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);
1551 
1552 int unregister_vmap_purge_notifier(struct notifier_block *nb)
1553 {
1554 	return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
1555 }
1556 EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
1557 
1558 /*
1559  * lazy_max_pages is the maximum amount of virtual address space we gather up
1560  * before attempting to purge with a TLB flush.
1561  *
1562  * There is a tradeoff here: a larger number will cover more kernel page tables
1563  * and take slightly longer to purge, but it will linearly reduce the number of
1564  * global TLB flushes that must be performed. It would seem natural to scale
1565  * this number up linearly with the number of CPUs (because vmapping activity
1566  * could also scale linearly with the number of CPUs), however it is likely
1567  * that in practice, workloads might be constrained in other ways that mean
1568  * vmap activity will not scale linearly with CPUs. Also, I want to be
1569  * conservative and not introduce a big latency on huge systems, so go with
1570  * a less aggressive log scale. It will still be an improvement over the old
1571  * code, and it will be simple to change the scale factor if we find that it
1572  * becomes a problem on bigger systems.
1573  */
1574 static unsigned long lazy_max_pages(void)
1575 {
1576 	unsigned int log;
1577 
1578 	log = fls(num_online_cpus());
1579 
1580 	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
1581 }
1582 
1583 static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);
1584 
1585 /*
1586  * Serialize vmap purging.  There is no actual critical section protected
1587  * by this look, but we want to avoid concurrent calls for performance
1588  * reasons and to make the pcpu_get_vm_areas more deterministic.
1589  */
1590 static DEFINE_MUTEX(vmap_purge_lock);
1591 
1592 /* for per-CPU blocks */
1593 static void purge_fragmented_blocks_allcpus(void);
1594 
1595 /*
1596  * called before a call to iounmap() if the caller wants vm_area_struct's
1597  * immediately freed.
1598  */
1599 void set_iounmap_nonlazy(void)
1600 {
1601 	atomic_long_set(&vmap_lazy_nr, lazy_max_pages()+1);
1602 }
1603 
1604 /*
1605  * Purges all lazily-freed vmap areas.
1606  */
1607 static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
1608 {
1609 	unsigned long resched_threshold;
1610 	struct list_head local_pure_list;
1611 	struct vmap_area *va, *n_va;
1612 
1613 	lockdep_assert_held(&vmap_purge_lock);
1614 
1615 	spin_lock(&purge_vmap_area_lock);
1616 	purge_vmap_area_root = RB_ROOT;
1617 	list_replace_init(&purge_vmap_area_list, &local_pure_list);
1618 	spin_unlock(&purge_vmap_area_lock);
1619 
1620 	if (unlikely(list_empty(&local_pure_list)))
1621 		return false;
1622 
1623 	start = min(start,
1624 		list_first_entry(&local_pure_list,
1625 			struct vmap_area, list)->va_start);
1626 
1627 	end = max(end,
1628 		list_last_entry(&local_pure_list,
1629 			struct vmap_area, list)->va_end);
1630 
1631 	flush_tlb_kernel_range(start, end);
1632 	resched_threshold = lazy_max_pages() << 1;
1633 
1634 	spin_lock(&free_vmap_area_lock);
1635 	list_for_each_entry_safe(va, n_va, &local_pure_list, list) {
1636 		unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
1637 		unsigned long orig_start = va->va_start;
1638 		unsigned long orig_end = va->va_end;
1639 
1640 		/*
1641 		 * Finally insert or merge lazily-freed area. It is
1642 		 * detached and there is no need to "unlink" it from
1643 		 * anything.
1644 		 */
1645 		va = merge_or_add_vmap_area_augment(va, &free_vmap_area_root,
1646 				&free_vmap_area_list);
1647 
1648 		if (!va)
1649 			continue;
1650 
1651 		if (is_vmalloc_or_module_addr((void *)orig_start))
1652 			kasan_release_vmalloc(orig_start, orig_end,
1653 					      va->va_start, va->va_end);
1654 
1655 		atomic_long_sub(nr, &vmap_lazy_nr);
1656 
1657 		if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
1658 			cond_resched_lock(&free_vmap_area_lock);
1659 	}
1660 	spin_unlock(&free_vmap_area_lock);
1661 	return true;
1662 }
1663 
1664 /*
1665  * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
1666  * is already purging.
1667  */
1668 static void try_purge_vmap_area_lazy(void)
1669 {
1670 	if (mutex_trylock(&vmap_purge_lock)) {
1671 		__purge_vmap_area_lazy(ULONG_MAX, 0);
1672 		mutex_unlock(&vmap_purge_lock);
1673 	}
1674 }
1675 
1676 /*
1677  * Kick off a purge of the outstanding lazy areas.
1678  */
1679 static void purge_vmap_area_lazy(void)
1680 {
1681 	mutex_lock(&vmap_purge_lock);
1682 	purge_fragmented_blocks_allcpus();
1683 	__purge_vmap_area_lazy(ULONG_MAX, 0);
1684 	mutex_unlock(&vmap_purge_lock);
1685 }
1686 
1687 /*
1688  * Free a vmap area, caller ensuring that the area has been unmapped
1689  * and flush_cache_vunmap had been called for the correct range
1690  * previously.
1691  */
1692 static void free_vmap_area_noflush(struct vmap_area *va)
1693 {
1694 	unsigned long nr_lazy;
1695 
1696 	spin_lock(&vmap_area_lock);
1697 	unlink_va(va, &vmap_area_root);
1698 	spin_unlock(&vmap_area_lock);
1699 
1700 	nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
1701 				PAGE_SHIFT, &vmap_lazy_nr);
1702 
1703 	/*
1704 	 * Merge or place it to the purge tree/list.
1705 	 */
1706 	spin_lock(&purge_vmap_area_lock);
1707 	merge_or_add_vmap_area(va,
1708 		&purge_vmap_area_root, &purge_vmap_area_list);
1709 	spin_unlock(&purge_vmap_area_lock);
1710 
1711 	/* After this point, we may free va at any time */
1712 	if (unlikely(nr_lazy > lazy_max_pages()))
1713 		try_purge_vmap_area_lazy();
1714 }
1715 
1716 /*
1717  * Free and unmap a vmap area
1718  */
1719 static void free_unmap_vmap_area(struct vmap_area *va)
1720 {
1721 	flush_cache_vunmap(va->va_start, va->va_end);
1722 	vunmap_range_noflush(va->va_start, va->va_end);
1723 	if (debug_pagealloc_enabled_static())
1724 		flush_tlb_kernel_range(va->va_start, va->va_end);
1725 
1726 	free_vmap_area_noflush(va);
1727 }
1728 
1729 static struct vmap_area *find_vmap_area(unsigned long addr)
1730 {
1731 	struct vmap_area *va;
1732 
1733 	spin_lock(&vmap_area_lock);
1734 	va = __find_vmap_area(addr);
1735 	spin_unlock(&vmap_area_lock);
1736 
1737 	return va;
1738 }
1739 
1740 /*** Per cpu kva allocator ***/
1741 
1742 /*
1743  * vmap space is limited especially on 32 bit architectures. Ensure there is
1744  * room for at least 16 percpu vmap blocks per CPU.
1745  */
1746 /*
1747  * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
1748  * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
1749  * instead (we just need a rough idea)
1750  */
1751 #if BITS_PER_LONG == 32
1752 #define VMALLOC_SPACE		(128UL*1024*1024)
1753 #else
1754 #define VMALLOC_SPACE		(128UL*1024*1024*1024)
1755 #endif
1756 
1757 #define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
1758 #define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
1759 #define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
1760 #define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
1761 #define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
1762 #define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
1763 #define VMAP_BBMAP_BITS		\
1764 		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
1765 		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
1766 			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
1767 
1768 #define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
1769 
1770 struct vmap_block_queue {
1771 	spinlock_t lock;
1772 	struct list_head free;
1773 };
1774 
1775 struct vmap_block {
1776 	spinlock_t lock;
1777 	struct vmap_area *va;
1778 	unsigned long free, dirty;
1779 	unsigned long dirty_min, dirty_max; /*< dirty range */
1780 	struct list_head free_list;
1781 	struct rcu_head rcu_head;
1782 	struct list_head purge;
1783 };
1784 
1785 /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
1786 static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
1787 
1788 /*
1789  * XArray of vmap blocks, indexed by address, to quickly find a vmap block
1790  * in the free path. Could get rid of this if we change the API to return a
1791  * "cookie" from alloc, to be passed to free. But no big deal yet.
1792  */
1793 static DEFINE_XARRAY(vmap_blocks);
1794 
1795 /*
1796  * We should probably have a fallback mechanism to allocate virtual memory
1797  * out of partially filled vmap blocks. However vmap block sizing should be
1798  * fairly reasonable according to the vmalloc size, so it shouldn't be a
1799  * big problem.
1800  */
1801 
1802 static unsigned long addr_to_vb_idx(unsigned long addr)
1803 {
1804 	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
1805 	addr /= VMAP_BLOCK_SIZE;
1806 	return addr;
1807 }
1808 
1809 static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
1810 {
1811 	unsigned long addr;
1812 
1813 	addr = va_start + (pages_off << PAGE_SHIFT);
1814 	BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
1815 	return (void *)addr;
1816 }
1817 
1818 /**
1819  * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this
1820  *                  block. Of course pages number can't exceed VMAP_BBMAP_BITS
1821  * @order:    how many 2^order pages should be occupied in newly allocated block
1822  * @gfp_mask: flags for the page level allocator
1823  *
1824  * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
1825  */
1826 static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
1827 {
1828 	struct vmap_block_queue *vbq;
1829 	struct vmap_block *vb;
1830 	struct vmap_area *va;
1831 	unsigned long vb_idx;
1832 	int node, err;
1833 	void *vaddr;
1834 
1835 	node = numa_node_id();
1836 
1837 	vb = kmalloc_node(sizeof(struct vmap_block),
1838 			gfp_mask & GFP_RECLAIM_MASK, node);
1839 	if (unlikely(!vb))
1840 		return ERR_PTR(-ENOMEM);
1841 
1842 	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
1843 					VMALLOC_START, VMALLOC_END,
1844 					node, gfp_mask);
1845 	if (IS_ERR(va)) {
1846 		kfree(vb);
1847 		return ERR_CAST(va);
1848 	}
1849 
1850 	vaddr = vmap_block_vaddr(va->va_start, 0);
1851 	spin_lock_init(&vb->lock);
1852 	vb->va = va;
1853 	/* At least something should be left free */
1854 	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
1855 	vb->free = VMAP_BBMAP_BITS - (1UL << order);
1856 	vb->dirty = 0;
1857 	vb->dirty_min = VMAP_BBMAP_BITS;
1858 	vb->dirty_max = 0;
1859 	INIT_LIST_HEAD(&vb->free_list);
1860 
1861 	vb_idx = addr_to_vb_idx(va->va_start);
1862 	err = xa_insert(&vmap_blocks, vb_idx, vb, gfp_mask);
1863 	if (err) {
1864 		kfree(vb);
1865 		free_vmap_area(va);
1866 		return ERR_PTR(err);
1867 	}
1868 
1869 	vbq = &get_cpu_var(vmap_block_queue);
1870 	spin_lock(&vbq->lock);
1871 	list_add_tail_rcu(&vb->free_list, &vbq->free);
1872 	spin_unlock(&vbq->lock);
1873 	put_cpu_var(vmap_block_queue);
1874 
1875 	return vaddr;
1876 }
1877 
1878 static void free_vmap_block(struct vmap_block *vb)
1879 {
1880 	struct vmap_block *tmp;
1881 
1882 	tmp = xa_erase(&vmap_blocks, addr_to_vb_idx(vb->va->va_start));
1883 	BUG_ON(tmp != vb);
1884 
1885 	free_vmap_area_noflush(vb->va);
1886 	kfree_rcu(vb, rcu_head);
1887 }
1888 
1889 static void purge_fragmented_blocks(int cpu)
1890 {
1891 	LIST_HEAD(purge);
1892 	struct vmap_block *vb;
1893 	struct vmap_block *n_vb;
1894 	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
1895 
1896 	rcu_read_lock();
1897 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1898 
1899 		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
1900 			continue;
1901 
1902 		spin_lock(&vb->lock);
1903 		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
1904 			vb->free = 0; /* prevent further allocs after releasing lock */
1905 			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
1906 			vb->dirty_min = 0;
1907 			vb->dirty_max = VMAP_BBMAP_BITS;
1908 			spin_lock(&vbq->lock);
1909 			list_del_rcu(&vb->free_list);
1910 			spin_unlock(&vbq->lock);
1911 			spin_unlock(&vb->lock);
1912 			list_add_tail(&vb->purge, &purge);
1913 		} else
1914 			spin_unlock(&vb->lock);
1915 	}
1916 	rcu_read_unlock();
1917 
1918 	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
1919 		list_del(&vb->purge);
1920 		free_vmap_block(vb);
1921 	}
1922 }
1923 
1924 static void purge_fragmented_blocks_allcpus(void)
1925 {
1926 	int cpu;
1927 
1928 	for_each_possible_cpu(cpu)
1929 		purge_fragmented_blocks(cpu);
1930 }
1931 
1932 static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
1933 {
1934 	struct vmap_block_queue *vbq;
1935 	struct vmap_block *vb;
1936 	void *vaddr = NULL;
1937 	unsigned int order;
1938 
1939 	BUG_ON(offset_in_page(size));
1940 	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
1941 	if (WARN_ON(size == 0)) {
1942 		/*
1943 		 * Allocating 0 bytes isn't what caller wants since
1944 		 * get_order(0) returns funny result. Just warn and terminate
1945 		 * early.
1946 		 */
1947 		return NULL;
1948 	}
1949 	order = get_order(size);
1950 
1951 	rcu_read_lock();
1952 	vbq = &get_cpu_var(vmap_block_queue);
1953 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1954 		unsigned long pages_off;
1955 
1956 		spin_lock(&vb->lock);
1957 		if (vb->free < (1UL << order)) {
1958 			spin_unlock(&vb->lock);
1959 			continue;
1960 		}
1961 
1962 		pages_off = VMAP_BBMAP_BITS - vb->free;
1963 		vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
1964 		vb->free -= 1UL << order;
1965 		if (vb->free == 0) {
1966 			spin_lock(&vbq->lock);
1967 			list_del_rcu(&vb->free_list);
1968 			spin_unlock(&vbq->lock);
1969 		}
1970 
1971 		spin_unlock(&vb->lock);
1972 		break;
1973 	}
1974 
1975 	put_cpu_var(vmap_block_queue);
1976 	rcu_read_unlock();
1977 
1978 	/* Allocate new block if nothing was found */
1979 	if (!vaddr)
1980 		vaddr = new_vmap_block(order, gfp_mask);
1981 
1982 	return vaddr;
1983 }
1984 
1985 static void vb_free(unsigned long addr, unsigned long size)
1986 {
1987 	unsigned long offset;
1988 	unsigned int order;
1989 	struct vmap_block *vb;
1990 
1991 	BUG_ON(offset_in_page(size));
1992 	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
1993 
1994 	flush_cache_vunmap(addr, addr + size);
1995 
1996 	order = get_order(size);
1997 	offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;
1998 	vb = xa_load(&vmap_blocks, addr_to_vb_idx(addr));
1999 
2000 	vunmap_range_noflush(addr, addr + size);
2001 
2002 	if (debug_pagealloc_enabled_static())
2003 		flush_tlb_kernel_range(addr, addr + size);
2004 
2005 	spin_lock(&vb->lock);
2006 
2007 	/* Expand dirty range */
2008 	vb->dirty_min = min(vb->dirty_min, offset);
2009 	vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
2010 
2011 	vb->dirty += 1UL << order;
2012 	if (vb->dirty == VMAP_BBMAP_BITS) {
2013 		BUG_ON(vb->free);
2014 		spin_unlock(&vb->lock);
2015 		free_vmap_block(vb);
2016 	} else
2017 		spin_unlock(&vb->lock);
2018 }
2019 
2020 static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
2021 {
2022 	int cpu;
2023 
2024 	if (unlikely(!vmap_initialized))
2025 		return;
2026 
2027 	might_sleep();
2028 
2029 	for_each_possible_cpu(cpu) {
2030 		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
2031 		struct vmap_block *vb;
2032 
2033 		rcu_read_lock();
2034 		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
2035 			spin_lock(&vb->lock);
2036 			if (vb->dirty && vb->dirty != VMAP_BBMAP_BITS) {
2037 				unsigned long va_start = vb->va->va_start;
2038 				unsigned long s, e;
2039 
2040 				s = va_start + (vb->dirty_min << PAGE_SHIFT);
2041 				e = va_start + (vb->dirty_max << PAGE_SHIFT);
2042 
2043 				start = min(s, start);
2044 				end   = max(e, end);
2045 
2046 				flush = 1;
2047 			}
2048 			spin_unlock(&vb->lock);
2049 		}
2050 		rcu_read_unlock();
2051 	}
2052 
2053 	mutex_lock(&vmap_purge_lock);
2054 	purge_fragmented_blocks_allcpus();
2055 	if (!__purge_vmap_area_lazy(start, end) && flush)
2056 		flush_tlb_kernel_range(start, end);
2057 	mutex_unlock(&vmap_purge_lock);
2058 }
2059 
2060 /**
2061  * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
2062  *
2063  * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
2064  * to amortize TLB flushing overheads. What this means is that any page you
2065  * have now, may, in a former life, have been mapped into kernel virtual
2066  * address by the vmap layer and so there might be some CPUs with TLB entries
2067  * still referencing that page (additional to the regular 1:1 kernel mapping).
2068  *
2069  * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
2070  * be sure that none of the pages we have control over will have any aliases
2071  * from the vmap layer.
2072  */
2073 void vm_unmap_aliases(void)
2074 {
2075 	unsigned long start = ULONG_MAX, end = 0;
2076 	int flush = 0;
2077 
2078 	_vm_unmap_aliases(start, end, flush);
2079 }
2080 EXPORT_SYMBOL_GPL(vm_unmap_aliases);
2081 
2082 /**
2083  * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
2084  * @mem: the pointer returned by vm_map_ram
2085  * @count: the count passed to that vm_map_ram call (cannot unmap partial)
2086  */
2087 void vm_unmap_ram(const void *mem, unsigned int count)
2088 {
2089 	unsigned long size = (unsigned long)count << PAGE_SHIFT;
2090 	unsigned long addr = (unsigned long)mem;
2091 	struct vmap_area *va;
2092 
2093 	might_sleep();
2094 	BUG_ON(!addr);
2095 	BUG_ON(addr < VMALLOC_START);
2096 	BUG_ON(addr > VMALLOC_END);
2097 	BUG_ON(!PAGE_ALIGNED(addr));
2098 
2099 	kasan_poison_vmalloc(mem, size);
2100 
2101 	if (likely(count <= VMAP_MAX_ALLOC)) {
2102 		debug_check_no_locks_freed(mem, size);
2103 		vb_free(addr, size);
2104 		return;
2105 	}
2106 
2107 	va = find_vmap_area(addr);
2108 	BUG_ON(!va);
2109 	debug_check_no_locks_freed((void *)va->va_start,
2110 				    (va->va_end - va->va_start));
2111 	free_unmap_vmap_area(va);
2112 }
2113 EXPORT_SYMBOL(vm_unmap_ram);
2114 
2115 /**
2116  * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
2117  * @pages: an array of pointers to the pages to be mapped
2118  * @count: number of pages
2119  * @node: prefer to allocate data structures on this node
2120  *
2121  * If you use this function for less than VMAP_MAX_ALLOC pages, it could be
2122  * faster than vmap so it's good.  But if you mix long-life and short-life
2123  * objects with vm_map_ram(), it could consume lots of address space through
2124  * fragmentation (especially on a 32bit machine).  You could see failures in
2125  * the end.  Please use this function for short-lived objects.
2126  *
2127  * Returns: a pointer to the address that has been mapped, or %NULL on failure
2128  */
2129 void *vm_map_ram(struct page **pages, unsigned int count, int node)
2130 {
2131 	unsigned long size = (unsigned long)count << PAGE_SHIFT;
2132 	unsigned long addr;
2133 	void *mem;
2134 
2135 	if (likely(count <= VMAP_MAX_ALLOC)) {
2136 		mem = vb_alloc(size, GFP_KERNEL);
2137 		if (IS_ERR(mem))
2138 			return NULL;
2139 		addr = (unsigned long)mem;
2140 	} else {
2141 		struct vmap_area *va;
2142 		va = alloc_vmap_area(size, PAGE_SIZE,
2143 				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
2144 		if (IS_ERR(va))
2145 			return NULL;
2146 
2147 		addr = va->va_start;
2148 		mem = (void *)addr;
2149 	}
2150 
2151 	kasan_unpoison_vmalloc(mem, size);
2152 
2153 	if (vmap_pages_range(addr, addr + size, PAGE_KERNEL,
2154 				pages, PAGE_SHIFT) < 0) {
2155 		vm_unmap_ram(mem, count);
2156 		return NULL;
2157 	}
2158 
2159 	return mem;
2160 }
2161 EXPORT_SYMBOL(vm_map_ram);
2162 
2163 static struct vm_struct *vmlist __initdata;
2164 
2165 static inline unsigned int vm_area_page_order(struct vm_struct *vm)
2166 {
2167 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
2168 	return vm->page_order;
2169 #else
2170 	return 0;
2171 #endif
2172 }
2173 
2174 static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order)
2175 {
2176 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
2177 	vm->page_order = order;
2178 #else
2179 	BUG_ON(order != 0);
2180 #endif
2181 }
2182 
2183 /**
2184  * vm_area_add_early - add vmap area early during boot
2185  * @vm: vm_struct to add
2186  *
2187  * This function is used to add fixed kernel vm area to vmlist before
2188  * vmalloc_init() is called.  @vm->addr, @vm->size, and @vm->flags
2189  * should contain proper values and the other fields should be zero.
2190  *
2191  * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
2192  */
2193 void __init vm_area_add_early(struct vm_struct *vm)
2194 {
2195 	struct vm_struct *tmp, **p;
2196 
2197 	BUG_ON(vmap_initialized);
2198 	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
2199 		if (tmp->addr >= vm->addr) {
2200 			BUG_ON(tmp->addr < vm->addr + vm->size);
2201 			break;
2202 		} else
2203 			BUG_ON(tmp->addr + tmp->size > vm->addr);
2204 	}
2205 	vm->next = *p;
2206 	*p = vm;
2207 }
2208 
2209 /**
2210  * vm_area_register_early - register vmap area early during boot
2211  * @vm: vm_struct to register
2212  * @align: requested alignment
2213  *
2214  * This function is used to register kernel vm area before
2215  * vmalloc_init() is called.  @vm->size and @vm->flags should contain
2216  * proper values on entry and other fields should be zero.  On return,
2217  * vm->addr contains the allocated address.
2218  *
2219  * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
2220  */
2221 void __init vm_area_register_early(struct vm_struct *vm, size_t align)
2222 {
2223 	static size_t vm_init_off __initdata;
2224 	unsigned long addr;
2225 
2226 	addr = ALIGN(VMALLOC_START + vm_init_off, align);
2227 	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
2228 
2229 	vm->addr = (void *)addr;
2230 
2231 	vm_area_add_early(vm);
2232 }
2233 
2234 static void vmap_init_free_space(void)
2235 {
2236 	unsigned long vmap_start = 1;
2237 	const unsigned long vmap_end = ULONG_MAX;
2238 	struct vmap_area *busy, *free;
2239 
2240 	/*
2241 	 *     B     F     B     B     B     F
2242 	 * -|-----|.....|-----|-----|-----|.....|-
2243 	 *  |           The KVA space           |
2244 	 *  |<--------------------------------->|
2245 	 */
2246 	list_for_each_entry(busy, &vmap_area_list, list) {
2247 		if (busy->va_start - vmap_start > 0) {
2248 			free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
2249 			if (!WARN_ON_ONCE(!free)) {
2250 				free->va_start = vmap_start;
2251 				free->va_end = busy->va_start;
2252 
2253 				insert_vmap_area_augment(free, NULL,
2254 					&free_vmap_area_root,
2255 						&free_vmap_area_list);
2256 			}
2257 		}
2258 
2259 		vmap_start = busy->va_end;
2260 	}
2261 
2262 	if (vmap_end - vmap_start > 0) {
2263 		free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
2264 		if (!WARN_ON_ONCE(!free)) {
2265 			free->va_start = vmap_start;
2266 			free->va_end = vmap_end;
2267 
2268 			insert_vmap_area_augment(free, NULL,
2269 				&free_vmap_area_root,
2270 					&free_vmap_area_list);
2271 		}
2272 	}
2273 }
2274 
2275 void __init vmalloc_init(void)
2276 {
2277 	struct vmap_area *va;
2278 	struct vm_struct *tmp;
2279 	int i;
2280 
2281 	/*
2282 	 * Create the cache for vmap_area objects.
2283 	 */
2284 	vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);
2285 
2286 	for_each_possible_cpu(i) {
2287 		struct vmap_block_queue *vbq;
2288 		struct vfree_deferred *p;
2289 
2290 		vbq = &per_cpu(vmap_block_queue, i);
2291 		spin_lock_init(&vbq->lock);
2292 		INIT_LIST_HEAD(&vbq->free);
2293 		p = &per_cpu(vfree_deferred, i);
2294 		init_llist_head(&p->list);
2295 		INIT_WORK(&p->wq, free_work);
2296 	}
2297 
2298 	/* Import existing vmlist entries. */
2299 	for (tmp = vmlist; tmp; tmp = tmp->next) {
2300 		va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
2301 		if (WARN_ON_ONCE(!va))
2302 			continue;
2303 
2304 		va->va_start = (unsigned long)tmp->addr;
2305 		va->va_end = va->va_start + tmp->size;
2306 		va->vm = tmp;
2307 		insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
2308 	}
2309 
2310 	/*
2311 	 * Now we can initialize a free vmap space.
2312 	 */
2313 	vmap_init_free_space();
2314 	vmap_initialized = true;
2315 }
2316 
2317 static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
2318 	struct vmap_area *va, unsigned long flags, const void *caller)
2319 {
2320 	vm->flags = flags;
2321 	vm->addr = (void *)va->va_start;
2322 	vm->size = va->va_end - va->va_start;
2323 	vm->caller = caller;
2324 	va->vm = vm;
2325 }
2326 
2327 static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
2328 			      unsigned long flags, const void *caller)
2329 {
2330 	spin_lock(&vmap_area_lock);
2331 	setup_vmalloc_vm_locked(vm, va, flags, caller);
2332 	spin_unlock(&vmap_area_lock);
2333 }
2334 
2335 static void clear_vm_uninitialized_flag(struct vm_struct *vm)
2336 {
2337 	/*
2338 	 * Before removing VM_UNINITIALIZED,
2339 	 * we should make sure that vm has proper values.
2340 	 * Pair with smp_rmb() in show_numa_info().
2341 	 */
2342 	smp_wmb();
2343 	vm->flags &= ~VM_UNINITIALIZED;
2344 }
2345 
2346 static struct vm_struct *__get_vm_area_node(unsigned long size,
2347 		unsigned long align, unsigned long shift, unsigned long flags,
2348 		unsigned long start, unsigned long end, int node,
2349 		gfp_t gfp_mask, const void *caller)
2350 {
2351 	struct vmap_area *va;
2352 	struct vm_struct *area;
2353 	unsigned long requested_size = size;
2354 
2355 	BUG_ON(in_interrupt());
2356 	size = ALIGN(size, 1ul << shift);
2357 	if (unlikely(!size))
2358 		return NULL;
2359 
2360 	if (flags & VM_IOREMAP)
2361 		align = 1ul << clamp_t(int, get_count_order_long(size),
2362 				       PAGE_SHIFT, IOREMAP_MAX_ORDER);
2363 
2364 	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
2365 	if (unlikely(!area))
2366 		return NULL;
2367 
2368 	if (!(flags & VM_NO_GUARD))
2369 		size += PAGE_SIZE;
2370 
2371 	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
2372 	if (IS_ERR(va)) {
2373 		kfree(area);
2374 		return NULL;
2375 	}
2376 
2377 	kasan_unpoison_vmalloc((void *)va->va_start, requested_size);
2378 
2379 	setup_vmalloc_vm(area, va, flags, caller);
2380 
2381 	return area;
2382 }
2383 
2384 struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
2385 				       unsigned long start, unsigned long end,
2386 				       const void *caller)
2387 {
2388 	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end,
2389 				  NUMA_NO_NODE, GFP_KERNEL, caller);
2390 }
2391 
2392 /**
2393  * get_vm_area - reserve a contiguous kernel virtual area
2394  * @size:	 size of the area
2395  * @flags:	 %VM_IOREMAP for I/O mappings or VM_ALLOC
2396  *
2397  * Search an area of @size in the kernel virtual mapping area,
2398  * and reserved it for out purposes.  Returns the area descriptor
2399  * on success or %NULL on failure.
2400  *
2401  * Return: the area descriptor on success or %NULL on failure.
2402  */
2403 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
2404 {
2405 	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
2406 				  VMALLOC_START, VMALLOC_END,
2407 				  NUMA_NO_NODE, GFP_KERNEL,
2408 				  __builtin_return_address(0));
2409 }
2410 
2411 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
2412 				const void *caller)
2413 {
2414 	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
2415 				  VMALLOC_START, VMALLOC_END,
2416 				  NUMA_NO_NODE, GFP_KERNEL, caller);
2417 }
2418 
2419 /**
2420  * find_vm_area - find a continuous kernel virtual area
2421  * @addr:	  base address
2422  *
2423  * Search for the kernel VM area starting at @addr, and return it.
2424  * It is up to the caller to do all required locking to keep the returned
2425  * pointer valid.
2426  *
2427  * Return: the area descriptor on success or %NULL on failure.
2428  */
2429 struct vm_struct *find_vm_area(const void *addr)
2430 {
2431 	struct vmap_area *va;
2432 
2433 	va = find_vmap_area((unsigned long)addr);
2434 	if (!va)
2435 		return NULL;
2436 
2437 	return va->vm;
2438 }
2439 
2440 /**
2441  * remove_vm_area - find and remove a continuous kernel virtual area
2442  * @addr:	    base address
2443  *
2444  * Search for the kernel VM area starting at @addr, and remove it.
2445  * This function returns the found VM area, but using it is NOT safe
2446  * on SMP machines, except for its size or flags.
2447  *
2448  * Return: the area descriptor on success or %NULL on failure.
2449  */
2450 struct vm_struct *remove_vm_area(const void *addr)
2451 {
2452 	struct vmap_area *va;
2453 
2454 	might_sleep();
2455 
2456 	spin_lock(&vmap_area_lock);
2457 	va = __find_vmap_area((unsigned long)addr);
2458 	if (va && va->vm) {
2459 		struct vm_struct *vm = va->vm;
2460 
2461 		va->vm = NULL;
2462 		spin_unlock(&vmap_area_lock);
2463 
2464 		kasan_free_shadow(vm);
2465 		free_unmap_vmap_area(va);
2466 
2467 		return vm;
2468 	}
2469 
2470 	spin_unlock(&vmap_area_lock);
2471 	return NULL;
2472 }
2473 
2474 static inline void set_area_direct_map(const struct vm_struct *area,
2475 				       int (*set_direct_map)(struct page *page))
2476 {
2477 	int i;
2478 
2479 	/* HUGE_VMALLOC passes small pages to set_direct_map */
2480 	for (i = 0; i < area->nr_pages; i++)
2481 		if (page_address(area->pages[i]))
2482 			set_direct_map(area->pages[i]);
2483 }
2484 
2485 /* Handle removing and resetting vm mappings related to the vm_struct. */
2486 static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
2487 {
2488 	unsigned long start = ULONG_MAX, end = 0;
2489 	unsigned int page_order = vm_area_page_order(area);
2490 	int flush_reset = area->flags & VM_FLUSH_RESET_PERMS;
2491 	int flush_dmap = 0;
2492 	int i;
2493 
2494 	remove_vm_area(area->addr);
2495 
2496 	/* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below. */
2497 	if (!flush_reset)
2498 		return;
2499 
2500 	/*
2501 	 * If not deallocating pages, just do the flush of the VM area and
2502 	 * return.
2503 	 */
2504 	if (!deallocate_pages) {
2505 		vm_unmap_aliases();
2506 		return;
2507 	}
2508 
2509 	/*
2510 	 * If execution gets here, flush the vm mapping and reset the direct
2511 	 * map. Find the start and end range of the direct mappings to make sure
2512 	 * the vm_unmap_aliases() flush includes the direct map.
2513 	 */
2514 	for (i = 0; i < area->nr_pages; i += 1U << page_order) {
2515 		unsigned long addr = (unsigned long)page_address(area->pages[i]);
2516 		if (addr) {
2517 			unsigned long page_size;
2518 
2519 			page_size = PAGE_SIZE << page_order;
2520 			start = min(addr, start);
2521 			end = max(addr + page_size, end);
2522 			flush_dmap = 1;
2523 		}
2524 	}
2525 
2526 	/*
2527 	 * Set direct map to something invalid so that it won't be cached if
2528 	 * there are any accesses after the TLB flush, then flush the TLB and
2529 	 * reset the direct map permissions to the default.
2530 	 */
2531 	set_area_direct_map(area, set_direct_map_invalid_noflush);
2532 	_vm_unmap_aliases(start, end, flush_dmap);
2533 	set_area_direct_map(area, set_direct_map_default_noflush);
2534 }
2535 
2536 static void __vunmap(const void *addr, int deallocate_pages)
2537 {
2538 	struct vm_struct *area;
2539 
2540 	if (!addr)
2541 		return;
2542 
2543 	if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
2544 			addr))
2545 		return;
2546 
2547 	area = find_vm_area(addr);
2548 	if (unlikely(!area)) {
2549 		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
2550 				addr);
2551 		return;
2552 	}
2553 
2554 	debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
2555 	debug_check_no_obj_freed(area->addr, get_vm_area_size(area));
2556 
2557 	kasan_poison_vmalloc(area->addr, get_vm_area_size(area));
2558 
2559 	vm_remove_mappings(area, deallocate_pages);
2560 
2561 	if (deallocate_pages) {
2562 		unsigned int page_order = vm_area_page_order(area);
2563 		int i;
2564 
2565 		for (i = 0; i < area->nr_pages; i += 1U << page_order) {
2566 			struct page *page = area->pages[i];
2567 
2568 			BUG_ON(!page);
2569 			__free_pages(page, page_order);
2570 			cond_resched();
2571 		}
2572 		atomic_long_sub(area->nr_pages, &nr_vmalloc_pages);
2573 
2574 		kvfree(area->pages);
2575 	}
2576 
2577 	kfree(area);
2578 }
2579 
2580 static inline void __vfree_deferred(const void *addr)
2581 {
2582 	/*
2583 	 * Use raw_cpu_ptr() because this can be called from preemptible
2584 	 * context. Preemption is absolutely fine here, because the llist_add()
2585 	 * implementation is lockless, so it works even if we are adding to
2586 	 * another cpu's list. schedule_work() should be fine with this too.
2587 	 */
2588 	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
2589 
2590 	if (llist_add((struct llist_node *)addr, &p->list))
2591 		schedule_work(&p->wq);
2592 }
2593 
2594 /**
2595  * vfree_atomic - release memory allocated by vmalloc()
2596  * @addr:	  memory base address
2597  *
2598  * This one is just like vfree() but can be called in any atomic context
2599  * except NMIs.
2600  */
2601 void vfree_atomic(const void *addr)
2602 {
2603 	BUG_ON(in_nmi());
2604 
2605 	kmemleak_free(addr);
2606 
2607 	if (!addr)
2608 		return;
2609 	__vfree_deferred(addr);
2610 }
2611 
2612 static void __vfree(const void *addr)
2613 {
2614 	if (unlikely(in_interrupt()))
2615 		__vfree_deferred(addr);
2616 	else
2617 		__vunmap(addr, 1);
2618 }
2619 
2620 /**
2621  * vfree - Release memory allocated by vmalloc()
2622  * @addr:  Memory base address
2623  *
2624  * Free the virtually continuous memory area starting at @addr, as obtained
2625  * from one of the vmalloc() family of APIs.  This will usually also free the
2626  * physical memory underlying the virtual allocation, but that memory is
2627  * reference counted, so it will not be freed until the last user goes away.
2628  *
2629  * If @addr is NULL, no operation is performed.
2630  *
2631  * Context:
2632  * May sleep if called *not* from interrupt context.
2633  * Must not be called in NMI context (strictly speaking, it could be
2634  * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
2635  * conventions for vfree() arch-dependent would be a really bad idea).
2636  */
2637 void vfree(const void *addr)
2638 {
2639 	BUG_ON(in_nmi());
2640 
2641 	kmemleak_free(addr);
2642 
2643 	might_sleep_if(!in_interrupt());
2644 
2645 	if (!addr)
2646 		return;
2647 
2648 	__vfree(addr);
2649 }
2650 EXPORT_SYMBOL(vfree);
2651 
2652 /**
2653  * vunmap - release virtual mapping obtained by vmap()
2654  * @addr:   memory base address
2655  *
2656  * Free the virtually contiguous memory area starting at @addr,
2657  * which was created from the page array passed to vmap().
2658  *
2659  * Must not be called in interrupt context.
2660  */
2661 void vunmap(const void *addr)
2662 {
2663 	BUG_ON(in_interrupt());
2664 	might_sleep();
2665 	if (addr)
2666 		__vunmap(addr, 0);
2667 }
2668 EXPORT_SYMBOL(vunmap);
2669 
2670 /**
2671  * vmap - map an array of pages into virtually contiguous space
2672  * @pages: array of page pointers
2673  * @count: number of pages to map
2674  * @flags: vm_area->flags
2675  * @prot: page protection for the mapping
2676  *
2677  * Maps @count pages from @pages into contiguous kernel virtual space.
2678  * If @flags contains %VM_MAP_PUT_PAGES the ownership of the pages array itself
2679  * (which must be kmalloc or vmalloc memory) and one reference per pages in it
2680  * are transferred from the caller to vmap(), and will be freed / dropped when
2681  * vfree() is called on the return value.
2682  *
2683  * Return: the address of the area or %NULL on failure
2684  */
2685 void *vmap(struct page **pages, unsigned int count,
2686 	   unsigned long flags, pgprot_t prot)
2687 {
2688 	struct vm_struct *area;
2689 	unsigned long addr;
2690 	unsigned long size;		/* In bytes */
2691 
2692 	might_sleep();
2693 
2694 	if (count > totalram_pages())
2695 		return NULL;
2696 
2697 	size = (unsigned long)count << PAGE_SHIFT;
2698 	area = get_vm_area_caller(size, flags, __builtin_return_address(0));
2699 	if (!area)
2700 		return NULL;
2701 
2702 	addr = (unsigned long)area->addr;
2703 	if (vmap_pages_range(addr, addr + size, pgprot_nx(prot),
2704 				pages, PAGE_SHIFT) < 0) {
2705 		vunmap(area->addr);
2706 		return NULL;
2707 	}
2708 
2709 	if (flags & VM_MAP_PUT_PAGES) {
2710 		area->pages = pages;
2711 		area->nr_pages = count;
2712 	}
2713 	return area->addr;
2714 }
2715 EXPORT_SYMBOL(vmap);
2716 
2717 #ifdef CONFIG_VMAP_PFN
2718 struct vmap_pfn_data {
2719 	unsigned long	*pfns;
2720 	pgprot_t	prot;
2721 	unsigned int	idx;
2722 };
2723 
2724 static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private)
2725 {
2726 	struct vmap_pfn_data *data = private;
2727 
2728 	if (WARN_ON_ONCE(pfn_valid(data->pfns[data->idx])))
2729 		return -EINVAL;
2730 	*pte = pte_mkspecial(pfn_pte(data->pfns[data->idx++], data->prot));
2731 	return 0;
2732 }
2733 
2734 /**
2735  * vmap_pfn - map an array of PFNs into virtually contiguous space
2736  * @pfns: array of PFNs
2737  * @count: number of pages to map
2738  * @prot: page protection for the mapping
2739  *
2740  * Maps @count PFNs from @pfns into contiguous kernel virtual space and returns
2741  * the start address of the mapping.
2742  */
2743 void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot)
2744 {
2745 	struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) };
2746 	struct vm_struct *area;
2747 
2748 	area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP,
2749 			__builtin_return_address(0));
2750 	if (!area)
2751 		return NULL;
2752 	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
2753 			count * PAGE_SIZE, vmap_pfn_apply, &data)) {
2754 		free_vm_area(area);
2755 		return NULL;
2756 	}
2757 	return area->addr;
2758 }
2759 EXPORT_SYMBOL_GPL(vmap_pfn);
2760 #endif /* CONFIG_VMAP_PFN */
2761 
2762 static inline unsigned int
2763 vm_area_alloc_pages(gfp_t gfp, int nid,
2764 		unsigned int order, unsigned long nr_pages, struct page **pages)
2765 {
2766 	unsigned int nr_allocated = 0;
2767 
2768 	/*
2769 	 * For order-0 pages we make use of bulk allocator, if
2770 	 * the page array is partly or not at all populated due
2771 	 * to fails, fallback to a single page allocator that is
2772 	 * more permissive.
2773 	 */
2774 	if (!order)
2775 		nr_allocated = alloc_pages_bulk_array_node(
2776 			gfp, nid, nr_pages, pages);
2777 	else
2778 		/*
2779 		 * Compound pages required for remap_vmalloc_page if
2780 		 * high-order pages.
2781 		 */
2782 		gfp |= __GFP_COMP;
2783 
2784 	/* High-order pages or fallback path if "bulk" fails. */
2785 	while (nr_allocated < nr_pages) {
2786 		struct page *page;
2787 		int i;
2788 
2789 		page = alloc_pages_node(nid, gfp, order);
2790 		if (unlikely(!page))
2791 			break;
2792 
2793 		/*
2794 		 * Careful, we allocate and map page-order pages, but
2795 		 * tracking is done per PAGE_SIZE page so as to keep the
2796 		 * vm_struct APIs independent of the physical/mapped size.
2797 		 */
2798 		for (i = 0; i < (1U << order); i++)
2799 			pages[nr_allocated + i] = page + i;
2800 
2801 		if (gfpflags_allow_blocking(gfp))
2802 			cond_resched();
2803 
2804 		nr_allocated += 1U << order;
2805 	}
2806 
2807 	return nr_allocated;
2808 }
2809 
2810 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
2811 				 pgprot_t prot, unsigned int page_shift,
2812 				 int node)
2813 {
2814 	const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
2815 	unsigned long addr = (unsigned long)area->addr;
2816 	unsigned long size = get_vm_area_size(area);
2817 	unsigned long array_size;
2818 	unsigned int nr_small_pages = size >> PAGE_SHIFT;
2819 	unsigned int page_order;
2820 
2821 	array_size = (unsigned long)nr_small_pages * sizeof(struct page *);
2822 	gfp_mask |= __GFP_NOWARN;
2823 	if (!(gfp_mask & (GFP_DMA | GFP_DMA32)))
2824 		gfp_mask |= __GFP_HIGHMEM;
2825 
2826 	/* Please note that the recursion is strictly bounded. */
2827 	if (array_size > PAGE_SIZE) {
2828 		area->pages = __vmalloc_node(array_size, 1, nested_gfp, node,
2829 					area->caller);
2830 	} else {
2831 		area->pages = kmalloc_node(array_size, nested_gfp, node);
2832 	}
2833 
2834 	if (!area->pages) {
2835 		warn_alloc(gfp_mask, NULL,
2836 			"vmalloc error: size %lu, failed to allocated page array size %lu",
2837 			nr_small_pages * PAGE_SIZE, array_size);
2838 		free_vm_area(area);
2839 		return NULL;
2840 	}
2841 
2842 	set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
2843 	page_order = vm_area_page_order(area);
2844 
2845 	area->nr_pages = vm_area_alloc_pages(gfp_mask, node,
2846 		page_order, nr_small_pages, area->pages);
2847 
2848 	atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
2849 
2850 	/*
2851 	 * If not enough pages were obtained to accomplish an
2852 	 * allocation request, free them via __vfree() if any.
2853 	 */
2854 	if (area->nr_pages != nr_small_pages) {
2855 		warn_alloc(gfp_mask, NULL,
2856 			"vmalloc error: size %lu, page order %u, failed to allocate pages",
2857 			area->nr_pages * PAGE_SIZE, page_order);
2858 		goto fail;
2859 	}
2860 
2861 	if (vmap_pages_range(addr, addr + size, prot, area->pages,
2862 			page_shift) < 0) {
2863 		warn_alloc(gfp_mask, NULL,
2864 			"vmalloc error: size %lu, failed to map pages",
2865 			area->nr_pages * PAGE_SIZE);
2866 		goto fail;
2867 	}
2868 
2869 	return area->addr;
2870 
2871 fail:
2872 	__vfree(area->addr);
2873 	return NULL;
2874 }
2875 
2876 /**
2877  * __vmalloc_node_range - allocate virtually contiguous memory
2878  * @size:		  allocation size
2879  * @align:		  desired alignment
2880  * @start:		  vm area range start
2881  * @end:		  vm area range end
2882  * @gfp_mask:		  flags for the page level allocator
2883  * @prot:		  protection mask for the allocated pages
2884  * @vm_flags:		  additional vm area flags (e.g. %VM_NO_GUARD)
2885  * @node:		  node to use for allocation or NUMA_NO_NODE
2886  * @caller:		  caller's return address
2887  *
2888  * Allocate enough pages to cover @size from the page level
2889  * allocator with @gfp_mask flags.  Map them into contiguous
2890  * kernel virtual space, using a pagetable protection of @prot.
2891  *
2892  * Return: the address of the area or %NULL on failure
2893  */
2894 void *__vmalloc_node_range(unsigned long size, unsigned long align,
2895 			unsigned long start, unsigned long end, gfp_t gfp_mask,
2896 			pgprot_t prot, unsigned long vm_flags, int node,
2897 			const void *caller)
2898 {
2899 	struct vm_struct *area;
2900 	void *addr;
2901 	unsigned long real_size = size;
2902 	unsigned long real_align = align;
2903 	unsigned int shift = PAGE_SHIFT;
2904 
2905 	if (WARN_ON_ONCE(!size))
2906 		return NULL;
2907 
2908 	if ((size >> PAGE_SHIFT) > totalram_pages()) {
2909 		warn_alloc(gfp_mask, NULL,
2910 			"vmalloc error: size %lu, exceeds total pages",
2911 			real_size);
2912 		return NULL;
2913 	}
2914 
2915 	if (vmap_allow_huge && !(vm_flags & VM_NO_HUGE_VMAP) &&
2916 			arch_vmap_pmd_supported(prot)) {
2917 		unsigned long size_per_node;
2918 
2919 		/*
2920 		 * Try huge pages. Only try for PAGE_KERNEL allocations,
2921 		 * others like modules don't yet expect huge pages in
2922 		 * their allocations due to apply_to_page_range not
2923 		 * supporting them.
2924 		 */
2925 
2926 		size_per_node = size;
2927 		if (node == NUMA_NO_NODE)
2928 			size_per_node /= num_online_nodes();
2929 		if (size_per_node >= PMD_SIZE) {
2930 			shift = PMD_SHIFT;
2931 			align = max(real_align, 1UL << shift);
2932 			size = ALIGN(real_size, 1UL << shift);
2933 		}
2934 	}
2935 
2936 again:
2937 	area = __get_vm_area_node(real_size, align, shift, VM_ALLOC |
2938 				  VM_UNINITIALIZED | vm_flags, start, end, node,
2939 				  gfp_mask, caller);
2940 	if (!area) {
2941 		warn_alloc(gfp_mask, NULL,
2942 			"vmalloc error: size %lu, vm_struct allocation failed",
2943 			real_size);
2944 		goto fail;
2945 	}
2946 
2947 	addr = __vmalloc_area_node(area, gfp_mask, prot, shift, node);
2948 	if (!addr)
2949 		goto fail;
2950 
2951 	/*
2952 	 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
2953 	 * flag. It means that vm_struct is not fully initialized.
2954 	 * Now, it is fully initialized, so remove this flag here.
2955 	 */
2956 	clear_vm_uninitialized_flag(area);
2957 
2958 	size = PAGE_ALIGN(size);
2959 	kmemleak_vmalloc(area, size, gfp_mask);
2960 
2961 	return addr;
2962 
2963 fail:
2964 	if (shift > PAGE_SHIFT) {
2965 		shift = PAGE_SHIFT;
2966 		align = real_align;
2967 		size = real_size;
2968 		goto again;
2969 	}
2970 
2971 	return NULL;
2972 }
2973 
2974 /**
2975  * __vmalloc_node - allocate virtually contiguous memory
2976  * @size:	    allocation size
2977  * @align:	    desired alignment
2978  * @gfp_mask:	    flags for the page level allocator
2979  * @node:	    node to use for allocation or NUMA_NO_NODE
2980  * @caller:	    caller's return address
2981  *
2982  * Allocate enough pages to cover @size from the page level allocator with
2983  * @gfp_mask flags.  Map them into contiguous kernel virtual space.
2984  *
2985  * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
2986  * and __GFP_NOFAIL are not supported
2987  *
2988  * Any use of gfp flags outside of GFP_KERNEL should be consulted
2989  * with mm people.
2990  *
2991  * Return: pointer to the allocated memory or %NULL on error
2992  */
2993 void *__vmalloc_node(unsigned long size, unsigned long align,
2994 			    gfp_t gfp_mask, int node, const void *caller)
2995 {
2996 	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
2997 				gfp_mask, PAGE_KERNEL, 0, node, caller);
2998 }
2999 /*
3000  * This is only for performance analysis of vmalloc and stress purpose.
3001  * It is required by vmalloc test module, therefore do not use it other
3002  * than that.
3003  */
3004 #ifdef CONFIG_TEST_VMALLOC_MODULE
3005 EXPORT_SYMBOL_GPL(__vmalloc_node);
3006 #endif
3007 
3008 void *__vmalloc(unsigned long size, gfp_t gfp_mask)
3009 {
3010 	return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE,
3011 				__builtin_return_address(0));
3012 }
3013 EXPORT_SYMBOL(__vmalloc);
3014 
3015 /**
3016  * vmalloc - allocate virtually contiguous memory
3017  * @size:    allocation size
3018  *
3019  * Allocate enough pages to cover @size from the page level
3020  * allocator and map them into contiguous kernel virtual space.
3021  *
3022  * For tight control over page level allocator and protection flags
3023  * use __vmalloc() instead.
3024  *
3025  * Return: pointer to the allocated memory or %NULL on error
3026  */
3027 void *vmalloc(unsigned long size)
3028 {
3029 	return __vmalloc_node(size, 1, GFP_KERNEL, NUMA_NO_NODE,
3030 				__builtin_return_address(0));
3031 }
3032 EXPORT_SYMBOL(vmalloc);
3033 
3034 /**
3035  * vmalloc_no_huge - allocate virtually contiguous memory using small pages
3036  * @size:    allocation size
3037  *
3038  * Allocate enough non-huge pages to cover @size from the page level
3039  * allocator and map them into contiguous kernel virtual space.
3040  *
3041  * Return: pointer to the allocated memory or %NULL on error
3042  */
3043 void *vmalloc_no_huge(unsigned long size)
3044 {
3045 	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
3046 				    GFP_KERNEL, PAGE_KERNEL, VM_NO_HUGE_VMAP,
3047 				    NUMA_NO_NODE, __builtin_return_address(0));
3048 }
3049 EXPORT_SYMBOL(vmalloc_no_huge);
3050 
3051 /**
3052  * vzalloc - allocate virtually contiguous memory with zero fill
3053  * @size:    allocation size
3054  *
3055  * Allocate enough pages to cover @size from the page level
3056  * allocator and map them into contiguous kernel virtual space.
3057  * The memory allocated is set to zero.
3058  *
3059  * For tight control over page level allocator and protection flags
3060  * use __vmalloc() instead.
3061  *
3062  * Return: pointer to the allocated memory or %NULL on error
3063  */
3064 void *vzalloc(unsigned long size)
3065 {
3066 	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE,
3067 				__builtin_return_address(0));
3068 }
3069 EXPORT_SYMBOL(vzalloc);
3070 
3071 /**
3072  * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
3073  * @size: allocation size
3074  *
3075  * The resulting memory area is zeroed so it can be mapped to userspace
3076  * without leaking data.
3077  *
3078  * Return: pointer to the allocated memory or %NULL on error
3079  */
3080 void *vmalloc_user(unsigned long size)
3081 {
3082 	return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
3083 				    GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
3084 				    VM_USERMAP, NUMA_NO_NODE,
3085 				    __builtin_return_address(0));
3086 }
3087 EXPORT_SYMBOL(vmalloc_user);
3088 
3089 /**
3090  * vmalloc_node - allocate memory on a specific node
3091  * @size:	  allocation size
3092  * @node:	  numa node
3093  *
3094  * Allocate enough pages to cover @size from the page level
3095  * allocator and map them into contiguous kernel virtual space.
3096  *
3097  * For tight control over page level allocator and protection flags
3098  * use __vmalloc() instead.
3099  *
3100  * Return: pointer to the allocated memory or %NULL on error
3101  */
3102 void *vmalloc_node(unsigned long size, int node)
3103 {
3104 	return __vmalloc_node(size, 1, GFP_KERNEL, node,
3105 			__builtin_return_address(0));
3106 }
3107 EXPORT_SYMBOL(vmalloc_node);
3108 
3109 /**
3110  * vzalloc_node - allocate memory on a specific node with zero fill
3111  * @size:	allocation size
3112  * @node:	numa node
3113  *
3114  * Allocate enough pages to cover @size from the page level
3115  * allocator and map them into contiguous kernel virtual space.
3116  * The memory allocated is set to zero.
3117  *
3118  * Return: pointer to the allocated memory or %NULL on error
3119  */
3120 void *vzalloc_node(unsigned long size, int node)
3121 {
3122 	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, node,
3123 				__builtin_return_address(0));
3124 }
3125 EXPORT_SYMBOL(vzalloc_node);
3126 
3127 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
3128 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
3129 #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
3130 #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
3131 #else
3132 /*
3133  * 64b systems should always have either DMA or DMA32 zones. For others
3134  * GFP_DMA32 should do the right thing and use the normal zone.
3135  */
3136 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
3137 #endif
3138 
3139 /**
3140  * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
3141  * @size:	allocation size
3142  *
3143  * Allocate enough 32bit PA addressable pages to cover @size from the
3144  * page level allocator and map them into contiguous kernel virtual space.
3145  *
3146  * Return: pointer to the allocated memory or %NULL on error
3147  */
3148 void *vmalloc_32(unsigned long size)
3149 {
3150 	return __vmalloc_node(size, 1, GFP_VMALLOC32, NUMA_NO_NODE,
3151 			__builtin_return_address(0));
3152 }
3153 EXPORT_SYMBOL(vmalloc_32);
3154 
3155 /**
3156  * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
3157  * @size:	     allocation size
3158  *
3159  * The resulting memory area is 32bit addressable and zeroed so it can be
3160  * mapped to userspace without leaking data.
3161  *
3162  * Return: pointer to the allocated memory or %NULL on error
3163  */
3164 void *vmalloc_32_user(unsigned long size)
3165 {
3166 	return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
3167 				    GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
3168 				    VM_USERMAP, NUMA_NO_NODE,
3169 				    __builtin_return_address(0));
3170 }
3171 EXPORT_SYMBOL(vmalloc_32_user);
3172 
3173 /*
3174  * small helper routine , copy contents to buf from addr.
3175  * If the page is not present, fill zero.
3176  */
3177 
3178 static int aligned_vread(char *buf, char *addr, unsigned long count)
3179 {
3180 	struct page *p;
3181 	int copied = 0;
3182 
3183 	while (count) {
3184 		unsigned long offset, length;
3185 
3186 		offset = offset_in_page(addr);
3187 		length = PAGE_SIZE - offset;
3188 		if (length > count)
3189 			length = count;
3190 		p = vmalloc_to_page(addr);
3191 		/*
3192 		 * To do safe access to this _mapped_ area, we need
3193 		 * lock. But adding lock here means that we need to add
3194 		 * overhead of vmalloc()/vfree() calls for this _debug_
3195 		 * interface, rarely used. Instead of that, we'll use
3196 		 * kmap() and get small overhead in this access function.
3197 		 */
3198 		if (p) {
3199 			/* We can expect USER0 is not used -- see vread() */
3200 			void *map = kmap_atomic(p);
3201 			memcpy(buf, map + offset, length);
3202 			kunmap_atomic(map);
3203 		} else
3204 			memset(buf, 0, length);
3205 
3206 		addr += length;
3207 		buf += length;
3208 		copied += length;
3209 		count -= length;
3210 	}
3211 	return copied;
3212 }
3213 
3214 /**
3215  * vread() - read vmalloc area in a safe way.
3216  * @buf:     buffer for reading data
3217  * @addr:    vm address.
3218  * @count:   number of bytes to be read.
3219  *
3220  * This function checks that addr is a valid vmalloc'ed area, and
3221  * copy data from that area to a given buffer. If the given memory range
3222  * of [addr...addr+count) includes some valid address, data is copied to
3223  * proper area of @buf. If there are memory holes, they'll be zero-filled.
3224  * IOREMAP area is treated as memory hole and no copy is done.
3225  *
3226  * If [addr...addr+count) doesn't includes any intersects with alive
3227  * vm_struct area, returns 0. @buf should be kernel's buffer.
3228  *
3229  * Note: In usual ops, vread() is never necessary because the caller
3230  * should know vmalloc() area is valid and can use memcpy().
3231  * This is for routines which have to access vmalloc area without
3232  * any information, as /proc/kcore.
3233  *
3234  * Return: number of bytes for which addr and buf should be increased
3235  * (same number as @count) or %0 if [addr...addr+count) doesn't
3236  * include any intersection with valid vmalloc area
3237  */
3238 long vread(char *buf, char *addr, unsigned long count)
3239 {
3240 	struct vmap_area *va;
3241 	struct vm_struct *vm;
3242 	char *vaddr, *buf_start = buf;
3243 	unsigned long buflen = count;
3244 	unsigned long n;
3245 
3246 	/* Don't allow overflow */
3247 	if ((unsigned long) addr + count < count)
3248 		count = -(unsigned long) addr;
3249 
3250 	spin_lock(&vmap_area_lock);
3251 	va = __find_vmap_area((unsigned long)addr);
3252 	if (!va)
3253 		goto finished;
3254 	list_for_each_entry_from(va, &vmap_area_list, list) {
3255 		if (!count)
3256 			break;
3257 
3258 		if (!va->vm)
3259 			continue;
3260 
3261 		vm = va->vm;
3262 		vaddr = (char *) vm->addr;
3263 		if (addr >= vaddr + get_vm_area_size(vm))
3264 			continue;
3265 		while (addr < vaddr) {
3266 			if (count == 0)
3267 				goto finished;
3268 			*buf = '\0';
3269 			buf++;
3270 			addr++;
3271 			count--;
3272 		}
3273 		n = vaddr + get_vm_area_size(vm) - addr;
3274 		if (n > count)
3275 			n = count;
3276 		if (!(vm->flags & VM_IOREMAP))
3277 			aligned_vread(buf, addr, n);
3278 		else /* IOREMAP area is treated as memory hole */
3279 			memset(buf, 0, n);
3280 		buf += n;
3281 		addr += n;
3282 		count -= n;
3283 	}
3284 finished:
3285 	spin_unlock(&vmap_area_lock);
3286 
3287 	if (buf == buf_start)
3288 		return 0;
3289 	/* zero-fill memory holes */
3290 	if (buf != buf_start + buflen)
3291 		memset(buf, 0, buflen - (buf - buf_start));
3292 
3293 	return buflen;
3294 }
3295 
3296 /**
3297  * remap_vmalloc_range_partial - map vmalloc pages to userspace
3298  * @vma:		vma to cover
3299  * @uaddr:		target user address to start at
3300  * @kaddr:		virtual address of vmalloc kernel memory
3301  * @pgoff:		offset from @kaddr to start at
3302  * @size:		size of map area
3303  *
3304  * Returns:	0 for success, -Exxx on failure
3305  *
3306  * This function checks that @kaddr is a valid vmalloc'ed area,
3307  * and that it is big enough to cover the range starting at
3308  * @uaddr in @vma. Will return failure if that criteria isn't
3309  * met.
3310  *
3311  * Similar to remap_pfn_range() (see mm/memory.c)
3312  */
3313 int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
3314 				void *kaddr, unsigned long pgoff,
3315 				unsigned long size)
3316 {
3317 	struct vm_struct *area;
3318 	unsigned long off;
3319 	unsigned long end_index;
3320 
3321 	if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
3322 		return -EINVAL;
3323 
3324 	size = PAGE_ALIGN(size);
3325 
3326 	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
3327 		return -EINVAL;
3328 
3329 	area = find_vm_area(kaddr);
3330 	if (!area)
3331 		return -EINVAL;
3332 
3333 	if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
3334 		return -EINVAL;
3335 
3336 	if (check_add_overflow(size, off, &end_index) ||
3337 	    end_index > get_vm_area_size(area))
3338 		return -EINVAL;
3339 	kaddr += off;
3340 
3341 	do {
3342 		struct page *page = vmalloc_to_page(kaddr);
3343 		int ret;
3344 
3345 		ret = vm_insert_page(vma, uaddr, page);
3346 		if (ret)
3347 			return ret;
3348 
3349 		uaddr += PAGE_SIZE;
3350 		kaddr += PAGE_SIZE;
3351 		size -= PAGE_SIZE;
3352 	} while (size > 0);
3353 
3354 	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
3355 
3356 	return 0;
3357 }
3358 
3359 /**
3360  * remap_vmalloc_range - map vmalloc pages to userspace
3361  * @vma:		vma to cover (map full range of vma)
3362  * @addr:		vmalloc memory
3363  * @pgoff:		number of pages into addr before first page to map
3364  *
3365  * Returns:	0 for success, -Exxx on failure
3366  *
3367  * This function checks that addr is a valid vmalloc'ed area, and
3368  * that it is big enough to cover the vma. Will return failure if
3369  * that criteria isn't met.
3370  *
3371  * Similar to remap_pfn_range() (see mm/memory.c)
3372  */
3373 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
3374 						unsigned long pgoff)
3375 {
3376 	return remap_vmalloc_range_partial(vma, vma->vm_start,
3377 					   addr, pgoff,
3378 					   vma->vm_end - vma->vm_start);
3379 }
3380 EXPORT_SYMBOL(remap_vmalloc_range);
3381 
3382 void free_vm_area(struct vm_struct *area)
3383 {
3384 	struct vm_struct *ret;
3385 	ret = remove_vm_area(area->addr);
3386 	BUG_ON(ret != area);
3387 	kfree(area);
3388 }
3389 EXPORT_SYMBOL_GPL(free_vm_area);
3390 
3391 #ifdef CONFIG_SMP
3392 static struct vmap_area *node_to_va(struct rb_node *n)
3393 {
3394 	return rb_entry_safe(n, struct vmap_area, rb_node);
3395 }
3396 
3397 /**
3398  * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
3399  * @addr: target address
3400  *
3401  * Returns: vmap_area if it is found. If there is no such area
3402  *   the first highest(reverse order) vmap_area is returned
3403  *   i.e. va->va_start < addr && va->va_end < addr or NULL
3404  *   if there are no any areas before @addr.
3405  */
3406 static struct vmap_area *
3407 pvm_find_va_enclose_addr(unsigned long addr)
3408 {
3409 	struct vmap_area *va, *tmp;
3410 	struct rb_node *n;
3411 
3412 	n = free_vmap_area_root.rb_node;
3413 	va = NULL;
3414 
3415 	while (n) {
3416 		tmp = rb_entry(n, struct vmap_area, rb_node);
3417 		if (tmp->va_start <= addr) {
3418 			va = tmp;
3419 			if (tmp->va_end >= addr)
3420 				break;
3421 
3422 			n = n->rb_right;
3423 		} else {
3424 			n = n->rb_left;
3425 		}
3426 	}
3427 
3428 	return va;
3429 }
3430 
3431 /**
3432  * pvm_determine_end_from_reverse - find the highest aligned address
3433  * of free block below VMALLOC_END
3434  * @va:
3435  *   in - the VA we start the search(reverse order);
3436  *   out - the VA with the highest aligned end address.
3437  * @align: alignment for required highest address
3438  *
3439  * Returns: determined end address within vmap_area
3440  */
3441 static unsigned long
3442 pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
3443 {
3444 	unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
3445 	unsigned long addr;
3446 
3447 	if (likely(*va)) {
3448 		list_for_each_entry_from_reverse((*va),
3449 				&free_vmap_area_list, list) {
3450 			addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
3451 			if ((*va)->va_start < addr)
3452 				return addr;
3453 		}
3454 	}
3455 
3456 	return 0;
3457 }
3458 
3459 /**
3460  * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
3461  * @offsets: array containing offset of each area
3462  * @sizes: array containing size of each area
3463  * @nr_vms: the number of areas to allocate
3464  * @align: alignment, all entries in @offsets and @sizes must be aligned to this
3465  *
3466  * Returns: kmalloc'd vm_struct pointer array pointing to allocated
3467  *	    vm_structs on success, %NULL on failure
3468  *
3469  * Percpu allocator wants to use congruent vm areas so that it can
3470  * maintain the offsets among percpu areas.  This function allocates
3471  * congruent vmalloc areas for it with GFP_KERNEL.  These areas tend to
3472  * be scattered pretty far, distance between two areas easily going up
3473  * to gigabytes.  To avoid interacting with regular vmallocs, these
3474  * areas are allocated from top.
3475  *
3476  * Despite its complicated look, this allocator is rather simple. It
3477  * does everything top-down and scans free blocks from the end looking
3478  * for matching base. While scanning, if any of the areas do not fit the
3479  * base address is pulled down to fit the area. Scanning is repeated till
3480  * all the areas fit and then all necessary data structures are inserted
3481  * and the result is returned.
3482  */
3483 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
3484 				     const size_t *sizes, int nr_vms,
3485 				     size_t align)
3486 {
3487 	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
3488 	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
3489 	struct vmap_area **vas, *va;
3490 	struct vm_struct **vms;
3491 	int area, area2, last_area, term_area;
3492 	unsigned long base, start, size, end, last_end, orig_start, orig_end;
3493 	bool purged = false;
3494 	enum fit_type type;
3495 
3496 	/* verify parameters and allocate data structures */
3497 	BUG_ON(offset_in_page(align) || !is_power_of_2(align));
3498 	for (last_area = 0, area = 0; area < nr_vms; area++) {
3499 		start = offsets[area];
3500 		end = start + sizes[area];
3501 
3502 		/* is everything aligned properly? */
3503 		BUG_ON(!IS_ALIGNED(offsets[area], align));
3504 		BUG_ON(!IS_ALIGNED(sizes[area], align));
3505 
3506 		/* detect the area with the highest address */
3507 		if (start > offsets[last_area])
3508 			last_area = area;
3509 
3510 		for (area2 = area + 1; area2 < nr_vms; area2++) {
3511 			unsigned long start2 = offsets[area2];
3512 			unsigned long end2 = start2 + sizes[area2];
3513 
3514 			BUG_ON(start2 < end && start < end2);
3515 		}
3516 	}
3517 	last_end = offsets[last_area] + sizes[last_area];
3518 
3519 	if (vmalloc_end - vmalloc_start < last_end) {
3520 		WARN_ON(true);
3521 		return NULL;
3522 	}
3523 
3524 	vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
3525 	vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
3526 	if (!vas || !vms)
3527 		goto err_free2;
3528 
3529 	for (area = 0; area < nr_vms; area++) {
3530 		vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL);
3531 		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
3532 		if (!vas[area] || !vms[area])
3533 			goto err_free;
3534 	}
3535 retry:
3536 	spin_lock(&free_vmap_area_lock);
3537 
3538 	/* start scanning - we scan from the top, begin with the last area */
3539 	area = term_area = last_area;
3540 	start = offsets[area];
3541 	end = start + sizes[area];
3542 
3543 	va = pvm_find_va_enclose_addr(vmalloc_end);
3544 	base = pvm_determine_end_from_reverse(&va, align) - end;
3545 
3546 	while (true) {
3547 		/*
3548 		 * base might have underflowed, add last_end before
3549 		 * comparing.
3550 		 */
3551 		if (base + last_end < vmalloc_start + last_end)
3552 			goto overflow;
3553 
3554 		/*
3555 		 * Fitting base has not been found.
3556 		 */
3557 		if (va == NULL)
3558 			goto overflow;
3559 
3560 		/*
3561 		 * If required width exceeds current VA block, move
3562 		 * base downwards and then recheck.
3563 		 */
3564 		if (base + end > va->va_end) {
3565 			base = pvm_determine_end_from_reverse(&va, align) - end;
3566 			term_area = area;
3567 			continue;
3568 		}
3569 
3570 		/*
3571 		 * If this VA does not fit, move base downwards and recheck.
3572 		 */
3573 		if (base + start < va->va_start) {
3574 			va = node_to_va(rb_prev(&va->rb_node));
3575 			base = pvm_determine_end_from_reverse(&va, align) - end;
3576 			term_area = area;
3577 			continue;
3578 		}
3579 
3580 		/*
3581 		 * This area fits, move on to the previous one.  If
3582 		 * the previous one is the terminal one, we're done.
3583 		 */
3584 		area = (area + nr_vms - 1) % nr_vms;
3585 		if (area == term_area)
3586 			break;
3587 
3588 		start = offsets[area];
3589 		end = start + sizes[area];
3590 		va = pvm_find_va_enclose_addr(base + end);
3591 	}
3592 
3593 	/* we've found a fitting base, insert all va's */
3594 	for (area = 0; area < nr_vms; area++) {
3595 		int ret;
3596 
3597 		start = base + offsets[area];
3598 		size = sizes[area];
3599 
3600 		va = pvm_find_va_enclose_addr(start);
3601 		if (WARN_ON_ONCE(va == NULL))
3602 			/* It is a BUG(), but trigger recovery instead. */
3603 			goto recovery;
3604 
3605 		type = classify_va_fit_type(va, start, size);
3606 		if (WARN_ON_ONCE(type == NOTHING_FIT))
3607 			/* It is a BUG(), but trigger recovery instead. */
3608 			goto recovery;
3609 
3610 		ret = adjust_va_to_fit_type(va, start, size, type);
3611 		if (unlikely(ret))
3612 			goto recovery;
3613 
3614 		/* Allocated area. */
3615 		va = vas[area];
3616 		va->va_start = start;
3617 		va->va_end = start + size;
3618 	}
3619 
3620 	spin_unlock(&free_vmap_area_lock);
3621 
3622 	/* populate the kasan shadow space */
3623 	for (area = 0; area < nr_vms; area++) {
3624 		if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
3625 			goto err_free_shadow;
3626 
3627 		kasan_unpoison_vmalloc((void *)vas[area]->va_start,
3628 				       sizes[area]);
3629 	}
3630 
3631 	/* insert all vm's */
3632 	spin_lock(&vmap_area_lock);
3633 	for (area = 0; area < nr_vms; area++) {
3634 		insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list);
3635 
3636 		setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC,
3637 				 pcpu_get_vm_areas);
3638 	}
3639 	spin_unlock(&vmap_area_lock);
3640 
3641 	kfree(vas);
3642 	return vms;
3643 
3644 recovery:
3645 	/*
3646 	 * Remove previously allocated areas. There is no
3647 	 * need in removing these areas from the busy tree,
3648 	 * because they are inserted only on the final step
3649 	 * and when pcpu_get_vm_areas() is success.
3650 	 */
3651 	while (area--) {
3652 		orig_start = vas[area]->va_start;
3653 		orig_end = vas[area]->va_end;
3654 		va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
3655 				&free_vmap_area_list);
3656 		if (va)
3657 			kasan_release_vmalloc(orig_start, orig_end,
3658 				va->va_start, va->va_end);
3659 		vas[area] = NULL;
3660 	}
3661 
3662 overflow:
3663 	spin_unlock(&free_vmap_area_lock);
3664 	if (!purged) {
3665 		purge_vmap_area_lazy();
3666 		purged = true;
3667 
3668 		/* Before "retry", check if we recover. */
3669 		for (area = 0; area < nr_vms; area++) {
3670 			if (vas[area])
3671 				continue;
3672 
3673 			vas[area] = kmem_cache_zalloc(
3674 				vmap_area_cachep, GFP_KERNEL);
3675 			if (!vas[area])
3676 				goto err_free;
3677 		}
3678 
3679 		goto retry;
3680 	}
3681 
3682 err_free:
3683 	for (area = 0; area < nr_vms; area++) {
3684 		if (vas[area])
3685 			kmem_cache_free(vmap_area_cachep, vas[area]);
3686 
3687 		kfree(vms[area]);
3688 	}
3689 err_free2:
3690 	kfree(vas);
3691 	kfree(vms);
3692 	return NULL;
3693 
3694 err_free_shadow:
3695 	spin_lock(&free_vmap_area_lock);
3696 	/*
3697 	 * We release all the vmalloc shadows, even the ones for regions that
3698 	 * hadn't been successfully added. This relies on kasan_release_vmalloc
3699 	 * being able to tolerate this case.
3700 	 */
3701 	for (area = 0; area < nr_vms; area++) {
3702 		orig_start = vas[area]->va_start;
3703 		orig_end = vas[area]->va_end;
3704 		va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
3705 				&free_vmap_area_list);
3706 		if (va)
3707 			kasan_release_vmalloc(orig_start, orig_end,
3708 				va->va_start, va->va_end);
3709 		vas[area] = NULL;
3710 		kfree(vms[area]);
3711 	}
3712 	spin_unlock(&free_vmap_area_lock);
3713 	kfree(vas);
3714 	kfree(vms);
3715 	return NULL;
3716 }
3717 
3718 /**
3719  * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
3720  * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
3721  * @nr_vms: the number of allocated areas
3722  *
3723  * Free vm_structs and the array allocated by pcpu_get_vm_areas().
3724  */
3725 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
3726 {
3727 	int i;
3728 
3729 	for (i = 0; i < nr_vms; i++)
3730 		free_vm_area(vms[i]);
3731 	kfree(vms);
3732 }
3733 #endif	/* CONFIG_SMP */
3734 
3735 #ifdef CONFIG_PRINTK
3736 bool vmalloc_dump_obj(void *object)
3737 {
3738 	struct vm_struct *vm;
3739 	void *objp = (void *)PAGE_ALIGN((unsigned long)object);
3740 
3741 	vm = find_vm_area(objp);
3742 	if (!vm)
3743 		return false;
3744 	pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
3745 		vm->nr_pages, (unsigned long)vm->addr, vm->caller);
3746 	return true;
3747 }
3748 #endif
3749 
3750 #ifdef CONFIG_PROC_FS
3751 static void *s_start(struct seq_file *m, loff_t *pos)
3752 	__acquires(&vmap_purge_lock)
3753 	__acquires(&vmap_area_lock)
3754 {
3755 	mutex_lock(&vmap_purge_lock);
3756 	spin_lock(&vmap_area_lock);
3757 
3758 	return seq_list_start(&vmap_area_list, *pos);
3759 }
3760 
3761 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
3762 {
3763 	return seq_list_next(p, &vmap_area_list, pos);
3764 }
3765 
3766 static void s_stop(struct seq_file *m, void *p)
3767 	__releases(&vmap_area_lock)
3768 	__releases(&vmap_purge_lock)
3769 {
3770 	spin_unlock(&vmap_area_lock);
3771 	mutex_unlock(&vmap_purge_lock);
3772 }
3773 
3774 static void show_numa_info(struct seq_file *m, struct vm_struct *v)
3775 {
3776 	if (IS_ENABLED(CONFIG_NUMA)) {
3777 		unsigned int nr, *counters = m->private;
3778 
3779 		if (!counters)
3780 			return;
3781 
3782 		if (v->flags & VM_UNINITIALIZED)
3783 			return;
3784 		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
3785 		smp_rmb();
3786 
3787 		memset(counters, 0, nr_node_ids * sizeof(unsigned int));
3788 
3789 		for (nr = 0; nr < v->nr_pages; nr++)
3790 			counters[page_to_nid(v->pages[nr])]++;
3791 
3792 		for_each_node_state(nr, N_HIGH_MEMORY)
3793 			if (counters[nr])
3794 				seq_printf(m, " N%u=%u", nr, counters[nr]);
3795 	}
3796 }
3797 
3798 static void show_purge_info(struct seq_file *m)
3799 {
3800 	struct vmap_area *va;
3801 
3802 	spin_lock(&purge_vmap_area_lock);
3803 	list_for_each_entry(va, &purge_vmap_area_list, list) {
3804 		seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
3805 			(void *)va->va_start, (void *)va->va_end,
3806 			va->va_end - va->va_start);
3807 	}
3808 	spin_unlock(&purge_vmap_area_lock);
3809 }
3810 
3811 static int s_show(struct seq_file *m, void *p)
3812 {
3813 	struct vmap_area *va;
3814 	struct vm_struct *v;
3815 
3816 	va = list_entry(p, struct vmap_area, list);
3817 
3818 	/*
3819 	 * s_show can encounter race with remove_vm_area, !vm on behalf
3820 	 * of vmap area is being tear down or vm_map_ram allocation.
3821 	 */
3822 	if (!va->vm) {
3823 		seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
3824 			(void *)va->va_start, (void *)va->va_end,
3825 			va->va_end - va->va_start);
3826 
3827 		return 0;
3828 	}
3829 
3830 	v = va->vm;
3831 
3832 	seq_printf(m, "0x%pK-0x%pK %7ld",
3833 		v->addr, v->addr + v->size, v->size);
3834 
3835 	if (v->caller)
3836 		seq_printf(m, " %pS", v->caller);
3837 
3838 	if (v->nr_pages)
3839 		seq_printf(m, " pages=%d", v->nr_pages);
3840 
3841 	if (v->phys_addr)
3842 		seq_printf(m, " phys=%pa", &v->phys_addr);
3843 
3844 	if (v->flags & VM_IOREMAP)
3845 		seq_puts(m, " ioremap");
3846 
3847 	if (v->flags & VM_ALLOC)
3848 		seq_puts(m, " vmalloc");
3849 
3850 	if (v->flags & VM_MAP)
3851 		seq_puts(m, " vmap");
3852 
3853 	if (v->flags & VM_USERMAP)
3854 		seq_puts(m, " user");
3855 
3856 	if (v->flags & VM_DMA_COHERENT)
3857 		seq_puts(m, " dma-coherent");
3858 
3859 	if (is_vmalloc_addr(v->pages))
3860 		seq_puts(m, " vpages");
3861 
3862 	show_numa_info(m, v);
3863 	seq_putc(m, '\n');
3864 
3865 	/*
3866 	 * As a final step, dump "unpurged" areas.
3867 	 */
3868 	if (list_is_last(&va->list, &vmap_area_list))
3869 		show_purge_info(m);
3870 
3871 	return 0;
3872 }
3873 
3874 static const struct seq_operations vmalloc_op = {
3875 	.start = s_start,
3876 	.next = s_next,
3877 	.stop = s_stop,
3878 	.show = s_show,
3879 };
3880 
3881 static int __init proc_vmalloc_init(void)
3882 {
3883 	if (IS_ENABLED(CONFIG_NUMA))
3884 		proc_create_seq_private("vmallocinfo", 0400, NULL,
3885 				&vmalloc_op,
3886 				nr_node_ids * sizeof(unsigned int), NULL);
3887 	else
3888 		proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op);
3889 	return 0;
3890 }
3891 module_init(proc_vmalloc_init);
3892 
3893 #endif
3894