xref: /linux/mm/vmalloc.c (revision 73bdf0a60e607f4b8ecc5aec597105976565a84f)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  *  linux/mm/vmalloc.c
31da177e4SLinus Torvalds  *
41da177e4SLinus Torvalds  *  Copyright (C) 1993  Linus Torvalds
51da177e4SLinus Torvalds  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
61da177e4SLinus Torvalds  *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
71da177e4SLinus Torvalds  *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
8930fc45aSChristoph Lameter  *  Numa awareness, Christoph Lameter, SGI, June 2005
91da177e4SLinus Torvalds  */
101da177e4SLinus Torvalds 
111da177e4SLinus Torvalds #include <linux/mm.h>
121da177e4SLinus Torvalds #include <linux/module.h>
131da177e4SLinus Torvalds #include <linux/highmem.h>
141da177e4SLinus Torvalds #include <linux/slab.h>
151da177e4SLinus Torvalds #include <linux/spinlock.h>
161da177e4SLinus Torvalds #include <linux/interrupt.h>
17a10aa579SChristoph Lameter #include <linux/seq_file.h>
183ac7fe5aSThomas Gleixner #include <linux/debugobjects.h>
191da177e4SLinus Torvalds #include <linux/vmalloc.h>
2023016969SChristoph Lameter #include <linux/kallsyms.h>
211da177e4SLinus Torvalds 
221da177e4SLinus Torvalds #include <asm/uaccess.h>
231da177e4SLinus Torvalds #include <asm/tlbflush.h>
241da177e4SLinus Torvalds 
251da177e4SLinus Torvalds 
261da177e4SLinus Torvalds DEFINE_RWLOCK(vmlist_lock);
271da177e4SLinus Torvalds struct vm_struct *vmlist;
281da177e4SLinus Torvalds 
29b221385bSAdrian Bunk static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
3023016969SChristoph Lameter 			    int node, void *caller);
31b221385bSAdrian Bunk 
321da177e4SLinus Torvalds static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
331da177e4SLinus Torvalds {
341da177e4SLinus Torvalds 	pte_t *pte;
351da177e4SLinus Torvalds 
361da177e4SLinus Torvalds 	pte = pte_offset_kernel(pmd, addr);
371da177e4SLinus Torvalds 	do {
381da177e4SLinus Torvalds 		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
391da177e4SLinus Torvalds 		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
401da177e4SLinus Torvalds 	} while (pte++, addr += PAGE_SIZE, addr != end);
411da177e4SLinus Torvalds }
421da177e4SLinus Torvalds 
431da177e4SLinus Torvalds static inline void vunmap_pmd_range(pud_t *pud, unsigned long addr,
441da177e4SLinus Torvalds 						unsigned long end)
451da177e4SLinus Torvalds {
461da177e4SLinus Torvalds 	pmd_t *pmd;
471da177e4SLinus Torvalds 	unsigned long next;
481da177e4SLinus Torvalds 
491da177e4SLinus Torvalds 	pmd = pmd_offset(pud, addr);
501da177e4SLinus Torvalds 	do {
511da177e4SLinus Torvalds 		next = pmd_addr_end(addr, end);
521da177e4SLinus Torvalds 		if (pmd_none_or_clear_bad(pmd))
531da177e4SLinus Torvalds 			continue;
541da177e4SLinus Torvalds 		vunmap_pte_range(pmd, addr, next);
551da177e4SLinus Torvalds 	} while (pmd++, addr = next, addr != end);
561da177e4SLinus Torvalds }
571da177e4SLinus Torvalds 
581da177e4SLinus Torvalds static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
591da177e4SLinus Torvalds 						unsigned long end)
601da177e4SLinus Torvalds {
611da177e4SLinus Torvalds 	pud_t *pud;
621da177e4SLinus Torvalds 	unsigned long next;
631da177e4SLinus Torvalds 
641da177e4SLinus Torvalds 	pud = pud_offset(pgd, addr);
651da177e4SLinus Torvalds 	do {
661da177e4SLinus Torvalds 		next = pud_addr_end(addr, end);
671da177e4SLinus Torvalds 		if (pud_none_or_clear_bad(pud))
681da177e4SLinus Torvalds 			continue;
691da177e4SLinus Torvalds 		vunmap_pmd_range(pud, addr, next);
701da177e4SLinus Torvalds 	} while (pud++, addr = next, addr != end);
711da177e4SLinus Torvalds }
721da177e4SLinus Torvalds 
73c19c03fcSBenjamin Herrenschmidt void unmap_kernel_range(unsigned long addr, unsigned long size)
741da177e4SLinus Torvalds {
751da177e4SLinus Torvalds 	pgd_t *pgd;
761da177e4SLinus Torvalds 	unsigned long next;
77c19c03fcSBenjamin Herrenschmidt 	unsigned long start = addr;
78c19c03fcSBenjamin Herrenschmidt 	unsigned long end = addr + size;
791da177e4SLinus Torvalds 
801da177e4SLinus Torvalds 	BUG_ON(addr >= end);
811da177e4SLinus Torvalds 	pgd = pgd_offset_k(addr);
821da177e4SLinus Torvalds 	flush_cache_vunmap(addr, end);
831da177e4SLinus Torvalds 	do {
841da177e4SLinus Torvalds 		next = pgd_addr_end(addr, end);
851da177e4SLinus Torvalds 		if (pgd_none_or_clear_bad(pgd))
861da177e4SLinus Torvalds 			continue;
871da177e4SLinus Torvalds 		vunmap_pud_range(pgd, addr, next);
881da177e4SLinus Torvalds 	} while (pgd++, addr = next, addr != end);
89c19c03fcSBenjamin Herrenschmidt 	flush_tlb_kernel_range(start, end);
90c19c03fcSBenjamin Herrenschmidt }
91c19c03fcSBenjamin Herrenschmidt 
92c19c03fcSBenjamin Herrenschmidt static void unmap_vm_area(struct vm_struct *area)
93c19c03fcSBenjamin Herrenschmidt {
94c19c03fcSBenjamin Herrenschmidt 	unmap_kernel_range((unsigned long)area->addr, area->size);
951da177e4SLinus Torvalds }
961da177e4SLinus Torvalds 
971da177e4SLinus Torvalds static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
981da177e4SLinus Torvalds 			unsigned long end, pgprot_t prot, struct page ***pages)
991da177e4SLinus Torvalds {
1001da177e4SLinus Torvalds 	pte_t *pte;
1011da177e4SLinus Torvalds 
102872fec16SHugh Dickins 	pte = pte_alloc_kernel(pmd, addr);
1031da177e4SLinus Torvalds 	if (!pte)
1041da177e4SLinus Torvalds 		return -ENOMEM;
1051da177e4SLinus Torvalds 	do {
1061da177e4SLinus Torvalds 		struct page *page = **pages;
1071da177e4SLinus Torvalds 		WARN_ON(!pte_none(*pte));
1081da177e4SLinus Torvalds 		if (!page)
1091da177e4SLinus Torvalds 			return -ENOMEM;
1101da177e4SLinus Torvalds 		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
1111da177e4SLinus Torvalds 		(*pages)++;
1121da177e4SLinus Torvalds 	} while (pte++, addr += PAGE_SIZE, addr != end);
1131da177e4SLinus Torvalds 	return 0;
1141da177e4SLinus Torvalds }
1151da177e4SLinus Torvalds 
1161da177e4SLinus Torvalds static inline int vmap_pmd_range(pud_t *pud, unsigned long addr,
1171da177e4SLinus Torvalds 			unsigned long end, pgprot_t prot, struct page ***pages)
1181da177e4SLinus Torvalds {
1191da177e4SLinus Torvalds 	pmd_t *pmd;
1201da177e4SLinus Torvalds 	unsigned long next;
1211da177e4SLinus Torvalds 
1221da177e4SLinus Torvalds 	pmd = pmd_alloc(&init_mm, pud, addr);
1231da177e4SLinus Torvalds 	if (!pmd)
1241da177e4SLinus Torvalds 		return -ENOMEM;
1251da177e4SLinus Torvalds 	do {
1261da177e4SLinus Torvalds 		next = pmd_addr_end(addr, end);
1271da177e4SLinus Torvalds 		if (vmap_pte_range(pmd, addr, next, prot, pages))
1281da177e4SLinus Torvalds 			return -ENOMEM;
1291da177e4SLinus Torvalds 	} while (pmd++, addr = next, addr != end);
1301da177e4SLinus Torvalds 	return 0;
1311da177e4SLinus Torvalds }
1321da177e4SLinus Torvalds 
1331da177e4SLinus Torvalds static inline int vmap_pud_range(pgd_t *pgd, unsigned long addr,
1341da177e4SLinus Torvalds 			unsigned long end, pgprot_t prot, struct page ***pages)
1351da177e4SLinus Torvalds {
1361da177e4SLinus Torvalds 	pud_t *pud;
1371da177e4SLinus Torvalds 	unsigned long next;
1381da177e4SLinus Torvalds 
1391da177e4SLinus Torvalds 	pud = pud_alloc(&init_mm, pgd, addr);
1401da177e4SLinus Torvalds 	if (!pud)
1411da177e4SLinus Torvalds 		return -ENOMEM;
1421da177e4SLinus Torvalds 	do {
1431da177e4SLinus Torvalds 		next = pud_addr_end(addr, end);
1441da177e4SLinus Torvalds 		if (vmap_pmd_range(pud, addr, next, prot, pages))
1451da177e4SLinus Torvalds 			return -ENOMEM;
1461da177e4SLinus Torvalds 	} while (pud++, addr = next, addr != end);
1471da177e4SLinus Torvalds 	return 0;
1481da177e4SLinus Torvalds }
1491da177e4SLinus Torvalds 
1501da177e4SLinus Torvalds int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
1511da177e4SLinus Torvalds {
1521da177e4SLinus Torvalds 	pgd_t *pgd;
1531da177e4SLinus Torvalds 	unsigned long next;
1541da177e4SLinus Torvalds 	unsigned long addr = (unsigned long) area->addr;
1551da177e4SLinus Torvalds 	unsigned long end = addr + area->size - PAGE_SIZE;
1561da177e4SLinus Torvalds 	int err;
1571da177e4SLinus Torvalds 
1581da177e4SLinus Torvalds 	BUG_ON(addr >= end);
1591da177e4SLinus Torvalds 	pgd = pgd_offset_k(addr);
1601da177e4SLinus Torvalds 	do {
1611da177e4SLinus Torvalds 		next = pgd_addr_end(addr, end);
1621da177e4SLinus Torvalds 		err = vmap_pud_range(pgd, addr, next, prot, pages);
1631da177e4SLinus Torvalds 		if (err)
1641da177e4SLinus Torvalds 			break;
1651da177e4SLinus Torvalds 	} while (pgd++, addr = next, addr != end);
1661da177e4SLinus Torvalds 	flush_cache_vmap((unsigned long) area->addr, end);
1671da177e4SLinus Torvalds 	return err;
1681da177e4SLinus Torvalds }
1695992b6daSRusty Russell EXPORT_SYMBOL_GPL(map_vm_area);
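
/*
 * Illustrative sketch (not part of the original file): pairing get_vm_area()
 * with map_vm_area() is essentially what vmap() below does internally.  The
 * helper name and the fixed four-page count are assumptions made for this
 * example only.
 */
static void *example_map_four_pages(struct page **four_pages, pgprot_t prot)
{
	struct vm_struct *area;
	struct page **cursor = four_pages;

	area = get_vm_area(4 * PAGE_SIZE, VM_MAP);
	if (!area)
		return NULL;

	/* map_vm_area() advances the cursor as it fills in the ptes */
	if (map_vm_area(area, prot, &cursor)) {
		vunmap(area->addr);
		return NULL;
	}
	return area->addr;
}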
1701da177e4SLinus Torvalds 
171*73bdf0a6SLinus Torvalds static inline int is_vmalloc_or_module_addr(const void *x)
172*73bdf0a6SLinus Torvalds {
173*73bdf0a6SLinus Torvalds 	/*
174*73bdf0a6SLinus Torvalds 	 * x86-64 and sparc64 put modules in a special place,
175*73bdf0a6SLinus Torvalds 	 * and fall back on vmalloc() if that fails. Others
176*73bdf0a6SLinus Torvalds 	 * just put it in the vmalloc space.
177*73bdf0a6SLinus Torvalds 	 */
178*73bdf0a6SLinus Torvalds #if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
179*73bdf0a6SLinus Torvalds 	unsigned long addr = (unsigned long)x;
180*73bdf0a6SLinus Torvalds 	if (addr >= MODULES_VADDR && addr < MODULES_END)
181*73bdf0a6SLinus Torvalds 		return 1;
182*73bdf0a6SLinus Torvalds #endif
183*73bdf0a6SLinus Torvalds 	return is_vmalloc_addr(x);
184*73bdf0a6SLinus Torvalds }
185*73bdf0a6SLinus Torvalds 
18648667e7aSChristoph Lameter /*
18748667e7aSChristoph Lameter  * Map a vmalloc()-space virtual address to the physical page.
18848667e7aSChristoph Lameter  */
189b3bdda02SChristoph Lameter struct page *vmalloc_to_page(const void *vmalloc_addr)
19048667e7aSChristoph Lameter {
19148667e7aSChristoph Lameter 	unsigned long addr = (unsigned long) vmalloc_addr;
19248667e7aSChristoph Lameter 	struct page *page = NULL;
19348667e7aSChristoph Lameter 	pgd_t *pgd = pgd_offset_k(addr);
19448667e7aSChristoph Lameter 	pud_t *pud;
19548667e7aSChristoph Lameter 	pmd_t *pmd;
19648667e7aSChristoph Lameter 	pte_t *ptep, pte;
19748667e7aSChristoph Lameter 
1987aa413deSIngo Molnar 	/*
1997aa413deSIngo Molnar 	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
2007aa413deSIngo Molnar 	 * architectures that do not vmalloc module space
2017aa413deSIngo Molnar 	 */
202*73bdf0a6SLinus Torvalds 	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
20359ea7463SJiri Slaby 
20448667e7aSChristoph Lameter 	if (!pgd_none(*pgd)) {
20548667e7aSChristoph Lameter 		pud = pud_offset(pgd, addr);
20648667e7aSChristoph Lameter 		if (!pud_none(*pud)) {
20748667e7aSChristoph Lameter 			pmd = pmd_offset(pud, addr);
20848667e7aSChristoph Lameter 			if (!pmd_none(*pmd)) {
20948667e7aSChristoph Lameter 				ptep = pte_offset_map(pmd, addr);
21048667e7aSChristoph Lameter 				pte = *ptep;
21148667e7aSChristoph Lameter 				if (pte_present(pte))
21248667e7aSChristoph Lameter 					page = pte_page(pte);
21348667e7aSChristoph Lameter 				pte_unmap(ptep);
21448667e7aSChristoph Lameter 			}
21548667e7aSChristoph Lameter 		}
21648667e7aSChristoph Lameter 	}
21748667e7aSChristoph Lameter 	return page;
21848667e7aSChristoph Lameter }
21948667e7aSChristoph Lameter EXPORT_SYMBOL(vmalloc_to_page);
22048667e7aSChristoph Lameter 
22148667e7aSChristoph Lameter /*
22248667e7aSChristoph Lameter  * Map a vmalloc()-space virtual address to the physical page frame number.
22348667e7aSChristoph Lameter  */
224b3bdda02SChristoph Lameter unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
22548667e7aSChristoph Lameter {
22648667e7aSChristoph Lameter 	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
22748667e7aSChristoph Lameter }
22848667e7aSChristoph Lameter EXPORT_SYMBOL(vmalloc_to_pfn);
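
/*
 * Illustrative sketch (not part of the original file): walking a vmalloc()ed
 * buffer page by page with vmalloc_to_page(), for code that needs the
 * backing struct page pointers.  The helper name and parameters are
 * assumptions made for this example only.
 */
static int example_collect_backing_pages(void *buf, unsigned long size,
					 struct page **pages)
{
	unsigned long offset;

	for (offset = 0; offset < size; offset += PAGE_SIZE) {
		struct page *page = vmalloc_to_page(buf + offset);

		if (!page)
			return -EINVAL;
		*pages++ = page;
	}
	return 0;
}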
22948667e7aSChristoph Lameter 
23023016969SChristoph Lameter static struct vm_struct *
23123016969SChristoph Lameter __get_vm_area_node(unsigned long size, unsigned long flags, unsigned long start,
23223016969SChristoph Lameter 		unsigned long end, int node, gfp_t gfp_mask, void *caller)
2331da177e4SLinus Torvalds {
2341da177e4SLinus Torvalds 	struct vm_struct **p, *tmp, *area;
2351da177e4SLinus Torvalds 	unsigned long align = 1;
2361da177e4SLinus Torvalds 	unsigned long addr;
2371da177e4SLinus Torvalds 
23852fd24caSGiridhar Pemmasani 	BUG_ON(in_interrupt());
2391da177e4SLinus Torvalds 	if (flags & VM_IOREMAP) {
2401da177e4SLinus Torvalds 		int bit = fls(size);
2411da177e4SLinus Torvalds 
2421da177e4SLinus Torvalds 		if (bit > IOREMAP_MAX_ORDER)
2431da177e4SLinus Torvalds 			bit = IOREMAP_MAX_ORDER;
2441da177e4SLinus Torvalds 		else if (bit < PAGE_SHIFT)
2451da177e4SLinus Torvalds 			bit = PAGE_SHIFT;
2461da177e4SLinus Torvalds 
2471da177e4SLinus Torvalds 		align = 1ul << bit;
2481da177e4SLinus Torvalds 	}
2491da177e4SLinus Torvalds 	addr = ALIGN(start, align);
2501da177e4SLinus Torvalds 	size = PAGE_ALIGN(size);
25131be8309SOGAWA Hirofumi 	if (unlikely(!size))
25231be8309SOGAWA Hirofumi 		return NULL;
2531da177e4SLinus Torvalds 
2546cb06229SChristoph Lameter 	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
2556cb06229SChristoph Lameter 
2561da177e4SLinus Torvalds 	if (unlikely(!area))
2571da177e4SLinus Torvalds 		return NULL;
2581da177e4SLinus Torvalds 
2591da177e4SLinus Torvalds 	/*
2601da177e4SLinus Torvalds 	 * We always allocate a guard page.
2611da177e4SLinus Torvalds 	 */
2621da177e4SLinus Torvalds 	size += PAGE_SIZE;
2631da177e4SLinus Torvalds 
2641da177e4SLinus Torvalds 	write_lock(&vmlist_lock);
2651da177e4SLinus Torvalds 	for (p = &vmlist; (tmp = *p) != NULL ;p = &tmp->next) {
2661da177e4SLinus Torvalds 		if ((unsigned long)tmp->addr < addr) {
2671da177e4SLinus Torvalds 			if((unsigned long)tmp->addr + tmp->size >= addr)
2681da177e4SLinus Torvalds 				addr = ALIGN(tmp->size +
2691da177e4SLinus Torvalds 					     (unsigned long)tmp->addr, align);
2701da177e4SLinus Torvalds 			continue;
2711da177e4SLinus Torvalds 		}
2721da177e4SLinus Torvalds 		if ((size + addr) < addr)
2731da177e4SLinus Torvalds 			goto out;
2741da177e4SLinus Torvalds 		if (size + addr <= (unsigned long)tmp->addr)
2751da177e4SLinus Torvalds 			goto found;
2761da177e4SLinus Torvalds 		addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
2771da177e4SLinus Torvalds 		if (addr > end - size)
2781da177e4SLinus Torvalds 			goto out;
2791da177e4SLinus Torvalds 	}
2805dc33185SRobert Bragg 	if ((size + addr) < addr)
2815dc33185SRobert Bragg 		goto out;
2825dc33185SRobert Bragg 	if (addr > end - size)
2835dc33185SRobert Bragg 		goto out;
2841da177e4SLinus Torvalds 
2851da177e4SLinus Torvalds found:
2861da177e4SLinus Torvalds 	area->next = *p;
2871da177e4SLinus Torvalds 	*p = area;
2881da177e4SLinus Torvalds 
2891da177e4SLinus Torvalds 	area->flags = flags;
2901da177e4SLinus Torvalds 	area->addr = (void *)addr;
2911da177e4SLinus Torvalds 	area->size = size;
2921da177e4SLinus Torvalds 	area->pages = NULL;
2931da177e4SLinus Torvalds 	area->nr_pages = 0;
2941da177e4SLinus Torvalds 	area->phys_addr = 0;
29523016969SChristoph Lameter 	area->caller = caller;
2961da177e4SLinus Torvalds 	write_unlock(&vmlist_lock);
2971da177e4SLinus Torvalds 
2981da177e4SLinus Torvalds 	return area;
2991da177e4SLinus Torvalds 
3001da177e4SLinus Torvalds out:
3011da177e4SLinus Torvalds 	write_unlock(&vmlist_lock);
3021da177e4SLinus Torvalds 	kfree(area);
3031da177e4SLinus Torvalds 	if (printk_ratelimit())
3041da177e4SLinus Torvalds 		printk(KERN_WARNING "allocation failed: out of vmalloc space - use vmalloc=<size> to increase size.\n");
3051da177e4SLinus Torvalds 	return NULL;
3061da177e4SLinus Torvalds }
3071da177e4SLinus Torvalds 
308930fc45aSChristoph Lameter struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
309930fc45aSChristoph Lameter 				unsigned long start, unsigned long end)
310930fc45aSChristoph Lameter {
31123016969SChristoph Lameter 	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
31223016969SChristoph Lameter 						__builtin_return_address(0));
313930fc45aSChristoph Lameter }
3145992b6daSRusty Russell EXPORT_SYMBOL_GPL(__get_vm_area);
315930fc45aSChristoph Lameter 
3161da177e4SLinus Torvalds /**
317183ff22bSSimon Arlott  *	get_vm_area  -  reserve a contiguous kernel virtual area
3181da177e4SLinus Torvalds  *	@size:		size of the area
3191da177e4SLinus Torvalds  *	@flags:		%VM_IOREMAP for I/O mappings or %VM_ALLOC
3201da177e4SLinus Torvalds  *
3211da177e4SLinus Torvalds  *	Search an area of @size in the kernel virtual mapping area,
3221da177e4SLinus Torvalds  *	and reserve it for our purposes.  Returns the area descriptor
3231da177e4SLinus Torvalds  *	on success or %NULL on failure.
3241da177e4SLinus Torvalds  */
3251da177e4SLinus Torvalds struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
3261da177e4SLinus Torvalds {
32723016969SChristoph Lameter 	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
32823016969SChristoph Lameter 				-1, GFP_KERNEL, __builtin_return_address(0));
32923016969SChristoph Lameter }
33023016969SChristoph Lameter 
33123016969SChristoph Lameter struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
33223016969SChristoph Lameter 				void *caller)
33323016969SChristoph Lameter {
33423016969SChristoph Lameter 	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
33523016969SChristoph Lameter 						-1, GFP_KERNEL, caller);
3361da177e4SLinus Torvalds }
3371da177e4SLinus Torvalds 
33852fd24caSGiridhar Pemmasani struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
33952fd24caSGiridhar Pemmasani 				   int node, gfp_t gfp_mask)
340930fc45aSChristoph Lameter {
34152fd24caSGiridhar Pemmasani 	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
34223016969SChristoph Lameter 				  gfp_mask, __builtin_return_address(0));
343930fc45aSChristoph Lameter }
344930fc45aSChristoph Lameter 
3457856dfebSAndi Kleen /* Caller must hold vmlist_lock */
346b3bdda02SChristoph Lameter static struct vm_struct *__find_vm_area(const void *addr)
34783342314SNick Piggin {
34883342314SNick Piggin 	struct vm_struct *tmp;
34983342314SNick Piggin 
35083342314SNick Piggin 	for (tmp = vmlist; tmp != NULL; tmp = tmp->next) {
35183342314SNick Piggin 		 if (tmp->addr == addr)
35283342314SNick Piggin 			break;
35383342314SNick Piggin 	}
35483342314SNick Piggin 
35583342314SNick Piggin 	return tmp;
35683342314SNick Piggin }
35783342314SNick Piggin 
35883342314SNick Piggin /* Caller must hold vmlist_lock */
359b3bdda02SChristoph Lameter static struct vm_struct *__remove_vm_area(const void *addr)
3607856dfebSAndi Kleen {
3617856dfebSAndi Kleen 	struct vm_struct **p, *tmp;
3627856dfebSAndi Kleen 
3637856dfebSAndi Kleen 	for (p = &vmlist ; (tmp = *p) != NULL ;p = &tmp->next) {
3647856dfebSAndi Kleen 		 if (tmp->addr == addr)
3657856dfebSAndi Kleen 			 goto found;
3667856dfebSAndi Kleen 	}
3677856dfebSAndi Kleen 	return NULL;
3687856dfebSAndi Kleen 
3697856dfebSAndi Kleen found:
3707856dfebSAndi Kleen 	unmap_vm_area(tmp);
3717856dfebSAndi Kleen 	*p = tmp->next;
3727856dfebSAndi Kleen 
3737856dfebSAndi Kleen 	/*
3747856dfebSAndi Kleen 	 * Remove the guard page.
3757856dfebSAndi Kleen 	 */
3767856dfebSAndi Kleen 	tmp->size -= PAGE_SIZE;
3777856dfebSAndi Kleen 	return tmp;
3787856dfebSAndi Kleen }
3797856dfebSAndi Kleen 
3801da177e4SLinus Torvalds /**
381183ff22bSSimon Arlott  *	remove_vm_area  -  find and remove a contiguous kernel virtual area
3821da177e4SLinus Torvalds  *	@addr:		base address
3831da177e4SLinus Torvalds  *
3841da177e4SLinus Torvalds  *	Search for the kernel VM area starting at @addr, and remove it.
3851da177e4SLinus Torvalds  *	This function returns the found VM area, but on SMP machines it is
3867856dfebSAndi Kleen  *	not safe to use the returned area except for its size or flags.
3871da177e4SLinus Torvalds  */
388b3bdda02SChristoph Lameter struct vm_struct *remove_vm_area(const void *addr)
3891da177e4SLinus Torvalds {
3907856dfebSAndi Kleen 	struct vm_struct *v;
3911da177e4SLinus Torvalds 	write_lock(&vmlist_lock);
3927856dfebSAndi Kleen 	v = __remove_vm_area(addr);
3931da177e4SLinus Torvalds 	write_unlock(&vmlist_lock);
3947856dfebSAndi Kleen 	return v;
3951da177e4SLinus Torvalds }
3961da177e4SLinus Torvalds 
397b3bdda02SChristoph Lameter static void __vunmap(const void *addr, int deallocate_pages)
3981da177e4SLinus Torvalds {
3991da177e4SLinus Torvalds 	struct vm_struct *area;
4001da177e4SLinus Torvalds 
4011da177e4SLinus Torvalds 	if (!addr)
4021da177e4SLinus Torvalds 		return;
4031da177e4SLinus Torvalds 
4041da177e4SLinus Torvalds 	if ((PAGE_SIZE-1) & (unsigned long)addr) {
4054c8573e2SArjan van de Ven 		WARN(1, KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
4061da177e4SLinus Torvalds 		return;
4071da177e4SLinus Torvalds 	}
4081da177e4SLinus Torvalds 
4091da177e4SLinus Torvalds 	area = remove_vm_area(addr);
4101da177e4SLinus Torvalds 	if (unlikely(!area)) {
4114c8573e2SArjan van de Ven 		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
4121da177e4SLinus Torvalds 				addr);
4131da177e4SLinus Torvalds 		return;
4141da177e4SLinus Torvalds 	}
4151da177e4SLinus Torvalds 
4169a11b49aSIngo Molnar 	debug_check_no_locks_freed(addr, area->size);
4173ac7fe5aSThomas Gleixner 	debug_check_no_obj_freed(addr, area->size);
4189a11b49aSIngo Molnar 
4191da177e4SLinus Torvalds 	if (deallocate_pages) {
4201da177e4SLinus Torvalds 		int i;
4211da177e4SLinus Torvalds 
4221da177e4SLinus Torvalds 		for (i = 0; i < area->nr_pages; i++) {
423bf53d6f8SChristoph Lameter 			struct page *page = area->pages[i];
424bf53d6f8SChristoph Lameter 
425bf53d6f8SChristoph Lameter 			BUG_ON(!page);
426bf53d6f8SChristoph Lameter 			__free_page(page);
4271da177e4SLinus Torvalds 		}
4281da177e4SLinus Torvalds 
4298757d5faSJan Kiszka 		if (area->flags & VM_VPAGES)
4301da177e4SLinus Torvalds 			vfree(area->pages);
4311da177e4SLinus Torvalds 		else
4321da177e4SLinus Torvalds 			kfree(area->pages);
4331da177e4SLinus Torvalds 	}
4341da177e4SLinus Torvalds 
4351da177e4SLinus Torvalds 	kfree(area);
4361da177e4SLinus Torvalds 	return;
4371da177e4SLinus Torvalds }
4381da177e4SLinus Torvalds 
4391da177e4SLinus Torvalds /**
4401da177e4SLinus Torvalds  *	vfree  -  release memory allocated by vmalloc()
4411da177e4SLinus Torvalds  *	@addr:		memory base address
4421da177e4SLinus Torvalds  *
443183ff22bSSimon Arlott  *	Free the virtually contiguous memory area starting at @addr, as
44480e93effSPekka Enberg  *	obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
44580e93effSPekka Enberg  *	NULL, no operation is performed.
4461da177e4SLinus Torvalds  *
44780e93effSPekka Enberg  *	Must not be called in interrupt context.
4481da177e4SLinus Torvalds  */
449b3bdda02SChristoph Lameter void vfree(const void *addr)
4501da177e4SLinus Torvalds {
4511da177e4SLinus Torvalds 	BUG_ON(in_interrupt());
4521da177e4SLinus Torvalds 	__vunmap(addr, 1);
4531da177e4SLinus Torvalds }
4541da177e4SLinus Torvalds EXPORT_SYMBOL(vfree);
4551da177e4SLinus Torvalds 
4561da177e4SLinus Torvalds /**
4571da177e4SLinus Torvalds  *	vunmap  -  release virtual mapping obtained by vmap()
4581da177e4SLinus Torvalds  *	@addr:		memory base address
4591da177e4SLinus Torvalds  *
4601da177e4SLinus Torvalds  *	Free the virtually contiguous memory area starting at @addr,
4611da177e4SLinus Torvalds  *	which was created from the page array passed to vmap().
4621da177e4SLinus Torvalds  *
46380e93effSPekka Enberg  *	Must not be called in interrupt context.
4641da177e4SLinus Torvalds  */
465b3bdda02SChristoph Lameter void vunmap(const void *addr)
4661da177e4SLinus Torvalds {
4671da177e4SLinus Torvalds 	BUG_ON(in_interrupt());
4681da177e4SLinus Torvalds 	__vunmap(addr, 0);
4691da177e4SLinus Torvalds }
4701da177e4SLinus Torvalds EXPORT_SYMBOL(vunmap);
4711da177e4SLinus Torvalds 
4721da177e4SLinus Torvalds /**
4731da177e4SLinus Torvalds  *	vmap  -  map an array of pages into virtually contiguous space
4741da177e4SLinus Torvalds  *	@pages:		array of page pointers
4751da177e4SLinus Torvalds  *	@count:		number of pages to map
4761da177e4SLinus Torvalds  *	@flags:		vm_area->flags
4771da177e4SLinus Torvalds  *	@prot:		page protection for the mapping
4781da177e4SLinus Torvalds  *
4791da177e4SLinus Torvalds  *	Maps @count pages from @pages into contiguous kernel virtual
4801da177e4SLinus Torvalds  *	space.
4811da177e4SLinus Torvalds  */
4821da177e4SLinus Torvalds void *vmap(struct page **pages, unsigned int count,
4831da177e4SLinus Torvalds 		unsigned long flags, pgprot_t prot)
4841da177e4SLinus Torvalds {
4851da177e4SLinus Torvalds 	struct vm_struct *area;
4861da177e4SLinus Torvalds 
4871da177e4SLinus Torvalds 	if (count > num_physpages)
4881da177e4SLinus Torvalds 		return NULL;
4891da177e4SLinus Torvalds 
49023016969SChristoph Lameter 	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
49123016969SChristoph Lameter 					__builtin_return_address(0));
4921da177e4SLinus Torvalds 	if (!area)
4931da177e4SLinus Torvalds 		return NULL;
49423016969SChristoph Lameter 
4951da177e4SLinus Torvalds 	if (map_vm_area(area, prot, &pages)) {
4961da177e4SLinus Torvalds 		vunmap(area->addr);
4971da177e4SLinus Torvalds 		return NULL;
4981da177e4SLinus Torvalds 	}
4991da177e4SLinus Torvalds 
5001da177e4SLinus Torvalds 	return area->addr;
5011da177e4SLinus Torvalds }
5021da177e4SLinus Torvalds EXPORT_SYMBOL(vmap);
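
/*
 * Illustrative sketch (not part of the original file): allocating individual
 * pages and making them virtually contiguous with vmap().  The page count of
 * eight and the helper name are assumptions made for this example only.
 */
static void *example_vmap_eight_pages(struct page **pages)
{
	void *addr;
	int i;

	for (i = 0; i < 8; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto free_pages;
	}

	addr = vmap(pages, 8, VM_MAP, PAGE_KERNEL);
	if (!addr)
		goto free_pages;

	return addr;	/* tear down later with vunmap() plus __free_page() */

free_pages:
	while (i--)
		__free_page(pages[i]);
	return NULL;
}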
5031da177e4SLinus Torvalds 
504e31d9eb5SAdrian Bunk static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
50523016969SChristoph Lameter 				 pgprot_t prot, int node, void *caller)
5061da177e4SLinus Torvalds {
5071da177e4SLinus Torvalds 	struct page **pages;
5081da177e4SLinus Torvalds 	unsigned int nr_pages, array_size, i;
5091da177e4SLinus Torvalds 
5101da177e4SLinus Torvalds 	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
5111da177e4SLinus Torvalds 	array_size = (nr_pages * sizeof(struct page *));
5121da177e4SLinus Torvalds 
5131da177e4SLinus Torvalds 	area->nr_pages = nr_pages;
5141da177e4SLinus Torvalds 	/* Please note that the recursion is strictly bounded. */
5158757d5faSJan Kiszka 	if (array_size > PAGE_SIZE) {
51694f6030cSChristoph Lameter 		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
51723016969SChristoph Lameter 				PAGE_KERNEL, node, caller);
5188757d5faSJan Kiszka 		area->flags |= VM_VPAGES;
519286e1ea3SAndrew Morton 	} else {
520286e1ea3SAndrew Morton 		pages = kmalloc_node(array_size,
5216cb06229SChristoph Lameter 				(gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
522286e1ea3SAndrew Morton 				node);
523286e1ea3SAndrew Morton 	}
5241da177e4SLinus Torvalds 	area->pages = pages;
52523016969SChristoph Lameter 	area->caller = caller;
5261da177e4SLinus Torvalds 	if (!area->pages) {
5271da177e4SLinus Torvalds 		remove_vm_area(area->addr);
5281da177e4SLinus Torvalds 		kfree(area);
5291da177e4SLinus Torvalds 		return NULL;
5301da177e4SLinus Torvalds 	}
5311da177e4SLinus Torvalds 
5321da177e4SLinus Torvalds 	for (i = 0; i < area->nr_pages; i++) {
533bf53d6f8SChristoph Lameter 		struct page *page;
534bf53d6f8SChristoph Lameter 
535930fc45aSChristoph Lameter 		if (node < 0)
536bf53d6f8SChristoph Lameter 			page = alloc_page(gfp_mask);
537930fc45aSChristoph Lameter 		else
538bf53d6f8SChristoph Lameter 			page = alloc_pages_node(node, gfp_mask, 0);
539bf53d6f8SChristoph Lameter 
540bf53d6f8SChristoph Lameter 		if (unlikely(!page)) {
5411da177e4SLinus Torvalds 			/* Successfully allocated i pages, free them in __vunmap() */
5421da177e4SLinus Torvalds 			area->nr_pages = i;
5431da177e4SLinus Torvalds 			goto fail;
5441da177e4SLinus Torvalds 		}
545bf53d6f8SChristoph Lameter 		area->pages[i] = page;
5461da177e4SLinus Torvalds 	}
5471da177e4SLinus Torvalds 
5481da177e4SLinus Torvalds 	if (map_vm_area(area, prot, &pages))
5491da177e4SLinus Torvalds 		goto fail;
5501da177e4SLinus Torvalds 	return area->addr;
5511da177e4SLinus Torvalds 
5521da177e4SLinus Torvalds fail:
5531da177e4SLinus Torvalds 	vfree(area->addr);
5541da177e4SLinus Torvalds 	return NULL;
5551da177e4SLinus Torvalds }
5561da177e4SLinus Torvalds 
557930fc45aSChristoph Lameter void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
558930fc45aSChristoph Lameter {
55923016969SChristoph Lameter 	return __vmalloc_area_node(area, gfp_mask, prot, -1,
56023016969SChristoph Lameter 					__builtin_return_address(0));
561930fc45aSChristoph Lameter }
562930fc45aSChristoph Lameter 
5631da177e4SLinus Torvalds /**
564930fc45aSChristoph Lameter  *	__vmalloc_node  -  allocate virtually contiguous memory
5651da177e4SLinus Torvalds  *	@size:		allocation size
5661da177e4SLinus Torvalds  *	@gfp_mask:	flags for the page level allocator
5671da177e4SLinus Torvalds  *	@prot:		protection mask for the allocated pages
568d44e0780SRandy Dunlap  *	@node:		node to use for allocation or -1
569c85d194bSRandy Dunlap  *	@caller:	caller's return address
5701da177e4SLinus Torvalds  *
5711da177e4SLinus Torvalds  *	Allocate enough pages to cover @size from the page level
5721da177e4SLinus Torvalds  *	allocator with @gfp_mask flags.  Map them into contiguous
5731da177e4SLinus Torvalds  *	kernel virtual space, using a pagetable protection of @prot.
5741da177e4SLinus Torvalds  */
575b221385bSAdrian Bunk static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
57623016969SChristoph Lameter 						int node, void *caller)
5771da177e4SLinus Torvalds {
5781da177e4SLinus Torvalds 	struct vm_struct *area;
5791da177e4SLinus Torvalds 
5801da177e4SLinus Torvalds 	size = PAGE_ALIGN(size);
5811da177e4SLinus Torvalds 	if (!size || (size >> PAGE_SHIFT) > num_physpages)
5821da177e4SLinus Torvalds 		return NULL;
5831da177e4SLinus Torvalds 
58423016969SChristoph Lameter 	area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,
58523016969SChristoph Lameter 						node, gfp_mask, caller);
58623016969SChristoph Lameter 
5871da177e4SLinus Torvalds 	if (!area)
5881da177e4SLinus Torvalds 		return NULL;
5891da177e4SLinus Torvalds 
59023016969SChristoph Lameter 	return __vmalloc_area_node(area, gfp_mask, prot, node, caller);
5911da177e4SLinus Torvalds }
5921da177e4SLinus Torvalds 
593930fc45aSChristoph Lameter void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
594930fc45aSChristoph Lameter {
59523016969SChristoph Lameter 	return __vmalloc_node(size, gfp_mask, prot, -1,
59623016969SChristoph Lameter 				__builtin_return_address(0));
597930fc45aSChristoph Lameter }
5981da177e4SLinus Torvalds EXPORT_SYMBOL(__vmalloc);
5991da177e4SLinus Torvalds 
6001da177e4SLinus Torvalds /**
6011da177e4SLinus Torvalds  *	vmalloc  -  allocate virtually contiguous memory
6021da177e4SLinus Torvalds  *	@size:		allocation size
 *
6031da177e4SLinus Torvalds  *	Allocate enough pages to cover @size from the page level
6041da177e4SLinus Torvalds  *	allocator and map them into contiguous kernel virtual space.
6051da177e4SLinus Torvalds  *
606c1c8897fSMichael Opdenacker  *	For tight control over the page level allocator and protection flags
6071da177e4SLinus Torvalds  *	use __vmalloc() instead.
6081da177e4SLinus Torvalds  */
6091da177e4SLinus Torvalds void *vmalloc(unsigned long size)
6101da177e4SLinus Torvalds {
61123016969SChristoph Lameter 	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
61223016969SChristoph Lameter 					-1, __builtin_return_address(0));
6131da177e4SLinus Torvalds }
6141da177e4SLinus Torvalds EXPORT_SYMBOL(vmalloc);
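
/*
 * Illustrative sketch (not part of the original file): the usual
 * vmalloc()/vfree() pattern for a large buffer that only needs to be
 * virtually contiguous.  The helper name and the one megabyte size are
 * assumptions made for this example only.
 */
static int example_use_vmalloc(void)
{
	void *buf = vmalloc(1024 * 1024);

	if (!buf)
		return -ENOMEM;

	/* ... fill and use the buffer; the pages are not zeroed for us ... */

	vfree(buf);
	return 0;
}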
6151da177e4SLinus Torvalds 
616930fc45aSChristoph Lameter /**
617ead04089SRolf Eike Beer  * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
61883342314SNick Piggin  * @size: allocation size
619ead04089SRolf Eike Beer  *
620ead04089SRolf Eike Beer  * The resulting memory area is zeroed so it can be mapped to userspace
621ead04089SRolf Eike Beer  * without leaking data.
62283342314SNick Piggin  */
62383342314SNick Piggin void *vmalloc_user(unsigned long size)
62483342314SNick Piggin {
62583342314SNick Piggin 	struct vm_struct *area;
62683342314SNick Piggin 	void *ret;
62783342314SNick Piggin 
62883342314SNick Piggin 	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
6292b4ac44eSEric Dumazet 	if (ret) {
63083342314SNick Piggin 		write_lock(&vmlist_lock);
63183342314SNick Piggin 		area = __find_vm_area(ret);
63283342314SNick Piggin 		area->flags |= VM_USERMAP;
63383342314SNick Piggin 		write_unlock(&vmlist_lock);
6342b4ac44eSEric Dumazet 	}
63583342314SNick Piggin 	return ret;
63683342314SNick Piggin }
63783342314SNick Piggin EXPORT_SYMBOL(vmalloc_user);
63883342314SNick Piggin 
63983342314SNick Piggin /**
640930fc45aSChristoph Lameter  *	vmalloc_node  -  allocate memory on a specific node
641930fc45aSChristoph Lameter  *	@size:		allocation size
642d44e0780SRandy Dunlap  *	@node:		numa node
643930fc45aSChristoph Lameter  *
644930fc45aSChristoph Lameter  *	Allocate enough pages to cover @size from the page level
645930fc45aSChristoph Lameter  *	allocator and map them into contiguous kernel virtual space.
646930fc45aSChristoph Lameter  *
647c1c8897fSMichael Opdenacker  *	For tight control over the page level allocator and protection flags
648930fc45aSChristoph Lameter  *	use __vmalloc() instead.
649930fc45aSChristoph Lameter  */
650930fc45aSChristoph Lameter void *vmalloc_node(unsigned long size, int node)
651930fc45aSChristoph Lameter {
65223016969SChristoph Lameter 	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
65323016969SChristoph Lameter 					node, __builtin_return_address(0));
654930fc45aSChristoph Lameter }
655930fc45aSChristoph Lameter EXPORT_SYMBOL(vmalloc_node);
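
/*
 * Illustrative sketch (not part of the original file): giving each online
 * node its own virtually contiguous buffer so the memory is local to the
 * CPUs that will touch it.  The helper name, the caller-supplied array and
 * its cleanup policy are assumptions made for this example only.
 */
static int example_alloc_per_node_buffers(void **bufs, unsigned long size)
{
	int node;

	for_each_online_node(node) {
		bufs[node] = vmalloc_node(size, node);
		if (!bufs[node])
			return -ENOMEM;	/* caller vfree()s what was set up */
	}
	return 0;
}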
656930fc45aSChristoph Lameter 
6574dc3b16bSPavel Pisa #ifndef PAGE_KERNEL_EXEC
6584dc3b16bSPavel Pisa # define PAGE_KERNEL_EXEC PAGE_KERNEL
6594dc3b16bSPavel Pisa #endif
6604dc3b16bSPavel Pisa 
6611da177e4SLinus Torvalds /**
6621da177e4SLinus Torvalds  *	vmalloc_exec  -  allocate virtually contiguous, executable memory
6631da177e4SLinus Torvalds  *	@size:		allocation size
6641da177e4SLinus Torvalds  *
6651da177e4SLinus Torvalds  *	Kernel-internal function to allocate enough pages to cover @size
6661da177e4SLinus Torvalds  *	from the page level allocator and map them into contiguous and
6671da177e4SLinus Torvalds  *	executable kernel virtual space.
6681da177e4SLinus Torvalds  *
669c1c8897fSMichael Opdenacker  *	For tight control over the page level allocator and protection flags
6701da177e4SLinus Torvalds  *	use __vmalloc() instead.
6711da177e4SLinus Torvalds  */
6731da177e4SLinus Torvalds void *vmalloc_exec(unsigned long size)
6741da177e4SLinus Torvalds {
6751da177e4SLinus Torvalds 	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
6761da177e4SLinus Torvalds }
6771da177e4SLinus Torvalds 
6780d08e0d3SAndi Kleen #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
6797ac674f5SBenjamin Herrenschmidt #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
6800d08e0d3SAndi Kleen #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
6817ac674f5SBenjamin Herrenschmidt #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
6820d08e0d3SAndi Kleen #else
6830d08e0d3SAndi Kleen #define GFP_VMALLOC32 GFP_KERNEL
6840d08e0d3SAndi Kleen #endif
6850d08e0d3SAndi Kleen 
6861da177e4SLinus Torvalds /**
6871da177e4SLinus Torvalds  *	vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
6881da177e4SLinus Torvalds  *	@size:		allocation size
6891da177e4SLinus Torvalds  *
6901da177e4SLinus Torvalds  *	Allocate enough 32bit PA addressable pages to cover @size from the
6911da177e4SLinus Torvalds  *	page level allocator and map them into contiguous kernel virtual space.
6921da177e4SLinus Torvalds  */
6931da177e4SLinus Torvalds void *vmalloc_32(unsigned long size)
6941da177e4SLinus Torvalds {
6950d08e0d3SAndi Kleen 	return __vmalloc(size, GFP_VMALLOC32, PAGE_KERNEL);
6961da177e4SLinus Torvalds }
6971da177e4SLinus Torvalds EXPORT_SYMBOL(vmalloc_32);
6981da177e4SLinus Torvalds 
69983342314SNick Piggin /**
700ead04089SRolf Eike Beer  * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
70183342314SNick Piggin  *	@size:		allocation size
702ead04089SRolf Eike Beer  *
703ead04089SRolf Eike Beer  * The resulting memory area is 32bit addressable and zeroed so it can be
704ead04089SRolf Eike Beer  * mapped to userspace without leaking data.
70583342314SNick Piggin  */
70683342314SNick Piggin void *vmalloc_32_user(unsigned long size)
70783342314SNick Piggin {
70883342314SNick Piggin 	struct vm_struct *area;
70983342314SNick Piggin 	void *ret;
71083342314SNick Piggin 
7110d08e0d3SAndi Kleen 	ret = __vmalloc(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL);
7122b4ac44eSEric Dumazet 	if (ret) {
71383342314SNick Piggin 		write_lock(&vmlist_lock);
71483342314SNick Piggin 		area = __find_vm_area(ret);
71583342314SNick Piggin 		area->flags |= VM_USERMAP;
71683342314SNick Piggin 		write_unlock(&vmlist_lock);
7172b4ac44eSEric Dumazet 	}
71883342314SNick Piggin 	return ret;
71983342314SNick Piggin }
72083342314SNick Piggin EXPORT_SYMBOL(vmalloc_32_user);
72183342314SNick Piggin 
7221da177e4SLinus Torvalds long vread(char *buf, char *addr, unsigned long count)
7231da177e4SLinus Torvalds {
7241da177e4SLinus Torvalds 	struct vm_struct *tmp;
7251da177e4SLinus Torvalds 	char *vaddr, *buf_start = buf;
7261da177e4SLinus Torvalds 	unsigned long n;
7271da177e4SLinus Torvalds 
7281da177e4SLinus Torvalds 	/* Don't allow overflow */
7291da177e4SLinus Torvalds 	if ((unsigned long) addr + count < count)
7301da177e4SLinus Torvalds 		count = -(unsigned long) addr;
7311da177e4SLinus Torvalds 
7321da177e4SLinus Torvalds 	read_lock(&vmlist_lock);
7331da177e4SLinus Torvalds 	for (tmp = vmlist; tmp; tmp = tmp->next) {
7341da177e4SLinus Torvalds 		vaddr = (char *) tmp->addr;
7351da177e4SLinus Torvalds 		if (addr >= vaddr + tmp->size - PAGE_SIZE)
7361da177e4SLinus Torvalds 			continue;
7371da177e4SLinus Torvalds 		while (addr < vaddr) {
7381da177e4SLinus Torvalds 			if (count == 0)
7391da177e4SLinus Torvalds 				goto finished;
7401da177e4SLinus Torvalds 			*buf = '\0';
7411da177e4SLinus Torvalds 			buf++;
7421da177e4SLinus Torvalds 			addr++;
7431da177e4SLinus Torvalds 			count--;
7441da177e4SLinus Torvalds 		}
7451da177e4SLinus Torvalds 		n = vaddr + tmp->size - PAGE_SIZE - addr;
7461da177e4SLinus Torvalds 		do {
7471da177e4SLinus Torvalds 			if (count == 0)
7481da177e4SLinus Torvalds 				goto finished;
7491da177e4SLinus Torvalds 			*buf = *addr;
7501da177e4SLinus Torvalds 			buf++;
7511da177e4SLinus Torvalds 			addr++;
7521da177e4SLinus Torvalds 			count--;
7531da177e4SLinus Torvalds 		} while (--n > 0);
7541da177e4SLinus Torvalds 	}
7551da177e4SLinus Torvalds finished:
7561da177e4SLinus Torvalds 	read_unlock(&vmlist_lock);
7571da177e4SLinus Torvalds 	return buf - buf_start;
7581da177e4SLinus Torvalds }
7591da177e4SLinus Torvalds 
7601da177e4SLinus Torvalds long vwrite(char *buf, char *addr, unsigned long count)
7611da177e4SLinus Torvalds {
7621da177e4SLinus Torvalds 	struct vm_struct *tmp;
7631da177e4SLinus Torvalds 	char *vaddr, *buf_start = buf;
7641da177e4SLinus Torvalds 	unsigned long n;
7651da177e4SLinus Torvalds 
7661da177e4SLinus Torvalds 	/* Don't allow overflow */
7671da177e4SLinus Torvalds 	if ((unsigned long) addr + count < count)
7681da177e4SLinus Torvalds 		count = -(unsigned long) addr;
7691da177e4SLinus Torvalds 
7701da177e4SLinus Torvalds 	read_lock(&vmlist_lock);
7711da177e4SLinus Torvalds 	for (tmp = vmlist; tmp; tmp = tmp->next) {
7721da177e4SLinus Torvalds 		vaddr = (char *) tmp->addr;
7731da177e4SLinus Torvalds 		if (addr >= vaddr + tmp->size - PAGE_SIZE)
7741da177e4SLinus Torvalds 			continue;
7751da177e4SLinus Torvalds 		while (addr < vaddr) {
7761da177e4SLinus Torvalds 			if (count == 0)
7771da177e4SLinus Torvalds 				goto finished;
7781da177e4SLinus Torvalds 			buf++;
7791da177e4SLinus Torvalds 			addr++;
7801da177e4SLinus Torvalds 			count--;
7811da177e4SLinus Torvalds 		}
7821da177e4SLinus Torvalds 		n = vaddr + tmp->size - PAGE_SIZE - addr;
7831da177e4SLinus Torvalds 		do {
7841da177e4SLinus Torvalds 			if (count == 0)
7851da177e4SLinus Torvalds 				goto finished;
7861da177e4SLinus Torvalds 			*addr = *buf;
7871da177e4SLinus Torvalds 			buf++;
7881da177e4SLinus Torvalds 			addr++;
7891da177e4SLinus Torvalds 			count--;
7901da177e4SLinus Torvalds 		} while (--n > 0);
7911da177e4SLinus Torvalds 	}
7921da177e4SLinus Torvalds finished:
7931da177e4SLinus Torvalds 	read_unlock(&vmlist_lock);
7941da177e4SLinus Torvalds 	return buf - buf_start;
7951da177e4SLinus Torvalds }
79683342314SNick Piggin 
79783342314SNick Piggin /**
79883342314SNick Piggin  *	remap_vmalloc_range  -  map vmalloc pages to userspace
79983342314SNick Piggin  *	@vma:		vma to cover (map full range of vma)
80083342314SNick Piggin  *	@addr:		vmalloc memory
80183342314SNick Piggin  *	@pgoff:		number of pages into addr before first page to map
8027682486bSRandy Dunlap  *
8037682486bSRandy Dunlap  *	Returns:	0 for success, -Exxx on failure
80483342314SNick Piggin  *
80583342314SNick Piggin  *	This function checks that addr is a valid vmalloc'ed area, and
80683342314SNick Piggin  *	that it is big enough to cover the vma. Will return failure if
80783342314SNick Piggin  *	those criteria aren't met.
80883342314SNick Piggin  *
80972fd4a35SRobert P. J. Day  *	Similar to remap_pfn_range() (see mm/memory.c)
81083342314SNick Piggin  */
81183342314SNick Piggin int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
81283342314SNick Piggin 						unsigned long pgoff)
81383342314SNick Piggin {
81483342314SNick Piggin 	struct vm_struct *area;
81583342314SNick Piggin 	unsigned long uaddr = vma->vm_start;
81683342314SNick Piggin 	unsigned long usize = vma->vm_end - vma->vm_start;
81783342314SNick Piggin 	int ret;
81883342314SNick Piggin 
81983342314SNick Piggin 	if ((PAGE_SIZE-1) & (unsigned long)addr)
82083342314SNick Piggin 		return -EINVAL;
82183342314SNick Piggin 
82283342314SNick Piggin 	read_lock(&vmlist_lock);
82383342314SNick Piggin 	area = __find_vm_area(addr);
82483342314SNick Piggin 	if (!area)
82583342314SNick Piggin 		goto out_einval_locked;
82683342314SNick Piggin 
82783342314SNick Piggin 	if (!(area->flags & VM_USERMAP))
82883342314SNick Piggin 		goto out_einval_locked;
82983342314SNick Piggin 
83083342314SNick Piggin 	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
83183342314SNick Piggin 		goto out_einval_locked;
83283342314SNick Piggin 	read_unlock(&vmlist_lock);
83383342314SNick Piggin 
83483342314SNick Piggin 	addr += pgoff << PAGE_SHIFT;
83583342314SNick Piggin 	do {
83683342314SNick Piggin 		struct page *page = vmalloc_to_page(addr);
83783342314SNick Piggin 		ret = vm_insert_page(vma, uaddr, page);
83883342314SNick Piggin 		if (ret)
83983342314SNick Piggin 			return ret;
84083342314SNick Piggin 
84183342314SNick Piggin 		uaddr += PAGE_SIZE;
84283342314SNick Piggin 		addr += PAGE_SIZE;
84383342314SNick Piggin 		usize -= PAGE_SIZE;
84483342314SNick Piggin 	} while (usize > 0);
84583342314SNick Piggin 
84683342314SNick Piggin 	/* Prevent "things" like memory migration? VM_flags need a cleanup... */
84783342314SNick Piggin 	vma->vm_flags |= VM_RESERVED;
84883342314SNick Piggin 
84983342314SNick Piggin 	return ret;
85083342314SNick Piggin 
85183342314SNick Piggin out_einval_locked:
85283342314SNick Piggin 	read_unlock(&vmlist_lock);
85383342314SNick Piggin 	return -EINVAL;
85483342314SNick Piggin }
85583342314SNick Piggin EXPORT_SYMBOL(remap_vmalloc_range);
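
/*
 * Illustrative sketch (not part of the original file): exposing a buffer
 * allocated with vmalloc_user() (which sets VM_USERMAP and zeroes the pages)
 * to userspace from a driver's mmap() handler.  The helper names are
 * assumptions made for this example only.
 */
static int example_mmap_vmalloc_buf(struct vm_area_struct *vma, void *buf)
{
	/* buf is assumed to come from vmalloc_user() */
	return remap_vmalloc_range(vma, buf, vma->vm_pgoff);
}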
85683342314SNick Piggin 
8571eeb66a1SChristoph Hellwig /*
8581eeb66a1SChristoph Hellwig  * Implement a stub for vmalloc_sync_all() if the architecture chose not to
8591eeb66a1SChristoph Hellwig  * have one.
8601eeb66a1SChristoph Hellwig  */
8611eeb66a1SChristoph Hellwig void  __attribute__((weak)) vmalloc_sync_all(void)
8621eeb66a1SChristoph Hellwig {
8631eeb66a1SChristoph Hellwig }
8645f4352fbSJeremy Fitzhardinge 
8655f4352fbSJeremy Fitzhardinge 
/*
 * No-op callback for apply_to_page_range() below: walking the range is
 * enough to get the page tables allocated, so there is nothing to do
 * per pte.
 */
8662f569afdSMartin Schwidefsky static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
8675f4352fbSJeremy Fitzhardinge {
8685f4352fbSJeremy Fitzhardinge 	/* apply_to_page_range() does all the hard work. */
8695f4352fbSJeremy Fitzhardinge 	return 0;
8705f4352fbSJeremy Fitzhardinge }
8715f4352fbSJeremy Fitzhardinge 
8725f4352fbSJeremy Fitzhardinge /**
8735f4352fbSJeremy Fitzhardinge  *	alloc_vm_area - allocate a range of kernel address space
8745f4352fbSJeremy Fitzhardinge  *	@size:		size of the area
8757682486bSRandy Dunlap  *
8767682486bSRandy Dunlap  *	Returns:	NULL on failure, vm_struct on success
8775f4352fbSJeremy Fitzhardinge  *
8785f4352fbSJeremy Fitzhardinge  *	This function reserves a range of kernel address space, and
8795f4352fbSJeremy Fitzhardinge  *	allocates pagetables to map that range.  No actual mappings
8805f4352fbSJeremy Fitzhardinge  *	are created.  If the kernel address space is not shared
8815f4352fbSJeremy Fitzhardinge  *	between processes, it syncs the pagetable across all
8825f4352fbSJeremy Fitzhardinge  *	processes.
8835f4352fbSJeremy Fitzhardinge  */
8845f4352fbSJeremy Fitzhardinge struct vm_struct *alloc_vm_area(size_t size)
8855f4352fbSJeremy Fitzhardinge {
8865f4352fbSJeremy Fitzhardinge 	struct vm_struct *area;
8875f4352fbSJeremy Fitzhardinge 
88823016969SChristoph Lameter 	area = get_vm_area_caller(size, VM_IOREMAP,
88923016969SChristoph Lameter 				__builtin_return_address(0));
8905f4352fbSJeremy Fitzhardinge 	if (area == NULL)
8915f4352fbSJeremy Fitzhardinge 		return NULL;
8925f4352fbSJeremy Fitzhardinge 
8935f4352fbSJeremy Fitzhardinge 	/*
8945f4352fbSJeremy Fitzhardinge 	 * This ensures that page tables are constructed for this region
8955f4352fbSJeremy Fitzhardinge 	 * of kernel virtual address space and mapped into init_mm.
8965f4352fbSJeremy Fitzhardinge 	 */
8975f4352fbSJeremy Fitzhardinge 	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
8985f4352fbSJeremy Fitzhardinge 				area->size, f, NULL)) {
8995f4352fbSJeremy Fitzhardinge 		free_vm_area(area);
9005f4352fbSJeremy Fitzhardinge 		return NULL;
9015f4352fbSJeremy Fitzhardinge 	}
9025f4352fbSJeremy Fitzhardinge 
9035f4352fbSJeremy Fitzhardinge 	/* Make sure the pagetables are constructed in process kernel
9045f4352fbSJeremy Fitzhardinge 	   mappings */
9055f4352fbSJeremy Fitzhardinge 	vmalloc_sync_all();
9065f4352fbSJeremy Fitzhardinge 
9075f4352fbSJeremy Fitzhardinge 	return area;
9085f4352fbSJeremy Fitzhardinge }
9095f4352fbSJeremy Fitzhardinge EXPORT_SYMBOL_GPL(alloc_vm_area);
9105f4352fbSJeremy Fitzhardinge 
9115f4352fbSJeremy Fitzhardinge void free_vm_area(struct vm_struct *area)
9125f4352fbSJeremy Fitzhardinge {
9135f4352fbSJeremy Fitzhardinge 	struct vm_struct *ret;
9145f4352fbSJeremy Fitzhardinge 	ret = remove_vm_area(area->addr);
9155f4352fbSJeremy Fitzhardinge 	BUG_ON(ret != area);
9165f4352fbSJeremy Fitzhardinge 	kfree(area);
9175f4352fbSJeremy Fitzhardinge }
9185f4352fbSJeremy Fitzhardinge EXPORT_SYMBOL_GPL(free_vm_area);
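
/*
 * Illustrative sketch (not part of the original file): reserving a window of
 * kernel address space with alloc_vm_area() so that something else (for
 * instance a hypervisor) can later install mappings into it, and releasing
 * it again with free_vm_area().  The helper name and the sixteen-page size
 * are assumptions made for this example only.
 */
static struct vm_struct *example_reserve_address_window(void)
{
	struct vm_struct *area = alloc_vm_area(16 * PAGE_SIZE);

	if (!area)
		return NULL;

	/*
	 * area->addr now has page tables populated but no mappings yet;
	 * when the window is no longer needed: free_vm_area(area).
	 */
	return area;
}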
919a10aa579SChristoph Lameter 
920a10aa579SChristoph Lameter 
921a10aa579SChristoph Lameter #ifdef CONFIG_PROC_FS
922a10aa579SChristoph Lameter static void *s_start(struct seq_file *m, loff_t *pos)
923a10aa579SChristoph Lameter {
924a10aa579SChristoph Lameter 	loff_t n = *pos;
925a10aa579SChristoph Lameter 	struct vm_struct *v;
926a10aa579SChristoph Lameter 
927a10aa579SChristoph Lameter 	read_lock(&vmlist_lock);
928a10aa579SChristoph Lameter 	v = vmlist;
929a10aa579SChristoph Lameter 	while (n > 0 && v) {
930a10aa579SChristoph Lameter 		n--;
931a10aa579SChristoph Lameter 		v = v->next;
932a10aa579SChristoph Lameter 	}
933a10aa579SChristoph Lameter 	if (!n)
934a10aa579SChristoph Lameter 		return v;
935a10aa579SChristoph Lameter 
936a10aa579SChristoph Lameter 	return NULL;
937a10aa579SChristoph Lameter 
938a10aa579SChristoph Lameter }
939a10aa579SChristoph Lameter 
940a10aa579SChristoph Lameter static void *s_next(struct seq_file *m, void *p, loff_t *pos)
941a10aa579SChristoph Lameter {
942a10aa579SChristoph Lameter 	struct vm_struct *v = p;
943a10aa579SChristoph Lameter 
944a10aa579SChristoph Lameter 	++*pos;
945a10aa579SChristoph Lameter 	return v->next;
946a10aa579SChristoph Lameter }
947a10aa579SChristoph Lameter 
948a10aa579SChristoph Lameter static void s_stop(struct seq_file *m, void *p)
949a10aa579SChristoph Lameter {
950a10aa579SChristoph Lameter 	read_unlock(&vmlist_lock);
951a10aa579SChristoph Lameter }
952a10aa579SChristoph Lameter 
953a47a126aSEric Dumazet static void show_numa_info(struct seq_file *m, struct vm_struct *v)
954a47a126aSEric Dumazet {
955a47a126aSEric Dumazet 	if (NUMA_BUILD) {
956a47a126aSEric Dumazet 		unsigned int nr, *counters = m->private;
957a47a126aSEric Dumazet 
958a47a126aSEric Dumazet 		if (!counters)
959a47a126aSEric Dumazet 			return;
960a47a126aSEric Dumazet 
961a47a126aSEric Dumazet 		memset(counters, 0, nr_node_ids * sizeof(unsigned int));
962a47a126aSEric Dumazet 
963a47a126aSEric Dumazet 		for (nr = 0; nr < v->nr_pages; nr++)
964a47a126aSEric Dumazet 			counters[page_to_nid(v->pages[nr])]++;
965a47a126aSEric Dumazet 
966a47a126aSEric Dumazet 		for_each_node_state(nr, N_HIGH_MEMORY)
967a47a126aSEric Dumazet 			if (counters[nr])
968a47a126aSEric Dumazet 				seq_printf(m, " N%u=%u", nr, counters[nr]);
969a47a126aSEric Dumazet 	}
970a47a126aSEric Dumazet }
971a47a126aSEric Dumazet 
972a10aa579SChristoph Lameter static int s_show(struct seq_file *m, void *p)
973a10aa579SChristoph Lameter {
974a10aa579SChristoph Lameter 	struct vm_struct *v = p;
975a10aa579SChristoph Lameter 
976a10aa579SChristoph Lameter 	seq_printf(m, "0x%p-0x%p %7ld",
977a10aa579SChristoph Lameter 		v->addr, v->addr + v->size, v->size);
978a10aa579SChristoph Lameter 
97923016969SChristoph Lameter 	if (v->caller) {
98023016969SChristoph Lameter 		char buff[2 * KSYM_NAME_LEN];
98123016969SChristoph Lameter 
98223016969SChristoph Lameter 		seq_putc(m, ' ');
98323016969SChristoph Lameter 		sprint_symbol(buff, (unsigned long)v->caller);
98423016969SChristoph Lameter 		seq_puts(m, buff);
98523016969SChristoph Lameter 	}
98623016969SChristoph Lameter 
987a10aa579SChristoph Lameter 	if (v->nr_pages)
988a10aa579SChristoph Lameter 		seq_printf(m, " pages=%d", v->nr_pages);
989a10aa579SChristoph Lameter 
990a10aa579SChristoph Lameter 	if (v->phys_addr)
991a10aa579SChristoph Lameter 		seq_printf(m, " phys=%lx", v->phys_addr);
992a10aa579SChristoph Lameter 
993a10aa579SChristoph Lameter 	if (v->flags & VM_IOREMAP)
994a10aa579SChristoph Lameter 		seq_printf(m, " ioremap");
995a10aa579SChristoph Lameter 
996a10aa579SChristoph Lameter 	if (v->flags & VM_ALLOC)
997a10aa579SChristoph Lameter 		seq_printf(m, " vmalloc");
998a10aa579SChristoph Lameter 
999a10aa579SChristoph Lameter 	if (v->flags & VM_MAP)
1000a10aa579SChristoph Lameter 		seq_printf(m, " vmap");
1001a10aa579SChristoph Lameter 
1002a10aa579SChristoph Lameter 	if (v->flags & VM_USERMAP)
1003a10aa579SChristoph Lameter 		seq_printf(m, " user");
1004a10aa579SChristoph Lameter 
1005a10aa579SChristoph Lameter 	if (v->flags & VM_VPAGES)
1006a10aa579SChristoph Lameter 		seq_printf(m, " vpages");
1007a10aa579SChristoph Lameter 
1008a47a126aSEric Dumazet 	show_numa_info(m, v);
1009a10aa579SChristoph Lameter 	seq_putc(m, '\n');
1010a10aa579SChristoph Lameter 	return 0;
1011a10aa579SChristoph Lameter }
1012a10aa579SChristoph Lameter 
1013a10aa579SChristoph Lameter const struct seq_operations vmalloc_op = {
1014a10aa579SChristoph Lameter 	.start = s_start,
1015a10aa579SChristoph Lameter 	.next = s_next,
1016a10aa579SChristoph Lameter 	.stop = s_stop,
1017a10aa579SChristoph Lameter 	.show = s_show,
1018a10aa579SChristoph Lameter };
1019a10aa579SChristoph Lameter #endif
1020a10aa579SChristoph Lameter 
1021