xref: /linux/mm/vmalloc.c (revision f3d9478b2ce468c3115b02ecae7e975990697f15)
/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>


DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static inline void vunmap_pmd_range(pud_t *pud, unsigned long addr,
						unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
						unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

/*
 * Tear down the kernel page table entries covering @area; the backing
 * pages themselves are not freed here.
 */
void unmap_vm_area(struct vm_struct *area)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	flush_cache_vunmap(addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_kernel_range((unsigned long) area->addr, end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pte_t *pte;

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = **pages;
		WARN_ON(!pte_none(*pte));
		if (!page)
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*pages)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static inline int vmap_pmd_range(pud_t *pud, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int vmap_pud_range(pgd_t *pgd, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

/*
 * Map the pages in *@pages into @area's address range (excluding the guard
 * page) with protection @prot, advancing *@pages past the pages consumed.
 */
int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	flush_cache_vmap((unsigned long) area->addr, end);
	return err;
}

struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end, int node)
{
	struct vm_struct **p, *tmp, *area;
	unsigned long align = 1;
	unsigned long addr;

	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}
	addr = ALIGN(start, align);
	size = PAGE_ALIGN(size);

	area = kmalloc_node(sizeof(*area), GFP_KERNEL, node);
	if (unlikely(!area))
		return NULL;

	if (unlikely(!size)) {
		kfree(area);
		return NULL;
	}

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if ((unsigned long)tmp->addr < addr) {
			if ((unsigned long)tmp->addr + tmp->size >= addr)
				addr = ALIGN(tmp->size +
					     (unsigned long)tmp->addr, align);
			continue;
		}
		if ((size + addr) < addr)
			goto out;
		if (size + addr <= (unsigned long)tmp->addr)
			goto found;
		addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
		if (addr > end - size)
			goto out;
	}

found:
	area->next = *p;
	*p = area;

	area->flags = flags;
	area->addr = (void *)addr;
	area->size = size;
	area->pages = NULL;
	area->nr_pages = 0;
	area->phys_addr = 0;
	write_unlock(&vmlist_lock);

	return area;

out:
	write_unlock(&vmlist_lock);
	kfree(area);
	if (printk_ratelimit())
		printk(KERN_WARNING "allocation failed: out of vmalloc space - use vmalloc=<size> to increase size.\n");
	return NULL;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, flags, start, end, -1);
}

/**
 *	get_vm_area  -  reserve a contiguous kernel virtual area
 *
 *	@size:		size of the area
 *	@flags:		%VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 *	Search an area of @size in the kernel virtual mapping area,
 *	and reserve it for our purposes.  Returns the area descriptor
 *	on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
}

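/*
 * Illustrative sketch, not part of the original file: how a caller could
 * pair get_vm_area() with map_vm_area() to install its own pages into a
 * reserved range and drop the mapping again later.  This mirrors what
 * vmap() below does; the function name and the two-page setup are
 * hypothetical.
 */
static void *example_map_two_pages(struct page *p0, struct page *p1)
{
	struct page *pages[2] = { p0, p1 };
	struct page **pp = pages;
	struct vm_struct *area;

	/* reserve two pages of kernel virtual space (plus the guard page) */
	area = get_vm_area(2 * PAGE_SIZE, VM_MAP);
	if (!area)
		return NULL;

	/* wire the caller's pages into the reserved range */
	if (map_vm_area(area, PAGE_KERNEL, &pp)) {
		vunmap(area->addr);	/* releases the reservation again */
		return NULL;
	}
	return area->addr;		/* undo later with vunmap() */
}
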
struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags, int node)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node);
}

/* Caller must hold vmlist_lock */
struct vm_struct *__remove_vm_area(void *addr)
{
	struct vm_struct **p, *tmp;

	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr == addr)
			goto found;
	}
	return NULL;

found:
	unmap_vm_area(tmp);
	*p = tmp->next;

	/*
	 * Remove the guard page.
	 */
	tmp->size -= PAGE_SIZE;
	return tmp;
}

/**
 *	remove_vm_area  -  find and remove a contiguous kernel virtual area
 *
 *	@addr:		base address
 *
 *	Search for the kernel VM area starting at @addr, and remove it.
 *	This function returns the found VM area, but using it is NOT safe
 *	on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(void *addr)
{
	struct vm_struct *v;
	write_lock(&vmlist_lock);
	v = __remove_vm_area(addr);
	write_unlock(&vmlist_lock);
	return v;
}

void __vunmap(void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		WARN_ON(1);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		WARN_ON(1);
		return;
	}

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			BUG_ON(!area->pages[i]);
			__free_page(area->pages[i]);
		}

		if (area->nr_pages > PAGE_SIZE/sizeof(struct page *))
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 *	vfree  -  release memory allocated by vmalloc()
 *
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr, as
 *	obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 *	NULL, no operation is performed.
 *
 *	Must not be called in interrupt context.
 */
void vfree(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 *	vunmap  -  release virtual mapping obtained by vmap()
 *
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr,
 *	which was created from the page array passed to vmap().
 *
 *	Must not be called in interrupt context.
 */
void vunmap(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 *	vmap  -  map an array of pages into virtually contiguous space
 *
 *	@pages:		array of page pointers
 *	@count:		number of pages to map
 *	@flags:		vm_area->flags
 *	@prot:		page protection for the mapping
 *
 *	Maps @count pages from @pages into contiguous kernel virtual
 *	space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	if (count > num_physpages)
		return NULL;

	area = get_vm_area((count << PAGE_SHIFT), flags);
	if (!area)
		return NULL;
	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);

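/*
 * Illustrative sketch, not part of the original file: viewing an array of
 * already-allocated pages through a single contiguous kernel mapping with
 * vmap(), to be torn down later with vunmap().  The function name is
 * hypothetical; the caller is assumed to provide @count valid pages.
 */
static void *example_vmap_view(struct page **pages, unsigned int count)
{
	void *virt = vmap(pages, count, VM_MAP, PAGE_KERNEL);

	if (!virt)
		return NULL;

	memset(virt, 0, count * PAGE_SIZE);	/* one contiguous view of the pages */
	return virt;				/* release with vunmap(virt) */
}
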
void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				pgprot_t prot, int node)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE)
		pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node);
	else
		pages = kmalloc_node(array_size, (gfp_mask & ~__GFP_HIGHMEM), node);
	area->pages = pages;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}
	memset(area->pages, 0, array_size);

	for (i = 0; i < area->nr_pages; i++) {
		if (node < 0)
			area->pages[i] = alloc_page(gfp_mask);
		else
			area->pages[i] = alloc_pages_node(node, gfp_mask, 0);
		if (unlikely(!area->pages[i])) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}

void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_area_node(area, gfp_mask, prot, -1);
}

/**
 *	__vmalloc_node  -  allocate virtually contiguous memory
 *
 *	@size:		allocation size
 *	@gfp_mask:	flags for the page level allocator
 *	@prot:		protection mask for the allocated pages
 *	@node:		node to use for allocation or -1
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator with @gfp_mask flags.  Map them into contiguous
 *	kernel virtual space, using a pagetable protection of @prot.
 */
void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			int node)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;

	area = get_vm_area_node(size, VM_ALLOC, node);
	if (!area)
		return NULL;

	return __vmalloc_area_node(area, gfp_mask, prot, node);
}
EXPORT_SYMBOL(__vmalloc_node);

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, gfp_mask, prot, -1);
}
EXPORT_SYMBOL(__vmalloc);

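/*
 * Illustrative sketch, not part of the original file: __vmalloc() (and
 * __vmalloc_node() above) let the caller pick the gfp mask and the page
 * protections.  Here a hypothetical caller avoids __GFP_HIGHMEM so that
 * every backing page also stays in the kernel's direct mapping.
 */
static void *example_lowmem_buffer(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
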
/**
 *	vmalloc  -  allocate virtually contiguous memory
 *
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);

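/*
 * Illustrative sketch, not part of the original file: the usual
 * vmalloc()/vfree() pairing for a large, virtually contiguous buffer.
 * The function name and the one-megabyte size are hypothetical.
 */
static int example_use_big_buffer(void)
{
	char *buf = vmalloc(1024 * 1024);

	if (!buf)
		return -ENOMEM;

	memset(buf, 0, 1024 * 1024);
	/* ... work with buf ... */
	vfree(buf);
	return 0;
}
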
/**
 *	vmalloc_node  -  allocate memory on a specific node
 *
 *	@size:		allocation size
 *	@node:		numa node
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
}
EXPORT_SYMBOL(vmalloc_node);

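/*
 * Illustrative sketch, not part of the original file: vmalloc_node() prefers
 * pages from the given NUMA node; passing -1 means no node preference, which
 * is effectively what plain vmalloc() does.  The function name is
 * hypothetical.
 */
static void *example_node_buffer(unsigned long size, int nid)
{
	void *buf = vmalloc_node(size, nid);

	if (!buf)
		buf = vmalloc_node(size, -1);	/* retry without a node preference */
	return buf;				/* freed with vfree() as usual */
}
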
#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 *	vmalloc_exec  -  allocate virtually contiguous, executable memory
 *
 *	@size:		allocation size
 *
 *	Kernel-internal function to allocate enough pages to cover @size
 *	from the page level allocator and map them into contiguous and
 *	executable kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

/**
 *	vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *
 *	@size:		allocation size
 *
 *	Allocate enough 32bit physically addressable pages to cover @size from
 *	the page level allocator and map them into contiguous kernel virtual
 *	space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

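/*
 * vread() copies up to @count bytes starting at kernel virtual address @addr
 * into @buf.  Only ranges covered by areas on vmlist are read; bytes in the
 * gap before an area are returned as '\0'.  The return value is the number
 * of bytes placed in @buf (historically used by the /dev/kmem driver).
 */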
long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

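/*
 * vwrite() is the counterpart of vread(): it copies up to @count bytes from
 * @buf to kernel virtual address @addr.  Bytes that fall outside any area on
 * vmlist are skipped rather than written; the return value is the number of
 * bytes of @buf that were consumed.
 */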
long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}
633