xref: /linux/mm/util.c (revision 41badc15cbad0350de34408c1b0c690f9df76d4b)
116d69265SAndrew Morton #include <linux/mm.h>
230992c97SMatt Mackall #include <linux/slab.h>
330992c97SMatt Mackall #include <linux/string.h>
4b95f1b31SPaul Gortmaker #include <linux/export.h>
596840aa0SDavi Arnaut #include <linux/err.h>
63b8f14b4SAdrian Bunk #include <linux/sched.h>
7eb36c587SAl Viro #include <linux/security.h>
896840aa0SDavi Arnaut #include <asm/uaccess.h>
930992c97SMatt Mackall 
106038def0SNamhyung Kim #include "internal.h"
116038def0SNamhyung Kim 
12a8d154b0SSteven Rostedt #define CREATE_TRACE_POINTS
13ad8d75ffSSteven Rostedt #include <trace/events/kmem.h>
14a8d154b0SSteven Rostedt 
1530992c97SMatt Mackall /**
1630992c97SMatt Mackall  * kstrdup - allocate space for and copy an existing string
1730992c97SMatt Mackall  * @s: the string to duplicate
1830992c97SMatt Mackall  * @gfp: the GFP mask used in the kmalloc() call when allocating memory
1930992c97SMatt Mackall  */
2030992c97SMatt Mackall char *kstrdup(const char *s, gfp_t gfp)
2130992c97SMatt Mackall {
2230992c97SMatt Mackall 	size_t len;
2330992c97SMatt Mackall 	char *buf;
2430992c97SMatt Mackall 
2530992c97SMatt Mackall 	if (!s)
2630992c97SMatt Mackall 		return NULL;
2730992c97SMatt Mackall 
2830992c97SMatt Mackall 	len = strlen(s) + 1;
291d2c8eeaSChristoph Hellwig 	buf = kmalloc_track_caller(len, gfp);
3030992c97SMatt Mackall 	if (buf)
3130992c97SMatt Mackall 		memcpy(buf, s, len);
3230992c97SMatt Mackall 	return buf;
3330992c97SMatt Mackall }
3430992c97SMatt Mackall EXPORT_SYMBOL(kstrdup);
3596840aa0SDavi Arnaut 
361a2f67b4SAlexey Dobriyan /**
371e66df3eSJeremy Fitzhardinge  * kstrndup - allocate space for and copy an existing string
381e66df3eSJeremy Fitzhardinge  * @s: the string to duplicate
391e66df3eSJeremy Fitzhardinge  * @max: read at most @max chars from @s
401e66df3eSJeremy Fitzhardinge  * @gfp: the GFP mask used in the kmalloc() call when allocating memory
411e66df3eSJeremy Fitzhardinge  */
421e66df3eSJeremy Fitzhardinge char *kstrndup(const char *s, size_t max, gfp_t gfp)
431e66df3eSJeremy Fitzhardinge {
441e66df3eSJeremy Fitzhardinge 	size_t len;
451e66df3eSJeremy Fitzhardinge 	char *buf;
461e66df3eSJeremy Fitzhardinge 
471e66df3eSJeremy Fitzhardinge 	if (!s)
481e66df3eSJeremy Fitzhardinge 		return NULL;
491e66df3eSJeremy Fitzhardinge 
501e66df3eSJeremy Fitzhardinge 	len = strnlen(s, max);
511e66df3eSJeremy Fitzhardinge 	buf = kmalloc_track_caller(len+1, gfp);
521e66df3eSJeremy Fitzhardinge 	if (buf) {
531e66df3eSJeremy Fitzhardinge 		memcpy(buf, s, len);
541e66df3eSJeremy Fitzhardinge 		buf[len] = '\0';
551e66df3eSJeremy Fitzhardinge 	}
561e66df3eSJeremy Fitzhardinge 	return buf;
571e66df3eSJeremy Fitzhardinge }
581e66df3eSJeremy Fitzhardinge EXPORT_SYMBOL(kstrndup);
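
/*
 * A minimal usage sketch (the helper below is hypothetical and not part
 * of this file): duplicate a caller-supplied label, capping it at 16
 * characters; kstrdup() would be used instead when the full string is
 * wanted.  A sleeping (GFP_KERNEL) context is assumed.  The result is
 * NULL when @label is NULL or the allocation fails, and the caller must
 * kfree() it.
 */
static __maybe_unused char *example_dup_label(const char *label)
{
	return kstrndup(label, 16, GFP_KERNEL);
}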
591e66df3eSJeremy Fitzhardinge 
601e66df3eSJeremy Fitzhardinge /**
611a2f67b4SAlexey Dobriyan  * kmemdup - duplicate region of memory
621a2f67b4SAlexey Dobriyan  *
631a2f67b4SAlexey Dobriyan  * @src: memory region to duplicate
641a2f67b4SAlexey Dobriyan  * @len: memory region length
651a2f67b4SAlexey Dobriyan  * @gfp: GFP mask to use
661a2f67b4SAlexey Dobriyan  */
671a2f67b4SAlexey Dobriyan void *kmemdup(const void *src, size_t len, gfp_t gfp)
681a2f67b4SAlexey Dobriyan {
691a2f67b4SAlexey Dobriyan 	void *p;
701a2f67b4SAlexey Dobriyan 
711d2c8eeaSChristoph Hellwig 	p = kmalloc_track_caller(len, gfp);
721a2f67b4SAlexey Dobriyan 	if (p)
731a2f67b4SAlexey Dobriyan 		memcpy(p, src, len);
741a2f67b4SAlexey Dobriyan 	return p;
751a2f67b4SAlexey Dobriyan }
761a2f67b4SAlexey Dobriyan EXPORT_SYMBOL(kmemdup);
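
/*
 * A minimal usage sketch (hypothetical helper): take a private copy of
 * a caller-supplied binary blob with kmemdup().  GFP_KERNEL assumes
 * process context; the copy is NULL on allocation failure and is owned
 * by the caller, who must kfree() it when done.
 */
static __maybe_unused void *example_copy_blob(const void *blob, size_t len)
{
	return kmemdup(blob, len, GFP_KERNEL);
}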
771a2f67b4SAlexey Dobriyan 
78ef2ad80cSChristoph Lameter /**
79610a77e0SLi Zefan  * memdup_user - duplicate memory region from user space
80610a77e0SLi Zefan  *
81610a77e0SLi Zefan  * @src: source address in user space
82610a77e0SLi Zefan  * @len: number of bytes to copy
83610a77e0SLi Zefan  *
84610a77e0SLi Zefan  * Returns an ERR_PTR() on failure.
85610a77e0SLi Zefan  */
86610a77e0SLi Zefan void *memdup_user(const void __user *src, size_t len)
87610a77e0SLi Zefan {
88610a77e0SLi Zefan 	void *p;
89610a77e0SLi Zefan 
90610a77e0SLi Zefan 	/*
91610a77e0SLi Zefan 	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
92610a77e0SLi Zefan 	 * cause a page fault, which makes it pointless to use GFP_NOFS
93610a77e0SLi Zefan 	 * or GFP_ATOMIC.
94610a77e0SLi Zefan 	 */
95610a77e0SLi Zefan 	p = kmalloc_track_caller(len, GFP_KERNEL);
96610a77e0SLi Zefan 	if (!p)
97610a77e0SLi Zefan 		return ERR_PTR(-ENOMEM);
98610a77e0SLi Zefan 
99610a77e0SLi Zefan 	if (copy_from_user(p, src, len)) {
100610a77e0SLi Zefan 		kfree(p);
101610a77e0SLi Zefan 		return ERR_PTR(-EFAULT);
102610a77e0SLi Zefan 	}
103610a77e0SLi Zefan 
104610a77e0SLi Zefan 	return p;
105610a77e0SLi Zefan }
106610a77e0SLi Zefan EXPORT_SYMBOL(memdup_user);
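
/*
 * A minimal usage sketch (the helper and struct example_req are
 * hypothetical): pull a fixed-size request structure in from user
 * space with memdup_user() and convert its ERR_PTR() convention into a
 * plain errno for the caller.
 */
struct example_req {
	u32 flags;
	u32 len;
};

static __maybe_unused int example_fetch_req(const void __user *uptr,
					    struct example_req **out)
{
	struct example_req *req;

	req = memdup_user(uptr, sizeof(*req));
	if (IS_ERR(req))
		return PTR_ERR(req);	/* -ENOMEM or -EFAULT */

	*out = req;			/* caller kfree()s it when done */
	return 0;
}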
107610a77e0SLi Zefan 
108e21827aaSEzequiel Garcia static __always_inline void *__do_krealloc(const void *p, size_t new_size,
109e21827aaSEzequiel Garcia 					   gfp_t flags)
11093bc4e89SPekka Enberg {
11193bc4e89SPekka Enberg 	void *ret;
11293bc4e89SPekka Enberg 	size_t ks = 0;
11393bc4e89SPekka Enberg 
11493bc4e89SPekka Enberg 	if (p)
11593bc4e89SPekka Enberg 		ks = ksize(p);
11693bc4e89SPekka Enberg 
11793bc4e89SPekka Enberg 	if (ks >= new_size)
11893bc4e89SPekka Enberg 		return (void *)p;
11993bc4e89SPekka Enberg 
12093bc4e89SPekka Enberg 	ret = kmalloc_track_caller(new_size, flags);
12193bc4e89SPekka Enberg 	if (ret && p)
12293bc4e89SPekka Enberg 		memcpy(ret, p, ks);
12393bc4e89SPekka Enberg 
12493bc4e89SPekka Enberg 	return ret;
12593bc4e89SPekka Enberg }
126e21827aaSEzequiel Garcia 
127e21827aaSEzequiel Garcia /**
128e21827aaSEzequiel Garcia  * __krealloc - like krealloc() but doesn't free @p.
129e21827aaSEzequiel Garcia  * @p: object to reallocate memory for.
130e21827aaSEzequiel Garcia  * @new_size: how many bytes of memory are required.
131e21827aaSEzequiel Garcia  * @flags: the type of memory to allocate.
132e21827aaSEzequiel Garcia  *
133e21827aaSEzequiel Garcia  * This function is like krealloc() except it never frees the originally
134e21827aaSEzequiel Garcia  * allocated buffer. Use this if the old buffer cannot be freed immediately,
135e21827aaSEzequiel Garcia  * for example because it may still be referenced under RCU.
136e21827aaSEzequiel Garcia  */
137e21827aaSEzequiel Garcia void *__krealloc(const void *p, size_t new_size, gfp_t flags)
138e21827aaSEzequiel Garcia {
139e21827aaSEzequiel Garcia 	if (unlikely(!new_size))
140e21827aaSEzequiel Garcia 		return ZERO_SIZE_PTR;
141e21827aaSEzequiel Garcia 
142e21827aaSEzequiel Garcia 	return __do_krealloc(p, new_size, flags);
143e21827aaSEzequiel Garcia 
144e21827aaSEzequiel Garcia }
14593bc4e89SPekka Enberg EXPORT_SYMBOL(__krealloc);
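
/*
 * A minimal usage sketch (all names hypothetical): grow a buffer with
 * __krealloc() while keeping the old allocation alive so that
 * concurrent readers can still use it; the caller frees the old buffer
 * later, for example after an RCU grace period.
 */
static __maybe_unused void *example_grow_keep_old(void *old, size_t new_size,
						  void **old_to_free)
{
	void *new;

	new = __krealloc(old, new_size, GFP_KERNEL);
	if (!new)
		return NULL;

	/* __krealloc() may return @old itself if it was already big enough. */
	*old_to_free = (new != old) ? old : NULL;
	return new;
}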
14693bc4e89SPekka Enberg 
14793bc4e89SPekka Enberg /**
148ef2ad80cSChristoph Lameter  * krealloc - reallocate memory. The contents will remain unchanged.
149ef2ad80cSChristoph Lameter  * @p: object to reallocate memory for.
150ef2ad80cSChristoph Lameter  * @new_size: how many bytes of memory are required.
151ef2ad80cSChristoph Lameter  * @flags: the type of memory to allocate.
152ef2ad80cSChristoph Lameter  *
153ef2ad80cSChristoph Lameter  * The contents of the object pointed to are preserved up to the
154ef2ad80cSChristoph Lameter  * lesser of the new and old sizes.  If @p is %NULL, krealloc()
1550db10c8eSBorislav Petkov  * behaves exactly like kmalloc().  If @new_size is 0 and @p is not a
156ef2ad80cSChristoph Lameter  * %NULL pointer, the object pointed to is freed.
157ef2ad80cSChristoph Lameter  */
158ef2ad80cSChristoph Lameter void *krealloc(const void *p, size_t new_size, gfp_t flags)
159ef2ad80cSChristoph Lameter {
160ef2ad80cSChristoph Lameter 	void *ret;
161ef2ad80cSChristoph Lameter 
162ef2ad80cSChristoph Lameter 	if (unlikely(!new_size)) {
163ef2ad80cSChristoph Lameter 		kfree(p);
1646cb8f913SChristoph Lameter 		return ZERO_SIZE_PTR;
165ef2ad80cSChristoph Lameter 	}
166ef2ad80cSChristoph Lameter 
167e21827aaSEzequiel Garcia 	ret = __do_krealloc(p, new_size, flags);
16893bc4e89SPekka Enberg 	if (ret && p != ret)
169ef2ad80cSChristoph Lameter 		kfree(p);
17093bc4e89SPekka Enberg 
171ef2ad80cSChristoph Lameter 	return ret;
172ef2ad80cSChristoph Lameter }
173ef2ad80cSChristoph Lameter EXPORT_SYMBOL(krealloc);
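
/*
 * A minimal usage sketch (hypothetical helper): double an int array
 * with krealloc() when it fills up.  On failure the original buffer is
 * left untouched, so it must not be overwritten with the NULL return
 * value.
 */
static __maybe_unused int example_grow_array(int **items, size_t *capacity)
{
	size_t new_cap = *capacity ? *capacity * 2 : 8;
	int *tmp;

	tmp = krealloc(*items, new_cap * sizeof(**items), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;		/* *items is still valid and unchanged */

	*items = tmp;
	*capacity = new_cap;
	return 0;
}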
174ef2ad80cSChristoph Lameter 
1753ef0e5baSJohannes Weiner /**
1763ef0e5baSJohannes Weiner  * kzfree - like kfree but zero memory
1773ef0e5baSJohannes Weiner  * @p: object to free memory of
1783ef0e5baSJohannes Weiner  *
1793ef0e5baSJohannes Weiner  * The memory of the object @p points to is zeroed before it is freed.
1803ef0e5baSJohannes Weiner  * If @p is %NULL, kzfree() does nothing.
181a234bdc9SPekka Enberg  *
182a234bdc9SPekka Enberg  * Note: this function zeroes the whole allocated buffer, which can be a good
183a234bdc9SPekka Enberg  * deal bigger than the requested buffer size passed to kmalloc(). So be
184a234bdc9SPekka Enberg  * careful when using this function in performance-sensitive code.
1853ef0e5baSJohannes Weiner  */
1863ef0e5baSJohannes Weiner void kzfree(const void *p)
1873ef0e5baSJohannes Weiner {
1883ef0e5baSJohannes Weiner 	size_t ks;
1893ef0e5baSJohannes Weiner 	void *mem = (void *)p;
1903ef0e5baSJohannes Weiner 
1913ef0e5baSJohannes Weiner 	if (unlikely(ZERO_OR_NULL_PTR(mem)))
1923ef0e5baSJohannes Weiner 		return;
1933ef0e5baSJohannes Weiner 	ks = ksize(mem);
1943ef0e5baSJohannes Weiner 	memset(mem, 0, ks);
1953ef0e5baSJohannes Weiner 	kfree(mem);
1963ef0e5baSJohannes Weiner }
1973ef0e5baSJohannes Weiner EXPORT_SYMBOL(kzfree);
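
/*
 * A minimal usage sketch (hypothetical helper): scrub a temporary copy
 * of key material with kzfree() before releasing it.  Note that the
 * whole allocation, not just @len bytes, is cleared.
 */
static __maybe_unused void example_use_secret(const u8 *key, size_t len)
{
	u8 *copy = kmemdup(key, len, GFP_KERNEL);

	if (!copy)
		return;

	/* ... derive something from the key copy here ... */

	kzfree(copy);		/* zeroes the full ksize(copy) bytes, then frees */
}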
1983ef0e5baSJohannes Weiner 
19996840aa0SDavi Arnaut /**
20096840aa0SDavi Arnaut  * strndup_user - duplicate an existing string from user space
20196840aa0SDavi Arnaut  * @s: The string to duplicate
20296840aa0SDavi Arnaut  * @n: Maximum number of bytes to copy, including the trailing NUL.
20396840aa0SDavi Arnaut  */
20496840aa0SDavi Arnaut char *strndup_user(const char __user *s, long n)
20596840aa0SDavi Arnaut {
20696840aa0SDavi Arnaut 	char *p;
20796840aa0SDavi Arnaut 	long length;
20896840aa0SDavi Arnaut 
20996840aa0SDavi Arnaut 	length = strnlen_user(s, n);
21096840aa0SDavi Arnaut 
21196840aa0SDavi Arnaut 	if (!length)
21296840aa0SDavi Arnaut 		return ERR_PTR(-EFAULT);
21396840aa0SDavi Arnaut 
21496840aa0SDavi Arnaut 	if (length > n)
21596840aa0SDavi Arnaut 		return ERR_PTR(-EINVAL);
21696840aa0SDavi Arnaut 
21790d74045SJulia Lawall 	p = memdup_user(s, length);
21896840aa0SDavi Arnaut 
21990d74045SJulia Lawall 	if (IS_ERR(p))
22090d74045SJulia Lawall 		return p;
22196840aa0SDavi Arnaut 
22296840aa0SDavi Arnaut 	p[length - 1] = '\0';
22396840aa0SDavi Arnaut 
22496840aa0SDavi Arnaut 	return p;
22596840aa0SDavi Arnaut }
22696840aa0SDavi Arnaut EXPORT_SYMBOL(strndup_user);
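
/*
 * A minimal usage sketch (the helper and the 256-byte cap are
 * hypothetical): copy a bounded, NUL-terminated path from user space
 * with strndup_user() and map the ERR_PTR() result to an errno.
 */
static __maybe_unused int example_get_user_path(const char __user *upath,
						char **kpath)
{
	char *p = strndup_user(upath, 256);

	if (IS_ERR(p))
		return PTR_ERR(p);	/* -EFAULT, -EINVAL or -ENOMEM */

	*kpath = p;			/* caller kfree()s it */
	return 0;
}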
22716d69265SAndrew Morton 
2286038def0SNamhyung Kim void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
2296038def0SNamhyung Kim 		struct vm_area_struct *prev, struct rb_node *rb_parent)
2306038def0SNamhyung Kim {
2316038def0SNamhyung Kim 	struct vm_area_struct *next;
2326038def0SNamhyung Kim 
2336038def0SNamhyung Kim 	vma->vm_prev = prev;
2346038def0SNamhyung Kim 	if (prev) {
2356038def0SNamhyung Kim 		next = prev->vm_next;
2366038def0SNamhyung Kim 		prev->vm_next = vma;
2376038def0SNamhyung Kim 	} else {
2386038def0SNamhyung Kim 		mm->mmap = vma;
2396038def0SNamhyung Kim 		if (rb_parent)
2406038def0SNamhyung Kim 			next = rb_entry(rb_parent,
2416038def0SNamhyung Kim 					struct vm_area_struct, vm_rb);
2426038def0SNamhyung Kim 		else
2436038def0SNamhyung Kim 			next = NULL;
2446038def0SNamhyung Kim 	}
2456038def0SNamhyung Kim 	vma->vm_next = next;
2466038def0SNamhyung Kim 	if (next)
2476038def0SNamhyung Kim 		next->vm_prev = vma;
2486038def0SNamhyung Kim }
2496038def0SNamhyung Kim 
250b7643757SSiddhesh Poyarekar /* Check if the vma is being used as a stack by this task */
251b7643757SSiddhesh Poyarekar static int vm_is_stack_for_task(struct task_struct *t,
252b7643757SSiddhesh Poyarekar 				struct vm_area_struct *vma)
253b7643757SSiddhesh Poyarekar {
254b7643757SSiddhesh Poyarekar 	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
255b7643757SSiddhesh Poyarekar }
256b7643757SSiddhesh Poyarekar 
257b7643757SSiddhesh Poyarekar /*
258b7643757SSiddhesh Poyarekar  * Check if the vma is being used as a stack.
259b7643757SSiddhesh Poyarekar  * If in_group is non-zero, check the entire thread group; otherwise only
260b7643757SSiddhesh Poyarekar  * check the current task. Returns the pid of the task that the vma is a
261b7643757SSiddhesh Poyarekar  * stack for, or 0 if it is not a stack for any of them.
262b7643757SSiddhesh Poyarekar  */
263b7643757SSiddhesh Poyarekar pid_t vm_is_stack(struct task_struct *task,
264b7643757SSiddhesh Poyarekar 		  struct vm_area_struct *vma, int in_group)
265b7643757SSiddhesh Poyarekar {
266b7643757SSiddhesh Poyarekar 	pid_t ret = 0;
267b7643757SSiddhesh Poyarekar 
268b7643757SSiddhesh Poyarekar 	if (vm_is_stack_for_task(task, vma))
269b7643757SSiddhesh Poyarekar 		return task->pid;
270b7643757SSiddhesh Poyarekar 
271b7643757SSiddhesh Poyarekar 	if (in_group) {
272b7643757SSiddhesh Poyarekar 		struct task_struct *t;
273b7643757SSiddhesh Poyarekar 		rcu_read_lock();
274b7643757SSiddhesh Poyarekar 		if (!pid_alive(task))
275b7643757SSiddhesh Poyarekar 			goto done;
276b7643757SSiddhesh Poyarekar 
277b7643757SSiddhesh Poyarekar 		t = task;
278b7643757SSiddhesh Poyarekar 		do {
279b7643757SSiddhesh Poyarekar 			if (vm_is_stack_for_task(t, vma)) {
280b7643757SSiddhesh Poyarekar 				ret = t->pid;
281b7643757SSiddhesh Poyarekar 				goto done;
282b7643757SSiddhesh Poyarekar 			}
283b7643757SSiddhesh Poyarekar 		} while_each_thread(task, t);
284b7643757SSiddhesh Poyarekar done:
285b7643757SSiddhesh Poyarekar 		rcu_read_unlock();
286b7643757SSiddhesh Poyarekar 	}
287b7643757SSiddhesh Poyarekar 
288b7643757SSiddhesh Poyarekar 	return ret;
289b7643757SSiddhesh Poyarekar }
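
/*
 * A minimal usage sketch (hypothetical helper): decide whether to label
 * a vma as a stack when dumping a task's mappings, checking the whole
 * thread group as a /proc-style listing might.
 */
static __maybe_unused int example_is_thread_stack(struct task_struct *task,
						  struct vm_area_struct *vma)
{
	return vm_is_stack(task, vma, 1) != 0;
}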
290b7643757SSiddhesh Poyarekar 
291efc1a3b1SDavid Howells #if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
29216d69265SAndrew Morton void arch_pick_mmap_layout(struct mm_struct *mm)
29316d69265SAndrew Morton {
29416d69265SAndrew Morton 	mm->mmap_base = TASK_UNMAPPED_BASE;
29516d69265SAndrew Morton 	mm->get_unmapped_area = arch_get_unmapped_area;
29616d69265SAndrew Morton 	mm->unmap_area = arch_unmap_area;
29716d69265SAndrew Morton }
29816d69265SAndrew Morton #endif
299912985dcSRusty Russell 
30045888a0cSXiao Guangrong /*
30145888a0cSXiao Guangrong  * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
30245888a0cSXiao Guangrong  * back to the regular GUP.
30325985edcSLucas De Marchi  * If the architecture does not support this function, it simply returns
30445888a0cSXiao Guangrong  * with no pages pinned.
30545888a0cSXiao Guangrong  */
30645888a0cSXiao Guangrong int __attribute__((weak)) __get_user_pages_fast(unsigned long start,
30745888a0cSXiao Guangrong 				 int nr_pages, int write, struct page **pages)
30845888a0cSXiao Guangrong {
30945888a0cSXiao Guangrong 	return 0;
31045888a0cSXiao Guangrong }
31145888a0cSXiao Guangrong EXPORT_SYMBOL_GPL(__get_user_pages_fast);
31245888a0cSXiao Guangrong 
3139de100d0SAndy Grover /**
3149de100d0SAndy Grover  * get_user_pages_fast() - pin user pages in memory
3159de100d0SAndy Grover  * @start:	starting user address
3169de100d0SAndy Grover  * @nr_pages:	number of pages from start to pin
3179de100d0SAndy Grover  * @write:	whether pages will be written to
3189de100d0SAndy Grover  * @pages:	array that receives pointers to the pages pinned.
3199de100d0SAndy Grover  *		Should be at least nr_pages long.
3209de100d0SAndy Grover  *
3219de100d0SAndy Grover  * Returns number of pages pinned. This may be fewer than the number
3229de100d0SAndy Grover  * requested. If nr_pages is 0 or negative, returns 0. If no pages
3239de100d0SAndy Grover  * were pinned, returns -errno.
324d2bf6be8SNick Piggin  *
325d2bf6be8SNick Piggin  * get_user_pages_fast provides equivalent functionality to get_user_pages,
326d2bf6be8SNick Piggin  * operating on current and current->mm, with force=0 and vma=NULL. However
327d2bf6be8SNick Piggin  * unlike get_user_pages, it must be called without mmap_sem held.
328d2bf6be8SNick Piggin  *
329d2bf6be8SNick Piggin  * get_user_pages_fast may take mmap_sem and page table locks, so no
330d2bf6be8SNick Piggin  * assumptions can be made about lack of locking. get_user_pages_fast is to be
331d2bf6be8SNick Piggin  * implemented in a way that is advantageous (vs get_user_pages()) when the
332d2bf6be8SNick Piggin  * user memory area is already faulted in and present in ptes. However if the
333d2bf6be8SNick Piggin  * pages have to be faulted in, it may turn out to be slightly slower, so
334d2bf6be8SNick Piggin  * callers need to carefully consider what to use. On many architectures,
335d2bf6be8SNick Piggin  * get_user_pages_fast simply falls back to get_user_pages.
3369de100d0SAndy Grover  */
337912985dcSRusty Russell int __attribute__((weak)) get_user_pages_fast(unsigned long start,
338912985dcSRusty Russell 				int nr_pages, int write, struct page **pages)
339912985dcSRusty Russell {
340912985dcSRusty Russell 	struct mm_struct *mm = current->mm;
341912985dcSRusty Russell 	int ret;
342912985dcSRusty Russell 
343912985dcSRusty Russell 	down_read(&mm->mmap_sem);
344912985dcSRusty Russell 	ret = get_user_pages(current, mm, start, nr_pages,
345912985dcSRusty Russell 					write, 0, pages, NULL);
346912985dcSRusty Russell 	up_read(&mm->mmap_sem);
347912985dcSRusty Russell 
348912985dcSRusty Russell 	return ret;
349912985dcSRusty Russell }
350912985dcSRusty Russell EXPORT_SYMBOL_GPL(get_user_pages_fast);
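
/*
 * A minimal usage sketch (hypothetical helper): pin two pages of a user
 * buffer for a short-lived kernel access and release them again with
 * put_page().  Real callers must cope with partial pins, as done below.
 */
static __maybe_unused int example_pin_two_pages(unsigned long uaddr,
						struct page *pages[2])
{
	int ret, i;

	ret = get_user_pages_fast(uaddr, 2, 1, pages);	/* 1 => for writing */
	if (ret < 2) {
		/* Partial pin: release whatever was pinned and bail out. */
		for (i = 0; i < ret; i++)
			put_page(pages[i]);
		return (ret < 0) ? ret : -EFAULT;
	}

	/* ... access the pinned pages here, e.g. via kmap() ... */

	for (i = 0; i < ret; i++)
		put_page(pages[i]);
	return 0;
}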
351ca2b84cbSEduard - Gabriel Munteanu 
352eb36c587SAl Viro unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
353eb36c587SAl Viro 	unsigned long len, unsigned long prot,
354eb36c587SAl Viro 	unsigned long flag, unsigned long pgoff)
355eb36c587SAl Viro {
356eb36c587SAl Viro 	unsigned long ret;
357eb36c587SAl Viro 	struct mm_struct *mm = current->mm;
358*41badc15SMichel Lespinasse 	unsigned long populate;
359eb36c587SAl Viro 
360eb36c587SAl Viro 	ret = security_mmap_file(file, prot, flag);
361eb36c587SAl Viro 	if (!ret) {
362eb36c587SAl Viro 		down_write(&mm->mmap_sem);
363bebeb3d6SMichel Lespinasse 		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
364bebeb3d6SMichel Lespinasse 				    &populate);
365eb36c587SAl Viro 		up_write(&mm->mmap_sem);
366*41badc15SMichel Lespinasse 		if (populate)
367*41badc15SMichel Lespinasse 			mm_populate(ret, populate);
368eb36c587SAl Viro 	}
369eb36c587SAl Viro 	return ret;
370eb36c587SAl Viro }
371eb36c587SAl Viro 
372eb36c587SAl Viro unsigned long vm_mmap(struct file *file, unsigned long addr,
373eb36c587SAl Viro 	unsigned long len, unsigned long prot,
374eb36c587SAl Viro 	unsigned long flag, unsigned long offset)
375eb36c587SAl Viro {
376eb36c587SAl Viro 	if (unlikely(offset + PAGE_ALIGN(len) < offset))
377eb36c587SAl Viro 		return -EINVAL;
378eb36c587SAl Viro 	if (unlikely(offset & ~PAGE_MASK))
379eb36c587SAl Viro 		return -EINVAL;
380eb36c587SAl Viro 
381eb36c587SAl Viro 	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
382eb36c587SAl Viro }
383eb36c587SAl Viro EXPORT_SYMBOL(vm_mmap);
384eb36c587SAl Viro 
385ca2b84cbSEduard - Gabriel Munteanu /* Tracepoints definitions. */
386ca2b84cbSEduard - Gabriel Munteanu EXPORT_TRACEPOINT_SYMBOL(kmalloc);
387ca2b84cbSEduard - Gabriel Munteanu EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
388ca2b84cbSEduard - Gabriel Munteanu EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
389ca2b84cbSEduard - Gabriel Munteanu EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
390ca2b84cbSEduard - Gabriel Munteanu EXPORT_TRACEPOINT_SYMBOL(kfree);
391ca2b84cbSEduard - Gabriel Munteanu EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);
392