xref: /linux/arch/mips/mm/mmap.c (revision 3a39d672e7f48b8d6b91a09afa4b55352773b4b5)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2011 Wind River Systems,
 *   written by Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/compiler.h>
#include <linux/elf-randomize.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/export.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>

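/*
 * Default for caches that cannot alias; the cache setup code raises
 * this on CPUs whose data cache can alias.
 */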
unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

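/*
 * Round addr up to a shm_align_mask boundary and add the cache colour
 * implied by pgoff, so mappings of the same file offset share a colour.
 */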
#define COLOUR_ALIGN(addr, pgoff)				\
	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))

enum mmap_allocation_direction {UP, DOWN};

static unsigned long arch_get_unmapped_area_common(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags, enum mmap_allocation_direction dir)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = addr0;
	int do_color_align;
	struct vm_unmapped_area_info info = {};

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		/* Even MAP_FIXED mappings must reside within TASK_SIZE */
		if (TASK_SIZE - len < addr)
			return -EINVAL;

		/*
		 * We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

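	/*
	 * Mappings backed by a file or shared with other processes can
	 * be reached through more than one virtual address, so
	 * colour-align them to avoid aliases in the data cache.
	 */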
	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

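	/*
	 * When colouring is needed, align_mask/align_offset make
	 * vm_unmapped_area() return an address whose colour matches
	 * that of the file offset, mirroring COLOUR_ALIGN() above.
	 */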
	info.length = len;
	info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;

	if (dir == DOWN) {
		info.flags = VM_UNMAPPED_AREA_TOPDOWN;
		info.low_limit = PAGE_SIZE;
		info.high_limit = mm->mmap_base;
		addr = vm_unmapped_area(&info);

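		/*
		 * vm_unmapped_area() returns a page-aligned address on
		 * success and a negative errno, which has low bits set,
		 * on failure.
		 */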
		if (!(addr & ~PAGE_MASK))
			return addr;

		/*
		 * A failed mmap() very likely causes application failure,
		 * so fall back to the bottom-up function here. This scenario
		 * can happen with large stack limits and large mmap()
		 * allocations.
		 */
	}

	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	return vm_unmapped_area(&info);
}

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
	unsigned long len, unsigned long pgoff, unsigned long flags,
	vm_flags_t vm_flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, UP);
}

/*
 * There is no need to export this but sched.h declares the function as
 * extern so making it static here results in an error.
 */
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags, vm_flags_t vm_flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, DOWN);
}

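/*
 * A kernel virtual address is valid when it lies between PAGE_OFFSET
 * and MAP_BASE and its backing page frame actually exists.
 */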
bool __virt_addr_valid(const volatile void *kaddr)
{
	unsigned long vaddr = (unsigned long)kaddr;

	if ((vaddr < PAGE_OFFSET) || (vaddr >= MAP_BASE))
		return false;

	return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
}
EXPORT_SYMBOL_GPL(__virt_addr_valid);