/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>

#include <asm/system.h>

#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
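
/*
 * Worked example of COLOUR_ALIGN() (illustrative values only): with
 * the ARM definition SHMLBA = 4 * PAGE_SIZE = 0x4000 and PAGE_SHIFT
 * = 12, COLOUR_ALIGN(0x40001000, 5) first rounds the address up to
 * an SHMLBA boundary, 0x40004000, and then adds the colour of the
 * requested page, (5 << 12) & 0x3fff = 0x1000, giving 0x40005000.
 * The result is congruent to (pgoff << PAGE_SHIFT) modulo SHMLBA, so
 * page 5 of the object always lands on the same cache colour.
 */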

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches: a specific page of an
 * object must always be mapped at the same offset from an SHMLBA
 * byte boundary, so that it always lands on the same cache colour.
 *
 * We unconditionally provide this function for all cases; however,
 * in the VIVT case, we optimise out the alignment rules.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
#ifdef CONFIG_CPU_V6
	unsigned int cache_type;
	int do_align = 0, aliasing = 0;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.  This is indicated by the P bits of the
	 * cache type register: bit 11 for the I cache and bit 23
	 * for the D cache.
	 */
	cache_type = read_cpuid(CPUID_CACHETYPE);
	if (cache_type != read_cpuid(CPUID_ID)) {
		aliasing = (cache_type | cache_type >> 12) & (1 << 11);
		if (aliasing)
			do_align = filp || flags & MAP_SHARED;
	}
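	/*
	 * Illustrative note on the test above: shifting the register
	 * right by 12 folds the D-size field onto the I-size field, so
	 * a single check of bit 11 catches either P bit.  For example,
	 * a cache type value with bit 23 set (the D cache P bit) gives
	 * (cache_type >> 12) with bit 11 set, making aliasing non-zero.
	 */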
#else
#define do_align 0
#define aliasing 0
#endif

	/*
	 * We should enforce the MAP_FIXED case.  However, currently
	 * the generic kernel code doesn't allow us to handle this.
	 */
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED && addr & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}
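	/*
	 * For example (illustrative numbers): with SHMLBA = 0x4000 on
	 * an aliasing cache, a MAP_SHARED|MAP_FIXED request at
	 * 0x40001000 is refused with -EINVAL by the check above, while
	 * one at 0x40004000 is passed through unchanged.
	 */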

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

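		/*
		 * Honour the hint only if, after alignment, the region
		 * still fits below TASK_SIZE and does not overlap an
		 * existing vma; otherwise fall through to the full
		 * search below.
		 */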
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
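
	/*
	 * free_area_cache remembers where the last successful search
	 * finished; cached_hole_size is the size of the largest hole
	 * known to lie below it.  If the request would fit in that
	 * hole, restart from TASK_UNMAPPED_BASE to reuse low address
	 * space; otherwise carry on from the cached position.
	 */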
	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

full_search:
	if (do_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
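		/*
		 * Note that the retry above happens at most once:
		 * start_addr is reset to TASK_UNMAPPED_BASE before the
		 * goto, so running out of space a second time really
		 * does mean -ENOMEM.
		 */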
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
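		/*
		 * No room before this vma: move past it and re-apply
		 * the colour offset, since vm_end is only guaranteed
		 * to be page aligned.
		 */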
		addr = vma->vm_end;
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}
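
/*
 * A rough userspace view of the guarantee above (an illustrative
 * sketch, assuming an aliasing VIPT cache where do_align is in
 * effect): two MAP_SHARED mappings of the same file offset come back
 * at addresses that share a cache colour, i.e. are congruent modulo
 * SHMLBA:
 *
 *	void *a = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
 *	void *b = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
 *	assert(((unsigned long)a & (SHMLBA - 1)) ==
 *	       ((unsigned long)b & (SHMLBA - 1)));
 */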