xref: /linux/arch/csky/abiv1/mmap.c (revision 90d32e92011eaae8e70a9169b4e7acf4ca8f9d3a)
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched.h>
#include <linux/random.h>
#include <linux/io.h>

#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
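
/*
 * Worked example with illustrative values (not necessarily csky's
 * real configuration): with SHMLBA = 0x4000 and PAGE_SHIFT = 12,
 * COLOUR_ALIGN(0x12345, 3) rounds 0x12345 up to the next SHMLBA
 * boundary, 0x14000, then adds the colour offset
 * (3 << 12) & 0x3fff = 0x3000, yielding 0x17000.  Every mapping of
 * page 3 thus sits 0x3000 bytes into an SHMLBA window, so aliases
 * share a cache colour.
 */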

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches: a given page of an object
 * must always be mapped at the same offset within an SHMLBA-sized
 * window, so any two mappings of that page differ by a whole
 * multiple of SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases.  A
 * userspace sketch of the resulting guarantee follows the function
 * below.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_align = 0;
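	/*
	 * Describe the search up front: we want a @len-byte gap between
	 * mmap_base and TASK_SIZE, and we record the file offset so the
	 * chosen address can be colour-matched (see align_mask below).
	 */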
	struct vm_unmapped_area_info info = {
		.length = len,
		.low_limit = mm->mmap_base,
		.high_limit = TASK_SIZE,
		.align_offset = pgoff << PAGE_SHIFT
	};

	/*
	 * We only need to do colour alignment if the I or D caches
	 * alias; abiv1 caches are virtually indexed, so every
	 * file-backed or shared mapping gets aligned.
	 */
	do_align = filp || (flags & MAP_SHARED);

	/*
	 * We enforce the MAP_FIXED case: the address cannot be moved,
	 * so we only validate it.  A shared mapping must already sit
	 * on its colour, i.e. addr must be congruent to the file
	 * offset modulo SHMLBA, or the mapping is refused.
	 */
	if (flags & MAP_FIXED) {
		if (flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}
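	/*
	 * Example with a hypothetical SHMLBA of 0x4000: MAP_FIXED |
	 * MAP_SHARED at addr 0x1000 with pgoff 0 is rejected above,
	 * since (0x1000 - 0) & 0x3fff is non-zero, while addr 0x4000
	 * or 0x8000 would be accepted.
	 */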

	if (len > TASK_SIZE)
		return -ENOMEM;

	/*
	 * Honour the caller's hint if the colour-adjusted address
	 * still leaves a large enough gap before the next VMA.
	 */
	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

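	/*
	 * Keep only the whole-page bits of the colour mask: with a
	 * hypothetical SHMLBA of four 4 KiB pages,
	 * PAGE_MASK & (SHMLBA - 1) is 0x3000, the two page-number bits
	 * selecting the colour.  vm_unmapped_area() then returns an
	 * address matching align_offset on those bits, preserving the
	 * file's colour.
	 */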
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	return vm_unmapped_area(&info);
}
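
/*
 * Illustrative userspace sketch, not part of the kernel file above:
 * a minimal program exercising the guarantee documented before
 * arch_get_unmapped_area().  Two MAP_SHARED mappings of the same page
 * of a file should come back at addresses congruent modulo SHMLBA,
 * i.e. on the same cache colour.  The file name "testfile" and the
 * 4 KiB page size are assumptions for the example.
 *
 * Build separately, e.g.: cc -o colour colour.c
 */
#include <stdio.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/shm.h>	/* SHMLBA */

int main(void)
{
	int fd = open("testfile", O_RDWR);	/* any page-sized file */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Map page 0 of the object twice; the kernel picks both addresses. */
	void *a = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	void *b = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	if (a == MAP_FAILED || b == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Expect 0: the same page of the object keeps its colour. */
	printf("colour delta = %lu\n",
	       ((unsigned long)a - (unsigned long)b) % SHMLBA);
	return 0;
}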