xref: /linux/arch/loongarch/mm/mmap.c (revision 24bce201d79807b668bf9d9e0aca801c5c0d5f78)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/compiler.h>
#include <linux/elf-randomize.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/export.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>

unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

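/*
 * COLOUR_ALIGN rounds addr up to the next alias boundary, then adds the
 * colour of the file offset, so the result and (pgoff << PAGE_SHIFT)
 * agree in the bits covered by shm_align_mask. A worked example, assuming
 * a hypothetical 64 KiB alias size (shm_align_mask == 0xffff) and 4 KiB
 * pages (values chosen for illustration only):
 *
 *	addr = 0x12345, pgoff = 3:
 *	  round up:   (0x12345 + 0xffff) & ~0xffff = 0x20000
 *	  add colour: (3 << 12) & 0xffff           =  0x3000
 *	  result:      0x23000 (same colour as the file offset)
 *
 * With the default shm_align_mask of PAGE_SIZE - 1 above, the colour
 * term is always zero and this reduces to PAGE_ALIGN(addr).
 */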
#define COLOUR_ALIGN(addr, pgoff)				\
	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))

enum mmap_allocation_direction {UP, DOWN};

static unsigned long arch_get_unmapped_area_common(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags, enum mmap_allocation_direction dir)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = addr0;
	int do_color_align;
	struct vm_unmapped_area_info info;

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		/* Even MAP_FIXED mappings must reside within TASK_SIZE */
		if (TASK_SIZE - len < addr)
			return -EINVAL;

		/*
		 * We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
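		/*
		 * Concretely: addr and (pgoff << PAGE_SHIFT) must agree in
		 * the bits covered by shm_align_mask, so that all virtual
		 * aliases of a given page share one cache colour.
		 */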
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

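	/*
	 * File-backed and shared mappings can appear at more than one
	 * virtual address, so they are the ones that need colour alignment
	 * to keep cache aliases coherent; private anonymous memory only
	 * ever has one mapping and can be placed at any page boundary.
	 */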
	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

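	/*
	 * PAGE_MASK & shm_align_mask keeps only the colour bits above page
	 * granularity: vm_unmapped_area() already returns page-aligned
	 * addresses, so only the inter-page colour needs to be enforced
	 * here, with align_offset supplying the colour that the mapping's
	 * file offset wants.
	 */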
	info.length = len;
	info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;

	if (dir == DOWN) {
		info.flags = VM_UNMAPPED_AREA_TOPDOWN;
		info.low_limit = PAGE_SIZE;
		info.high_limit = mm->mmap_base;
		addr = vm_unmapped_area(&info);

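		/*
		 * vm_unmapped_area() returns either a page-aligned address
		 * or a negative errno, whose low bits are set; clear low
		 * bits therefore mean success.
		 */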
		if (!(addr & ~PAGE_MASK))
			return addr;

		/*
		 * A failed mmap() very likely causes application failure,
		 * so fall back to the bottom-up function here. This scenario
		 * can happen with large stack limits and large mmap()
		 * allocations.
		 */
	}

	info.flags = 0;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	return vm_unmapped_area(&info);
}

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, UP);
}

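/*
 * The generic mmap() path chooses between these two entry points via
 * mm->get_unmapped_area, which arch_pick_mmap_layout() points at either
 * the bottom-up variant above (legacy layout) or the top-down variant
 * below (the default layout).
 */
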
/*
 * There is no need to export this, but sched.h declares the function as
 * extern, so making it static here results in an error.
 */
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, DOWN);
}

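/*
 * A kernel virtual address is accepted here only if it lies in
 * [PAGE_OFFSET, vm_map_base), i.e. below the page-table-mapped region
 * used for vmalloc and modules, and if the page frame it translates to
 * actually exists (pfn_valid()).
 */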
int __virt_addr_valid(volatile void *kaddr)
{
	unsigned long vaddr = (unsigned long)kaddr;

	if ((vaddr < PAGE_OFFSET) || (vaddr >= vm_map_base))
		return 0;

	return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
}
EXPORT_SYMBOL_GPL(__virt_addr_valid);