// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/export.h>
#include <linux/io.h>
#include <linux/kfence.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mman.h>

#define SHM_ALIGN_MASK	(SHMLBA - 1)

#define COLOUR_ALIGN(addr, pgoff)			\
	((((addr) + SHM_ALIGN_MASK) & ~SHM_ALIGN_MASK)	\
	 + (((pgoff) << PAGE_SHIFT) & SHM_ALIGN_MASK))
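
/*
 * Worked example (illustrative values; SHMLBA and PAGE_SHIFT are both
 * configuration dependent): with a 64 KiB SHMLBA and 16 KiB pages
 * (PAGE_SHIFT = 14), COLOUR_ALIGN(0x12345, 3) rounds 0x12345 up to
 * 0x20000 and adds the colour offset (3 << 14) & 0xffff = 0xc000,
 * giving 0x2c000. The low SHMLBA bits of the result equal those of
 * the file offset, so every mapping of a given page lands on the same
 * cache colour.
 */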

enum mmap_allocation_direction {UP, DOWN};

static unsigned long arch_get_unmapped_area_common(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags, enum mmap_allocation_direction dir)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = addr0;
	int do_color_align;
	struct vm_unmapped_area_info info = {};

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		/* Even MAP_FIXED mappings must reside within TASK_SIZE */
		if (TASK_SIZE - len < addr)
			return -EINVAL;

		/*
		 * We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & SHM_ALIGN_MASK))
			return -EINVAL;
		return addr;
	}

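	/*
	 * Colour-align file-backed and shared mappings: two virtual
	 * addresses that map the same data must agree in the bits below
	 * SHMLBA, so that a virtually indexed cache sees both under the
	 * same colour. A private anonymous mapping has no alias to keep
	 * in step with, so plain page alignment is enough.
	 */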
	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

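	/*
	 * PAGE_MASK & SHM_ALIGN_MASK keeps only the colour bits between
	 * page and SHMLBA granularity (0xc000 for the illustrative
	 * 16 KiB page / 64 KiB SHMLBA configuration used above);
	 * vm_unmapped_area() will return an address whose colour bits
	 * match those of align_offset, i.e. of the file offset.
	 */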
	info.length = len;
	info.align_mask = do_color_align ? (PAGE_MASK & SHM_ALIGN_MASK) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;

	if (dir == DOWN) {
		info.flags = VM_UNMAPPED_AREA_TOPDOWN;
		info.low_limit = PAGE_SIZE;
		info.high_limit = mm->mmap_base;
		addr = vm_unmapped_area(&info);

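		/*
		 * vm_unmapped_area() returns either a page-aligned address
		 * or a negative errno; an errno such as -ENOMEM has bits
		 * set below the page boundary, so testing those low bits
		 * distinguishes success from failure.
		 */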
		if (!(addr & ~PAGE_MASK))
			return addr;

		/*
		 * A failed mmap() very likely causes application failure,
		 * so fall back to the bottom-up function here. This scenario
		 * can happen with large stack limits and large mmap()
		 * allocations.
		 */
	}

	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	return vm_unmapped_area(&info);
}

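/*
 * Entry points used by the core VM when picking an unmapped area:
 * the bottom-up variant serves the legacy mmap layout, the top-down
 * variant the default layout, as selected by arch_pick_mmap_layout().
 */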
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
	unsigned long len, unsigned long pgoff, unsigned long flags,
	vm_flags_t vm_flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, UP);
}

/*
 * There is no need to export this but sched.h declares the function as
 * extern so making it static here results in an error.
 */
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags, vm_flags_t vm_flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, DOWN);
}

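/*
 * Return nonzero if @kaddr is a valid, page-backed kernel virtual
 * address. KFENCE objects are accepted up front: the KFENCE pool does
 * not live in the [PAGE_OFFSET, vm_map_base) range checked below, and
 * PHYSADDR() is only meaningful for linear-map addresses anyway.
 */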
int __virt_addr_valid(volatile void *kaddr)
{
	unsigned long vaddr = (unsigned long)kaddr;

	if (is_kfence_address((void *)kaddr))
		return 1;

	if ((vaddr < PAGE_OFFSET) || (vaddr >= vm_map_base))
		return 0;

	return pfn_valid(PFN_DOWN(PHYSADDR(kaddr)));
}
EXPORT_SYMBOL_GPL(__virt_addr_valid);

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This might go
 * away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
	/*
	 * Check whether addr is covered by a memory region without the
	 * MEMBLOCK_NOMAP attribute, and whether that region covers the
	 * entire range. In theory, this could lead to false negatives
	 * if the range is covered by distinct but adjacent memory regions
	 * that only differ in other attributes. However, few such
	 * attributes have been defined, and it is debatable whether it
	 * follows that /dev/mem read() calls should be able to traverse
	 * such boundaries.
	 */
	return memblock_is_region_memory(addr, size) && memblock_is_map_memory(addr);
}

/*
 * Do not allow /dev/mem mappings beyond the supported physical range.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return !(((pfn << PAGE_SHIFT) + size) & ~(GENMASK_ULL(cpu_pabits, 0)));
}
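
/*
 * Illustrative check (the width is configuration dependent): with
 * 48-bit physical addressing, cpu_pabits is 47 and GENMASK_ULL(47, 0)
 * is 0x0000ffffffffffff. If the end of the mapping sets any bit above
 * bit 47, the masked expression is nonzero and the mapping is refused.
 */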