// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015 Imagination Technologies
 * Author: Alex Smith <alex.smith@imgtec.com>
 */

#include <linux/binfmts.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/abi.h>
#include <asm/mips-cps.h>
#include <asm/page.h>
#include <asm/vdso.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>

/* Kernel-provided data used by the VDSO. */
static union vdso_data_store mips_vdso_data __page_aligned_data;
struct vdso_data *vdso_data = mips_vdso_data.data;

/*
 * Mapping for the VDSO data/GIC pages. The real pages are mapped manually, as
 * what we map and where within the area they are mapped is determined at
 * runtime.
 */
static struct page *no_pages[] = { NULL };
static struct vm_special_mapping vdso_vvar_mapping = {
	.name = "[vvar]",
	.pages = no_pages,
};
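/*
 * Leaving .pages empty means _install_special_mapping() only creates the
 * VMA; the GIC user page and the VDSO data page are inserted into it later
 * via io_remap_pfn_range()/remap_pfn_range() in
 * arch_setup_additional_pages(), once their addresses are known.
 */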

static void __init init_vdso_image(struct mips_vdso_image *image)
{
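	/*
	 * The VDSO image is built into the kernel, so resolve its symbol
	 * address to a PFN and record the backing struct pages; these are
	 * what gets mapped into each process in
	 * arch_setup_additional_pages() below.
	 */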
	unsigned long num_pages, i;
	unsigned long data_pfn;

	BUG_ON(!PAGE_ALIGNED(image->data));
	BUG_ON(!PAGE_ALIGNED(image->size));

	num_pages = image->size / PAGE_SIZE;

	data_pfn = __phys_to_pfn(__pa_symbol(image->data));
	for (i = 0; i < num_pages; i++)
		image->mapping.pages[i] = pfn_to_page(data_pfn + i);
}

static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image);

#ifdef CONFIG_MIPS32_O32
	init_vdso_image(&vdso_image_o32);
#endif

#ifdef CONFIG_MIPS32_N32
	init_vdso_image(&vdso_image_n32);
#endif

	return 0;
}
subsys_initcall(init_vdso);

static unsigned long vdso_base(void)
{
	unsigned long base = STACK_TOP;

	if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) {
		/* Skip the delay slot emulation page */
		base += PAGE_SIZE;
	}

	if (current->flags & PF_RANDOMIZE) {
		base += get_random_u32_below(VDSO_RANDOMIZE_SIZE);
		base = PAGE_ALIGN(base);
	}

	return base;
}

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mips_vdso_image *image = current->thread.abi->vdso;
	struct mm_struct *mm = current->mm;
	unsigned long gic_size, vvar_size, size, base, data_addr, vdso_addr, gic_pfn, gic_base;
	struct vm_area_struct *vma;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) {
		/* Map delay slot emulation page */
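		/*
		 * This page sits directly above the stack and is used by the
		 * FPU emulator to execute branch delay slot instructions from
		 * user context (see arch/mips/math-emu/dsemul.c), which is
		 * why vdso_base() skips over it.
		 */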
		base = mmap_region(NULL, STACK_TOP, PAGE_SIZE,
				VM_READ | VM_EXEC |
				VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
				0, NULL);
		if (IS_ERR_VALUE(base)) {
			ret = base;
			goto out;
		}
	}

	/*
	 * Determine total area size. This includes the VDSO image itself, the
	 * data page, and the GIC user page if present. Always create a mapping
	 * for the GIC user area if the GIC is present, regardless of whether it
	 * is the current clocksource, in case it comes into use later on. We
	 * only map a single page even though the GIC user area is 64K, as we
	 * only need the counter registers at its start.
	 */
	gic_size = mips_gic_present() ? PAGE_SIZE : 0;
	vvar_size = gic_size + PAGE_SIZE;
	size = vvar_size + image->size;

	/*
	 * Find a region that's large enough for us to perform the
	 * colour-matching alignment below.
	 */
	if (cpu_has_dc_aliases)
		size += shm_align_mask + 1;

	base = get_unmapped_area(NULL, vdso_base(), size, 0, 0);
	if (IS_ERR_VALUE(base)) {
		ret = base;
		goto out;
	}

	/*
	 * If we suffer from dcache aliasing, ensure that the VDSO data page
	 * mapping is coloured the same as the kernel's mapping of that memory.
	 * This ensures that when the kernel updates the VDSO data, userland
	 * will observe it without requiring cache invalidations.
	 */
	if (cpu_has_dc_aliases) {
		base = __ALIGN_MASK(base, shm_align_mask);
		base += ((unsigned long)vdso_data - gic_size) & shm_align_mask;
	}
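	/*
	 * base is now adjusted so that (base + gic_size), the address the
	 * data page is mapped at below, has the same cache colour as the
	 * kernel's vdso_data mapping.
	 */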

	data_addr = base + gic_size;
	vdso_addr = data_addr + PAGE_SIZE;
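	/*
	 * Resulting layout of the area starting at base:
	 *
	 *   base       ->  GIC user page (only when gic_size != 0)
	 *   data_addr  ->  VDSO data page
	 *   vdso_addr  ->  VDSO image (image->size bytes)
	 */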

	vma = _install_special_mapping(mm, base, vvar_size,
				       VM_READ | VM_MAYREAD,
				       &vdso_vvar_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* Map GIC user page. */
	if (gic_size) {
		gic_base = (unsigned long)mips_gic_base + MIPS_GIC_USER_OFS;
		gic_pfn = PFN_DOWN(__pa(gic_base));
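		/*
		 * The GIC user section is MMIO, so map it uncached; userspace
		 * reads of the counter registers then go straight to the
		 * device.
		 */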

		ret = io_remap_pfn_range(vma, base, gic_pfn, gic_size,
					 pgprot_noncached(vma->vm_page_prot));
		if (ret)
			goto out;
	}

	/* Map data page. */
	ret = remap_pfn_range(vma, data_addr,
			      virt_to_phys(vdso_data) >> PAGE_SHIFT,
			      PAGE_SIZE, vma->vm_page_prot);
	if (ret)
		goto out;

	/* Map VDSO image. */
	vma = _install_special_mapping(mm, vdso_addr, image->size,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
				       &image->mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	mm->context.vdso = (void *)vdso_addr;
	ret = 0;

out:
	mmap_write_unlock(mm);
	return ret;
}
193