// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
 *                    <benh@kernel.crashing.org>
 * Copyright (C) 2012 ARM Limited
 * Copyright (C) 2015 Regents of the University of California
 */

#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/binfmts.h>
#include <linux/err.h>
#include <asm/page.h>
#include <asm/vdso.h>
#include <linux/time_namespace.h>
#include <vdso/datapage.h>
#include <vdso/vsyscall.h>

enum vvar_pages {
	VVAR_DATA_PAGE_OFFSET,
	VVAR_TIMENS_PAGE_OFFSET,
	VVAR_NR_PAGES,
};

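/*
 * Indices into the vm_special_mapping arrays below: one entry for the
 * data ([vvar]) mapping and one for the code ([vdso]) mapping.
 */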
enum rv_vdso_map {
	RV_VDSO_MAP_VVAR,
	RV_VDSO_MAP_VDSO,
};

#define VVAR_SIZE  (VVAR_NR_PAGES << PAGE_SHIFT)

/*
 * The vDSO data page.
 */
static union {
	struct vdso_data	data;
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;

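/*
 * Describes one vDSO image (native, and compat when CONFIG_COMPAT is set):
 * the bounds of its ELF text in the kernel image and the special mappings
 * used for its data and code areas.
 */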
struct __vdso_info {
	const char *name;
	const char *vdso_code_start;
	const char *vdso_code_end;
	unsigned long vdso_pages;
	/* Data Mapping */
	struct vm_special_mapping *dm;
	/* Code Mapping */
	struct vm_special_mapping *cm;
};

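/*
 * Forward declarations: vdso_join_timens() below uses these before their
 * full definitions further down in this file.
 */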
static struct __vdso_info vdso_info;
#ifdef CONFIG_COMPAT
static struct __vdso_info compat_vdso_info;
#endif

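/*
 * Keep the cached vDSO base up to date when userspace moves the [vdso]
 * mapping, e.g. via mremap().
 */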
static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *new_vma)
{
	current->mm->context.vdso = (void *)new_vma->vm_start;

	return 0;
}

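/*
 * Validate the embedded vDSO image and build the array of struct pages
 * backing its code mapping. Runs once at boot for each image.
 */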
static void __init __vdso_init(struct __vdso_info *vdso_info)
{
	unsigned int i;
	struct page **vdso_pagelist;
	unsigned long pfn;

	if (memcmp(vdso_info->vdso_code_start, "\177ELF", 4))
		panic("vDSO is not a valid ELF object!\n");

	vdso_info->vdso_pages = (
		vdso_info->vdso_code_end -
		vdso_info->vdso_code_start) >>
		PAGE_SHIFT;

	vdso_pagelist = kcalloc(vdso_info->vdso_pages,
				sizeof(struct page *),
				GFP_KERNEL);
	if (vdso_pagelist == NULL)
		panic("vDSO kcalloc failed!\n");

	/* Grab the vDSO code pages. */
	pfn = sym_to_pfn(vdso_info->vdso_code_start);

	for (i = 0; i < vdso_info->vdso_pages; i++)
		vdso_pagelist[i] = pfn_to_page(pfn + i);

	vdso_info->cm->pages = vdso_pagelist;
}

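/*
 * With CONFIG_TIME_NS the generic vDSO code needs to find the vdso_data
 * inside a vvar page; on RISC-V the data sits at the start of the page.
 */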
#ifdef CONFIG_TIME_NS
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page);
}

/*
 * The vvar mapping contains data for a specific time namespace, so when a task
 * changes namespace we must unmap its vvar data for the old namespace.
 * Subsequent faults will map in data for the new namespace.
 *
 * For more details see timens_setup_vdso_data().
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_read_lock(mm);

	for_each_vma(vmi, vma) {
		if (vma_is_special_mapping(vma, vdso_info.dm))
			zap_vma_pages(vma);
#ifdef CONFIG_COMPAT
		if (vma_is_special_mapping(vma, compat_vdso_info.dm))
			zap_vma_pages(vma);
#endif
	}

	mmap_read_unlock(mm);
	return 0;
}
#endif

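/*
 * The vvar area is mapped VM_PFNMAP with no pages installed up front;
 * this fault handler inserts the shared vdso_data page (or the time
 * namespace page) on demand, based on the faulting offset.
 */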
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *timens_page = find_timens_vvar_page(vma);
	unsigned long pfn;

	switch (vmf->pgoff) {
	case VVAR_DATA_PAGE_OFFSET:
		if (timens_page)
			pfn = page_to_pfn(timens_page);
		else
			pfn = sym_to_pfn(vdso_data);
		break;
#ifdef CONFIG_TIME_NS
	case VVAR_TIMENS_PAGE_OFFSET:
		/*
		 * If a task belongs to a time namespace then a namespace-
		 * specific VVAR page is mapped at the VVAR_DATA_PAGE_OFFSET
		 * offset and the real VVAR page is mapped at the
		 * VVAR_TIMENS_PAGE_OFFSET offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (!timens_page)
			return VM_FAULT_SIGBUS;
		pfn = sym_to_pfn(vdso_data);
		break;
#endif /* CONFIG_TIME_NS */
	default:
		return VM_FAULT_SIGBUS;
	}

	return vmf_insert_pfn(vma, vmf->address, pfn);
}

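/*
 * Special mappings for the native vDSO: [vvar] is populated lazily via
 * vvar_fault(), while [vdso] is backed by the page array set up in
 * __vdso_init() and supports mremap().
 */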
static struct vm_special_mapping rv_vdso_maps[] __ro_after_init = {
	[RV_VDSO_MAP_VVAR] = {
		.name   = "[vvar]",
		.fault = vvar_fault,
	},
	[RV_VDSO_MAP_VDSO] = {
		.name   = "[vdso]",
		.mremap = vdso_mremap,
	},
};

static struct __vdso_info vdso_info __ro_after_init = {
	.name = "vdso",
	.vdso_code_start = vdso_start,
	.vdso_code_end = vdso_end,
	.dm = &rv_vdso_maps[RV_VDSO_MAP_VVAR],
	.cm = &rv_vdso_maps[RV_VDSO_MAP_VDSO],
};

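/* The compat (32-bit) vDSO gets its own special mappings and image info. */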
#ifdef CONFIG_COMPAT
static struct vm_special_mapping rv_compat_vdso_maps[] __ro_after_init = {
	[RV_VDSO_MAP_VVAR] = {
		.name   = "[vvar]",
		.fault = vvar_fault,
	},
	[RV_VDSO_MAP_VDSO] = {
		.name   = "[vdso]",
		.mremap = vdso_mremap,
	},
};

static struct __vdso_info compat_vdso_info __ro_after_init = {
	.name = "compat_vdso",
	.vdso_code_start = compat_vdso_start,
	.vdso_code_end = compat_vdso_end,
	.dm = &rv_compat_vdso_maps[RV_VDSO_MAP_VVAR],
	.cm = &rv_compat_vdso_maps[RV_VDSO_MAP_VDSO],
};
#endif

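/*
 * Boot-time setup, run as an arch_initcall(): build the code page arrays
 * for the native and, if enabled, compat vDSO images.
 */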
static int __init vdso_init(void)
{
	__vdso_init(&vdso_info);
#ifdef CONFIG_COMPAT
	__vdso_init(&compat_vdso_info);
#endif

	return 0;
}
arch_initcall(vdso_init);

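/*
 * Map one vDSO image into @mm. The layout is the vvar area (VVAR_SIZE)
 * immediately followed by the vDSO text; mm->context.vdso records the
 * start of the text.
 */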
static int __setup_additional_pages(struct mm_struct *mm,
				    struct linux_binprm *bprm,
				    int uses_interp,
				    struct __vdso_info *vdso_info)
{
	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
	void *ret;

	BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);

	vdso_text_len = vdso_info->vdso_pages << PAGE_SHIFT;
	/* Be sure to map the data pages */
	vdso_mapping_len = vdso_text_len + VVAR_SIZE;

	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = ERR_PTR(vdso_base);
		goto up_fail;
	}

	ret = _install_special_mapping(mm, vdso_base, VVAR_SIZE,
		(VM_READ | VM_MAYREAD | VM_PFNMAP), vdso_info->dm);
	if (IS_ERR(ret))
		goto up_fail;

	vdso_base += VVAR_SIZE;
	mm->context.vdso = (void *)vdso_base;

	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
		(VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
		vdso_info->cm);

	if (IS_ERR(ret))
		goto up_fail;

	return 0;

up_fail:
	mm->context.vdso = NULL;
	return PTR_ERR(ret);
}

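/* Map the compat vDSO into a new 32-bit process image. */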
#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = __setup_additional_pages(mm, bprm, uses_interp,
				       &compat_vdso_info);
	mmap_write_unlock(mm);

	return ret;
}
#endif

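/* Called from the ELF loader to map the native vDSO into a new process. */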
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = __setup_additional_pages(mm, bprm, uses_interp, &vdso_info);
	mmap_write_unlock(mm);

	return ret;
}
289