xref: /linux/lib/vdso/datastore.c (revision 51d6ca373f459fa6c91743e14ae69854d844aa38)
1 // SPDX-License-Identifier: GPL-2.0-only
2 
3 #include <linux/linkage.h>
4 #include <linux/mmap_lock.h>
5 #include <linux/mm.h>
6 #include <linux/time_namespace.h>
7 #include <linux/types.h>
8 #include <linux/vdso_datastore.h>
9 #include <vdso/datapage.h>
10 
/*
 * The vDSO data pages.
 */
#ifdef CONFIG_HAVE_GENERIC_VDSO
/*
 * Page-aligned backing store for the vDSO time data. vdso_k_time_data
 * is the kernel-side pointer to it; the page itself is mapped into
 * userspace on demand by vvar_fault(). The assert guarantees the store
 * occupies exactly one page, as the fault handler maps it by PFN.
 */
static union vdso_data_store vdso_time_data_store __page_aligned_data;
struct vdso_time_data *vdso_k_time_data = vdso_time_data_store.data;
static_assert(sizeof(vdso_time_data_store) == PAGE_SIZE);
#endif /* CONFIG_HAVE_GENERIC_VDSO */
19 
#ifdef CONFIG_VDSO_GETRANDOM
/*
 * Page-aligned backing store for the vDSO RNG data. The union pads the
 * data out to exactly PAGE_SIZE (checked by the assert below), since
 * vvar_fault() maps the whole page into userspace by PFN.
 */
static union {
	struct vdso_rng_data	data;
	u8			page[PAGE_SIZE];
} vdso_rng_data_store __page_aligned_data;
struct vdso_rng_data *vdso_k_rng_data = &vdso_rng_data_store.data;
static_assert(sizeof(vdso_rng_data_store) == PAGE_SIZE);
#endif /* CONFIG_VDSO_GETRANDOM */
28 
29 static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
30 			     struct vm_area_struct *vma, struct vm_fault *vmf)
31 {
32 	struct page *timens_page = find_timens_vvar_page(vma);
33 	unsigned long addr, pfn;
34 	vm_fault_t err;
35 
36 	switch (vmf->pgoff) {
37 	case VDSO_TIME_PAGE_OFFSET:
38 		if (!IS_ENABLED(CONFIG_HAVE_GENERIC_VDSO))
39 			return VM_FAULT_SIGBUS;
40 		pfn = __phys_to_pfn(__pa_symbol(vdso_k_time_data));
41 		if (timens_page) {
42 			/*
43 			 * Fault in VVAR page too, since it will be accessed
44 			 * to get clock data anyway.
45 			 */
46 			addr = vmf->address + VDSO_TIMENS_PAGE_OFFSET * PAGE_SIZE;
47 			err = vmf_insert_pfn(vma, addr, pfn);
48 			if (unlikely(err & VM_FAULT_ERROR))
49 				return err;
50 			pfn = page_to_pfn(timens_page);
51 		}
52 		break;
53 	case VDSO_TIMENS_PAGE_OFFSET:
54 		/*
55 		 * If a task belongs to a time namespace then a namespace
56 		 * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
57 		 * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
58 		 * offset.
59 		 * See also the comment near timens_setup_vdso_data().
60 		 */
61 		if (!IS_ENABLED(CONFIG_TIME_NS) || !timens_page)
62 			return VM_FAULT_SIGBUS;
63 		pfn = __phys_to_pfn(__pa_symbol(vdso_k_time_data));
64 		break;
65 	case VDSO_RNG_PAGE_OFFSET:
66 		if (!IS_ENABLED(CONFIG_VDSO_GETRANDOM))
67 			return VM_FAULT_SIGBUS;
68 		pfn = __phys_to_pfn(__pa_symbol(vdso_k_rng_data));
69 		break;
70 	default:
71 		return VM_FAULT_SIGBUS;
72 	}
73 
74 	return vmf_insert_pfn(vma, vmf->address, pfn);
75 }
76 
/*
 * Special-mapping descriptor for the "[vvar]" area; all page faults in
 * it are served by vvar_fault().
 */
const struct vm_special_mapping vdso_vvar_mapping = {
	.name	= "[vvar]",
	.fault	= vvar_fault,
};
81 
82 struct vm_area_struct *vdso_install_vvar_mapping(struct mm_struct *mm, unsigned long addr)
83 {
84 	return _install_special_mapping(mm, addr, VDSO_NR_PAGES * PAGE_SIZE,
85 					VM_READ | VM_MAYREAD | VM_IO | VM_DONTDUMP | VM_PFNMAP,
86 					&vdso_vvar_mapping);
87 }
88 
89 #ifdef CONFIG_TIME_NS
90 /*
91  * The vvar page layout depends on whether a task belongs to the root or
92  * non-root time namespace. Whenever a task changes its namespace, the VVAR
93  * page tables are cleared and then they will be re-faulted with a
94  * corresponding layout.
95  * See also the comment near timens_setup_vdso_data() for details.
96  */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	/*
	 * Zap the PTEs of every vvar special mapping in the task's mm so
	 * the pages are re-faulted through vvar_fault() with the layout
	 * matching the new namespace. @ns itself is not consulted here.
	 */
	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		if (vma_is_special_mapping(vma, &vdso_vvar_mapping))
			zap_vma_pages(vma);
	}
	mmap_read_unlock(mm);

	/* Cannot fail; the int return keeps the expected prototype. */
	return 0;
}
112 
/*
 * Retype a vvar page pointer as the time data it contains; the data
 * lives at the start of the page, so no offsetting is needed.
 */
struct vdso_time_data *arch_get_vdso_data(void *vvar_page)
{
	return vvar_page;
}
117 #endif
118