xref: /linux/lib/vdso/datastore.c (revision 0b3bc3354eb9ad36719a044726092750a2ba01ff)
1 // SPDX-License-Identifier: GPL-2.0-only
2 
3 #include <linux/linkage.h>
4 #include <linux/mmap_lock.h>
5 #include <linux/mm.h>
6 #include <linux/time_namespace.h>
7 #include <linux/types.h>
8 #include <linux/vdso_datastore.h>
9 #include <vdso/datapage.h>
10 
11 /*
12  * The vDSO data page.
13  */
#ifdef CONFIG_HAVE_GENERIC_VDSO
/* Backing store for the generic vDSO time data page. */
static union vdso_data_store vdso_time_data_store __page_aligned_data;
struct vdso_time_data *vdso_k_time_data = vdso_time_data_store.data;
static_assert(sizeof(vdso_time_data_store) == PAGE_SIZE);
#endif /* CONFIG_HAVE_GENERIC_VDSO */
19 
#ifdef CONFIG_VDSO_GETRANDOM
/* Backing store for the vDSO getrandom() data page. */
static union {
	struct vdso_rng_data	data;
	u8			page[PAGE_SIZE];
} vdso_rng_data_store __page_aligned_data;
struct vdso_rng_data *vdso_k_rng_data = &vdso_rng_data_store.data;
static_assert(sizeof(vdso_rng_data_store) == PAGE_SIZE);
#endif /* CONFIG_VDSO_GETRANDOM */
28 
#ifdef CONFIG_ARCH_HAS_VDSO_ARCH_DATA
/* Backing store for the architecture-specific vDSO data pages. */
static union {
	struct vdso_arch_data	data;
	u8			page[VDSO_ARCH_DATA_SIZE];
} vdso_arch_data_store __page_aligned_data;
struct vdso_arch_data *vdso_k_arch_data = &vdso_arch_data_store.data;
/*
 * The fault handler maps this store page by page from
 * VDSO_ARCH_PAGES_START onwards, so it must cover whole pages -
 * mirroring the PAGE_SIZE asserts on the other data stores.
 */
static_assert(sizeof(vdso_arch_data_store) % PAGE_SIZE == 0);
#endif /* CONFIG_ARCH_HAS_VDSO_ARCH_DATA */
36 
37 static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
38 			     struct vm_area_struct *vma, struct vm_fault *vmf)
39 {
40 	struct page *timens_page = find_timens_vvar_page(vma);
41 	unsigned long addr, pfn;
42 	vm_fault_t err;
43 
44 	switch (vmf->pgoff) {
45 	case VDSO_TIME_PAGE_OFFSET:
46 		if (!IS_ENABLED(CONFIG_HAVE_GENERIC_VDSO))
47 			return VM_FAULT_SIGBUS;
48 		pfn = __phys_to_pfn(__pa_symbol(vdso_k_time_data));
49 		if (timens_page) {
50 			/*
51 			 * Fault in VVAR page too, since it will be accessed
52 			 * to get clock data anyway.
53 			 */
54 			addr = vmf->address + VDSO_TIMENS_PAGE_OFFSET * PAGE_SIZE;
55 			err = vmf_insert_pfn(vma, addr, pfn);
56 			if (unlikely(err & VM_FAULT_ERROR))
57 				return err;
58 			pfn = page_to_pfn(timens_page);
59 		}
60 		break;
61 	case VDSO_TIMENS_PAGE_OFFSET:
62 		/*
63 		 * If a task belongs to a time namespace then a namespace
64 		 * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
65 		 * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
66 		 * offset.
67 		 * See also the comment near timens_setup_vdso_data().
68 		 */
69 		if (!IS_ENABLED(CONFIG_TIME_NS) || !timens_page)
70 			return VM_FAULT_SIGBUS;
71 		pfn = __phys_to_pfn(__pa_symbol(vdso_k_time_data));
72 		break;
73 	case VDSO_RNG_PAGE_OFFSET:
74 		if (!IS_ENABLED(CONFIG_VDSO_GETRANDOM))
75 			return VM_FAULT_SIGBUS;
76 		pfn = __phys_to_pfn(__pa_symbol(vdso_k_rng_data));
77 		break;
78 	case VDSO_ARCH_PAGES_START ... VDSO_ARCH_PAGES_END:
79 		if (!IS_ENABLED(CONFIG_ARCH_HAS_VDSO_ARCH_DATA))
80 			return VM_FAULT_SIGBUS;
81 		pfn = __phys_to_pfn(__pa_symbol(vdso_k_arch_data)) +
82 			vmf->pgoff - VDSO_ARCH_PAGES_START;
83 		break;
84 	default:
85 		return VM_FAULT_SIGBUS;
86 	}
87 
88 	return vmf_insert_pfn(vma, vmf->address, pfn);
89 }
90 
91 const struct vm_special_mapping vdso_vvar_mapping = {
92 	.name	= "[vvar]",
93 	.fault	= vvar_fault,
94 };
95 
96 struct vm_area_struct *vdso_install_vvar_mapping(struct mm_struct *mm, unsigned long addr)
97 {
98 	return _install_special_mapping(mm, addr, VDSO_NR_PAGES * PAGE_SIZE,
99 					VM_READ | VM_MAYREAD | VM_IO | VM_DONTDUMP | VM_PFNMAP,
100 					&vdso_vvar_mapping);
101 }
102 
103 #ifdef CONFIG_TIME_NS
104 /*
105  * The vvar page layout depends on whether a task belongs to the root or
106  * non-root time namespace. Whenever a task changes its namespace, the VVAR
107  * page tables are cleared and then they will be re-faulted with a
108  * corresponding layout.
109  * See also the comment near timens_setup_vdso_data() for details.
110  */
111 int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
112 {
113 	struct mm_struct *mm = task->mm;
114 	struct vm_area_struct *vma;
115 	VMA_ITERATOR(vmi, mm, 0);
116 
117 	mmap_read_lock(mm);
118 	for_each_vma(vmi, vma) {
119 		if (vma_is_special_mapping(vma, &vdso_vvar_mapping))
120 			zap_vma_pages(vma);
121 	}
122 	mmap_read_unlock(mm);
123 
124 	return 0;
125 }
126 
127 struct vdso_time_data *arch_get_vdso_data(void *vvar_page)
128 {
129 	return (struct vdso_time_data *)vvar_page;
130 }
131 #endif
132