// SPDX-License-Identifier: GPL-2.0-only

#include <linux/linkage.h>
#include <linux/mmap_lock.h>
#include <linux/mm.h>
#include <linux/time_namespace.h>
#include <linux/types.h>
#include <linux/vdso_datastore.h>
#include <vdso/datapage.h>

/*
 * The vDSO data page.
 */
#ifdef CONFIG_HAVE_GENERIC_VDSO
static union {
	struct vdso_time_data	data;
	u8			page[PAGE_SIZE];
} vdso_time_data_store __page_aligned_data;
struct vdso_time_data *vdso_k_time_data = &vdso_time_data_store.data;
static_assert(sizeof(vdso_time_data_store) == PAGE_SIZE);
#endif /* CONFIG_HAVE_GENERIC_VDSO */

#ifdef CONFIG_VDSO_GETRANDOM
static union {
	struct vdso_rng_data	data;
	u8			page[PAGE_SIZE];
} vdso_rng_data_store __page_aligned_data;
struct vdso_rng_data *vdso_k_rng_data = &vdso_rng_data_store.data;
static_assert(sizeof(vdso_rng_data_store) == PAGE_SIZE);
#endif /* CONFIG_VDSO_GETRANDOM */

#ifdef CONFIG_ARCH_HAS_VDSO_ARCH_DATA
static union {
	struct vdso_arch_data	data;
	u8			page[VDSO_ARCH_DATA_SIZE];
} vdso_arch_data_store __page_aligned_data;
struct vdso_arch_data *vdso_k_arch_data = &vdso_arch_data_store.data;
#endif /* CONFIG_ARCH_HAS_VDSO_ARCH_DATA */

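/*
 * Fault handler for the "[vvar]" special mapping.
 *
 * The mapping covers the time data page, the time namespace page, the
 * vDSO getrandom() data page and the architecture specific data pages.
 * The faulting page offset is translated to the matching kernel data
 * page and its PFN is inserted into the faulting task's page tables.
 * Offsets for which the corresponding feature is not enabled result in
 * SIGBUS. For tasks inside a time namespace the time data page and the
 * namespace page trade places; see the comments in the case labels below.
 */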
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *timens_page = find_timens_vvar_page(vma);
	unsigned long addr, pfn;
	vm_fault_t err;

	switch (vmf->pgoff) {
	case VDSO_TIME_PAGE_OFFSET:
		if (!IS_ENABLED(CONFIG_HAVE_GENERIC_VDSO))
			return VM_FAULT_SIGBUS;
		pfn = __phys_to_pfn(__pa_symbol(vdso_k_time_data));
		if (timens_page) {
			/*
			 * Fault in VVAR page too, since it will be accessed
			 * to get clock data anyway.
			 */
			addr = vmf->address + VDSO_TIMENS_PAGE_OFFSET * PAGE_SIZE;
			err = vmf_insert_pfn(vma, addr, pfn);
			if (unlikely(err & VM_FAULT_ERROR))
				return err;
			pfn = page_to_pfn(timens_page);
		}
		break;
	case VDSO_TIMENS_PAGE_OFFSET:
		/*
		 * If a task belongs to a time namespace then a namespace
		 * specific VVAR page is mapped at VDSO_TIME_PAGE_OFFSET and
		 * the real VVAR page is mapped at the VDSO_TIMENS_PAGE_OFFSET
		 * offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (!IS_ENABLED(CONFIG_TIME_NS) || !timens_page)
			return VM_FAULT_SIGBUS;
		pfn = __phys_to_pfn(__pa_symbol(vdso_k_time_data));
		break;
	case VDSO_RNG_PAGE_OFFSET:
		if (!IS_ENABLED(CONFIG_VDSO_GETRANDOM))
			return VM_FAULT_SIGBUS;
		pfn = __phys_to_pfn(__pa_symbol(vdso_k_rng_data));
		break;
	case VDSO_ARCH_PAGES_START ... VDSO_ARCH_PAGES_END:
		if (!IS_ENABLED(CONFIG_ARCH_HAS_VDSO_ARCH_DATA))
			return VM_FAULT_SIGBUS;
		pfn = __phys_to_pfn(__pa_symbol(vdso_k_arch_data)) +
			vmf->pgoff - VDSO_ARCH_PAGES_START;
		break;
	default:
		return VM_FAULT_SIGBUS;
	}

	return vmf_insert_pfn(vma, vmf->address, pfn);
}

const struct vm_special_mapping vdso_vvar_mapping = {
	.name	= "[vvar]",
	.fault	= vvar_fault,
};

struct vm_area_struct *vdso_install_vvar_mapping(struct mm_struct *mm, unsigned long addr)
{
	return _install_special_mapping(mm, addr, VDSO_NR_PAGES * PAGE_SIZE,
					VM_READ | VM_MAYREAD | VM_IO | VM_DONTDUMP |
					VM_PFNMAP | VM_SEALED_SYSMAP,
					&vdso_vvar_mapping);
}

#ifdef CONFIG_TIME_NS
/*
 * The vvar page layout depends on whether a task belongs to the root or
 * non-root time namespace. Whenever a task changes its namespace, the VVAR
 * page tables are cleared and then they will be re-faulted with a
 * corresponding layout.
 * See also the comment near timens_setup_vdso_clock_data() for details.
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		if (vma_is_special_mapping(vma, &vdso_vvar_mapping))
			zap_vma_pages(vma);
	}
	mmap_read_unlock(mm);

	return 0;
}
#endif
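
/*
 * Sketch of a typical caller: architecture vDSO setup code maps the data
 * pages next to the vDSO code pages. example_setup_additional_pages() is
 * a placeholder name for illustration; the call pattern follows existing
 * users of vdso_install_vvar_mapping():
 *
 *	static int example_setup_additional_pages(struct mm_struct *mm,
 *						  unsigned long vdso_base)
 *	{
 *		struct vm_area_struct *vma;
 *
 *		vma = vdso_install_vvar_mapping(mm, vdso_base);
 *		if (IS_ERR(vma))
 *			return PTR_ERR(vma);
 *
 *		return 0;
 *	}
 */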