/*-
 * Copyright (c) 2010, 2012 Konstantin Belousov <kib@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/sysent.h>
#include <sys/sysctl.h>
#include <sys/vdso.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

static struct sx shared_page_alloc_sx;
static vm_object_t shared_page_obj;
static int shared_page_free;
char *shared_page_mapping;

void
shared_page_write(int base, int size, const void *data)
{

	bcopy(data, shared_page_mapping + base, size);
}

static int
shared_page_alloc_locked(int size, int align)
{
	int res;

	res = roundup(shared_page_free, align);
	if (res + size >= IDX_TO_OFF(shared_page_obj->size))
		res = -1;
	else
		shared_page_free = res + size;
	return (res);
}

int
shared_page_alloc(int size, int align)
{
	int res;

	sx_xlock(&shared_page_alloc_sx);
	res = shared_page_alloc_locked(size, align);
	sx_xunlock(&shared_page_alloc_sx);
	return (res);
}

int
shared_page_fill(int size, int align, const void *data)
{
	int res;

	sx_xlock(&shared_page_alloc_sx);
	res = shared_page_alloc_locked(size, align);
	if (res != -1)
		shared_page_write(res, size, data);
	sx_xunlock(&shared_page_alloc_sx);
	return (res);
}
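
/*
 * An illustrative sketch (not part of the original file) of how a caller
 * is expected to consume the allocator above: shared_page_fill() returns
 * an offset into the single shared page, and the caller turns it into a
 * user-visible address by adding the sysentvec's shared page base.  The
 * names "blob" and "uaddr" below are placeholders.
 *
 *	static const char blob[] = "example";
 *	vm_offset_t uaddr;
 *	int off;
 *
 *	off = shared_page_fill(sizeof(blob), 16, blob);
 *	if (off != -1)
 *		uaddr = sv->sv_shared_page_base + off;
 *
 * exec_sysvec_init() below follows this pattern for the signal trampoline
 * (sv_sigcode_base) and for the vdso timekeep area.
 */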

static void
shared_page_init(void *dummy __unused)
{
	vm_page_t m;
	vm_offset_t addr;

	sx_init(&shared_page_alloc_sx, "shpsx");
	shared_page_obj = vm_pager_allocate(OBJT_PHYS, 0, PAGE_SIZE,
	    VM_PROT_DEFAULT, 0, NULL);
	VM_OBJECT_WLOCK(shared_page_obj);
	m = vm_page_grab(shared_page_obj, 0, VM_ALLOC_NOBUSY | VM_ALLOC_ZERO);
	m->valid = VM_PAGE_BITS_ALL;
	VM_OBJECT_WUNLOCK(shared_page_obj);
	addr = kva_alloc(PAGE_SIZE);
	pmap_qenter(addr, &m, 1);
	shared_page_mapping = (char *)addr;
}

SYSINIT(shp, SI_SUB_EXEC, SI_ORDER_FIRST, (sysinit_cfunc_t)shared_page_init,
    NULL);

/*
 * Push the timehands update to the shared page.
 *
 * The lockless update scheme is similar to the one used to update the
 * in-kernel timehands, see sys/kern/kern_tc.c:tc_windup() (which
 * calls us after the timehands are updated).
 */
static void
timehands_update(struct sysentvec *sv)
{
	struct vdso_timehands th;
	struct vdso_timekeep *tk;
	uint32_t enabled, idx;

	enabled = tc_fill_vdso_timehands(&th);
	th.th_gen = 0;
	idx = sv->sv_timekeep_curr;
	if (++idx >= VDSO_TH_NUM)
		idx = 0;
	sv->sv_timekeep_curr = idx;
	if (++sv->sv_timekeep_gen == 0)
		sv->sv_timekeep_gen = 1;

	tk = (struct vdso_timekeep *)(shared_page_mapping +
	    sv->sv_timekeep_off);
	tk->tk_th[idx].th_gen = 0;
	atomic_thread_fence_rel();
	if (enabled)
		tk->tk_th[idx] = th;
	atomic_store_rel_32(&tk->tk_th[idx].th_gen, sv->sv_timekeep_gen);
	atomic_store_rel_32(&tk->tk_current, idx);

	/*
	 * The ordering of the assignment to tk_enabled relative to
	 * the update of the vdso_timehands is not important.
	 */
	tk->tk_enabled = enabled;
}

#ifdef COMPAT_FREEBSD32
static void
timehands_update32(struct sysentvec *sv)
{
	struct vdso_timehands32 th;
	struct vdso_timekeep32 *tk;
	uint32_t enabled, idx;

	enabled = tc_fill_vdso_timehands32(&th);
	th.th_gen = 0;
	idx = sv->sv_timekeep_curr;
	if (++idx >= VDSO_TH_NUM)
		idx = 0;
	sv->sv_timekeep_curr = idx;
	if (++sv->sv_timekeep_gen == 0)
		sv->sv_timekeep_gen = 1;

	tk = (struct vdso_timekeep32 *)(shared_page_mapping +
	    sv->sv_timekeep_off);
	tk->tk_th[idx].th_gen = 0;
	atomic_thread_fence_rel();
	if (enabled)
		tk->tk_th[idx] = th;
	atomic_store_rel_32(&tk->tk_th[idx].th_gen, sv->sv_timekeep_gen);
	atomic_store_rel_32(&tk->tk_current, idx);
	tk->tk_enabled = enabled;
}
#endif
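
/*
 * For reference, a sketch of the matching consumer-side loop (the real
 * reader lives in libc's __vdso_gettimeofday.c and is more elaborate);
 * it pairs with the release stores above by rechecking the generation
 * after copying the slot:
 *
 *	do {
 *		if (!tk->tk_enabled)
 *			return (ENOSYS);
 *		curr = atomic_load_acq_32(&tk->tk_current);
 *		gen = atomic_load_acq_32(&tk->tk_th[curr].th_gen);
 *		th = tk->tk_th[curr];
 *		atomic_thread_fence_acq();
 *	} while (curr != tk->tk_current || gen == 0 ||
 *	    gen != tk->tk_th[curr].th_gen);
 *
 * A slot whose th_gen is zero is in the middle of being rewritten, so
 * the reader retries until it observes a nonzero generation that did
 * not change while the slot was being copied.
 */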

/*
 * This is hackish, but it is the easiest way to avoid creating list
 * structures that need to be iterated over from the hardclock interrupt
 * context.
 */
static struct sysentvec *host_sysentvec;
#ifdef COMPAT_FREEBSD32
static struct sysentvec *compat32_sysentvec;
#endif

void
timekeep_push_vdso(void)
{

	if (host_sysentvec != NULL && host_sysentvec->sv_timekeep_base != 0)
		timehands_update(host_sysentvec);
#ifdef COMPAT_FREEBSD32
	if (compat32_sysentvec != NULL &&
	    compat32_sysentvec->sv_timekeep_base != 0)
		timehands_update32(compat32_sysentvec);
#endif
}

void
exec_sysvec_init(void *param)
{
	struct sysentvec *sv;
	int tk_base;
	uint32_t tk_ver;

	sv = (struct sysentvec *)param;

	if ((sv->sv_flags & SV_SHP) == 0)
		return;
	sv->sv_shared_page_obj = shared_page_obj;
	sv->sv_sigcode_base = sv->sv_shared_page_base +
	    shared_page_fill(*(sv->sv_szsigcode), 16, sv->sv_sigcode);
	if ((sv->sv_flags & SV_ABI_MASK) != SV_ABI_FREEBSD)
		return;
	tk_ver = VDSO_TK_VER_CURR;
#ifdef COMPAT_FREEBSD32
	if ((sv->sv_flags & SV_ILP32) != 0) {
		tk_base = shared_page_alloc(sizeof(struct vdso_timekeep32) +
		    sizeof(struct vdso_timehands32) * VDSO_TH_NUM, 16);
		KASSERT(tk_base != -1, ("tk_base -1 for 32bit"));
		shared_page_write(tk_base + offsetof(struct vdso_timekeep32,
		    tk_ver), sizeof(uint32_t), &tk_ver);
		KASSERT(compat32_sysentvec == 0,
		    ("Native compat32 already registered"));
		compat32_sysentvec = sv;
	} else {
#endif
		tk_base = shared_page_alloc(sizeof(struct vdso_timekeep) +
		    sizeof(struct vdso_timehands) * VDSO_TH_NUM, 16);
		KASSERT(tk_base != -1, ("tk_base -1 for native"));
		shared_page_write(tk_base + offsetof(struct vdso_timekeep,
		    tk_ver), sizeof(uint32_t), &tk_ver);
		KASSERT(host_sysentvec == 0, ("Native already registered"));
		host_sysentvec = sv;
#ifdef COMPAT_FREEBSD32
	}
#endif
	sv->sv_timekeep_base = sv->sv_shared_page_base + tk_base;
	sv->sv_timekeep_off = tk_base;
	timekeep_push_vdso();
}
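
/*
 * A sketch of how an ABI ends up here (the exact SYSINIT shown is an
 * assumption for illustration, not part of this file): the MD sysentvec
 * is registered at SI_SUB_EXEC with an order later than SI_ORDER_FIRST,
 * so shared_page_init() above has already created the page object by
 * the time exec_sysvec_init() allocates from it.
 *
 *	SYSINIT(elf64_sysvec, SI_SUB_EXEC, SI_ORDER_ANY,
 *	    (sysinit_cfunc_t)exec_sysvec_init, &elf64_freebsd_sysvec);
 */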