1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2010, 2012 Konstantin Belousov <kib@FreeBSD.org> 5 * Copyright (c) 2015 The FreeBSD Foundation 6 * All rights reserved. 7 * 8 * Portions of this software were developed by Konstantin Belousov 9 * under sponsorship from the FreeBSD Foundation. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 
31 */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 #include "opt_vm.h" 37 38 #include <sys/param.h> 39 #include <sys/systm.h> 40 #include <sys/kernel.h> 41 #include <sys/lock.h> 42 #include <sys/malloc.h> 43 #include <sys/rwlock.h> 44 #include <sys/stddef.h> 45 #include <sys/sysent.h> 46 #include <sys/sysctl.h> 47 #include <sys/vdso.h> 48 49 #include <vm/vm.h> 50 #include <vm/vm_param.h> 51 #include <vm/pmap.h> 52 #include <vm/vm_extern.h> 53 #include <vm/vm_kern.h> 54 #include <vm/vm_map.h> 55 #include <vm/vm_object.h> 56 #include <vm/vm_page.h> 57 #include <vm/vm_pager.h> 58 59 static struct sx shared_page_alloc_sx; 60 static vm_object_t shared_page_obj; 61 static int shared_page_free; 62 char *shared_page_mapping; 63 64 #ifdef RANDOM_FENESTRASX 65 static struct vdso_fxrng_generation *fxrng_shpage_mapping; 66 67 static bool fxrng_enabled = true; 68 SYSCTL_BOOL(_debug, OID_AUTO, fxrng_vdso_enable, CTLFLAG_RWTUN, &fxrng_enabled, 69 0, "Enable FXRNG VDSO"); 70 #endif 71 72 void 73 shared_page_write(int base, int size, const void *data) 74 { 75 76 bcopy(data, shared_page_mapping + base, size); 77 } 78 79 static int 80 shared_page_alloc_locked(int size, int align) 81 { 82 int res; 83 84 res = roundup(shared_page_free, align); 85 if (res + size >= IDX_TO_OFF(shared_page_obj->size)) 86 res = -1; 87 else 88 shared_page_free = res + size; 89 return (res); 90 } 91 92 int 93 shared_page_alloc(int size, int align) 94 { 95 int res; 96 97 sx_xlock(&shared_page_alloc_sx); 98 res = shared_page_alloc_locked(size, align); 99 sx_xunlock(&shared_page_alloc_sx); 100 return (res); 101 } 102 103 int 104 shared_page_fill(int size, int align, const void *data) 105 { 106 int res; 107 108 sx_xlock(&shared_page_alloc_sx); 109 res = shared_page_alloc_locked(size, align); 110 if (res != -1) 111 shared_page_write(res, size, data); 112 sx_xunlock(&shared_page_alloc_sx); 113 return (res); 114 } 115 116 static void 117 shared_page_init(void *dummy __unused) 118 { 119 vm_page_t m; 
120 vm_offset_t addr; 121 122 sx_init(&shared_page_alloc_sx, "shpsx"); 123 shared_page_obj = vm_pager_allocate(OBJT_PHYS, 0, PAGE_SIZE, 124 VM_PROT_DEFAULT, 0, NULL); 125 VM_OBJECT_WLOCK(shared_page_obj); 126 m = vm_page_grab(shared_page_obj, 0, VM_ALLOC_ZERO); 127 VM_OBJECT_WUNLOCK(shared_page_obj); 128 vm_page_valid(m); 129 vm_page_xunbusy(m); 130 addr = kva_alloc(PAGE_SIZE); 131 pmap_qenter(addr, &m, 1); 132 shared_page_mapping = (char *)addr; 133 } 134 135 SYSINIT(shp, SI_SUB_EXEC, SI_ORDER_FIRST, (sysinit_cfunc_t)shared_page_init, 136 NULL); 137 138 /* 139 * Push the timehands update to the shared page. 140 * 141 * The lockless update scheme is similar to the one used to update the 142 * in-kernel timehands, see sys/kern/kern_tc.c:tc_windup() (which 143 * calls us after the timehands are updated). 144 */ 145 static void 146 timehands_update(struct vdso_sv_tk *svtk) 147 { 148 struct vdso_timehands th; 149 struct vdso_timekeep *tk; 150 uint32_t enabled, idx; 151 152 enabled = tc_fill_vdso_timehands(&th); 153 th.th_gen = 0; 154 idx = svtk->sv_timekeep_curr; 155 if (++idx >= VDSO_TH_NUM) 156 idx = 0; 157 svtk->sv_timekeep_curr = idx; 158 if (++svtk->sv_timekeep_gen == 0) 159 svtk->sv_timekeep_gen = 1; 160 161 tk = (struct vdso_timekeep *)(shared_page_mapping + 162 svtk->sv_timekeep_off); 163 tk->tk_th[idx].th_gen = 0; 164 atomic_thread_fence_rel(); 165 if (enabled) 166 tk->tk_th[idx] = th; 167 atomic_store_rel_32(&tk->tk_th[idx].th_gen, svtk->sv_timekeep_gen); 168 atomic_store_rel_32(&tk->tk_current, idx); 169 170 /* 171 * The ordering of the assignment to tk_enabled relative to 172 * the update of the vdso_timehands is not important. 
173 */ 174 tk->tk_enabled = enabled; 175 } 176 177 #ifdef COMPAT_FREEBSD32 178 static void 179 timehands_update32(struct vdso_sv_tk *svtk) 180 { 181 struct vdso_timehands32 th; 182 struct vdso_timekeep32 *tk; 183 uint32_t enabled, idx; 184 185 enabled = tc_fill_vdso_timehands32(&th); 186 th.th_gen = 0; 187 idx = svtk->sv_timekeep_curr; 188 if (++idx >= VDSO_TH_NUM) 189 idx = 0; 190 svtk->sv_timekeep_curr = idx; 191 if (++svtk->sv_timekeep_gen == 0) 192 svtk->sv_timekeep_gen = 1; 193 194 tk = (struct vdso_timekeep32 *)(shared_page_mapping + 195 svtk->sv_timekeep_off); 196 tk->tk_th[idx].th_gen = 0; 197 atomic_thread_fence_rel(); 198 if (enabled) 199 tk->tk_th[idx] = th; 200 atomic_store_rel_32(&tk->tk_th[idx].th_gen, svtk->sv_timekeep_gen); 201 atomic_store_rel_32(&tk->tk_current, idx); 202 tk->tk_enabled = enabled; 203 } 204 #endif 205 206 /* 207 * This is hackish, but easiest way to avoid creating list structures 208 * that needs to be iterated over from the hardclock interrupt 209 * context. 
210 */ 211 static struct vdso_sv_tk *host_svtk; 212 #ifdef COMPAT_FREEBSD32 213 static struct vdso_sv_tk *compat32_svtk; 214 #endif 215 216 void 217 timekeep_push_vdso(void) 218 { 219 220 if (host_svtk != NULL) 221 timehands_update(host_svtk); 222 #ifdef COMPAT_FREEBSD32 223 if (compat32_svtk != NULL) 224 timehands_update32(compat32_svtk); 225 #endif 226 } 227 228 struct vdso_sv_tk * 229 alloc_sv_tk(void) 230 { 231 struct vdso_sv_tk *svtk; 232 int tk_base; 233 uint32_t tk_ver; 234 235 tk_ver = VDSO_TK_VER_CURR; 236 svtk = malloc(sizeof(struct vdso_sv_tk), M_TEMP, M_WAITOK | M_ZERO); 237 tk_base = shared_page_alloc(sizeof(struct vdso_timekeep) + 238 sizeof(struct vdso_timehands) * VDSO_TH_NUM, 16); 239 KASSERT(tk_base != -1, ("tk_base -1 for native")); 240 shared_page_write(tk_base + offsetof(struct vdso_timekeep, tk_ver), 241 sizeof(uint32_t), &tk_ver); 242 svtk->sv_timekeep_off = tk_base; 243 timekeep_push_vdso(); 244 return (svtk); 245 } 246 247 #ifdef COMPAT_FREEBSD32 248 struct vdso_sv_tk * 249 alloc_sv_tk_compat32(void) 250 { 251 struct vdso_sv_tk *svtk; 252 int tk_base; 253 uint32_t tk_ver; 254 255 svtk = malloc(sizeof(struct vdso_sv_tk), M_TEMP, M_WAITOK | M_ZERO); 256 tk_ver = VDSO_TK_VER_CURR; 257 tk_base = shared_page_alloc(sizeof(struct vdso_timekeep32) + 258 sizeof(struct vdso_timehands32) * VDSO_TH_NUM, 16); 259 KASSERT(tk_base != -1, ("tk_base -1 for 32bit")); 260 shared_page_write(tk_base + offsetof(struct vdso_timekeep32, 261 tk_ver), sizeof(uint32_t), &tk_ver); 262 svtk->sv_timekeep_off = tk_base; 263 timekeep_push_vdso(); 264 return (svtk); 265 } 266 #endif 267 268 #ifdef RANDOM_FENESTRASX 269 void 270 fxrng_push_seed_generation(uint64_t gen) 271 { 272 if (fxrng_shpage_mapping == NULL || !fxrng_enabled) 273 return; 274 KASSERT(gen < INT32_MAX, 275 ("fxrng seed version shouldn't roll over a 32-bit counter " 276 "for approximately 456,000 years")); 277 atomic_store_rel_32(&fxrng_shpage_mapping->fx_generation32, 278 (uint32_t)gen); 279 } 280 281 
/*
 * Allocate a full cache line in the shared page for the fxrng root
 * generation record and stamp it with the current VDSO version.
 */
static void
alloc_sv_fxrng_generation(void)
{
	int base;

	/*
	 * Allocate a full cache line for the fxrng root generation (64-bit
	 * counter, or truncated 32-bit counter on ILP32 userspace).  It is
	 * important that the line is not shared with frequently dirtied data,
	 * and the shared page allocator lacks a __read_mostly mechanism.
	 * However, PAGE_SIZE is typically large relative to the amount of
	 * stuff we've got in it so far, so maybe the possible waste isn't an
	 * issue.
	 */
	base = shared_page_alloc(CACHE_LINE_SIZE, CACHE_LINE_SIZE);
	KASSERT(base != -1, ("%s: base allocation failed", __func__));
	fxrng_shpage_mapping = (void *)(shared_page_mapping + base);
	*fxrng_shpage_mapping = (struct vdso_fxrng_generation) {
		.fx_vdso_version = VDSO_FXRNG_VER_CURR,
	};
}
#endif /* RANDOM_FENESTRASX */

/*
 * Populate shared-page resources for a sysentvec: copy in the signal
 * trampoline (or full VDSO image when SV_DSO_SIG is set), register the
 * timekeep area and, when configured, the fxrng generation record.
 * The resulting page offsets are stored back into the sysentvec.
 */
void
exec_sysvec_init(void *param)
{
	struct sysentvec *sv;
	u_int flags;
	int res;

	sv = param;
	flags = sv->sv_flags;
	/* ABIs without a shared page mapping need no setup. */
	if ((flags & SV_SHP) == 0)
		return;
	MPASS(sv->sv_shared_page_obj == NULL);
	MPASS(sv->sv_shared_page_base != 0);

	sv->sv_shared_page_obj = shared_page_obj;
	if ((flags & SV_ABI_MASK) == SV_ABI_FREEBSD) {
		if ((flags & SV_DSO_SIG) != 0) {
			/*
			 * NOTE(review): with SV_DSO_SIG set, sv_szsigcode
			 * appears to carry the VDSO image size by value
			 * (hence the uintptr_t cast) rather than pointing
			 * at it -- confirm against the sysentvec setup.
			 */
			res = shared_page_fill((uintptr_t)sv->sv_szsigcode,
			    16, sv->sv_sigcode);
			if (res == -1)
				panic("copying vdso to shared page");
			sv->sv_vdso_offset = res;
			sv->sv_sigcode_offset = res + sv->sv_sigcodeoff;
		} else {
			res = shared_page_fill(*(sv->sv_szsigcode),
			    16, sv->sv_sigcode);
			if (res == -1)
				panic("copying sigtramp to shared page");
			sv->sv_sigcode_offset = res;
		}
	}
	if ((flags & SV_TIMEKEEP) != 0) {
#ifdef COMPAT_FREEBSD32
		if ((flags & SV_ILP32) != 0) {
			/*
			 * The native FreeBSD compat32 sysentvec allocates
			 * the shared area; other 32-bit ABIs reuse it and
			 * (per the KASSERT) must be initialized after it.
			 */
			if ((flags & SV_ABI_MASK) == SV_ABI_FREEBSD) {
				KASSERT(compat32_svtk == NULL,
				    ("Compat32 already registered"));
				compat32_svtk = alloc_sv_tk_compat32();
			} else {
				KASSERT(compat32_svtk != NULL,
				    ("Compat32 not registered"));
			}
			sv->sv_timekeep_offset = compat32_svtk->sv_timekeep_off;
		} else {
#endif
			/* Same registration scheme for the 64-bit host ABI. */
			if ((flags & SV_ABI_MASK) == SV_ABI_FREEBSD) {
				KASSERT(host_svtk == NULL,
				    ("Host already registered"));
				host_svtk = alloc_sv_tk();
			} else {
				KASSERT(host_svtk != NULL,
				    ("Host not registered"));
			}
			sv->sv_timekeep_offset = host_svtk->sv_timekeep_off;
#ifdef COMPAT_FREEBSD32
		}
#endif
	}
#ifdef RANDOM_FENESTRASX
	if ((flags & (SV_ABI_MASK | SV_RNG_SEED_VER)) ==
	    (SV_ABI_FREEBSD | SV_RNG_SEED_VER)) {
		/*
		 * Only allocate a single VDSO entry for multiple sysentvecs,
		 * i.e., native and COMPAT32.
		 */
		if (fxrng_shpage_mapping == NULL)
			alloc_sv_fxrng_generation();
		sv->sv_fxrng_gen_offset =
		    (char *)fxrng_shpage_mapping - shared_page_mapping;
	}
#endif
}

/*
 * Copy the shared-page layout computed for primary sysentvec 'sv' into
 * its secondary counterpart 'sv2', after asserting that the two agree
 * on every shared-page-relevant feature flag.
 */
void
exec_sysvec_init_secondary(struct sysentvec *sv, struct sysentvec *sv2)
{
	MPASS((sv2->sv_flags & SV_ABI_MASK) == (sv->sv_flags & SV_ABI_MASK));
	MPASS((sv2->sv_flags & SV_TIMEKEEP) == (sv->sv_flags & SV_TIMEKEEP));
	MPASS((sv2->sv_flags & SV_SHP) != 0 && (sv->sv_flags & SV_SHP) != 0);
	MPASS((sv2->sv_flags & SV_DSO_SIG) == (sv->sv_flags & SV_DSO_SIG));
	MPASS((sv2->sv_flags & SV_RNG_SEED_VER) ==
	    (sv->sv_flags & SV_RNG_SEED_VER));

	sv2->sv_shared_page_obj = sv->sv_shared_page_obj;
	sv2->sv_sigcode_offset = sv->sv_sigcode_offset;
	sv2->sv_vdso_offset = sv->sv_vdso_offset;
	/* Timekeep/fxrng offsets are only copied for the FreeBSD ABI. */
	if ((sv2->sv_flags & SV_ABI_MASK) != SV_ABI_FREEBSD)
		return;
	sv2->sv_timekeep_offset = sv->sv_timekeep_offset;
	sv2->sv_fxrng_gen_offset = sv->sv_fxrng_gen_offset;
}