/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2007, 2022 The FreeBSD Foundation
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Portions of this software were developed by A. Joseph Koshy under
 * sponsorship from the FreeBSD Foundation and Google, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_hwpmc_hooks.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/msan.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/ktr.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/syscall.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <machine/cpu.h>

#ifdef VIMAGE
#include <net/vnet.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#ifdef EPOCH_TRACE
#include <sys/epoch.h>
#endif

void (*tcp_hpts_softclock)(void);

/*
 * Define the code needed before returning to user mode, for trap and
 * syscall.
 */
void
userret(struct thread *td, struct trapframe *frame)
{
	struct proc *p = td->td_proc;

	CTR3(KTR_SYSC, "userret: thread %p (pid %d, %s)", td, p->p_pid,
	    td->td_name);
	KASSERT((p->p_flag & P_WEXIT) == 0,
	    ("Exiting process returns to usermode"));
#ifdef DIAGNOSTIC
	/*
	 * Check that we called signotify() enough.  For
	 * multi-threaded processes, where signal distribution might
	 * change due to other threads changing sigmask, the check is
	 * racy and cannot be performed reliably.
	 * If the current process is a vfork child, indicated by P_PPWAIT,
	 * then issignal() ignores stops, so we suppress the check to avoid
	 * classifying pending signals.
	 */
	if (p->p_numthreads == 1) {
		PROC_LOCK(p);
		thread_lock(td);
		if ((p->p_flag & P_PPWAIT) == 0 &&
		    (td->td_pflags & TDP_SIGFASTBLOCK) == 0 &&
		    SIGPENDING(td) && !td_ast_pending(td, TDA_AST) &&
		    !td_ast_pending(td, TDA_SIG)) {
			thread_unlock(td);
			panic(
			    "failed to set signal flags for ast p %p "
			    "td %p td_ast %#x fl %#x",
			    p, td, td->td_ast, td->td_flags);
		}
		thread_unlock(td);
		PROC_UNLOCK(p);
	}
#endif

	/*
	 * Charge system time if profiling.
	 */
	if (__predict_false(p->p_flag & P_PROFIL))
		addupc_task(td, TRAPF_PC(frame), td->td_pticks * psratio);

#ifdef HWPMC_HOOKS
	if (PMC_THREAD_HAS_SAMPLES(td))
		PMC_CALL_HOOK(td, PMC_FN_THR_USERRET, NULL);
#endif
	/*
	 * Calling tcp_hpts_softclock() here allows us to avoid frequent,
	 * expensive callouts that trash the cache and lead to a much higher
	 * number of interrupts and context switches.  Testing on busy web
	 * servers at Netflix has shown that this improves CPU use by 7% over
	 * relying only on callouts to drive HPTS, and also results in idle
	 * power savings on mostly idle servers.
	 * This was inspired by the paper "Soft Timers: Efficient Microsecond
	 * Software Timer Support for Network Processing"
	 * by Mohit Aron and Peter Druschel.
	 * (An illustrative sketch of this soft-timer pattern follows
	 * userret() below.)
	 */
	tcp_hpts_softclock();
	/*
	 * Let the scheduler adjust our priority etc.
	 */
	sched_userret(td);

	/*
	 * Check for misbehavior.
	 *
	 * If callchain tracing is in progress because of hwpmc(4), skip
	 * the scheduler pinning check: the hwpmc(4) subsystem will, in
	 * fact, collect the callchain information at the ast() checkpoint,
	 * which is past userret().
	 */
	WITNESS_WARN(WARN_PANIC, NULL, "userret: returning");
	KASSERT(td->td_critnest == 0,
	    ("userret: Returning in a critical section"));
	KASSERT(td->td_locks == 0,
	    ("userret: Returning with %d locks held", td->td_locks));
	KASSERT(td->td_rw_rlocks == 0,
	    ("userret: Returning with %d rwlocks held in read mode",
	    td->td_rw_rlocks));
	KASSERT(td->td_sx_slocks == 0,
	    ("userret: Returning with %d sx locks held in shared mode",
	    td->td_sx_slocks));
	KASSERT(td->td_lk_slocks == 0,
	    ("userret: Returning with %d lockmanager locks held in shared mode",
	    td->td_lk_slocks));
	KASSERT((td->td_pflags & TDP_NOFAULTING) == 0,
	    ("userret: Returning with pagefaults disabled"));
	if (__predict_false(!THREAD_CAN_SLEEP())) {
#ifdef EPOCH_TRACE
		epoch_trace_list(curthread);
#endif
		KASSERT(0, ("userret: Returning with sleep disabled"));
	}
	KASSERT(td->td_pinned == 0 || (td->td_pflags & TDP_CALLCHAIN) != 0,
	    ("userret: Returning with pinned thread"));
	KASSERT(td->td_vp_reserved == NULL,
	    ("userret: Returning with preallocated vnode"));
	KASSERT((td->td_flags & (TDF_SBDRY | TDF_SEINTR | TDF_SERESTART)) == 0,
	    ("userret: Returning with stop signals deferred"));
	KASSERT(td->td_vslock_sz == 0,
	    ("userret: Returning with vslock-wired space"));
#ifdef VIMAGE
	/* Unfortunately td_vnet_lpush needs VNET_DEBUG. */
	VNET_ASSERT(curvnet == NULL,
	    ("%s: Returning on td %p (pid %d, %s) with vnet %p set in %s",
	    __func__, td, p->p_pid, td->td_name, curvnet,
	    (td->td_vnet_lpush != NULL) ? td->td_vnet_lpush : "N/A"));
#endif
}
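
/*
 * Illustration (not compiled in): a minimal sketch of the soft-timer
 * pattern described in the comment inside userret() above.  All names
 * here (example_deadline, example_expired(), example_softclock(),
 * example_mod_load()) are hypothetical and exist only for this sketch;
 * the real consumer of the tcp_hpts_softclock hook is the TCP HPTS
 * subsystem.  The idea: record a deadline and poll it from a path that
 * already runs very frequently, such as the return to user mode, so no
 * extra callout or interrupt is needed while the machine is busy.
 */
#if 0
static sbintime_t example_deadline;

static void
example_expired(void)
{
	/* Deferred work would run here. */
}

static void
example_softclock(void)
{
	/* A cheap compare on a hot path replaces a fine-grained callout. */
	if (sbinuptime() >= example_deadline)
		example_expired();
}

static void
example_mod_load(void)
{
	/* Hook the user-return path; userret() then drives the timer. */
	tcp_hpts_softclock = example_softclock;
}
#endif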

static void
ast_prep(struct thread *td, int tda __unused)
{
	VM_CNT_INC(v_trap);
	td->td_pticks = 0;
	if (td->td_cowgen != atomic_load_int(&td->td_proc->p_cowgen))
		thread_cow_update(td);
}

struct ast_entry {
	int	ae_flags;
	int	ae_tdp;
	void	(*ae_f)(struct thread *td, int ast);
};

_Static_assert(TDAI(TDA_MAX) <= UINT_MAX, "Too many ASTs");

static struct ast_entry ast_entries[TDA_MAX] __read_mostly = {
	[TDA_AST] = { .ae_f = ast_prep, .ae_flags = ASTR_UNCOND },
};

void
ast_register(int ast, int flags, int tdp,
    void (*f)(struct thread *, int asts))
{
	struct ast_entry *ae;

	MPASS(ast < TDA_MAX);
	MPASS((flags & ASTR_TDP) == 0 || ((flags & ASTR_ASTF_REQUIRED) != 0 &&
	    __bitcount(tdp) == 1));
	ae = &ast_entries[ast];
	MPASS(ae->ae_f == NULL);
	ae->ae_flags = flags;
	ae->ae_tdp = tdp;
	atomic_interrupt_fence();
	ae->ae_f = f;
}

/*
 * XXXKIB Note that the deregistration of an AST handler does not
 * drain threads possibly executing it, which affects unloadable
 * modules.  The issue is either handled by the subsystem using
 * handlers, or simply ignored.  Fixing the problem is considered not
 * worth the overhead.
 */
void
ast_deregister(int ast)
{
	struct ast_entry *ae;

	MPASS(ast < TDA_MAX);
	ae = &ast_entries[ast];
	MPASS(ae->ae_f != NULL);
	ae->ae_f = NULL;
	atomic_interrupt_fence();
	ae->ae_flags = 0;
	ae->ae_tdp = 0;
}
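
/*
 * Illustration (not compiled in): the intended life cycle of an AST
 * handler registered through the interface above.  TDA_EXAMPLE and the
 * example_* functions are hypothetical; a real consumer owns one of the
 * fixed TDA_* slots below TDA_MAX defined in <sys/proc.h>.  With
 * ASTR_ASTF_REQUIRED the handler runs only for threads that were marked
 * with ast_sched(), and the fences around ae_f in ast_register() and
 * ast_handler() ensure ae_flags and ae_tdp are visible before the
 * handler pointer can be observed.
 */
#if 0
static void
example_ast(struct thread *td, int tda __unused)
{
	/* Executed with td about to return to user mode. */
}

static void
example_init(void)
{
	ast_register(TDA_EXAMPLE, ASTR_ASTF_REQUIRED, 0, example_ast);
}

static void
example_poke(struct thread *td)
{
	/* Request example_ast() at td's next return to user mode. */
	ast_sched(td, TDA_EXAMPLE);
}
#endif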

void
ast_sched_locked(struct thread *td, int tda)
{
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	MPASS(tda < TDA_MAX);

	td->td_ast |= TDAI(tda);
}

void
ast_unsched_locked(struct thread *td, int tda)
{
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	MPASS(tda < TDA_MAX);

	td->td_ast &= ~TDAI(tda);
}

void
ast_sched(struct thread *td, int tda)
{
	thread_lock(td);
	ast_sched_locked(td, tda);
	thread_unlock(td);
}

void
ast_sched_mask(struct thread *td, int ast)
{
	thread_lock(td);
	td->td_ast |= ast;
	thread_unlock(td);
}

static bool
ast_handler_calc_tdp_run(struct thread *td, const struct ast_entry *ae)
{
	return ((ae->ae_flags & ASTR_TDP) == 0 ||
	    (td->td_pflags & ae->ae_tdp) != 0);
}

/*
 * Process an asynchronous software trap.
 */
static void
ast_handler(struct thread *td, struct trapframe *framep, bool dtor)
{
	struct ast_entry *ae;
	void (*f)(struct thread *td, int asts);
	int a, td_ast;
	bool run;

	if (framep != NULL) {
		kmsan_mark(framep, sizeof(*framep), KMSAN_STATE_INITED);
		td->td_frame = framep;
	}

	if (__predict_true(!dtor)) {
		WITNESS_WARN(WARN_PANIC, NULL, "Returning to user mode");
		mtx_assert(&Giant, MA_NOTOWNED);
		THREAD_LOCK_ASSERT(td, MA_NOTOWNED);

		/*
		 * This updates td_ast for the checks below in one atomic
		 * operation, together with turning off all scheduled ASTs.
		 * If another AST is triggered while we are handling the
		 * ASTs saved in td_ast, td_ast becomes non-zero again and
		 * ast() will be called again.
		 */
		thread_lock(td);
		td_ast = td->td_ast;
		td->td_ast = 0;
		thread_unlock(td);
	} else {
		/*
		 * The thread's td_lock is not guaranteed to exist; the
		 * thread might not be initialized enough when its
		 * destructor is called.  It is safe to read and update
		 * td_ast without locking since the thread is not runnable
		 * or visible to other threads.
		 */
		td_ast = td->td_ast;
		td->td_ast = 0;
	}

	CTR3(KTR_SYSC, "ast: thread %p (pid %d, %s)", td, td->td_proc->p_pid,
	    td->td_proc->p_comm);
	KASSERT(framep == NULL || TRAPF_USERMODE(framep),
	    ("ast in kernel mode"));

	for (a = 0; a < nitems(ast_entries); a++) {
		ae = &ast_entries[a];
		f = ae->ae_f;
		if (f == NULL)
			continue;
		atomic_interrupt_fence();

		run = false;
		if (__predict_false(framep == NULL)) {
			if ((ae->ae_flags & ASTR_KCLEAR) != 0)
				run = ast_handler_calc_tdp_run(td, ae);
		} else {
			if ((ae->ae_flags & ASTR_UNCOND) != 0)
				run = true;
			else if ((ae->ae_flags & ASTR_ASTF_REQUIRED) != 0 &&
			    (td_ast & TDAI(a)) != 0)
				run = ast_handler_calc_tdp_run(td, ae);
		}
		if (run)
			f(td, td_ast);
	}
}

void
ast(struct trapframe *framep)
{
	struct thread *td;

	td = curthread;
	ast_handler(td, framep, false);
	userret(td, framep);
}

void
ast_kclear(struct thread *td)
{
	ast_handler(td, NULL, td != curthread);
}

const char *
syscallname(struct proc *p, u_int code)
{
	static const char unknown[] = "unknown";
	struct sysentvec *sv;

	sv = p->p_sysent;
	if (sv->sv_syscallnames == NULL || code >= sv->sv_size)
		return (unknown);
	return (sv->sv_syscallnames[code]);
}
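
/*
 * Illustration (not compiled in): syscallname() is convenient in
 * diagnostics that fire while a syscall is in flight.  The consumer
 * below is hypothetical and assumes td_sa.code holds the current
 * syscall number, as set up by the syscall entry path.
 */
#if 0
static void
example_report(struct thread *td)
{
	printf("pid %d is executing %s\n", td->td_proc->p_pid,
	    syscallname(td->td_proc, td->td_sa.code));
}
#endif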