/*-
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 * $FreeBSD$
 */

#include "opt_mac.h"
#ifdef __i386__
#include "opt_npx.h"
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mac.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kse.h>
#include <sys/ktr.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>
#include <machine/cpu.h>
#include <machine/pcb.h>

/*
 * Handle the work that must be done before returning to user mode,
 * for both traps and system calls.
 *
 * MPSAFE
 */
void
userret(struct thread *td, struct trapframe *frame, u_int oticks)
{
	struct proc *p = td->td_proc;

	CTR3(KTR_SYSC, "userret: thread %p (pid %d, %s)", td, p->p_pid,
            p->p_comm);
#ifdef INVARIANTS
	/* Check that we called signotify() enough. */
	mtx_lock(&Giant);
	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	if (SIGPENDING(p) && ((p->p_sflag & PS_NEEDSIGCHK) == 0 ||
	    (td->td_flags & TDF_ASTPENDING) == 0))
		printf("failed to set signal flags properly for ast()\n");
	mtx_unlock_spin(&sched_lock);
	PROC_UNLOCK(p);
	mtx_unlock(&Giant);
#endif

	/*
	 * Let the scheduler adjust our priority etc.
	 */
	sched_userret(td);

	/*
	 * Check whether we have to exit or wait due to a single-threading
	 * requirement or some other STOP condition.  Don't bother doing
	 * all the work if the stop bits are not set at this time; if we
	 * miss it, we miss it, no big deal.
	 */
	if (P_SHOULDSTOP(p)) {
		PROC_LOCK(p);
		thread_suspend_check(0);	/* Can suspend or kill */
		PROC_UNLOCK(p);
	}

	/*
	 * Do special thread processing, e.g. upcall tweaking and such.
	 */
	if (p->p_flag & P_THREADED) {
		thread_userret(td, frame);
	}

	/*
	 * Charge system time if profiling.
	 *
	 * XXX should move PS_PROFIL to a place that can obviously be
	 * accessed safely without sched_lock.
	 */
	if (p->p_sflag & PS_PROFIL) {
		quad_t ticks;

		mtx_lock_spin(&sched_lock);
		ticks = td->td_sticks - oticks;
		mtx_unlock_spin(&sched_lock);
		addupc_task(td, TRAPF_PC(frame), (u_int)ticks * psratio);
	}
}

/*
 * Process an asynchronous software trap.
 * This is relatively easy.
 * This function will return with preemption disabled.
 */
void
ast(struct trapframe *framep)
{
	struct thread *td;
	struct proc *p;
	struct kse *ke;
	struct ksegrp *kg;
	struct rlimit *rlim;
	u_int prticks, sticks;
	int sflag;
	int flags;
	int sig;
#if defined(DEV_NPX) && !defined(SMP)
	int ucode;
#endif

	td = curthread;
	p = td->td_proc;
	kg = td->td_ksegrp;

	CTR3(KTR_SYSC, "ast: thread %p (pid %d, %s)", td, p->p_pid,
            p->p_comm);
	KASSERT(TRAPF_USERMODE(framep), ("ast in kernel mode"));
	WITNESS_WARN(WARN_PANIC, NULL, "Returning to user mode");
	mtx_assert(&Giant, MA_NOTOWNED);
	mtx_assert(&sched_lock, MA_NOTOWNED);
	td->td_frame = framep;

	/*
	 * This copies the p_sflag bits needed for the checks below and
	 * clears them, together with the astpending flag, in one "atomic"
	 * operation under sched_lock.  If another AST is triggered while
	 * we are handling the ASTs saved in sflag, the astpending flag
	 * will be set again and ast() will be called again.
	 */
	mtx_lock_spin(&sched_lock);
	ke = td->td_kse;
	sticks = td->td_sticks;
	flags = td->td_flags;
	sflag = p->p_sflag;
	p->p_sflag &= ~(PS_ALRMPEND | PS_NEEDSIGCHK | PS_PROFPEND | PS_XCPU);
#ifdef MAC
	p->p_sflag &= ~PS_MACPEND;
#endif
	td->td_flags &= ~(TDF_ASTPENDING | TDF_NEEDRESCHED | TDF_OWEUPC);
	cnt.v_soft++;
	prticks = 0;
	if (flags & TDF_OWEUPC && sflag & PS_PROFIL) {
		prticks = p->p_stats->p_prof.pr_ticks;
		p->p_stats->p_prof.pr_ticks = 0;
	}
	mtx_unlock_spin(&sched_lock);
	/*
	 * XXXKSE While the fact that we owe a user profiling tick is
	 * stored per KSE in this code, the statistics themselves are
	 * still stored per process.  This should probably change;
	 * possibly the location of both should change.
	 */

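	/*
	 * If the process credentials changed while this thread was in
	 * the kernel, refresh the thread's cached copy.
	 */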
	if (td->td_ucred != p->p_ucred)
		cred_update_thread(td);
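	/*
	 * Charge the user profiling ticks noted above (TDF_OWEUPC)
	 * against the saved profiling address.
	 */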
	if (flags & TDF_OWEUPC && sflag & PS_PROFIL)
		addupc_task(td, p->p_stats->p_prof.pr_addr, prticks);
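	/* A virtual interval timer expired; post SIGVTALRM. */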
	if (sflag & PS_ALRMPEND) {
		PROC_LOCK(p);
		psignal(p, SIGVTALRM);
		PROC_UNLOCK(p);
	}
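	/*
	 * A pending FPU (npx) exception was recorded in the PCB; convert
	 * it into a SIGFPE for the process (i386, non-SMP only).
	 */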
#if defined(DEV_NPX) && !defined(SMP)
	if (PCPU_GET(curpcb)->pcb_flags & PCB_NPXTRAP) {
		atomic_clear_int(&PCPU_GET(curpcb)->pcb_flags,
		    PCB_NPXTRAP);
		ucode = npxtrap();
		if (ucode != -1) {
			trapsignal(p, SIGFPE, ucode);
		}
	}
#endif
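	/* A profiling interval timer expired; post SIGPROF. */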
	if (sflag & PS_PROFPEND) {
		PROC_LOCK(p);
		psignal(p, SIGPROF);
		PROC_UNLOCK(p);
	}
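	/*
	 * The process has exceeded a CPU time limit.  Kill it once the
	 * hard limit is reached; otherwise post SIGXCPU and push the
	 * current limit out by five seconds, up to the hard limit.
	 */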
	if (sflag & PS_XCPU) {
		PROC_LOCK(p);
		rlim = &p->p_rlimit[RLIMIT_CPU];
		if (p->p_runtime.sec >= rlim->rlim_max)
			killproc(p, "exceeded maximum CPU limit");
		else {
			psignal(p, SIGXCPU);
			mtx_lock_spin(&sched_lock);
			if (p->p_cpulimit < rlim->rlim_max)
				p->p_cpulimit += 5;
			mtx_unlock_spin(&sched_lock);
		}
		PROC_UNLOCK(p);
	}
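	/*
	 * Let the MAC framework perform any per-thread processing it
	 * deferred to return-to-user time.
	 */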
#ifdef MAC
	if (sflag & PS_MACPEND)
		mac_thread_userret(td);
#endif
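	/*
	 * A reschedule was requested.  Drop to the thread's user priority
	 * and switch away; this counts as an involuntary context switch.
	 */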
	if (flags & TDF_NEEDRESCHED) {
		mtx_lock_spin(&sched_lock);
		sched_prio(td, kg->kg_user_pri);
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		mtx_unlock_spin(&sched_lock);
	}
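	/*
	 * Check for and deliver any pending signals.  For a threaded
	 * (KSE) process that handled at least one signal, note that an
	 * upcall is owed unless we are already returning to the upcall
	 * entry point.
	 */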
	if (sflag & PS_NEEDSIGCHK) {
		int sigs;

		sigs = 0;
		PROC_LOCK(p);
		while ((sig = cursig(td)) != 0) {
			postsig(sig);
			sigs++;
		}
		PROC_UNLOCK(p);
		if (p->p_flag & P_THREADED && sigs) {
			struct kse_upcall *ku = td->td_upcall;
			if ((void *)TRAPF_PC(framep) != ku->ku_func) {
				mtx_lock_spin(&sched_lock);
				ku->ku_flags |= KUF_DOUPCALL;
				mtx_unlock_spin(&sched_lock);
			}
		}
	}

	userret(td, framep, sticks);
#ifdef DIAGNOSTIC
	cred_free_thread(td);
#endif
	mtx_assert(&Giant, MA_NOTOWNED);
}