xref: /freebsd/sys/compat/linuxkpi/common/src/linux_schedule.c (revision 094fc1ed0f2627525c7b0342efcbad5be7a8546a)
1 /*-
2  * Copyright (c) 2017 Mark Johnston <markj@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/proc.h>
33 #include <sys/signalvar.h>
34 #include <sys/sleepqueue.h>
35 
36 #include <linux/errno.h>
37 #include <linux/kernel.h>
38 #include <linux/list.h>
39 #include <linux/sched.h>
40 #include <linux/spinlock.h>
41 #include <linux/wait.h>
42 
/*
 * Put the current thread to sleep on @wchan via the native sleepqueue
 * mechanism, honoring the Linux task @state and an optional @timeout
 * (in ticks; 0 means sleep without a timeout).
 *
 * Called with the sleepqueue chain lock for @wchan held (all callers
 * do sleepq_lock(wchan) first); the sleepq_wait*() calls drop it.
 *
 * Returns 0 on a normal wakeup, -EWOULDBLOCK if the timeout expired,
 * or -ERESTARTSYS if the sleep was aborted by a signal.
 */
static int
linux_add_to_sleepqueue(void *wchan, const char *wmesg, int timeout, int state)
{
	int flags, ret;

	MPASS((state & ~TASK_NORMAL) == 0);

	/* Only TASK_INTERRUPTIBLE sleeps may be broken by signals. */
	flags = SLEEPQ_SLEEP | ((state & TASK_INTERRUPTIBLE) != 0 ?
	    SLEEPQ_INTERRUPTIBLE : 0);

	sleepq_add(wchan, NULL, wmesg, flags, 0);
	if (timeout != 0)
		sleepq_set_timeout(wchan, timeout);
	if ((state & TASK_INTERRUPTIBLE) != 0) {
		if (timeout == 0)
			ret = -sleepq_wait_sig(wchan, 0);
		else
			ret = -sleepq_timedwait_sig(wchan, 0);
	} else {
		if (timeout == 0) {
			sleepq_wait(wchan, 0);
			ret = 0;
		} else
			ret = -sleepq_timedwait(wchan, 0);
	}
	/*
	 * Filter return value: collapse any error other than a timeout
	 * (-EWOULDBLOCK) into the Linux signal-interrupt code.
	 */
	if (ret != 0 && ret != -EWOULDBLOCK)
		ret = -ERESTARTSYS;
	return (ret);
}
73 
/*
 * Wake @task if its current Linux task state matches any bit in @state.
 *
 * The task_struct pointer itself is the wait channel (see
 * linux_wait_event_common()/linux_schedule_timeout()).  Returns 1 if
 * the task was awakened, 0 if its state did not match.
 */
static int
wake_up_task(struct task_struct *task, unsigned int state)
{
	int ret, wakeup_swapper;

	ret = wakeup_swapper = 0;
	sleepq_lock(task);
	if ((atomic_load_acq_int(&task->state) & state) != 0) {
		/*
		 * Flag the task as TASK_WAKING while the sleepqueue lock
		 * is held, so a sleeper racing with us sees the wakeup
		 * and skips going to sleep.
		 */
		set_task_state(task, TASK_WAKING);
		wakeup_swapper = sleepq_signal(task, SLEEPQ_SLEEP, 0, 0);
		ret = 1;
	}
	sleepq_release(task);
	/* sleepq_signal() asked us to nudge the swapper; do it unlocked. */
	if (wakeup_swapper)
		kick_proc0();
	return (ret);
}
91 
92 bool
93 linux_signal_pending(struct task_struct *task)
94 {
95 	struct thread *td;
96 	sigset_t pending;
97 
98 	td = task->task_thread;
99 	PROC_LOCK(td->td_proc);
100 	pending = td->td_siglist;
101 	SIGSETOR(pending, td->td_proc->p_siglist);
102 	SIGSETNAND(pending, td->td_sigmask);
103 	PROC_UNLOCK(td->td_proc);
104 	return (!SIGISEMPTY(pending));
105 }
106 
107 bool
108 linux_fatal_signal_pending(struct task_struct *task)
109 {
110 	struct thread *td;
111 	bool ret;
112 
113 	td = task->task_thread;
114 	PROC_LOCK(td->td_proc);
115 	ret = SIGISMEMBER(td->td_siglist, SIGKILL) ||
116 	    SIGISMEMBER(td->td_proc->p_siglist, SIGKILL);
117 	PROC_UNLOCK(td->td_proc);
118 	return (ret);
119 }
120 
121 bool
122 linux_signal_pending_state(long state, struct task_struct *task)
123 {
124 
125 	MPASS((state & ~TASK_NORMAL) == 0);
126 
127 	if ((state & TASK_INTERRUPTIBLE) == 0)
128 		return (false);
129 	return (linux_signal_pending(task));
130 }
131 
132 void
133 linux_send_sig(int signo, struct task_struct *task)
134 {
135 	struct thread *td;
136 
137 	td = task->task_thread;
138 	PROC_LOCK(td->td_proc);
139 	tdsignal(td, signo);
140 	PROC_UNLOCK(td->td_proc);
141 }
142 
143 int
144 autoremove_wake_function(wait_queue_t *wq, unsigned int state, int flags,
145     void *key __unused)
146 {
147 	struct task_struct *task;
148 	int ret;
149 
150 	task = wq->private;
151 	if ((ret = wake_up_task(task, state)) != 0)
152 		list_del_init(&wq->task_list);
153 	return (ret);
154 }
155 
/*
 * Wake up to @nr entries on @wqh whose task state matches @state.
 * If @locked is true the caller already holds wqh->lock.
 *
 * NOTE(review): when @nr starts at 0, "--nr == 0" never fires and every
 * matching entry is awakened — presumably the Linux "wake all"
 * convention; confirm against callers.
 */
void
linux_wake_up(wait_queue_head_t *wqh, unsigned int state, int nr, bool locked)
{
	wait_queue_t *pos, *next;

	if (!locked)
		spin_lock(&wqh->lock);
	/* Safe iteration: a callback may unlink the current entry. */
	list_for_each_entry_safe(pos, next, &wqh->task_list, task_list) {
		if (pos->func == NULL) {
			/* No callback: wake the stored task directly. */
			if (wake_up_task(pos->private, state) != 0 && --nr == 0)
				break;
		} else {
			if (pos->func(pos, state, 0, NULL) != 0 && --nr == 0)
				break;
		}
	}
	if (!locked)
		spin_unlock(&wqh->lock);
}
175 
/*
 * Queue @wq on @wqh (unless already queued) and move the current task
 * into @state, both under wqh->lock so a concurrent wakeup cannot be
 * missed between the enqueue and the state change.
 */
void
linux_prepare_to_wait(wait_queue_head_t *wqh, wait_queue_t *wq, int state)
{

	spin_lock(&wqh->lock);
	if (list_empty(&wq->task_list))
		__add_wait_queue(wqh, wq);
	set_task_state(current, state);
	spin_unlock(&wqh->lock);
}
186 
/*
 * Undo linux_prepare_to_wait(): mark the current task runnable and
 * unlink @wq from @wqh if a wakeup callback did not already do so.
 */
void
linux_finish_wait(wait_queue_head_t *wqh, wait_queue_t *wq)
{

	spin_lock(&wqh->lock);
	set_task_state(current, TASK_RUNNING);
	if (!list_empty(&wq->task_list)) {
		__remove_wait_queue(wqh, wq);
		/* Reset so a later prepare_to_wait() sees it unqueued. */
		INIT_LIST_HEAD(&wq->task_list);
	}
	spin_unlock(&wqh->lock);
}
199 
200 bool
201 linux_waitqueue_active(wait_queue_head_t *wqh)
202 {
203 	bool ret;
204 
205 	spin_lock(&wqh->lock);
206 	ret = !list_empty(&wqh->task_list);
207 	spin_unlock(&wqh->lock);
208 	return (ret);
209 }
210 
/*
 * Core of the Linux wait_event*() family: sleep on the current
 * task_struct (the wait channel) until awakened or @timeout ticks
 * elapse.  @timeout of MAX_SCHEDULE_TIMEOUT means no timeout; @lock,
 * if non-NULL, is dropped across the sleep and re-taken afterwards.
 *
 * Returns 0 on wakeup, -EWOULDBLOCK on timeout, -ERESTARTSYS if
 * interrupted by a signal.
 */
int
linux_wait_event_common(wait_queue_head_t *wqh, wait_queue_t *wq, int timeout,
    unsigned int state, spinlock_t *lock)
{
	struct task_struct *task;
	int ret;

	if (lock != NULL)
		spin_unlock_irq(lock);

	DROP_GIANT();

	/* range check timeout: clamp to >= 1 tick, 0 encodes "forever" */
	if (timeout < 1)
		timeout = 1;
	else if (timeout == MAX_SCHEDULE_TIMEOUT)
		timeout = 0;

	task = current;

	/*
	 * Our wait queue entry is on the stack - make sure it doesn't
	 * get swapped out while we sleep.
	 */
#ifndef NO_SWAPPING
	PHOLD(task->task_thread->td_proc);
#endif
	sleepq_lock(task);
	if (atomic_load_acq_int(&task->state) != TASK_WAKING) {
		ret = linux_add_to_sleepqueue(task, "wevent", timeout, state);
	} else {
		/* A wakeup raced ahead of us; don't sleep at all. */
		sleepq_release(task);
		ret = linux_signal_pending_state(state, task) ? -ERESTARTSYS : 0;
	}
#ifndef NO_SWAPPING
	PRELE(task->task_thread->td_proc);
#endif

	PICKUP_GIANT();

	if (lock != NULL)
		spin_lock_irq(lock);
	return (ret);
}
255 
/*
 * Linux schedule_timeout(): sleep in the task's current state for up
 * to @timeout ticks (MAX_SCHEDULE_TIMEOUT = sleep until awakened).
 *
 * Returns MAX_SCHEDULE_TIMEOUT for an untimed sleep, otherwise the
 * number of ticks remaining, clamped to [0, timeout].
 */
int
linux_schedule_timeout(int timeout)
{
	struct task_struct *task;
	int state;
	int remainder;

	task = current;

	/* range check timeout: clamp to >= 1 tick, 0 encodes "forever" */
	if (timeout < 1)
		timeout = 1;
	else if (timeout == MAX_SCHEDULE_TIMEOUT)
		timeout = 0;

	/* Record the absolute deadline before sleeping. */
	remainder = ticks + timeout;

	DROP_GIANT();

	sleepq_lock(task);
	state = atomic_load_acq_int(&task->state);
	if (state != TASK_WAKING)
		(void)linux_add_to_sleepqueue(task, "sched", timeout, state);
	else
		/* A wakeup already arrived; skip the sleep. */
		sleepq_release(task);
	set_task_state(task, TASK_RUNNING);

	PICKUP_GIANT();

	if (timeout == 0)
		return (MAX_SCHEDULE_TIMEOUT);

	/* range check return value (deadline minus now, clamped) */
	remainder -= ticks;
	if (remainder < 0)
		remainder = 0;
	else if (remainder > timeout)
		remainder = timeout;
	return (remainder);
}
296 
297 static void
298 wake_up_sleepers(void *wchan)
299 {
300 	int wakeup_swapper;
301 
302 	sleepq_lock(wchan);
303 	wakeup_swapper = sleepq_signal(wchan, SLEEPQ_SLEEP, 0, 0);
304 	sleepq_release(wchan);
305 	if (wakeup_swapper)
306 		kick_proc0();
307 }
308 
/*
 * Derive a unique wait channel for (word, bit) by packing the bit
 * index into the low bits of the shifted word address.
 * NOTE(review): the 6-bit shift assumes bit < 64 and discards the
 * pointer's top bits — presumably collision-tolerant; verify.
 */
#define	bit_to_wchan(word, bit)	((void *)(((uintptr_t)(word) << 6) | (bit)))

/*
 * Wake all threads waiting (via linux_wait_on_bit_timeout()) on @bit
 * of @word.
 */
void
linux_wake_up_bit(void *word, int bit)
{

	wake_up_sleepers(bit_to_wchan(word, bit));
}
317 
318 int
319 linux_wait_on_bit_timeout(unsigned long *word, int bit, unsigned int state,
320     int timeout)
321 {
322 	struct task_struct *task;
323 	void *wchan;
324 	int ret;
325 
326 	DROP_GIANT();
327 
328 	/* range check timeout */
329 	if (timeout < 1)
330 		timeout = 1;
331 	else if (timeout == MAX_SCHEDULE_TIMEOUT)
332 		timeout = 0;
333 
334 	task = current;
335 	wchan = bit_to_wchan(word, bit);
336 	for (;;) {
337 		sleepq_lock(wchan);
338 		if ((*word & (1 << bit)) == 0) {
339 			sleepq_release(wchan);
340 			ret = 0;
341 			break;
342 		}
343 		set_task_state(task, state);
344 		ret = linux_add_to_sleepqueue(wchan, "wbit", timeout, state);
345 		if (ret != 0)
346 			break;
347 	}
348 	set_task_state(task, TASK_RUNNING);
349 
350 	PICKUP_GIANT();
351 
352 	return (ret);
353 }
354 
355 void
356 linux_wake_up_atomic_t(atomic_t *a)
357 {
358 
359 	wake_up_sleepers(a);
360 }
361 
/*
 * Sleep (without timeout) until the counter @a reaches zero.  The
 * sleepqueue lock on @a's address serializes the counter test against
 * linux_wake_up_atomic_t().
 *
 * Returns 0 once the counter is zero, or -ERESTARTSYS if interrupted
 * by a signal.
 */
int
linux_wait_on_atomic_t(atomic_t *a, unsigned int state)
{
	struct task_struct *task;
	void *wchan;
	int ret;

	DROP_GIANT();

	task = current;
	wchan = a;
	for (;;) {
		sleepq_lock(wchan);
		if (atomic_read(a) == 0) {
			sleepq_release(wchan);
			ret = 0;
			break;
		}
		/* Re-arm the task state before each sleep attempt. */
		set_task_state(task, state);
		ret = linux_add_to_sleepqueue(wchan, "watomic", 0, state);
		if (ret != 0)
			break;
	}
	set_task_state(task, TASK_RUNNING);

	PICKUP_GIANT();

	return (ret);
}
391 
392 bool
393 linux_wake_up_state(struct task_struct *task, unsigned int state)
394 {
395 
396 	return (wake_up_task(task, state) != 0);
397 }
398