xref: /freebsd/sys/compat/linuxkpi/common/src/linux_schedule.c (revision 24f93aa05f31d8b67368954967ef4cbdde350779)
1 /*-
2  * Copyright (c) 2017 Mark Johnston <markj@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/proc.h>
33 #include <sys/signalvar.h>
34 #include <sys/sleepqueue.h>
35 
36 #include <linux/errno.h>
37 #include <linux/kernel.h>
38 #include <linux/list.h>
39 #include <linux/sched.h>
40 #include <linux/spinlock.h>
41 #include <linux/wait.h>
42 
/*
 * Put the current thread to sleep on "wchan" using the kernel
 * sleepqueue primitives, translating the Linux task state into
 * sleepqueue flags.  A timeout of 0 means sleep without a time limit.
 *
 * Returns 0 on a normal wakeup, -EWOULDBLOCK if the timeout expired,
 * or -ERESTARTSYS if the sleep was interrupted by a signal; in the
 * latter case the raw sleepqueue error is saved in "task" for later
 * retrieval.
 *
 * The caller must hold the sleepqueue chain lock for "wchan"
 * (sleepq_lock()); it is released by the sleepq_*wait*() calls.
 */
static int
linux_add_to_sleepqueue(void *wchan, struct task_struct *task,
    const char *wmesg, int timeout, int state)
{
	int flags, ret;

	MPASS((state & ~TASK_NORMAL) == 0);

	/* Only interruptible sleeps may be aborted by signals. */
	flags = SLEEPQ_SLEEP | ((state & TASK_INTERRUPTIBLE) != 0 ?
	    SLEEPQ_INTERRUPTIBLE : 0);

	sleepq_add(wchan, NULL, wmesg, flags, 0);
	if (timeout != 0)
		sleepq_set_timeout(wchan, timeout);
	if ((state & TASK_INTERRUPTIBLE) != 0) {
		if (timeout == 0)
			ret = -sleepq_wait_sig(wchan, 0);
		else
			ret = -sleepq_timedwait_sig(wchan, 0);
	} else {
		if (timeout == 0) {
			sleepq_wait(wchan, 0);
			ret = 0;
		} else
			ret = -sleepq_timedwait(wchan, 0);
	}
	/*
	 * Filter the return value: anything other than success or a
	 * timeout means a signal arrived.  Preserve the original error
	 * in the task and report Linux-style -ERESTARTSYS instead.
	 */
	if (ret != 0 && ret != -EWOULDBLOCK) {
		linux_schedule_save_interrupt_value(task, ret);
		ret = -ERESTARTSYS;
	}
	return (ret);
}
76 
/*
 * Wake "task" if its current state matches one of the states in the
 * "state" mask.  The task_struct itself is used as the wait channel.
 * Returns 1 if the task was awakened, 0 otherwise.
 */
static int
wake_up_task(struct task_struct *task, unsigned int state)
{
	int ret, wakeup_swapper;

	ret = wakeup_swapper = 0;
	sleepq_lock(task);
	if ((atomic_read(&task->state) & state) != 0) {
		/*
		 * Mark the task TASK_WAKING before signalling so that a
		 * sleeper racing to block can observe the wakeup (see
		 * the TASK_WAKING checks before sleeping elsewhere in
		 * this file).
		 */
		set_task_state(task, TASK_WAKING);
		wakeup_swapper = sleepq_signal(task, SLEEPQ_SLEEP, 0, 0);
		ret = 1;
	}
	sleepq_release(task);
	if (wakeup_swapper)
		kick_proc0();
	return (ret);
}
94 
95 bool
96 linux_signal_pending(struct task_struct *task)
97 {
98 	struct thread *td;
99 	sigset_t pending;
100 
101 	td = task->task_thread;
102 	PROC_LOCK(td->td_proc);
103 	pending = td->td_siglist;
104 	SIGSETOR(pending, td->td_proc->p_siglist);
105 	SIGSETNAND(pending, td->td_sigmask);
106 	PROC_UNLOCK(td->td_proc);
107 	return (!SIGISEMPTY(pending));
108 }
109 
110 bool
111 linux_fatal_signal_pending(struct task_struct *task)
112 {
113 	struct thread *td;
114 	bool ret;
115 
116 	td = task->task_thread;
117 	PROC_LOCK(td->td_proc);
118 	ret = SIGISMEMBER(td->td_siglist, SIGKILL) ||
119 	    SIGISMEMBER(td->td_proc->p_siglist, SIGKILL);
120 	PROC_UNLOCK(td->td_proc);
121 	return (ret);
122 }
123 
124 bool
125 linux_signal_pending_state(long state, struct task_struct *task)
126 {
127 
128 	MPASS((state & ~TASK_NORMAL) == 0);
129 
130 	if ((state & TASK_INTERRUPTIBLE) == 0)
131 		return (false);
132 	return (linux_signal_pending(task));
133 }
134 
135 void
136 linux_send_sig(int signo, struct task_struct *task)
137 {
138 	struct thread *td;
139 
140 	td = task->task_thread;
141 	PROC_LOCK(td->td_proc);
142 	tdsignal(td, signo);
143 	PROC_UNLOCK(td->td_proc);
144 }
145 
146 int
147 autoremove_wake_function(wait_queue_t *wq, unsigned int state, int flags,
148     void *key __unused)
149 {
150 	struct task_struct *task;
151 	int ret;
152 
153 	task = wq->private;
154 	if ((ret = wake_up_task(task, state)) != 0)
155 		list_del_init(&wq->task_list);
156 	return (ret);
157 }
158 
159 void
160 linux_wake_up(wait_queue_head_t *wqh, unsigned int state, int nr, bool locked)
161 {
162 	wait_queue_t *pos, *next;
163 
164 	if (!locked)
165 		spin_lock(&wqh->lock);
166 	list_for_each_entry_safe(pos, next, &wqh->task_list, task_list) {
167 		if (pos->func == NULL) {
168 			if (wake_up_task(pos->private, state) != 0 && --nr == 0)
169 				break;
170 		} else {
171 			if (pos->func(pos, state, 0, NULL) != 0 && --nr == 0)
172 				break;
173 		}
174 	}
175 	if (!locked)
176 		spin_unlock(&wqh->lock);
177 }
178 
/*
 * Enqueue the wait entry on the wait queue head (unless it is already
 * queued) and move the current task into the given sleep state, all
 * under wqh->lock so a concurrent wakeup cannot slip between the two
 * steps.
 */
void
linux_prepare_to_wait(wait_queue_head_t *wqh, wait_queue_t *wq, int state)
{

	spin_lock(&wqh->lock);
	if (list_empty(&wq->task_list))
		__add_wait_queue(wqh, wq);
	set_task_state(current, state);
	spin_unlock(&wqh->lock);
}
189 
/*
 * Undo linux_prepare_to_wait(): mark the current task runnable again
 * and remove the wait entry from the queue if a wakeup has not
 * already unlinked it (e.g. via autoremove_wake_function()).
 */
void
linux_finish_wait(wait_queue_head_t *wqh, wait_queue_t *wq)
{

	spin_lock(&wqh->lock);
	set_task_state(current, TASK_RUNNING);
	if (!list_empty(&wq->task_list)) {
		__remove_wait_queue(wqh, wq);
		INIT_LIST_HEAD(&wq->task_list);
	}
	spin_unlock(&wqh->lock);
}
202 
203 bool
204 linux_waitqueue_active(wait_queue_head_t *wqh)
205 {
206 	bool ret;
207 
208 	spin_lock(&wqh->lock);
209 	ret = !list_empty(&wqh->task_list);
210 	spin_unlock(&wqh->lock);
211 	return (ret);
212 }
213 
/*
 * Common sleep path for the wait_event*() macros.  Drops the optional
 * caller-held spinlock (and Giant) around the sleep and reacquires
 * them before returning.  A timeout of MAX_SCHEDULE_TIMEOUT means
 * sleep without a time limit.  Returns 0 on wakeup, -EWOULDBLOCK if
 * the timeout expired, or -ERESTARTSYS if the sleep was interrupted
 * by a signal.
 */
int
linux_wait_event_common(wait_queue_head_t *wqh, wait_queue_t *wq, int timeout,
    unsigned int state, spinlock_t *lock)
{
	struct task_struct *task;
	int ret;

	if (lock != NULL)
		spin_unlock_irq(lock);

	DROP_GIANT();

	/* range check timeout */
	if (timeout < 1)
		timeout = 1;
	else if (timeout == MAX_SCHEDULE_TIMEOUT)
		timeout = 0;

	task = current;

	/*
	 * Our wait queue entry is on the stack - make sure it doesn't
	 * get swapped out while we sleep.
	 */
	PHOLD(task->task_thread->td_proc);
	sleepq_lock(task);
	if (atomic_read(&task->state) != TASK_WAKING) {
		ret = linux_add_to_sleepqueue(task, task, "wevent", timeout, state);
	} else {
		/* A wakeup raced with us; do not sleep. */
		sleepq_release(task);
		ret = 0;
	}
	PRELE(task->task_thread->td_proc);

	PICKUP_GIANT();

	if (lock != NULL)
		spin_lock_irq(lock);
	return (ret);
}
254 
/*
 * Sleep in the task's current state for at most "timeout" ticks and
 * return the number of ticks remaining (clamped to at least 1 when
 * the sleep was interrupted by a signal, so callers retry).  A
 * timeout of MAX_SCHEDULE_TIMEOUT sleeps without a time limit and
 * returns MAX_SCHEDULE_TIMEOUT.
 */
int
linux_schedule_timeout(int timeout)
{
	struct task_struct *task;
	int ret;
	int state;
	int remainder;

	task = current;

	/* range check timeout */
	if (timeout < 1)
		timeout = 1;
	else if (timeout == MAX_SCHEDULE_TIMEOUT)
		timeout = 0;

	/* absolute tick count at which the timeout expires */
	remainder = ticks + timeout;

	DROP_GIANT();

	sleepq_lock(task);
	state = atomic_read(&task->state);
	if (state != TASK_WAKING) {
		ret = linux_add_to_sleepqueue(task, task, "sched", timeout, state);
	} else {
		/* A wakeup raced with us; do not sleep. */
		sleepq_release(task);
		ret = 0;
	}
	set_task_state(task, TASK_RUNNING);

	PICKUP_GIANT();

	if (timeout == 0)
		return (MAX_SCHEDULE_TIMEOUT);

	/* compute number of ticks left until the deadline */
	remainder -= ticks;

	/* range check return value */
	if (ret == -ERESTARTSYS && remainder < 1)
		remainder = 1;
	else if (remainder < 0)
		remainder = 0;
	else if (remainder > timeout)
		remainder = timeout;
	return (remainder);
}
302 
303 static void
304 wake_up_sleepers(void *wchan)
305 {
306 	int wakeup_swapper;
307 
308 	sleepq_lock(wchan);
309 	wakeup_swapper = sleepq_signal(wchan, SLEEPQ_SLEEP, 0, 0);
310 	sleepq_release(wchan);
311 	if (wakeup_swapper)
312 		kick_proc0();
313 }
314 
/*
 * Derive a unique wait channel address from a word address and a bit
 * number: the word address is shifted left by 6, leaving the low 6
 * bits free to encode bit numbers 0-63.
 */
#define	bit_to_wchan(word, bit)	((void *)(((uintptr_t)(word) << 6) | (bit)))

/*
 * Wake threads sleeping in linux_wait_on_bit_timeout() on the given
 * word and bit.
 */
void
linux_wake_up_bit(void *word, int bit)
{

	wake_up_sleepers(bit_to_wchan(word, bit));
}
323 
/*
 * Sleep until the given bit in *word is clear or the timeout expires.
 * A timeout of MAX_SCHEDULE_TIMEOUT means no time limit.  Returns 0
 * once the bit is observed clear, -EWOULDBLOCK if the timeout
 * expired, or -ERESTARTSYS if the sleep was interrupted by a signal
 * (interruptible states only).
 */
int
linux_wait_on_bit_timeout(unsigned long *word, int bit, unsigned int state,
    int timeout)
{
	struct task_struct *task;
	void *wchan;
	int ret;

	DROP_GIANT();

	/* range check timeout */
	if (timeout < 1)
		timeout = 1;
	else if (timeout == MAX_SCHEDULE_TIMEOUT)
		timeout = 0;

	task = current;
	wchan = bit_to_wchan(word, bit);
	for (;;) {
		sleepq_lock(wchan);
		/* Recheck the bit under the sleepqueue chain lock. */
		if ((*word & (1 << bit)) == 0) {
			sleepq_release(wchan);
			ret = 0;
			break;
		}
		set_task_state(task, state);
		ret = linux_add_to_sleepqueue(wchan, task, "wbit", timeout, state);
		if (ret != 0)
			break;
	}
	set_task_state(task, TASK_RUNNING);

	PICKUP_GIANT();

	return (ret);
}
360 
/*
 * Wake threads sleeping in linux_wait_on_atomic_t() on "a"; the
 * atomic_t's address is the wait channel.
 */
void
linux_wake_up_atomic_t(atomic_t *a)
{

	wake_up_sleepers(a);
}
367 
/*
 * Sleep (with no timeout) until the atomic_t reads zero.  Returns 0
 * once the value is observed zero, or -ERESTARTSYS if the sleep was
 * interrupted by a signal (interruptible states only).
 */
int
linux_wait_on_atomic_t(atomic_t *a, unsigned int state)
{
	struct task_struct *task;
	void *wchan;
	int ret;

	DROP_GIANT();

	task = current;
	wchan = a;
	for (;;) {
		sleepq_lock(wchan);
		/* Recheck the value under the sleepqueue chain lock. */
		if (atomic_read(a) == 0) {
			sleepq_release(wchan);
			ret = 0;
			break;
		}
		set_task_state(task, state);
		ret = linux_add_to_sleepqueue(wchan, task, "watomic", 0, state);
		if (ret != 0)
			break;
	}
	set_task_state(task, TASK_RUNNING);

	PICKUP_GIANT();

	return (ret);
}
397 
/*
 * Wake "task" if it is in one of the states in the "state" mask;
 * return true if it was awakened.
 */
bool
linux_wake_up_state(struct task_struct *task, unsigned int state)
{

	return (wake_up_task(task, state) != 0);
}
404