xref: /freebsd/sys/compat/linuxkpi/common/src/linux_schedule.c (revision 01518f5eede79cf65319d455eb50e78c9efa2b51)
1 /*-
2  * Copyright (c) 2017 Mark Johnston <markj@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/param.h>
28 #include <sys/systm.h>
29 #include <sys/proc.h>
30 #include <sys/signalvar.h>
31 #include <sys/sleepqueue.h>
32 
33 #include <linux/delay.h>
34 #include <linux/errno.h>
35 #include <linux/kernel.h>
36 #include <linux/list.h>
37 #include <linux/sched.h>
38 #include <linux/spinlock.h>
39 #include <linux/wait.h>
40 
/*
 * Put the current thread to sleep on the given wait channel, honoring the
 * Linux task state bits in "state" and an optional timeout in ticks
 * (0 means sleep without a timeout).
 *
 * Must be called with the sleepqueue chain lock for "wchan" held
 * (sleepq_lock()); the lock is dropped while sleeping.
 *
 * Returns 0 on normal wakeup, -EWOULDBLOCK on timeout, or -ERESTARTSYS
 * when the sleep was aborted by a signal; in the latter case the raw
 * sleepqueue error is saved in "task" for later retrieval.
 */
static int
linux_add_to_sleepqueue(void *wchan, struct task_struct *task,
    const char *wmesg, int timeout, int state)
{
	int flags, ret;

	MPASS((state & ~(TASK_PARKED | TASK_NORMAL)) == 0);

	/* Interruptible sleeps may be aborted by signals. */
	flags = SLEEPQ_SLEEP | ((state & TASK_INTERRUPTIBLE) != 0 ?
	    SLEEPQ_INTERRUPTIBLE : 0);

	sleepq_add(wchan, NULL, wmesg, flags, 0);
	if (timeout != 0)
		sleepq_set_timeout(wchan, timeout);

	/* Giant may not be held while sleeping on a sleepqueue. */
	DROP_GIANT();
	if ((state & TASK_INTERRUPTIBLE) != 0) {
		if (timeout == 0)
			ret = -sleepq_wait_sig(wchan, 0);
		else
			ret = -sleepq_timedwait_sig(wchan, 0);
	} else {
		if (timeout == 0) {
			sleepq_wait(wchan, 0);
			ret = 0;
		} else
			ret = -sleepq_timedwait(wchan, 0);
	}
	PICKUP_GIANT();

	/* filter return value */
	if (ret != 0 && ret != -EWOULDBLOCK) {
		/*
		 * Collapse any signal-related error to Linux's
		 * -ERESTARTSYS, preserving the original value in the task
		 * via linux_schedule_save_interrupt_value().
		 */
		linux_schedule_save_interrupt_value(task, ret);
		ret = -ERESTARTSYS;
	}
	return (ret);
}
78 
79 unsigned int
80 linux_msleep_interruptible(unsigned int ms)
81 {
82 	int ret;
83 
84 	/* guard against invalid values */
85 	if (ms == 0)
86 		ms = 1;
87 	ret = -pause_sbt("lnxsleep", mstosbt(ms), 0, C_HARDCLOCK | C_CATCH);
88 
89 	switch (ret) {
90 	case -EWOULDBLOCK:
91 		return (0);
92 	default:
93 		linux_schedule_save_interrupt_value(current, ret);
94 		return (ms);
95 	}
96 }
97 
/*
 * Wake the given task if its current state matches any of the bits in
 * "state".  Returns 1 if the task was awakened, 0 otherwise.
 */
static int
wake_up_task(struct task_struct *task, unsigned int state)
{
	int ret;

	ret = 0;
	sleepq_lock(task);
	if ((atomic_read(&task->state) & state) != 0) {
		/*
		 * Mark the task as waking before signalling so a racing
		 * sleeper (see linux_wait_event_common()) skips its sleep.
		 */
		set_task_state(task, TASK_WAKING);
		sleepq_signal(task, SLEEPQ_SLEEP, 0, 0);
		ret = 1;
	}
	sleepq_release(task);
	return (ret);
}
113 
114 bool
115 linux_signal_pending(struct task_struct *task)
116 {
117 	struct thread *td;
118 	sigset_t pending;
119 
120 	td = task->task_thread;
121 	PROC_LOCK(td->td_proc);
122 	pending = td->td_siglist;
123 	SIGSETOR(pending, td->td_proc->p_siglist);
124 	SIGSETNAND(pending, td->td_sigmask);
125 	PROC_UNLOCK(td->td_proc);
126 	return (!SIGISEMPTY(pending));
127 }
128 
129 bool
130 linux_fatal_signal_pending(struct task_struct *task)
131 {
132 	struct thread *td;
133 	bool ret;
134 
135 	td = task->task_thread;
136 	PROC_LOCK(td->td_proc);
137 	ret = SIGISMEMBER(td->td_siglist, SIGKILL) ||
138 	    SIGISMEMBER(td->td_proc->p_siglist, SIGKILL);
139 	PROC_UNLOCK(td->td_proc);
140 	return (ret);
141 }
142 
143 bool
144 linux_signal_pending_state(long state, struct task_struct *task)
145 {
146 
147 	MPASS((state & ~TASK_NORMAL) == 0);
148 
149 	if ((state & TASK_INTERRUPTIBLE) == 0)
150 		return (false);
151 	return (linux_signal_pending(task));
152 }
153 
154 void
155 linux_send_sig(int signo, struct task_struct *task)
156 {
157 	struct thread *td;
158 
159 	td = task->task_thread;
160 	PROC_LOCK(td->td_proc);
161 	tdsignal(td, signo);
162 	PROC_UNLOCK(td->td_proc);
163 }
164 
165 int
166 autoremove_wake_function(wait_queue_t *wq, unsigned int state, int flags,
167     void *key __unused)
168 {
169 	struct task_struct *task;
170 	int ret;
171 
172 	task = wq->private;
173 	if ((ret = wake_up_task(task, state)) != 0)
174 		list_del_init(&wq->task_list);
175 	return (ret);
176 }
177 
178 int
179 default_wake_function(wait_queue_t *wq, unsigned int state, int flags,
180     void *key __unused)
181 {
182 	return (wake_up_task(wq->private, state));
183 }
184 
185 void
186 linux_init_wait_entry(wait_queue_t *wq, int flags)
187 {
188 
189 	memset(wq, 0, sizeof(*wq));
190 	wq->flags = flags;
191 	wq->private = current;
192 	wq->func = autoremove_wake_function;
193 	INIT_LIST_HEAD(&wq->task_list);
194 }
195 
196 void
197 linux_wake_up(wait_queue_head_t *wqh, unsigned int state, int nr, bool locked)
198 {
199 	wait_queue_t *pos, *next;
200 
201 	if (!locked)
202 		spin_lock(&wqh->lock);
203 	list_for_each_entry_safe(pos, next, &wqh->task_list, task_list) {
204 		if (pos->func == NULL) {
205 			if (wake_up_task(pos->private, state) != 0 && --nr == 0)
206 				break;
207 		} else {
208 			if (pos->func(pos, state, 0, NULL) != 0 && --nr == 0)
209 				break;
210 		}
211 	}
212 	if (!locked)
213 		spin_unlock(&wqh->lock);
214 }
215 
/*
 * First half of a wait_event() style sleep: enqueue the wait entry on
 * the wait queue head (unless already queued) and publish the new task
 * state.  The state must be set while wqh->lock is held so a concurrent
 * wake-up cannot be missed.
 */
void
linux_prepare_to_wait(wait_queue_head_t *wqh, wait_queue_t *wq, int state)
{

	spin_lock(&wqh->lock);
	if (list_empty(&wq->task_list))
		__add_wait_queue(wqh, wq);
	set_task_state(current, state);
	spin_unlock(&wqh->lock);
}
226 
/*
 * Second half of a wait_event() style sleep: mark the current task
 * runnable again and unlink the wait entry if a wake-up did not already
 * remove it (as autoremove_wake_function() does).
 */
void
linux_finish_wait(wait_queue_head_t *wqh, wait_queue_t *wq)
{

	spin_lock(&wqh->lock);
	set_task_state(current, TASK_RUNNING);
	if (!list_empty(&wq->task_list)) {
		__remove_wait_queue(wqh, wq);
		INIT_LIST_HEAD(&wq->task_list);
	}
	spin_unlock(&wqh->lock);
}
239 
240 bool
241 linux_waitqueue_active(wait_queue_head_t *wqh)
242 {
243 	bool ret;
244 
245 	spin_lock(&wqh->lock);
246 	ret = !list_empty(&wqh->task_list);
247 	spin_unlock(&wqh->lock);
248 	return (ret);
249 }
250 
/*
 * Common sleep path for the wait_event*() macros.  "lock" is an optional
 * spinlock held by the caller; it is dropped across the sleep and
 * re-acquired before returning.  Returns 0 on wakeup, -EWOULDBLOCK on
 * timeout or -ERESTARTSYS if the sleep was interrupted by a signal.
 */
int
linux_wait_event_common(wait_queue_head_t *wqh, wait_queue_t *wq, int timeout,
    unsigned int state, spinlock_t *lock)
{
	struct task_struct *task;
	int ret;

	if (lock != NULL)
		spin_unlock_irq(lock);

	/* range check timeout */
	if (timeout < 1)
		timeout = 1;
	else if (timeout == MAX_SCHEDULE_TIMEOUT)
		timeout = 0;	/* 0 means sleep without a timeout */

	task = current;

	/*
	 * If a racing wake-up already set TASK_WAKING (see
	 * wake_up_task()), skip the sleep entirely.
	 */
	sleepq_lock(task);
	if (atomic_read(&task->state) != TASK_WAKING) {
		ret = linux_add_to_sleepqueue(task, task, "wevent", timeout,
		    state);
	} else {
		sleepq_release(task);
		ret = 0;
	}

	if (lock != NULL)
		spin_lock_irq(lock);
	return (ret);
}
282 
/*
 * Sleep for up to "timeout" ticks unless awakened earlier.  Returns the
 * number of ticks remaining (clamped: at least 1 when interrupted by a
 * signal, never negative, never more than the requested timeout), or
 * MAX_SCHEDULE_TIMEOUT when the sleep was unbounded.
 */
int
linux_schedule_timeout(int timeout)
{
	struct task_struct *task;
	int ret;
	int state;
	int remainder;

	task = current;

	/* range check timeout */
	if (timeout < 1)
		timeout = 1;
	else if (timeout == MAX_SCHEDULE_TIMEOUT)
		timeout = 0;	/* 0 means sleep without a timeout */

	/* remember when the sleep is due to end */
	remainder = ticks + timeout;

	/* Skip the sleep entirely if a wake-up already raced in. */
	sleepq_lock(task);
	state = atomic_read(&task->state);
	if (state != TASK_WAKING) {
		ret = linux_add_to_sleepqueue(task, task, "sched", timeout,
		    state);
	} else {
		sleepq_release(task);
		ret = 0;
	}
	set_task_state(task, TASK_RUNNING);

	if (timeout == 0)
		return (MAX_SCHEDULE_TIMEOUT);

	/* compute the number of ticks that were left */
	remainder -= ticks;

	/* range check return value */
	if (ret == -ERESTARTSYS && remainder < 1)
		remainder = 1;
	else if (remainder < 0)
		remainder = 0;
	else if (remainder > timeout)
		remainder = timeout;
	return (remainder);
}
327 
/*
 * Wake up every thread sleeping on the given wait channel.
 */
static void
wake_up_sleepers(void *wchan)
{
	sleepq_lock(wchan);
	sleepq_signal(wchan, SLEEPQ_SLEEP, 0, 0);
	sleepq_release(wchan);
}
335 
/*
 * Derive a unique wait channel address from a word pointer and a bit
 * index; the pointer is shifted up to make room for the bit number in
 * the low 6 bits.  NOTE(review): assumes bit < 64 — not asserted here.
 */
#define	bit_to_wchan(word, bit)	((void *)(((uintptr_t)(word) << 6) | (bit)))
337 
/*
 * Wake up all threads sleeping on the wait channel derived from
 * (word, bit) — the counterpart of linux_wait_on_bit_timeout().
 */
void
linux_wake_up_bit(void *word, int bit)
{

	wake_up_sleepers(bit_to_wchan(word, bit));
}
344 
/*
 * Sleep until the given bit in *word is clear or the timeout (in ticks)
 * expires.  Returns 0 when the bit was observed clear, -EWOULDBLOCK on
 * timeout or -ERESTARTSYS if interrupted by a signal.
 */
int
linux_wait_on_bit_timeout(unsigned long *word, int bit, unsigned int state,
    int timeout)
{
	struct task_struct *task;
	void *wchan;
	int ret;

	/* range check timeout */
	if (timeout < 1)
		timeout = 1;
	else if (timeout == MAX_SCHEDULE_TIMEOUT)
		timeout = 0;	/* 0 means sleep without a timeout */

	task = current;
	wchan = bit_to_wchan(word, bit);
	for (;;) {
		/*
		 * Test the bit under the sleepqueue lock so a clear-and-wake
		 * cannot slip in between the test and the sleep.
		 */
		sleepq_lock(wchan);
		if ((*word & (1 << bit)) == 0) {
			sleepq_release(wchan);
			ret = 0;
			break;
		}
		set_task_state(task, state);
		ret = linux_add_to_sleepqueue(wchan, task, "wbit", timeout,
		    state);
		if (ret != 0)
			break;
	}
	set_task_state(task, TASK_RUNNING);

	return (ret);
}
378 
/*
 * Wake up all threads sleeping in linux_wait_on_atomic_t() on "a".
 */
void
linux_wake_up_atomic_t(atomic_t *a)
{

	wake_up_sleepers(a);
}
385 
/*
 * Sleep (with no timeout) until the atomic counter reaches zero.
 * Returns 0 on success or -ERESTARTSYS if interrupted by a signal.
 */
int
linux_wait_on_atomic_t(atomic_t *a, unsigned int state)
{
	struct task_struct *task;
	void *wchan;
	int ret;

	task = current;
	/* The atomic's address serves as the wait channel. */
	wchan = a;
	for (;;) {
		/* Re-test the counter under the sleepqueue lock. */
		sleepq_lock(wchan);
		if (atomic_read(a) == 0) {
			sleepq_release(wchan);
			ret = 0;
			break;
		}
		set_task_state(task, state);
		ret = linux_add_to_sleepqueue(wchan, task, "watomic", 0, state);
		if (ret != 0)
			break;
	}
	set_task_state(task, TASK_RUNNING);

	return (ret);
}
411 
412 bool
413 linux_wake_up_state(struct task_struct *task, unsigned int state)
414 {
415 
416 	return (wake_up_task(task, state) != 0);
417 }
418