/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2018 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef	_LINUXKPI_LINUX_SCHED_H_
#define	_LINUXKPI_LINUX_SCHED_H_

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/time.h>

#include <linux/bitmap.h>
#include <linux/compat.h>
#include <linux/completion.h>
#include <linux/hrtimer.h>
#include <linux/mm_types.h>
#include <linux/nodemask.h>
#include <linux/pid.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/time.h>

#include <linux/sched/mm.h>

#include <asm/atomic.h>

#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX

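/* Task state bits, matching the values Linux driver code expects. */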
#define	TASK_RUNNING		0x0000
#define	TASK_INTERRUPTIBLE	0x0001
#define	TASK_UNINTERRUPTIBLE	0x0002
#define	TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define	TASK_WAKING		0x0100
#define	TASK_PARKED		0x0200

#define	TASK_COMM_LEN		(MAXCOMLEN + 1)

struct seq_file;

struct work_struct;
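/*
 * LinuxKPI view of a Linux task.  Each FreeBSD thread running Linux
 * driver code gets one of these attached (td_lkpi_task); it carries the
 * kthread glue, reference count, sleep state and RCU bookkeeping.
 */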
struct task_struct {
	struct thread *task_thread;
	struct mm_struct *mm;
	linux_task_fn_t *task_fn;
	void *task_data;
	int task_ret;
	atomic_t usage;
	atomic_t state;
	atomic_t kthread_flags;
	pid_t pid;	/* BSD thread ID */
	const char *comm;
	void *bsd_ioctl_data;
	unsigned bsd_ioctl_len;
	struct completion parked;
	struct completion exited;
#define	TS_RCU_TYPE_MAX 2
	TAILQ_ENTRY(task_struct) rcu_entry[TS_RCU_TYPE_MAX];
	int rcu_recurse[TS_RCU_TYPE_MAX];
	int bsd_interrupt_value;
	struct work_struct *work;	/* current work struct, if set */
	struct task_struct *group_leader;
	unsigned rcu_section[TS_RCU_TYPE_MAX];
	unsigned int fpu_ctx_level;
};

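/*
 * Emulate Linux's "current": make sure the running thread has a
 * task_struct attached and return it.
 */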
#define	current	({ \
	struct thread *__td = curthread; \
	linux_set_current(__td); \
	((struct task_struct *)__td->td_lkpi_task); \
})

#define	task_pid_group_leader(task)	(task)->task_thread->td_proc->p_pid
#define	task_pid(task)		((task)->pid)
#define	task_pid_nr(task)	((task)->pid)
#define	task_pid_vnr(task)	((task)->pid)
#define	get_pid(x)		(x)
#define	put_pid(x)		do { } while (0)
#define	current_euid()	(curthread->td_ucred->cr_uid)
#define	task_euid(task)	((task)->task_thread->td_ucred->cr_uid)

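/*
 * Task state accessors.  The plain variants go through the atomic
 * helpers; the __ variants store the counter directly.
 */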
#define	get_task_state(task)		atomic_read(&(task)->state)
#define	set_task_state(task, x)		atomic_set(&(task)->state, (x))
#define	__set_task_state(task, x)	((task)->state.counter = (x))
#define	set_current_state(x)		set_task_state(current, x)
#define	__set_current_state(x)		__set_task_state(current, x)

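/*
 * Reference counting for the shim task_struct; the final put frees it
 * through linux_free_current().
 */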
static inline void
get_task_struct(struct task_struct *task)
{
	atomic_inc(&task->usage);
}

static inline void
put_task_struct(struct task_struct *task)
{
	if (atomic_dec_and_test(&task->usage))
		linux_free_current(task);
}

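/*
 * Voluntary preemption points, mapped onto the FreeBSD scheduler.
 * cond_resched() is a no-op while the system is still cold (before the
 * scheduler is fully up).
 */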
#define	cond_resched()	do { if (!cold) sched_relinquish(curthread); } while (0)

#define	yield()		kern_yield(PRI_UNCHANGED)
#define	sched_yield()	sched_relinquish(curthread)

#define	need_resched()	(curthread->td_owepreempt || \
    td_ast_pending(curthread, TDA_SCHED))

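/*
 * If a reschedule is pending, drop the spinlock, yield and reacquire it.
 * Returns 1 when the lock was dropped, 0 otherwise.
 */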
static inline int
cond_resched_lock(spinlock_t *lock)
{

	if (need_resched() == 0)
		return (0);
	spin_unlock(lock);
	cond_resched();
	spin_lock(lock);
	return (1);
}

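/* Signal-state queries and delivery, backed by the LinuxKPI implementation. */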
bool linux_signal_pending(struct task_struct *task);
bool linux_fatal_signal_pending(struct task_struct *task);
bool linux_signal_pending_state(long state, struct task_struct *task);
void linux_send_sig(int signo, struct task_struct *task);

#define	signal_pending(task)		linux_signal_pending(task)
#define	fatal_signal_pending(task)	linux_fatal_signal_pending(task)
#define	signal_pending_state(state, task)		\
	linux_signal_pending_state(state, task)
#define	send_sig(signo, task, priv) do {		\
	CTASSERT((priv) == 0);				\
	linux_send_sig(signo, task);			\
} while (0)

long linux_schedule_timeout(long timeout);

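/*
 * Stash the outcome of an interrupted sleep on the task; the getter
 * consumes and clears the saved value.
 */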
static inline void
linux_schedule_save_interrupt_value(struct task_struct *task, int value)
{
	task->bsd_interrupt_value = value;
}

bool linux_task_exiting(struct task_struct *task);

#define	current_exiting() \
	linux_task_exiting(current)

static inline int
linux_schedule_get_interrupt_value(struct task_struct *task)
{
	int value = task->bsd_interrupt_value;
	task->bsd_interrupt_value = 0;
	return (value);
}

static inline void
schedule(void)
{
	(void)linux_schedule_timeout(MAX_SCHEDULE_TIMEOUT);
}

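/*
 * schedule_timeout() and friends follow the Linux calling convention:
 * set the task state first, then sleep for at most "timeout" ticks.
 * A sketch of a typical (hypothetical) caller, assuming the usual
 * LinuxKPI jiffies conversion helpers are available:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(msecs_to_jiffies(100));
 *	if (signal_pending(current))
 *		... handle the interrupted sleep ...
 */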
#define	schedule_timeout(timeout)			\
	linux_schedule_timeout(timeout)
#define	schedule_timeout_killable(timeout)		\
	schedule_timeout_interruptible(timeout)
#define	schedule_timeout_interruptible(timeout) ({	\
	set_current_state(TASK_INTERRUPTIBLE);		\
	schedule_timeout(timeout);			\
})
#define	schedule_timeout_uninterruptible(timeout) ({	\
	set_current_state(TASK_UNINTERRUPTIBLE);	\
	schedule_timeout(timeout);			\
})

#define	io_schedule()			schedule()
#define	io_schedule_timeout(timeout)	schedule_timeout(timeout)

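/* Nanosecond timestamp based on nanotime(9), standing in for local_clock(). */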
static inline uint64_t
local_clock(void)
{
	struct timespec ts;

	nanotime(&ts);
	return ((uint64_t)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec);
}

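/*
 * Unlike Linux, the task name is returned directly; the caller-supplied
 * buffer is too small to be useful and is simply cleared.
 */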
static inline const char *
get_task_comm(char *buf, struct task_struct *task)
{

	buf[0] = 0; /* buffer is too small */
	return (task->comm);
}

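/*
 * Map Linux's FIFO scheduling hints onto FreeBSD real-time priorities
 * through rtprio/rtp_to_pri(): sched_set_fifo() lands mid-range,
 * sched_set_fifo_low() at the lowest real-time priority.
 */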
static inline void
sched_set_fifo(struct task_struct *t)
{
	struct rtprio rtp;

	rtp.prio = (RTP_PRIO_MIN + RTP_PRIO_MAX) / 2;
	rtp.type = RTP_PRIO_FIFO;
	rtp_to_pri(&rtp, t->task_thread);
}

static inline void
sched_set_fifo_low(struct task_struct *t)
{
	struct rtprio rtp;

	rtp.prio = RTP_PRIO_MAX;	/* lowest priority */
	rtp.type = RTP_PRIO_FIFO;
	rtp_to_pri(&rtp, t->task_thread);
}

#endif	/* _LINUXKPI_LINUX_SCHED_H_ */