// SPDX-License-Identifier: GPL-2.0

//! Tasks (threads and processes).
//!
//! C header: [`include/linux/sched.h`](srctree/include/linux/sched.h).

use crate::{
    bindings,
    ffi::{c_int, c_long, c_uint},
    mm::MmWithUser,
    pid_namespace::PidNamespace,
    sync::aref::ARef,
    types::{NotThreadSafe, Opaque},
};
use core::{
    cmp::{Eq, PartialEq},
    ops::Deref,
    ptr,
};

/// A sentinel value used for infinite timeouts.
pub const MAX_SCHEDULE_TIMEOUT: c_long = c_long::MAX;

/// Bitmask for tasks that are sleeping in an interruptible state.
pub const TASK_INTERRUPTIBLE: c_int = bindings::TASK_INTERRUPTIBLE as c_int;
/// Bitmask for tasks that are sleeping in an uninterruptible state.
pub const TASK_UNINTERRUPTIBLE: c_int = bindings::TASK_UNINTERRUPTIBLE as c_int;
/// Bitmask for tasks that are sleeping in a freezable state.
pub const TASK_FREEZABLE: c_int = bindings::TASK_FREEZABLE as c_int;
/// Convenience constant for waking up tasks regardless of whether they are in interruptible or
/// uninterruptible sleep.
pub const TASK_NORMAL: c_uint = bindings::TASK_NORMAL as c_uint;

/// Returns the currently running task.
#[macro_export]
macro_rules! current {
    () => {
        // SAFETY: This expression creates a temporary value that is dropped at the end of the
        // caller's scope. The following mechanisms ensure that the resulting `&CurrentTask` cannot
        // leave current task context:
        //
        // * To return to userspace, the caller must leave the current scope.
        // * Operations such as `begin_new_exec()` are necessarily unsafe and the caller of
        //   `begin_new_exec()` is responsible for safety.
        // * Rust abstractions for things such as a `kthread_use_mm()` scope must require the
        //   closure to be `Send`, so the `NotThreadSafe` field of `CurrentTask` ensures that the
        //   `&CurrentTask` cannot cross the scope in either direction.
        unsafe { &*$crate::task::Task::current() }
    };
}

/// Wraps the kernel's `struct task_struct`.
///
/// # Invariants
///
/// All instances are valid tasks created by the C portion of the kernel.
///
/// Instances of this type are always refcounted, that is, a call to `get_task_struct` ensures
/// that the allocation remains valid at least until the matching call to `put_task_struct`.
///
/// # Examples
///
/// The following is an example of getting the PID of the current thread with zero additional cost
/// when compared to the C version:
///
/// ```
/// let pid = current!().pid();
/// ```
///
/// Getting the PID of the current process, also zero additional cost:
///
/// ```
/// let pid = current!().group_leader().pid();
/// ```
///
/// Getting the current task and storing it in some struct. The reference count is automatically
/// incremented when creating `State` and decremented when it is dropped:
///
/// ```
/// use kernel::{task::Task, sync::aref::ARef};
///
/// struct State {
///     creator: ARef<Task>,
///     index: u32,
/// }
///
/// impl State {
///     fn new() -> Self {
///         Self {
///             creator: ARef::from(&**current!()),
///             index: 0,
///         }
///     }
/// }
/// ```
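///
/// Checking whether the current task has a pending signal, for example to back out of a
/// long-running operation early (a minimal sketch; a real caller would typically return an error
/// such as `EINTR` here):
///
/// ```
/// if current!().signal_pending() {
///     // A signal is pending for the current thread; stop waiting and unwind instead.
/// }
/// ```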
#[repr(transparent)]
pub struct Task(pub(crate) Opaque<bindings::task_struct>);

// SAFETY: By design, the only way to access a `Task` is via the `current` function or via an
// `ARef<Task>` obtained through the `AlwaysRefCounted` impl. This means that the only situation in
// which a `Task` can be accessed mutably is when the refcount drops to zero and the destructor
// runs. It is safe for that to happen on any thread, so it is ok for this type to be `Send`.
unsafe impl Send for Task {}

// SAFETY: It's OK to access `Task` through shared references from other threads because we're
// either accessing properties that don't change (e.g., `pid`, `group_leader`) or that are properly
// synchronised by C code (e.g., `signal_pending`).
unsafe impl Sync for Task {}

/// Represents the [`Task`] in the `current` global.
///
/// This type exists to provide more efficient operations that are only valid on the current task.
/// For example, to retrieve the pid namespace of a task, you must use RCU protection unless it is
/// the current task.
///
/// # Invariants
///
/// Each value of this type must only be accessed from the task context it was created within.
///
/// Of course, every thread is in a different task context, but for the purposes of this invariant,
/// these operations also permanently leave the task context:
///
/// * Returning to userspace from system call context.
/// * Calling `release_task()`.
/// * Calling `begin_new_exec()` in a binary format loader.
///
/// Other operations temporarily create a new sub-context:
///
/// * Calling `kthread_use_mm()` creates a new context, and `kthread_unuse_mm()` returns to the
///   old context.
///
/// This means that a `CurrentTask` obtained before a `kthread_use_mm()` call may be used again
/// once `kthread_unuse_mm()` is called, but it must not be used between these two calls.
/// Conversely, a `CurrentTask` obtained between a `kthread_use_mm()`/`kthread_unuse_mm()` pair
/// must not be used after `kthread_unuse_mm()`.
#[repr(transparent)]
pub struct CurrentTask(Task, NotThreadSafe);

// Make all `Task` methods available on `CurrentTask`.
impl Deref for CurrentTask {
    type Target = Task;
    #[inline]
    fn deref(&self) -> &Task {
        &self.0
    }
}

/// The type of process identifiers (PIDs).
pub type Pid = bindings::pid_t;

/// The type of user identifiers (UIDs).
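///
/// # Examples
///
/// A minimal sketch that checks whether the current task's real UID matches the current effective
/// UID, using only the helpers defined in this module:
///
/// ```
/// use kernel::task::Kuid;
///
/// let uid_matches_euid = current!().uid() == Kuid::current_euid();
/// ```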
#[derive(Copy, Clone)]
pub struct Kuid {
    kuid: bindings::kuid_t,
}

impl Task {
    /// Returns a raw pointer to the current task.
    ///
    /// It is up to the user to use the pointer correctly.
    #[inline]
    pub fn current_raw() -> *mut bindings::task_struct {
        // SAFETY: Getting the current pointer is always safe.
        unsafe { bindings::get_current() }
    }

    /// Returns a task reference for the currently executing task/thread.
    ///
    /// The recommended way to get the current task/thread is to use the
    /// [`current`] macro because it is safe.
    ///
    /// # Safety
    ///
    /// Callers must ensure that the returned object is only used to access a [`CurrentTask`]
    /// within the task context that was active when this function was called. For more details,
    /// see the invariants section for [`CurrentTask`].
    #[inline]
    pub unsafe fn current() -> impl Deref<Target = CurrentTask> {
        struct TaskRef {
            task: *const CurrentTask,
        }

        impl Deref for TaskRef {
            type Target = CurrentTask;

            fn deref(&self) -> &Self::Target {
                // SAFETY: The returned reference borrows from this `TaskRef`, so it cannot outlive
                // the `TaskRef`, which the caller of `Task::current()` has promised will not
                // outlive the task/thread for which `self.task` is the `current` pointer. Thus, it
                // is okay to return a `CurrentTask` reference here.
                unsafe { &*self.task }
            }
        }

        TaskRef {
            // CAST: The layout of `struct task_struct` and `CurrentTask` is identical.
            task: Task::current_raw().cast(),
        }
    }

    /// Returns a raw pointer to the task.
    #[inline]
    pub fn as_ptr(&self) -> *mut bindings::task_struct {
        self.0.get()
    }

    /// Returns the group leader of the given task.
    pub fn group_leader(&self) -> &Task {
        // SAFETY: The group leader of a task never changes after initialization, so reading this
        // field is not a data race.
        let ptr = unsafe { *ptr::addr_of!((*self.as_ptr()).group_leader) };

        // SAFETY: The lifetime of the returned task reference is tied to the lifetime of `self`,
        // and given that a task has a reference to its group leader, we know it must be valid for
        // the lifetime of the returned task reference.
        unsafe { &*ptr.cast() }
    }

    /// Returns the PID of the given task.
    pub fn pid(&self) -> Pid {
        // SAFETY: The pid of a task never changes after initialization, so reading this field is
        // not a data race.
        unsafe { *ptr::addr_of!((*self.as_ptr()).pid) }
    }

    /// Returns the UID of the given task.
    #[inline]
    pub fn uid(&self) -> Kuid {
        // SAFETY: It's always safe to call `task_uid` on a valid task.
        Kuid::from_raw(unsafe { bindings::task_uid(self.as_ptr()) })
    }

    /// Returns the effective UID of the given task.
    #[inline]
    pub fn euid(&self) -> Kuid {
        // SAFETY: It's always safe to call `task_euid` on a valid task.
        Kuid::from_raw(unsafe { bindings::task_euid(self.as_ptr()) })
    }

    /// Determines whether the given task has pending signals.
    #[inline]
    pub fn signal_pending(&self) -> bool {
        // SAFETY: It's always safe to call `signal_pending` on a valid task.
        unsafe { bindings::signal_pending(self.as_ptr()) != 0 }
    }

    /// Returns the task's pid namespace with an elevated reference count.
    #[inline]
    pub fn get_pid_ns(&self) -> Option<ARef<PidNamespace>> {
        // SAFETY: By the type invariant, we know that `self.0` is valid.
        let ptr = unsafe { bindings::task_get_pid_ns(self.as_ptr()) };
        if ptr.is_null() {
            None
        } else {
            // SAFETY: `ptr` is non-null and valid, and we own a reference count on it via
            // `task_get_pid_ns()`.
            // CAST: `PidNamespace` is a `repr(transparent)` wrapper around
            // `bindings::pid_namespace`.
            Some(unsafe { ARef::from_raw(ptr::NonNull::new_unchecked(ptr.cast::<PidNamespace>())) })
        }
    }

    /// Returns the given task's thread group ID (TGID) in the provided pid namespace.
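    ///
    /// # Examples
    ///
    /// A minimal sketch that reads the current task's TGID; passing `None` lets the underlying C
    /// helper fall back to the pid namespace of the calling task:
    ///
    /// ```
    /// let tgid = current!().tgid_nr_ns(None);
    /// ```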
    #[doc(alias = "task_tgid_nr_ns")]
    #[inline]
    pub fn tgid_nr_ns(&self, pidns: Option<&PidNamespace>) -> Pid {
        let pidns = match pidns {
            Some(pidns) => pidns.as_ptr(),
            None => core::ptr::null_mut(),
        };
        // SAFETY: By the type invariant, we know that `self.0` is valid. We either received a
        // valid `PidNamespace` that we can use as a pointer, or we received `None` and thus pass
        // a null pointer. The underlying C function is safe to be used with NULL pointers.
        unsafe { bindings::task_tgid_nr_ns(self.as_ptr(), pidns) }
    }

    /// Wakes up the task.
    #[inline]
    pub fn wake_up(&self) {
        // SAFETY: It's always safe to call `wake_up_process` on a valid task, even if the task is
        // currently running.
        unsafe { bindings::wake_up_process(self.as_ptr()) };
    }
}

impl CurrentTask {
    /// Access the address space of the current task.
    ///
    /// This function does not touch the refcount of the mm.
    #[inline]
    pub fn mm(&self) -> Option<&MmWithUser> {
        // SAFETY: The `mm` field of `current` is not modified from other threads, so reading it is
        // not a data race.
        let mm = unsafe { (*self.as_ptr()).mm };

        if mm.is_null() {
            return None;
        }

        // SAFETY: If `current->mm` is non-null, then it references a valid mm with a non-zero
        // value of `mm_users`. Furthermore, the returned `&MmWithUser` borrows from this
        // `CurrentTask`, so it cannot escape the scope in which the current pointer was obtained.
        //
        // This is safe even if `kthread_use_mm()`/`kthread_unuse_mm()` are used. There are two
        // relevant cases:
        // * If the `&CurrentTask` was created before `kthread_use_mm()`, then it cannot be
        //   accessed during the `kthread_use_mm()`/`kthread_unuse_mm()` scope due to the
        //   `NotThreadSafe` field of `CurrentTask`.
        // * If the `&CurrentTask` was created within a `kthread_use_mm()`/`kthread_unuse_mm()`
        //   scope, then the `&CurrentTask` cannot escape that scope, so the returned `&MmWithUser`
        //   also cannot escape that scope.
        // In either case, it's not possible to read `current->mm` and keep using it after the
        // scope is ended with `kthread_unuse_mm()`.
        Some(unsafe { MmWithUser::from_raw(mm) })
    }

    /// Access the pid namespace of the current task.
    ///
    /// This function does not touch the refcount of the namespace or use RCU protection.
    ///
    /// To access the pid namespace of another task, see [`Task::get_pid_ns`].
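    ///
    /// # Examples
    ///
    /// A minimal sketch that resolves the current task's TGID in its own active pid namespace:
    ///
    /// ```
    /// let tgid = match current!().active_pid_ns() {
    ///     Some(ns) => current!().tgid_nr_ns(Some(ns)),
    ///     None => 0,
    /// };
    /// ```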
    #[doc(alias = "task_active_pid_ns")]
    #[inline]
    pub fn active_pid_ns(&self) -> Option<&PidNamespace> {
        // SAFETY: It is safe to call `task_active_pid_ns` without RCU protection when calling it
        // on the current task.
        let active_ns = unsafe { bindings::task_active_pid_ns(self.as_ptr()) };

        if active_ns.is_null() {
            return None;
        }

        // The lifetime of `PidNamespace` is bound to `Task` and `struct pid`.
        //
        // The `PidNamespace` of a `Task` doesn't ever change once the `Task` is alive.
        //
        // From system call context retrieving the `PidNamespace` for the current task is always
        // safe and requires neither RCU locking nor a reference count to be held. Retrieving the
        // `PidNamespace` after `release_task()` for current will return `NULL` but no codepath
        // like that is exposed to Rust.
        //
        // SAFETY: If `current`'s pid ns is non-null, then it references a valid pid ns.
        // Furthermore, the returned `&PidNamespace` borrows from this `CurrentTask`, so it cannot
        // escape the scope in which the current pointer was obtained, e.g. it cannot live past a
        // `release_task()` call.
        Some(unsafe { PidNamespace::from_ptr(active_ns) })
    }
}

// SAFETY: The type invariants guarantee that `Task` is always refcounted.
unsafe impl crate::sync::aref::AlwaysRefCounted for Task {
    #[inline]
    fn inc_ref(&self) {
        // SAFETY: The existence of a shared reference means that the refcount is nonzero.
        unsafe { bindings::get_task_struct(self.as_ptr()) };
    }

    #[inline]
    unsafe fn dec_ref(obj: ptr::NonNull<Self>) {
        // SAFETY: The safety requirements guarantee that the refcount is nonzero.
        unsafe { bindings::put_task_struct(obj.cast().as_ptr()) }
    }
}

impl Kuid {
    /// Get the current euid.
    #[inline]
    pub fn current_euid() -> Kuid {
        // SAFETY: Just an FFI call.
        Self::from_raw(unsafe { bindings::current_euid() })
    }

    /// Create a `Kuid` given the raw C type.
    #[inline]
    pub fn from_raw(kuid: bindings::kuid_t) -> Self {
        Self { kuid }
    }

    /// Turn this kuid into the raw C type.
    #[inline]
    pub fn into_raw(self) -> bindings::kuid_t {
        self.kuid
    }

    /// Converts this kernel UID into a userspace UID.
    ///
    /// Uses the namespace of the current task.
    #[inline]
    pub fn into_uid_in_current_ns(self) -> bindings::uid_t {
        // SAFETY: Just an FFI call.
        unsafe { bindings::from_kuid(bindings::current_user_ns(), self.kuid) }
    }
}

impl PartialEq for Kuid {
    #[inline]
    fn eq(&self, other: &Kuid) -> bool {
        // SAFETY: Just an FFI call.
        unsafe { bindings::uid_eq(self.kuid, other.kuid) }
    }
}

impl Eq for Kuid {}

/// Annotation for functions that can sleep.
///
/// Equivalent to the C side [`might_sleep()`], this function serves as
/// a debugging aid and a potential scheduling point.
///
/// This function can only be used in a nonatomic context.
///
/// [`might_sleep()`]: https://docs.kernel.org/driver-api/basics.html#c.might_sleep
#[track_caller]
#[inline]
pub fn might_sleep() {
    #[cfg(CONFIG_DEBUG_ATOMIC_SLEEP)]
    {
        let loc = core::panic::Location::caller();
        let file = kernel::file_from_location(loc);

        // SAFETY: `file.as_ptr()` is valid for reading and guaranteed to be nul-terminated.
        unsafe { crate::bindings::__might_sleep(file.as_ptr().cast(), loc.line() as i32) }
    }

    // SAFETY: Always safe to call.
    unsafe { crate::bindings::might_resched() }
}