1 /* SPDX-License-Identifier: GPL-2.0 */ 2 #ifndef _LINUX_SCHED_H 3 #define _LINUX_SCHED_H 4 5 /* 6 * Define 'struct task_struct' and provide the main scheduler 7 * APIs (schedule(), wakeup variants, etc.) 8 */ 9 10 #include <uapi/linux/sched.h> 11 12 #include <asm/current.h> 13 14 #include <linux/pid.h> 15 #include <linux/sem.h> 16 #include <linux/shm.h> 17 #include <linux/kmsan_types.h> 18 #include <linux/mutex.h> 19 #include <linux/plist.h> 20 #include <linux/hrtimer.h> 21 #include <linux/irqflags.h> 22 #include <linux/seccomp.h> 23 #include <linux/nodemask.h> 24 #include <linux/rcupdate.h> 25 #include <linux/refcount.h> 26 #include <linux/resource.h> 27 #include <linux/latencytop.h> 28 #include <linux/sched/prio.h> 29 #include <linux/sched/types.h> 30 #include <linux/signal_types.h> 31 #include <linux/syscall_user_dispatch.h> 32 #include <linux/mm_types_task.h> 33 #include <linux/task_io_accounting.h> 34 #include <linux/posix-timers.h> 35 #include <linux/rseq.h> 36 #include <linux/seqlock.h> 37 #include <linux/kcsan.h> 38 #include <linux/rv.h> 39 #include <linux/livepatch_sched.h> 40 #include <asm/kmap_size.h> 41 42 /* task_struct member predeclarations (sorted alphabetically): */ 43 struct audit_context; 44 struct bio_list; 45 struct blk_plug; 46 struct bpf_local_storage; 47 struct bpf_run_ctx; 48 struct capture_control; 49 struct cfs_rq; 50 struct fs_struct; 51 struct futex_pi_state; 52 struct io_context; 53 struct io_uring_task; 54 struct mempolicy; 55 struct nameidata; 56 struct nsproxy; 57 struct perf_event_context; 58 struct pid_namespace; 59 struct pipe_inode_info; 60 struct rcu_node; 61 struct reclaim_state; 62 struct robust_list_head; 63 struct root_domain; 64 struct rq; 65 struct sched_attr; 66 struct sched_param; 67 struct seq_file; 68 struct sighand_struct; 69 struct signal_struct; 70 struct task_delay_info; 71 struct task_group; 72 struct user_event_mm; 73 74 /* 75 * Task state bitmask. NOTE! These bits are also 76 * encoded in fs/proc/array.c: get_task_state(). 77 * 78 * We have two separate sets of flags: task->state 79 * is about runnability, while task->exit_state are 80 * about the task exiting. Confusing, but this way 81 * modifying one set can't modify the other one by 82 * mistake. 83 */ 84 85 /* Used in tsk->state: */ 86 #define TASK_RUNNING 0x00000000 87 #define TASK_INTERRUPTIBLE 0x00000001 88 #define TASK_UNINTERRUPTIBLE 0x00000002 89 #define __TASK_STOPPED 0x00000004 90 #define __TASK_TRACED 0x00000008 91 /* Used in tsk->exit_state: */ 92 #define EXIT_DEAD 0x00000010 93 #define EXIT_ZOMBIE 0x00000020 94 #define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD) 95 /* Used in tsk->state again: */ 96 #define TASK_PARKED 0x00000040 97 #define TASK_DEAD 0x00000080 98 #define TASK_WAKEKILL 0x00000100 99 #define TASK_WAKING 0x00000200 100 #define TASK_NOLOAD 0x00000400 101 #define TASK_NEW 0x00000800 102 #define TASK_RTLOCK_WAIT 0x00001000 103 #define TASK_FREEZABLE 0x00002000 104 #define __TASK_FREEZABLE_UNSAFE (0x00004000 * IS_ENABLED(CONFIG_LOCKDEP)) 105 #define TASK_FROZEN 0x00008000 106 #define TASK_STATE_MAX 0x00010000 107 108 #define TASK_ANY (TASK_STATE_MAX-1) 109 110 /* 111 * DO NOT ADD ANY NEW USERS ! 
112 */ 113 #define TASK_FREEZABLE_UNSAFE (TASK_FREEZABLE | __TASK_FREEZABLE_UNSAFE) 114 115 /* Convenience macros for the sake of set_current_state: */ 116 #define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) 117 #define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED) 118 #define TASK_TRACED __TASK_TRACED 119 120 #define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD) 121 122 /* Convenience macros for the sake of wake_up(): */ 123 #define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE) 124 125 /* get_task_state(): */ 126 #define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \ 127 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \ 128 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \ 129 TASK_PARKED) 130 131 #define task_is_running(task) (READ_ONCE((task)->__state) == TASK_RUNNING) 132 133 #define task_is_traced(task) ((READ_ONCE(task->jobctl) & JOBCTL_TRACED) != 0) 134 #define task_is_stopped(task) ((READ_ONCE(task->jobctl) & JOBCTL_STOPPED) != 0) 135 #define task_is_stopped_or_traced(task) ((READ_ONCE(task->jobctl) & (JOBCTL_STOPPED | JOBCTL_TRACED)) != 0) 136 137 /* 138 * Special states are those that do not use the normal wait-loop pattern. See 139 * the comment with set_special_state(). 140 */ 141 #define is_special_task_state(state) \ 142 ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD)) 143 144 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 145 # define debug_normal_state_change(state_value) \ 146 do { \ 147 WARN_ON_ONCE(is_special_task_state(state_value)); \ 148 current->task_state_change = _THIS_IP_; \ 149 } while (0) 150 151 # define debug_special_state_change(state_value) \ 152 do { \ 153 WARN_ON_ONCE(!is_special_task_state(state_value)); \ 154 current->task_state_change = _THIS_IP_; \ 155 } while (0) 156 157 # define debug_rtlock_wait_set_state() \ 158 do { \ 159 current->saved_state_change = current->task_state_change;\ 160 current->task_state_change = _THIS_IP_; \ 161 } while (0) 162 163 # define debug_rtlock_wait_restore_state() \ 164 do { \ 165 current->task_state_change = current->saved_state_change;\ 166 } while (0) 167 168 #else 169 # define debug_normal_state_change(cond) do { } while (0) 170 # define debug_special_state_change(cond) do { } while (0) 171 # define debug_rtlock_wait_set_state() do { } while (0) 172 # define debug_rtlock_wait_restore_state() do { } while (0) 173 #endif 174 175 /* 176 * set_current_state() includes a barrier so that the write of current->state 177 * is correctly serialised wrt the caller's subsequent test of whether to 178 * actually sleep: 179 * 180 * for (;;) { 181 * set_current_state(TASK_UNINTERRUPTIBLE); 182 * if (CONDITION) 183 * break; 184 * 185 * schedule(); 186 * } 187 * __set_current_state(TASK_RUNNING); 188 * 189 * If the caller does not need such serialisation (because, for instance, the 190 * CONDITION test and condition change and wakeup are under the same lock) then 191 * use __set_current_state(). 192 * 193 * The above is typically ordered against the wakeup, which does: 194 * 195 * CONDITION = 1; 196 * wake_up_state(p, TASK_UNINTERRUPTIBLE); 197 * 198 * where wake_up_state()/try_to_wake_up() executes a full memory barrier before 199 * accessing p->state. 200 * 201 * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is, 202 * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a 203 * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING). 
204 * 205 * However, with slightly different timing the wakeup TASK_RUNNING store can 206 * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not 207 * a problem either because that will result in one extra go around the loop 208 * and our @cond test will save the day. 209 * 210 * Also see the comments of try_to_wake_up(). 211 */ 212 #define __set_current_state(state_value) \ 213 do { \ 214 debug_normal_state_change((state_value)); \ 215 WRITE_ONCE(current->__state, (state_value)); \ 216 } while (0) 217 218 #define set_current_state(state_value) \ 219 do { \ 220 debug_normal_state_change((state_value)); \ 221 smp_store_mb(current->__state, (state_value)); \ 222 } while (0) 223 224 /* 225 * set_special_state() should be used for those states when the blocking task 226 * can not use the regular condition based wait-loop. In that case we must 227 * serialize against wakeups such that any possible in-flight TASK_RUNNING 228 * stores will not collide with our state change. 229 */ 230 #define set_special_state(state_value) \ 231 do { \ 232 unsigned long flags; /* may shadow */ \ 233 \ 234 raw_spin_lock_irqsave(&current->pi_lock, flags); \ 235 debug_special_state_change((state_value)); \ 236 WRITE_ONCE(current->__state, (state_value)); \ 237 raw_spin_unlock_irqrestore(&current->pi_lock, flags); \ 238 } while (0) 239 240 /* 241 * PREEMPT_RT specific variants for "sleeping" spin/rwlocks 242 * 243 * RT's spin/rwlock substitutions are state preserving. The state of the 244 * task when blocking on the lock is saved in task_struct::saved_state and 245 * restored after the lock has been acquired. These operations are 246 * serialized by task_struct::pi_lock against try_to_wake_up(). Any non RT 247 * lock related wakeups while the task is blocked on the lock are 248 * redirected to operate on task_struct::saved_state to ensure that these 249 * are not dropped. On restore task_struct::saved_state is set to 250 * TASK_RUNNING so any wakeup attempt redirected to saved_state will fail. 251 * 252 * The lock operation looks like this: 253 * 254 * current_save_and_set_rtlock_wait_state(); 255 * for (;;) { 256 * if (try_lock()) 257 * break; 258 * raw_spin_unlock_irq(&lock->wait_lock); 259 * schedule_rtlock(); 260 * raw_spin_lock_irq(&lock->wait_lock); 261 * set_current_state(TASK_RTLOCK_WAIT); 262 * } 263 * current_restore_rtlock_saved_state(); 264 */ 265 #define current_save_and_set_rtlock_wait_state() \ 266 do { \ 267 lockdep_assert_irqs_disabled(); \ 268 raw_spin_lock(&current->pi_lock); \ 269 current->saved_state = current->__state; \ 270 debug_rtlock_wait_set_state(); \ 271 WRITE_ONCE(current->__state, TASK_RTLOCK_WAIT); \ 272 raw_spin_unlock(&current->pi_lock); \ 273 } while (0); 274 275 #define current_restore_rtlock_saved_state() \ 276 do { \ 277 lockdep_assert_irqs_disabled(); \ 278 raw_spin_lock(&current->pi_lock); \ 279 debug_rtlock_wait_restore_state(); \ 280 WRITE_ONCE(current->__state, current->saved_state); \ 281 current->saved_state = TASK_RUNNING; \ 282 raw_spin_unlock(&current->pi_lock); \ 283 } while (0); 284 285 #define get_current_state() READ_ONCE(current->__state) 286 287 /* 288 * Define the task command name length as enum, then it can be visible to 289 * BPF programs.
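 *
 * For example (an illustrative sketch, not part of this header), a BPF
 * program can size its buffer with this constant and fill it with the
 * bpf_get_current_comm() helper:
 *
 *   char comm[TASK_COMM_LEN];
 *
 *   bpf_get_current_comm(comm, sizeof(comm));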
290 */ 291 enum { 292 TASK_COMM_LEN = 16, 293 }; 294 295 extern void scheduler_tick(void); 296 297 #define MAX_SCHEDULE_TIMEOUT LONG_MAX 298 299 extern long schedule_timeout(long timeout); 300 extern long schedule_timeout_interruptible(long timeout); 301 extern long schedule_timeout_killable(long timeout); 302 extern long schedule_timeout_uninterruptible(long timeout); 303 extern long schedule_timeout_idle(long timeout); 304 asmlinkage void schedule(void); 305 extern void schedule_preempt_disabled(void); 306 asmlinkage void preempt_schedule_irq(void); 307 #ifdef CONFIG_PREEMPT_RT 308 extern void schedule_rtlock(void); 309 #endif 310 311 extern int __must_check io_schedule_prepare(void); 312 extern void io_schedule_finish(int token); 313 extern long io_schedule_timeout(long timeout); 314 extern void io_schedule(void); 315 316 /** 317 * struct prev_cputime - snapshot of system and user cputime 318 * @utime: time spent in user mode 319 * @stime: time spent in system mode 320 * @lock: protects the above two fields 321 * 322 * Stores previous user/system time values such that we can guarantee 323 * monotonicity. 324 */ 325 struct prev_cputime { 326 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE 327 u64 utime; 328 u64 stime; 329 raw_spinlock_t lock; 330 #endif 331 }; 332 333 enum vtime_state { 334 /* Task is sleeping or running in a CPU with VTIME inactive: */ 335 VTIME_INACTIVE = 0, 336 /* Task is idle */ 337 VTIME_IDLE, 338 /* Task runs in kernelspace in a CPU with VTIME active: */ 339 VTIME_SYS, 340 /* Task runs in userspace in a CPU with VTIME active: */ 341 VTIME_USER, 342 /* Task runs as guests in a CPU with VTIME active: */ 343 VTIME_GUEST, 344 }; 345 346 struct vtime { 347 seqcount_t seqcount; 348 unsigned long long starttime; 349 enum vtime_state state; 350 unsigned int cpu; 351 u64 utime; 352 u64 stime; 353 u64 gtime; 354 }; 355 356 /* 357 * Utilization clamp constraints. 358 * @UCLAMP_MIN: Minimum utilization 359 * @UCLAMP_MAX: Maximum utilization 360 * @UCLAMP_CNT: Utilization clamp constraints count 361 */ 362 enum uclamp_id { 363 UCLAMP_MIN = 0, 364 UCLAMP_MAX, 365 UCLAMP_CNT 366 }; 367 368 #ifdef CONFIG_SMP 369 extern struct root_domain def_root_domain; 370 extern struct mutex sched_domains_mutex; 371 #endif 372 373 struct sched_info { 374 #ifdef CONFIG_SCHED_INFO 375 /* Cumulative counters: */ 376 377 /* # of times we have run on this CPU: */ 378 unsigned long pcount; 379 380 /* Time spent waiting on a runqueue: */ 381 unsigned long long run_delay; 382 383 /* Timestamps: */ 384 385 /* When did we last run on a CPU? */ 386 unsigned long long last_arrival; 387 388 /* When were we last queued to run? */ 389 unsigned long long last_queued; 390 391 #endif /* CONFIG_SCHED_INFO */ 392 }; 393 394 /* 395 * Integer metrics need fixed point arithmetic, e.g., sched/fair 396 * has a few: load, load_avg, util_avg, freq, and capacity. 397 * 398 * We define a basic fixed point arithmetic range, and then formalize 399 * all these metrics based on that basic range. 
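 *
 * As a worked example (for illustration only): with SCHED_FIXEDPOINT_SHIFT
 * being 10 the scale is 1024, so a utilization of 50% is represented by the
 * fixed point value 0.5 * 1024 = 512, and 100% by 1024.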
400 */ 401 # define SCHED_FIXEDPOINT_SHIFT 10 402 # define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT) 403 404 /* Increase resolution of cpu_capacity calculations */ 405 # define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT 406 # define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT) 407 408 struct load_weight { 409 unsigned long weight; 410 u32 inv_weight; 411 }; 412 413 /** 414 * struct util_est - Estimation utilization of FAIR tasks 415 * @enqueued: instantaneous estimated utilization of a task/cpu 416 * @ewma: the Exponential Weighted Moving Average (EWMA) 417 * utilization of a task 418 * 419 * Support data structure to track an Exponential Weighted Moving Average 420 * (EWMA) of a FAIR task's utilization. New samples are added to the moving 421 * average each time a task completes an activation. Sample's weight is chosen 422 * so that the EWMA will be relatively insensitive to transient changes to the 423 * task's workload. 424 * 425 * The enqueued attribute has a slightly different meaning for tasks and cpus: 426 * - task: the task's util_avg at last task dequeue time 427 * - cfs_rq: the sum of util_est.enqueued for each RUNNABLE task on that CPU 428 * Thus, the util_est.enqueued of a task represents the contribution on the 429 * estimated utilization of the CPU where that task is currently enqueued. 430 * 431 * Only for tasks we track a moving average of the past instantaneous 432 * estimated utilization. This allows to absorb sporadic drops in utilization 433 * of an otherwise almost periodic task. 434 * 435 * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg 436 * updates. When a task is dequeued, its util_est should not be updated if its 437 * util_avg has not been updated in the meantime. 438 * This information is mapped into the MSB bit of util_est.enqueued at dequeue 439 * time. Since max value of util_est.enqueued for a task is 1024 (PELT util_avg 440 * for a task) it is safe to use MSB. 441 */ 442 struct util_est { 443 unsigned int enqueued; 444 unsigned int ewma; 445 #define UTIL_EST_WEIGHT_SHIFT 2 446 #define UTIL_AVG_UNCHANGED 0x80000000 447 } __attribute__((__aligned__(sizeof(u64)))); 448 449 /* 450 * The load/runnable/util_avg accumulates an infinite geometric series 451 * (see __update_load_avg_cfs_rq() in kernel/sched/pelt.c). 452 * 453 * [load_avg definition] 454 * 455 * load_avg = runnable% * scale_load_down(load) 456 * 457 * [runnable_avg definition] 458 * 459 * runnable_avg = runnable% * SCHED_CAPACITY_SCALE 460 * 461 * [util_avg definition] 462 * 463 * util_avg = running% * SCHED_CAPACITY_SCALE 464 * 465 * where runnable% is the time ratio that a sched_entity is runnable and 466 * running% the time ratio that a sched_entity is running. 467 * 468 * For cfs_rq, they are the aggregated values of all runnable and blocked 469 * sched_entities. 470 * 471 * The load/runnable/util_avg doesn't directly factor frequency scaling and CPU 472 * capacity scaling. The scaling is done through the rq_clock_pelt that is used 473 * for computing those signals (see update_rq_clock_pelt()) 474 * 475 * N.B., the above ratios (runnable% and running%) themselves are in the 476 * range of [0, 1]. To do fixed point arithmetics, we therefore scale them 477 * to as large a range as necessary. This is for example reflected by 478 * util_avg's SCHED_CAPACITY_SCALE. 
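 *
 * As an illustration (not a definition from this file): a task running 25%
 * of the time on a CPU at full capacity converges towards
 * util_avg ~= 0.25 * SCHED_CAPACITY_SCALE = 256, while a task that is
 * runnable all of the time but actually running only half of it converges
 * towards runnable_avg ~= 1024 and util_avg ~= 512.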
479 * 480 * [Overflow issue] 481 * 482 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities 483 * with the highest load (=88761), always runnable on a single cfs_rq, 484 * and should not overflow as the number already hits PID_MAX_LIMIT. 485 * 486 * For all other cases (including 32-bit kernels), struct load_weight's 487 * weight will overflow first before we do, because: 488 * 489 * Max(load_avg) <= Max(load.weight) 490 * 491 * Then it is the load_weight's responsibility to consider overflow 492 * issues. 493 */ 494 struct sched_avg { 495 u64 last_update_time; 496 u64 load_sum; 497 u64 runnable_sum; 498 u32 util_sum; 499 u32 period_contrib; 500 unsigned long load_avg; 501 unsigned long runnable_avg; 502 unsigned long util_avg; 503 struct util_est util_est; 504 } ____cacheline_aligned; 505 506 struct sched_statistics { 507 #ifdef CONFIG_SCHEDSTATS 508 u64 wait_start; 509 u64 wait_max; 510 u64 wait_count; 511 u64 wait_sum; 512 u64 iowait_count; 513 u64 iowait_sum; 514 515 u64 sleep_start; 516 u64 sleep_max; 517 s64 sum_sleep_runtime; 518 519 u64 block_start; 520 u64 block_max; 521 s64 sum_block_runtime; 522 523 u64 exec_max; 524 u64 slice_max; 525 526 u64 nr_migrations_cold; 527 u64 nr_failed_migrations_affine; 528 u64 nr_failed_migrations_running; 529 u64 nr_failed_migrations_hot; 530 u64 nr_forced_migrations; 531 532 u64 nr_wakeups; 533 u64 nr_wakeups_sync; 534 u64 nr_wakeups_migrate; 535 u64 nr_wakeups_local; 536 u64 nr_wakeups_remote; 537 u64 nr_wakeups_affine; 538 u64 nr_wakeups_affine_attempts; 539 u64 nr_wakeups_passive; 540 u64 nr_wakeups_idle; 541 542 #ifdef CONFIG_SCHED_CORE 543 u64 core_forceidle_sum; 544 #endif 545 #endif /* CONFIG_SCHEDSTATS */ 546 } ____cacheline_aligned; 547 548 struct sched_entity { 549 /* For load-balancing: */ 550 struct load_weight load; 551 struct rb_node run_node; 552 struct list_head group_node; 553 unsigned int on_rq; 554 555 u64 exec_start; 556 u64 sum_exec_runtime; 557 u64 vruntime; 558 u64 prev_sum_exec_runtime; 559 560 u64 nr_migrations; 561 562 #ifdef CONFIG_FAIR_GROUP_SCHED 563 int depth; 564 struct sched_entity *parent; 565 /* rq on which this entity is (to be) queued: */ 566 struct cfs_rq *cfs_rq; 567 /* rq "owned" by this entity/group: */ 568 struct cfs_rq *my_q; 569 /* cached value of my_q->h_nr_running */ 570 unsigned long runnable_weight; 571 #endif 572 573 #ifdef CONFIG_SMP 574 /* 575 * Per entity load average tracking. 576 * 577 * Put into separate cache line so it does not 578 * collide with read-mostly values above. 579 */ 580 struct sched_avg avg; 581 #endif 582 }; 583 584 struct sched_rt_entity { 585 struct list_head run_list; 586 unsigned long timeout; 587 unsigned long watchdog_stamp; 588 unsigned int time_slice; 589 unsigned short on_rq; 590 unsigned short on_list; 591 592 struct sched_rt_entity *back; 593 #ifdef CONFIG_RT_GROUP_SCHED 594 struct sched_rt_entity *parent; 595 /* rq on which this entity is (to be) queued: */ 596 struct rt_rq *rt_rq; 597 /* rq "owned" by this entity/group: */ 598 struct rt_rq *my_q; 599 #endif 600 } __randomize_layout; 601 602 struct sched_dl_entity { 603 struct rb_node rb_node; 604 605 /* 606 * Original scheduling parameters. Copied here from sched_attr 607 * during sched_setattr(), they will remain the same until 608 * the next sched_setattr(). 
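 *
 * A hedged userspace sketch (example values only): a periodic task needing
 * 10ms of CPU time every 30ms could request admission with
 *
 *   struct sched_attr attr = {
 *           .size           = sizeof(attr),
 *           .sched_policy   = SCHED_DEADLINE,
 *           .sched_runtime  = 10 * 1000 * 1000,
 *           .sched_deadline = 30 * 1000 * 1000,
 *           .sched_period   = 30 * 1000 * 1000,
 *   };
 *   sched_setattr(0, &attr, 0);
 *
 * (times in nanoseconds), giving dl_bw = dl_runtime / dl_period, i.e. one
 * third of a CPU.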
609 */ 610 u64 dl_runtime; /* Maximum runtime for each instance */ 611 u64 dl_deadline; /* Relative deadline of each instance */ 612 u64 dl_period; /* Separation of two instances (period) */ 613 u64 dl_bw; /* dl_runtime / dl_period */ 614 u64 dl_density; /* dl_runtime / dl_deadline */ 615 616 /* 617 * Actual scheduling parameters. Initialized with the values above, 618 * they are continuously updated during task execution. Note that 619 * the remaining runtime could be < 0 in case we are in overrun. 620 */ 621 s64 runtime; /* Remaining runtime for this instance */ 622 u64 deadline; /* Absolute deadline for this instance */ 623 unsigned int flags; /* Specifying the scheduler behaviour */ 624 625 /* 626 * Some bool flags: 627 * 628 * @dl_throttled tells if we exhausted the runtime. If so, the 629 * task has to wait for a replenishment to be performed at the 630 * next firing of dl_timer. 631 * 632 * @dl_yielded tells if task gave up the CPU before consuming 633 * all its available runtime during the last job. 634 * 635 * @dl_non_contending tells if the task is inactive while still 636 * contributing to the active utilization. In other words, it 637 * indicates if the inactive timer has been armed and its handler 638 * has not been executed yet. This flag is useful to avoid race 639 * conditions between the inactive timer handler and the wakeup 640 * code. 641 * 642 * @dl_overrun tells if the task asked to be informed about runtime 643 * overruns. 644 */ 645 unsigned int dl_throttled : 1; 646 unsigned int dl_yielded : 1; 647 unsigned int dl_non_contending : 1; 648 unsigned int dl_overrun : 1; 649 650 /* 651 * Bandwidth enforcement timer. Each -deadline task has its 652 * own bandwidth to be enforced, thus we need one timer per task. 653 */ 654 struct hrtimer dl_timer; 655 656 /* 657 * Inactive timer, responsible for decreasing the active utilization 658 * at the "0-lag time". When a -deadline task blocks, it contributes 659 * to GRUB's active utilization until the "0-lag time", hence a 660 * timer is needed to decrease the active utilization at the correct 661 * time. 662 */ 663 struct hrtimer inactive_timer; 664 665 #ifdef CONFIG_RT_MUTEXES 666 /* 667 * Priority Inheritance. When a DEADLINE scheduling entity is boosted 668 * pi_se points to the donor, otherwise points to the dl_se it belongs 669 * to (the original one/itself). 670 */ 671 struct sched_dl_entity *pi_se; 672 #endif 673 }; 674 675 #ifdef CONFIG_UCLAMP_TASK 676 /* Number of utilization clamp buckets (shorter alias) */ 677 #define UCLAMP_BUCKETS CONFIG_UCLAMP_BUCKETS_COUNT 678 679 /* 680 * Utilization clamp for a scheduling entity 681 * @value: clamp value "assigned" to a se 682 * @bucket_id: bucket index corresponding to the "assigned" value 683 * @active: the se is currently refcounted in a rq's bucket 684 * @user_defined: the requested clamp value comes from user-space 685 * 686 * The bucket_id is the index of the clamp bucket matching the clamp value 687 * which is pre-computed and stored to avoid expensive integer divisions from 688 * the fast path. 689 * 690 * The active bit is set whenever a task has got an "effective" value assigned, 691 * which can be different from the clamp value "requested" from user-space. 692 * This allows to know a task is refcounted in the rq's bucket corresponding 693 * to the "effective" bucket_id. 694 * 695 * The user_defined bit is set whenever a task has got a task-specific clamp 696 * value requested from userspace, i.e. the system defaults apply to this task 697 * just as a restriction. 
This allows to relax default clamps when a less 698 * restrictive task-specific value has been requested, thus allowing to 699 * implement a "nice" semantic. For example, a task running with a 20% 700 * default boost can still drop its own boosting to 0%. 701 */ 702 struct uclamp_se { 703 unsigned int value : bits_per(SCHED_CAPACITY_SCALE); 704 unsigned int bucket_id : bits_per(UCLAMP_BUCKETS); 705 unsigned int active : 1; 706 unsigned int user_defined : 1; 707 }; 708 #endif /* CONFIG_UCLAMP_TASK */ 709 710 union rcu_special { 711 struct { 712 u8 blocked; 713 u8 need_qs; 714 u8 exp_hint; /* Hint for performance. */ 715 u8 need_mb; /* Readers need smp_mb(). */ 716 } b; /* Bits. */ 717 u32 s; /* Set of bits. */ 718 }; 719 720 enum perf_event_task_context { 721 perf_invalid_context = -1, 722 perf_hw_context = 0, 723 perf_sw_context, 724 perf_nr_task_contexts, 725 }; 726 727 struct wake_q_node { 728 struct wake_q_node *next; 729 }; 730 731 struct kmap_ctrl { 732 #ifdef CONFIG_KMAP_LOCAL 733 int idx; 734 pte_t pteval[KM_MAX_IDX]; 735 #endif 736 }; 737 738 struct task_struct { 739 #ifdef CONFIG_THREAD_INFO_IN_TASK 740 /* 741 * For reasons of header soup (see current_thread_info()), this 742 * must be the first element of task_struct. 743 */ 744 struct thread_info thread_info; 745 #endif 746 unsigned int __state; 747 748 #ifdef CONFIG_PREEMPT_RT 749 /* saved state for "spinlock sleepers" */ 750 unsigned int saved_state; 751 #endif 752 753 /* 754 * This begins the randomizable portion of task_struct. Only 755 * scheduling-critical items should be added above here. 756 */ 757 randomized_struct_fields_start 758 759 void *stack; 760 refcount_t usage; 761 /* Per task flags (PF_*), defined further below: */ 762 unsigned int flags; 763 unsigned int ptrace; 764 765 #ifdef CONFIG_SMP 766 int on_cpu; 767 struct __call_single_node wake_entry; 768 unsigned int wakee_flips; 769 unsigned long wakee_flip_decay_ts; 770 struct task_struct *last_wakee; 771 772 /* 773 * recent_used_cpu is initially set as the last CPU used by a task 774 * that wakes affine another task. Waker/wakee relationships can 775 * push tasks around a CPU where each wakeup moves to the next one. 776 * Tracking a recently used CPU allows a quick search for a recently 777 * used CPU that may be idle. 778 */ 779 int recent_used_cpu; 780 int wake_cpu; 781 #endif 782 int on_rq; 783 784 int prio; 785 int static_prio; 786 int normal_prio; 787 unsigned int rt_priority; 788 789 struct sched_entity se; 790 struct sched_rt_entity rt; 791 struct sched_dl_entity dl; 792 const struct sched_class *sched_class; 793 794 #ifdef CONFIG_SCHED_CORE 795 struct rb_node core_node; 796 unsigned long core_cookie; 797 unsigned int core_occupation; 798 #endif 799 800 #ifdef CONFIG_CGROUP_SCHED 801 struct task_group *sched_task_group; 802 #endif 803 804 #ifdef CONFIG_UCLAMP_TASK 805 /* 806 * Clamp values requested for a scheduling entity. 807 * Must be updated with task_rq_lock() held. 808 */ 809 struct uclamp_se uclamp_req[UCLAMP_CNT]; 810 /* 811 * Effective clamp values used for a scheduling entity. 812 * Must be updated with task_rq_lock() held. 
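 *
 * (Illustrative note, assuming the sched_attr uclamp interface: the
 *  "requested" values above typically come from sched_setattr() with
 *  SCHED_FLAG_UTIL_CLAMP_MIN / SCHED_FLAG_UTIL_CLAMP_MAX and the
 *  sched_util_min / sched_util_max fields, while the "effective" values
 *  below are those requests after system defaults and cgroup limits have
 *  been applied.)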
813 */ 814 struct uclamp_se uclamp[UCLAMP_CNT]; 815 #endif 816 817 struct sched_statistics stats; 818 819 #ifdef CONFIG_PREEMPT_NOTIFIERS 820 /* List of struct preempt_notifier: */ 821 struct hlist_head preempt_notifiers; 822 #endif 823 824 #ifdef CONFIG_BLK_DEV_IO_TRACE 825 unsigned int btrace_seq; 826 #endif 827 828 unsigned int policy; 829 int nr_cpus_allowed; 830 const cpumask_t *cpus_ptr; 831 cpumask_t *user_cpus_ptr; 832 cpumask_t cpus_mask; 833 void *migration_pending; 834 #ifdef CONFIG_SMP 835 unsigned short migration_disabled; 836 #endif 837 unsigned short migration_flags; 838 839 #ifdef CONFIG_PREEMPT_RCU 840 int rcu_read_lock_nesting; 841 union rcu_special rcu_read_unlock_special; 842 struct list_head rcu_node_entry; 843 struct rcu_node *rcu_blocked_node; 844 #endif /* #ifdef CONFIG_PREEMPT_RCU */ 845 846 #ifdef CONFIG_TASKS_RCU 847 unsigned long rcu_tasks_nvcsw; 848 u8 rcu_tasks_holdout; 849 u8 rcu_tasks_idx; 850 int rcu_tasks_idle_cpu; 851 struct list_head rcu_tasks_holdout_list; 852 #endif /* #ifdef CONFIG_TASKS_RCU */ 853 854 #ifdef CONFIG_TASKS_TRACE_RCU 855 int trc_reader_nesting; 856 int trc_ipi_to_cpu; 857 union rcu_special trc_reader_special; 858 struct list_head trc_holdout_list; 859 struct list_head trc_blkd_node; 860 int trc_blkd_cpu; 861 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */ 862 863 struct sched_info sched_info; 864 865 struct list_head tasks; 866 #ifdef CONFIG_SMP 867 struct plist_node pushable_tasks; 868 struct rb_node pushable_dl_tasks; 869 #endif 870 871 struct mm_struct *mm; 872 struct mm_struct *active_mm; 873 874 int exit_state; 875 int exit_code; 876 int exit_signal; 877 /* The signal sent when the parent dies: */ 878 int pdeath_signal; 879 /* JOBCTL_*, siglock protected: */ 880 unsigned long jobctl; 881 882 /* Used for emulating ABI behavior of previous Linux versions: */ 883 unsigned int personality; 884 885 /* Scheduler bits, serialized by scheduler locks: */ 886 unsigned sched_reset_on_fork:1; 887 unsigned sched_contributes_to_load:1; 888 unsigned sched_migrated:1; 889 890 /* Force alignment to the next boundary: */ 891 unsigned :0; 892 893 /* Unserialized, strictly 'current' */ 894 895 /* 896 * This field must not be in the scheduler word above due to wakelist 897 * queueing no longer being serialized by p->on_cpu. However: 898 * 899 * p->XXX = X; ttwu() 900 * schedule() if (p->on_rq && ..) // false 901 * smp_mb__after_spinlock(); if (smp_load_acquire(&p->on_cpu) && //true 902 * deactivate_task() ttwu_queue_wakelist()) 903 * p->on_rq = 0; p->sched_remote_wakeup = Y; 904 * 905 * guarantees all stores of 'current' are visible before 906 * ->sched_remote_wakeup gets used, so it can be in this word. 
907 */ 908 unsigned sched_remote_wakeup:1; 909 910 /* Bit to tell LSMs we're in execve(): */ 911 unsigned in_execve:1; 912 unsigned in_iowait:1; 913 #ifndef TIF_RESTORE_SIGMASK 914 unsigned restore_sigmask:1; 915 #endif 916 #ifdef CONFIG_MEMCG 917 unsigned in_user_fault:1; 918 #endif 919 #ifdef CONFIG_LRU_GEN 920 /* whether the LRU algorithm may apply to this access */ 921 unsigned in_lru_fault:1; 922 #endif 923 #ifdef CONFIG_COMPAT_BRK 924 unsigned brk_randomized:1; 925 #endif 926 #ifdef CONFIG_CGROUPS 927 /* disallow userland-initiated cgroup migration */ 928 unsigned no_cgroup_migration:1; 929 /* task is frozen/stopped (used by the cgroup freezer) */ 930 unsigned frozen:1; 931 #endif 932 #ifdef CONFIG_BLK_CGROUP 933 unsigned use_memdelay:1; 934 #endif 935 #ifdef CONFIG_PSI 936 /* Stalled due to lack of memory */ 937 unsigned in_memstall:1; 938 #endif 939 #ifdef CONFIG_PAGE_OWNER 940 /* Used by page_owner=on to detect recursion in page tracking. */ 941 unsigned in_page_owner:1; 942 #endif 943 #ifdef CONFIG_EVENTFD 944 /* Recursion prevention for eventfd_signal() */ 945 unsigned in_eventfd:1; 946 #endif 947 #ifdef CONFIG_IOMMU_SVA 948 unsigned pasid_activated:1; 949 #endif 950 #ifdef CONFIG_CPU_SUP_INTEL 951 unsigned reported_split_lock:1; 952 #endif 953 #ifdef CONFIG_TASK_DELAY_ACCT 954 /* delay due to memory thrashing */ 955 unsigned in_thrashing:1; 956 #endif 957 958 unsigned long atomic_flags; /* Flags requiring atomic access. */ 959 960 struct restart_block restart_block; 961 962 pid_t pid; 963 pid_t tgid; 964 965 #ifdef CONFIG_STACKPROTECTOR 966 /* Canary value for the -fstack-protector GCC feature: */ 967 unsigned long stack_canary; 968 #endif 969 /* 970 * Pointers to the (original) parent process, youngest child, younger sibling, 971 * older sibling, respectively. (p->father can be replaced with 972 * p->real_parent->pid) 973 */ 974 975 /* Real parent process: */ 976 struct task_struct __rcu *real_parent; 977 978 /* Recipient of SIGCHLD, wait4() reports: */ 979 struct task_struct __rcu *parent; 980 981 /* 982 * Children/sibling form the list of natural children: 983 */ 984 struct list_head children; 985 struct list_head sibling; 986 struct task_struct *group_leader; 987 988 /* 989 * 'ptraced' is the list of tasks this task is using ptrace() on. 990 * 991 * This includes both natural children and PTRACE_ATTACH targets. 992 * 'ptrace_entry' is this task's link on the p->parent->ptraced list. 993 */ 994 struct list_head ptraced; 995 struct list_head ptrace_entry; 996 997 /* PID/PID hash table linkage. 
*/ 998 struct pid *thread_pid; 999 struct hlist_node pid_links[PIDTYPE_MAX]; 1000 struct list_head thread_group; 1001 struct list_head thread_node; 1002 1003 struct completion *vfork_done; 1004 1005 /* CLONE_CHILD_SETTID: */ 1006 int __user *set_child_tid; 1007 1008 /* CLONE_CHILD_CLEARTID: */ 1009 int __user *clear_child_tid; 1010 1011 /* PF_KTHREAD | PF_IO_WORKER */ 1012 void *worker_private; 1013 1014 u64 utime; 1015 u64 stime; 1016 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME 1017 u64 utimescaled; 1018 u64 stimescaled; 1019 #endif 1020 u64 gtime; 1021 struct prev_cputime prev_cputime; 1022 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN 1023 struct vtime vtime; 1024 #endif 1025 1026 #ifdef CONFIG_NO_HZ_FULL 1027 atomic_t tick_dep_mask; 1028 #endif 1029 /* Context switch counts: */ 1030 unsigned long nvcsw; 1031 unsigned long nivcsw; 1032 1033 /* Monotonic time in nsecs: */ 1034 u64 start_time; 1035 1036 /* Boot based time in nsecs: */ 1037 u64 start_boottime; 1038 1039 /* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */ 1040 unsigned long min_flt; 1041 unsigned long maj_flt; 1042 1043 /* Empty if CONFIG_POSIX_CPUTIMERS=n */ 1044 struct posix_cputimers posix_cputimers; 1045 1046 #ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK 1047 struct posix_cputimers_work posix_cputimers_work; 1048 #endif 1049 1050 /* Process credentials: */ 1051 1052 /* Tracer's credentials at attach: */ 1053 const struct cred __rcu *ptracer_cred; 1054 1055 /* Objective and real subjective task credentials (COW): */ 1056 const struct cred __rcu *real_cred; 1057 1058 /* Effective (overridable) subjective task credentials (COW): */ 1059 const struct cred __rcu *cred; 1060 1061 #ifdef CONFIG_KEYS 1062 /* Cached requested key. */ 1063 struct key *cached_requested_key; 1064 #endif 1065 1066 /* 1067 * executable name, excluding path. 
1068 * 1069 * - normally initialized setup_new_exec() 1070 * - access it with [gs]et_task_comm() 1071 * - lock it with task_lock() 1072 */ 1073 char comm[TASK_COMM_LEN]; 1074 1075 struct nameidata *nameidata; 1076 1077 #ifdef CONFIG_SYSVIPC 1078 struct sysv_sem sysvsem; 1079 struct sysv_shm sysvshm; 1080 #endif 1081 #ifdef CONFIG_DETECT_HUNG_TASK 1082 unsigned long last_switch_count; 1083 unsigned long last_switch_time; 1084 #endif 1085 /* Filesystem information: */ 1086 struct fs_struct *fs; 1087 1088 /* Open file information: */ 1089 struct files_struct *files; 1090 1091 #ifdef CONFIG_IO_URING 1092 struct io_uring_task *io_uring; 1093 #endif 1094 1095 /* Namespaces: */ 1096 struct nsproxy *nsproxy; 1097 1098 /* Signal handlers: */ 1099 struct signal_struct *signal; 1100 struct sighand_struct __rcu *sighand; 1101 sigset_t blocked; 1102 sigset_t real_blocked; 1103 /* Restored if set_restore_sigmask() was used: */ 1104 sigset_t saved_sigmask; 1105 struct sigpending pending; 1106 unsigned long sas_ss_sp; 1107 size_t sas_ss_size; 1108 unsigned int sas_ss_flags; 1109 1110 struct callback_head *task_works; 1111 1112 #ifdef CONFIG_AUDIT 1113 #ifdef CONFIG_AUDITSYSCALL 1114 struct audit_context *audit_context; 1115 #endif 1116 kuid_t loginuid; 1117 unsigned int sessionid; 1118 #endif 1119 struct seccomp seccomp; 1120 struct syscall_user_dispatch syscall_dispatch; 1121 1122 /* Thread group tracking: */ 1123 u64 parent_exec_id; 1124 u64 self_exec_id; 1125 1126 /* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */ 1127 spinlock_t alloc_lock; 1128 1129 /* Protection of the PI data structures: */ 1130 raw_spinlock_t pi_lock; 1131 1132 struct wake_q_node wake_q; 1133 1134 #ifdef CONFIG_RT_MUTEXES 1135 /* PI waiters blocked on a rt_mutex held by this task: */ 1136 struct rb_root_cached pi_waiters; 1137 /* Updated under owner's pi_lock and rq lock */ 1138 struct task_struct *pi_top_task; 1139 /* Deadlock detection and priority inheritance handling: */ 1140 struct rt_mutex_waiter *pi_blocked_on; 1141 #endif 1142 1143 #ifdef CONFIG_DEBUG_MUTEXES 1144 /* Mutex deadlock detection: */ 1145 struct mutex_waiter *blocked_on; 1146 #endif 1147 1148 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 1149 int non_block_count; 1150 #endif 1151 1152 #ifdef CONFIG_TRACE_IRQFLAGS 1153 struct irqtrace_events irqtrace; 1154 unsigned int hardirq_threaded; 1155 u64 hardirq_chain_key; 1156 int softirqs_enabled; 1157 int softirq_context; 1158 int irq_config; 1159 #endif 1160 #ifdef CONFIG_PREEMPT_RT 1161 int softirq_disable_cnt; 1162 #endif 1163 1164 #ifdef CONFIG_LOCKDEP 1165 # define MAX_LOCK_DEPTH 48UL 1166 u64 curr_chain_key; 1167 int lockdep_depth; 1168 unsigned int lockdep_recursion; 1169 struct held_lock held_locks[MAX_LOCK_DEPTH]; 1170 #endif 1171 1172 #if defined(CONFIG_UBSAN) && !defined(CONFIG_UBSAN_TRAP) 1173 unsigned int in_ubsan; 1174 #endif 1175 1176 /* Journalling filesystem info: */ 1177 void *journal_info; 1178 1179 /* Stacked block device info: */ 1180 struct bio_list *bio_list; 1181 1182 /* Stack plugging: */ 1183 struct blk_plug *plug; 1184 1185 /* VM state: */ 1186 struct reclaim_state *reclaim_state; 1187 1188 struct io_context *io_context; 1189 1190 #ifdef CONFIG_COMPACTION 1191 struct capture_control *capture_control; 1192 #endif 1193 /* Ptrace state: */ 1194 unsigned long ptrace_message; 1195 kernel_siginfo_t *last_siginfo; 1196 1197 struct task_io_accounting ioac; 1198 #ifdef CONFIG_PSI 1199 /* Pressure stall state */ 1200 unsigned int psi_flags; 1201 #endif 1202 #ifdef 
CONFIG_TASK_XACCT 1203 /* Accumulated RSS usage: */ 1204 u64 acct_rss_mem1; 1205 /* Accumulated virtual memory usage: */ 1206 u64 acct_vm_mem1; 1207 /* stime + utime since last update: */ 1208 u64 acct_timexpd; 1209 #endif 1210 #ifdef CONFIG_CPUSETS 1211 /* Protected by ->alloc_lock: */ 1212 nodemask_t mems_allowed; 1213 /* Sequence number to catch updates: */ 1214 seqcount_spinlock_t mems_allowed_seq; 1215 int cpuset_mem_spread_rotor; 1216 int cpuset_slab_spread_rotor; 1217 #endif 1218 #ifdef CONFIG_CGROUPS 1219 /* Control Group info protected by css_set_lock: */ 1220 struct css_set __rcu *cgroups; 1221 /* cg_list protected by css_set_lock and tsk->alloc_lock: */ 1222 struct list_head cg_list; 1223 #endif 1224 #ifdef CONFIG_X86_CPU_RESCTRL 1225 u32 closid; 1226 u32 rmid; 1227 #endif 1228 #ifdef CONFIG_FUTEX 1229 struct robust_list_head __user *robust_list; 1230 #ifdef CONFIG_COMPAT 1231 struct compat_robust_list_head __user *compat_robust_list; 1232 #endif 1233 struct list_head pi_state_list; 1234 struct futex_pi_state *pi_state_cache; 1235 struct mutex futex_exit_mutex; 1236 unsigned int futex_state; 1237 #endif 1238 #ifdef CONFIG_PERF_EVENTS 1239 struct perf_event_context *perf_event_ctxp; 1240 struct mutex perf_event_mutex; 1241 struct list_head perf_event_list; 1242 #endif 1243 #ifdef CONFIG_DEBUG_PREEMPT 1244 unsigned long preempt_disable_ip; 1245 #endif 1246 #ifdef CONFIG_NUMA 1247 /* Protected by alloc_lock: */ 1248 struct mempolicy *mempolicy; 1249 short il_prev; 1250 short pref_node_fork; 1251 #endif 1252 #ifdef CONFIG_NUMA_BALANCING 1253 int numa_scan_seq; 1254 unsigned int numa_scan_period; 1255 unsigned int numa_scan_period_max; 1256 int numa_preferred_nid; 1257 unsigned long numa_migrate_retry; 1258 /* Migration stamp: */ 1259 u64 node_stamp; 1260 u64 last_task_numa_placement; 1261 u64 last_sum_exec_runtime; 1262 struct callback_head numa_work; 1263 1264 /* 1265 * This pointer is only modified for current in syscall and 1266 * pagefault context (and for tasks being destroyed), so it can be read 1267 * from any of the following contexts: 1268 * - RCU read-side critical section 1269 * - current->numa_group from everywhere 1270 * - task's runqueue locked, task not running 1271 */ 1272 struct numa_group __rcu *numa_group; 1273 1274 /* 1275 * numa_faults is an array split into four regions: 1276 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer 1277 * in this precise order. 1278 * 1279 * faults_memory: Exponential decaying average of faults on a per-node 1280 * basis. Scheduling placement decisions are made based on these 1281 * counts. The values remain static for the duration of a PTE scan. 1282 * faults_cpu: Track the nodes the process was running on when a NUMA 1283 * hinting fault was incurred. 1284 * faults_memory_buffer and faults_cpu_buffer: Record faults per node 1285 * during the current scan window. When the scan completes, the counts 1286 * in faults_memory and faults_cpu decay and these values are copied. 1287 */ 1288 unsigned long *numa_faults; 1289 unsigned long total_numa_faults; 1290 1291 /* 1292 * numa_faults_locality tracks if faults recorded during the last 1293 * scan window were remote/local or failed to migrate. 
The task scan 1294 * period is adapted based on the locality of the faults with different 1295 * weights depending on whether they were shared or private faults 1296 */ 1297 unsigned long numa_faults_locality[3]; 1298 1299 unsigned long numa_pages_migrated; 1300 #endif /* CONFIG_NUMA_BALANCING */ 1301 1302 #ifdef CONFIG_RSEQ 1303 struct rseq __user *rseq; 1304 u32 rseq_len; 1305 u32 rseq_sig; 1306 /* 1307 * RmW on rseq_event_mask must be performed atomically 1308 * with respect to preemption. 1309 */ 1310 unsigned long rseq_event_mask; 1311 #endif 1312 1313 #ifdef CONFIG_SCHED_MM_CID 1314 int mm_cid; /* Current cid in mm */ 1315 int last_mm_cid; /* Most recent cid in mm */ 1316 int migrate_from_cpu; 1317 int mm_cid_active; /* Whether cid bitmap is active */ 1318 struct callback_head cid_work; 1319 #endif 1320 1321 struct tlbflush_unmap_batch tlb_ubc; 1322 1323 /* Cache last used pipe for splice(): */ 1324 struct pipe_inode_info *splice_pipe; 1325 1326 struct page_frag task_frag; 1327 1328 #ifdef CONFIG_TASK_DELAY_ACCT 1329 struct task_delay_info *delays; 1330 #endif 1331 1332 #ifdef CONFIG_FAULT_INJECTION 1333 int make_it_fail; 1334 unsigned int fail_nth; 1335 #endif 1336 /* 1337 * When (nr_dirtied >= nr_dirtied_pause), it's time to call 1338 * balance_dirty_pages() for a dirty throttling pause: 1339 */ 1340 int nr_dirtied; 1341 int nr_dirtied_pause; 1342 /* Start of a write-and-pause period: */ 1343 unsigned long dirty_paused_when; 1344 1345 #ifdef CONFIG_LATENCYTOP 1346 int latency_record_count; 1347 struct latency_record latency_record[LT_SAVECOUNT]; 1348 #endif 1349 /* 1350 * Time slack values; these are used to round up poll() and 1351 * select() etc timeout values. These are in nanoseconds. 1352 */ 1353 u64 timer_slack_ns; 1354 u64 default_timer_slack_ns; 1355 1356 #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) 1357 unsigned int kasan_depth; 1358 #endif 1359 1360 #ifdef CONFIG_KCSAN 1361 struct kcsan_ctx kcsan_ctx; 1362 #ifdef CONFIG_TRACE_IRQFLAGS 1363 struct irqtrace_events kcsan_save_irqtrace; 1364 #endif 1365 #ifdef CONFIG_KCSAN_WEAK_MEMORY 1366 int kcsan_stack_depth; 1367 #endif 1368 #endif 1369 1370 #ifdef CONFIG_KMSAN 1371 struct kmsan_ctx kmsan_ctx; 1372 #endif 1373 1374 #if IS_ENABLED(CONFIG_KUNIT) 1375 struct kunit *kunit_test; 1376 #endif 1377 1378 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 1379 /* Index of current stored address in ret_stack: */ 1380 int curr_ret_stack; 1381 int curr_ret_depth; 1382 1383 /* Stack of return addresses for return function tracing: */ 1384 struct ftrace_ret_stack *ret_stack; 1385 1386 /* Timestamp for last schedule: */ 1387 unsigned long long ftrace_timestamp; 1388 1389 /* 1390 * Number of functions that haven't been traced 1391 * because of depth overrun: 1392 */ 1393 atomic_t trace_overrun; 1394 1395 /* Pause tracing: */ 1396 atomic_t tracing_graph_pause; 1397 #endif 1398 1399 #ifdef CONFIG_TRACING 1400 /* Bitmask and counter of trace recursion: */ 1401 unsigned long trace_recursion; 1402 #endif /* CONFIG_TRACING */ 1403 1404 #ifdef CONFIG_KCOV 1405 /* See kernel/kcov.c for more details. 
*/ 1406 1407 /* Coverage collection mode enabled for this task (0 if disabled): */ 1408 unsigned int kcov_mode; 1409 1410 /* Size of the kcov_area: */ 1411 unsigned int kcov_size; 1412 1413 /* Buffer for coverage collection: */ 1414 void *kcov_area; 1415 1416 /* KCOV descriptor wired with this task or NULL: */ 1417 struct kcov *kcov; 1418 1419 /* KCOV common handle for remote coverage collection: */ 1420 u64 kcov_handle; 1421 1422 /* KCOV sequence number: */ 1423 int kcov_sequence; 1424 1425 /* Collect coverage from softirq context: */ 1426 unsigned int kcov_softirq; 1427 #endif 1428 1429 #ifdef CONFIG_MEMCG 1430 struct mem_cgroup *memcg_in_oom; 1431 gfp_t memcg_oom_gfp_mask; 1432 int memcg_oom_order; 1433 1434 /* Number of pages to reclaim on returning to userland: */ 1435 unsigned int memcg_nr_pages_over_high; 1436 1437 /* Used by memcontrol for targeted memcg charge: */ 1438 struct mem_cgroup *active_memcg; 1439 #endif 1440 1441 #ifdef CONFIG_BLK_CGROUP 1442 struct gendisk *throttle_disk; 1443 #endif 1444 1445 #ifdef CONFIG_UPROBES 1446 struct uprobe_task *utask; 1447 #endif 1448 #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE) 1449 unsigned int sequential_io; 1450 unsigned int sequential_io_avg; 1451 #endif 1452 struct kmap_ctrl kmap_ctrl; 1453 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 1454 unsigned long task_state_change; 1455 # ifdef CONFIG_PREEMPT_RT 1456 unsigned long saved_state_change; 1457 # endif 1458 #endif 1459 struct rcu_head rcu; 1460 refcount_t rcu_users; 1461 int pagefault_disabled; 1462 #ifdef CONFIG_MMU 1463 struct task_struct *oom_reaper_list; 1464 struct timer_list oom_reaper_timer; 1465 #endif 1466 #ifdef CONFIG_VMAP_STACK 1467 struct vm_struct *stack_vm_area; 1468 #endif 1469 #ifdef CONFIG_THREAD_INFO_IN_TASK 1470 /* A live task holds one reference: */ 1471 refcount_t stack_refcount; 1472 #endif 1473 #ifdef CONFIG_LIVEPATCH 1474 int patch_state; 1475 #endif 1476 #ifdef CONFIG_SECURITY 1477 /* Used by LSM modules for access restriction: */ 1478 void *security; 1479 #endif 1480 #ifdef CONFIG_BPF_SYSCALL 1481 /* Used by BPF task local storage */ 1482 struct bpf_local_storage __rcu *bpf_storage; 1483 /* Used for BPF run context */ 1484 struct bpf_run_ctx *bpf_ctx; 1485 #endif 1486 1487 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK 1488 unsigned long lowest_stack; 1489 unsigned long prev_lowest_stack; 1490 #endif 1491 1492 #ifdef CONFIG_X86_MCE 1493 void __user *mce_vaddr; 1494 __u64 mce_kflags; 1495 u64 mce_addr; 1496 __u64 mce_ripv : 1, 1497 mce_whole_page : 1, 1498 __mce_reserved : 62; 1499 struct callback_head mce_kill_me; 1500 int mce_count; 1501 #endif 1502 1503 #ifdef CONFIG_KRETPROBES 1504 struct llist_head kretprobe_instances; 1505 #endif 1506 #ifdef CONFIG_RETHOOK 1507 struct llist_head rethooks; 1508 #endif 1509 1510 #ifdef CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH 1511 /* 1512 * If L1D flush is supported on mm context switch 1513 * then we use this callback head to queue kill work 1514 * to kill tasks that are not running on SMT disabled 1515 * cores 1516 */ 1517 struct callback_head l1d_flush_kill; 1518 #endif 1519 1520 #ifdef CONFIG_RV 1521 /* 1522 * Per-task RV monitor. Nowadays fixed in RV_PER_TASK_MONITORS. 1523 * If we find justification for more monitors, we can think 1524 * about adding more or developing a dynamic method. So far, 1525 * none of these are justified. 
1526 */ 1527 union rv_task_monitor rv[RV_PER_TASK_MONITORS]; 1528 #endif 1529 1530 #ifdef CONFIG_USER_EVENTS 1531 struct user_event_mm *user_event_mm; 1532 #endif 1533 1534 /* 1535 * New fields for task_struct should be added above here, so that 1536 * they are included in the randomized portion of task_struct. 1537 */ 1538 randomized_struct_fields_end 1539 1540 /* CPU-specific state of this task: */ 1541 struct thread_struct thread; 1542 1543 /* 1544 * WARNING: on x86, 'thread_struct' contains a variable-sized 1545 * structure. It *MUST* be at the end of 'task_struct'. 1546 * 1547 * Do not put anything below here! 1548 */ 1549 }; 1550 1551 static inline struct pid *task_pid(struct task_struct *task) 1552 { 1553 return task->thread_pid; 1554 } 1555 1556 /* 1557 * the helpers to get the task's different pids as they are seen 1558 * from various namespaces 1559 * 1560 * task_xid_nr() : global id, i.e. the id seen from the init namespace; 1561 * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of 1562 * current. 1563 * task_xid_nr_ns() : id seen from the ns specified; 1564 * 1565 * see also pid_nr() etc in include/linux/pid.h 1566 */ 1567 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns); 1568 1569 static inline pid_t task_pid_nr(struct task_struct *tsk) 1570 { 1571 return tsk->pid; 1572 } 1573 1574 static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) 1575 { 1576 return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns); 1577 } 1578 1579 static inline pid_t task_pid_vnr(struct task_struct *tsk) 1580 { 1581 return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL); 1582 } 1583 1584 1585 static inline pid_t task_tgid_nr(struct task_struct *tsk) 1586 { 1587 return tsk->tgid; 1588 } 1589 1590 /** 1591 * pid_alive - check that a task structure is not stale 1592 * @p: Task structure to be checked. 1593 * 1594 * Test if a process is not yet dead (at most zombie state) 1595 * If pid_alive fails, then pointers within the task structure 1596 * can be stale and must not be dereferenced. 1597 * 1598 * Return: 1 if the process is alive. 0 otherwise. 
1599 */ 1600 static inline int pid_alive(const struct task_struct *p) 1601 { 1602 return p->thread_pid != NULL; 1603 } 1604 1605 static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) 1606 { 1607 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns); 1608 } 1609 1610 static inline pid_t task_pgrp_vnr(struct task_struct *tsk) 1611 { 1612 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL); 1613 } 1614 1615 1616 static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) 1617 { 1618 return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns); 1619 } 1620 1621 static inline pid_t task_session_vnr(struct task_struct *tsk) 1622 { 1623 return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL); 1624 } 1625 1626 static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) 1627 { 1628 return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns); 1629 } 1630 1631 static inline pid_t task_tgid_vnr(struct task_struct *tsk) 1632 { 1633 return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL); 1634 } 1635 1636 static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns) 1637 { 1638 pid_t pid = 0; 1639 1640 rcu_read_lock(); 1641 if (pid_alive(tsk)) 1642 pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns); 1643 rcu_read_unlock(); 1644 1645 return pid; 1646 } 1647 1648 static inline pid_t task_ppid_nr(const struct task_struct *tsk) 1649 { 1650 return task_ppid_nr_ns(tsk, &init_pid_ns); 1651 } 1652 1653 /* Obsolete, do not use: */ 1654 static inline pid_t task_pgrp_nr(struct task_struct *tsk) 1655 { 1656 return task_pgrp_nr_ns(tsk, &init_pid_ns); 1657 } 1658 1659 #define TASK_REPORT_IDLE (TASK_REPORT + 1) 1660 #define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1) 1661 1662 static inline unsigned int __task_state_index(unsigned int tsk_state, 1663 unsigned int tsk_exit_state) 1664 { 1665 unsigned int state = (tsk_state | tsk_exit_state) & TASK_REPORT; 1666 1667 BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX); 1668 1669 if (tsk_state == TASK_IDLE) 1670 state = TASK_REPORT_IDLE; 1671 1672 /* 1673 * We're lying here, but rather than expose a completely new task state 1674 * to userspace, we can make this appear as if the task has gone through 1675 * a regular rt_mutex_lock() call. 1676 */ 1677 if (tsk_state == TASK_RTLOCK_WAIT) 1678 state = TASK_UNINTERRUPTIBLE; 1679 1680 return fls(state); 1681 } 1682 1683 static inline unsigned int task_state_index(struct task_struct *tsk) 1684 { 1685 return __task_state_index(READ_ONCE(tsk->__state), tsk->exit_state); 1686 } 1687 1688 static inline char task_index_to_char(unsigned int state) 1689 { 1690 static const char state_char[] = "RSDTtXZPI"; 1691 1692 BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1); 1693 1694 return state_char[state]; 1695 } 1696 1697 static inline char task_state_to_char(struct task_struct *tsk) 1698 { 1699 return task_index_to_char(task_state_index(tsk)); 1700 } 1701 1702 /** 1703 * is_global_init - check if a task structure is init. Since init 1704 * is free to have sub-threads we need to check tgid. 1705 * @tsk: Task structure to be checked. 1706 * 1707 * Check if a task structure is the first user space task the kernel created. 1708 * 1709 * Return: 1 if the task structure is init. 0 otherwise. 
1710 */ 1711 static inline int is_global_init(struct task_struct *tsk) 1712 { 1713 return task_tgid_nr(tsk) == 1; 1714 } 1715 1716 extern struct pid *cad_pid; 1717 1718 /* 1719 * Per process flags 1720 */ 1721 #define PF_VCPU 0x00000001 /* I'm a virtual CPU */ 1722 #define PF_IDLE 0x00000002 /* I am an IDLE thread */ 1723 #define PF_EXITING 0x00000004 /* Getting shut down */ 1724 #define PF_POSTCOREDUMP 0x00000008 /* Coredumps should ignore this task */ 1725 #define PF_IO_WORKER 0x00000010 /* Task is an IO worker */ 1726 #define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */ 1727 #define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */ 1728 #define PF_MCE_PROCESS 0x00000080 /* Process policy on mce errors */ 1729 #define PF_SUPERPRIV 0x00000100 /* Used super-user privileges */ 1730 #define PF_DUMPCORE 0x00000200 /* Dumped core */ 1731 #define PF_SIGNALED 0x00000400 /* Killed by a signal */ 1732 #define PF_MEMALLOC 0x00000800 /* Allocating memory */ 1733 #define PF_NPROC_EXCEEDED 0x00001000 /* set_user() noticed that RLIMIT_NPROC was exceeded */ 1734 #define PF_USED_MATH 0x00002000 /* If unset the fpu must be initialized before use */ 1735 #define PF_USER_WORKER 0x00004000 /* Kernel thread cloned from userspace thread */ 1736 #define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */ 1737 #define PF__HOLE__00010000 0x00010000 1738 #define PF_KSWAPD 0x00020000 /* I am kswapd */ 1739 #define PF_MEMALLOC_NOFS 0x00040000 /* All allocation requests will inherit GFP_NOFS */ 1740 #define PF_MEMALLOC_NOIO 0x00080000 /* All allocation requests will inherit GFP_NOIO */ 1741 #define PF_LOCAL_THROTTLE 0x00100000 /* Throttle writes only against the bdi I write to, 1742 * I am cleaning dirty pages from some other bdi. */ 1743 #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ 1744 #define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ 1745 #define PF__HOLE__00800000 0x00800000 1746 #define PF__HOLE__01000000 0x01000000 1747 #define PF__HOLE__02000000 0x02000000 1748 #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */ 1749 #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ 1750 #define PF_MEMALLOC_PIN 0x10000000 /* Allocation context constrained to zones which allow long term pinning. */ 1751 #define PF__HOLE__20000000 0x20000000 1752 #define PF__HOLE__40000000 0x40000000 1753 #define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */ 1754 1755 /* 1756 * Only the _current_ task can read/write to tsk->flags, but other 1757 * tasks can access tsk->flags in readonly mode for example 1758 * with tsk_used_math (like during threaded core dumping). 1759 * There is however an exception to this rule during ptrace 1760 * or during fork: the ptracer task is allowed to write to the 1761 * child->flags of its traced child (same goes for fork, the parent 1762 * can write to the child->flags), because we're guaranteed the 1763 * child is not running and in turn not changing child->flags 1764 * at the same time the parent does it. 
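 *
 * For illustration (a sketch, not a rule stated elsewhere in this file):
 * a read-only test such as
 *
 *   if (p->flags & PF_KTHREAD)
 *           return;
 *
 * is the tolerated cross-task access pattern, while updates such as
 * current->flags |= PF_MEMALLOC must only ever target 'current' (or a
 * stopped child/tracee as described above).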
1765 */ 1766 #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0) 1767 #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0) 1768 #define clear_used_math() clear_stopped_child_used_math(current) 1769 #define set_used_math() set_stopped_child_used_math(current) 1770 1771 #define conditional_stopped_child_used_math(condition, child) \ 1772 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0) 1773 1774 #define conditional_used_math(condition) conditional_stopped_child_used_math(condition, current) 1775 1776 #define copy_to_stopped_child_used_math(child) \ 1777 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0) 1778 1779 /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */ 1780 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH) 1781 #define used_math() tsk_used_math(current) 1782 1783 static __always_inline bool is_percpu_thread(void) 1784 { 1785 #ifdef CONFIG_SMP 1786 return (current->flags & PF_NO_SETAFFINITY) && 1787 (current->nr_cpus_allowed == 1); 1788 #else 1789 return true; 1790 #endif 1791 } 1792 1793 /* Per-process atomic flags. */ 1794 #define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */ 1795 #define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */ 1796 #define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ 1797 #define PFA_SPEC_SSB_DISABLE 3 /* Speculative Store Bypass disabled */ 1798 #define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/ 1799 #define PFA_SPEC_IB_DISABLE 5 /* Indirect branch speculation restricted */ 1800 #define PFA_SPEC_IB_FORCE_DISABLE 6 /* Indirect branch speculation permanently restricted */ 1801 #define PFA_SPEC_SSB_NOEXEC 7 /* Speculative Store Bypass clear on execve() */ 1802 1803 #define TASK_PFA_TEST(name, func) \ 1804 static inline bool task_##func(struct task_struct *p) \ 1805 { return test_bit(PFA_##name, &p->atomic_flags); } 1806 1807 #define TASK_PFA_SET(name, func) \ 1808 static inline void task_set_##func(struct task_struct *p) \ 1809 { set_bit(PFA_##name, &p->atomic_flags); } 1810 1811 #define TASK_PFA_CLEAR(name, func) \ 1812 static inline void task_clear_##func(struct task_struct *p) \ 1813 { clear_bit(PFA_##name, &p->atomic_flags); } 1814 1815 TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs) 1816 TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs) 1817 1818 TASK_PFA_TEST(SPREAD_PAGE, spread_page) 1819 TASK_PFA_SET(SPREAD_PAGE, spread_page) 1820 TASK_PFA_CLEAR(SPREAD_PAGE, spread_page) 1821 1822 TASK_PFA_TEST(SPREAD_SLAB, spread_slab) 1823 TASK_PFA_SET(SPREAD_SLAB, spread_slab) 1824 TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab) 1825 1826 TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable) 1827 TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable) 1828 TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable) 1829 1830 TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec) 1831 TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec) 1832 TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec) 1833 1834 TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) 1835 TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) 1836 1837 TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable) 1838 TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable) 1839 TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable) 1840 1841 TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable) 1842 TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable) 1843 1844 static inline void 1845 
current_restore_flags(unsigned long orig_flags, unsigned long flags) 1846 { 1847 current->flags &= ~flags; 1848 current->flags |= orig_flags & flags; 1849 } 1850 1851 extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial); 1852 extern int task_can_attach(struct task_struct *p); 1853 extern int dl_bw_alloc(int cpu, u64 dl_bw); 1854 extern void dl_bw_free(int cpu, u64 dl_bw); 1855 #ifdef CONFIG_SMP 1856 extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask); 1857 extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask); 1858 extern int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node); 1859 extern void release_user_cpus_ptr(struct task_struct *p); 1860 extern int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask); 1861 extern void force_compatible_cpus_allowed_ptr(struct task_struct *p); 1862 extern void relax_compatible_cpus_allowed_ptr(struct task_struct *p); 1863 #else 1864 static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) 1865 { 1866 } 1867 static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) 1868 { 1869 if (!cpumask_test_cpu(0, new_mask)) 1870 return -EINVAL; 1871 return 0; 1872 } 1873 static inline int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node) 1874 { 1875 if (src->user_cpus_ptr) 1876 return -EINVAL; 1877 return 0; 1878 } 1879 static inline void release_user_cpus_ptr(struct task_struct *p) 1880 { 1881 WARN_ON(p->user_cpus_ptr); 1882 } 1883 1884 static inline int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask) 1885 { 1886 return 0; 1887 } 1888 #endif 1889 1890 extern int yield_to(struct task_struct *p, bool preempt); 1891 extern void set_user_nice(struct task_struct *p, long nice); 1892 extern int task_prio(const struct task_struct *p); 1893 1894 /** 1895 * task_nice - return the nice value of a given task. 1896 * @p: the task in question. 1897 * 1898 * Return: The nice value [ -20 ... 0 ... 19 ]. 1899 */ 1900 static inline int task_nice(const struct task_struct *p) 1901 { 1902 return PRIO_TO_NICE((p)->static_prio); 1903 } 1904 1905 extern int can_nice(const struct task_struct *p, const int nice); 1906 extern int task_curr(const struct task_struct *p); 1907 extern int idle_cpu(int cpu); 1908 extern int available_idle_cpu(int cpu); 1909 extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *); 1910 extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *); 1911 extern void sched_set_fifo(struct task_struct *p); 1912 extern void sched_set_fifo_low(struct task_struct *p); 1913 extern void sched_set_normal(struct task_struct *p, int nice); 1914 extern int sched_setattr(struct task_struct *, const struct sched_attr *); 1915 extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *); 1916 extern struct task_struct *idle_task(int cpu); 1917 1918 /** 1919 * is_idle_task - is the specified task an idle task? 1920 * @p: the task in question. 1921 * 1922 * Return: 1 if @p is an idle task. 0 otherwise. 
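 *
 * The test is based on the PF_IDLE flag; as a usage sketch, code running in
 * interrupt context can check is_idle_task(current) to tell whether the
 * per-CPU idle thread was interrupted.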
1923 */ 1924 static __always_inline bool is_idle_task(const struct task_struct *p) 1925 { 1926 return !!(p->flags & PF_IDLE); 1927 } 1928 1929 extern struct task_struct *curr_task(int cpu); 1930 extern void ia64_set_curr_task(int cpu, struct task_struct *p); 1931 1932 void yield(void); 1933 1934 union thread_union { 1935 #ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK 1936 struct task_struct task; 1937 #endif 1938 #ifndef CONFIG_THREAD_INFO_IN_TASK 1939 struct thread_info thread_info; 1940 #endif 1941 unsigned long stack[THREAD_SIZE/sizeof(long)]; 1942 }; 1943 1944 #ifndef CONFIG_THREAD_INFO_IN_TASK 1945 extern struct thread_info init_thread_info; 1946 #endif 1947 1948 extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)]; 1949 1950 #ifdef CONFIG_THREAD_INFO_IN_TASK 1951 # define task_thread_info(task) (&(task)->thread_info) 1952 #elif !defined(__HAVE_THREAD_FUNCTIONS) 1953 # define task_thread_info(task) ((struct thread_info *)(task)->stack) 1954 #endif 1955 1956 /* 1957 * find a task by one of its numerical ids 1958 * 1959 * find_task_by_pid_ns(): 1960 * finds a task by its pid in the specified namespace 1961 * find_task_by_vpid(): 1962 * finds a task by its virtual pid 1963 * 1964 * see also find_vpid() etc in include/linux/pid.h 1965 */ 1966 1967 extern struct task_struct *find_task_by_vpid(pid_t nr); 1968 extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns); 1969 1970 /* 1971 * find a task by its virtual pid and get the task struct 1972 */ 1973 extern struct task_struct *find_get_task_by_vpid(pid_t nr); 1974 1975 extern int wake_up_state(struct task_struct *tsk, unsigned int state); 1976 extern int wake_up_process(struct task_struct *tsk); 1977 extern void wake_up_new_task(struct task_struct *tsk); 1978 1979 #ifdef CONFIG_SMP 1980 extern void kick_process(struct task_struct *tsk); 1981 #else 1982 static inline void kick_process(struct task_struct *tsk) { } 1983 #endif 1984 1985 extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec); 1986 1987 static inline void set_task_comm(struct task_struct *tsk, const char *from) 1988 { 1989 __set_task_comm(tsk, from, false); 1990 } 1991 1992 extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk); 1993 #define get_task_comm(buf, tsk) ({ \ 1994 BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN); \ 1995 __get_task_comm(buf, sizeof(buf), tsk); \ 1996 }) 1997 1998 #ifdef CONFIG_SMP 1999 static __always_inline void scheduler_ipi(void) 2000 { 2001 /* 2002 * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting 2003 * TIF_NEED_RESCHED remotely (for the first time) will also send 2004 * this IPI. 2005 */ 2006 preempt_fold_need_resched(); 2007 } 2008 #else 2009 static inline void scheduler_ipi(void) { } 2010 #endif 2011 2012 extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state); 2013 2014 /* 2015 * Set thread flags in other task's structures. 
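 * For example, rseq_set_notify_resume() further down in this header asks a
 * (possibly remote) task to run its return-to-user work by doing
 *
 *	set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
 *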
2016 * See asm/thread_info.h for TIF_xxxx flags available: 2017 */ 2018 static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag) 2019 { 2020 set_ti_thread_flag(task_thread_info(tsk), flag); 2021 } 2022 2023 static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag) 2024 { 2025 clear_ti_thread_flag(task_thread_info(tsk), flag); 2026 } 2027 2028 static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag, 2029 bool value) 2030 { 2031 update_ti_thread_flag(task_thread_info(tsk), flag, value); 2032 } 2033 2034 static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag) 2035 { 2036 return test_and_set_ti_thread_flag(task_thread_info(tsk), flag); 2037 } 2038 2039 static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag) 2040 { 2041 return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag); 2042 } 2043 2044 static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag) 2045 { 2046 return test_ti_thread_flag(task_thread_info(tsk), flag); 2047 } 2048 2049 static inline void set_tsk_need_resched(struct task_struct *tsk) 2050 { 2051 set_tsk_thread_flag(tsk,TIF_NEED_RESCHED); 2052 } 2053 2054 static inline void clear_tsk_need_resched(struct task_struct *tsk) 2055 { 2056 clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED); 2057 } 2058 2059 static inline int test_tsk_need_resched(struct task_struct *tsk) 2060 { 2061 return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); 2062 } 2063 2064 /* 2065 * cond_resched() and cond_resched_lock(): latency reduction via 2066 * explicit rescheduling in places that are safe. The return 2067 * value indicates whether a reschedule was done in fact. 2068 * cond_resched_lock() will drop the spinlock before scheduling, 2069 */ 2070 #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC) 2071 extern int __cond_resched(void); 2072 2073 #if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) 2074 2075 void sched_dynamic_klp_enable(void); 2076 void sched_dynamic_klp_disable(void); 2077 2078 DECLARE_STATIC_CALL(cond_resched, __cond_resched); 2079 2080 static __always_inline int _cond_resched(void) 2081 { 2082 return static_call_mod(cond_resched)(); 2083 } 2084 2085 #elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) 2086 2087 extern int dynamic_cond_resched(void); 2088 2089 static __always_inline int _cond_resched(void) 2090 { 2091 return dynamic_cond_resched(); 2092 } 2093 2094 #else /* !CONFIG_PREEMPTION */ 2095 2096 static inline int _cond_resched(void) 2097 { 2098 klp_sched_try_switch(); 2099 return __cond_resched(); 2100 } 2101 2102 #endif /* PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */ 2103 2104 #else /* CONFIG_PREEMPTION && !CONFIG_PREEMPT_DYNAMIC */ 2105 2106 static inline int _cond_resched(void) 2107 { 2108 klp_sched_try_switch(); 2109 return 0; 2110 } 2111 2112 #endif /* !CONFIG_PREEMPTION || CONFIG_PREEMPT_DYNAMIC */ 2113 2114 #define cond_resched() ({ \ 2115 __might_resched(__FILE__, __LINE__, 0); \ 2116 _cond_resched(); \ 2117 }) 2118 2119 extern int __cond_resched_lock(spinlock_t *lock); 2120 extern int __cond_resched_rwlock_read(rwlock_t *lock); 2121 extern int __cond_resched_rwlock_write(rwlock_t *lock); 2122 2123 #define MIGHT_RESCHED_RCU_SHIFT 8 2124 #define MIGHT_RESCHED_PREEMPT_MASK ((1U << MIGHT_RESCHED_RCU_SHIFT) - 1) 2125 2126 #ifndef CONFIG_PREEMPT_RT 2127 /* 2128 * Non RT kernels have an elevated preempt count due to the held lock, 2129 * but are not allowed to be 
inside a RCU read side critical section 2130 */ 2131 # define PREEMPT_LOCK_RESCHED_OFFSETS PREEMPT_LOCK_OFFSET 2132 #else 2133 /* 2134 * spin/rw_lock() on RT implies rcu_read_lock(). The might_sleep() check in 2135 * cond_resched*lock() has to take that into account because it checks for 2136 * preempt_count() and rcu_preempt_depth(). 2137 */ 2138 # define PREEMPT_LOCK_RESCHED_OFFSETS \ 2139 (PREEMPT_LOCK_OFFSET + (1U << MIGHT_RESCHED_RCU_SHIFT)) 2140 #endif 2141 2142 #define cond_resched_lock(lock) ({ \ 2143 __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \ 2144 __cond_resched_lock(lock); \ 2145 }) 2146 2147 #define cond_resched_rwlock_read(lock) ({ \ 2148 __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \ 2149 __cond_resched_rwlock_read(lock); \ 2150 }) 2151 2152 #define cond_resched_rwlock_write(lock) ({ \ 2153 __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \ 2154 __cond_resched_rwlock_write(lock); \ 2155 }) 2156 2157 static inline void cond_resched_rcu(void) 2158 { 2159 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU) 2160 rcu_read_unlock(); 2161 cond_resched(); 2162 rcu_read_lock(); 2163 #endif 2164 } 2165 2166 #ifdef CONFIG_PREEMPT_DYNAMIC 2167 2168 extern bool preempt_model_none(void); 2169 extern bool preempt_model_voluntary(void); 2170 extern bool preempt_model_full(void); 2171 2172 #else 2173 2174 static inline bool preempt_model_none(void) 2175 { 2176 return IS_ENABLED(CONFIG_PREEMPT_NONE); 2177 } 2178 static inline bool preempt_model_voluntary(void) 2179 { 2180 return IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY); 2181 } 2182 static inline bool preempt_model_full(void) 2183 { 2184 return IS_ENABLED(CONFIG_PREEMPT); 2185 } 2186 2187 #endif 2188 2189 static inline bool preempt_model_rt(void) 2190 { 2191 return IS_ENABLED(CONFIG_PREEMPT_RT); 2192 } 2193 2194 /* 2195 * Does the preemption model allow non-cooperative preemption? 2196 * 2197 * For !CONFIG_PREEMPT_DYNAMIC kernels this is an exact match with 2198 * CONFIG_PREEMPTION; for CONFIG_PREEMPT_DYNAMIC this doesn't work as the 2199 * kernel is *built* with CONFIG_PREEMPTION=y but may run with e.g. the 2200 * PREEMPT_NONE model. 2201 */ 2202 static inline bool preempt_model_preemptible(void) 2203 { 2204 return preempt_model_full() || preempt_model_rt(); 2205 } 2206 2207 /* 2208 * Does a critical section need to be broken due to another 2209 * task waiting?: (technically does not depend on CONFIG_PREEMPTION, 2210 * but a general need for low latency) 2211 */ 2212 static inline int spin_needbreak(spinlock_t *lock) 2213 { 2214 #ifdef CONFIG_PREEMPTION 2215 return spin_is_contended(lock); 2216 #else 2217 return 0; 2218 #endif 2219 } 2220 2221 /* 2222 * Check if a rwlock is contended. 2223 * Returns non-zero if there is another task waiting on the rwlock. 2224 * Returns zero if the lock is not contended or the system / underlying 2225 * rwlock implementation does not support contention detection. 2226 * Technically does not depend on CONFIG_PREEMPTION, but a general need 2227 * for low latency. 2228 */ 2229 static inline int rwlock_needbreak(rwlock_t *lock) 2230 { 2231 #ifdef CONFIG_PREEMPTION 2232 return rwlock_is_contended(lock); 2233 #else 2234 return 0; 2235 #endif 2236 } 2237 2238 static __always_inline bool need_resched(void) 2239 { 2240 return unlikely(tif_need_resched()); 2241 } 2242 2243 /* 2244 * Wrappers for p->thread_info->cpu access. No-op on UP. 
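 *
 * On SMP, task_cpu(p) is a READ_ONCE() of task_thread_info(p)->cpu, i.e.
 * the CPU the task is currently assigned to; on UP it is always 0 and
 * set_task_cpu() is a no-op.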
2245 */ 2246 #ifdef CONFIG_SMP 2247 2248 static inline unsigned int task_cpu(const struct task_struct *p) 2249 { 2250 return READ_ONCE(task_thread_info(p)->cpu); 2251 } 2252 2253 extern void set_task_cpu(struct task_struct *p, unsigned int cpu); 2254 2255 #else 2256 2257 static inline unsigned int task_cpu(const struct task_struct *p) 2258 { 2259 return 0; 2260 } 2261 2262 static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) 2263 { 2264 } 2265 2266 #endif /* CONFIG_SMP */ 2267 2268 extern bool sched_task_on_rq(struct task_struct *p); 2269 extern unsigned long get_wchan(struct task_struct *p); 2270 extern struct task_struct *cpu_curr_snapshot(int cpu); 2271 2272 /* 2273 * In order to reduce various lock holder preemption latencies, provide an 2274 * interface to see if a vCPU is currently running or not. 2275 * 2276 * This allows us to terminate optimistic spin loops and block, analogous to 2277 * the native optimistic spin heuristic of testing if the lock owner task is 2278 * running or not. 2279 */ 2280 #ifndef vcpu_is_preempted 2281 static inline bool vcpu_is_preempted(int cpu) 2282 { 2283 return false; 2284 } 2285 #endif 2286 2287 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); 2288 extern long sched_getaffinity(pid_t pid, struct cpumask *mask); 2289 2290 #ifndef TASK_SIZE_OF 2291 #define TASK_SIZE_OF(tsk) TASK_SIZE 2292 #endif 2293 2294 #ifdef CONFIG_SMP 2295 static inline bool owner_on_cpu(struct task_struct *owner) 2296 { 2297 /* 2298 * Due to lock holder preemption, we skip spinning if the task is 2299 * not on a CPU or its CPU is preempted. 2300 */ 2301 return READ_ONCE(owner->on_cpu) && !vcpu_is_preempted(task_cpu(owner)); 2302 } 2303 2304 /* Returns effective CPU energy utilization, as seen by the scheduler */ 2305 unsigned long sched_cpu_util(int cpu); 2306 #endif /* CONFIG_SMP */ 2307 2308 #ifdef CONFIG_RSEQ 2309 2310 /* 2311 * Map the event mask on the user-space ABI enum rseq_cs_flags 2312 * for direct mask checks. 2313 */ 2314 enum rseq_event_mask_bits { 2315 RSEQ_EVENT_PREEMPT_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT, 2316 RSEQ_EVENT_SIGNAL_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT, 2317 RSEQ_EVENT_MIGRATE_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT, 2318 }; 2319 2320 enum rseq_event_mask { 2321 RSEQ_EVENT_PREEMPT = (1U << RSEQ_EVENT_PREEMPT_BIT), 2322 RSEQ_EVENT_SIGNAL = (1U << RSEQ_EVENT_SIGNAL_BIT), 2323 RSEQ_EVENT_MIGRATE = (1U << RSEQ_EVENT_MIGRATE_BIT), 2324 }; 2325 2326 static inline void rseq_set_notify_resume(struct task_struct *t) 2327 { 2328 if (t->rseq) 2329 set_tsk_thread_flag(t, TIF_NOTIFY_RESUME); 2330 } 2331 2332 void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs); 2333 2334 static inline void rseq_handle_notify_resume(struct ksignal *ksig, 2335 struct pt_regs *regs) 2336 { 2337 if (current->rseq) 2338 __rseq_handle_notify_resume(ksig, regs); 2339 } 2340 2341 static inline void rseq_signal_deliver(struct ksignal *ksig, 2342 struct pt_regs *regs) 2343 { 2344 preempt_disable(); 2345 __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask); 2346 preempt_enable(); 2347 rseq_handle_notify_resume(ksig, regs); 2348 } 2349 2350 /* rseq_preempt() requires preemption to be disabled. */ 2351 static inline void rseq_preempt(struct task_struct *t) 2352 { 2353 __set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask); 2354 rseq_set_notify_resume(t); 2355 } 2356 2357 /* rseq_migrate() requires preemption to be disabled.
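 * Like rseq_preempt(), it only records the event in t->rseq_event_mask and
 * sets TIF_NOTIFY_RESUME; the user-visible fixup is then performed by
 * __rseq_handle_notify_resume() on the way back to user space.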
*/ 2358 static inline void rseq_migrate(struct task_struct *t) 2359 { 2360 __set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask); 2361 rseq_set_notify_resume(t); 2362 } 2363 2364 /* 2365 * If parent process has a registered restartable sequences area, the 2366 * child inherits. Unregister rseq for a clone with CLONE_VM set. 2367 */ 2368 static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags) 2369 { 2370 if (clone_flags & CLONE_VM) { 2371 t->rseq = NULL; 2372 t->rseq_len = 0; 2373 t->rseq_sig = 0; 2374 t->rseq_event_mask = 0; 2375 } else { 2376 t->rseq = current->rseq; 2377 t->rseq_len = current->rseq_len; 2378 t->rseq_sig = current->rseq_sig; 2379 t->rseq_event_mask = current->rseq_event_mask; 2380 } 2381 } 2382 2383 static inline void rseq_execve(struct task_struct *t) 2384 { 2385 t->rseq = NULL; 2386 t->rseq_len = 0; 2387 t->rseq_sig = 0; 2388 t->rseq_event_mask = 0; 2389 } 2390 2391 #else 2392 2393 static inline void rseq_set_notify_resume(struct task_struct *t) 2394 { 2395 } 2396 static inline void rseq_handle_notify_resume(struct ksignal *ksig, 2397 struct pt_regs *regs) 2398 { 2399 } 2400 static inline void rseq_signal_deliver(struct ksignal *ksig, 2401 struct pt_regs *regs) 2402 { 2403 } 2404 static inline void rseq_preempt(struct task_struct *t) 2405 { 2406 } 2407 static inline void rseq_migrate(struct task_struct *t) 2408 { 2409 } 2410 static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags) 2411 { 2412 } 2413 static inline void rseq_execve(struct task_struct *t) 2414 { 2415 } 2416 2417 #endif 2418 2419 #ifdef CONFIG_DEBUG_RSEQ 2420 2421 void rseq_syscall(struct pt_regs *regs); 2422 2423 #else 2424 2425 static inline void rseq_syscall(struct pt_regs *regs) 2426 { 2427 } 2428 2429 #endif 2430 2431 #ifdef CONFIG_SCHED_CORE 2432 extern void sched_core_free(struct task_struct *tsk); 2433 extern void sched_core_fork(struct task_struct *p); 2434 extern int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type, 2435 unsigned long uaddr); 2436 #else 2437 static inline void sched_core_free(struct task_struct *tsk) { } 2438 static inline void sched_core_fork(struct task_struct *p) { } 2439 #endif 2440 2441 extern void sched_set_stop_task(int cpu, struct task_struct *stop); 2442 2443 #endif 2444