1 /* SPDX-License-Identifier: GPL-2.0 */ 2 #ifndef _LINUX_SCHED_H 3 #define _LINUX_SCHED_H 4 5 /* 6 * Define 'struct task_struct' and provide the main scheduler 7 * APIs (schedule(), wakeup variants, etc.) 8 */ 9 10 #include <uapi/linux/sched.h> 11 12 #include <asm/current.h> 13 #include <asm/processor.h> 14 #include <linux/thread_info.h> 15 #include <linux/preempt.h> 16 #include <linux/cpumask_types.h> 17 18 #include <linux/cache.h> 19 #include <linux/irqflags_types.h> 20 #include <linux/smp_types.h> 21 #include <linux/pid_types.h> 22 #include <linux/sem_types.h> 23 #include <linux/shm.h> 24 #include <linux/kmsan_types.h> 25 #include <linux/mutex_types.h> 26 #include <linux/plist_types.h> 27 #include <linux/hrtimer_types.h> 28 #include <linux/timer_types.h> 29 #include <linux/seccomp_types.h> 30 #include <linux/nodemask_types.h> 31 #include <linux/refcount_types.h> 32 #include <linux/resource.h> 33 #include <linux/latencytop.h> 34 #include <linux/sched/prio.h> 35 #include <linux/sched/types.h> 36 #include <linux/signal_types.h> 37 #include <linux/syscall_user_dispatch_types.h> 38 #include <linux/mm_types_task.h> 39 #include <linux/netdevice_xmit.h> 40 #include <linux/task_io_accounting.h> 41 #include <linux/posix-timers_types.h> 42 #include <linux/restart_block.h> 43 #include <uapi/linux/rseq.h> 44 #include <linux/seqlock_types.h> 45 #include <linux/kcsan.h> 46 #include <linux/rv.h> 47 #include <linux/uidgid_types.h> 48 #include <linux/tracepoint-defs.h> 49 #include <asm/kmap_size.h> 50 51 /* task_struct member predeclarations (sorted alphabetically): */ 52 struct audit_context; 53 struct bio_list; 54 struct blk_plug; 55 struct bpf_local_storage; 56 struct bpf_run_ctx; 57 struct bpf_net_context; 58 struct capture_control; 59 struct cfs_rq; 60 struct fs_struct; 61 struct futex_pi_state; 62 struct io_context; 63 struct io_uring_task; 64 struct mempolicy; 65 struct nameidata; 66 struct nsproxy; 67 struct perf_event_context; 68 struct perf_ctx_data; 69 struct pid_namespace; 70 struct pipe_inode_info; 71 struct rcu_node; 72 struct reclaim_state; 73 struct robust_list_head; 74 struct root_domain; 75 struct rq; 76 struct sched_attr; 77 struct sched_dl_entity; 78 struct seq_file; 79 struct sighand_struct; 80 struct signal_struct; 81 struct task_delay_info; 82 struct task_group; 83 struct task_struct; 84 struct user_event_mm; 85 86 #include <linux/sched/ext.h> 87 88 /* 89 * Task state bitmask. NOTE! These bits are also 90 * encoded in fs/proc/array.c: get_task_state(). 91 * 92 * We have two separate sets of flags: task->__state 93 * is about runnability, while task->exit_state are 94 * about the task exiting. Confusing, but this way 95 * modifying one set can't modify the other one by 96 * mistake. 
 */

/* Used in tsk->__state: */
#define TASK_RUNNING			0x00000000
#define TASK_INTERRUPTIBLE		0x00000001
#define TASK_UNINTERRUPTIBLE		0x00000002
#define __TASK_STOPPED			0x00000004
#define __TASK_TRACED			0x00000008
/* Used in tsk->exit_state: */
#define EXIT_DEAD			0x00000010
#define EXIT_ZOMBIE			0x00000020
#define EXIT_TRACE			(EXIT_ZOMBIE | EXIT_DEAD)
/* Used in tsk->__state again: */
#define TASK_PARKED			0x00000040
#define TASK_DEAD			0x00000080
#define TASK_WAKEKILL			0x00000100
#define TASK_WAKING			0x00000200
#define TASK_NOLOAD			0x00000400
#define TASK_NEW			0x00000800
#define TASK_RTLOCK_WAIT		0x00001000
#define TASK_FREEZABLE			0x00002000
#define __TASK_FREEZABLE_UNSAFE		(0x00004000 * IS_ENABLED(CONFIG_LOCKDEP))
#define TASK_FROZEN			0x00008000
#define TASK_STATE_MAX			0x00010000

#define TASK_ANY			(TASK_STATE_MAX-1)

/*
 * DO NOT ADD ANY NEW USERS !
 */
#define TASK_FREEZABLE_UNSAFE		(TASK_FREEZABLE | __TASK_FREEZABLE_UNSAFE)

/* Convenience macros for the sake of set_current_state: */
#define TASK_KILLABLE			(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED			(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED			__TASK_TRACED

#define TASK_IDLE			(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)

/* Convenience macros for the sake of wake_up(): */
#define TASK_NORMAL			(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)

/* get_task_state(): */
#define TASK_REPORT			(TASK_RUNNING | TASK_INTERRUPTIBLE | \
					 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
					 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
					 TASK_PARKED)

#define task_is_running(task)		(READ_ONCE((task)->__state) == TASK_RUNNING)

#define task_is_traced(task)		((READ_ONCE(task->jobctl) & JOBCTL_TRACED) != 0)
#define task_is_stopped(task)		((READ_ONCE(task->jobctl) & JOBCTL_STOPPED) != 0)
#define task_is_stopped_or_traced(task)	((READ_ONCE(task->jobctl) & (JOBCTL_STOPPED | JOBCTL_TRACED)) != 0)
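/*
 * Illustrative sketch only (not part of this header's API): a kernel thread
 * that wants an uninterruptible sleep which does not contribute to the load
 * average can use the combined TASK_IDLE state defined above, e.g.:
 *
 *	set_current_state(TASK_IDLE);
 *	if (!kthread_should_stop())
 *		schedule();
 *	__set_current_state(TASK_RUNNING);
 *
 * kthread_should_stop() is only shown here to give the sketch a wakeup
 * condition; real callers have their own.
 */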
/*
 * Special states are those that do not use the normal wait-loop pattern. See
 * the comment with set_special_state().
 */
#define is_special_task_state(state)					\
	((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED |	\
		    TASK_DEAD | TASK_FROZEN))

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
# define debug_normal_state_change(state_value)				\
	do {								\
		WARN_ON_ONCE(is_special_task_state(state_value));	\
		current->task_state_change = _THIS_IP_;			\
	} while (0)

# define debug_special_state_change(state_value)			\
	do {								\
		WARN_ON_ONCE(!is_special_task_state(state_value));	\
		current->task_state_change = _THIS_IP_;			\
	} while (0)

# define debug_rtlock_wait_set_state()					\
	do {								\
		current->saved_state_change = current->task_state_change;\
		current->task_state_change = _THIS_IP_;			\
	} while (0)

# define debug_rtlock_wait_restore_state()				\
	do {								\
		current->task_state_change = current->saved_state_change;\
	} while (0)

#else
# define debug_normal_state_change(cond)	do { } while (0)
# define debug_special_state_change(cond)	do { } while (0)
# define debug_rtlock_wait_set_state()		do { } while (0)
# define debug_rtlock_wait_restore_state()	do { } while (0)
#endif

#define trace_set_current_state(state_value)				\
	do {								\
		if (tracepoint_enabled(sched_set_state_tp))		\
			__trace_set_current_state(state_value);		\
	} while (0)

/*
 * set_current_state() includes a barrier so that the write of current->__state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *   for (;;) {
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (CONDITION)
 *	   break;
 *
 *	schedule();
 *   }
 *   __set_current_state(TASK_RUNNING);
 *
 * If the caller does not need such serialisation (because, for instance, the
 * CONDITION test and condition change and wakeup are under the same lock) then
 * use __set_current_state().
 *
 * The above is typically ordered against the wakeup, which does:
 *
 *   CONDITION = 1;
 *   wake_up_state(p, TASK_UNINTERRUPTIBLE);
 *
 * where wake_up_state()/try_to_wake_up() executes a full memory barrier before
 * accessing p->__state.
 *
 * Wakeup will do: if (@state & p->__state) p->__state = TASK_RUNNING, that is,
 * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
 * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
 *
 * However, with slightly different timing the wakeup TASK_RUNNING store can
 * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not
 * a problem either because that will result in one extra go around the loop
 * and our @cond test will save the day.
 *
 * Also see the comments of try_to_wake_up().
 */
#define __set_current_state(state_value)				\
	do {								\
		debug_normal_state_change((state_value));		\
		trace_set_current_state(state_value);			\
		WRITE_ONCE(current->__state, (state_value));		\
	} while (0)

#define set_current_state(state_value)					\
	do {								\
		debug_normal_state_change((state_value));		\
		trace_set_current_state(state_value);			\
		smp_store_mb(current->__state, (state_value));		\
	} while (0)
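/*
 * Illustrative sketch only (not part of this header's API): a killable wait
 * built from the convenience macros above typically also checks for a fatal
 * signal after setting the state, e.g.:
 *
 *	for (;;) {
 *		set_current_state(TASK_KILLABLE);
 *		if (CONDITION)
 *			break;
 *		if (fatal_signal_pending(current)) {
 *			err = -ERESTARTSYS;
 *			break;
 *		}
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * CONDITION and err stand in for caller-specific state, as in the comment
 * above; real callers usually go through wait_event_killable() and friends.
 */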
/*
 * set_special_state() should be used for those states when the blocking task
 * can not use the regular condition based wait-loop. In that case we must
 * serialize against wakeups such that any possible in-flight TASK_RUNNING
 * stores will not collide with our state change.
 */
#define set_special_state(state_value)					\
	do {								\
		unsigned long flags; /* may shadow */			\
									\
		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
		debug_special_state_change((state_value));		\
		trace_set_current_state(state_value);			\
		WRITE_ONCE(current->__state, (state_value));		\
		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
	} while (0)

/*
 * PREEMPT_RT specific variants for "sleeping" spin/rwlocks
 *
 * RT's spin/rwlock substitutions are state preserving. The state of the
 * task when blocking on the lock is saved in task_struct::saved_state and
 * restored after the lock has been acquired. These operations are
 * serialized by task_struct::pi_lock against try_to_wake_up(). Any non RT
 * lock related wakeups while the task is blocked on the lock are
 * redirected to operate on task_struct::saved_state to ensure that these
 * are not dropped. On restore task_struct::saved_state is set to
 * TASK_RUNNING so any wakeup attempt redirected to saved_state will fail.
 *
 * The lock operation looks like this:
 *
 *	current_save_and_set_rtlock_wait_state();
 *	for (;;) {
 *		if (try_lock())
 *			break;
 *		raw_spin_unlock_irq(&lock->wait_lock);
 *		schedule_rtlock();
 *		raw_spin_lock_irq(&lock->wait_lock);
 *		set_current_state(TASK_RTLOCK_WAIT);
 *	}
 *	current_restore_rtlock_saved_state();
 */
#define current_save_and_set_rtlock_wait_state()			\
	do {								\
		lockdep_assert_irqs_disabled();				\
		raw_spin_lock(&current->pi_lock);			\
		current->saved_state = current->__state;		\
		debug_rtlock_wait_set_state();				\
		trace_set_current_state(TASK_RTLOCK_WAIT);		\
		WRITE_ONCE(current->__state, TASK_RTLOCK_WAIT);		\
		raw_spin_unlock(&current->pi_lock);			\
	} while (0);

#define current_restore_rtlock_saved_state()				\
	do {								\
		lockdep_assert_irqs_disabled();				\
		raw_spin_lock(&current->pi_lock);			\
		debug_rtlock_wait_restore_state();			\
		trace_set_current_state(current->saved_state);		\
		WRITE_ONCE(current->__state, current->saved_state);	\
		current->saved_state = TASK_RUNNING;			\
		raw_spin_unlock(&current->pi_lock);			\
	} while (0);

#define get_current_state()	READ_ONCE(current->__state)
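/*
 * Illustrative sketch only: set_special_state() above is meant for states
 * such as TASK_STOPPED or TASK_TRACED that are entered without a
 * condition-based wait-loop. A much simplified job-control stop might look
 * like:
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	set_special_state(TASK_STOPPED);
 *	spin_unlock_irq(&current->sighand->siglock);
 *	schedule();
 *
 * The real stop path does considerably more bookkeeping; this only shows
 * where the state change sits relative to siglock and the schedule() call.
 */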
/*
 * Define the task command name length as enum, then it can be visible to
 * BPF programs.
 */
enum {
	TASK_COMM_LEN = 16,
};

extern void sched_tick(void);

#define	MAX_SCHEDULE_TIMEOUT		LONG_MAX

extern long schedule_timeout(long timeout);
extern long schedule_timeout_interruptible(long timeout);
extern long schedule_timeout_killable(long timeout);
extern long schedule_timeout_uninterruptible(long timeout);
extern long schedule_timeout_idle(long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);
asmlinkage void preempt_schedule_irq(void);
#ifdef CONFIG_PREEMPT_RT
extern void schedule_rtlock(void);
#endif

extern int __must_check io_schedule_prepare(void);
extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);

/* wrapper function to trace from this header file */
DECLARE_TRACEPOINT(sched_set_state_tp);
extern void __trace_set_current_state(int state_value);

/**
 * struct prev_cputime - snapshot of system and user cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 * @lock: protects the above two fields
 *
 * Stores previous user/system time values such that we can guarantee
 * monotonicity.
 */
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	u64				utime;
	u64				stime;
	raw_spinlock_t			lock;
#endif
};

enum vtime_state {
	/* Task is sleeping or running in a CPU with VTIME inactive: */
	VTIME_INACTIVE = 0,
	/* Task is idle */
	VTIME_IDLE,
	/* Task runs in kernelspace in a CPU with VTIME active: */
	VTIME_SYS,
	/* Task runs in userspace in a CPU with VTIME active: */
	VTIME_USER,
	/* Task runs as a guest in a CPU with VTIME active: */
	VTIME_GUEST,
};

struct vtime {
	seqcount_t		seqcount;
	unsigned long long	starttime;
	enum vtime_state	state;
	unsigned int		cpu;
	u64			utime;
	u64			stime;
	u64			gtime;
};

/*
 * Utilization clamp constraints.
 * @UCLAMP_MIN:	Minimum utilization
 * @UCLAMP_MAX:	Maximum utilization
 * @UCLAMP_CNT:	Utilization clamp constraints count
 */
enum uclamp_id {
	UCLAMP_MIN = 0,
	UCLAMP_MAX,
	UCLAMP_CNT
};

extern struct root_domain def_root_domain;
extern struct mutex sched_domains_mutex;
extern void sched_domains_mutex_lock(void);
extern void sched_domains_mutex_unlock(void);

struct sched_param {
	int sched_priority;
};

struct sched_info {
#ifdef CONFIG_SCHED_INFO
	/* Cumulative counters: */

	/* # of times we have run on this CPU: */
	unsigned long			pcount;

	/* Time spent waiting on a runqueue: */
	unsigned long long		run_delay;

	/* Max time spent waiting on a runqueue: */
	unsigned long long		max_run_delay;

	/* Min time spent waiting on a runqueue: */
	unsigned long long		min_run_delay;

	/* Timestamps: */

	/* When did we last run on a CPU? */
	unsigned long long		last_arrival;

	/* When were we last queued to run? */
	unsigned long long		last_queued;

#endif /* CONFIG_SCHED_INFO */
};

/*
 * Integer metrics need fixed point arithmetic, e.g., sched/fair
 * has a few: load, load_avg, util_avg, freq, and capacity.
 *
 * We define a basic fixed point arithmetic range, and then formalize
 * all these metrics based on that basic range.
440 */ 441 # define SCHED_FIXEDPOINT_SHIFT 10 442 # define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT) 443 444 /* Increase resolution of cpu_capacity calculations */ 445 # define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT 446 # define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT) 447 448 struct load_weight { 449 unsigned long weight; 450 u32 inv_weight; 451 }; 452 453 /* 454 * The load/runnable/util_avg accumulates an infinite geometric series 455 * (see __update_load_avg_cfs_rq() in kernel/sched/pelt.c). 456 * 457 * [load_avg definition] 458 * 459 * load_avg = runnable% * scale_load_down(load) 460 * 461 * [runnable_avg definition] 462 * 463 * runnable_avg = runnable% * SCHED_CAPACITY_SCALE 464 * 465 * [util_avg definition] 466 * 467 * util_avg = running% * SCHED_CAPACITY_SCALE 468 * 469 * where runnable% is the time ratio that a sched_entity is runnable and 470 * running% the time ratio that a sched_entity is running. 471 * 472 * For cfs_rq, they are the aggregated values of all runnable and blocked 473 * sched_entities. 474 * 475 * The load/runnable/util_avg doesn't directly factor frequency scaling and CPU 476 * capacity scaling. The scaling is done through the rq_clock_pelt that is used 477 * for computing those signals (see update_rq_clock_pelt()) 478 * 479 * N.B., the above ratios (runnable% and running%) themselves are in the 480 * range of [0, 1]. To do fixed point arithmetics, we therefore scale them 481 * to as large a range as necessary. This is for example reflected by 482 * util_avg's SCHED_CAPACITY_SCALE. 483 * 484 * [Overflow issue] 485 * 486 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities 487 * with the highest load (=88761), always runnable on a single cfs_rq, 488 * and should not overflow as the number already hits PID_MAX_LIMIT. 489 * 490 * For all other cases (including 32-bit kernels), struct load_weight's 491 * weight will overflow first before we do, because: 492 * 493 * Max(load_avg) <= Max(load.weight) 494 * 495 * Then it is the load_weight's responsibility to consider overflow 496 * issues. 497 */ 498 struct sched_avg { 499 u64 last_update_time; 500 u64 load_sum; 501 u64 runnable_sum; 502 u32 util_sum; 503 u32 period_contrib; 504 unsigned long load_avg; 505 unsigned long runnable_avg; 506 unsigned long util_avg; 507 unsigned int util_est; 508 } ____cacheline_aligned; 509 510 /* 511 * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg 512 * updates. When a task is dequeued, its util_est should not be updated if its 513 * util_avg has not been updated in the meantime. 514 * This information is mapped into the MSB bit of util_est at dequeue time. 515 * Since max value of util_est for a task is 1024 (PELT util_avg for a task) 516 * it is safe to use MSB. 
517 */ 518 #define UTIL_EST_WEIGHT_SHIFT 2 519 #define UTIL_AVG_UNCHANGED 0x80000000 520 521 struct sched_statistics { 522 #ifdef CONFIG_SCHEDSTATS 523 u64 wait_start; 524 u64 wait_max; 525 u64 wait_count; 526 u64 wait_sum; 527 u64 iowait_count; 528 u64 iowait_sum; 529 530 u64 sleep_start; 531 u64 sleep_max; 532 s64 sum_sleep_runtime; 533 534 u64 block_start; 535 u64 block_max; 536 s64 sum_block_runtime; 537 538 s64 exec_max; 539 u64 slice_max; 540 541 u64 nr_migrations_cold; 542 u64 nr_failed_migrations_affine; 543 u64 nr_failed_migrations_running; 544 u64 nr_failed_migrations_hot; 545 u64 nr_forced_migrations; 546 547 u64 nr_wakeups; 548 u64 nr_wakeups_sync; 549 u64 nr_wakeups_migrate; 550 u64 nr_wakeups_local; 551 u64 nr_wakeups_remote; 552 u64 nr_wakeups_affine; 553 u64 nr_wakeups_affine_attempts; 554 u64 nr_wakeups_passive; 555 u64 nr_wakeups_idle; 556 557 #ifdef CONFIG_SCHED_CORE 558 u64 core_forceidle_sum; 559 #endif 560 #endif /* CONFIG_SCHEDSTATS */ 561 } ____cacheline_aligned; 562 563 struct sched_entity { 564 /* For load-balancing: */ 565 struct load_weight load; 566 struct rb_node run_node; 567 u64 deadline; 568 u64 min_vruntime; 569 u64 min_slice; 570 571 struct list_head group_node; 572 unsigned char on_rq; 573 unsigned char sched_delayed; 574 unsigned char rel_deadline; 575 unsigned char custom_slice; 576 /* hole */ 577 578 u64 exec_start; 579 u64 sum_exec_runtime; 580 u64 prev_sum_exec_runtime; 581 u64 vruntime; 582 union { 583 /* 584 * When !@on_rq this field is vlag. 585 * When cfs_rq->curr == se (which implies @on_rq) 586 * this field is vprot. See protect_slice(). 587 */ 588 s64 vlag; 589 u64 vprot; 590 }; 591 u64 slice; 592 593 u64 nr_migrations; 594 595 #ifdef CONFIG_FAIR_GROUP_SCHED 596 int depth; 597 struct sched_entity *parent; 598 /* rq on which this entity is (to be) queued: */ 599 struct cfs_rq *cfs_rq; 600 /* rq "owned" by this entity/group: */ 601 struct cfs_rq *my_q; 602 /* cached value of my_q->h_nr_running */ 603 unsigned long runnable_weight; 604 #endif 605 606 /* 607 * Per entity load average tracking. 608 * 609 * Put into separate cache line so it does not 610 * collide with read-mostly values above. 611 */ 612 struct sched_avg avg; 613 }; 614 615 struct sched_rt_entity { 616 struct list_head run_list; 617 unsigned long timeout; 618 unsigned long watchdog_stamp; 619 unsigned int time_slice; 620 unsigned short on_rq; 621 unsigned short on_list; 622 623 struct sched_rt_entity *back; 624 #ifdef CONFIG_RT_GROUP_SCHED 625 struct sched_rt_entity *parent; 626 /* rq on which this entity is (to be) queued: */ 627 struct rt_rq *rt_rq; 628 /* rq "owned" by this entity/group: */ 629 struct rt_rq *my_q; 630 #endif 631 } __randomize_layout; 632 633 typedef bool (*dl_server_has_tasks_f)(struct sched_dl_entity *); 634 typedef struct task_struct *(*dl_server_pick_f)(struct sched_dl_entity *); 635 636 struct sched_dl_entity { 637 struct rb_node rb_node; 638 639 /* 640 * Original scheduling parameters. Copied here from sched_attr 641 * during sched_setattr(), they will remain the same until 642 * the next sched_setattr(). 643 */ 644 u64 dl_runtime; /* Maximum runtime for each instance */ 645 u64 dl_deadline; /* Relative deadline of each instance */ 646 u64 dl_period; /* Separation of two instances (period) */ 647 u64 dl_bw; /* dl_runtime / dl_period */ 648 u64 dl_density; /* dl_runtime / dl_deadline */ 649 650 /* 651 * Actual scheduling parameters. Initialized with the values above, 652 * they are continuously updated during task execution. 
Note that
 * the remaining runtime could be < 0 in case we are in overrun.
 */
	s64				runtime;	/* Remaining runtime for this instance	*/
	u64				deadline;	/* Absolute deadline for this instance	*/
	unsigned int			flags;		/* Specifying the scheduler behaviour	*/

	/*
	 * Some bool flags:
	 *
	 * @dl_throttled tells if we exhausted the runtime. If so, the
	 * task has to wait for a replenishment to be performed at the
	 * next firing of dl_timer.
	 *
	 * @dl_yielded tells if task gave up the CPU before consuming
	 * all its available runtime during the last job.
	 *
	 * @dl_non_contending tells if the task is inactive while still
	 * contributing to the active utilization. In other words, it
	 * indicates if the inactive timer has been armed and its handler
	 * has not been executed yet. This flag is useful to avoid race
	 * conditions between the inactive timer handler and the wakeup
	 * code.
	 *
	 * @dl_overrun tells if the task asked to be informed about runtime
	 * overruns.
	 *
	 * @dl_server tells if this is a server entity.
	 *
	 * @dl_defer tells if this is a deferred or regular server. For
	 * now only defer server exists.
	 *
	 * @dl_defer_armed tells if the deferrable server is waiting
	 * for the replenishment timer to activate it.
	 *
	 * @dl_server_active tells if the dlserver is active (started).
	 * dlserver is started on first cfs enqueue on an idle runqueue
	 * and is stopped when a dequeue results in 0 cfs tasks on the
	 * runqueue. In other words, dlserver is active only when the CPU's
	 * runqueue has at least one cfs task.
	 *
	 * @dl_defer_running tells if the deferrable server is actually
	 * running, skipping the defer phase.
	 */
	unsigned int			dl_throttled      : 1;
	unsigned int			dl_yielded        : 1;
	unsigned int			dl_non_contending : 1;
	unsigned int			dl_overrun	  : 1;
	unsigned int			dl_server         : 1;
	unsigned int			dl_server_active  : 1;
	unsigned int			dl_defer	  : 1;
	unsigned int			dl_defer_armed	  : 1;
	unsigned int			dl_defer_running  : 1;
	unsigned int			dl_server_idle    : 1;

	/*
	 * Bandwidth enforcement timer. Each -deadline task has its
	 * own bandwidth to be enforced, thus we need one timer per task.
	 */
	struct hrtimer			dl_timer;

	/*
	 * Inactive timer, responsible for decreasing the active utilization
	 * at the "0-lag time". When a -deadline task blocks, it contributes
	 * to GRUB's active utilization until the "0-lag time", hence a
	 * timer is needed to decrease the active utilization at the correct
	 * time.
	 */
	struct hrtimer			inactive_timer;

	/*
	 * Bits for DL-server functionality. Also see the comment near
	 * dl_server_update().
	 *
	 * @rq the runqueue this server is for
	 *
	 * @server_has_tasks() returns true if @server_pick returns a
	 * runnable task.
	 */
	struct rq			*rq;
	dl_server_has_tasks_f		server_has_tasks;
	dl_server_pick_f		server_pick_task;

#ifdef CONFIG_RT_MUTEXES
	/*
	 * Priority Inheritance. When a DEADLINE scheduling entity is boosted
	 * pi_se points to the donor, otherwise points to the dl_se it belongs
	 * to (the original one/itself).
740 */ 741 struct sched_dl_entity *pi_se; 742 #endif 743 }; 744 745 #ifdef CONFIG_UCLAMP_TASK 746 /* Number of utilization clamp buckets (shorter alias) */ 747 #define UCLAMP_BUCKETS CONFIG_UCLAMP_BUCKETS_COUNT 748 749 /* 750 * Utilization clamp for a scheduling entity 751 * @value: clamp value "assigned" to a se 752 * @bucket_id: bucket index corresponding to the "assigned" value 753 * @active: the se is currently refcounted in a rq's bucket 754 * @user_defined: the requested clamp value comes from user-space 755 * 756 * The bucket_id is the index of the clamp bucket matching the clamp value 757 * which is pre-computed and stored to avoid expensive integer divisions from 758 * the fast path. 759 * 760 * The active bit is set whenever a task has got an "effective" value assigned, 761 * which can be different from the clamp value "requested" from user-space. 762 * This allows to know a task is refcounted in the rq's bucket corresponding 763 * to the "effective" bucket_id. 764 * 765 * The user_defined bit is set whenever a task has got a task-specific clamp 766 * value requested from userspace, i.e. the system defaults apply to this task 767 * just as a restriction. This allows to relax default clamps when a less 768 * restrictive task-specific value has been requested, thus allowing to 769 * implement a "nice" semantic. For example, a task running with a 20% 770 * default boost can still drop its own boosting to 0%. 771 */ 772 struct uclamp_se { 773 unsigned int value : bits_per(SCHED_CAPACITY_SCALE); 774 unsigned int bucket_id : bits_per(UCLAMP_BUCKETS); 775 unsigned int active : 1; 776 unsigned int user_defined : 1; 777 }; 778 #endif /* CONFIG_UCLAMP_TASK */ 779 780 union rcu_special { 781 struct { 782 u8 blocked; 783 u8 need_qs; 784 u8 exp_hint; /* Hint for performance. */ 785 u8 need_mb; /* Readers need smp_mb(). */ 786 } b; /* Bits. */ 787 u32 s; /* Set of bits. */ 788 }; 789 790 enum perf_event_task_context { 791 perf_invalid_context = -1, 792 perf_hw_context = 0, 793 perf_sw_context, 794 perf_nr_task_contexts, 795 }; 796 797 /* 798 * Number of contexts where an event can trigger: 799 * task, softirq, hardirq, nmi. 800 */ 801 #define PERF_NR_CONTEXTS 4 802 803 struct wake_q_node { 804 struct wake_q_node *next; 805 }; 806 807 struct kmap_ctrl { 808 #ifdef CONFIG_KMAP_LOCAL 809 int idx; 810 pte_t pteval[KM_MAX_IDX]; 811 #endif 812 }; 813 814 struct task_struct { 815 #ifdef CONFIG_THREAD_INFO_IN_TASK 816 /* 817 * For reasons of header soup (see current_thread_info()), this 818 * must be the first element of task_struct. 819 */ 820 struct thread_info thread_info; 821 #endif 822 unsigned int __state; 823 824 /* saved state for "spinlock sleepers" */ 825 unsigned int saved_state; 826 827 /* 828 * This begins the randomizable portion of task_struct. Only 829 * scheduling-critical items should be added above here. 830 */ 831 randomized_struct_fields_start 832 833 void *stack; 834 refcount_t usage; 835 /* Per task flags (PF_*), defined further below: */ 836 unsigned int flags; 837 unsigned int ptrace; 838 839 #ifdef CONFIG_MEM_ALLOC_PROFILING 840 struct alloc_tag *alloc_tag; 841 #endif 842 843 int on_cpu; 844 struct __call_single_node wake_entry; 845 unsigned int wakee_flips; 846 unsigned long wakee_flip_decay_ts; 847 struct task_struct *last_wakee; 848 849 /* 850 * recent_used_cpu is initially set as the last CPU used by a task 851 * that wakes affine another task. Waker/wakee relationships can 852 * push tasks around a CPU where each wakeup moves to the next one. 
853 * Tracking a recently used CPU allows a quick search for a recently 854 * used CPU that may be idle. 855 */ 856 int recent_used_cpu; 857 int wake_cpu; 858 int on_rq; 859 860 int prio; 861 int static_prio; 862 int normal_prio; 863 unsigned int rt_priority; 864 865 struct sched_entity se; 866 struct sched_rt_entity rt; 867 struct sched_dl_entity dl; 868 struct sched_dl_entity *dl_server; 869 #ifdef CONFIG_SCHED_CLASS_EXT 870 struct sched_ext_entity scx; 871 #endif 872 const struct sched_class *sched_class; 873 874 #ifdef CONFIG_SCHED_CORE 875 struct rb_node core_node; 876 unsigned long core_cookie; 877 unsigned int core_occupation; 878 #endif 879 880 #ifdef CONFIG_CGROUP_SCHED 881 struct task_group *sched_task_group; 882 #endif 883 884 885 #ifdef CONFIG_UCLAMP_TASK 886 /* 887 * Clamp values requested for a scheduling entity. 888 * Must be updated with task_rq_lock() held. 889 */ 890 struct uclamp_se uclamp_req[UCLAMP_CNT]; 891 /* 892 * Effective clamp values used for a scheduling entity. 893 * Must be updated with task_rq_lock() held. 894 */ 895 struct uclamp_se uclamp[UCLAMP_CNT]; 896 #endif 897 898 struct sched_statistics stats; 899 900 #ifdef CONFIG_PREEMPT_NOTIFIERS 901 /* List of struct preempt_notifier: */ 902 struct hlist_head preempt_notifiers; 903 #endif 904 905 #ifdef CONFIG_BLK_DEV_IO_TRACE 906 unsigned int btrace_seq; 907 #endif 908 909 unsigned int policy; 910 unsigned long max_allowed_capacity; 911 int nr_cpus_allowed; 912 const cpumask_t *cpus_ptr; 913 cpumask_t *user_cpus_ptr; 914 cpumask_t cpus_mask; 915 void *migration_pending; 916 unsigned short migration_disabled; 917 unsigned short migration_flags; 918 919 #ifdef CONFIG_PREEMPT_RCU 920 int rcu_read_lock_nesting; 921 union rcu_special rcu_read_unlock_special; 922 struct list_head rcu_node_entry; 923 struct rcu_node *rcu_blocked_node; 924 #endif /* #ifdef CONFIG_PREEMPT_RCU */ 925 926 #ifdef CONFIG_TASKS_RCU 927 unsigned long rcu_tasks_nvcsw; 928 u8 rcu_tasks_holdout; 929 u8 rcu_tasks_idx; 930 int rcu_tasks_idle_cpu; 931 struct list_head rcu_tasks_holdout_list; 932 int rcu_tasks_exit_cpu; 933 struct list_head rcu_tasks_exit_list; 934 #endif /* #ifdef CONFIG_TASKS_RCU */ 935 936 #ifdef CONFIG_TASKS_TRACE_RCU 937 int trc_reader_nesting; 938 int trc_ipi_to_cpu; 939 union rcu_special trc_reader_special; 940 struct list_head trc_holdout_list; 941 struct list_head trc_blkd_node; 942 int trc_blkd_cpu; 943 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */ 944 945 struct sched_info sched_info; 946 947 struct list_head tasks; 948 struct plist_node pushable_tasks; 949 struct rb_node pushable_dl_tasks; 950 951 struct mm_struct *mm; 952 struct mm_struct *active_mm; 953 struct address_space *faults_disabled_mapping; 954 955 int exit_state; 956 int exit_code; 957 int exit_signal; 958 /* The signal sent when the parent dies: */ 959 int pdeath_signal; 960 /* JOBCTL_*, siglock protected: */ 961 unsigned long jobctl; 962 963 /* Used for emulating ABI behavior of previous Linux versions: */ 964 unsigned int personality; 965 966 /* Scheduler bits, serialized by scheduler locks: */ 967 unsigned sched_reset_on_fork:1; 968 unsigned sched_contributes_to_load:1; 969 unsigned sched_migrated:1; 970 unsigned sched_task_hot:1; 971 972 /* Force alignment to the next boundary: */ 973 unsigned :0; 974 975 /* Unserialized, strictly 'current' */ 976 977 /* 978 * This field must not be in the scheduler word above due to wakelist 979 * queueing no longer being serialized by p->on_cpu. However: 980 * 981 * p->XXX = X; ttwu() 982 * schedule() if (p->on_rq && ..) 
// false 983 * smp_mb__after_spinlock(); if (smp_load_acquire(&p->on_cpu) && //true 984 * deactivate_task() ttwu_queue_wakelist()) 985 * p->on_rq = 0; p->sched_remote_wakeup = Y; 986 * 987 * guarantees all stores of 'current' are visible before 988 * ->sched_remote_wakeup gets used, so it can be in this word. 989 */ 990 unsigned sched_remote_wakeup:1; 991 #ifdef CONFIG_RT_MUTEXES 992 unsigned sched_rt_mutex:1; 993 #endif 994 995 /* Bit to tell TOMOYO we're in execve(): */ 996 unsigned in_execve:1; 997 unsigned in_iowait:1; 998 #ifndef TIF_RESTORE_SIGMASK 999 unsigned restore_sigmask:1; 1000 #endif 1001 #ifdef CONFIG_MEMCG_V1 1002 unsigned in_user_fault:1; 1003 #endif 1004 #ifdef CONFIG_LRU_GEN 1005 /* whether the LRU algorithm may apply to this access */ 1006 unsigned in_lru_fault:1; 1007 #endif 1008 #ifdef CONFIG_COMPAT_BRK 1009 unsigned brk_randomized:1; 1010 #endif 1011 #ifdef CONFIG_CGROUPS 1012 /* disallow userland-initiated cgroup migration */ 1013 unsigned no_cgroup_migration:1; 1014 /* task is frozen/stopped (used by the cgroup freezer) */ 1015 unsigned frozen:1; 1016 #endif 1017 #ifdef CONFIG_BLK_CGROUP 1018 unsigned use_memdelay:1; 1019 #endif 1020 #ifdef CONFIG_PSI 1021 /* Stalled due to lack of memory */ 1022 unsigned in_memstall:1; 1023 #endif 1024 #ifdef CONFIG_PAGE_OWNER 1025 /* Used by page_owner=on to detect recursion in page tracking. */ 1026 unsigned in_page_owner:1; 1027 #endif 1028 #ifdef CONFIG_EVENTFD 1029 /* Recursion prevention for eventfd_signal() */ 1030 unsigned in_eventfd:1; 1031 #endif 1032 #ifdef CONFIG_ARCH_HAS_CPU_PASID 1033 unsigned pasid_activated:1; 1034 #endif 1035 #ifdef CONFIG_X86_BUS_LOCK_DETECT 1036 unsigned reported_split_lock:1; 1037 #endif 1038 #ifdef CONFIG_TASK_DELAY_ACCT 1039 /* delay due to memory thrashing */ 1040 unsigned in_thrashing:1; 1041 #endif 1042 unsigned in_nf_duplicate:1; 1043 #ifdef CONFIG_PREEMPT_RT 1044 struct netdev_xmit net_xmit; 1045 #endif 1046 unsigned long atomic_flags; /* Flags requiring atomic access. */ 1047 1048 struct restart_block restart_block; 1049 1050 pid_t pid; 1051 pid_t tgid; 1052 1053 #ifdef CONFIG_STACKPROTECTOR 1054 /* Canary value for the -fstack-protector GCC feature: */ 1055 unsigned long stack_canary; 1056 #endif 1057 /* 1058 * Pointers to the (original) parent process, youngest child, younger sibling, 1059 * older sibling, respectively. (p->father can be replaced with 1060 * p->real_parent->pid) 1061 */ 1062 1063 /* Real parent process: */ 1064 struct task_struct __rcu *real_parent; 1065 1066 /* Recipient of SIGCHLD, wait4() reports: */ 1067 struct task_struct __rcu *parent; 1068 1069 /* 1070 * Children/sibling form the list of natural children: 1071 */ 1072 struct list_head children; 1073 struct list_head sibling; 1074 struct task_struct *group_leader; 1075 1076 /* 1077 * 'ptraced' is the list of tasks this task is using ptrace() on. 1078 * 1079 * This includes both natural children and PTRACE_ATTACH targets. 1080 * 'ptrace_entry' is this task's link on the p->parent->ptraced list. 1081 */ 1082 struct list_head ptraced; 1083 struct list_head ptrace_entry; 1084 1085 /* PID/PID hash table linkage. 
*/ 1086 struct pid *thread_pid; 1087 struct hlist_node pid_links[PIDTYPE_MAX]; 1088 struct list_head thread_node; 1089 1090 struct completion *vfork_done; 1091 1092 /* CLONE_CHILD_SETTID: */ 1093 int __user *set_child_tid; 1094 1095 /* CLONE_CHILD_CLEARTID: */ 1096 int __user *clear_child_tid; 1097 1098 /* PF_KTHREAD | PF_IO_WORKER */ 1099 void *worker_private; 1100 1101 u64 utime; 1102 u64 stime; 1103 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME 1104 u64 utimescaled; 1105 u64 stimescaled; 1106 #endif 1107 u64 gtime; 1108 struct prev_cputime prev_cputime; 1109 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN 1110 struct vtime vtime; 1111 #endif 1112 1113 #ifdef CONFIG_NO_HZ_FULL 1114 atomic_t tick_dep_mask; 1115 #endif 1116 /* Context switch counts: */ 1117 unsigned long nvcsw; 1118 unsigned long nivcsw; 1119 1120 /* Monotonic time in nsecs: */ 1121 u64 start_time; 1122 1123 /* Boot based time in nsecs: */ 1124 u64 start_boottime; 1125 1126 /* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */ 1127 unsigned long min_flt; 1128 unsigned long maj_flt; 1129 1130 /* Empty if CONFIG_POSIX_CPUTIMERS=n */ 1131 struct posix_cputimers posix_cputimers; 1132 1133 #ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK 1134 struct posix_cputimers_work posix_cputimers_work; 1135 #endif 1136 1137 /* Process credentials: */ 1138 1139 /* Tracer's credentials at attach: */ 1140 const struct cred __rcu *ptracer_cred; 1141 1142 /* Objective and real subjective task credentials (COW): */ 1143 const struct cred __rcu *real_cred; 1144 1145 /* Effective (overridable) subjective task credentials (COW): */ 1146 const struct cred __rcu *cred; 1147 1148 #ifdef CONFIG_KEYS 1149 /* Cached requested key. */ 1150 struct key *cached_requested_key; 1151 #endif 1152 1153 /* 1154 * executable name, excluding path. 1155 * 1156 * - normally initialized begin_new_exec() 1157 * - set it with set_task_comm() 1158 * - strscpy_pad() to ensure it is always NUL-terminated and 1159 * zero-padded 1160 * - task_lock() to ensure the operation is atomic and the name is 1161 * fully updated. 
1162 */ 1163 char comm[TASK_COMM_LEN]; 1164 1165 struct nameidata *nameidata; 1166 1167 #ifdef CONFIG_SYSVIPC 1168 struct sysv_sem sysvsem; 1169 struct sysv_shm sysvshm; 1170 #endif 1171 #ifdef CONFIG_DETECT_HUNG_TASK 1172 unsigned long last_switch_count; 1173 unsigned long last_switch_time; 1174 #endif 1175 /* Filesystem information: */ 1176 struct fs_struct *fs; 1177 1178 /* Open file information: */ 1179 struct files_struct *files; 1180 1181 #ifdef CONFIG_IO_URING 1182 struct io_uring_task *io_uring; 1183 #endif 1184 1185 /* Namespaces: */ 1186 struct nsproxy *nsproxy; 1187 1188 /* Signal handlers: */ 1189 struct signal_struct *signal; 1190 struct sighand_struct __rcu *sighand; 1191 sigset_t blocked; 1192 sigset_t real_blocked; 1193 /* Restored if set_restore_sigmask() was used: */ 1194 sigset_t saved_sigmask; 1195 struct sigpending pending; 1196 unsigned long sas_ss_sp; 1197 size_t sas_ss_size; 1198 unsigned int sas_ss_flags; 1199 1200 struct callback_head *task_works; 1201 1202 #ifdef CONFIG_AUDIT 1203 #ifdef CONFIG_AUDITSYSCALL 1204 struct audit_context *audit_context; 1205 #endif 1206 kuid_t loginuid; 1207 unsigned int sessionid; 1208 #endif 1209 struct seccomp seccomp; 1210 struct syscall_user_dispatch syscall_dispatch; 1211 1212 /* Thread group tracking: */ 1213 u64 parent_exec_id; 1214 u64 self_exec_id; 1215 1216 /* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */ 1217 spinlock_t alloc_lock; 1218 1219 /* Protection of the PI data structures: */ 1220 raw_spinlock_t pi_lock; 1221 1222 struct wake_q_node wake_q; 1223 1224 #ifdef CONFIG_RT_MUTEXES 1225 /* PI waiters blocked on a rt_mutex held by this task: */ 1226 struct rb_root_cached pi_waiters; 1227 /* Updated under owner's pi_lock and rq lock */ 1228 struct task_struct *pi_top_task; 1229 /* Deadlock detection and priority inheritance handling: */ 1230 struct rt_mutex_waiter *pi_blocked_on; 1231 #endif 1232 1233 #ifdef CONFIG_DEBUG_MUTEXES 1234 /* Mutex deadlock detection: */ 1235 struct mutex_waiter *blocked_on; 1236 #endif 1237 1238 #ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER 1239 /* 1240 * Encoded lock address causing task block (lower 2 bits = type from 1241 * <linux/hung_task.h>). Accessed via hung_task_*() helpers. 
1242 */ 1243 unsigned long blocker; 1244 #endif 1245 1246 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 1247 int non_block_count; 1248 #endif 1249 1250 #ifdef CONFIG_TRACE_IRQFLAGS 1251 struct irqtrace_events irqtrace; 1252 unsigned int hardirq_threaded; 1253 u64 hardirq_chain_key; 1254 int softirqs_enabled; 1255 int softirq_context; 1256 int irq_config; 1257 #endif 1258 #ifdef CONFIG_PREEMPT_RT 1259 int softirq_disable_cnt; 1260 #endif 1261 1262 #ifdef CONFIG_LOCKDEP 1263 # define MAX_LOCK_DEPTH 48UL 1264 u64 curr_chain_key; 1265 int lockdep_depth; 1266 unsigned int lockdep_recursion; 1267 struct held_lock held_locks[MAX_LOCK_DEPTH]; 1268 #endif 1269 1270 #if defined(CONFIG_UBSAN) && !defined(CONFIG_UBSAN_TRAP) 1271 unsigned int in_ubsan; 1272 #endif 1273 1274 /* Journalling filesystem info: */ 1275 void *journal_info; 1276 1277 /* Stacked block device info: */ 1278 struct bio_list *bio_list; 1279 1280 /* Stack plugging: */ 1281 struct blk_plug *plug; 1282 1283 /* VM state: */ 1284 struct reclaim_state *reclaim_state; 1285 1286 struct io_context *io_context; 1287 1288 #ifdef CONFIG_COMPACTION 1289 struct capture_control *capture_control; 1290 #endif 1291 /* Ptrace state: */ 1292 unsigned long ptrace_message; 1293 kernel_siginfo_t *last_siginfo; 1294 1295 struct task_io_accounting ioac; 1296 #ifdef CONFIG_PSI 1297 /* Pressure stall state */ 1298 unsigned int psi_flags; 1299 #endif 1300 #ifdef CONFIG_TASK_XACCT 1301 /* Accumulated RSS usage: */ 1302 u64 acct_rss_mem1; 1303 /* Accumulated virtual memory usage: */ 1304 u64 acct_vm_mem1; 1305 /* stime + utime since last update: */ 1306 u64 acct_timexpd; 1307 #endif 1308 #ifdef CONFIG_CPUSETS 1309 /* Protected by ->alloc_lock: */ 1310 nodemask_t mems_allowed; 1311 /* Sequence number to catch updates: */ 1312 seqcount_spinlock_t mems_allowed_seq; 1313 int cpuset_mem_spread_rotor; 1314 #endif 1315 #ifdef CONFIG_CGROUPS 1316 /* Control Group info protected by css_set_lock: */ 1317 struct css_set __rcu *cgroups; 1318 /* cg_list protected by css_set_lock and tsk->alloc_lock: */ 1319 struct list_head cg_list; 1320 #endif 1321 #ifdef CONFIG_X86_CPU_RESCTRL 1322 u32 closid; 1323 u32 rmid; 1324 #endif 1325 #ifdef CONFIG_FUTEX 1326 struct robust_list_head __user *robust_list; 1327 #ifdef CONFIG_COMPAT 1328 struct compat_robust_list_head __user *compat_robust_list; 1329 #endif 1330 struct list_head pi_state_list; 1331 struct futex_pi_state *pi_state_cache; 1332 struct mutex futex_exit_mutex; 1333 unsigned int futex_state; 1334 #endif 1335 #ifdef CONFIG_PERF_EVENTS 1336 u8 perf_recursion[PERF_NR_CONTEXTS]; 1337 struct perf_event_context *perf_event_ctxp; 1338 struct mutex perf_event_mutex; 1339 struct list_head perf_event_list; 1340 struct perf_ctx_data __rcu *perf_ctx_data; 1341 #endif 1342 #ifdef CONFIG_DEBUG_PREEMPT 1343 unsigned long preempt_disable_ip; 1344 #endif 1345 #ifdef CONFIG_NUMA 1346 /* Protected by alloc_lock: */ 1347 struct mempolicy *mempolicy; 1348 short il_prev; 1349 u8 il_weight; 1350 short pref_node_fork; 1351 #endif 1352 #ifdef CONFIG_NUMA_BALANCING 1353 int numa_scan_seq; 1354 unsigned int numa_scan_period; 1355 unsigned int numa_scan_period_max; 1356 int numa_preferred_nid; 1357 unsigned long numa_migrate_retry; 1358 /* Migration stamp: */ 1359 u64 node_stamp; 1360 u64 last_task_numa_placement; 1361 u64 last_sum_exec_runtime; 1362 struct callback_head numa_work; 1363 1364 /* 1365 * This pointer is only modified for current in syscall and 1366 * pagefault context (and for tasks being destroyed), so it can be read 1367 * from any of the following 
contexts: 1368 * - RCU read-side critical section 1369 * - current->numa_group from everywhere 1370 * - task's runqueue locked, task not running 1371 */ 1372 struct numa_group __rcu *numa_group; 1373 1374 /* 1375 * numa_faults is an array split into four regions: 1376 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer 1377 * in this precise order. 1378 * 1379 * faults_memory: Exponential decaying average of faults on a per-node 1380 * basis. Scheduling placement decisions are made based on these 1381 * counts. The values remain static for the duration of a PTE scan. 1382 * faults_cpu: Track the nodes the process was running on when a NUMA 1383 * hinting fault was incurred. 1384 * faults_memory_buffer and faults_cpu_buffer: Record faults per node 1385 * during the current scan window. When the scan completes, the counts 1386 * in faults_memory and faults_cpu decay and these values are copied. 1387 */ 1388 unsigned long *numa_faults; 1389 unsigned long total_numa_faults; 1390 1391 /* 1392 * numa_faults_locality tracks if faults recorded during the last 1393 * scan window were remote/local or failed to migrate. The task scan 1394 * period is adapted based on the locality of the faults with different 1395 * weights depending on whether they were shared or private faults 1396 */ 1397 unsigned long numa_faults_locality[3]; 1398 1399 unsigned long numa_pages_migrated; 1400 #endif /* CONFIG_NUMA_BALANCING */ 1401 1402 #ifdef CONFIG_RSEQ 1403 struct rseq __user *rseq; 1404 u32 rseq_len; 1405 u32 rseq_sig; 1406 /* 1407 * RmW on rseq_event_mask must be performed atomically 1408 * with respect to preemption. 1409 */ 1410 unsigned long rseq_event_mask; 1411 # ifdef CONFIG_DEBUG_RSEQ 1412 /* 1413 * This is a place holder to save a copy of the rseq fields for 1414 * validation of read-only fields. The struct rseq has a 1415 * variable-length array at the end, so it cannot be used 1416 * directly. Reserve a size large enough for the known fields. 1417 */ 1418 char rseq_fields[sizeof(struct rseq)]; 1419 # endif 1420 #endif 1421 1422 #ifdef CONFIG_SCHED_MM_CID 1423 int mm_cid; /* Current cid in mm */ 1424 int last_mm_cid; /* Most recent cid in mm */ 1425 int migrate_from_cpu; 1426 int mm_cid_active; /* Whether cid bitmap is active */ 1427 struct callback_head cid_work; 1428 #endif 1429 1430 struct tlbflush_unmap_batch tlb_ubc; 1431 1432 /* Cache last used pipe for splice(): */ 1433 struct pipe_inode_info *splice_pipe; 1434 1435 struct page_frag task_frag; 1436 1437 #ifdef CONFIG_TASK_DELAY_ACCT 1438 struct task_delay_info *delays; 1439 #endif 1440 1441 #ifdef CONFIG_FAULT_INJECTION 1442 int make_it_fail; 1443 unsigned int fail_nth; 1444 #endif 1445 /* 1446 * When (nr_dirtied >= nr_dirtied_pause), it's time to call 1447 * balance_dirty_pages() for a dirty throttling pause: 1448 */ 1449 int nr_dirtied; 1450 int nr_dirtied_pause; 1451 /* Start of a write-and-pause period: */ 1452 unsigned long dirty_paused_when; 1453 1454 #ifdef CONFIG_LATENCYTOP 1455 int latency_record_count; 1456 struct latency_record latency_record[LT_SAVECOUNT]; 1457 #endif 1458 /* 1459 * Time slack values; these are used to round up poll() and 1460 * select() etc timeout values. These are in nanoseconds. 
1461 */ 1462 u64 timer_slack_ns; 1463 u64 default_timer_slack_ns; 1464 1465 #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) 1466 unsigned int kasan_depth; 1467 #endif 1468 1469 #ifdef CONFIG_KCSAN 1470 struct kcsan_ctx kcsan_ctx; 1471 #ifdef CONFIG_TRACE_IRQFLAGS 1472 struct irqtrace_events kcsan_save_irqtrace; 1473 #endif 1474 #ifdef CONFIG_KCSAN_WEAK_MEMORY 1475 int kcsan_stack_depth; 1476 #endif 1477 #endif 1478 1479 #ifdef CONFIG_KMSAN 1480 struct kmsan_ctx kmsan_ctx; 1481 #endif 1482 1483 #if IS_ENABLED(CONFIG_KUNIT) 1484 struct kunit *kunit_test; 1485 #endif 1486 1487 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 1488 /* Index of current stored address in ret_stack: */ 1489 int curr_ret_stack; 1490 int curr_ret_depth; 1491 1492 /* Stack of return addresses for return function tracing: */ 1493 unsigned long *ret_stack; 1494 1495 /* Timestamp for last schedule: */ 1496 unsigned long long ftrace_timestamp; 1497 unsigned long long ftrace_sleeptime; 1498 1499 /* 1500 * Number of functions that haven't been traced 1501 * because of depth overrun: 1502 */ 1503 atomic_t trace_overrun; 1504 1505 /* Pause tracing: */ 1506 atomic_t tracing_graph_pause; 1507 #endif 1508 1509 #ifdef CONFIG_TRACING 1510 /* Bitmask and counter of trace recursion: */ 1511 unsigned long trace_recursion; 1512 #endif /* CONFIG_TRACING */ 1513 1514 #ifdef CONFIG_KCOV 1515 /* See kernel/kcov.c for more details. */ 1516 1517 /* Coverage collection mode enabled for this task (0 if disabled): */ 1518 unsigned int kcov_mode; 1519 1520 /* Size of the kcov_area: */ 1521 unsigned int kcov_size; 1522 1523 /* Buffer for coverage collection: */ 1524 void *kcov_area; 1525 1526 /* KCOV descriptor wired with this task or NULL: */ 1527 struct kcov *kcov; 1528 1529 /* KCOV common handle for remote coverage collection: */ 1530 u64 kcov_handle; 1531 1532 /* KCOV sequence number: */ 1533 int kcov_sequence; 1534 1535 /* Collect coverage from softirq context: */ 1536 unsigned int kcov_softirq; 1537 #endif 1538 1539 #ifdef CONFIG_MEMCG_V1 1540 struct mem_cgroup *memcg_in_oom; 1541 #endif 1542 1543 #ifdef CONFIG_MEMCG 1544 /* Number of pages to reclaim on returning to userland: */ 1545 unsigned int memcg_nr_pages_over_high; 1546 1547 /* Used by memcontrol for targeted memcg charge: */ 1548 struct mem_cgroup *active_memcg; 1549 1550 /* Cache for current->cgroups->memcg->objcg lookups: */ 1551 struct obj_cgroup *objcg; 1552 #endif 1553 1554 #ifdef CONFIG_BLK_CGROUP 1555 struct gendisk *throttle_disk; 1556 #endif 1557 1558 #ifdef CONFIG_UPROBES 1559 struct uprobe_task *utask; 1560 #endif 1561 #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE) 1562 unsigned int sequential_io; 1563 unsigned int sequential_io_avg; 1564 #endif 1565 struct kmap_ctrl kmap_ctrl; 1566 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 1567 unsigned long task_state_change; 1568 # ifdef CONFIG_PREEMPT_RT 1569 unsigned long saved_state_change; 1570 # endif 1571 #endif 1572 struct rcu_head rcu; 1573 refcount_t rcu_users; 1574 int pagefault_disabled; 1575 #ifdef CONFIG_MMU 1576 struct task_struct *oom_reaper_list; 1577 struct timer_list oom_reaper_timer; 1578 #endif 1579 #ifdef CONFIG_VMAP_STACK 1580 struct vm_struct *stack_vm_area; 1581 #endif 1582 #ifdef CONFIG_THREAD_INFO_IN_TASK 1583 /* A live task holds one reference: */ 1584 refcount_t stack_refcount; 1585 #endif 1586 #ifdef CONFIG_LIVEPATCH 1587 int patch_state; 1588 #endif 1589 #ifdef CONFIG_SECURITY 1590 /* Used by LSM modules for access restriction: */ 1591 void *security; 1592 #endif 1593 #ifdef CONFIG_BPF_SYSCALL 
1594 /* Used by BPF task local storage */ 1595 struct bpf_local_storage __rcu *bpf_storage; 1596 /* Used for BPF run context */ 1597 struct bpf_run_ctx *bpf_ctx; 1598 #endif 1599 /* Used by BPF for per-TASK xdp storage */ 1600 struct bpf_net_context *bpf_net_context; 1601 1602 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK 1603 unsigned long lowest_stack; 1604 unsigned long prev_lowest_stack; 1605 #endif 1606 1607 #ifdef CONFIG_X86_MCE 1608 void __user *mce_vaddr; 1609 __u64 mce_kflags; 1610 u64 mce_addr; 1611 __u64 mce_ripv : 1, 1612 mce_whole_page : 1, 1613 __mce_reserved : 62; 1614 struct callback_head mce_kill_me; 1615 int mce_count; 1616 #endif 1617 1618 #ifdef CONFIG_KRETPROBES 1619 struct llist_head kretprobe_instances; 1620 #endif 1621 #ifdef CONFIG_RETHOOK 1622 struct llist_head rethooks; 1623 #endif 1624 1625 #ifdef CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH 1626 /* 1627 * If L1D flush is supported on mm context switch 1628 * then we use this callback head to queue kill work 1629 * to kill tasks that are not running on SMT disabled 1630 * cores 1631 */ 1632 struct callback_head l1d_flush_kill; 1633 #endif 1634 1635 #ifdef CONFIG_RV 1636 /* 1637 * Per-task RV monitor. Nowadays fixed in RV_PER_TASK_MONITORS. 1638 * If we find justification for more monitors, we can think 1639 * about adding more or developing a dynamic method. So far, 1640 * none of these are justified. 1641 */ 1642 union rv_task_monitor rv[RV_PER_TASK_MONITORS]; 1643 #endif 1644 1645 #ifdef CONFIG_USER_EVENTS 1646 struct user_event_mm *user_event_mm; 1647 #endif 1648 1649 /* CPU-specific state of this task: */ 1650 struct thread_struct thread; 1651 1652 /* 1653 * New fields for task_struct should be added above here, so that 1654 * they are included in the randomized portion of task_struct. 1655 */ 1656 randomized_struct_fields_end 1657 } __attribute__ ((aligned (64))); 1658 1659 #define TASK_REPORT_IDLE (TASK_REPORT + 1) 1660 #define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1) 1661 1662 static inline unsigned int __task_state_index(unsigned int tsk_state, 1663 unsigned int tsk_exit_state) 1664 { 1665 unsigned int state = (tsk_state | tsk_exit_state) & TASK_REPORT; 1666 1667 BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX); 1668 1669 if ((tsk_state & TASK_IDLE) == TASK_IDLE) 1670 state = TASK_REPORT_IDLE; 1671 1672 /* 1673 * We're lying here, but rather than expose a completely new task state 1674 * to userspace, we can make this appear as if the task has gone through 1675 * a regular rt_mutex_lock() call. 1676 * Report frozen tasks as uninterruptible. 
1677 */ 1678 if ((tsk_state & TASK_RTLOCK_WAIT) || (tsk_state & TASK_FROZEN)) 1679 state = TASK_UNINTERRUPTIBLE; 1680 1681 return fls(state); 1682 } 1683 1684 static inline unsigned int task_state_index(struct task_struct *tsk) 1685 { 1686 return __task_state_index(READ_ONCE(tsk->__state), tsk->exit_state); 1687 } 1688 1689 static inline char task_index_to_char(unsigned int state) 1690 { 1691 static const char state_char[] = "RSDTtXZPI"; 1692 1693 BUILD_BUG_ON(TASK_REPORT_MAX * 2 != 1 << (sizeof(state_char) - 1)); 1694 1695 return state_char[state]; 1696 } 1697 1698 static inline char task_state_to_char(struct task_struct *tsk) 1699 { 1700 return task_index_to_char(task_state_index(tsk)); 1701 } 1702 1703 extern struct pid *cad_pid; 1704 1705 /* 1706 * Per process flags 1707 */ 1708 #define PF_VCPU 0x00000001 /* I'm a virtual CPU */ 1709 #define PF_IDLE 0x00000002 /* I am an IDLE thread */ 1710 #define PF_EXITING 0x00000004 /* Getting shut down */ 1711 #define PF_POSTCOREDUMP 0x00000008 /* Coredumps should ignore this task */ 1712 #define PF_IO_WORKER 0x00000010 /* Task is an IO worker */ 1713 #define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */ 1714 #define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */ 1715 #define PF_MCE_PROCESS 0x00000080 /* Process policy on mce errors */ 1716 #define PF_SUPERPRIV 0x00000100 /* Used super-user privileges */ 1717 #define PF_DUMPCORE 0x00000200 /* Dumped core */ 1718 #define PF_SIGNALED 0x00000400 /* Killed by a signal */ 1719 #define PF_MEMALLOC 0x00000800 /* Allocating memory to free memory. See memalloc_noreclaim_save() */ 1720 #define PF_NPROC_EXCEEDED 0x00001000 /* set_user() noticed that RLIMIT_NPROC was exceeded */ 1721 #define PF_USED_MATH 0x00002000 /* If unset the fpu must be initialized before use */ 1722 #define PF_USER_WORKER 0x00004000 /* Kernel thread cloned from userspace thread */ 1723 #define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */ 1724 #define PF_KCOMPACTD 0x00010000 /* I am kcompactd */ 1725 #define PF_KSWAPD 0x00020000 /* I am kswapd */ 1726 #define PF_MEMALLOC_NOFS 0x00040000 /* All allocations inherit GFP_NOFS. See memalloc_nfs_save() */ 1727 #define PF_MEMALLOC_NOIO 0x00080000 /* All allocations inherit GFP_NOIO. See memalloc_noio_save() */ 1728 #define PF_LOCAL_THROTTLE 0x00100000 /* Throttle writes only against the bdi I write to, 1729 * I am cleaning dirty pages from some other bdi. */ 1730 #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ 1731 #define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ 1732 #define PF__HOLE__00800000 0x00800000 1733 #define PF__HOLE__01000000 0x01000000 1734 #define PF__HOLE__02000000 0x02000000 1735 #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */ 1736 #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ 1737 #define PF_MEMALLOC_PIN 0x10000000 /* Allocations constrained to zones which allow long term pinning. 1738 * See memalloc_pin_save() */ 1739 #define PF_BLOCK_TS 0x20000000 /* plug has ts that needs updating */ 1740 #define PF__HOLE__40000000 0x40000000 1741 #define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */ 1742 1743 /* 1744 * Only the _current_ task can read/write to tsk->flags, but other 1745 * tasks can access tsk->flags in readonly mode for example 1746 * with tsk_used_math (like during threaded core dumping). 
1747 * There is however an exception to this rule during ptrace 1748 * or during fork: the ptracer task is allowed to write to the 1749 * child->flags of its traced child (same goes for fork, the parent 1750 * can write to the child->flags), because we're guaranteed the 1751 * child is not running and in turn not changing child->flags 1752 * at the same time the parent does it. 1753 */ 1754 #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0) 1755 #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0) 1756 #define clear_used_math() clear_stopped_child_used_math(current) 1757 #define set_used_math() set_stopped_child_used_math(current) 1758 1759 #define conditional_stopped_child_used_math(condition, child) \ 1760 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0) 1761 1762 #define conditional_used_math(condition) conditional_stopped_child_used_math(condition, current) 1763 1764 #define copy_to_stopped_child_used_math(child) \ 1765 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0) 1766 1767 /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */ 1768 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH) 1769 #define used_math() tsk_used_math(current) 1770 1771 static __always_inline bool is_percpu_thread(void) 1772 { 1773 return (current->flags & PF_NO_SETAFFINITY) && 1774 (current->nr_cpus_allowed == 1); 1775 } 1776 1777 /* Per-process atomic flags. */ 1778 #define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */ 1779 #define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */ 1780 #define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ 1781 #define PFA_SPEC_SSB_DISABLE 3 /* Speculative Store Bypass disabled */ 1782 #define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/ 1783 #define PFA_SPEC_IB_DISABLE 5 /* Indirect branch speculation restricted */ 1784 #define PFA_SPEC_IB_FORCE_DISABLE 6 /* Indirect branch speculation permanently restricted */ 1785 #define PFA_SPEC_SSB_NOEXEC 7 /* Speculative Store Bypass clear on execve() */ 1786 1787 #define TASK_PFA_TEST(name, func) \ 1788 static inline bool task_##func(struct task_struct *p) \ 1789 { return test_bit(PFA_##name, &p->atomic_flags); } 1790 1791 #define TASK_PFA_SET(name, func) \ 1792 static inline void task_set_##func(struct task_struct *p) \ 1793 { set_bit(PFA_##name, &p->atomic_flags); } 1794 1795 #define TASK_PFA_CLEAR(name, func) \ 1796 static inline void task_clear_##func(struct task_struct *p) \ 1797 { clear_bit(PFA_##name, &p->atomic_flags); } 1798 1799 TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs) 1800 TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs) 1801 1802 TASK_PFA_TEST(SPREAD_PAGE, spread_page) 1803 TASK_PFA_SET(SPREAD_PAGE, spread_page) 1804 TASK_PFA_CLEAR(SPREAD_PAGE, spread_page) 1805 1806 TASK_PFA_TEST(SPREAD_SLAB, spread_slab) 1807 TASK_PFA_SET(SPREAD_SLAB, spread_slab) 1808 TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab) 1809 1810 TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable) 1811 TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable) 1812 TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable) 1813 1814 TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec) 1815 TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec) 1816 TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec) 1817 1818 TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) 1819 TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) 1820 1821 
static inline void
current_restore_flags(unsigned long orig_flags, unsigned long flags)
{
	current->flags &= ~flags;
	current->flags |= orig_flags & flags;
}

extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
extern int task_can_attach(struct task_struct *p);
extern int dl_bw_alloc(int cpu, u64 dl_bw);
extern void dl_bw_free(int cpu, u64 dl_bw);

/* do_set_cpus_allowed() - consider using set_cpus_allowed_ptr() instead */
extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);

/**
 * set_cpus_allowed_ptr - set CPU affinity mask of a task
 * @p: the task
 * @new_mask: CPU affinity mask
 *
 * Return: zero if successful, or a negative error code
 */
extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
extern int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node);
extern void release_user_cpus_ptr(struct task_struct *p);
extern int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask);
extern void force_compatible_cpus_allowed_ptr(struct task_struct *p);
extern void relax_compatible_cpus_allowed_ptr(struct task_struct *p);

extern int yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);

/**
 * task_nice - return the nice value of a given task.
 * @p: the task in question.
 *
 * Return: The nice value [ -20 ... 0 ... 19 ].
 */
static inline int task_nice(const struct task_struct *p)
{
	return PRIO_TO_NICE((p)->static_prio);
}

extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int available_idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
extern void sched_set_fifo(struct task_struct *p);
extern void sched_set_fifo_low(struct task_struct *p);
extern void sched_set_normal(struct task_struct *p, int nice);
extern int sched_setattr(struct task_struct *, const struct sched_attr *);
extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);
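/*
 * Editor's note: illustrative sketch only. Kernel threads that need a
 * real-time priority are expected to use the sched_set_fifo() /
 * sched_set_fifo_low() wrappers declared above rather than calling
 * sched_setscheduler() with a hand-picked numeric priority. The thread
 * function below is hypothetical and would also need <linux/kthread.h>.
 */
#if 0	/* usage sketch, not part of the kernel API */
static int example_rt_worker_thread(void *data)
{
	sched_set_fifo(current);	/* SCHED_FIFO, above normal tasks */

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}

	return 0;
}
#endif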
/**
 * is_idle_task - is the specified task an idle task?
 * @p: the task in question.
 *
 * Return: 1 if @p is an idle task. 0 otherwise.
 */
static __always_inline bool is_idle_task(const struct task_struct *p)
{
	return !!(p->flags & PF_IDLE);
}

extern struct task_struct *curr_task(int cpu);
extern void ia64_set_curr_task(int cpu, struct task_struct *p);

void yield(void);

union thread_union {
	struct task_struct task;
#ifndef CONFIG_THREAD_INFO_IN_TASK
	struct thread_info thread_info;
#endif
	unsigned long stack[THREAD_SIZE/sizeof(long)];
};

#ifndef CONFIG_THREAD_INFO_IN_TASK
extern struct thread_info init_thread_info;
#endif

extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];

#ifdef CONFIG_THREAD_INFO_IN_TASK
# define task_thread_info(task)	(&(task)->thread_info)
#else
# define task_thread_info(task)	((struct thread_info *)(task)->stack)
#endif

/*
 * find a task by one of its numerical ids
 *
 * find_task_by_pid_ns():
 *      finds a task by its pid in the specified namespace
 * find_task_by_vpid():
 *      finds a task by its virtual pid
 *
 * see also find_vpid() etc in include/linux/pid.h
 */

extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);

/*
 * find a task by its virtual pid and get the task struct
 */
extern struct task_struct *find_get_task_by_vpid(pid_t nr);

extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);

extern void kick_process(struct task_struct *tsk);

extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
#define set_task_comm(tsk, from) ({			\
	BUILD_BUG_ON(sizeof(from) != TASK_COMM_LEN);	\
	__set_task_comm(tsk, from, false);		\
})

/*
 * - Why not use task_lock()?
 *   User space can randomly change their names anyway, so locking for readers
 *   doesn't make sense. For writers, locking is probably necessary, as a race
 *   condition could lead to long-term mixed results.
 *   The strscpy_pad() in __set_task_comm() can ensure that the task comm is
 *   always NUL-terminated and zero-padded. Therefore the race condition between
 *   reader and writer is not an issue.
 *
 * - BUILD_BUG_ON() can help prevent the buf from being truncated.
 *   Since the callers don't perform any return value checks, this safeguard is
 *   necessary.
 */
#define get_task_comm(buf, tsk) ({			\
	BUILD_BUG_ON(sizeof(buf) < TASK_COMM_LEN);	\
	strscpy_pad(buf, (tsk)->comm);			\
	buf;						\
})

static __always_inline void scheduler_ipi(void)
{
	/*
	 * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
	 * TIF_NEED_RESCHED remotely (for the first time) will also send
	 * this IPI.
	 */
	preempt_fold_need_resched();
}

extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state);
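/*
 * Editor's note: illustrative sketch only. get_task_comm() requires a
 * fixed-size on-stack buffer so that the BUILD_BUG_ON() above can verify its
 * size at compile time; passing a plain pointer would fail to build. The
 * function below is hypothetical and assumes <linux/printk.h> for pr_info().
 */
#if 0	/* usage sketch, not part of the kernel API */
static void example_report_comm(struct task_struct *tsk)
{
	char comm[TASK_COMM_LEN];

	get_task_comm(comm, tsk);	/* always NUL-terminated and padded */
	pr_info("pid %d comm %s\n", tsk->pid, comm);
}
#endif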
/*
 * Set thread flags in other task's structures.
 * See asm/thread_info.h for TIF_xxxx flags available:
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag,
					  bool value)
{
	update_ti_thread_flag(task_thread_info(tsk), flag, value);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	atomic_long_andnot(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY,
			   (atomic_long_t *)&task_thread_info(tsk)->flags);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
}
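/*
 * Editor's note: illustrative sketch only. The helpers above are thin
 * wrappers around the thread_info flag accessors; the scheduler core uses
 * them roughly as sketched below when it wants a task to reschedule (the
 * real resched_curr() in kernel/sched/core.c additionally folds the flag
 * into the preempt count or sends an IPI). example_mark_resched() is
 * hypothetical.
 */
#if 0	/* usage sketch, not part of the kernel API */
static void example_mark_resched(struct task_struct *p)
{
	if (!test_tsk_need_resched(p))
		set_tsk_need_resched(p);
}
#endif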
/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe. The return
 * value indicates whether a reschedule was actually done.
 * cond_resched_lock() will drop the spinlock before scheduling.
 */
#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
extern int __cond_resched(void);

#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)

DECLARE_STATIC_CALL(cond_resched, __cond_resched);

static __always_inline int _cond_resched(void)
{
	return static_call_mod(cond_resched)();
}

#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)

extern int dynamic_cond_resched(void);

static __always_inline int _cond_resched(void)
{
	return dynamic_cond_resched();
}

#else /* !CONFIG_PREEMPTION */

static inline int _cond_resched(void)
{
	return __cond_resched();
}

#endif /* PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */

#else /* CONFIG_PREEMPTION && !CONFIG_PREEMPT_DYNAMIC */

static inline int _cond_resched(void)
{
	return 0;
}

#endif /* !CONFIG_PREEMPTION || CONFIG_PREEMPT_DYNAMIC */

#define cond_resched() ({			\
	__might_resched(__FILE__, __LINE__, 0);	\
	_cond_resched();			\
})

extern int __cond_resched_lock(spinlock_t *lock);
extern int __cond_resched_rwlock_read(rwlock_t *lock);
extern int __cond_resched_rwlock_write(rwlock_t *lock);

#define MIGHT_RESCHED_RCU_SHIFT		8
#define MIGHT_RESCHED_PREEMPT_MASK	((1U << MIGHT_RESCHED_RCU_SHIFT) - 1)

#ifndef CONFIG_PREEMPT_RT
/*
 * Non RT kernels have an elevated preempt count due to the held lock,
 * but are not allowed to be inside a RCU read side critical section
 */
# define PREEMPT_LOCK_RESCHED_OFFSETS	PREEMPT_LOCK_OFFSET
#else
/*
 * spin/rw_lock() on RT implies rcu_read_lock(). The might_sleep() check in
 * cond_resched*lock() has to take that into account because it checks for
 * preempt_count() and rcu_preempt_depth().
 */
# define PREEMPT_LOCK_RESCHED_OFFSETS	\
	(PREEMPT_LOCK_OFFSET + (1U << MIGHT_RESCHED_RCU_SHIFT))
#endif

#define cond_resched_lock(lock) ({						\
	__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS);	\
	__cond_resched_lock(lock);						\
})

#define cond_resched_rwlock_read(lock) ({					\
	__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS);	\
	__cond_resched_rwlock_read(lock);					\
})

#define cond_resched_rwlock_write(lock) ({					\
	__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS);	\
	__cond_resched_rwlock_write(lock);					\
})

static __always_inline bool need_resched(void)
{
	return unlikely(tif_need_resched());
}
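/*
 * Editor's note: illustrative sketch only. On non-preemptible kernels a
 * long-running loop in process context should call cond_resched() to bound
 * scheduling latency; the macro also provides a might_sleep()-style debug
 * check via __might_resched(). example_scrub() and its arguments are
 * hypothetical.
 */
#if 0	/* usage sketch, not part of the kernel API */
static void example_scrub(unsigned long *words, unsigned long nr)
{
	unsigned long i;

	for (i = 0; i < nr; i++) {
		words[i] = 0;
		cond_resched();		/* safe point to reschedule if needed */
	}
}
#endif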
/*
 * Wrappers for p->thread_info->cpu access. No-op on UP.
 */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return READ_ONCE(task_thread_info(p)->cpu);
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif /* CONFIG_SMP */

static inline bool task_is_runnable(struct task_struct *p)
{
	return p->on_rq && !p->se.sched_delayed;
}

extern bool sched_task_on_rq(struct task_struct *p);
extern unsigned long get_wchan(struct task_struct *p);
extern struct task_struct *cpu_curr_snapshot(int cpu);

#include <linux/spinlock.h>

/*
 * To reduce various lock holder preemption latencies, provide an
 * interface to see if a vCPU is currently running or not.
 *
 * This allows us to terminate optimistic spin loops and block, analogous to
 * the native optimistic spin heuristic of testing if the lock owner task is
 * running or not.
 */
#ifndef vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	return false;
}
#endif

extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk)	TASK_SIZE
#endif

static inline bool owner_on_cpu(struct task_struct *owner)
{
	/*
	 * Due to lock holder preemption, we skip spinning if the
	 * task is not on a CPU or its CPU is preempted.
	 */
	return READ_ONCE(owner->on_cpu) && !vcpu_is_preempted(task_cpu(owner));
}

/* Returns effective CPU energy utilization, as seen by the scheduler */
unsigned long sched_cpu_util(int cpu);

#ifdef CONFIG_SCHED_CORE
extern void sched_core_free(struct task_struct *tsk);
extern void sched_core_fork(struct task_struct *p);
extern int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,
				unsigned long uaddr);
extern int sched_core_idle_cpu(int cpu);
#else
static inline void sched_core_free(struct task_struct *tsk) { }
static inline void sched_core_fork(struct task_struct *p) { }
static inline int sched_core_idle_cpu(int cpu) { return idle_cpu(cpu); }
#endif

extern void sched_set_stop_task(int cpu, struct task_struct *stop);

#ifdef CONFIG_MEM_ALLOC_PROFILING
static __always_inline struct alloc_tag *alloc_tag_save(struct alloc_tag *tag)
{
	swap(current->alloc_tag, tag);
	return tag;
}

static __always_inline void alloc_tag_restore(struct alloc_tag *tag, struct alloc_tag *old)
{
#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
	WARN(current->alloc_tag != tag, "current->alloc_tag was changed:\n");
#endif
	current->alloc_tag = old;
}
#else
#define alloc_tag_save(_tag)			NULL
#define alloc_tag_restore(_tag, _old)		do {} while (0)
#endif

#endif /* _LINUX_SCHED_H */
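/*
 * Editor's note: illustrative sketch only, deliberately placed after the
 * include guard; it is not part of the header. owner_on_cpu() above is the
 * building block used by optimistic-spinning lock paths: keep spinning only
 * while the lock owner is running on a CPU and its vCPU has not been
 * preempted. example_spin_on_owner() is hypothetical.
 */
#if 0	/* usage sketch, not part of the kernel API */
static bool example_spin_on_owner(struct task_struct *owner)
{
	while (owner_on_cpu(owner)) {
		if (need_resched())
			return false;	/* stop spinning and block instead */
		cpu_relax();
	}

	return true;
}
#endif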