1 /* SPDX-License-Identifier: GPL-2.0 */ 2 #ifndef _LINUX_SCHED_H 3 #define _LINUX_SCHED_H 4 5 /* 6 * Define 'struct task_struct' and provide the main scheduler 7 * APIs (schedule(), wakeup variants, etc.) 8 */ 9 10 #include <uapi/linux/sched.h> 11 12 #include <asm/current.h> 13 #include <asm/processor.h> 14 #include <linux/thread_info.h> 15 #include <linux/preempt.h> 16 #include <linux/cpumask_types.h> 17 18 #include <linux/cache.h> 19 #include <linux/irqflags_types.h> 20 #include <linux/smp_types.h> 21 #include <linux/pid_types.h> 22 #include <linux/sem_types.h> 23 #include <linux/shm.h> 24 #include <linux/kmsan_types.h> 25 #include <linux/mutex_types.h> 26 #include <linux/plist_types.h> 27 #include <linux/hrtimer_types.h> 28 #include <linux/timer_types.h> 29 #include <linux/seccomp_types.h> 30 #include <linux/nodemask_types.h> 31 #include <linux/refcount_types.h> 32 #include <linux/resource.h> 33 #include <linux/latencytop.h> 34 #include <linux/sched/prio.h> 35 #include <linux/sched/types.h> 36 #include <linux/signal_types.h> 37 #include <linux/syscall_user_dispatch_types.h> 38 #include <linux/mm_types_task.h> 39 #include <linux/netdevice_xmit.h> 40 #include <linux/task_io_accounting.h> 41 #include <linux/posix-timers_types.h> 42 #include <linux/restart_block.h> 43 #include <uapi/linux/rseq.h> 44 #include <linux/seqlock_types.h> 45 #include <linux/kcsan.h> 46 #include <linux/rv.h> 47 #include <linux/livepatch_sched.h> 48 #include <linux/uidgid_types.h> 49 #include <asm/kmap_size.h> 50 51 /* task_struct member predeclarations (sorted alphabetically): */ 52 struct audit_context; 53 struct bio_list; 54 struct blk_plug; 55 struct bpf_local_storage; 56 struct bpf_run_ctx; 57 struct bpf_net_context; 58 struct capture_control; 59 struct cfs_rq; 60 struct fs_struct; 61 struct futex_pi_state; 62 struct io_context; 63 struct io_uring_task; 64 struct mempolicy; 65 struct nameidata; 66 struct nsproxy; 67 struct perf_event_context; 68 struct pid_namespace; 69 struct pipe_inode_info; 70 struct rcu_node; 71 struct reclaim_state; 72 struct robust_list_head; 73 struct root_domain; 74 struct rq; 75 struct sched_attr; 76 struct sched_dl_entity; 77 struct seq_file; 78 struct sighand_struct; 79 struct signal_struct; 80 struct task_delay_info; 81 struct task_group; 82 struct task_struct; 83 struct user_event_mm; 84 85 #include <linux/sched/ext.h> 86 87 /* 88 * Task state bitmask. NOTE! These bits are also 89 * encoded in fs/proc/array.c: get_task_state(). 90 * 91 * We have two separate sets of flags: task->__state 92 * is about runnability, while task->exit_state are 93 * about the task exiting. Confusing, but this way 94 * modifying one set can't modify the other one by 95 * mistake. 
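 *
 * The values below are individual bits, so compound states are simply ORs
 * of them; e.g. TASK_KILLABLE further down is
 * TASK_WAKEKILL | TASK_UNINTERRUPTIBLE (0x00000100 | 0x00000002 = 0x00000102).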
96 */ 97 98 /* Used in tsk->__state: */ 99 #define TASK_RUNNING 0x00000000 100 #define TASK_INTERRUPTIBLE 0x00000001 101 #define TASK_UNINTERRUPTIBLE 0x00000002 102 #define __TASK_STOPPED 0x00000004 103 #define __TASK_TRACED 0x00000008 104 /* Used in tsk->exit_state: */ 105 #define EXIT_DEAD 0x00000010 106 #define EXIT_ZOMBIE 0x00000020 107 #define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD) 108 /* Used in tsk->__state again: */ 109 #define TASK_PARKED 0x00000040 110 #define TASK_DEAD 0x00000080 111 #define TASK_WAKEKILL 0x00000100 112 #define TASK_WAKING 0x00000200 113 #define TASK_NOLOAD 0x00000400 114 #define TASK_NEW 0x00000800 115 #define TASK_RTLOCK_WAIT 0x00001000 116 #define TASK_FREEZABLE 0x00002000 117 #define __TASK_FREEZABLE_UNSAFE (0x00004000 * IS_ENABLED(CONFIG_LOCKDEP)) 118 #define TASK_FROZEN 0x00008000 119 #define TASK_STATE_MAX 0x00010000 120 121 #define TASK_ANY (TASK_STATE_MAX-1) 122 123 /* 124 * DO NOT ADD ANY NEW USERS ! 125 */ 126 #define TASK_FREEZABLE_UNSAFE (TASK_FREEZABLE | __TASK_FREEZABLE_UNSAFE) 127 128 /* Convenience macros for the sake of set_current_state: */ 129 #define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) 130 #define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED) 131 #define TASK_TRACED __TASK_TRACED 132 133 #define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD) 134 135 /* Convenience macros for the sake of wake_up(): */ 136 #define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE) 137 138 /* get_task_state(): */ 139 #define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \ 140 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \ 141 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \ 142 TASK_PARKED) 143 144 #define task_is_running(task) (READ_ONCE((task)->__state) == TASK_RUNNING) 145 146 #define task_is_traced(task) ((READ_ONCE(task->jobctl) & JOBCTL_TRACED) != 0) 147 #define task_is_stopped(task) ((READ_ONCE(task->jobctl) & JOBCTL_STOPPED) != 0) 148 #define task_is_stopped_or_traced(task) ((READ_ONCE(task->jobctl) & (JOBCTL_STOPPED | JOBCTL_TRACED)) != 0) 149 150 /* 151 * Special states are those that do not use the normal wait-loop pattern. See 152 * the comment with set_special_state(). 
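 *
 * By contrast, the "normal" states are entered via the usual wait-loop
 * pattern, which re-checks its condition around schedule(); an illustrative
 * killable sleep (not taken from any particular caller) looks like:
 *
 *	for (;;) {
 *		set_current_state(TASK_KILLABLE);
 *		if (CONDITION || fatal_signal_pending(current))
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);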
 */
#define is_special_task_state(state)					\
	((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED |	\
		    TASK_DEAD | TASK_FROZEN))

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
# define debug_normal_state_change(state_value)				\
	do {								\
		WARN_ON_ONCE(is_special_task_state(state_value));	\
		current->task_state_change = _THIS_IP_;		\
	} while (0)

# define debug_special_state_change(state_value)			\
	do {								\
		WARN_ON_ONCE(!is_special_task_state(state_value));	\
		current->task_state_change = _THIS_IP_;		\
	} while (0)

# define debug_rtlock_wait_set_state()					 \
	do {								 \
		current->saved_state_change = current->task_state_change;\
		current->task_state_change = _THIS_IP_;		 \
	} while (0)

# define debug_rtlock_wait_restore_state()				 \
	do {								 \
		current->task_state_change = current->saved_state_change;\
	} while (0)

#else
# define debug_normal_state_change(cond)	do { } while (0)
# define debug_special_state_change(cond)	do { } while (0)
# define debug_rtlock_wait_set_state()		do { } while (0)
# define debug_rtlock_wait_restore_state()	do { } while (0)
#endif

/*
 * set_current_state() includes a barrier so that the write of current->__state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *   for (;;) {
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (CONDITION)
 *	   break;
 *
 *	schedule();
 *   }
 *   __set_current_state(TASK_RUNNING);
 *
 * If the caller does not need such serialisation (because, for instance, the
 * CONDITION test and condition change and wakeup are under the same lock) then
 * use __set_current_state().
 *
 * The above is typically ordered against the wakeup, which does:
 *
 *   CONDITION = 1;
 *   wake_up_state(p, TASK_UNINTERRUPTIBLE);
 *
 * where wake_up_state()/try_to_wake_up() executes a full memory barrier before
 * accessing p->__state.
 *
 * Wakeup will do: if (@state & p->__state) p->__state = TASK_RUNNING, that is,
 * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
 * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
 *
 * However, with slightly different timing the wakeup TASK_RUNNING store can
 * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not
 * a problem either because that will result in one extra go around the loop
 * and our @cond test will save the day.
 *
 * Also see the comments of try_to_wake_up().
 */
#define __set_current_state(state_value)				\
	do {								\
		debug_normal_state_change((state_value));		\
		WRITE_ONCE(current->__state, (state_value));		\
	} while (0)

#define set_current_state(state_value)					\
	do {								\
		debug_normal_state_change((state_value));		\
		smp_store_mb(current->__state, (state_value));		\
	} while (0)

/*
 * set_special_state() should be used for those states when the blocking task
 * can not use the regular condition based wait-loop. In that case we must
 * serialize against wakeups such that any possible in-flight TASK_RUNNING
 * stores will not collide with our state change.
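 *
 * An illustrative (simplified) user is the ptrace stop path, which has no
 * condition to re-check in a loop and therefore must publish the new state
 * atomically with respect to concurrent wakeups:
 *
 *	set_special_state(TASK_TRACED);
 *	...
 *	schedule();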
 */
#define set_special_state(state_value)					\
	do {								\
		unsigned long flags; /* may shadow */			\
									\
		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
		debug_special_state_change((state_value));		\
		WRITE_ONCE(current->__state, (state_value));		\
		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
	} while (0)

/*
 * PREEMPT_RT specific variants for "sleeping" spin/rwlocks
 *
 * RT's spin/rwlock substitutions are state preserving. The state of the
 * task when blocking on the lock is saved in task_struct::saved_state and
 * restored after the lock has been acquired. These operations are
 * serialized by task_struct::pi_lock against try_to_wake_up(). Any non RT
 * lock related wakeups while the task is blocked on the lock are
 * redirected to operate on task_struct::saved_state to ensure that these
 * are not dropped. On restore task_struct::saved_state is set to
 * TASK_RUNNING so any wakeup attempt redirected to saved_state will fail.
 *
 * The lock operation looks like this:
 *
 *	current_save_and_set_rtlock_wait_state();
 *	for (;;) {
 *		if (try_lock())
 *			break;
 *		raw_spin_unlock_irq(&lock->wait_lock);
 *		schedule_rtlock();
 *		raw_spin_lock_irq(&lock->wait_lock);
 *		set_current_state(TASK_RTLOCK_WAIT);
 *	}
 *	current_restore_rtlock_saved_state();
 */
#define current_save_and_set_rtlock_wait_state()			\
	do {								\
		lockdep_assert_irqs_disabled();				\
		raw_spin_lock(&current->pi_lock);			\
		current->saved_state = current->__state;		\
		debug_rtlock_wait_set_state();				\
		WRITE_ONCE(current->__state, TASK_RTLOCK_WAIT);		\
		raw_spin_unlock(&current->pi_lock);			\
	} while (0);

#define current_restore_rtlock_saved_state()				\
	do {								\
		lockdep_assert_irqs_disabled();				\
		raw_spin_lock(&current->pi_lock);			\
		debug_rtlock_wait_restore_state();			\
		WRITE_ONCE(current->__state, current->saved_state);	\
		current->saved_state = TASK_RUNNING;			\
		raw_spin_unlock(&current->pi_lock);			\
	} while (0);

#define get_current_state()	READ_ONCE(current->__state)

/*
 * Define the task command name length as enum, then it can be visible to
 * BPF programs.
 */
enum {
	TASK_COMM_LEN = 16,
};

extern void sched_tick(void);

#define	MAX_SCHEDULE_TIMEOUT		LONG_MAX

extern long schedule_timeout(long timeout);
extern long schedule_timeout_interruptible(long timeout);
extern long schedule_timeout_killable(long timeout);
extern long schedule_timeout_uninterruptible(long timeout);
extern long schedule_timeout_idle(long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);
asmlinkage void preempt_schedule_irq(void);
#ifdef CONFIG_PREEMPT_RT
extern void schedule_rtlock(void);
#endif

extern int __must_check io_schedule_prepare(void);
extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);

/**
 * struct prev_cputime - snapshot of system and user cputime
 * @utime:	time spent in user mode
 * @stime:	time spent in system mode
 * @lock:	protects the above two fields
 *
 * Stores previous user/system time values such that we can guarantee
 * monotonicity.
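 *
 * (Roughly: the values previously reported to userspace act as a floor, so
 * an adjusted utime/stime sample can never appear to move backwards.)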
338 */ 339 struct prev_cputime { 340 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE 341 u64 utime; 342 u64 stime; 343 raw_spinlock_t lock; 344 #endif 345 }; 346 347 enum vtime_state { 348 /* Task is sleeping or running in a CPU with VTIME inactive: */ 349 VTIME_INACTIVE = 0, 350 /* Task is idle */ 351 VTIME_IDLE, 352 /* Task runs in kernelspace in a CPU with VTIME active: */ 353 VTIME_SYS, 354 /* Task runs in userspace in a CPU with VTIME active: */ 355 VTIME_USER, 356 /* Task runs as guests in a CPU with VTIME active: */ 357 VTIME_GUEST, 358 }; 359 360 struct vtime { 361 seqcount_t seqcount; 362 unsigned long long starttime; 363 enum vtime_state state; 364 unsigned int cpu; 365 u64 utime; 366 u64 stime; 367 u64 gtime; 368 }; 369 370 /* 371 * Utilization clamp constraints. 372 * @UCLAMP_MIN: Minimum utilization 373 * @UCLAMP_MAX: Maximum utilization 374 * @UCLAMP_CNT: Utilization clamp constraints count 375 */ 376 enum uclamp_id { 377 UCLAMP_MIN = 0, 378 UCLAMP_MAX, 379 UCLAMP_CNT 380 }; 381 382 #ifdef CONFIG_SMP 383 extern struct root_domain def_root_domain; 384 extern struct mutex sched_domains_mutex; 385 #endif 386 387 struct sched_param { 388 int sched_priority; 389 }; 390 391 struct sched_info { 392 #ifdef CONFIG_SCHED_INFO 393 /* Cumulative counters: */ 394 395 /* # of times we have run on this CPU: */ 396 unsigned long pcount; 397 398 /* Time spent waiting on a runqueue: */ 399 unsigned long long run_delay; 400 401 /* Timestamps: */ 402 403 /* When did we last run on a CPU? */ 404 unsigned long long last_arrival; 405 406 /* When were we last queued to run? */ 407 unsigned long long last_queued; 408 409 #endif /* CONFIG_SCHED_INFO */ 410 }; 411 412 /* 413 * Integer metrics need fixed point arithmetic, e.g., sched/fair 414 * has a few: load, load_avg, util_avg, freq, and capacity. 415 * 416 * We define a basic fixed point arithmetic range, and then formalize 417 * all these metrics based on that basic range. 418 */ 419 # define SCHED_FIXEDPOINT_SHIFT 10 420 # define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT) 421 422 /* Increase resolution of cpu_capacity calculations */ 423 # define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT 424 # define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT) 425 426 struct load_weight { 427 unsigned long weight; 428 u32 inv_weight; 429 }; 430 431 /* 432 * The load/runnable/util_avg accumulates an infinite geometric series 433 * (see __update_load_avg_cfs_rq() in kernel/sched/pelt.c). 434 * 435 * [load_avg definition] 436 * 437 * load_avg = runnable% * scale_load_down(load) 438 * 439 * [runnable_avg definition] 440 * 441 * runnable_avg = runnable% * SCHED_CAPACITY_SCALE 442 * 443 * [util_avg definition] 444 * 445 * util_avg = running% * SCHED_CAPACITY_SCALE 446 * 447 * where runnable% is the time ratio that a sched_entity is runnable and 448 * running% the time ratio that a sched_entity is running. 449 * 450 * For cfs_rq, they are the aggregated values of all runnable and blocked 451 * sched_entities. 452 * 453 * The load/runnable/util_avg doesn't directly factor frequency scaling and CPU 454 * capacity scaling. The scaling is done through the rq_clock_pelt that is used 455 * for computing those signals (see update_rq_clock_pelt()) 456 * 457 * N.B., the above ratios (runnable% and running%) themselves are in the 458 * range of [0, 1]. To do fixed point arithmetics, we therefore scale them 459 * to as large a range as necessary. This is for example reflected by 460 * util_avg's SCHED_CAPACITY_SCALE. 
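 *
 * As a rough worked example (illustrative numbers only): with
 * SCHED_CAPACITY_SCALE == 1024, an entity that has been running ~25% of the
 * time has util_avg ~= 0.25 * 1024 = 256, and one that is runnable ~50% of
 * the time with a scaled-down load of 1024 has load_avg ~= 0.50 * 1024 = 512.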
461 * 462 * [Overflow issue] 463 * 464 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities 465 * with the highest load (=88761), always runnable on a single cfs_rq, 466 * and should not overflow as the number already hits PID_MAX_LIMIT. 467 * 468 * For all other cases (including 32-bit kernels), struct load_weight's 469 * weight will overflow first before we do, because: 470 * 471 * Max(load_avg) <= Max(load.weight) 472 * 473 * Then it is the load_weight's responsibility to consider overflow 474 * issues. 475 */ 476 struct sched_avg { 477 u64 last_update_time; 478 u64 load_sum; 479 u64 runnable_sum; 480 u32 util_sum; 481 u32 period_contrib; 482 unsigned long load_avg; 483 unsigned long runnable_avg; 484 unsigned long util_avg; 485 unsigned int util_est; 486 } ____cacheline_aligned; 487 488 /* 489 * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg 490 * updates. When a task is dequeued, its util_est should not be updated if its 491 * util_avg has not been updated in the meantime. 492 * This information is mapped into the MSB bit of util_est at dequeue time. 493 * Since max value of util_est for a task is 1024 (PELT util_avg for a task) 494 * it is safe to use MSB. 495 */ 496 #define UTIL_EST_WEIGHT_SHIFT 2 497 #define UTIL_AVG_UNCHANGED 0x80000000 498 499 struct sched_statistics { 500 #ifdef CONFIG_SCHEDSTATS 501 u64 wait_start; 502 u64 wait_max; 503 u64 wait_count; 504 u64 wait_sum; 505 u64 iowait_count; 506 u64 iowait_sum; 507 508 u64 sleep_start; 509 u64 sleep_max; 510 s64 sum_sleep_runtime; 511 512 u64 block_start; 513 u64 block_max; 514 s64 sum_block_runtime; 515 516 s64 exec_max; 517 u64 slice_max; 518 519 u64 nr_migrations_cold; 520 u64 nr_failed_migrations_affine; 521 u64 nr_failed_migrations_running; 522 u64 nr_failed_migrations_hot; 523 u64 nr_forced_migrations; 524 525 u64 nr_wakeups; 526 u64 nr_wakeups_sync; 527 u64 nr_wakeups_migrate; 528 u64 nr_wakeups_local; 529 u64 nr_wakeups_remote; 530 u64 nr_wakeups_affine; 531 u64 nr_wakeups_affine_attempts; 532 u64 nr_wakeups_passive; 533 u64 nr_wakeups_idle; 534 535 #ifdef CONFIG_SCHED_CORE 536 u64 core_forceidle_sum; 537 #endif 538 #endif /* CONFIG_SCHEDSTATS */ 539 } ____cacheline_aligned; 540 541 struct sched_entity { 542 /* For load-balancing: */ 543 struct load_weight load; 544 struct rb_node run_node; 545 u64 deadline; 546 u64 min_vruntime; 547 u64 min_slice; 548 549 struct list_head group_node; 550 unsigned char on_rq; 551 unsigned char sched_delayed; 552 unsigned char rel_deadline; 553 unsigned char custom_slice; 554 /* hole */ 555 556 u64 exec_start; 557 u64 sum_exec_runtime; 558 u64 prev_sum_exec_runtime; 559 u64 vruntime; 560 s64 vlag; 561 u64 slice; 562 563 u64 nr_migrations; 564 565 #ifdef CONFIG_FAIR_GROUP_SCHED 566 int depth; 567 struct sched_entity *parent; 568 /* rq on which this entity is (to be) queued: */ 569 struct cfs_rq *cfs_rq; 570 /* rq "owned" by this entity/group: */ 571 struct cfs_rq *my_q; 572 /* cached value of my_q->h_nr_running */ 573 unsigned long runnable_weight; 574 #endif 575 576 #ifdef CONFIG_SMP 577 /* 578 * Per entity load average tracking. 579 * 580 * Put into separate cache line so it does not 581 * collide with read-mostly values above. 
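	 *
	 * (The separation comes from struct sched_avg itself being declared
	 * ____cacheline_aligned above.)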
582 */ 583 struct sched_avg avg; 584 #endif 585 }; 586 587 struct sched_rt_entity { 588 struct list_head run_list; 589 unsigned long timeout; 590 unsigned long watchdog_stamp; 591 unsigned int time_slice; 592 unsigned short on_rq; 593 unsigned short on_list; 594 595 struct sched_rt_entity *back; 596 #ifdef CONFIG_RT_GROUP_SCHED 597 struct sched_rt_entity *parent; 598 /* rq on which this entity is (to be) queued: */ 599 struct rt_rq *rt_rq; 600 /* rq "owned" by this entity/group: */ 601 struct rt_rq *my_q; 602 #endif 603 } __randomize_layout; 604 605 typedef bool (*dl_server_has_tasks_f)(struct sched_dl_entity *); 606 typedef struct task_struct *(*dl_server_pick_f)(struct sched_dl_entity *); 607 608 struct sched_dl_entity { 609 struct rb_node rb_node; 610 611 /* 612 * Original scheduling parameters. Copied here from sched_attr 613 * during sched_setattr(), they will remain the same until 614 * the next sched_setattr(). 615 */ 616 u64 dl_runtime; /* Maximum runtime for each instance */ 617 u64 dl_deadline; /* Relative deadline of each instance */ 618 u64 dl_period; /* Separation of two instances (period) */ 619 u64 dl_bw; /* dl_runtime / dl_period */ 620 u64 dl_density; /* dl_runtime / dl_deadline */ 621 622 /* 623 * Actual scheduling parameters. Initialized with the values above, 624 * they are continuously updated during task execution. Note that 625 * the remaining runtime could be < 0 in case we are in overrun. 626 */ 627 s64 runtime; /* Remaining runtime for this instance */ 628 u64 deadline; /* Absolute deadline for this instance */ 629 unsigned int flags; /* Specifying the scheduler behaviour */ 630 631 /* 632 * Some bool flags: 633 * 634 * @dl_throttled tells if we exhausted the runtime. If so, the 635 * task has to wait for a replenishment to be performed at the 636 * next firing of dl_timer. 637 * 638 * @dl_yielded tells if task gave up the CPU before consuming 639 * all its available runtime during the last job. 640 * 641 * @dl_non_contending tells if the task is inactive while still 642 * contributing to the active utilization. In other words, it 643 * indicates if the inactive timer has been armed and its handler 644 * has not been executed yet. This flag is useful to avoid race 645 * conditions between the inactive timer handler and the wakeup 646 * code. 647 * 648 * @dl_overrun tells if the task asked to be informed about runtime 649 * overruns. 650 * 651 * @dl_server tells if this is a server entity. 652 * 653 * @dl_defer tells if this is a deferred or regular server. For 654 * now only defer server exists. 655 * 656 * @dl_defer_armed tells if the deferrable server is waiting 657 * for the replenishment timer to activate it. 658 * 659 * @dl_defer_running tells if the deferrable server is actually 660 * running, skipping the defer phase. 661 */ 662 unsigned int dl_throttled : 1; 663 unsigned int dl_yielded : 1; 664 unsigned int dl_non_contending : 1; 665 unsigned int dl_overrun : 1; 666 unsigned int dl_server : 1; 667 unsigned int dl_defer : 1; 668 unsigned int dl_defer_armed : 1; 669 unsigned int dl_defer_running : 1; 670 671 /* 672 * Bandwidth enforcement timer. Each -deadline task has its 673 * own bandwidth to be enforced, thus we need one timer per task. 674 */ 675 struct hrtimer dl_timer; 676 677 /* 678 * Inactive timer, responsible for decreasing the active utilization 679 * at the "0-lag time". 
When a -deadline task blocks, it contributes 680 * to GRUB's active utilization until the "0-lag time", hence a 681 * timer is needed to decrease the active utilization at the correct 682 * time. 683 */ 684 struct hrtimer inactive_timer; 685 686 /* 687 * Bits for DL-server functionality. Also see the comment near 688 * dl_server_update(). 689 * 690 * @rq the runqueue this server is for 691 * 692 * @server_has_tasks() returns true if @server_pick return a 693 * runnable task. 694 */ 695 struct rq *rq; 696 dl_server_has_tasks_f server_has_tasks; 697 dl_server_pick_f server_pick_task; 698 699 #ifdef CONFIG_RT_MUTEXES 700 /* 701 * Priority Inheritance. When a DEADLINE scheduling entity is boosted 702 * pi_se points to the donor, otherwise points to the dl_se it belongs 703 * to (the original one/itself). 704 */ 705 struct sched_dl_entity *pi_se; 706 #endif 707 }; 708 709 #ifdef CONFIG_UCLAMP_TASK 710 /* Number of utilization clamp buckets (shorter alias) */ 711 #define UCLAMP_BUCKETS CONFIG_UCLAMP_BUCKETS_COUNT 712 713 /* 714 * Utilization clamp for a scheduling entity 715 * @value: clamp value "assigned" to a se 716 * @bucket_id: bucket index corresponding to the "assigned" value 717 * @active: the se is currently refcounted in a rq's bucket 718 * @user_defined: the requested clamp value comes from user-space 719 * 720 * The bucket_id is the index of the clamp bucket matching the clamp value 721 * which is pre-computed and stored to avoid expensive integer divisions from 722 * the fast path. 723 * 724 * The active bit is set whenever a task has got an "effective" value assigned, 725 * which can be different from the clamp value "requested" from user-space. 726 * This allows to know a task is refcounted in the rq's bucket corresponding 727 * to the "effective" bucket_id. 728 * 729 * The user_defined bit is set whenever a task has got a task-specific clamp 730 * value requested from userspace, i.e. the system defaults apply to this task 731 * just as a restriction. This allows to relax default clamps when a less 732 * restrictive task-specific value has been requested, thus allowing to 733 * implement a "nice" semantic. For example, a task running with a 20% 734 * default boost can still drop its own boosting to 0%. 735 */ 736 struct uclamp_se { 737 unsigned int value : bits_per(SCHED_CAPACITY_SCALE); 738 unsigned int bucket_id : bits_per(UCLAMP_BUCKETS); 739 unsigned int active : 1; 740 unsigned int user_defined : 1; 741 }; 742 #endif /* CONFIG_UCLAMP_TASK */ 743 744 union rcu_special { 745 struct { 746 u8 blocked; 747 u8 need_qs; 748 u8 exp_hint; /* Hint for performance. */ 749 u8 need_mb; /* Readers need smp_mb(). */ 750 } b; /* Bits. */ 751 u32 s; /* Set of bits. */ 752 }; 753 754 enum perf_event_task_context { 755 perf_invalid_context = -1, 756 perf_hw_context = 0, 757 perf_sw_context, 758 perf_nr_task_contexts, 759 }; 760 761 /* 762 * Number of contexts where an event can trigger: 763 * task, softirq, hardirq, nmi. 764 */ 765 #define PERF_NR_CONTEXTS 4 766 767 struct wake_q_node { 768 struct wake_q_node *next; 769 }; 770 771 struct kmap_ctrl { 772 #ifdef CONFIG_KMAP_LOCAL 773 int idx; 774 pte_t pteval[KM_MAX_IDX]; 775 #endif 776 }; 777 778 struct task_struct { 779 #ifdef CONFIG_THREAD_INFO_IN_TASK 780 /* 781 * For reasons of header soup (see current_thread_info()), this 782 * must be the first element of task_struct. 
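	 *
	 * (current_thread_info() simply casts the task_struct pointer, which
	 * is only correct while thread_info stays at offset 0.)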
783 */ 784 struct thread_info thread_info; 785 #endif 786 unsigned int __state; 787 788 /* saved state for "spinlock sleepers" */ 789 unsigned int saved_state; 790 791 /* 792 * This begins the randomizable portion of task_struct. Only 793 * scheduling-critical items should be added above here. 794 */ 795 randomized_struct_fields_start 796 797 void *stack; 798 refcount_t usage; 799 /* Per task flags (PF_*), defined further below: */ 800 unsigned int flags; 801 unsigned int ptrace; 802 803 #ifdef CONFIG_MEM_ALLOC_PROFILING 804 struct alloc_tag *alloc_tag; 805 #endif 806 807 #ifdef CONFIG_SMP 808 int on_cpu; 809 struct __call_single_node wake_entry; 810 unsigned int wakee_flips; 811 unsigned long wakee_flip_decay_ts; 812 struct task_struct *last_wakee; 813 814 /* 815 * recent_used_cpu is initially set as the last CPU used by a task 816 * that wakes affine another task. Waker/wakee relationships can 817 * push tasks around a CPU where each wakeup moves to the next one. 818 * Tracking a recently used CPU allows a quick search for a recently 819 * used CPU that may be idle. 820 */ 821 int recent_used_cpu; 822 int wake_cpu; 823 #endif 824 int on_rq; 825 826 int prio; 827 int static_prio; 828 int normal_prio; 829 unsigned int rt_priority; 830 831 struct sched_entity se; 832 struct sched_rt_entity rt; 833 struct sched_dl_entity dl; 834 struct sched_dl_entity *dl_server; 835 #ifdef CONFIG_SCHED_CLASS_EXT 836 struct sched_ext_entity scx; 837 #endif 838 const struct sched_class *sched_class; 839 840 #ifdef CONFIG_SCHED_CORE 841 struct rb_node core_node; 842 unsigned long core_cookie; 843 unsigned int core_occupation; 844 #endif 845 846 #ifdef CONFIG_CGROUP_SCHED 847 struct task_group *sched_task_group; 848 #endif 849 850 851 #ifdef CONFIG_UCLAMP_TASK 852 /* 853 * Clamp values requested for a scheduling entity. 854 * Must be updated with task_rq_lock() held. 855 */ 856 struct uclamp_se uclamp_req[UCLAMP_CNT]; 857 /* 858 * Effective clamp values used for a scheduling entity. 859 * Must be updated with task_rq_lock() held. 
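	 *
	 * (The effective value is, roughly, the requested value further
	 * restricted by the task group and system-wide clamps.)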
860 */ 861 struct uclamp_se uclamp[UCLAMP_CNT]; 862 #endif 863 864 struct sched_statistics stats; 865 866 #ifdef CONFIG_PREEMPT_NOTIFIERS 867 /* List of struct preempt_notifier: */ 868 struct hlist_head preempt_notifiers; 869 #endif 870 871 #ifdef CONFIG_BLK_DEV_IO_TRACE 872 unsigned int btrace_seq; 873 #endif 874 875 unsigned int policy; 876 unsigned long max_allowed_capacity; 877 int nr_cpus_allowed; 878 const cpumask_t *cpus_ptr; 879 cpumask_t *user_cpus_ptr; 880 cpumask_t cpus_mask; 881 void *migration_pending; 882 #ifdef CONFIG_SMP 883 unsigned short migration_disabled; 884 #endif 885 unsigned short migration_flags; 886 887 #ifdef CONFIG_PREEMPT_RCU 888 int rcu_read_lock_nesting; 889 union rcu_special rcu_read_unlock_special; 890 struct list_head rcu_node_entry; 891 struct rcu_node *rcu_blocked_node; 892 #endif /* #ifdef CONFIG_PREEMPT_RCU */ 893 894 #ifdef CONFIG_TASKS_RCU 895 unsigned long rcu_tasks_nvcsw; 896 u8 rcu_tasks_holdout; 897 u8 rcu_tasks_idx; 898 int rcu_tasks_idle_cpu; 899 struct list_head rcu_tasks_holdout_list; 900 int rcu_tasks_exit_cpu; 901 struct list_head rcu_tasks_exit_list; 902 #endif /* #ifdef CONFIG_TASKS_RCU */ 903 904 #ifdef CONFIG_TASKS_TRACE_RCU 905 int trc_reader_nesting; 906 int trc_ipi_to_cpu; 907 union rcu_special trc_reader_special; 908 struct list_head trc_holdout_list; 909 struct list_head trc_blkd_node; 910 int trc_blkd_cpu; 911 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */ 912 913 struct sched_info sched_info; 914 915 struct list_head tasks; 916 #ifdef CONFIG_SMP 917 struct plist_node pushable_tasks; 918 struct rb_node pushable_dl_tasks; 919 #endif 920 921 struct mm_struct *mm; 922 struct mm_struct *active_mm; 923 struct address_space *faults_disabled_mapping; 924 925 int exit_state; 926 int exit_code; 927 int exit_signal; 928 /* The signal sent when the parent dies: */ 929 int pdeath_signal; 930 /* JOBCTL_*, siglock protected: */ 931 unsigned long jobctl; 932 933 /* Used for emulating ABI behavior of previous Linux versions: */ 934 unsigned int personality; 935 936 /* Scheduler bits, serialized by scheduler locks: */ 937 unsigned sched_reset_on_fork:1; 938 unsigned sched_contributes_to_load:1; 939 unsigned sched_migrated:1; 940 941 /* Force alignment to the next boundary: */ 942 unsigned :0; 943 944 /* Unserialized, strictly 'current' */ 945 946 /* 947 * This field must not be in the scheduler word above due to wakelist 948 * queueing no longer being serialized by p->on_cpu. However: 949 * 950 * p->XXX = X; ttwu() 951 * schedule() if (p->on_rq && ..) // false 952 * smp_mb__after_spinlock(); if (smp_load_acquire(&p->on_cpu) && //true 953 * deactivate_task() ttwu_queue_wakelist()) 954 * p->on_rq = 0; p->sched_remote_wakeup = Y; 955 * 956 * guarantees all stores of 'current' are visible before 957 * ->sched_remote_wakeup gets used, so it can be in this word. 
958 */ 959 unsigned sched_remote_wakeup:1; 960 #ifdef CONFIG_RT_MUTEXES 961 unsigned sched_rt_mutex:1; 962 #endif 963 964 /* Bit to tell TOMOYO we're in execve(): */ 965 unsigned in_execve:1; 966 unsigned in_iowait:1; 967 #ifndef TIF_RESTORE_SIGMASK 968 unsigned restore_sigmask:1; 969 #endif 970 #ifdef CONFIG_MEMCG_V1 971 unsigned in_user_fault:1; 972 #endif 973 #ifdef CONFIG_LRU_GEN 974 /* whether the LRU algorithm may apply to this access */ 975 unsigned in_lru_fault:1; 976 #endif 977 #ifdef CONFIG_COMPAT_BRK 978 unsigned brk_randomized:1; 979 #endif 980 #ifdef CONFIG_CGROUPS 981 /* disallow userland-initiated cgroup migration */ 982 unsigned no_cgroup_migration:1; 983 /* task is frozen/stopped (used by the cgroup freezer) */ 984 unsigned frozen:1; 985 #endif 986 #ifdef CONFIG_BLK_CGROUP 987 unsigned use_memdelay:1; 988 #endif 989 #ifdef CONFIG_PSI 990 /* Stalled due to lack of memory */ 991 unsigned in_memstall:1; 992 #endif 993 #ifdef CONFIG_PAGE_OWNER 994 /* Used by page_owner=on to detect recursion in page tracking. */ 995 unsigned in_page_owner:1; 996 #endif 997 #ifdef CONFIG_EVENTFD 998 /* Recursion prevention for eventfd_signal() */ 999 unsigned in_eventfd:1; 1000 #endif 1001 #ifdef CONFIG_ARCH_HAS_CPU_PASID 1002 unsigned pasid_activated:1; 1003 #endif 1004 #ifdef CONFIG_CPU_SUP_INTEL 1005 unsigned reported_split_lock:1; 1006 #endif 1007 #ifdef CONFIG_TASK_DELAY_ACCT 1008 /* delay due to memory thrashing */ 1009 unsigned in_thrashing:1; 1010 #endif 1011 #ifdef CONFIG_PREEMPT_RT 1012 struct netdev_xmit net_xmit; 1013 #endif 1014 unsigned long atomic_flags; /* Flags requiring atomic access. */ 1015 1016 struct restart_block restart_block; 1017 1018 pid_t pid; 1019 pid_t tgid; 1020 1021 #ifdef CONFIG_STACKPROTECTOR 1022 /* Canary value for the -fstack-protector GCC feature: */ 1023 unsigned long stack_canary; 1024 #endif 1025 /* 1026 * Pointers to the (original) parent process, youngest child, younger sibling, 1027 * older sibling, respectively. (p->father can be replaced with 1028 * p->real_parent->pid) 1029 */ 1030 1031 /* Real parent process: */ 1032 struct task_struct __rcu *real_parent; 1033 1034 /* Recipient of SIGCHLD, wait4() reports: */ 1035 struct task_struct __rcu *parent; 1036 1037 /* 1038 * Children/sibling form the list of natural children: 1039 */ 1040 struct list_head children; 1041 struct list_head sibling; 1042 struct task_struct *group_leader; 1043 1044 /* 1045 * 'ptraced' is the list of tasks this task is using ptrace() on. 1046 * 1047 * This includes both natural children and PTRACE_ATTACH targets. 1048 * 'ptrace_entry' is this task's link on the p->parent->ptraced list. 1049 */ 1050 struct list_head ptraced; 1051 struct list_head ptrace_entry; 1052 1053 /* PID/PID hash table linkage. 
*/ 1054 struct pid *thread_pid; 1055 struct hlist_node pid_links[PIDTYPE_MAX]; 1056 struct list_head thread_node; 1057 1058 struct completion *vfork_done; 1059 1060 /* CLONE_CHILD_SETTID: */ 1061 int __user *set_child_tid; 1062 1063 /* CLONE_CHILD_CLEARTID: */ 1064 int __user *clear_child_tid; 1065 1066 /* PF_KTHREAD | PF_IO_WORKER */ 1067 void *worker_private; 1068 1069 u64 utime; 1070 u64 stime; 1071 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME 1072 u64 utimescaled; 1073 u64 stimescaled; 1074 #endif 1075 u64 gtime; 1076 struct prev_cputime prev_cputime; 1077 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN 1078 struct vtime vtime; 1079 #endif 1080 1081 #ifdef CONFIG_NO_HZ_FULL 1082 atomic_t tick_dep_mask; 1083 #endif 1084 /* Context switch counts: */ 1085 unsigned long nvcsw; 1086 unsigned long nivcsw; 1087 1088 /* Monotonic time in nsecs: */ 1089 u64 start_time; 1090 1091 /* Boot based time in nsecs: */ 1092 u64 start_boottime; 1093 1094 /* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */ 1095 unsigned long min_flt; 1096 unsigned long maj_flt; 1097 1098 /* Empty if CONFIG_POSIX_CPUTIMERS=n */ 1099 struct posix_cputimers posix_cputimers; 1100 1101 #ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK 1102 struct posix_cputimers_work posix_cputimers_work; 1103 #endif 1104 1105 /* Process credentials: */ 1106 1107 /* Tracer's credentials at attach: */ 1108 const struct cred __rcu *ptracer_cred; 1109 1110 /* Objective and real subjective task credentials (COW): */ 1111 const struct cred __rcu *real_cred; 1112 1113 /* Effective (overridable) subjective task credentials (COW): */ 1114 const struct cred __rcu *cred; 1115 1116 #ifdef CONFIG_KEYS 1117 /* Cached requested key. */ 1118 struct key *cached_requested_key; 1119 #endif 1120 1121 /* 1122 * executable name, excluding path. 
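	 *
	 * An illustrative snapshot of the name (using the helper declared
	 * further below in this header):
	 *
	 *	char buf[TASK_COMM_LEN];
	 *
	 *	get_task_comm(buf, tsk);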
1123 * 1124 * - normally initialized setup_new_exec() 1125 * - access it with [gs]et_task_comm() 1126 * - lock it with task_lock() 1127 */ 1128 char comm[TASK_COMM_LEN]; 1129 1130 struct nameidata *nameidata; 1131 1132 #ifdef CONFIG_SYSVIPC 1133 struct sysv_sem sysvsem; 1134 struct sysv_shm sysvshm; 1135 #endif 1136 #ifdef CONFIG_DETECT_HUNG_TASK 1137 unsigned long last_switch_count; 1138 unsigned long last_switch_time; 1139 #endif 1140 /* Filesystem information: */ 1141 struct fs_struct *fs; 1142 1143 /* Open file information: */ 1144 struct files_struct *files; 1145 1146 #ifdef CONFIG_IO_URING 1147 struct io_uring_task *io_uring; 1148 #endif 1149 1150 /* Namespaces: */ 1151 struct nsproxy *nsproxy; 1152 1153 /* Signal handlers: */ 1154 struct signal_struct *signal; 1155 struct sighand_struct __rcu *sighand; 1156 sigset_t blocked; 1157 sigset_t real_blocked; 1158 /* Restored if set_restore_sigmask() was used: */ 1159 sigset_t saved_sigmask; 1160 struct sigpending pending; 1161 unsigned long sas_ss_sp; 1162 size_t sas_ss_size; 1163 unsigned int sas_ss_flags; 1164 1165 struct callback_head *task_works; 1166 1167 #ifdef CONFIG_AUDIT 1168 #ifdef CONFIG_AUDITSYSCALL 1169 struct audit_context *audit_context; 1170 #endif 1171 kuid_t loginuid; 1172 unsigned int sessionid; 1173 #endif 1174 struct seccomp seccomp; 1175 struct syscall_user_dispatch syscall_dispatch; 1176 1177 /* Thread group tracking: */ 1178 u64 parent_exec_id; 1179 u64 self_exec_id; 1180 1181 /* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */ 1182 spinlock_t alloc_lock; 1183 1184 /* Protection of the PI data structures: */ 1185 raw_spinlock_t pi_lock; 1186 1187 struct wake_q_node wake_q; 1188 1189 #ifdef CONFIG_RT_MUTEXES 1190 /* PI waiters blocked on a rt_mutex held by this task: */ 1191 struct rb_root_cached pi_waiters; 1192 /* Updated under owner's pi_lock and rq lock */ 1193 struct task_struct *pi_top_task; 1194 /* Deadlock detection and priority inheritance handling: */ 1195 struct rt_mutex_waiter *pi_blocked_on; 1196 #endif 1197 1198 #ifdef CONFIG_DEBUG_MUTEXES 1199 /* Mutex deadlock detection: */ 1200 struct mutex_waiter *blocked_on; 1201 #endif 1202 1203 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 1204 int non_block_count; 1205 #endif 1206 1207 #ifdef CONFIG_TRACE_IRQFLAGS 1208 struct irqtrace_events irqtrace; 1209 unsigned int hardirq_threaded; 1210 u64 hardirq_chain_key; 1211 int softirqs_enabled; 1212 int softirq_context; 1213 int irq_config; 1214 #endif 1215 #ifdef CONFIG_PREEMPT_RT 1216 int softirq_disable_cnt; 1217 #endif 1218 1219 #ifdef CONFIG_LOCKDEP 1220 # define MAX_LOCK_DEPTH 48UL 1221 u64 curr_chain_key; 1222 int lockdep_depth; 1223 unsigned int lockdep_recursion; 1224 struct held_lock held_locks[MAX_LOCK_DEPTH]; 1225 #endif 1226 1227 #if defined(CONFIG_UBSAN) && !defined(CONFIG_UBSAN_TRAP) 1228 unsigned int in_ubsan; 1229 #endif 1230 1231 /* Journalling filesystem info: */ 1232 void *journal_info; 1233 1234 /* Stacked block device info: */ 1235 struct bio_list *bio_list; 1236 1237 /* Stack plugging: */ 1238 struct blk_plug *plug; 1239 1240 /* VM state: */ 1241 struct reclaim_state *reclaim_state; 1242 1243 struct io_context *io_context; 1244 1245 #ifdef CONFIG_COMPACTION 1246 struct capture_control *capture_control; 1247 #endif 1248 /* Ptrace state: */ 1249 unsigned long ptrace_message; 1250 kernel_siginfo_t *last_siginfo; 1251 1252 struct task_io_accounting ioac; 1253 #ifdef CONFIG_PSI 1254 /* Pressure stall state */ 1255 unsigned int psi_flags; 1256 #endif 1257 #ifdef 
CONFIG_TASK_XACCT 1258 /* Accumulated RSS usage: */ 1259 u64 acct_rss_mem1; 1260 /* Accumulated virtual memory usage: */ 1261 u64 acct_vm_mem1; 1262 /* stime + utime since last update: */ 1263 u64 acct_timexpd; 1264 #endif 1265 #ifdef CONFIG_CPUSETS 1266 /* Protected by ->alloc_lock: */ 1267 nodemask_t mems_allowed; 1268 /* Sequence number to catch updates: */ 1269 seqcount_spinlock_t mems_allowed_seq; 1270 int cpuset_mem_spread_rotor; 1271 #endif 1272 #ifdef CONFIG_CGROUPS 1273 /* Control Group info protected by css_set_lock: */ 1274 struct css_set __rcu *cgroups; 1275 /* cg_list protected by css_set_lock and tsk->alloc_lock: */ 1276 struct list_head cg_list; 1277 #endif 1278 #ifdef CONFIG_X86_CPU_RESCTRL 1279 u32 closid; 1280 u32 rmid; 1281 #endif 1282 #ifdef CONFIG_FUTEX 1283 struct robust_list_head __user *robust_list; 1284 #ifdef CONFIG_COMPAT 1285 struct compat_robust_list_head __user *compat_robust_list; 1286 #endif 1287 struct list_head pi_state_list; 1288 struct futex_pi_state *pi_state_cache; 1289 struct mutex futex_exit_mutex; 1290 unsigned int futex_state; 1291 #endif 1292 #ifdef CONFIG_PERF_EVENTS 1293 u8 perf_recursion[PERF_NR_CONTEXTS]; 1294 struct perf_event_context *perf_event_ctxp; 1295 struct mutex perf_event_mutex; 1296 struct list_head perf_event_list; 1297 #endif 1298 #ifdef CONFIG_DEBUG_PREEMPT 1299 unsigned long preempt_disable_ip; 1300 #endif 1301 #ifdef CONFIG_NUMA 1302 /* Protected by alloc_lock: */ 1303 struct mempolicy *mempolicy; 1304 short il_prev; 1305 u8 il_weight; 1306 short pref_node_fork; 1307 #endif 1308 #ifdef CONFIG_NUMA_BALANCING 1309 int numa_scan_seq; 1310 unsigned int numa_scan_period; 1311 unsigned int numa_scan_period_max; 1312 int numa_preferred_nid; 1313 unsigned long numa_migrate_retry; 1314 /* Migration stamp: */ 1315 u64 node_stamp; 1316 u64 last_task_numa_placement; 1317 u64 last_sum_exec_runtime; 1318 struct callback_head numa_work; 1319 1320 /* 1321 * This pointer is only modified for current in syscall and 1322 * pagefault context (and for tasks being destroyed), so it can be read 1323 * from any of the following contexts: 1324 * - RCU read-side critical section 1325 * - current->numa_group from everywhere 1326 * - task's runqueue locked, task not running 1327 */ 1328 struct numa_group __rcu *numa_group; 1329 1330 /* 1331 * numa_faults is an array split into four regions: 1332 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer 1333 * in this precise order. 1334 * 1335 * faults_memory: Exponential decaying average of faults on a per-node 1336 * basis. Scheduling placement decisions are made based on these 1337 * counts. The values remain static for the duration of a PTE scan. 1338 * faults_cpu: Track the nodes the process was running on when a NUMA 1339 * hinting fault was incurred. 1340 * faults_memory_buffer and faults_cpu_buffer: Record faults per node 1341 * during the current scan window. When the scan completes, the counts 1342 * in faults_memory and faults_cpu decay and these values are copied. 1343 */ 1344 unsigned long *numa_faults; 1345 unsigned long total_numa_faults; 1346 1347 /* 1348 * numa_faults_locality tracks if faults recorded during the last 1349 * scan window were remote/local or failed to migrate. 
The task scan 1350 * period is adapted based on the locality of the faults with different 1351 * weights depending on whether they were shared or private faults 1352 */ 1353 unsigned long numa_faults_locality[3]; 1354 1355 unsigned long numa_pages_migrated; 1356 #endif /* CONFIG_NUMA_BALANCING */ 1357 1358 #ifdef CONFIG_RSEQ 1359 struct rseq __user *rseq; 1360 u32 rseq_len; 1361 u32 rseq_sig; 1362 /* 1363 * RmW on rseq_event_mask must be performed atomically 1364 * with respect to preemption. 1365 */ 1366 unsigned long rseq_event_mask; 1367 #endif 1368 1369 #ifdef CONFIG_SCHED_MM_CID 1370 int mm_cid; /* Current cid in mm */ 1371 int last_mm_cid; /* Most recent cid in mm */ 1372 int migrate_from_cpu; 1373 int mm_cid_active; /* Whether cid bitmap is active */ 1374 struct callback_head cid_work; 1375 #endif 1376 1377 struct tlbflush_unmap_batch tlb_ubc; 1378 1379 /* Cache last used pipe for splice(): */ 1380 struct pipe_inode_info *splice_pipe; 1381 1382 struct page_frag task_frag; 1383 1384 #ifdef CONFIG_TASK_DELAY_ACCT 1385 struct task_delay_info *delays; 1386 #endif 1387 1388 #ifdef CONFIG_FAULT_INJECTION 1389 int make_it_fail; 1390 unsigned int fail_nth; 1391 #endif 1392 /* 1393 * When (nr_dirtied >= nr_dirtied_pause), it's time to call 1394 * balance_dirty_pages() for a dirty throttling pause: 1395 */ 1396 int nr_dirtied; 1397 int nr_dirtied_pause; 1398 /* Start of a write-and-pause period: */ 1399 unsigned long dirty_paused_when; 1400 1401 #ifdef CONFIG_LATENCYTOP 1402 int latency_record_count; 1403 struct latency_record latency_record[LT_SAVECOUNT]; 1404 #endif 1405 /* 1406 * Time slack values; these are used to round up poll() and 1407 * select() etc timeout values. These are in nanoseconds. 1408 */ 1409 u64 timer_slack_ns; 1410 u64 default_timer_slack_ns; 1411 1412 #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) 1413 unsigned int kasan_depth; 1414 #endif 1415 1416 #ifdef CONFIG_KCSAN 1417 struct kcsan_ctx kcsan_ctx; 1418 #ifdef CONFIG_TRACE_IRQFLAGS 1419 struct irqtrace_events kcsan_save_irqtrace; 1420 #endif 1421 #ifdef CONFIG_KCSAN_WEAK_MEMORY 1422 int kcsan_stack_depth; 1423 #endif 1424 #endif 1425 1426 #ifdef CONFIG_KMSAN 1427 struct kmsan_ctx kmsan_ctx; 1428 #endif 1429 1430 #if IS_ENABLED(CONFIG_KUNIT) 1431 struct kunit *kunit_test; 1432 #endif 1433 1434 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 1435 /* Index of current stored address in ret_stack: */ 1436 int curr_ret_stack; 1437 int curr_ret_depth; 1438 1439 /* Stack of return addresses for return function tracing: */ 1440 unsigned long *ret_stack; 1441 1442 /* Timestamp for last schedule: */ 1443 unsigned long long ftrace_timestamp; 1444 1445 /* 1446 * Number of functions that haven't been traced 1447 * because of depth overrun: 1448 */ 1449 atomic_t trace_overrun; 1450 1451 /* Pause tracing: */ 1452 atomic_t tracing_graph_pause; 1453 #endif 1454 1455 #ifdef CONFIG_TRACING 1456 /* Bitmask and counter of trace recursion: */ 1457 unsigned long trace_recursion; 1458 #endif /* CONFIG_TRACING */ 1459 1460 #ifdef CONFIG_KCOV 1461 /* See kernel/kcov.c for more details. 
*/ 1462 1463 /* Coverage collection mode enabled for this task (0 if disabled): */ 1464 unsigned int kcov_mode; 1465 1466 /* Size of the kcov_area: */ 1467 unsigned int kcov_size; 1468 1469 /* Buffer for coverage collection: */ 1470 void *kcov_area; 1471 1472 /* KCOV descriptor wired with this task or NULL: */ 1473 struct kcov *kcov; 1474 1475 /* KCOV common handle for remote coverage collection: */ 1476 u64 kcov_handle; 1477 1478 /* KCOV sequence number: */ 1479 int kcov_sequence; 1480 1481 /* Collect coverage from softirq context: */ 1482 unsigned int kcov_softirq; 1483 #endif 1484 1485 #ifdef CONFIG_MEMCG_V1 1486 struct mem_cgroup *memcg_in_oom; 1487 #endif 1488 1489 #ifdef CONFIG_MEMCG 1490 /* Number of pages to reclaim on returning to userland: */ 1491 unsigned int memcg_nr_pages_over_high; 1492 1493 /* Used by memcontrol for targeted memcg charge: */ 1494 struct mem_cgroup *active_memcg; 1495 1496 /* Cache for current->cgroups->memcg->objcg lookups: */ 1497 struct obj_cgroup *objcg; 1498 #endif 1499 1500 #ifdef CONFIG_BLK_CGROUP 1501 struct gendisk *throttle_disk; 1502 #endif 1503 1504 #ifdef CONFIG_UPROBES 1505 struct uprobe_task *utask; 1506 #endif 1507 #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE) 1508 unsigned int sequential_io; 1509 unsigned int sequential_io_avg; 1510 #endif 1511 struct kmap_ctrl kmap_ctrl; 1512 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 1513 unsigned long task_state_change; 1514 # ifdef CONFIG_PREEMPT_RT 1515 unsigned long saved_state_change; 1516 # endif 1517 #endif 1518 struct rcu_head rcu; 1519 refcount_t rcu_users; 1520 int pagefault_disabled; 1521 #ifdef CONFIG_MMU 1522 struct task_struct *oom_reaper_list; 1523 struct timer_list oom_reaper_timer; 1524 #endif 1525 #ifdef CONFIG_VMAP_STACK 1526 struct vm_struct *stack_vm_area; 1527 #endif 1528 #ifdef CONFIG_THREAD_INFO_IN_TASK 1529 /* A live task holds one reference: */ 1530 refcount_t stack_refcount; 1531 #endif 1532 #ifdef CONFIG_LIVEPATCH 1533 int patch_state; 1534 #endif 1535 #ifdef CONFIG_SECURITY 1536 /* Used by LSM modules for access restriction: */ 1537 void *security; 1538 #endif 1539 #ifdef CONFIG_BPF_SYSCALL 1540 /* Used by BPF task local storage */ 1541 struct bpf_local_storage __rcu *bpf_storage; 1542 /* Used for BPF run context */ 1543 struct bpf_run_ctx *bpf_ctx; 1544 #endif 1545 /* Used by BPF for per-TASK xdp storage */ 1546 struct bpf_net_context *bpf_net_context; 1547 1548 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK 1549 unsigned long lowest_stack; 1550 unsigned long prev_lowest_stack; 1551 #endif 1552 1553 #ifdef CONFIG_X86_MCE 1554 void __user *mce_vaddr; 1555 __u64 mce_kflags; 1556 u64 mce_addr; 1557 __u64 mce_ripv : 1, 1558 mce_whole_page : 1, 1559 __mce_reserved : 62; 1560 struct callback_head mce_kill_me; 1561 int mce_count; 1562 #endif 1563 1564 #ifdef CONFIG_KRETPROBES 1565 struct llist_head kretprobe_instances; 1566 #endif 1567 #ifdef CONFIG_RETHOOK 1568 struct llist_head rethooks; 1569 #endif 1570 1571 #ifdef CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH 1572 /* 1573 * If L1D flush is supported on mm context switch 1574 * then we use this callback head to queue kill work 1575 * to kill tasks that are not running on SMT disabled 1576 * cores 1577 */ 1578 struct callback_head l1d_flush_kill; 1579 #endif 1580 1581 #ifdef CONFIG_RV 1582 /* 1583 * Per-task RV monitor. Nowadays fixed in RV_PER_TASK_MONITORS. 1584 * If we find justification for more monitors, we can think 1585 * about adding more or developing a dynamic method. So far, 1586 * none of these are justified. 
1587 */ 1588 union rv_task_monitor rv[RV_PER_TASK_MONITORS]; 1589 #endif 1590 1591 #ifdef CONFIG_USER_EVENTS 1592 struct user_event_mm *user_event_mm; 1593 #endif 1594 1595 /* 1596 * New fields for task_struct should be added above here, so that 1597 * they are included in the randomized portion of task_struct. 1598 */ 1599 randomized_struct_fields_end 1600 1601 /* CPU-specific state of this task: */ 1602 struct thread_struct thread; 1603 1604 /* 1605 * WARNING: on x86, 'thread_struct' contains a variable-sized 1606 * structure. It *MUST* be at the end of 'task_struct'. 1607 * 1608 * Do not put anything below here! 1609 */ 1610 }; 1611 1612 #define TASK_REPORT_IDLE (TASK_REPORT + 1) 1613 #define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1) 1614 1615 static inline unsigned int __task_state_index(unsigned int tsk_state, 1616 unsigned int tsk_exit_state) 1617 { 1618 unsigned int state = (tsk_state | tsk_exit_state) & TASK_REPORT; 1619 1620 BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX); 1621 1622 if ((tsk_state & TASK_IDLE) == TASK_IDLE) 1623 state = TASK_REPORT_IDLE; 1624 1625 /* 1626 * We're lying here, but rather than expose a completely new task state 1627 * to userspace, we can make this appear as if the task has gone through 1628 * a regular rt_mutex_lock() call. 1629 */ 1630 if (tsk_state & TASK_RTLOCK_WAIT) 1631 state = TASK_UNINTERRUPTIBLE; 1632 1633 return fls(state); 1634 } 1635 1636 static inline unsigned int task_state_index(struct task_struct *tsk) 1637 { 1638 return __task_state_index(READ_ONCE(tsk->__state), tsk->exit_state); 1639 } 1640 1641 static inline char task_index_to_char(unsigned int state) 1642 { 1643 static const char state_char[] = "RSDTtXZPI"; 1644 1645 BUILD_BUG_ON(TASK_REPORT_MAX * 2 != 1 << (sizeof(state_char) - 1)); 1646 1647 return state_char[state]; 1648 } 1649 1650 static inline char task_state_to_char(struct task_struct *tsk) 1651 { 1652 return task_index_to_char(task_state_index(tsk)); 1653 } 1654 1655 extern struct pid *cad_pid; 1656 1657 /* 1658 * Per process flags 1659 */ 1660 #define PF_VCPU 0x00000001 /* I'm a virtual CPU */ 1661 #define PF_IDLE 0x00000002 /* I am an IDLE thread */ 1662 #define PF_EXITING 0x00000004 /* Getting shut down */ 1663 #define PF_POSTCOREDUMP 0x00000008 /* Coredumps should ignore this task */ 1664 #define PF_IO_WORKER 0x00000010 /* Task is an IO worker */ 1665 #define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */ 1666 #define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */ 1667 #define PF_MCE_PROCESS 0x00000080 /* Process policy on mce errors */ 1668 #define PF_SUPERPRIV 0x00000100 /* Used super-user privileges */ 1669 #define PF_DUMPCORE 0x00000200 /* Dumped core */ 1670 #define PF_SIGNALED 0x00000400 /* Killed by a signal */ 1671 #define PF_MEMALLOC 0x00000800 /* Allocating memory to free memory. See memalloc_noreclaim_save() */ 1672 #define PF_NPROC_EXCEEDED 0x00001000 /* set_user() noticed that RLIMIT_NPROC was exceeded */ 1673 #define PF_USED_MATH 0x00002000 /* If unset the fpu must be initialized before use */ 1674 #define PF_USER_WORKER 0x00004000 /* Kernel thread cloned from userspace thread */ 1675 #define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */ 1676 #define PF__HOLE__00010000 0x00010000 1677 #define PF_KSWAPD 0x00020000 /* I am kswapd */ 1678 #define PF_MEMALLOC_NOFS 0x00040000 /* All allocations inherit GFP_NOFS. See memalloc_nfs_save() */ 1679 #define PF_MEMALLOC_NOIO 0x00080000 /* All allocations inherit GFP_NOIO. 
See memalloc_noio_save() */ 1680 #define PF_LOCAL_THROTTLE 0x00100000 /* Throttle writes only against the bdi I write to, 1681 * I am cleaning dirty pages from some other bdi. */ 1682 #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ 1683 #define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ 1684 #define PF__HOLE__00800000 0x00800000 1685 #define PF__HOLE__01000000 0x01000000 1686 #define PF__HOLE__02000000 0x02000000 1687 #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */ 1688 #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ 1689 #define PF_MEMALLOC_PIN 0x10000000 /* Allocations constrained to zones which allow long term pinning. 1690 * See memalloc_pin_save() */ 1691 #define PF_BLOCK_TS 0x20000000 /* plug has ts that needs updating */ 1692 #define PF__HOLE__40000000 0x40000000 1693 #define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */ 1694 1695 /* 1696 * Only the _current_ task can read/write to tsk->flags, but other 1697 * tasks can access tsk->flags in readonly mode for example 1698 * with tsk_used_math (like during threaded core dumping). 1699 * There is however an exception to this rule during ptrace 1700 * or during fork: the ptracer task is allowed to write to the 1701 * child->flags of its traced child (same goes for fork, the parent 1702 * can write to the child->flags), because we're guaranteed the 1703 * child is not running and in turn not changing child->flags 1704 * at the same time the parent does it. 1705 */ 1706 #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0) 1707 #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0) 1708 #define clear_used_math() clear_stopped_child_used_math(current) 1709 #define set_used_math() set_stopped_child_used_math(current) 1710 1711 #define conditional_stopped_child_used_math(condition, child) \ 1712 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0) 1713 1714 #define conditional_used_math(condition) conditional_stopped_child_used_math(condition, current) 1715 1716 #define copy_to_stopped_child_used_math(child) \ 1717 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0) 1718 1719 /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */ 1720 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH) 1721 #define used_math() tsk_used_math(current) 1722 1723 static __always_inline bool is_percpu_thread(void) 1724 { 1725 #ifdef CONFIG_SMP 1726 return (current->flags & PF_NO_SETAFFINITY) && 1727 (current->nr_cpus_allowed == 1); 1728 #else 1729 return true; 1730 #endif 1731 } 1732 1733 /* Per-process atomic flags. */ 1734 #define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. 
*/ 1735 #define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */ 1736 #define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ 1737 #define PFA_SPEC_SSB_DISABLE 3 /* Speculative Store Bypass disabled */ 1738 #define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/ 1739 #define PFA_SPEC_IB_DISABLE 5 /* Indirect branch speculation restricted */ 1740 #define PFA_SPEC_IB_FORCE_DISABLE 6 /* Indirect branch speculation permanently restricted */ 1741 #define PFA_SPEC_SSB_NOEXEC 7 /* Speculative Store Bypass clear on execve() */ 1742 1743 #define TASK_PFA_TEST(name, func) \ 1744 static inline bool task_##func(struct task_struct *p) \ 1745 { return test_bit(PFA_##name, &p->atomic_flags); } 1746 1747 #define TASK_PFA_SET(name, func) \ 1748 static inline void task_set_##func(struct task_struct *p) \ 1749 { set_bit(PFA_##name, &p->atomic_flags); } 1750 1751 #define TASK_PFA_CLEAR(name, func) \ 1752 static inline void task_clear_##func(struct task_struct *p) \ 1753 { clear_bit(PFA_##name, &p->atomic_flags); } 1754 1755 TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs) 1756 TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs) 1757 1758 TASK_PFA_TEST(SPREAD_PAGE, spread_page) 1759 TASK_PFA_SET(SPREAD_PAGE, spread_page) 1760 TASK_PFA_CLEAR(SPREAD_PAGE, spread_page) 1761 1762 TASK_PFA_TEST(SPREAD_SLAB, spread_slab) 1763 TASK_PFA_SET(SPREAD_SLAB, spread_slab) 1764 TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab) 1765 1766 TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable) 1767 TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable) 1768 TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable) 1769 1770 TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec) 1771 TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec) 1772 TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec) 1773 1774 TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) 1775 TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) 1776 1777 TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable) 1778 TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable) 1779 TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable) 1780 1781 TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable) 1782 TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable) 1783 1784 static inline void 1785 current_restore_flags(unsigned long orig_flags, unsigned long flags) 1786 { 1787 current->flags &= ~flags; 1788 current->flags |= orig_flags & flags; 1789 } 1790 1791 extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial); 1792 extern int task_can_attach(struct task_struct *p); 1793 extern int dl_bw_alloc(int cpu, u64 dl_bw); 1794 extern void dl_bw_free(int cpu, u64 dl_bw); 1795 #ifdef CONFIG_SMP 1796 1797 /* do_set_cpus_allowed() - consider using set_cpus_allowed_ptr() instead */ 1798 extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask); 1799 1800 /** 1801 * set_cpus_allowed_ptr - set CPU affinity mask of a task 1802 * @p: the task 1803 * @new_mask: CPU affinity mask 1804 * 1805 * Return: zero if successful, or a negative error code 1806 */ 1807 extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask); 1808 extern int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node); 1809 extern void release_user_cpus_ptr(struct task_struct *p); 1810 extern int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask); 1811 extern void force_compatible_cpus_allowed_ptr(struct task_struct *p); 1812 extern void relax_compatible_cpus_allowed_ptr(struct task_struct *p); 
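/*
 * Illustrative (hypothetical caller) use of the affinity API, e.g. pinning
 * a kernel thread to one CPU and checking the result:
 *
 *	if (set_cpus_allowed_ptr(p, cpumask_of(cpu)))
 *		pr_warn("could not pin task to CPU%d\n", cpu);
 */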
#else
static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
}
static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
	/* Opencoded cpumask_test_cpu(0, new_mask) to avoid dependency on cpumask.h */
	if ((*cpumask_bits(new_mask) & 1) == 0)
		return -EINVAL;
	return 0;
}
static inline int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node)
{
	if (src->user_cpus_ptr)
		return -EINVAL;
	return 0;
}
static inline void release_user_cpus_ptr(struct task_struct *p)
{
	WARN_ON(p->user_cpus_ptr);
}

static inline int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
{
	return 0;
}
#endif

extern int yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);

/**
 * task_nice - return the nice value of a given task.
 * @p: the task in question.
 *
 * Return: The nice value [ -20 ... 0 ... 19 ].
 */
static inline int task_nice(const struct task_struct *p)
{
	return PRIO_TO_NICE((p)->static_prio);
}

extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int available_idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
extern void sched_set_fifo(struct task_struct *p);
extern void sched_set_fifo_low(struct task_struct *p);
extern void sched_set_normal(struct task_struct *p, int nice);
extern int sched_setattr(struct task_struct *, const struct sched_attr *);
extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);
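/*
 * Illustrative sketch only: in-kernel users that need a realtime worker are
 * expected to use the simple sched_set_fifo()/sched_set_fifo_low() interfaces
 * above rather than picking raw priorities via sched_setscheduler().
 * 'thread_fn' and 'data' below are hypothetical.
 *
 *	struct task_struct *t = kthread_run(thread_fn, data, "my-rt-worker");
 *
 *	if (!IS_ERR(t))
 *		sched_set_fifo(t);
 */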
1874 */ 1875 static __always_inline bool is_idle_task(const struct task_struct *p) 1876 { 1877 return !!(p->flags & PF_IDLE); 1878 } 1879 1880 extern struct task_struct *curr_task(int cpu); 1881 extern void ia64_set_curr_task(int cpu, struct task_struct *p); 1882 1883 void yield(void); 1884 1885 union thread_union { 1886 struct task_struct task; 1887 #ifndef CONFIG_THREAD_INFO_IN_TASK 1888 struct thread_info thread_info; 1889 #endif 1890 unsigned long stack[THREAD_SIZE/sizeof(long)]; 1891 }; 1892 1893 #ifndef CONFIG_THREAD_INFO_IN_TASK 1894 extern struct thread_info init_thread_info; 1895 #endif 1896 1897 extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)]; 1898 1899 #ifdef CONFIG_THREAD_INFO_IN_TASK 1900 # define task_thread_info(task) (&(task)->thread_info) 1901 #elif !defined(__HAVE_THREAD_FUNCTIONS) 1902 # define task_thread_info(task) ((struct thread_info *)(task)->stack) 1903 #endif 1904 1905 /* 1906 * find a task by one of its numerical ids 1907 * 1908 * find_task_by_pid_ns(): 1909 * finds a task by its pid in the specified namespace 1910 * find_task_by_vpid(): 1911 * finds a task by its virtual pid 1912 * 1913 * see also find_vpid() etc in include/linux/pid.h 1914 */ 1915 1916 extern struct task_struct *find_task_by_vpid(pid_t nr); 1917 extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns); 1918 1919 /* 1920 * find a task by its virtual pid and get the task struct 1921 */ 1922 extern struct task_struct *find_get_task_by_vpid(pid_t nr); 1923 1924 extern int wake_up_state(struct task_struct *tsk, unsigned int state); 1925 extern int wake_up_process(struct task_struct *tsk); 1926 extern void wake_up_new_task(struct task_struct *tsk); 1927 1928 #ifdef CONFIG_SMP 1929 extern void kick_process(struct task_struct *tsk); 1930 #else 1931 static inline void kick_process(struct task_struct *tsk) { } 1932 #endif 1933 1934 extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec); 1935 1936 static inline void set_task_comm(struct task_struct *tsk, const char *from) 1937 { 1938 __set_task_comm(tsk, from, false); 1939 } 1940 1941 extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk); 1942 #define get_task_comm(buf, tsk) ({ \ 1943 BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN); \ 1944 __get_task_comm(buf, sizeof(buf), tsk); \ 1945 }) 1946 1947 #ifdef CONFIG_SMP 1948 static __always_inline void scheduler_ipi(void) 1949 { 1950 /* 1951 * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting 1952 * TIF_NEED_RESCHED remotely (for the first time) will also send 1953 * this IPI. 1954 */ 1955 preempt_fold_need_resched(); 1956 } 1957 #else 1958 static inline void scheduler_ipi(void) { } 1959 #endif 1960 1961 extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state); 1962 1963 /* 1964 * Set thread flags in other task's structures. 
/*
 * Set thread flags in other task's structures.
 * See asm/thread_info.h for TIF_xxxx flags available:
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag,
					  bool value)
{
	update_ti_thread_flag(task_thread_info(tsk), flag, value);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
}

/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe. The return
 * value indicates whether a reschedule was actually done.
 * cond_resched_lock() will drop the spinlock before scheduling.
 */
#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
extern int __cond_resched(void);

#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)

void sched_dynamic_klp_enable(void);
void sched_dynamic_klp_disable(void);

DECLARE_STATIC_CALL(cond_resched, __cond_resched);

static __always_inline int _cond_resched(void)
{
	return static_call_mod(cond_resched)();
}

#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)

extern int dynamic_cond_resched(void);

static __always_inline int _cond_resched(void)
{
	return dynamic_cond_resched();
}

#else /* !CONFIG_PREEMPTION */

static inline int _cond_resched(void)
{
	klp_sched_try_switch();
	return __cond_resched();
}

#endif /* PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */

#else /* CONFIG_PREEMPTION && !CONFIG_PREEMPT_DYNAMIC */

static inline int _cond_resched(void)
{
	klp_sched_try_switch();
	return 0;
}

#endif /* !CONFIG_PREEMPTION || CONFIG_PREEMPT_DYNAMIC */

#define cond_resched() ({			\
	__might_resched(__FILE__, __LINE__, 0);	\
	_cond_resched();			\
})
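/*
 * Illustrative sketch only: long-running loops in process context call
 * cond_resched() periodically so that non-preemptible kernels keep
 * scheduling latency bounded. 'process_one_item()' and 'nr_items' below
 * are hypothetical.
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_one_item(i);
 *		cond_resched();
 *	}
 */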
extern int __cond_resched_lock(spinlock_t *lock);
extern int __cond_resched_rwlock_read(rwlock_t *lock);
extern int __cond_resched_rwlock_write(rwlock_t *lock);

#define MIGHT_RESCHED_RCU_SHIFT		8
#define MIGHT_RESCHED_PREEMPT_MASK	((1U << MIGHT_RESCHED_RCU_SHIFT) - 1)

#ifndef CONFIG_PREEMPT_RT
/*
 * Non RT kernels have an elevated preempt count due to the held lock,
 * but are not allowed to be inside an RCU read side critical section
 */
# define PREEMPT_LOCK_RESCHED_OFFSETS	PREEMPT_LOCK_OFFSET
#else
/*
 * spin/rw_lock() on RT implies rcu_read_lock(). The might_sleep() check in
 * cond_resched*lock() has to take that into account because it checks for
 * preempt_count() and rcu_preempt_depth().
 */
# define PREEMPT_LOCK_RESCHED_OFFSETS	\
	(PREEMPT_LOCK_OFFSET + (1U << MIGHT_RESCHED_RCU_SHIFT))
#endif

#define cond_resched_lock(lock) ({						\
	__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS);	\
	__cond_resched_lock(lock);						\
})

#define cond_resched_rwlock_read(lock) ({					\
	__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS);	\
	__cond_resched_rwlock_read(lock);					\
})

#define cond_resched_rwlock_write(lock) ({					\
	__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS);	\
	__cond_resched_rwlock_write(lock);					\
})

static __always_inline bool need_resched(void)
{
	return unlikely(tif_need_resched());
}
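/*
 * Illustrative sketch only: busy-wait style loops typically poll
 * need_resched() as a cheap pre-check and yield once a reschedule is
 * pending. 'resource_available()' below is hypothetical.
 *
 *	while (!resource_available()) {
 *		cpu_relax();
 *		if (need_resched())
 *			cond_resched();
 *	}
 */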
2149 */ 2150 #ifndef vcpu_is_preempted 2151 static inline bool vcpu_is_preempted(int cpu) 2152 { 2153 return false; 2154 } 2155 #endif 2156 2157 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); 2158 extern long sched_getaffinity(pid_t pid, struct cpumask *mask); 2159 2160 #ifndef TASK_SIZE_OF 2161 #define TASK_SIZE_OF(tsk) TASK_SIZE 2162 #endif 2163 2164 #ifdef CONFIG_SMP 2165 static inline bool owner_on_cpu(struct task_struct *owner) 2166 { 2167 /* 2168 * As lock holder preemption issue, we both skip spinning if 2169 * task is not on cpu or its cpu is preempted 2170 */ 2171 return READ_ONCE(owner->on_cpu) && !vcpu_is_preempted(task_cpu(owner)); 2172 } 2173 2174 /* Returns effective CPU energy utilization, as seen by the scheduler */ 2175 unsigned long sched_cpu_util(int cpu); 2176 #endif /* CONFIG_SMP */ 2177 2178 #ifdef CONFIG_SCHED_CORE 2179 extern void sched_core_free(struct task_struct *tsk); 2180 extern void sched_core_fork(struct task_struct *p); 2181 extern int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type, 2182 unsigned long uaddr); 2183 extern int sched_core_idle_cpu(int cpu); 2184 #else 2185 static inline void sched_core_free(struct task_struct *tsk) { } 2186 static inline void sched_core_fork(struct task_struct *p) { } 2187 static inline int sched_core_idle_cpu(int cpu) { return idle_cpu(cpu); } 2188 #endif 2189 2190 extern void sched_set_stop_task(int cpu, struct task_struct *stop); 2191 2192 #ifdef CONFIG_MEM_ALLOC_PROFILING 2193 static __always_inline struct alloc_tag *alloc_tag_save(struct alloc_tag *tag) 2194 { 2195 swap(current->alloc_tag, tag); 2196 return tag; 2197 } 2198 2199 static __always_inline void alloc_tag_restore(struct alloc_tag *tag, struct alloc_tag *old) 2200 { 2201 #ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG 2202 WARN(current->alloc_tag != tag, "current->alloc_tag was changed:\n"); 2203 #endif 2204 current->alloc_tag = old; 2205 } 2206 #else 2207 #define alloc_tag_save(_tag) NULL 2208 #define alloc_tag_restore(_tag, _old) do {} while (0) 2209 #endif 2210 2211 #endif 2212