#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

/*
 * Define 'struct task_struct' and provide the main scheduler
 * APIs (schedule(), wakeup variants, etc.)
 */

#include <uapi/linux/sched.h>

#include <asm/current.h>

#include <linux/pid.h>
#include <linux/sem.h>
#include <linux/shm.h>
#include <linux/kcov.h>
#include <linux/mutex.h>
#include <linux/plist.h>
#include <linux/hrtimer.h>
#include <linux/seccomp.h>
#include <linux/nodemask.h>
#include <linux/rcupdate.h>
#include <linux/resource.h>
#include <linux/latencytop.h>
#include <linux/sched/prio.h>
#include <linux/signal_types.h>
#include <linux/mm_types_task.h>
#include <linux/task_io_accounting.h>

/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
struct backing_dev_info;
struct bio_list;
struct blk_plug;
struct cfs_rq;
struct fs_struct;
struct futex_pi_state;
struct io_context;
struct mempolicy;
struct nameidata;
struct nsproxy;
struct perf_event_context;
struct pid_namespace;
struct pipe_inode_info;
struct rcu_node;
struct reclaim_state;
struct robust_list_head;
struct sched_attr;
struct sched_param;
struct seq_file;
struct sighand_struct;
struct signal_struct;
struct task_delay_info;
struct task_group;

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state is
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */

/* Used in tsk->state: */
#define TASK_RUNNING 0
#define TASK_INTERRUPTIBLE 1
#define TASK_UNINTERRUPTIBLE 2
#define __TASK_STOPPED 4
#define __TASK_TRACED 8
/* Used in tsk->exit_state: */
#define EXIT_DEAD 16
#define EXIT_ZOMBIE 32
#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
/* Used in tsk->state again: */
#define TASK_DEAD 64
#define TASK_WAKEKILL 128
#define TASK_WAKING 256
#define TASK_PARKED 512
#define TASK_NOLOAD 1024
#define TASK_NEW 2048
#define TASK_STATE_MAX 4096

#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn"

/* Convenience macros for the sake of set_current_state(): */
#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)

#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)

/* Convenience macros for the sake of wake_up(): */
#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* get_task_state(): */
#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
                     TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
                     __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)

#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)

#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)

#define task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)

#define task_contributes_to_load(task) ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
                                        (task->flags & PF_FROZEN) == 0 && \
                                        (task->state & TASK_NOLOAD) == 0)
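/*
 * Illustrative sketch (not part of the kernel API surface): a typical
 * killable wait built from the state bits above. It assumes the caller can
 * use fatal_signal_pending() from <linux/sched/signal.h>; 'my_condition'
 * is a hypothetical name.
 *
 *	for (;;) {
 *		set_current_state(TASK_KILLABLE);
 *		if (my_condition || fatal_signal_pending(current))
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * TASK_KILLABLE behaves like TASK_UNINTERRUPTIBLE except that fatal
 * signals (via TASK_WAKEKILL) can still wake the task.
 */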
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP

#define __set_current_state(state_value) \
        do { \
                current->task_state_change = _THIS_IP_; \
                current->state = (state_value); \
        } while (0)
#define set_current_state(state_value) \
        do { \
                current->task_state_change = _THIS_IP_; \
                smp_store_mb(current->state, (state_value)); \
        } while (0)

#else
/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *   for (;;) {
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (!need_sleep)
 *		break;
 *
 *	schedule();
 *   }
 *   __set_current_state(TASK_RUNNING);
 *
 * If the caller does not need such serialisation (because, for instance, the
 * condition test and condition change and wakeup are under the same lock) then
 * use __set_current_state().
 *
 * The above is typically ordered against the wakeup, which does:
 *
 *   need_sleep = false;
 *   wake_up_state(p, TASK_UNINTERRUPTIBLE);
 *
 * Where wake_up_state() (and all other wakeup primitives) imply enough
 * barriers to order the store of the variable against wakeup.
 *
 * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
 * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
 * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
 *
 * This is obviously fine, since they both store the exact same value.
 *
 * Also see the comments of try_to_wake_up().
 */
#define __set_current_state(state_value) do { current->state = (state_value); } while (0)
#define set_current_state(state_value) smp_store_mb(current->state, (state_value))
#endif

/* Task command name length: */
#define TASK_COMM_LEN 16

extern cpumask_var_t cpu_isolated_map;

extern void scheduler_tick(void);

#define MAX_SCHEDULE_TIMEOUT LONG_MAX

extern long schedule_timeout(long timeout);
extern long schedule_timeout_interruptible(long timeout);
extern long schedule_timeout_killable(long timeout);
extern long schedule_timeout_uninterruptible(long timeout);
extern long schedule_timeout_idle(long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);

extern int __must_check io_schedule_prepare(void);
extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);
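/*
 * Illustrative sketch (assumptions, not part of this header): waiting for
 * up to 100ms using schedule_timeout(). msecs_to_jiffies() comes from
 * <linux/jiffies.h>.
 *
 *	long remaining;
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(msecs_to_jiffies(100));
 *
 * schedule_timeout() returns 0 if the full timeout elapsed, or the number of
 * jiffies left if the task was woken early (e.g. by wake_up_process()).
 * The schedule_timeout_interruptible()/_killable()/_uninterruptible()
 * variants simply set the corresponding task state before sleeping.
 */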
/**
 * struct prev_cputime - snapshot of system and user cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 * @lock: protects the above two fields
 *
 * Stores previous user/system time values such that we can guarantee
 * monotonicity.
 */
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
        u64 utime;
        u64 stime;
        raw_spinlock_t lock;
#endif
};

/**
 * struct task_cputime - collected CPU time counts
 * @utime: time spent in user mode, in nanoseconds
 * @stime: time spent in kernel mode, in nanoseconds
 * @sum_exec_runtime: total time spent on the CPU, in nanoseconds
 *
 * This structure groups together three kinds of CPU time that are tracked for
 * threads and thread groups. Most things considering CPU time want to group
 * these counts together and treat all three of them in parallel.
 */
struct task_cputime {
        u64 utime;
        u64 stime;
        unsigned long long sum_exec_runtime;
};

/* Alternate field names when used on cache expirations: */
#define virt_exp utime
#define prof_exp stime
#define sched_exp sum_exec_runtime

enum vtime_state {
        /* Task is sleeping or running in a CPU with VTIME inactive: */
        VTIME_INACTIVE = 0,
        /* Task runs in userspace in a CPU with VTIME active: */
        VTIME_USER,
        /* Task runs in kernelspace in a CPU with VTIME active: */
        VTIME_SYS,
};

struct vtime {
        seqcount_t seqcount;
        unsigned long long starttime;
        enum vtime_state state;
        u64 utime;
        u64 stime;
        u64 gtime;
};

struct sched_info {
#ifdef CONFIG_SCHED_INFO
        /* Cumulative counters: */

        /* # of times we have run on this CPU: */
        unsigned long pcount;

        /* Time spent waiting on a runqueue: */
        unsigned long long run_delay;

        /* Timestamps: */

        /* When did we last run on a CPU? */
        unsigned long long last_arrival;

        /* When were we last queued to run? */
        unsigned long long last_queued;

#endif /* CONFIG_SCHED_INFO */
};

/*
 * Integer metrics need fixed point arithmetic, e.g., sched/fair
 * has a few: load, load_avg, util_avg, freq, and capacity.
 *
 * We define a basic fixed point arithmetic range, and then formalize
 * all these metrics based on that basic range.
 */
# define SCHED_FIXEDPOINT_SHIFT 10
# define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT)

struct load_weight {
        unsigned long weight;
        u32 inv_weight;
};

/*
 * The load_avg/util_avg accumulates an infinite geometric series
 * (see __update_load_avg() in kernel/sched/fair.c).
 *
 * [load_avg definition]
 *
 *   load_avg = runnable% * scale_load_down(load)
 *
 * where runnable% is the time ratio that a sched_entity is runnable.
 * For cfs_rq, it is the aggregated load_avg of all runnable and
 * blocked sched_entities.
 *
 * load_avg may also take frequency scaling into account:
 *
 *   load_avg = runnable% * scale_load_down(load) * freq%
 *
 * where freq% is the CPU frequency normalized to the highest frequency.
 *
 * [util_avg definition]
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE
 *
 * where running% is the time ratio that a sched_entity is running on
 * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
 * and blocked sched_entities.
 *
 * util_avg may also factor frequency scaling and CPU capacity scaling:
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity%
 *
 * where freq% is the same as above, and capacity% is the CPU capacity
 * normalized to the greatest capacity (due to uarch differences, etc).
 *
 * N.B., the above ratios (runnable%, running%, freq%, and capacity%)
 * themselves are in the range of [0, 1]. To do fixed point arithmetic,
 * we therefore scale them to as large a range as necessary. This is for
 * example reflected by util_avg's SCHED_CAPACITY_SCALE.
 *
 * [Overflow issue]
 *
 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
 * with the highest load (=88761), always runnable on a single cfs_rq,
 * and should not overflow as the number already hits PID_MAX_LIMIT.
 *
 * For all other cases (including 32-bit kernels), struct load_weight's
 * weight will overflow first before we do, because:
 *
 *   Max(load_avg) <= Max(load.weight)
 *
 * Then it is the load_weight's responsibility to consider overflow
 * issues.
 */
struct sched_avg {
        u64 last_update_time;
        u64 load_sum;
        u32 util_sum;
        u32 period_contrib;
        unsigned long load_avg;
        unsigned long util_avg;
};
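/*
 * Worked example (illustrative only, numbers are hypothetical): with
 * SCHED_FIXEDPOINT_SHIFT == 10, the nice-0 weight of 1024 represents 1.0.
 * A nice-0 sched_entity that was runnable for roughly half of the recent
 * past therefore settles around
 *
 *   load_avg ~= 0.5 * 1024 = 512
 *
 * and a task running about 25% of the time on a full-capacity CPU sees
 * util_avg ~= 0.25 * SCHED_CAPACITY_SCALE (1024) = 256.
 */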
struct sched_statistics {
#ifdef CONFIG_SCHEDSTATS
        u64 wait_start;
        u64 wait_max;
        u64 wait_count;
        u64 wait_sum;
        u64 iowait_count;
        u64 iowait_sum;

        u64 sleep_start;
        u64 sleep_max;
        s64 sum_sleep_runtime;

        u64 block_start;
        u64 block_max;
        u64 exec_max;
        u64 slice_max;

        u64 nr_migrations_cold;
        u64 nr_failed_migrations_affine;
        u64 nr_failed_migrations_running;
        u64 nr_failed_migrations_hot;
        u64 nr_forced_migrations;

        u64 nr_wakeups;
        u64 nr_wakeups_sync;
        u64 nr_wakeups_migrate;
        u64 nr_wakeups_local;
        u64 nr_wakeups_remote;
        u64 nr_wakeups_affine;
        u64 nr_wakeups_affine_attempts;
        u64 nr_wakeups_passive;
        u64 nr_wakeups_idle;
#endif
};

struct sched_entity {
        /* For load-balancing: */
        struct load_weight load;
        struct rb_node run_node;
        struct list_head group_node;
        unsigned int on_rq;

        u64 exec_start;
        u64 sum_exec_runtime;
        u64 vruntime;
        u64 prev_sum_exec_runtime;

        u64 nr_migrations;

        struct sched_statistics statistics;

#ifdef CONFIG_FAIR_GROUP_SCHED
        int depth;
        struct sched_entity *parent;
        /* rq on which this entity is (to be) queued: */
        struct cfs_rq *cfs_rq;
        /* rq "owned" by this entity/group: */
        struct cfs_rq *my_q;
#endif

#ifdef CONFIG_SMP
        /*
         * Per entity load average tracking.
         *
         * Put into separate cache line so it does not
         * collide with read-mostly values above.
         */
        struct sched_avg avg ____cacheline_aligned_in_smp;
#endif
};

struct sched_rt_entity {
        struct list_head run_list;
        unsigned long timeout;
        unsigned long watchdog_stamp;
        unsigned int time_slice;
        unsigned short on_rq;
        unsigned short on_list;

        struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
        struct sched_rt_entity *parent;
        /* rq on which this entity is (to be) queued: */
        struct rt_rq *rt_rq;
        /* rq "owned" by this entity/group: */
        struct rt_rq *my_q;
#endif
} __randomize_layout;
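/*
 * Illustrative example (hypothetical numbers): a task admitted via
 * sched_setattr() with SCHED_DEADLINE parameters
 *
 *   sched_runtime  = 10 ms
 *   sched_deadline = 30 ms
 *   sched_period   = 100 ms
 *
 * reserves 10ms of CPU time in every 100ms window and must receive it
 * within 30ms of each period start, i.e. a bandwidth of
 * dl_runtime / dl_period = 10%. These values are copied into the
 * dl_runtime/dl_deadline/dl_period fields below.
 */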
struct sched_dl_entity {
        struct rb_node rb_node;

        /*
         * Original scheduling parameters. Copied here from sched_attr
         * during sched_setattr(), they will remain the same until
         * the next sched_setattr().
         */
        u64 dl_runtime;   /* Maximum runtime for each instance */
        u64 dl_deadline;  /* Relative deadline of each instance */
        u64 dl_period;    /* Separation of two instances (period) */
        u64 dl_bw;        /* dl_runtime / dl_period */
        u64 dl_density;   /* dl_runtime / dl_deadline */

        /*
         * Actual scheduling parameters. Initialized with the values above,
         * they are continuously updated during task execution. Note that
         * the remaining runtime could be < 0 in case we are in overrun.
         */
        s64 runtime;       /* Remaining runtime for this instance */
        u64 deadline;      /* Absolute deadline for this instance */
        unsigned int flags; /* Specifying the scheduler behaviour */

        /*
         * Some bool flags:
         *
         * @dl_throttled tells if we exhausted the runtime. If so, the
         * task has to wait for a replenishment to be performed at the
         * next firing of dl_timer.
         *
         * @dl_boosted tells if we are boosted due to DI. If so we are
         * outside bandwidth enforcement mechanism (but only until we
         * exit the critical section);
         *
         * @dl_yielded tells if task gave up the CPU before consuming
         * all its available runtime during the last job.
         *
         * @dl_non_contending tells if the task is inactive while still
         * contributing to the active utilization. In other words, it
         * indicates if the inactive timer has been armed and its handler
         * has not been executed yet. This flag is useful to avoid race
         * conditions between the inactive timer handler and the wakeup
         * code.
         */
        int dl_throttled;
        int dl_boosted;
        int dl_yielded;
        int dl_non_contending;

        /*
         * Bandwidth enforcement timer. Each -deadline task has its
         * own bandwidth to be enforced, thus we need one timer per task.
         */
        struct hrtimer dl_timer;

        /*
         * Inactive timer, responsible for decreasing the active utilization
         * at the "0-lag time". When a -deadline task blocks, it contributes
         * to GRUB's active utilization until the "0-lag time", hence a
         * timer is needed to decrease the active utilization at the correct
         * time.
         */
        struct hrtimer inactive_timer;
};

union rcu_special {
        struct {
                u8 blocked;
                u8 need_qs;
                u8 exp_need_qs;

                /* Otherwise the compiler can store garbage here: */
                u8 pad;
        } b; /* Bits. */
        u32 s; /* Set of bits. */
};

enum perf_event_task_context {
        perf_invalid_context = -1,
        perf_hw_context = 0,
        perf_sw_context,
        perf_nr_task_contexts,
};

struct wake_q_node {
        struct wake_q_node *next;
};

struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
        /*
         * For reasons of header soup (see current_thread_info()), this
         * must be the first element of task_struct.
         */
        struct thread_info thread_info;
#endif
        /* -1 unrunnable, 0 runnable, >0 stopped: */
        volatile long state;

        /*
         * This begins the randomizable portion of task_struct. Only
         * scheduling-critical items should be added above here.
         */
        randomized_struct_fields_start

        void *stack;
        atomic_t usage;
        /* Per task flags (PF_*), defined further below: */
        unsigned int flags;
        unsigned int ptrace;

#ifdef CONFIG_SMP
        struct llist_node wake_entry;
        int on_cpu;
#ifdef CONFIG_THREAD_INFO_IN_TASK
        /* Current CPU: */
        unsigned int cpu;
#endif
        unsigned int wakee_flips;
        unsigned long wakee_flip_decay_ts;
        struct task_struct *last_wakee;

        int wake_cpu;
#endif
        int on_rq;

        int prio;
        int static_prio;
        int normal_prio;
        unsigned int rt_priority;

        const struct sched_class *sched_class;
        struct sched_entity se;
        struct sched_rt_entity rt;
#ifdef CONFIG_CGROUP_SCHED
        struct task_group *sched_task_group;
#endif
        struct sched_dl_entity dl;

#ifdef CONFIG_PREEMPT_NOTIFIERS
        /* List of struct preempt_notifier: */
        struct hlist_head preempt_notifiers;
#endif

#ifdef CONFIG_BLK_DEV_IO_TRACE
        unsigned int btrace_seq;
#endif

        unsigned int policy;
        int nr_cpus_allowed;
        cpumask_t cpus_allowed;

#ifdef CONFIG_PREEMPT_RCU
        int rcu_read_lock_nesting;
        union rcu_special rcu_read_unlock_special;
        struct list_head rcu_node_entry;
        struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_TASKS_RCU
        unsigned long rcu_tasks_nvcsw;
        u8 rcu_tasks_holdout;
        u8 rcu_tasks_idx;
        int rcu_tasks_idle_cpu;
        struct list_head rcu_tasks_holdout_list;
#endif /* #ifdef CONFIG_TASKS_RCU */

        struct sched_info sched_info;

        struct list_head tasks;
#ifdef CONFIG_SMP
        struct plist_node pushable_tasks;
        struct rb_node pushable_dl_tasks;
#endif

        struct mm_struct *mm;
        struct mm_struct *active_mm;

        /* Per-thread vma caching: */
        struct vmacache vmacache;

#ifdef SPLIT_RSS_COUNTING
        struct task_rss_stat rss_stat;
#endif
        int exit_state;
        int exit_code;
        int exit_signal;
        /* The signal sent when the parent dies: */
        int pdeath_signal;
        /* JOBCTL_*, siglock protected: */
        unsigned long jobctl;

        /* Used for emulating ABI behavior of previous Linux versions: */
        unsigned int personality;

        /* Scheduler bits, serialized by scheduler locks: */
        unsigned sched_reset_on_fork:1;
        unsigned sched_contributes_to_load:1;
        unsigned sched_migrated:1;
        unsigned sched_remote_wakeup:1;
        /* Force alignment to the next boundary: */
        unsigned :0;

        /* Unserialized, strictly 'current' */

        /* Bit to tell LSMs we're in execve(): */
        unsigned in_execve:1;
        unsigned in_iowait:1;
#ifndef TIF_RESTORE_SIGMASK
        unsigned restore_sigmask:1;
#endif
#ifdef CONFIG_MEMCG
        unsigned memcg_may_oom:1;
#ifndef CONFIG_SLOB
        unsigned memcg_kmem_skip_account:1;
#endif
#endif
#ifdef CONFIG_COMPAT_BRK
        unsigned brk_randomized:1;
#endif
#ifdef CONFIG_CGROUPS
        /* disallow userland-initiated cgroup migration */
        unsigned no_cgroup_migration:1;
#endif

        unsigned long atomic_flags; /* Flags requiring atomic access. */

        struct restart_block restart_block;

        pid_t pid;
        pid_t tgid;

#ifdef CONFIG_CC_STACKPROTECTOR
        /* Canary value for the -fstack-protector GCC feature: */
        unsigned long stack_canary;
#endif
        /*
         * Pointers to the (original) parent process, youngest child, younger sibling,
         * older sibling, respectively.
         * (p->father can be replaced with p->real_parent->pid)
         */

        /* Real parent process: */
        struct task_struct __rcu *real_parent;

        /* Recipient of SIGCHLD, wait4() reports: */
        struct task_struct __rcu *parent;

        /*
         * Children/sibling form the list of natural children:
         */
        struct list_head children;
        struct list_head sibling;
        struct task_struct *group_leader;

        /*
         * 'ptraced' is the list of tasks this task is using ptrace() on.
         *
         * This includes both natural children and PTRACE_ATTACH targets.
         * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
         */
        struct list_head ptraced;
        struct list_head ptrace_entry;

        /* PID/PID hash table linkage. */
        struct pid_link pids[PIDTYPE_MAX];
        struct list_head thread_group;
        struct list_head thread_node;

        struct completion *vfork_done;

        /* CLONE_CHILD_SETTID: */
        int __user *set_child_tid;

        /* CLONE_CHILD_CLEARTID: */
        int __user *clear_child_tid;

        u64 utime;
        u64 stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
        u64 utimescaled;
        u64 stimescaled;
#endif
        u64 gtime;
        struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
        struct vtime vtime;
#endif

#ifdef CONFIG_NO_HZ_FULL
        atomic_t tick_dep_mask;
#endif
        /* Context switch counts: */
        unsigned long nvcsw;
        unsigned long nivcsw;

        /* Monotonic time in nsecs: */
        u64 start_time;

        /* Boot based time in nsecs: */
        u64 real_start_time;

        /* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
        unsigned long min_flt;
        unsigned long maj_flt;

#ifdef CONFIG_POSIX_TIMERS
        struct task_cputime cputime_expires;
        struct list_head cpu_timers[3];
#endif

        /* Process credentials: */

        /* Tracer's credentials at attach: */
        const struct cred __rcu *ptracer_cred;

        /* Objective and real subjective task credentials (COW): */
        const struct cred __rcu *real_cred;

        /* Effective (overridable) subjective task credentials (COW): */
        const struct cred __rcu *cred;

        /*
         * executable name, excluding path.
755 * 756 * - normally initialized setup_new_exec() 757 * - access it with [gs]et_task_comm() 758 * - lock it with task_lock() 759 */ 760 char comm[TASK_COMM_LEN]; 761 762 struct nameidata *nameidata; 763 764 #ifdef CONFIG_SYSVIPC 765 struct sysv_sem sysvsem; 766 struct sysv_shm sysvshm; 767 #endif 768 #ifdef CONFIG_DETECT_HUNG_TASK 769 unsigned long last_switch_count; 770 #endif 771 /* Filesystem information: */ 772 struct fs_struct *fs; 773 774 /* Open file information: */ 775 struct files_struct *files; 776 777 /* Namespaces: */ 778 struct nsproxy *nsproxy; 779 780 /* Signal handlers: */ 781 struct signal_struct *signal; 782 struct sighand_struct *sighand; 783 sigset_t blocked; 784 sigset_t real_blocked; 785 /* Restored if set_restore_sigmask() was used: */ 786 sigset_t saved_sigmask; 787 struct sigpending pending; 788 unsigned long sas_ss_sp; 789 size_t sas_ss_size; 790 unsigned int sas_ss_flags; 791 792 struct callback_head *task_works; 793 794 struct audit_context *audit_context; 795 #ifdef CONFIG_AUDITSYSCALL 796 kuid_t loginuid; 797 unsigned int sessionid; 798 #endif 799 struct seccomp seccomp; 800 801 /* Thread group tracking: */ 802 u32 parent_exec_id; 803 u32 self_exec_id; 804 805 /* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */ 806 spinlock_t alloc_lock; 807 808 /* Protection of the PI data structures: */ 809 raw_spinlock_t pi_lock; 810 811 struct wake_q_node wake_q; 812 813 #ifdef CONFIG_RT_MUTEXES 814 /* PI waiters blocked on a rt_mutex held by this task: */ 815 struct rb_root_cached pi_waiters; 816 /* Updated under owner's pi_lock and rq lock */ 817 struct task_struct *pi_top_task; 818 /* Deadlock detection and priority inheritance handling: */ 819 struct rt_mutex_waiter *pi_blocked_on; 820 #endif 821 822 #ifdef CONFIG_DEBUG_MUTEXES 823 /* Mutex deadlock detection: */ 824 struct mutex_waiter *blocked_on; 825 #endif 826 827 #ifdef CONFIG_TRACE_IRQFLAGS 828 unsigned int irq_events; 829 unsigned long hardirq_enable_ip; 830 unsigned long hardirq_disable_ip; 831 unsigned int hardirq_enable_event; 832 unsigned int hardirq_disable_event; 833 int hardirqs_enabled; 834 int hardirq_context; 835 unsigned long softirq_disable_ip; 836 unsigned long softirq_enable_ip; 837 unsigned int softirq_disable_event; 838 unsigned int softirq_enable_event; 839 int softirqs_enabled; 840 int softirq_context; 841 #endif 842 843 #ifdef CONFIG_LOCKDEP 844 # define MAX_LOCK_DEPTH 48UL 845 u64 curr_chain_key; 846 int lockdep_depth; 847 unsigned int lockdep_recursion; 848 struct held_lock held_locks[MAX_LOCK_DEPTH]; 849 #endif 850 851 #ifdef CONFIG_LOCKDEP_CROSSRELEASE 852 #define MAX_XHLOCKS_NR 64UL 853 struct hist_lock *xhlocks; /* Crossrelease history locks */ 854 unsigned int xhlock_idx; 855 /* For restoring at history boundaries */ 856 unsigned int xhlock_idx_hist[XHLOCK_CTX_NR]; 857 unsigned int hist_id; 858 /* For overwrite check at each context exit */ 859 unsigned int hist_id_save[XHLOCK_CTX_NR]; 860 #endif 861 862 #ifdef CONFIG_UBSAN 863 unsigned int in_ubsan; 864 #endif 865 866 /* Journalling filesystem info: */ 867 void *journal_info; 868 869 /* Stacked block device info: */ 870 struct bio_list *bio_list; 871 872 #ifdef CONFIG_BLOCK 873 /* Stack plugging: */ 874 struct blk_plug *plug; 875 #endif 876 877 /* VM state: */ 878 struct reclaim_state *reclaim_state; 879 880 struct backing_dev_info *backing_dev_info; 881 882 struct io_context *io_context; 883 884 /* Ptrace state: */ 885 unsigned long ptrace_message; 886 siginfo_t *last_siginfo; 887 888 
        struct task_io_accounting ioac;
#ifdef CONFIG_TASK_XACCT
        /* Accumulated RSS usage: */
        u64 acct_rss_mem1;
        /* Accumulated virtual memory usage: */
        u64 acct_vm_mem1;
        /* stime + utime since last update: */
        u64 acct_timexpd;
#endif
#ifdef CONFIG_CPUSETS
        /* Protected by ->alloc_lock: */
        nodemask_t mems_allowed;
        /* Sequence number to catch updates: */
        seqcount_t mems_allowed_seq;
        int cpuset_mem_spread_rotor;
        int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
        /* Control Group info protected by css_set_lock: */
        struct css_set __rcu *cgroups;
        /* cg_list protected by css_set_lock and tsk->alloc_lock: */
        struct list_head cg_list;
#endif
#ifdef CONFIG_INTEL_RDT
        u32 closid;
        u32 rmid;
#endif
#ifdef CONFIG_FUTEX
        struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
        struct compat_robust_list_head __user *compat_robust_list;
#endif
        struct list_head pi_state_list;
        struct futex_pi_state *pi_state_cache;
#endif
#ifdef CONFIG_PERF_EVENTS
        struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
        struct mutex perf_event_mutex;
        struct list_head perf_event_list;
#endif
#ifdef CONFIG_DEBUG_PREEMPT
        unsigned long preempt_disable_ip;
#endif
#ifdef CONFIG_NUMA
        /* Protected by alloc_lock: */
        struct mempolicy *mempolicy;
        short il_prev;
        short pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING
        int numa_scan_seq;
        unsigned int numa_scan_period;
        unsigned int numa_scan_period_max;
        int numa_preferred_nid;
        unsigned long numa_migrate_retry;
        /* Migration stamp: */
        u64 node_stamp;
        u64 last_task_numa_placement;
        u64 last_sum_exec_runtime;
        struct callback_head numa_work;

        struct list_head numa_entry;
        struct numa_group *numa_group;

        /*
         * numa_faults is an array split into four regions:
         * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
         * in this precise order.
         *
         * faults_memory: Exponential decaying average of faults on a per-node
         * basis. Scheduling placement decisions are made based on these
         * counts. The values remain static for the duration of a PTE scan.
         * faults_cpu: Track the nodes the process was running on when a NUMA
         * hinting fault was incurred.
         * faults_memory_buffer and faults_cpu_buffer: Record faults per node
         * during the current scan window. When the scan completes, the counts
         * in faults_memory and faults_cpu decay and these values are copied.
         */
        unsigned long *numa_faults;
        unsigned long total_numa_faults;

        /*
         * numa_faults_locality tracks if faults recorded during the last
         * scan window were remote/local or failed to migrate.
         * The task scan period is adapted based on the locality of the
         * faults with different weights depending on whether they were
         * shared or private faults.
         */
        unsigned long numa_faults_locality[3];

        unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */

        struct tlbflush_unmap_batch tlb_ubc;

        struct rcu_head rcu;

        /* Cache last used pipe for splice(): */
        struct pipe_inode_info *splice_pipe;

        struct page_frag task_frag;

#ifdef CONFIG_TASK_DELAY_ACCT
        struct task_delay_info *delays;
#endif

#ifdef CONFIG_FAULT_INJECTION
        int make_it_fail;
        unsigned int fail_nth;
#endif
        /*
         * When (nr_dirtied >= nr_dirtied_pause), it's time to call
         * balance_dirty_pages() for a dirty throttling pause:
         */
        int nr_dirtied;
        int nr_dirtied_pause;
        /* Start of a write-and-pause period: */
        unsigned long dirty_paused_when;

#ifdef CONFIG_LATENCYTOP
        int latency_record_count;
        struct latency_record latency_record[LT_SAVECOUNT];
#endif
        /*
         * Time slack values; these are used to round up poll() and
         * select() etc timeout values. These are in nanoseconds.
         */
        u64 timer_slack_ns;
        u64 default_timer_slack_ns;

#ifdef CONFIG_KASAN
        unsigned int kasan_depth;
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        /* Index of current stored address in ret_stack: */
        int curr_ret_stack;

        /* Stack of return addresses for return function tracing: */
        struct ftrace_ret_stack *ret_stack;

        /* Timestamp for last schedule: */
        unsigned long long ftrace_timestamp;

        /*
         * Number of functions that haven't been traced
         * because of depth overrun:
         */
        atomic_t trace_overrun;

        /* Pause tracing: */
        atomic_t tracing_graph_pause;
#endif

#ifdef CONFIG_TRACING
        /* State flags for use by tracers: */
        unsigned long trace;

        /* Bitmask and counter of trace recursion: */
        unsigned long trace_recursion;
#endif /* CONFIG_TRACING */

#ifdef CONFIG_KCOV
        /* Coverage collection mode enabled for this task (0 if disabled): */
        enum kcov_mode kcov_mode;

        /* Size of the kcov_area: */
        unsigned int kcov_size;

        /* Buffer for coverage collection: */
        void *kcov_area;

        /* KCOV descriptor wired with this task or NULL: */
        struct kcov *kcov;
#endif

#ifdef CONFIG_MEMCG
        struct mem_cgroup *memcg_in_oom;
        gfp_t memcg_oom_gfp_mask;
        int memcg_oom_order;

        /* Number of pages to reclaim on returning to userland: */
        unsigned int memcg_nr_pages_over_high;
#endif

#ifdef CONFIG_UPROBES
        struct uprobe_task *utask;
#endif
#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
        unsigned int sequential_io;
        unsigned int sequential_io_avg;
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
        unsigned long task_state_change;
#endif
        int pagefault_disabled;
#ifdef CONFIG_MMU
        struct task_struct *oom_reaper_list;
#endif
#ifdef CONFIG_VMAP_STACK
        struct vm_struct *stack_vm_area;
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
        /* A live task holds one reference: */
        atomic_t stack_refcount;
#endif
#ifdef CONFIG_LIVEPATCH
        int patch_state;
#endif
#ifdef CONFIG_SECURITY
        /* Used by LSM modules for access restriction: */
        void *security;
#endif

        /*
         * New fields for task_struct should be added above here, so that
         * they are included in the randomized portion of task_struct.
         */
        randomized_struct_fields_end

        /* CPU-specific state of this task: */
        struct thread_struct thread;

        /*
         * WARNING: on x86, 'thread_struct' contains a variable-sized
         * structure. It *MUST* be at the end of 'task_struct'.
         *
         * Do not put anything below here!
         */
};

static inline struct pid *task_pid(struct task_struct *task)
{
        return task->pids[PIDTYPE_PID].pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
        return task->group_leader->pids[PIDTYPE_PID].pid;
}

/*
 * Without tasklist or RCU lock it is not safe to dereference
 * the result of task_pgrp/task_session even if task == current,
 * we can race with another thread doing sys_setsid/sys_setpgid.
 */
static inline struct pid *task_pgrp(struct task_struct *task)
{
        return task->group_leader->pids[PIDTYPE_PGID].pid;
}

static inline struct pid *task_session(struct task_struct *task)
{
        return task->group_leader->pids[PIDTYPE_SID].pid;
}

/*
 * the helpers to get the task's different pids as they are seen
 * from various namespaces
 *
 * task_xid_nr()    : global id, i.e. the id seen from the init namespace;
 * task_xid_vnr()   : virtual id, i.e. the id seen from the pid namespace of
 *                    current.
 * task_xid_nr_ns() : id seen from the ns specified;
 *
 * see also pid_nr() etc in include/linux/pid.h
 */
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);

static inline pid_t task_pid_nr(struct task_struct *tsk)
{
        return tsk->pid;
}

static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
        return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}

static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
        return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
}


static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
        return tsk->tgid;
}
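/*
 * Illustrative sketch (hypothetical caller): the same task can have
 * different pid values depending on the observer's pid namespace.
 *
 *	pid_t global = task_pid_nr(p);		(as seen from the init namespace)
 *	pid_t local  = task_pid_vnr(p);		(as seen from current's namespace)
 *	pid_t in_ns  = task_pid_nr_ns(p, ns);	(as seen from a given namespace)
 *
 * The _vnr()/_nr_ns() variants go through __task_pid_nr_ns(); the task
 * must remain valid for the duration of the call (e.g. pinned by a
 * reference or by RCU).
 */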
/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state).
 * If pid_alive fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
 *
 * Return: 1 if the process is alive. 0 otherwise.
 */
static inline int pid_alive(const struct task_struct *p)
{
        return p->pids[PIDTYPE_PID].pid != NULL;
}

static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
        return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}

static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
        return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}


static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
        return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}

static inline pid_t task_session_vnr(struct task_struct *tsk)
{
        return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}

static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
        return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, ns);
}

static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
        return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, NULL);
}

static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
{
        pid_t pid = 0;

        rcu_read_lock();
        if (pid_alive(tsk))
                pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
        rcu_read_unlock();

        return pid;
}

static inline pid_t task_ppid_nr(const struct task_struct *tsk)
{
        return task_ppid_nr_ns(tsk, &init_pid_ns);
}

/* Obsolete, do not use: */
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
        return task_pgrp_nr_ns(tsk, &init_pid_ns);
}

static inline char task_state_to_char(struct task_struct *task)
{
        const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
        unsigned long state = task->state;

        state = state ? __ffs(state) + 1 : 0;

        /* Make sure the string lines up properly with the number of task states: */
        BUILD_BUG_ON(sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1);

        return state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?';
}
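/*
 * Worked example (illustrative): with TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn",
 * a task with state == TASK_INTERRUPTIBLE (1) maps to __ffs(1) + 1 == 1 and
 * thus to 'S', TASK_UNINTERRUPTIBLE (2) maps to 'D', and __TASK_STOPPED (4)
 * maps to 'T'. state == 0 (TASK_RUNNING) goes straight to index 0, i.e. 'R'.
 */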
/**
 * is_global_init - check if a task structure is init. Since init
 * is free to have sub-threads we need to check tgid.
 * @tsk: Task structure to be checked.
 *
 * Check if a task structure is the first user space task the kernel created.
 *
 * Return: 1 if the task structure is init. 0 otherwise.
 */
static inline int is_global_init(struct task_struct *tsk)
{
        return task_tgid_nr(tsk) == 1;
}

extern struct pid *cad_pid;

/*
 * Per process flags
 */
#define PF_IDLE 0x00000002 /* I am an IDLE thread */
#define PF_EXITING 0x00000004 /* Getting shut down */
#define PF_EXITPIDONE 0x00000008 /* PI exit done on shut down */
#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
#define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */
#define PF_MCE_PROCESS 0x00000080 /* Process policy on mce errors */
#define PF_SUPERPRIV 0x00000100 /* Used super-user privileges */
#define PF_DUMPCORE 0x00000200 /* Dumped core */
#define PF_SIGNALED 0x00000400 /* Killed by a signal */
#define PF_MEMALLOC 0x00000800 /* Allocating memory */
#define PF_NPROC_EXCEEDED 0x00001000 /* set_user() noticed that RLIMIT_NPROC was exceeded */
#define PF_USED_MATH 0x00002000 /* If unset the fpu must be initialized before use */
#define PF_USED_ASYNC 0x00004000 /* Used async_schedule*(), used by module init */
#define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */
#define PF_FROZEN 0x00010000 /* Frozen for system suspend */
#define PF_KSWAPD 0x00020000 /* I am kswapd */
#define PF_MEMALLOC_NOFS 0x00040000 /* All allocation requests will inherit GFP_NOFS */
#define PF_MEMALLOC_NOIO 0x00080000 /* All allocation requests will inherit GFP_NOIO */
#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
#define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */
#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */
#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
#define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */

/*
 * Only the _current_ task can read/write to tsk->flags, but other
 * tasks can access tsk->flags in readonly mode for example
 * with tsk_used_math (like during threaded core dumping).
 * There is however an exception to this rule during ptrace
 * or during fork: the ptracer task is allowed to write to the
 * child->flags of its traced child (same goes for fork, the parent
 * can write to the child->flags), because we're guaranteed the
 * child is not running and in turn not changing child->flags
 * at the same time the parent does it.
 */
#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math() clear_stopped_child_used_math(current)
#define set_used_math() set_stopped_child_used_math(current)
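/*
 * Illustrative sketch (not a definitive API): how a per-process flag such
 * as PF_MEMALLOC_NOIO is typically saved and restored around a scope that
 * must not recurse into I/O. In the real kernel this pattern is wrapped by
 * helpers in <linux/sched/mm.h>; current_restore_flags() is defined below.
 *
 *	unsigned int pflags = current->flags & PF_MEMALLOC_NOIO;
 *
 *	current->flags |= PF_MEMALLOC_NOIO;
 *	...allocate memory; GFP_* requests now implicitly drop __GFP_IO...
 *	current_restore_flags(pflags, PF_MEMALLOC_NOIO);
 */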
#define conditional_stopped_child_used_math(condition, child) \
        do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)

#define conditional_used_math(condition) conditional_stopped_child_used_math(condition, current)

#define copy_to_stopped_child_used_math(child) \
        do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)

/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)

static inline bool is_percpu_thread(void)
{
#ifdef CONFIG_SMP
        return (current->flags & PF_NO_SETAFFINITY) &&
               (current->nr_cpus_allowed == 1);
#else
        return true;
#endif
}

/* Per-process atomic flags. */
#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */


#define TASK_PFA_TEST(name, func) \
        static inline bool task_##func(struct task_struct *p) \
        { return test_bit(PFA_##name, &p->atomic_flags); }

#define TASK_PFA_SET(name, func) \
        static inline void task_set_##func(struct task_struct *p) \
        { set_bit(PFA_##name, &p->atomic_flags); }

#define TASK_PFA_CLEAR(name, func) \
        static inline void task_clear_##func(struct task_struct *p) \
        { clear_bit(PFA_##name, &p->atomic_flags); }

TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)

TASK_PFA_TEST(SPREAD_PAGE, spread_page)
TASK_PFA_SET(SPREAD_PAGE, spread_page)
TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)

TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
TASK_PFA_SET(SPREAD_SLAB, spread_slab)
TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)

static inline void
current_restore_flags(unsigned long orig_flags, unsigned long flags)
{
        current->flags &= ~flags;
        current->flags |= orig_flags & flags;
}

extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
#else
static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
}
static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
        if (!cpumask_test_cpu(0, new_mask))
                return -EINVAL;
        return 0;
}
#endif

#ifndef cpu_relax_yield
#define cpu_relax_yield() cpu_relax()
#endif

extern int yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);

/**
 * task_nice - return the nice value of a given task.
 * @p: the task in question.
 *
 * Return: The nice value [ -20 ... 0 ... 19 ].
 */
static inline int task_nice(const struct task_struct *p)
{
        return PRIO_TO_NICE((p)->static_prio);
}
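/*
 * Worked example (illustrative): PRIO_TO_NICE() and NICE_TO_PRIO() come from
 * <linux/sched/prio.h>, where the default static priority is 120. A task
 * with static_prio == 120 therefore has nice 0, static_prio == 100 maps to
 * nice -20, and static_prio == 139 maps to nice 19.
 */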
extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
extern int sched_setattr(struct task_struct *, const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);

/**
 * is_idle_task - is the specified task an idle task?
 * @p: the task in question.
 *
 * Return: 1 if @p is an idle task. 0 otherwise.
 */
static inline bool is_idle_task(const struct task_struct *p)
{
        return !!(p->flags & PF_IDLE);
}

extern struct task_struct *curr_task(int cpu);
extern void ia64_set_curr_task(int cpu, struct task_struct *p);

void yield(void);

union thread_union {
#ifndef CONFIG_THREAD_INFO_IN_TASK
        struct thread_info thread_info;
#endif
        unsigned long stack[THREAD_SIZE/sizeof(long)];
};

#ifdef CONFIG_THREAD_INFO_IN_TASK
static inline struct thread_info *task_thread_info(struct task_struct *task)
{
        return &task->thread_info;
}
#elif !defined(__HAVE_THREAD_FUNCTIONS)
# define task_thread_info(task) ((struct thread_info *)(task)->stack)
#endif

/*
 * find a task by one of its numerical ids
 *
 * find_task_by_pid_ns():
 *      finds a task by its pid in the specified namespace
 * find_task_by_vpid():
 *      finds a task by its virtual pid
 *
 * see also find_vpid() etc in include/linux/pid.h
 */

extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);

extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);

#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
#else
static inline void kick_process(struct task_struct *tsk) { }
#endif

extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);

static inline void set_task_comm(struct task_struct *tsk, const char *from)
{
        __set_task_comm(tsk, from, false);
}

extern char *get_task_comm(char *to, struct task_struct *tsk);

#ifdef CONFIG_SMP
void scheduler_ipi(void);
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
static inline void scheduler_ipi(void) { }
static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state)
{
        return 1;
}
#endif
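/*
 * Illustrative sketch (hypothetical caller): reading a task's command name.
 * get_task_comm() takes task_lock() internally, so the caller only has to
 * provide a TASK_COMM_LEN sized buffer.
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	get_task_comm(comm, tsk);
 *	pr_debug("task %d is %s\n", task_pid_nr(tsk), comm);
 */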
/*
 * Set thread flags in other task's structures.
 * See asm/thread_info.h for TIF_xxxx flags available:
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
        set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
        clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
        return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
        return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
        return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
        set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
        clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
        return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
}

/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe. The return
 * value indicates whether a reschedule was done in fact.
 * cond_resched_lock() will drop the spinlock before scheduling,
 * cond_resched_softirq() will enable bhs before scheduling.
 */
#ifndef CONFIG_PREEMPT
extern int _cond_resched(void);
#else
static inline int _cond_resched(void) { return 0; }
#endif

#define cond_resched() ({ \
        ___might_sleep(__FILE__, __LINE__, 0); \
        _cond_resched(); \
})

extern int __cond_resched_lock(spinlock_t *lock);

#define cond_resched_lock(lock) ({ \
        ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
        __cond_resched_lock(lock); \
})

extern int __cond_resched_softirq(void);

#define cond_resched_softirq() ({ \
        ___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
        __cond_resched_softirq(); \
})

static inline void cond_resched_rcu(void)
{
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
        rcu_read_unlock();
        cond_resched();
        rcu_read_lock();
#endif
}

/*
 * Does a critical section need to be broken due to another
 * task waiting?: (technically does not depend on CONFIG_PREEMPT,
 * but a general need for low latency)
 */
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPT
        return spin_is_contended(lock);
#else
        return 0;
#endif
}

static __always_inline bool need_resched(void)
{
        return unlikely(tif_need_resched());
}
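/*
 * Illustrative sketch (hypothetical loop): breaking up a long-running,
 * process-context loop so it stays preemption-friendly. cond_resched()
 * only reschedules when this task actually needs to give up the CPU, so
 * it is cheap enough to call once per iteration.
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(&items[i]);
 *		cond_resched();
 *	}
 *
 * If the loop runs under a spinlock, cond_resched_lock(&lock) drops and
 * re-takes the lock around the reschedule instead.
 */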
/*
 * Wrappers for p->thread_info->cpu access. No-op on UP.
 */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
#ifdef CONFIG_THREAD_INFO_IN_TASK
        return p->cpu;
#else
        return task_thread_info(p)->cpu;
#endif
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
        return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif /* CONFIG_SMP */

/*
 * In order to reduce various lock holder preemption latencies provide an
 * interface to see if a vCPU is currently running or not.
 *
 * This allows us to terminate optimistic spin loops and block, analogous to
 * the native optimistic spin heuristic of testing if the lock owner task is
 * running or not.
 */
#ifndef vcpu_is_preempted
# define vcpu_is_preempted(cpu) false
#endif

extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk) TASK_SIZE
#endif

#endif