#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

/*
 * Define 'struct task_struct' and provide the main scheduler
 * APIs (schedule(), wakeup variants, etc.)
 */

#include <uapi/linux/sched.h>

#include <asm/current.h>

#include <linux/pid.h>
#include <linux/sem.h>
#include <linux/shm.h>
#include <linux/kcov.h>
#include <linux/mutex.h>
#include <linux/plist.h>
#include <linux/hrtimer.h>
#include <linux/seccomp.h>
#include <linux/nodemask.h>
#include <linux/rcupdate.h>
#include <linux/resource.h>
#include <linux/latencytop.h>
#include <linux/sched/prio.h>
#include <linux/signal_types.h>
#include <linux/mm_types_task.h>
#include <linux/task_io_accounting.h>

/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
struct backing_dev_info;
struct bio_list;
struct blk_plug;
struct cfs_rq;
struct fs_struct;
struct futex_pi_state;
struct io_context;
struct mempolicy;
struct nameidata;
struct nsproxy;
struct perf_event_context;
struct pid_namespace;
struct pipe_inode_info;
struct rcu_node;
struct reclaim_state;
struct robust_list_head;
struct sched_attr;
struct sched_param;
struct seq_file;
struct sighand_struct;
struct signal_struct;
struct task_delay_info;
struct task_group;

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state is
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */

/* Used in tsk->state: */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define __TASK_STOPPED		4
#define __TASK_TRACED		8
/* Used in tsk->exit_state: */
#define EXIT_DEAD		16
#define EXIT_ZOMBIE		32
#define EXIT_TRACE		(EXIT_ZOMBIE | EXIT_DEAD)
/* Used in tsk->state again: */
#define TASK_DEAD		64
#define TASK_WAKEKILL		128
#define TASK_WAKING		256
#define TASK_PARKED		512
#define TASK_NOLOAD		1024
#define TASK_NEW		2048
#define TASK_STATE_MAX		4096

#define TASK_STATE_TO_CHAR_STR	"RSDTtXZxKWPNn"

/* Convenience macros for the sake of set_current_state: */
#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)

#define TASK_IDLE		(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)

/* Convenience macros for the sake of wake_up(): */
#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* get_task_state(): */
#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
				 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)

#define task_is_traced(task)		((task->state & __TASK_TRACED) != 0)

#define task_is_stopped(task)		((task->state & __TASK_STOPPED) != 0)

#define task_is_stopped_or_traced(task)	((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)

#define task_contributes_to_load(task)	((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
					 (task->flags & PF_FROZEN) == 0 && \
					 (task->state & TASK_NOLOAD) == 0)
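/*
 * Usage sketch for the state helpers above (illustrative only): inspecting
 * another task's state. Assumes @p is a task_struct pointer that the caller
 * keeps valid, e.g. under an RCU read-side critical section or tasklist_lock.
 *
 *	if (task_is_stopped_or_traced(p))
 *		pr_debug("task is stopped or being ptraced\n");
 *	else if (task_contributes_to_load(p))
 *		pr_debug("task is in an uninterruptible sleep\n");
 */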
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP

#define __set_current_state(state_value)			\
	do {							\
		current->task_state_change = _THIS_IP_;		\
		current->state = (state_value);			\
	} while (0)
#define set_current_state(state_value)				\
	do {							\
		current->task_state_change = _THIS_IP_;		\
		smp_store_mb(current->state, (state_value));	\
	} while (0)

#else
/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *   for (;;) {
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (!need_sleep)
 *		break;
 *
 *	schedule();
 *   }
 *   __set_current_state(TASK_RUNNING);
 *
 * If the caller does not need such serialisation (because, for instance, the
 * condition test and condition change and wakeup are under the same lock) then
 * use __set_current_state().
 *
 * The above is typically ordered against the wakeup, which does:
 *
 *   need_sleep = false;
 *   wake_up_state(p, TASK_UNINTERRUPTIBLE);
 *
 * Where wake_up_state() (and all other wakeup primitives) imply enough
 * barriers to order the store of the variable against wakeup.
 *
 * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
 * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
 * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
 *
 * This is obviously fine, since they both store the exact same value.
 *
 * Also see the comments of try_to_wake_up().
 */
#define __set_current_state(state_value) do { current->state = (state_value); } while (0)
#define set_current_state(state_value)	 smp_store_mb(current->state, (state_value))
#endif

/* Task command name length: */
#define TASK_COMM_LEN			16

extern cpumask_var_t cpu_isolated_map;

extern void scheduler_tick(void);

#define	MAX_SCHEDULE_TIMEOUT		LONG_MAX

extern long schedule_timeout(long timeout);
extern long schedule_timeout_interruptible(long timeout);
extern long schedule_timeout_killable(long timeout);
extern long schedule_timeout_uninterruptible(long timeout);
extern long schedule_timeout_idle(long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);

extern int __must_check io_schedule_prepare(void);
extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);
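/*
 * Example (sketch): sleeping for roughly 100ms with the helpers above.
 * schedule_timeout_interruptible() sets TASK_INTERRUPTIBLE internally, so
 * the caller only converts milliseconds to jiffies and checks the remainder:
 *
 *	long remaining;
 *
 *	remaining = schedule_timeout_interruptible(msecs_to_jiffies(100));
 *	if (remaining)
 *		pr_debug("woken up early, e.g. by a signal\n");
 */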
/**
 * struct prev_cputime - snapshot of system and user cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 * @lock: protects the above two fields
 *
 * Stores previous user/system time values such that we can guarantee
 * monotonicity.
 */
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	u64 utime;
	u64 stime;
	raw_spinlock_t lock;
#endif
};

/**
 * struct task_cputime - collected CPU time counts
 * @utime:		time spent in user mode, in nanoseconds
 * @stime:		time spent in kernel mode, in nanoseconds
 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
 *
 * This structure groups together three kinds of CPU time that are tracked for
 * threads and thread groups. Most things considering CPU time want to group
 * these counts together and treat all three of them in parallel.
 */
struct task_cputime {
	u64 utime;
	u64 stime;
	unsigned long long sum_exec_runtime;
};

/* Alternate field names when used on cache expirations: */
#define virt_exp	utime
#define prof_exp	stime
#define sched_exp	sum_exec_runtime

struct sched_info {
#ifdef CONFIG_SCHED_INFO
	/* Cumulative counters: */

	/* # of times we have run on this CPU: */
	unsigned long pcount;

	/* Time spent waiting on a runqueue: */
	unsigned long long run_delay;

	/* Timestamps: */

	/* When did we last run on a CPU? */
	unsigned long long last_arrival;

	/* When were we last queued to run? */
	unsigned long long last_queued;

#endif /* CONFIG_SCHED_INFO */
};

/*
 * Integer metrics need fixed point arithmetic, e.g., sched/fair
 * has a few: load, load_avg, util_avg, freq, and capacity.
 *
 * We define a basic fixed point arithmetic range, and then formalize
 * all these metrics based on that basic range.
 */
# define SCHED_FIXEDPOINT_SHIFT		10
# define SCHED_FIXEDPOINT_SCALE		(1L << SCHED_FIXEDPOINT_SHIFT)

struct load_weight {
	unsigned long weight;
	u32 inv_weight;
};
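/*
 * Worked example (illustrative) of the fixed point convention above: with
 * SCHED_FIXEDPOINT_SHIFT == 10, the value 1.0 is stored as
 * SCHED_FIXEDPOINT_SCALE == 1024, so 0.75 is stored as 768 and 0.5 as 512.
 * Multiplying two such ratios needs one shift to renormalize the result:
 *
 *	unsigned long a = 768;					(0.75)
 *	unsigned long b = 512;					(0.50)
 *	unsigned long c = (a * b) >> SCHED_FIXEDPOINT_SHIFT;	(0.375 == 384)
 */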
/*
 * The load_avg/util_avg accumulates an infinite geometric series
 * (see __update_load_avg() in kernel/sched/fair.c).
 *
 * [load_avg definition]
 *
 *   load_avg = runnable% * scale_load_down(load)
 *
 * where runnable% is the time ratio that a sched_entity is runnable.
 * For cfs_rq, it is the aggregated load_avg of all runnable and
 * blocked sched_entities.
 *
 * load_avg may also take frequency scaling into account:
 *
 *   load_avg = runnable% * scale_load_down(load) * freq%
 *
 * where freq% is the CPU frequency normalized to the highest frequency.
 *
 * [util_avg definition]
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE
 *
 * where running% is the time ratio that a sched_entity is running on
 * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
 * and blocked sched_entities.
 *
 * util_avg may also factor frequency scaling and CPU capacity scaling:
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity%
 *
 * where freq% is the same as above, and capacity% is the CPU capacity
 * normalized to the greatest capacity (due to uarch differences, etc).
 *
 * N.B., the above ratios (runnable%, running%, freq%, and capacity%)
 * themselves are in the range of [0, 1]. To do fixed point arithmetic,
 * we therefore scale them to as large a range as necessary. This is for
 * example reflected by util_avg's SCHED_CAPACITY_SCALE.
 *
 * [Overflow issue]
 *
 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
 * with the highest load (=88761), always runnable on a single cfs_rq,
 * and should not overflow as the number already hits PID_MAX_LIMIT.
 *
 * For all other cases (including 32-bit kernels), struct load_weight's
 * weight will overflow first before we do, because:
 *
 *   Max(load_avg) <= Max(load.weight)
 *
 * Then it is the load_weight's responsibility to consider overflow
 * issues.
 */
struct sched_avg {
	u64 last_update_time;
	u64 load_sum;
	u32 util_sum;
	u32 period_contrib;
	unsigned long load_avg;
	unsigned long util_avg;
};
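/*
 * Rough worked example (illustrative) for reading the averages above: since
 * util_avg = running% * SCHED_CAPACITY_SCALE, an entity that has recently
 * been running about half of the time settles around
 *
 *	util_avg ~= SCHED_CAPACITY_SCALE / 2
 *
 * i.e. roughly 512 with the usual scale of 1024, while load_avg approaches
 * scale_load_down(load) as runnable% approaches 1.
 */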
struct sched_statistics {
#ifdef CONFIG_SCHEDSTATS
	u64 wait_start;
	u64 wait_max;
	u64 wait_count;
	u64 wait_sum;
	u64 iowait_count;
	u64 iowait_sum;

	u64 sleep_start;
	u64 sleep_max;
	s64 sum_sleep_runtime;

	u64 block_start;
	u64 block_max;
	u64 exec_max;
	u64 slice_max;

	u64 nr_migrations_cold;
	u64 nr_failed_migrations_affine;
	u64 nr_failed_migrations_running;
	u64 nr_failed_migrations_hot;
	u64 nr_forced_migrations;

	u64 nr_wakeups;
	u64 nr_wakeups_sync;
	u64 nr_wakeups_migrate;
	u64 nr_wakeups_local;
	u64 nr_wakeups_remote;
	u64 nr_wakeups_affine;
	u64 nr_wakeups_affine_attempts;
	u64 nr_wakeups_passive;
	u64 nr_wakeups_idle;
#endif
};

struct sched_entity {
	/* For load-balancing: */
	struct load_weight load;
	struct rb_node run_node;
	struct list_head group_node;
	unsigned int on_rq;

	u64 exec_start;
	u64 sum_exec_runtime;
	u64 vruntime;
	u64 prev_sum_exec_runtime;

	u64 nr_migrations;

	struct sched_statistics statistics;

#ifdef CONFIG_FAIR_GROUP_SCHED
	int depth;
	struct sched_entity *parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq *cfs_rq;
	/* rq "owned" by this entity/group: */
	struct cfs_rq *my_q;
#endif

#ifdef CONFIG_SMP
	/*
	 * Per entity load average tracking.
	 *
	 * Put into separate cache line so it does not
	 * collide with read-mostly values above.
	 */
	struct sched_avg avg ____cacheline_aligned_in_smp;
#endif
};

struct sched_rt_entity {
	struct list_head run_list;
	unsigned long timeout;
	unsigned long watchdog_stamp;
	unsigned int time_slice;
	unsigned short on_rq;
	unsigned short on_list;

	struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity *parent;
	/* rq on which this entity is (to be) queued: */
	struct rt_rq *rt_rq;
	/* rq "owned" by this entity/group: */
	struct rt_rq *my_q;
#endif
};

struct sched_dl_entity {
	struct rb_node rb_node;

	/*
	 * Original scheduling parameters. Copied here from sched_attr
	 * during sched_setattr(), they will remain the same until
	 * the next sched_setattr().
	 */
	u64 dl_runtime;		/* Maximum runtime for each instance	*/
	u64 dl_deadline;	/* Relative deadline of each instance	*/
	u64 dl_period;		/* Separation of two instances (period) */
	u64 dl_bw;		/* dl_runtime / dl_deadline		*/

	/*
	 * Actual scheduling parameters. Initialized with the values above,
	 * they are continuously updated during task execution. Note that
	 * the remaining runtime could be < 0 in case we are in overrun.
	 */
	s64 runtime;		/* Remaining runtime for this instance	*/
	u64 deadline;		/* Absolute deadline for this instance	*/
	unsigned int flags;	/* Specifying the scheduler behaviour	*/

	/*
	 * Some bool flags:
	 *
	 * @dl_throttled tells if we exhausted the runtime. If so, the
	 * task has to wait for a replenishment to be performed at the
	 * next firing of dl_timer.
	 *
	 * @dl_boosted tells if we are boosted due to DI. If so we are
	 * outside the bandwidth enforcement mechanism (but only until we
	 * exit the critical section);
	 *
	 * @dl_yielded tells if task gave up the CPU before consuming
	 * all its available runtime during the last job.
	 */
	int dl_throttled;
	int dl_boosted;
	int dl_yielded;

	/*
	 * Bandwidth enforcement timer. Each -deadline task has its
	 * own bandwidth to be enforced, thus we need one timer per task.
	 */
	struct hrtimer dl_timer;
};
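/*
 * Worked example (illustrative): a -deadline task configured through
 * sched_setattr() with runtime 10ms, deadline 30ms and period 100ms may
 * consume up to 10ms of CPU per 100ms window, with each instance expected
 * to finish within 30ms of its activation; its dl_bw, dl_runtime /
 * dl_deadline as documented above, then comes out to one third (kept in a
 * fixed point representation internally).
 */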
union rcu_special {
	struct {
		u8 blocked;
		u8 need_qs;
		u8 exp_need_qs;

		/* Otherwise the compiler can store garbage here: */
		u8 pad;
	} b; /* Bits. */
	u32 s; /* Set of bits. */
};

enum perf_event_task_context {
	perf_invalid_context = -1,
	perf_hw_context = 0,
	perf_sw_context,
	perf_nr_task_contexts,
};

struct wake_q_node {
	struct wake_q_node *next;
};

struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * For reasons of header soup (see current_thread_info()), this
	 * must be the first element of task_struct.
	 */
	struct thread_info thread_info;
#endif
	/* -1 unrunnable, 0 runnable, >0 stopped: */
	volatile long state;
	void *stack;
	atomic_t usage;
	/* Per task flags (PF_*), defined further below: */
	unsigned int flags;
	unsigned int ptrace;

#ifdef CONFIG_SMP
	struct llist_node wake_entry;
	int on_cpu;
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/* Current CPU: */
	unsigned int cpu;
#endif
	unsigned int wakee_flips;
	unsigned long wakee_flip_decay_ts;
	struct task_struct *last_wakee;

	int wake_cpu;
#endif
	int on_rq;

	int prio;
	int static_prio;
	int normal_prio;
	unsigned int rt_priority;

	const struct sched_class *sched_class;
	struct sched_entity se;
	struct sched_rt_entity rt;
#ifdef CONFIG_CGROUP_SCHED
	struct task_group *sched_task_group;
#endif
	struct sched_dl_entity dl;

#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* List of struct preempt_notifier: */
	struct hlist_head preempt_notifiers;
#endif

#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int btrace_seq;
#endif

	unsigned int policy;
	int nr_cpus_allowed;
	cpumask_t cpus_allowed;

#ifdef CONFIG_PREEMPT_RCU
	int rcu_read_lock_nesting;
	union rcu_special rcu_read_unlock_special;
	struct list_head rcu_node_entry;
	struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_TASKS_RCU
	unsigned long rcu_tasks_nvcsw;
	bool rcu_tasks_holdout;
	struct list_head rcu_tasks_holdout_list;
	int rcu_tasks_idle_cpu;
#endif /* #ifdef CONFIG_TASKS_RCU */

	struct sched_info sched_info;

	struct list_head tasks;
#ifdef CONFIG_SMP
	struct plist_node pushable_tasks;
	struct rb_node pushable_dl_tasks;
#endif

	struct mm_struct *mm;
	struct mm_struct *active_mm;

	/* Per-thread vma caching: */
	struct vmacache vmacache;

#ifdef SPLIT_RSS_COUNTING
	struct task_rss_stat rss_stat;
#endif
	int exit_state;
	int exit_code;
	int exit_signal;
	/* The signal sent when the parent dies: */
	int pdeath_signal;
	/* JOBCTL_*, siglock protected: */
	unsigned long jobctl;

	/* Used for emulating ABI behavior of previous Linux versions: */
	unsigned int personality;

	/* Scheduler bits, serialized by scheduler locks: */
	unsigned sched_reset_on_fork:1;
	unsigned sched_contributes_to_load:1;
	unsigned sched_migrated:1;
	unsigned sched_remote_wakeup:1;
	/* Force alignment to the next boundary: */
	unsigned :0;

	/* Unserialized, strictly 'current' */

	/* Bit to tell LSMs we're in execve(): */
	unsigned in_execve:1;
	unsigned in_iowait:1;
#ifndef TIF_RESTORE_SIGMASK
	unsigned restore_sigmask:1;
#endif
#ifdef CONFIG_MEMCG
	unsigned memcg_may_oom:1;
#ifndef CONFIG_SLOB
	unsigned memcg_kmem_skip_account:1;
#endif
#endif
#ifdef CONFIG_COMPAT_BRK
	unsigned brk_randomized:1;
#endif

	unsigned long atomic_flags; /* Flags requiring atomic access. */

	struct restart_block restart_block;

	pid_t pid;
	pid_t tgid;

#ifdef CONFIG_CC_STACKPROTECTOR
	/* Canary value for the -fstack-protector GCC feature: */
	unsigned long stack_canary;
#endif
	/*
	 * Pointers to the (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively. (p->father can be replaced with
	 * p->real_parent->pid)
	 */

	/* Real parent process: */
	struct task_struct __rcu *real_parent;

	/* Recipient of SIGCHLD, wait4() reports: */
	struct task_struct __rcu *parent;

	/*
	 * Children/sibling form the list of natural children:
	 */
	struct list_head children;
	struct list_head sibling;
	struct task_struct *group_leader;

	/*
	 * 'ptraced' is the list of tasks this task is using ptrace() on.
	 *
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
	 */
	struct list_head ptraced;
	struct list_head ptrace_entry;

	/* PID/PID hash table linkage. */
	struct pid_link pids[PIDTYPE_MAX];
	struct list_head thread_group;
	struct list_head thread_node;

	struct completion *vfork_done;

	/* CLONE_CHILD_SETTID: */
	int __user *set_child_tid;

	/* CLONE_CHILD_CLEARTID: */
	int __user *clear_child_tid;

	u64 utime;
	u64 stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	u64 utimescaled;
	u64 stimescaled;
#endif
	u64 gtime;
	struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	seqcount_t vtime_seqcount;
	unsigned long long vtime_snap;
	enum {
		/* Task is sleeping or running in a CPU with VTIME inactive: */
		VTIME_INACTIVE = 0,
		/* Task runs in userspace in a CPU with VTIME active: */
		VTIME_USER,
		/* Task runs in kernelspace in a CPU with VTIME active: */
		VTIME_SYS,
	} vtime_snap_whence;
#endif

#ifdef CONFIG_NO_HZ_FULL
	atomic_t tick_dep_mask;
#endif
	/* Context switch counts: */
	unsigned long nvcsw;
	unsigned long nivcsw;

	/* Monotonic time in nsecs: */
	u64 start_time;

	/* Boot based time in nsecs: */
	u64 real_start_time;

	/* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
	unsigned long min_flt;
	unsigned long maj_flt;

#ifdef CONFIG_POSIX_TIMERS
	struct task_cputime cputime_expires;
	struct list_head cpu_timers[3];
#endif

	/* Process credentials: */

	/* Tracer's credentials at attach: */
	const struct cred __rcu *ptracer_cred;

	/* Objective and real subjective task credentials (COW): */
	const struct cred __rcu *real_cred;

	/* Effective (overridable) subjective task credentials (COW): */
	const struct cred __rcu *cred;

	/*
	 * executable name, excluding path.
	 *
	 * - normally initialized by setup_new_exec()
	 * - access it with [gs]et_task_comm()
	 * - lock it with task_lock()
	 */
	char comm[TASK_COMM_LEN];

	struct nameidata *nameidata;

#ifdef CONFIG_SYSVIPC
	struct sysv_sem sysvsem;
	struct sysv_shm sysvshm;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
	unsigned long last_switch_count;
#endif
	/* Filesystem information: */
	struct fs_struct *fs;

	/* Open file information: */
	struct files_struct *files;

	/* Namespaces: */
	struct nsproxy *nsproxy;

	/* Signal handlers: */
	struct signal_struct *signal;
	struct sighand_struct *sighand;
	sigset_t blocked;
	sigset_t real_blocked;
	/* Restored if set_restore_sigmask() was used: */
	sigset_t saved_sigmask;
	struct sigpending pending;
	unsigned long sas_ss_sp;
	size_t sas_ss_size;
	unsigned int sas_ss_flags;

	struct callback_head *task_works;

	struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL
	kuid_t loginuid;
	unsigned int sessionid;
#endif
	struct seccomp seccomp;

	/* Thread group tracking: */
	u32 parent_exec_id;
	u32 self_exec_id;

	/* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
	spinlock_t alloc_lock;

	/* Protection of the PI data structures: */
	raw_spinlock_t pi_lock;

	struct wake_q_node wake_q;

#ifdef CONFIG_RT_MUTEXES
	/* PI waiters blocked on a rt_mutex held by this task: */
	struct rb_root pi_waiters;
	struct rb_node *pi_waiters_leftmost;
	/* Deadlock detection and priority inheritance handling: */
	struct rt_mutex_waiter *pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	/* Mutex deadlock detection: */
	struct mutex_waiter *blocked_on;
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned int irq_events;
	unsigned long hardirq_enable_ip;
	unsigned long hardirq_disable_ip;
	unsigned int hardirq_enable_event;
	unsigned int hardirq_disable_event;
	int hardirqs_enabled;
	int hardirq_context;
	unsigned long softirq_disable_ip;
	unsigned long softirq_enable_ip;
	unsigned int softirq_disable_event;
	unsigned int softirq_enable_event;
	int softirqs_enabled;
	int softirq_context;
#endif

#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH			48UL
	u64 curr_chain_key;
	int lockdep_depth;
	unsigned int lockdep_recursion;
	struct held_lock held_locks[MAX_LOCK_DEPTH];
	gfp_t lockdep_reclaim_gfp;
#endif

#ifdef CONFIG_UBSAN
	unsigned int in_ubsan;
#endif

	/* Journalling filesystem info: */
	void *journal_info;

	/* Stacked block device info: */
	struct bio_list *bio_list;

#ifdef CONFIG_BLOCK
	/* Stack plugging: */
	struct blk_plug *plug;
#endif

	/* VM state: */
	struct reclaim_state *reclaim_state;

	struct backing_dev_info *backing_dev_info;

	struct io_context *io_context;

	/* Ptrace state: */
	unsigned long ptrace_message;
	siginfo_t *last_siginfo;

	struct task_io_accounting ioac;
#ifdef CONFIG_TASK_XACCT
	/* Accumulated RSS usage: */
	u64 acct_rss_mem1;
	/* Accumulated virtual memory usage: */
	u64 acct_vm_mem1;
	/* stime + utime since last update: */
	u64 acct_timexpd;
#endif
#ifdef CONFIG_CPUSETS
	/* Protected by ->alloc_lock: */
	nodemask_t mems_allowed;
	/* Sequence number to catch updates: */
	seqcount_t mems_allowed_seq;
	int cpuset_mem_spread_rotor;
	int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
	/* Control Group info protected by css_set_lock: */
	struct css_set __rcu *cgroups;
	/* cg_list protected by css_set_lock and tsk->alloc_lock: */
	struct list_head cg_list;
#endif
#ifdef CONFIG_INTEL_RDT_A
	int closid;
#endif
#ifdef CONFIG_FUTEX
	struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
	struct compat_robust_list_head __user *compat_robust_list;
#endif
	struct list_head pi_state_list;
	struct futex_pi_state *pi_state_cache;
#endif
#ifdef CONFIG_PERF_EVENTS
	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
	struct mutex perf_event_mutex;
	struct list_head perf_event_list;
#endif
#ifdef CONFIG_DEBUG_PREEMPT
	unsigned long preempt_disable_ip;
#endif
#ifdef CONFIG_NUMA
	/* Protected by alloc_lock: */
	struct mempolicy *mempolicy;
	short il_next;
	short pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING
	int numa_scan_seq;
	unsigned int numa_scan_period;
	unsigned int numa_scan_period_max;
	int numa_preferred_nid;
	unsigned long numa_migrate_retry;
	/* Migration stamp: */
	u64 node_stamp;
	u64 last_task_numa_placement;
	u64 last_sum_exec_runtime;
	struct callback_head numa_work;

	struct list_head numa_entry;
	struct numa_group *numa_group;

	/*
	 * numa_faults is an array split into four regions:
	 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
	 * in this precise order.
	 *
	 * faults_memory: Exponential decaying average of faults on a per-node
	 * basis. Scheduling placement decisions are made based on these
	 * counts. The values remain static for the duration of a PTE scan.
	 * faults_cpu: Track the nodes the process was running on when a NUMA
	 * hinting fault was incurred.
	 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
	 * during the current scan window. When the scan completes, the counts
	 * in faults_memory and faults_cpu decay and these values are copied.
	 */
	unsigned long *numa_faults;
	unsigned long total_numa_faults;

	/*
	 * numa_faults_locality tracks if faults recorded during the last
	 * scan window were remote/local or failed to migrate. The task scan
	 * period is adapted based on the locality of the faults with different
	 * weights depending on whether they were shared or private faults
	 */
	unsigned long numa_faults_locality[3];

	unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */

	struct tlbflush_unmap_batch tlb_ubc;

	struct rcu_head rcu;

	/* Cache last used pipe for splice(): */
	struct pipe_inode_info *splice_pipe;

	struct page_frag task_frag;

#ifdef CONFIG_TASK_DELAY_ACCT
	struct task_delay_info *delays;
#endif

#ifdef CONFIG_FAULT_INJECTION
	int make_it_fail;
#endif
	/*
	 * When (nr_dirtied >= nr_dirtied_pause), it's time to call
	 * balance_dirty_pages() for a dirty throttling pause:
	 */
	int nr_dirtied;
	int nr_dirtied_pause;
	/* Start of a write-and-pause period: */
	unsigned long dirty_paused_when;

#ifdef CONFIG_LATENCYTOP
	int latency_record_count;
	struct latency_record latency_record[LT_SAVECOUNT];
#endif
	/*
	 * Time slack values; these are used to round up poll() and
	 * select() etc timeout values. These are in nanoseconds.
	 */
	u64 timer_slack_ns;
	u64 default_timer_slack_ns;

#ifdef CONFIG_KASAN
	unsigned int kasan_depth;
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* Index of current stored address in ret_stack: */
	int curr_ret_stack;

	/* Stack of return addresses for return function tracing: */
	struct ftrace_ret_stack *ret_stack;

	/* Timestamp for last schedule: */
	unsigned long long ftrace_timestamp;

	/*
	 * Number of functions that haven't been traced
	 * because of depth overrun:
	 */
	atomic_t trace_overrun;

	/* Pause tracing: */
	atomic_t tracing_graph_pause;
#endif

#ifdef CONFIG_TRACING
	/* State flags for use by tracers: */
	unsigned long trace;

	/* Bitmask and counter of trace recursion: */
	unsigned long trace_recursion;
#endif /* CONFIG_TRACING */

#ifdef CONFIG_KCOV
	/* Coverage collection mode enabled for this task (0 if disabled): */
	enum kcov_mode kcov_mode;

	/* Size of the kcov_area: */
	unsigned int kcov_size;

	/* Buffer for coverage collection: */
	void *kcov_area;

	/* KCOV descriptor wired with this task or NULL: */
	struct kcov *kcov;
#endif

#ifdef CONFIG_MEMCG
	struct mem_cgroup *memcg_in_oom;
	gfp_t memcg_oom_gfp_mask;
	int memcg_oom_order;

	/* Number of pages to reclaim on returning to userland: */
	unsigned int memcg_nr_pages_over_high;
#endif

#ifdef CONFIG_UPROBES
	struct uprobe_task *utask;
#endif
#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
	unsigned int sequential_io;
	unsigned int sequential_io_avg;
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
	unsigned long task_state_change;
#endif
	int pagefault_disabled;
#ifdef CONFIG_MMU
	struct task_struct *oom_reaper_list;
#endif
#ifdef CONFIG_VMAP_STACK
	struct vm_struct *stack_vm_area;
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/* A live task holds one reference: */
	atomic_t stack_refcount;
#endif
	/* CPU-specific state of this task: */
	struct thread_struct thread;

	/*
	 * WARNING: on x86, 'thread_struct' contains a variable-sized
	 * structure. It *MUST* be at the end of 'task_struct'.
	 *
	 * Do not put anything below here!
	 */
};
static inline struct pid *task_pid(struct task_struct *task)
{
	return task->pids[PIDTYPE_PID].pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PID].pid;
}

/*
 * Without tasklist or RCU lock it is not safe to dereference
 * the result of task_pgrp/task_session even if task == current;
 * we can race with another thread doing sys_setsid/sys_setpgid.
 */
static inline struct pid *task_pgrp(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PGID].pid;
}

static inline struct pid *task_session(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_SID].pid;
}

/*
 * The helpers to get the task's different pids as they are seen
 * from various namespaces:
 *
 * task_xid_nr()	: global id, i.e. the id seen from the init namespace;
 * task_xid_vnr()	: virtual id, i.e. the id seen from the pid namespace of
 *			  current.
 * task_xid_nr_ns()	: id seen from the ns specified;
 *
 * set_task_vxid()	: assigns a virtual id to a task;
 *
 * see also pid_nr() etc in include/linux/pid.h
 */
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);

static inline pid_t task_pid_nr(struct task_struct *tsk)
{
	return tsk->pid;
}

static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}

static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
}


static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
	return tsk->tgid;
}

extern pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);

static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
	return pid_vnr(task_tgid(tsk));
}
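/*
 * Example (sketch): the same task can carry different numerical ids depending
 * on the observer. Assuming @tsk is a valid task pointer pinned by the caller:
 *
 *	pid_t global = task_pid_nr(tsk);	global (init namespace) id
 *	pid_t local  = task_pid_vnr(tsk);	id as seen from current's pid ns
 *
 * The two values can differ whenever pid namespaces are in use.
 */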
/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state)
 * If pid_alive fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
 *
 * Return: 1 if the process is alive. 0 otherwise.
 */
static inline int pid_alive(const struct task_struct *p)
{
	return p->pids[PIDTYPE_PID].pid != NULL;
}

static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
{
	pid_t pid = 0;

	rcu_read_lock();
	if (pid_alive(tsk))
		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
	rcu_read_unlock();

	return pid;
}

static inline pid_t task_ppid_nr(const struct task_struct *tsk)
{
	return task_ppid_nr_ns(tsk, &init_pid_ns);
}

static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}

static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}


static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}

static inline pid_t task_session_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}

/* Obsolete, do not use: */
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
	return task_pgrp_nr_ns(tsk, &init_pid_ns);
}

/**
 * is_global_init - check if a task structure is init. Since init
 * is free to have sub-threads we need to check tgid.
 * @tsk: Task structure to be checked.
 *
 * Check if a task structure is the first user space task the kernel created.
 *
 * Return: 1 if the task structure is init. 0 otherwise.
 */
static inline int is_global_init(struct task_struct *tsk)
{
	return task_tgid_nr(tsk) == 1;
}

extern struct pid *cad_pid;
/*
 * Per process flags
 */
#define PF_IDLE			0x00000002	/* I am an IDLE thread */
#define PF_EXITING		0x00000004	/* Getting shut down */
#define PF_EXITPIDONE		0x00000008	/* PI exit done on shut down */
#define PF_VCPU			0x00000010	/* I'm a virtual CPU */
#define PF_WQ_WORKER		0x00000020	/* I'm a workqueue worker */
#define PF_FORKNOEXEC		0x00000040	/* Forked but didn't exec */
#define PF_MCE_PROCESS		0x00000080	/* Process policy on mce errors */
#define PF_SUPERPRIV		0x00000100	/* Used super-user privileges */
#define PF_DUMPCORE		0x00000200	/* Dumped core */
#define PF_SIGNALED		0x00000400	/* Killed by a signal */
#define PF_MEMALLOC		0x00000800	/* Allocating memory */
#define PF_NPROC_EXCEEDED	0x00001000	/* set_user() noticed that RLIMIT_NPROC was exceeded */
#define PF_USED_MATH		0x00002000	/* If unset the fpu must be initialized before use */
#define PF_USED_ASYNC		0x00004000	/* Used async_schedule*(), used by module init */
#define PF_NOFREEZE		0x00008000	/* This thread should not be frozen */
#define PF_FROZEN		0x00010000	/* Frozen for system suspend */
#define PF_FSTRANS		0x00020000	/* Inside a filesystem transaction */
#define PF_KSWAPD		0x00040000	/* I am kswapd */
#define PF_MEMALLOC_NOIO	0x00080000	/* Allocating memory without IO involved */
#define PF_LESS_THROTTLE	0x00100000	/* Throttle me less: I clean memory */
#define PF_KTHREAD		0x00200000	/* I am a kernel thread */
#define PF_RANDOMIZE		0x00400000	/* Randomize virtual address space */
#define PF_SWAPWRITE		0x00800000	/* Allowed to write to swap */
#define PF_NO_SETAFFINITY	0x04000000	/* Userland is not allowed to meddle with cpus_allowed */
#define PF_MCE_EARLY		0x08000000	/* Early kill for mce process policy */
#define PF_MUTEX_TESTER		0x20000000	/* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP		0x40000000	/* Freezer should not count it as freezable */
#define PF_SUSPEND_TASK		0x80000000	/* This thread called freeze_processes() and should not be frozen */

/*
 * Only the _current_ task can read/write to tsk->flags, but other
 * tasks can access tsk->flags in readonly mode for example
 * with tsk_used_math (like during threaded core dumping).
 * There is however an exception to this rule during ptrace
 * or during fork: the ptracer task is allowed to write to the
 * child->flags of its traced child (same goes for fork, the parent
 * can write to the child->flags), because we're guaranteed the
 * child is not running and in turn not changing child->flags
 * at the same time the parent does it.
 */
#define clear_stopped_child_used_math(child)	do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child)	do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math()			clear_stopped_child_used_math(current)
#define set_used_math()				set_stopped_child_used_math(current)

#define conditional_stopped_child_used_math(condition, child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)

#define conditional_used_math(condition)	conditional_stopped_child_used_math(condition, current)

#define copy_to_stopped_child_used_math(child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)

/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
#define tsk_used_math(p)			((p)->flags & PF_USED_MATH)
#define used_math()				tsk_used_math(current)
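/*
 * Example (sketch): since only 'current' may modify its own flags, the
 * common pattern is a direct test on current->flags, e.g. bailing out of a
 * code path that only makes sense for user tasks:
 *
 *	if (current->flags & PF_KTHREAD)
 *		return;
 */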
/* Per-process atomic flags. */
#define PFA_NO_NEW_PRIVS	0	/* May not gain new privileges. */
#define PFA_SPREAD_PAGE		1	/* Spread page cache over cpuset */
#define PFA_SPREAD_SLAB		2	/* Spread some slab caches over cpuset */
#define PFA_LMK_WAITING		3	/* Lowmemorykiller is waiting */


#define TASK_PFA_TEST(name, func)					\
	static inline bool task_##func(struct task_struct *p)		\
	{ return test_bit(PFA_##name, &p->atomic_flags); }

#define TASK_PFA_SET(name, func)					\
	static inline void task_set_##func(struct task_struct *p)	\
	{ set_bit(PFA_##name, &p->atomic_flags); }

#define TASK_PFA_CLEAR(name, func)					\
	static inline void task_clear_##func(struct task_struct *p)	\
	{ clear_bit(PFA_##name, &p->atomic_flags); }

TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)

TASK_PFA_TEST(SPREAD_PAGE, spread_page)
TASK_PFA_SET(SPREAD_PAGE, spread_page)
TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)

TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
TASK_PFA_SET(SPREAD_SLAB, spread_slab)
TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)

TASK_PFA_TEST(LMK_WAITING, lmk_waiting)
TASK_PFA_SET(LMK_WAITING, lmk_waiting)
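/*
 * The TASK_PFA_* macros above expand into per-flag accessors; for
 * PFA_NO_NEW_PRIVS, for example, they generate task_no_new_privs() and
 * task_set_no_new_privs(). Usage sketch, assuming @p is a valid task:
 *
 *	task_set_no_new_privs(p);
 *	if (task_no_new_privs(p))
 *		pr_debug("no_new_privs is set\n");
 */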
static inline void
tsk_restore_flags(struct task_struct *task, unsigned long orig_flags, unsigned long flags)
{
	task->flags &= ~flags;
	task->flags |= orig_flags & flags;
}

extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
#else
static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
}
static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
	if (!cpumask_test_cpu(0, new_mask))
		return -EINVAL;
	return 0;
}
#endif

#ifndef cpu_relax_yield
#define cpu_relax_yield() cpu_relax()
#endif

extern int yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);

/**
 * task_nice - return the nice value of a given task.
 * @p: the task in question.
 *
 * Return: The nice value [ -20 ... 0 ... 19 ].
 */
static inline int task_nice(const struct task_struct *p)
{
	return PRIO_TO_NICE((p)->static_prio);
}

extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
extern int sched_setattr(struct task_struct *, const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);

/**
 * is_idle_task - is the specified task an idle task?
 * @p: the task in question.
 *
 * Return: 1 if @p is an idle task. 0 otherwise.
 */
static inline bool is_idle_task(const struct task_struct *p)
{
	return !!(p->flags & PF_IDLE);
}

extern struct task_struct *curr_task(int cpu);
extern void ia64_set_curr_task(int cpu, struct task_struct *p);

void yield(void);

union thread_union {
#ifndef CONFIG_THREAD_INFO_IN_TASK
	struct thread_info thread_info;
#endif
	unsigned long stack[THREAD_SIZE/sizeof(long)];
};

#ifdef CONFIG_THREAD_INFO_IN_TASK
static inline struct thread_info *task_thread_info(struct task_struct *task)
{
	return &task->thread_info;
}
#elif !defined(__HAVE_THREAD_FUNCTIONS)
# define task_thread_info(task)	((struct thread_info *)(task)->stack)
#endif

/*
 * find a task by one of its numerical ids
 *
 * find_task_by_pid_ns():
 *	finds a task by its pid in the specified namespace
 * find_task_by_vpid():
 *	finds a task by its virtual pid
 *
 * see also find_vpid() etc in include/linux/pid.h
 */

extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);

extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);

#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
#else
static inline void kick_process(struct task_struct *tsk) { }
#endif

extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);

static inline void set_task_comm(struct task_struct *tsk, const char *from)
{
	__set_task_comm(tsk, from, false);
}

extern char *get_task_comm(char *to, struct task_struct *tsk);

#ifdef CONFIG_SMP
void scheduler_ipi(void);
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
static inline void scheduler_ipi(void) { }
static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state)
{
	return 1;
}
#endif
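/*
 * Example (sketch): reading a task's command name into a local buffer via
 * the helpers above. TASK_COMM_LEN bounds the copy, so a stack buffer of
 * that size is sufficient; @tsk is assumed valid:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	get_task_comm(comm, tsk);
 *	pr_debug("task %d is running %s\n", task_pid_nr(tsk), comm);
 */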
/*
 * Set thread flags in other task's structures.
 * See asm/thread_info.h for TIF_xxxx flags available:
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
}

/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe. The return
 * value indicates whether a reschedule was done in fact.
 * cond_resched_lock() will drop the spinlock before scheduling,
 * cond_resched_softirq() will enable bhs before scheduling.
 */
#ifndef CONFIG_PREEMPT
extern int _cond_resched(void);
#else
static inline int _cond_resched(void) { return 0; }
#endif

#define cond_resched() ({				\
	___might_sleep(__FILE__, __LINE__, 0);		\
	_cond_resched();				\
})

extern int __cond_resched_lock(spinlock_t *lock);

#define cond_resched_lock(lock) ({				\
	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
	__cond_resched_lock(lock);				\
})

extern int __cond_resched_softirq(void);

#define cond_resched_softirq() ({					\
	___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
	__cond_resched_softirq();					\
})

static inline void cond_resched_rcu(void)
{
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
	rcu_read_unlock();
	cond_resched();
	rcu_read_lock();
#endif
}

/*
 * Does a critical section need to be broken due to another
 * task waiting?: (technically does not depend on CONFIG_PREEMPT,
 * but a general need for low latency)
 */
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPT
	return spin_is_contended(lock);
#else
	return 0;
#endif
}

static __always_inline bool need_resched(void)
{
	return unlikely(tif_need_resched());
}
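/*
 * Example (sketch): breaking up a long kernel-side loop with cond_resched()
 * so it does not hog the CPU. process_one() and nr_items are placeholder
 * names, not part of this header:
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_one(i);
 *		cond_resched();
 *	}
 */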
/*
 * Wrappers for p->thread_info->cpu access. No-op on UP.
 */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
#ifdef CONFIG_THREAD_INFO_IN_TASK
	return p->cpu;
#else
	return task_thread_info(p)->cpu;
#endif
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif /* CONFIG_SMP */

/*
 * In order to reduce various lock holder preemption latencies provide an
 * interface to see if a vCPU is currently running or not.
 *
 * This allows us to terminate optimistic spin loops and block, analogous to
 * the native optimistic spin heuristic of testing if the lock owner task is
 * running or not.
 */
#ifndef vcpu_is_preempted
# define vcpu_is_preempted(cpu)	false
#endif

extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk)	TASK_SIZE
#endif

#endif