// SPDX-License-Identifier: GPL-2.0
/*
 * stop-task scheduling class.
 *
 * The stop task is the highest-priority task in the system; it preempts
 * everything and will be preempted by nothing.
 *
 * See kernel/stop_machine.c
 */
#include "sched.h"

#ifdef CONFIG_SMP
static int
select_task_rq_stop(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	return task_cpu(p); /* stop tasks never migrate */
}

static int
balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	/* tell the core whether this class has a runnable task on @rq */
	return sched_stop_runnable(rq);
}
#endif /* CONFIG_SMP */

static void
check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
{
	/* we're never preempted */
}

static void set_next_task_stop(struct rq *rq, struct task_struct *stop)
{
	/* stamp the start of execution for runtime accounting */
	stop->se.exec_start = rq_clock_task(rq);
}

static struct task_struct *
pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	WARN_ON_ONCE(prev || rf);

	if (!sched_stop_runnable(rq))
		return NULL;

	set_next_task_stop(rq, rq->stop);
	return rq->stop;
}

/*
 * The stop task is never kept on a runqueue list; enqueue/dequeue only
 * maintain the nr_running accounting.
 */
static void
enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
	add_nr_running(rq, 1);
}

static void
dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
	sub_nr_running(rq, 1);
}

static void yield_task_stop(struct rq *rq)
{
	BUG(); /* the stop task should never yield, it's pointless. */
}

static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
{
	struct task_struct *curr = rq->curr;
	u64 delta_exec;

	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	schedstat_set(curr->se.statistics.exec_max,
		      max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = rq_clock_task(rq);
	cgroup_account_cputime(curr, delta_exec);
}

/*
 * Scheduler tick hitting a task of our scheduling class.
 *
 * NOTE: This function can be called remotely by the tick offload that
 * goes along with full dynticks. Therefore no local assumption can be
 * made and everything must be accessed through the @rq and @curr passed
 * in as parameters.
 */
static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
{
}

static void switched_to_stop(struct rq *rq, struct task_struct *p)
{
	BUG(); /* it's impossible to change to this class */
}

static void
prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG(); /* how!? what priority? */
}
*/ 106 } 107 108 static unsigned int 109 get_rr_interval_stop(struct rq *rq, struct task_struct *task) 110 { 111 return 0; 112 } 113 114 static void update_curr_stop(struct rq *rq) 115 { 116 } 117 118 /* 119 * Simple, special scheduling class for the per-CPU stop tasks: 120 */ 121 const struct sched_class stop_sched_class = { 122 .next = &dl_sched_class, 123 124 .enqueue_task = enqueue_task_stop, 125 .dequeue_task = dequeue_task_stop, 126 .yield_task = yield_task_stop, 127 128 .check_preempt_curr = check_preempt_curr_stop, 129 130 .pick_next_task = pick_next_task_stop, 131 .put_prev_task = put_prev_task_stop, 132 .set_next_task = set_next_task_stop, 133 134 #ifdef CONFIG_SMP 135 .balance = balance_stop, 136 .select_task_rq = select_task_rq_stop, 137 .set_cpus_allowed = set_cpus_allowed_common, 138 #endif 139 140 .task_tick = task_tick_stop, 141 142 .get_rr_interval = get_rr_interval_stop, 143 144 .prio_changed = prio_changed_stop, 145 .switched_to = switched_to_stop, 146 .update_curr = update_curr_stop, 147 }; 148