| /linux/tools/verification/models/sched/ |
| sssw.dot |
      5  {node [shape = doublecircle] "runnable"};
      6  {node [shape = circle] "runnable"};
     10  "__init_runnable" -> "runnable";
     11  "runnable" [label = "runnable", color = green3];
     12  …"runnable" -> "runnable" [ label = "sched_set_state_runnable\nsched_wakeup\nsched_switch_in\nsched…
     13  "runnable" -> "sleepable" [ label = "sched_set_state_sleepable" ];
     14  "runnable" -> "sleeping" [ label = "sched_switch_blocking" ];
     16  "signal_wakeup" -> "runnable" [ label = "signal_deliver" ];
     20  "sleepable" -> "runnable" [ label = "sched_set_state_runnable\nsched_wakeup" ];
     25  "sleeping" -> "runnable" [ label = "sched_wakeup" ];
    [all …]
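
Aside: the sssw model above is a deterministic automaton over scheduler tracepoints. As a purely illustrative sketch (not the RV framework's generated monitor code), the transitions visible in the truncated dot fragment can be collected into a C transition table:

    /* Illustrative C rendering of the sssw transitions shown above;
     * the dot snippet is truncated, so -1 marks pairs it doesn't show. */
    enum sssw_state { ST_RUNNABLE, ST_SLEEPABLE, ST_SLEEPING, ST_SIGNAL_WAKEUP };
    enum sssw_event {
            EV_SET_STATE_RUNNABLE,   /* sched_set_state_runnable  */
            EV_SET_STATE_SLEEPABLE,  /* sched_set_state_sleepable */
            EV_SWITCH_BLOCKING,      /* sched_switch_blocking     */
            EV_WAKEUP,               /* sched_wakeup              */
            EV_SIGNAL_DELIVER,       /* signal_deliver            */
    };

    static const int sssw_next[4][5] = {
            [ST_RUNNABLE]      = { ST_RUNNABLE, ST_SLEEPABLE, ST_SLEEPING, ST_RUNNABLE, -1 },
            [ST_SLEEPABLE]     = { ST_RUNNABLE, -1, -1, ST_RUNNABLE, -1 },
            [ST_SLEEPING]      = { -1, -1, -1, ST_RUNNABLE, -1 },
            [ST_SIGNAL_WAKEUP] = { -1, -1, -1, -1, ST_RUNNABLE },
    };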
|
| /linux/kernel/sched/ |
| pelt.c |
    104  unsigned long load, unsigned long runnable, int running)          in accumulate_sum() argument
    144  if (runnable)                                                     in accumulate_sum()
    145  sa->runnable_sum += runnable * contrib << SCHED_CAPACITY_SHIFT;   in accumulate_sum()
    182  unsigned long load, unsigned long runnable, int running)          in ___update_load_sum() argument
    218  runnable = running = 0;                                           in ___update_load_sum()
    227  if (!accumulate_sum(delta, sa, load, runnable, running))          in ___update_load_sum()
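
Hedged sketch of the accumulation step matched at pelt.c:144-145: each PELT update window adds a runnable-weighted contribution, scaled up by SCHED_CAPACITY_SHIFT. Names mirror the snippet; the period decay and the parallel load/running sums of the real accumulate_sum() are deliberately left out:

    #include <stdint.h>

    #define SCHED_CAPACITY_SHIFT 10

    struct sched_avg_sketch {
            uint64_t runnable_sum;
    };

    /* Illustrative only: 'contrib' stands for the time contribution of
     * the current update window, as computed by the real accumulate_sum();
     * 'runnable' is the runnable weight passed in by ___update_load_sum(). */
    static void accumulate_runnable(struct sched_avg_sketch *sa,
                                    unsigned long runnable, uint32_t contrib)
    {
            if (runnable)
                    sa->runnable_sum += runnable * contrib << SCHED_CAPACITY_SHIFT;
    }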
|
| ext_internal.h |
    389  void (*runnable)(struct task_struct *p, u64 enq_flags);   member
    426  void (*stopping)(struct task_struct *p, bool runnable);
|
| fair.c |
   2019  unsigned long runnable;                                              member
   2055  ((ns->compute_capacity * imbalance_pct) < (ns->runnable * 100))))    in numa_classify()
   2060  ((ns->compute_capacity * imbalance_pct) > (ns->runnable * 100))))    in numa_classify()
   2111  ns->runnable += cpu_runnable(rq);                                    in update_numa_stats()
   7203  unsigned int runnable;                                               in cpu_runnable_without() local
   7210  runnable = READ_ONCE(cfs_rq->avg.runnable_avg);                      in cpu_runnable_without()
   7213  lsub_positive(&runnable, p->se.avg.runnable_avg);                    in cpu_runnable_without()
   7215  return runnable;                                                     in cpu_runnable_without()
   7946  unsigned long runnable;                                              in cpu_util() local
   7949  runnable = READ_ONCE(cfs_rq->avg.runnable_avg);                      in cpu_util()
   [all …]
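
The numa_classify() hits encode the overload test in integer arithmetic: a node counts as overloaded when runnable pressure exceeds capacity by more than the imbalance percentage. A standalone sketch of just that comparison (names mirror the snippet; the surrounding classification logic is omitted):

    #include <stdbool.h>

    /* Mirrors the comparison in the numa_classify() hits above:
     * overloaded when runnable/capacity > imbalance_pct/100, kept as
     * cross-multiplied integer math to avoid division. */
    static bool node_overloaded(unsigned long compute_capacity,
                                unsigned long runnable,
                                unsigned int imbalance_pct)
    {
            return compute_capacity * imbalance_pct < runnable * 100;
    }

For example, with imbalance_pct = 117 (a common sched-domain default) and compute_capacity = 1024, the node classifies as overloaded once its summed cpu_runnable() exceeds 1024 * 117 / 100 ≈ 1198.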
|
| ext.c |
   1497  if (SCX_HAS_OP(sch, runnable) && !task_on_rq_migrating(p))                      in enqueue_task_scx()
   1498  SCX_CALL_OP_TASK(sch, SCX_KF_REST, runnable, rq, p, enq_flags);                 in enqueue_task_scx()
   5307  static void sched_ext_ops__stopping(struct task_struct *p, bool runnable) {}    in sched_ext_ops__stopping() argument
   5344  .runnable = sched_ext_ops__runnable,
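
The enqueue_task_scx() hits show sched_ext's optional-callback pattern: ops.runnable fires only if the BPF scheduler implements it, and only when the task is genuinely becoming runnable rather than migrating between runqueues. A hypothetical simplification with the SCX_HAS_OP()/SCX_CALL_OP_TASK() wrappers stripped away:

    #include <stdbool.h>

    struct task_struct;   /* opaque here */

    struct scx_ops_sketch {
            /* same shape as the 'runnable' member matched in ext_internal.h */
            void (*runnable)(struct task_struct *p, unsigned long long enq_flags);
    };

    /* Invoke the optional callback only when implemented and when the
     * enqueue is a real wakeup, not a cross-runqueue migration. */
    static void notify_runnable(const struct scx_ops_sketch *ops,
                                struct task_struct *p,
                                unsigned long long enq_flags,
                                bool migrating)
    {
            if (ops->runnable && !migrating)
                    ops->runnable(p, enq_flags);
    }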
|
| /linux/tools/sched_ext/ |
| scx_flatcg.bpf.c |
    416  static void update_active_weight_sums(struct cgroup *cgrp, bool runnable)   in update_active_weight_sums() argument
    433  if (runnable) {                                                             in update_active_weight_sums()
    450  if (!runnable)                                                              in update_active_weight_sums()
    476  if (runnable) {                                                             in update_active_weight_sums()
    503  if (runnable)                                                               in update_active_weight_sums()
    539  void BPF_STRUCT_OPS(fcg_stopping, struct task_struct *p, bool runnable)     in BPF_STRUCT_OPS() argument
    942  .runnable = (void *)fcg_runnable,
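
scx_flatcg wires fcg_runnable/fcg_stopping into a SCHED_EXT struct_ops map. A minimal hedged sketch of that registration pattern, modeled on the hits above (scheduler name and callback bodies are placeholders, not scx_flatcg's real logic):

    /* Illustrative BPF-side pairing of the two callbacks above. */
    #include <scx/common.bpf.h>

    char _license[] SEC("license") = "GPL";

    void BPF_STRUCT_OPS(sketch_runnable, struct task_struct *p, u64 enq_flags)
    {
            /* p became runnable: e.g. activate its group's weight here */
    }

    void BPF_STRUCT_OPS(sketch_stopping, struct task_struct *p, bool runnable)
    {
            /* p stops running; 'runnable' says whether it stays queued */
    }

    SEC(".struct_ops.link")
    struct sched_ext_ops sketch_ops = {
            .runnable = (void *)sketch_runnable,
            .stopping = (void *)sketch_stopping,
            .name     = "sketch",
    };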
|
| scx_central.bpf.c |
    246  void BPF_STRUCT_OPS(central_stopping, struct task_struct *p, bool runnable)   in BPF_STRUCT_OPS() argument
|
| /linux/Documentation/scheduler/ |
| schedutil.rst |
     35  Using this we track 2 key metrics: 'running' and 'runnable'. 'Running'
     36  reflects the time an entity spends on the CPU, while 'runnable' reflects the
     40  while 'runnable' will increase to reflect the amount of contention.
     83  The result is that the above 'running' and 'runnable' metrics become invariant
    104  A further runqueue wide sum (of runnable tasks) is maintained of:
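
A back-of-envelope reading of the two metrics (paraphrasing the schedutil document; exact PELT values follow a decaying geometric series rather than this idealisation): two always-runnable tasks sharing one CPU each get half the CPU, yet each also waits whenever the other runs:

    running  ≈ time_on_cpu   / wall_time = 0.5 per task  (same as one task at 50% duty)
    runnable ≈ time_runnable / wall_time = 1.0 per task  (rises: contention is visible)

So 'running' alone cannot distinguish a half-busy task from a fully contended one; 'runnable' can.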
|
| sched-eevdf.rst |
     14  runnable tasks with the same priority. To do so, it assigns a virtual run
|
| sched-capacity.rst |
    243  accurately be predicted the moment a task first becomes runnable. The CFS class
|
| /linux/tools/testing/selftests/sched_ext/ |
| maximal.bpf.c |
     42  void BPF_STRUCT_OPS(maximal_stopping, struct task_struct *p, bool runnable)   in BPF_STRUCT_OPS() argument
    144  .runnable = (void *) maximal_runnable,
|
| select_cpu_vtime.bpf.c |
     67  bool runnable)   in BPF_STRUCT_OPS() argument
|
| /linux/Documentation/timers/ |
| no_hz.rst |
     24  have only one runnable task (CONFIG_NO_HZ_FULL=y). Unless you
     44  will frequently be multiple runnable tasks per CPU. In these cases,
    107  If a CPU has only one runnable task, there is little point in sending it
    109  Note that omitting scheduling-clock ticks for CPUs with only one runnable
    113  sending scheduling-clock interrupts to CPUs with a single runnable task,
    257  runnable task for a given CPU, even though there are a number
    260  runnable high-priority SCHED_FIFO task and an arbitrary number
    267  single runnable SCHED_FIFO task and multiple runnable SCHED_OTHER
    270  And even when there are multiple runnable tasks on a given CPU,
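
As the no_hz.rst hits note, tick omission for single-runnable-task CPUs is opted into at build and boot time. A hedged example (the CPU list is illustrative; the boot CPU stays a housekeeping CPU and keeps its tick):

    # Kernel config:
    CONFIG_NO_HZ_FULL=y
    # Kernel command line: run CPUs 1-7 in adaptive-tick mode, so they
    # skip scheduling-clock ticks while they have one runnable task:
    nohz_full=1-7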
|
| /linux/Documentation/trace/rv/ |
| monitor_sched.rst |
     67  The set non runnable on its own context (snroc) monitor ensures changes in a
    270  is woken up or set to ``runnable``.
    287  back to runnable, the resulting switch (if there) looks like a yield to the
    292  This monitor doesn't include a running state, ``sleepable`` and ``runnable``
    312  | | _blocking H runnable H | |
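
The monitors documented above are toggled at runtime through the RV tracefs interface. An assumed-path sketch (monitor availability and exact names depend on CONFIG_RV and the kernel build; see Documentation/trace/rv/):

    cat /sys/kernel/tracing/rv/available_monitors     # lists e.g. the sched monitors
    echo sched > /sys/kernel/tracing/rv/enabled_monitors
    echo 1 > /sys/kernel/tracing/rv/monitoring_on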
|
| /linux/drivers/gpu/drm/panthor/ |
| panthor_sched.c |
    239  struct list_head runnable[PANTHOR_CSG_PRIORITY_COUNT];                        member
   2258  &sched->groups.runnable[group->priority]);                                    in tick_ctx_cleanup()
   2278  &sched->groups.runnable[group->priority]);                                    in tick_ctx_cleanup()
   2432  list_move_tail(&group->run_node, &sched->groups.runnable[prio]);              in tick_ctx_apply()
   2462  if (!list_empty(&sched->groups.runnable[ctx->min_priority])) {                in tick_ctx_update_resched_target()
   2517  &sched->groups.runnable[prio],                                                in tick_work()
   2527  tick_ctx_pick_groups_from_list(sched, &ctx, &sched->groups.runnable[prio],    in tick_work()
   2639  &sched->groups.runnable[group->priority]);                                    in sync_upd_work()
   2658  struct list_head *queue = &sched->groups.runnable[group->priority];           in group_schedule_locked()
   2760  &sched->groups.runnable[group->priority]);                                    in panthor_group_start()
   [all …]
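
The panthor hits show one runnable list per priority, with list_move_tail() requeueing a group behind its peers after it is scheduled, giving round-robin rotation within each priority level. A reduced sketch of that shape (kernel-style, with the driver's fields renamed; not the actual panthor code):

    #include <linux/list.h>

    #define CSG_PRIORITY_COUNT 4   /* stand-in for PANTHOR_CSG_PRIORITY_COUNT */

    struct group_sketch {
            struct list_head run_node;
            int priority;
    };

    struct sched_sketch {
            struct list_head runnable[CSG_PRIORITY_COUNT];
    };

    /* Requeue at the tail of its priority list so groups sharing a
     * priority take turns across scheduler ticks. */
    static void requeue_group(struct sched_sketch *sched, struct group_sketch *grp)
    {
            list_move_tail(&grp->run_node, &sched->runnable[grp->priority]);
    }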
|
| /linux/Documentation/tools/rv/ |
| rv-mon-sched.rst |
     52  * snroc: set non runnable on its own context
|
| /linux/Documentation/virt/ |
| guest-halt-polling.rst |
     18  even with other runnable tasks in the host.
|
| /linux/Documentation/virt/kvm/ |
| halt-polling.rst |
     18  interval or some other task on the runqueue is runnable the scheduler is
    150  - Halt polling will only be conducted by the host when no other tasks are runnable on
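
The polling interval discussed in halt-polling.rst is exposed as a KVM module parameter; a hedged admin example (the value is arbitrary, and per-arch defaults differ):

    # Inspect, then raise, the host's halt-polling ceiling (nanoseconds).
    # Writing 0 disables halt polling; polling yields as soon as another
    # task on the runqueue becomes runnable.
    cat /sys/module/kvm/parameters/halt_poll_ns
    echo 200000 | sudo tee /sys/module/kvm/parameters/halt_poll_ns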
|
| api.rst |
   1593  whether the vcpu is runnable.
   1618  whether the vcpu is runnable.
   5762  KVM automatically accounts running and runnable time but blocked
   6930  marking the exiting vCPU as runnable, or deny it and call KVM_RUN again.
|
| /linux/Documentation/accounting/ |
| delay-accounting.rst |
      7  runnable task may wait for a free CPU to run on.
     12  a) waiting for a CPU (while being runnable)
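
The runnable-wait ("CPU delay") described here is exported per task via taskstats and can be read with the getdelays helper shipped in tools/accounting. An illustrative invocation (the PID is arbitrary):

    # Build tools/accounting/getdelays.c (needs CONFIG_TASK_DELAY_ACCT=y
    # and delay accounting enabled), then print delay stats for a task:
    ./getdelays -d -p 1234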
|
| /linux/Documentation/arch/s390/ |
| vfio-ccw.rst |
    331  space, and assemble a runnable kernel channel program by updating the
    382  channel program, which becomes runnable for a real device.
|
| /linux/Documentation/admin-guide/hw-vuln/ |
| core-scheduling.rst |
    108  highest priority task with the same cookie is selected if there is one runnable
|
| /linux/Documentation/filesystems/ |
| proc.rst |
    746  number of processes currently runnable (running or on ready queue);
    750  processes currently runnable" and "total number of processes
   1612  running or ready to run (i.e., the total number of runnable threads).
   2260  out. With CPU isolation and a single runnable task this can take
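
For reference, the runnable counts mentioned in these proc.rst hits surface in the fourth field of /proc/loadavg as "runnable/total" scheduling entities (sample values illustrative):

    $ cat /proc/loadavg
    0.42 0.30 0.24 2/517 10490
                   ^     ^
                   |     most recently allocated PID
                   2 currently runnable entities out of 517 total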
|
| /linux/Documentation/locking/ |
| rt-mutex-design.rst |
     69  of A. So now if B becomes runnable, it would not preempt C, since C now has
|