/linux/Documentation/scsi/scsi_transport_srp/rport_state_diagram.dot
    2: node [shape = doublecircle]; running lost;
    7: running_rta [ label = "running;\nreconnect\ntimer\nactive" ];
    9: running [ label = "running;\nreconnect\ntimer\nstopped" ];
    14: …running -> running_rta [ label = "fast_io_fail_tmo = off and\ndev_loss_tmo = off;\nsrp_start_tl_f…
    15: …running_rta -> running [ label = "fast_io_fail_tmo = off and\ndev_loss_tmo = off;\nreconnecting s…
    16: …running -> blocked [ label = "fast_io_fail_tmo >= 0 or\ndev_loss_tmo >= 0;\nsrp_start_tl_fail_tim…
    17: …running -> failfast [ label = "fast_io_fail_tmo = off and\ndev_loss_tmo = off;\nreconnecting fail…
    21: blocked -> running [ label = "reconnecting\nsucceeded" ];
    23: failfast -> running [ label = "reconnecting\nsucceeded" ];
    24: running -> lost [ label = "srp_stop_rport_timers()" ];

/linux/Documentation/networking/devlink/sfc.rst
    26, 30, 34, 37, 40, 43, 46, 49, 52, 55: - running
    [all …]

/linux/Documentation/networking/devlink/nfp.rst
    49, 52, 55, 58, 61, 64, 67: - stored, running

/linux/Documentation/networking/devlink/i40e.rst
    27, 32, 38, 42, 52, 56: - running

/linux/Documentation/networking/devlink/bnxt.rst
    68, 71, 74, 80, 83: - stored, running
    77: - running

/linux/Documentation/scheduler/schedutil.rst
    33: resume running.
    35: Using this we track 2 key metrics: 'running' and 'runnable'. 'Running'
    38: two metrics are the same, but once there is contention for the CPU 'running'
    49: for 50% at 2GHz, nor is running 50% on a LITTLE CPU the same as running 50% on
    83: The result is that the above 'running' and 'runnable' metrics become invariant
    97: though when running their expected utilization will be the same, they suffer a
    98: (DVFS) ramp-up after they are running again.
    101: Impulse Response (IIR) EWMA with the 'running' value on dequeue -- when it is
    115: the runqueue keeps an max aggregate of these clamps for all running tasks.
    127: The basis is the CPU runqueue's 'running' metric, which per the above it is
    [all …]

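The schedutil hits above all orbit one mechanism: the per-CPU 'running' utilization, made frequency- and CPU-invariant, is what schedutil hands to cpufreq, with some headroom so the CPU can still ramp up. A minimal, hedged userspace sketch of that mapping, assuming the conventional 1024 capacity scale and the ~1.25 headroom factor described in schedutil.rst (this is an illustration, not the kernel's code):

    #include <stdio.h>

    #define SCHED_CAPACITY_SCALE 1024  /* 'max' in the document's formulas */

    /* f_request ~= 1.25 * f_max * util / max, per schedutil.rst */
    static unsigned long next_freq_khz(unsigned long util, unsigned long max_freq_khz)
    {
        return (max_freq_khz + (max_freq_khz >> 2)) * util / SCHED_CAPACITY_SCALE;
    }

    int main(void)
    {
        /* a task 'running' ~50% of the time on a 2 GHz CPU -> util ~512 */
        printf("%lu kHz\n", next_freq_khz(512, 2000000));  /* ~1250000 */
        return 0;
    }

The invariance discussion in the same file is what makes "util ~512" mean the same thing whether the 50% was observed at 2 GHz or on a smaller core.
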
/linux/Documentation/ABI/stable/sysfs-hypervisor-xen
    5, 14, 23, 32, 54, 63, 71, 80, 87, 95: Description: If running under Xen:
    [all …]

/linux/kernel/sched/pelt.c
    103: unsigned long load, unsigned long runnable, int running) in accumulate_sum() argument
    129: * runnable = running = 0; in accumulate_sum()
    145: if (running) in accumulate_sum()
    181: unsigned long load, unsigned long runnable, int running) in ___update_load_sum() argument
    206: * running is a subset of runnable (weight) so running can't be set if in ___update_load_sum()
    209: * This means that weight will be 0 but not running for a sched_entity in ___update_load_sum()
    217: runnable = running = 0; in ___update_load_sum()
    226: if (!accumulate_sum(delta, sa, load, runnable, running)) in ___update_load_sum()
    346: int update_rt_rq_load_avg(u64 now, struct rq *rq, int running) in update_rt_rq_load_avg() argument
    349: running, in update_rt_rq_load_avg()
    [all …]

/linux/kernel/sched/pelt.h
    7: int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
    8: int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
    32: int update_irq_load_avg(struct rq *rq, u64 running);
    35: update_irq_load_avg(struct rq *rq, u64 running) in update_irq_load_avg() argument
    86: * computation done during the running delta time but then sync back to
    108: * Running longer results in stealing idle time that will in update_rq_clock_pelt()
    145: * considered as an always running rq without idle time to in update_idle_rq_clock_pelt()
    194: update_rt_rq_load_avg(u64 now, struct rq *rq, int running) in update_rt_rq_load_avg() argument
    200: update_dl_rq_load_avg(u64 now, struct rq *rq, int running) in update_dl_rq_load_avg() argument
    217: update_irq_load_avg(struct rq *rq, u64 running) in update_irq_load_avg() argument

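accumulate_sum() and ___update_load_sum() implement the decay behind those metrics: time is chopped into ~1 ms segments and each segment's 'running'/'runnable' contribution is summed with a geometric decay y chosen so that y^32 = 0.5. A hedged illustration of that series (floating point, made-up segments, not the kernel's fixed-point tables):

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        const double y = pow(0.5, 1.0 / 32.0);  /* decay so that y^32 == 0.5 */
        double always = 0.0, quarter = 0.0;
        int ms;

        for (ms = 0; ms < 1000; ms++) {
            /* a task running every segment vs. one running 1 segment in 4 */
            always = always * y + 1024;
            quarter = quarter * y + ((ms % 4) ? 0 : 1024);
        }

        /* converges to roughly a quarter of the always-running task */
        printf("util ~= %.0f / 1024\n", 1024.0 * quarter / always);
        return 0;
    }

The "running is a subset of runnable" comment at pelt.c:206 is the invariant that keeps the two sums ordered: a task cannot be running without also being runnable.
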
/linux/include/uapi/linux/membarrier.h
    34: * @MEMBARRIER_CMD_GLOBAL: Execute a memory barrier on all running threads.
    36: * is ensured that all running threads have passed
    40: * (non-running threads are de facto in such a
    42: * running on the system. This command returns 0.
    44: * Execute a memory barrier on all running threads
    48: * is ensured that all running threads have passed
    52: * (non-running threads are de facto in such a
    66: * Execute a memory barrier on each running
    69: * caller thread is ensured that all its running
    74: * (non-running threads are de facto in such a
    [all …]

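A hedged userspace sketch of what the MEMBARRIER_CMD_GLOBAL comment promises: after the call returns, every thread that was running has passed through a full memory barrier (error handling kept minimal):

    #include <linux/membarrier.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static long membarrier(int cmd, unsigned int flags, int cpu_id)
    {
        return syscall(__NR_membarrier, cmd, flags, cpu_id);
    }

    int main(void)
    {
        /* CMD_QUERY returns a bitmask of the commands this kernel supports */
        long supported = membarrier(MEMBARRIER_CMD_QUERY, 0, 0);

        if (supported < 0 || !(supported & MEMBARRIER_CMD_GLOBAL)) {
            fprintf(stderr, "MEMBARRIER_CMD_GLOBAL not available\n");
            return 1;
        }

        /* all currently running threads pass a full barrier before this returns */
        return membarrier(MEMBARRIER_CMD_GLOBAL, 0, 0) ? 1 : 0;
    }
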
/linux/drivers/gpu/drm/i915/gvt/execlist.c
    66: gvt_dbg_el("[before] running slot %d/context %x pending slot %d\n", in switch_virtual_execlist_slot()
    79: gvt_dbg_el("[after] running slot %d/context %x pending slot %d\n", in switch_virtual_execlist_slot()
    90: struct intel_vgpu_execlist_slot *running = execlist->running_slot; in emulate_execlist_status() local
    101: if (running) { in emulate_execlist_status()
    102: status.current_execlist_pointer = !!running->index; in emulate_execlist_status()
    103: status.execlist_write_pointer = !!!running->index; in emulate_execlist_status()
    105: !!!(running->index); in emulate_execlist_status()
    107: !!(running->index); in emulate_execlist_status()
    185: struct intel_vgpu_execlist_slot *running = execlist->running_slot; in emulate_execlist_ctx_schedule_out() local
    187: struct execlist_ctx_descriptor_format *ctx0 = &running->ctx[0]; in emulate_execlist_ctx_schedule_out()
    [all …]

/linux/include/kunit/test-bug.h
    19: /* Static key if KUnit is running any tests. */
    29: * kunit_get_current_test() - Return a pointer to the currently running
    32: * If a KUnit test is running in the current task, returns a pointer to its
    34: * function or assertion. If no test is running (or a test is running in a
    39: * test is running.
    51: * kunit_fail_current_test() - If a KUnit test is running, fail it.
    53: * If a KUnit test is running in the current task, mark that test as failed.

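A hedged example of how kunit_fail_current_test() is meant to be used from non-test code; the struct and field names here are invented for the illustration:

    #include <kunit/test-bug.h>
    #include <linux/types.h>

    struct my_widget {
        bool initialized;
    };

    static void my_widget_use(struct my_widget *w)
    {
        /* no-op unless a KUnit test is running in the current task */
        if (!w->initialized)
            kunit_fail_current_test("widget %p used before init", w);
    }
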
/linux/Documentation/virt/kvm/x86/running-nested-guests.rst
    4: Running nested guests with KVM
    31: - L0 – level-0; the bare metal host, running KVM
    33: - L1 – level-1 guest; a VM running on L0; also called the "guest
    34: hypervisor", as it itself is capable of running KVM.
    36: - L2 – level-2 guest; a VM running on L1, this is the "nested guest"
    43: hypervisor running on bare metal, adding another layer and
    45: metal, running the LPAR hypervisor), L1 (host hypervisor), L2
    63: multiple nested guests (level-2 guests), running different OSes, on
    83: In case you are running a Linux kernel older than v4.19, to enable
    139: .. note:: If you suspect your L2 (i.e. nested guest) is running slower,
    [all …]

/linux/Documentation/ABI/testing/sysfs-driver-qat
    10: * up: the device is up and running
    28: * sym;asym: the device is configured for running crypto
    31: * dc: the device is configured for running compression services
    34: * sym: the device is configured for running symmetric crypto
    36: * asym: the device is configured for running asymmetric crypto
    38: * asym;dc: the device is configured for running asymmetric
    41: * sym;dc: the device is configured for running symmetric crypto
    49: a device configured for running crypto services in order to
    117: * dc: the ring pair is configured for running compression services
    118: * sym: the ring pair is configured for running symmetric crypto
    [all …]

/linux/tools/testing/selftests/media_tests/media_dev_allocator.sh
    19: echo "Running unbind of $MDEV from $MDRIVER"
    28: echo "Running unbind of $ADEV from $ADRIVER"
    38: echo "Running bind of $MDEV from $MDRIVER"
    44: echo "Running bind of $ADEV from $ADRIVER"
    56: echo "Running unbind of $MDEV from $MDRIVER"
    64: echo "Running bind of $MDEV from $MDRIVER"
    70: echo "Running unbind of $ADEV from $ADRIVER"
    78: echo "Running bind of $ADEV from $ADRIVER"

/linux/Documentation/driver-api/thermal/cpu-idle-cooling.rst
    83: running
    95: cycle (aka the cooling device state), the running duration can be
    111: running
    125: running
    160: (Ptarget) resulting in an amount of time running at full power on a
    164: P(opp)target = ((Trunning x (P(opp)running) + (Tidle x P(opp)idle)) /
    169: Tidle = Trunning x ((P(opp)running / P(opp)target) - 1)
    171: At this point if we know the running period for the CPU, that gives us
    173: injection duration, we can compute the running duration with::
    175: Trunning = Tidle / ((P(opp)running / P(opp)target) - 1)
    [all …]

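The two equations quoted at lines 169 and 175 simply solve the power-mixing formula at line 164 for the idle and running durations, neglecting P(opp)idle as the document does. A small hedged sketch of that arithmetic:

    #include <stdio.h>

    /* Tidle = Trunning * ((P_running / P_target) - 1), with P_idle taken as ~0 */
    static double idle_ms(double running_ms, double p_running, double p_target)
    {
        return running_ms * (p_running / p_target - 1.0);
    }

    int main(void)
    {
        /* capping a 2 W OPP to 1 W: each 10 ms of running needs 10 ms of idle */
        printf("%.1f ms idle per 10 ms running\n", idle_ms(10.0, 2.0, 1.0));
        return 0;
    }
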
/linux/sound/pci/ctxfi/cttimer.c
    40: unsigned int running:1; member
    55: unsigned int running:1; /* global timer running */ member
    86: if (ti->running) in ct_systimer_callback()
    102: ti->running = 1; in ct_systimer_start()
    114: ti->running = 0; in ct_systimer_stop()
    150: if (!atimer->running) in ct_xfitimer_irq_rearm()
    152: atimer->running = 1; in ct_xfitimer_irq_rearm()
    157: if (atimer->running) { in ct_xfitimer_irq_stop()
    161: atimer->running = 0; in ct_xfitimer_irq_stop()
    173: * checks the running instance list and determines the next timer interval.
    [all …]

/linux/net/core/gen_stats.c
    151: struct gnet_stats_basic_sync *b, bool running) in gnet_stats_add_basic() argument
    157: WARN_ON_ONCE((cpu || running) && in_hardirq()); in gnet_stats_add_basic()
    164: if (running) in gnet_stats_add_basic()
    168: } while (running && u64_stats_fetch_retry(&b->syncp, start)); in gnet_stats_add_basic()
    176: struct gnet_stats_basic_sync *b, bool running) in gnet_stats_read_basic() argument
    203: if (running) in gnet_stats_read_basic()
    207: } while (running && u64_stats_fetch_retry(&b->syncp, start)); in gnet_stats_read_basic()
    214: int type, bool running) in ___gnet_stats_copy_basic() argument
    218: gnet_stats_read_basic(&bstats_bytes, &bstats_packets, cpu, b, running); in ___gnet_stats_copy_basic()
    247: * @running: true if @b represents a running qdisc, thus @b's
    [all …]

/linux/net/core/gen_estimator.c
    45: bool running; member
    69: gnet_stats_add_basic(b, e->cpu_bstats, e->bstats, e->running); in est_fetch_counters()
    116: * @running: true if @bstats represents a running qdisc, thus @bstats'
    134: bool running, in gen_new_estimator() argument
    163: est->running = running; in gen_new_estimator()
    223: * @running: true if @bstats represents a running qdisc, thus @bstats'
    237: bool running, struct nlattr *opt) in gen_replace_estimator()
    240: lock, running, opt); in gen_replace_estimator()

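The retry loops quoted from gnet_stats_add_basic()/gnet_stats_read_basic() follow the standard u64_stats pattern: only when the qdisc is running can a writer race with the reader, so only then is the seqcount begin/retry dance needed. A hedged, simplified sketch of that reader:

    #include <linux/u64_stats_sync.h>
    #include <net/gen_stats.h>

    static void read_basic(struct gnet_stats_basic_sync *b, bool running,
                           u64 *bytes, u64 *packets)
    {
        unsigned int start;

        do {
            /* no concurrent writer when the qdisc is not running */
            start = running ? u64_stats_fetch_begin(&b->syncp) : 0;
            *bytes = u64_stats_read(&b->bytes);
            *packets = u64_stats_read(&b->packets);
        } while (running && u64_stats_fetch_retry(&b->syncp, start));
    }
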
/linux/tools/perf/pmu-events/arch/x86/tigerlake/other.json
    3: …"BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped…
    7: …"PublicDescription": "Counts Core cycles where the core was running with power-delivery for baseli…
    12: …"BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped…
    16: …"PublicDescription": "Counts Core cycles where the core was running with power-delivery for licens…
    21: …"BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped…
    25: …"PublicDescription": "Core cycles where the core was running with power-delivery for license level…

/linux/tools/verification/models/wwnr.dot
    5: {node [shape = plaintext] "running"};
    9: "not_running" -> "running" [ label = "switch_in" ];
    10: "running" [label = "running"];
    11: "running" -> "not_running" [ label = "switch_out" ];

/linux/rust/kernel/time/hrtimer.rs
    5: //! Allows running timer callbacks without doing allocations at the time of
    14: //! - Running: executing the callback.
    42: //! --------->| Stopped | | Started +---------->| Running |
    56: //! **expires**, the timer enters the **running** state and the handler is
    59: //! handler. A timer in the **started** or **running** state may be **canceled**
    63: //! A `cancel` or `restart` operation on a timer in the **running** state takes
    65: //! out of the **running** state.
    154: /// Cancel an initialized and potentially running timer.
    156: /// If the timer handler is running, this function will block until the
    177: // If the handler is running, this will wait for the handler to return in raw_cancel()
    [all …]

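The same Stopped/Started/Running state machine exists on the C side. A hedged sketch (device struct and callback names invented; newer kernels also offer hrtimer_setup(), which takes the callback directly):

    #include <linux/hrtimer.h>
    #include <linux/ktime.h>

    struct my_dev {
        struct hrtimer timer;
    };

    static enum hrtimer_restart my_timeout(struct hrtimer *t)
    {
        /* the timer is in the "running" state while this executes */
        return HRTIMER_NORESTART;
    }

    static void my_dev_start(struct my_dev *d)
    {
        hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        d->timer.function = my_timeout;
        hrtimer_start(&d->timer, ms_to_ktime(100), HRTIMER_MODE_REL);
    }

    static void my_dev_stop(struct my_dev *d)
    {
        /* like the Rust cancel(): waits for a running my_timeout() to return */
        hrtimer_cancel(&d->timer);
    }
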
/linux/Documentation/driver-api/dmaengine/pxa_dma.rst
    10: is queued even on a running DMA channel.
    15: stop and restart, but is submitted on a "running channel". The other
    25: c) Channel running state
    26: A driver should be able to query if a channel is running or not. For the
    30: know if a channel is in running or stopped state.
    71: Suppose the running chain is:
    107: this specific case if the DMA is already running in aligned mode.
    117: any lock to find out what is the latest completed transfer in a running
    139: currently running descriptor.
    150: - a driver issued tx1+tx2 => channel is running in aligned mode

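From a client's point of view, the "hot chaining" described above is simply submitting another descriptor while the channel is still running. A hedged sketch using the generic dmaengine client API (channel and DMA buffers are assumed to have been set up elsewhere):

    #include <linux/dmaengine.h>
    #include <linux/errno.h>

    static int queue_two(struct dma_chan *chan, dma_addr_t buf1, dma_addr_t buf2,
                         size_t len, enum dma_transfer_direction dir)
    {
        struct dma_async_tx_descriptor *tx;

        tx = dmaengine_prep_slave_single(chan, buf1, len, dir, DMA_PREP_INTERRUPT);
        if (!tx)
            return -ENOMEM;
        dmaengine_submit(tx);
        dma_async_issue_pending(chan);      /* channel starts running */

        /* submitted while the channel may still be running the first transfer */
        tx = dmaengine_prep_slave_single(chan, buf2, len, dir, DMA_PREP_INTERRUPT);
        if (!tx)
            return -ENOMEM;
        dmaengine_submit(tx);
        dma_async_issue_pending(chan);
        return 0;
    }
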
/linux/drivers/thermal/cpuidle_cooling.c
    31: * cpuidle_cooling_runtime - Running time computation
    35: * The running duration is computed from the idle injection duration
    37: * means the running duration is zero. If we have a 50% ratio
    39: * running duration.
    43: * running = idle x ((100 / ratio) - 1)
    47: * running = (idle x 100) / ratio - idle
    50: * with 10ms of idle injection and 10ms of running duration.
    77: * Depending on the configuration or the hardware, the running in cpuidle_cooling_get_max_state()
    84: * means for 10ms of idle injection, we have 10ms of running in cpuidle_cooling_get_max_state()

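A hedged sketch of the formula quoted from cpuidle_cooling_runtime(): the cooling state is an idle percentage, the idle injection duration is fixed, and the running duration follows (state 0 is treated here as "no injection", returning 0 as a sentinel):

    #include <stdio.h>

    static unsigned int cooling_runtime_us(unsigned int idle_us, unsigned int state)
    {
        if (!state)
            return 0;
        /* running = idle * ((100 / state) - 1), in integer arithmetic */
        return idle_us * 100 / state - idle_us;
    }

    int main(void)
    {
        /* a 50% ratio pairs 10 ms of idle injection with 10 ms of running */
        printf("%u us running\n", cooling_runtime_us(10000, 50));
        return 0;
    }
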
/linux/tools/testing/selftests/powerpc/math/fpu_preempt.c
    36: int running; variable
    38: extern int preempt_fpu(double *darray, int *threads_starting, int *running);
    46: rc = preempt_fpu(darray, &threads_starting, &running); in preempt_fpu_c()
    60: running = true; in test_preempt_fpu()
    81: * r5 will have loaded the value of running. in test_preempt_fpu()
    83: running = 0; in test_preempt_fpu()