| /linux/Documentation/translations/zh_CN/core-api/ |
| workqueue.rst |
      577  pool[00] ref= 1 nice= 0 idle/workers= 4/ 4 cpu= 0
      578  pool[01] ref= 1 nice=-20 idle/workers= 2/ 2 cpu= 0
      579  pool[02] ref= 1 nice= 0 idle/workers= 4/ 4 cpu= 1
      580  pool[03] ref= 1 nice=-20 idle/workers= 2/ 2 cpu= 1
      581  pool[04] ref= 1 nice= 0 idle/workers= 4/ 4 cpu= 2
      582  pool[05] ref= 1 nice=-20 idle/workers= 2/ 2 cpu= 2
      583  pool[06] ref= 1 nice= 0 idle/workers= 3/ 3 cpu= 3
      584  pool[07] ref= 1 nice=-20 idle/workers= 2/ 2 cpu= 3
      585  pool[08] ref=42 nice= 0 idle/workers= 6/ 6 cpus=0000000f
      586  pool[09] ref=28 nice= 0 idle/workers= 3/ 3 cpus=00000003
      [all …]
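
The dump above (from the workqueue documentation) shows both per-CPU pools ("cpu=" lines) and unbound pools ("cpus=" mask lines). As a rough illustration of how work items land in those two kinds of pool, here is a minimal kernel-module sketch; the module and workqueue names are made up, and only schedule_work(), alloc_workqueue() with WQ_UNBOUND, and queue_work() are the real API:

    #include <linux/module.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *demo_unbound_wq;    /* hypothetical */

    static void demo_work_fn(struct work_struct *work)
    {
        pr_info("demo work ran\n");
    }

    static DECLARE_WORK(demo_percpu_work, demo_work_fn);
    static DECLARE_WORK(demo_unbound_work, demo_work_fn);

    static int __init demo_init(void)
    {
        demo_unbound_wq = alloc_workqueue("demo_unbound", WQ_UNBOUND, 0);
        if (!demo_unbound_wq)
            return -ENOMEM;

        /* Served by one of the per-CPU pools (the "cpu=" lines above). */
        schedule_work(&demo_percpu_work);

        /* Served by an unbound pool (the "cpus=" mask lines above). */
        queue_work(demo_unbound_wq, &demo_unbound_work);
        return 0;
    }

    static void __exit demo_exit(void)
    {
        flush_work(&demo_percpu_work);
        destroy_workqueue(demo_unbound_wq);    /* flushes pending work first */
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");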
|
| /linux/tools/testing/selftests/powerpc/math/ |
| fpu_preempt.c |
       24  /* Time to wait for workers to get preempted (seconds) */
       69  printf("\tWaiting for all workers to start...");  in test_preempt_fpu()
       74  printf("\tWaiting for %d seconds to let some workers get preempted...", PREEMPT_TIME);  in test_preempt_fpu()
       78  printf("\tStopping workers...");  in test_preempt_fpu()
|
| vmx_preempt.c |
       24  /* Time to wait for workers to get preempted (seconds) */
       78  printf("\tWaiting for all workers to start...");  in test_preempt_vmx()
       83  printf("\tWaiting for %d seconds to let some workers get preempted...", PREEMPT_TIME);  in test_preempt_vmx()
       87  printf("\tStopping workers...");  in test_preempt_vmx()
|
| vsx_preempt.c |
       23  /* Time to wait for workers to get preempted (seconds) */
      110  printf("\tWaiting for %d workers to start...", threads_starting);  in test_preempt_vsx()
      115  printf("\tWaiting for %d seconds to let some workers get preempted...", PREEMPT_TIME);  in test_preempt_vsx()
      119  printf("\tStopping workers...");  in test_preempt_vsx()
|
| fpu_signal.c |
       89  printf("\tWaiting for all workers to start...");  in test_signal_fpu()
      103  printf("\tStopping workers...");  in test_signal_fpu()
|
| vmx_signal.c |
      114  printf("\tWaiting for %d workers to start... %d", threads, threads_starting);  in test_signal_vmx()
      131  printf("\tKilling workers...");  in test_signal_vmx()
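
All of the powerpc math selftests above follow the same shape: spawn worker threads, wait until every worker has reported that it started, let them run for a few seconds so some get preempted (or signalled), then tell them to stop and join them. A self-contained userspace sketch of that start/wait/stop pattern (the identifiers are illustrative, not the selftests' own helpers):

    #include <pthread.h>
    #include <sched.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    #define NUM_WORKERS  4
    #define PREEMPT_TIME 5              /* seconds to let workers get preempted */

    static atomic_int threads_starting = NUM_WORKERS;
    static atomic_bool running = true;

    static void *worker(void *arg)
    {
        (void)arg;
        atomic_fetch_sub(&threads_starting, 1);    /* report "started" */
        while (atomic_load(&running))
            ;                                      /* real FPU/VMX/VSX work goes here */
        return NULL;
    }

    int main(void)
    {
        pthread_t threads[NUM_WORKERS];
        int i;

        for (i = 0; i < NUM_WORKERS; i++)
            pthread_create(&threads[i], NULL, worker, NULL);

        printf("\tWaiting for all workers to start...");
        while (atomic_load(&threads_starting))
            sched_yield();
        printf("done\n");

        printf("\tWaiting for %d seconds to let some workers get preempted...", PREEMPT_TIME);
        sleep(PREEMPT_TIME);
        printf("done\n");

        printf("\tStopping workers...");
        atomic_store(&running, false);
        for (i = 0; i < NUM_WORKERS; i++)
            pthread_join(threads[i], NULL);
        printf("done\n");
        return 0;
    }

Build with -pthread; the real tests additionally verify register state inside the worker loop.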
|
| /linux/tools/testing/selftests/bpf/ |
| test_progs.c |
      536  if (verbose() && !env.workers)  in test__end_subtest()
      888  { "workers", ARG_NUM_WORKERS, "WORKERS", OPTION_ARG_OPTIONAL,
      889    "Number of workers to run in parallel, default to number of cpus." }
     1088  env->workers = atoi(arg);  in parse_arg()
     1089  if (!env->workers) {  in parse_arg()
     1094  env->workers = get_nprocs();  in parse_arg()
     1305  for (i = 0; i < env.workers; i++)  in sigint_handler()
     1683  dispatcher_threads = calloc(sizeof(pthread_t), env.workers);  in server_main()
     1684  data = calloc(sizeof(struct dispatch_data), env.workers);  in server_main()
      [all …]
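
In the test_progs.c excerpt the worker count falls back to the number of online CPUs when the option carries no value. A tiny sketch of that fallback using glibc's get_nprocs(); parse_workers() is a hypothetical helper, not a function from test_progs.c:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/sysinfo.h>

    /* Hypothetical helper mirroring the fallback shown above. */
    static int parse_workers(const char *arg)
    {
        int workers = arg ? atoi(arg) : 0;

        if (workers <= 0)
            workers = get_nprocs();    /* default to the number of online CPUs */
        return workers;
    }

    int main(int argc, char **argv)
    {
        printf("workers = %d\n", parse_workers(argc > 1 ? argv[1] : NULL));
        return 0;
    }
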
| /linux/fs/erofs/ |
| Kconfig |
      180  bool "EROFS per-cpu decompression kthread workers"
      183  Saying Y here enables a per-CPU kthread worker pool to carry out
      189  bool "EROFS high priority per-CPU kthread workers"
      193  This permits EROFS to configure per-CPU kthread workers to run
|
| /linux/tools/workqueue/ |
| wq_dump.py |
       28  idle       number of idle workers
       29  workers    number of all workers
       31  cpus       CPUs the workers in the pool can run on (unbound pool)
      160  print(f'idle/workers={pool.nr_idle.value_():3}/{pool.nr_workers.value_():3} ', end='')
|
| wq_monitor.py |
       64  PWQ_STAT_REPATRIATED = prog['PWQ_STAT_REPATRIATED']  # unbound workers brought back into scope
|
| /linux/tools/testing/selftests/kvm/x86/ |
| hyperv_tlb_flush.c |
       57  * Pass the following info to 'workers' and 'sender'
      156  * Prepare to test: 'disable' workers by setting the expectation to '0',
      164  /* 'Disable' workers */  in prepare_to_test()
      168  /* Make sure workers are 'disabled' before we swap PTEs. */  in prepare_to_test()
      171  /* Make sure workers have enough time to notice */  in prepare_to_test()
      187  /* Set the expectation for workers, '0' means don't test */  in post_test()
      191  /* Make sure workers have enough time to test */  in post_test()
      630  * for 'workers' and issues TLB flush hypercalls.  in main()
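
The hyperv_tlb_flush.c comments describe a simple coordination scheme: the sender 'disables' the workers by publishing an expectation of 0, waits long enough for them to notice before it swaps PTEs, then re-arms them with the value they should observe. A small userspace sketch of that publish-and-wait pattern with C11 atomics (all identifiers are illustrative, and plain sleeps stand in for the test's own timing):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <unistd.h>

    /* 0 means "don't test"; a non-zero value is what workers should observe. */
    static _Atomic unsigned long expected_value;
    static _Atomic int stop;

    static void *worker(void *arg)
    {
        (void)arg;
        while (!atomic_load(&stop)) {
            unsigned long want = atomic_load(&expected_value);

            if (want)    /* worker is 'disabled' while the expectation is 0 */
                printf("worker checks shared state against %lu\n", want);
            usleep(10 * 1000);
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, worker, NULL);

        /* 'Disable' the worker, then give it time to notice before the
         * shared mapping (the PTEs in the real test) is rewritten. */
        atomic_store(&expected_value, 0);
        usleep(100 * 1000);

        /* Re-arm with the value the worker should now expect and let it test. */
        atomic_store(&expected_value, 42);
        usleep(100 * 1000);

        atomic_store(&stop, 1);
        pthread_join(t, NULL);
        return 0;
    }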
|
| /linux/tools/testing/selftests/mm/ |
| test_vmalloc.sh |
       63  echo "available test cases are run by NUM_CPUS workers simultaneously."
       96  echo "# Runs 1 test(id_1), repeats it 5 times by NUM_CPUS workers"
      104  echo -n "# Runs all tests by NUM_CPUS workers, shuffled order, repeats "
|
| /linux/kernel/ |
| workqueue.c |
       66  * While associated (!DISASSOCIATED), all workers are bound to the
       70  * While DISASSOCIATED, the cpu may be offline and all workers have
       83  POOL_DISASSOCIATED = 1 << 2,  /* cpu can't serve workers */
      121  * Rescue workers are used only on emergencies and shared by
      204  int nr_workers;  /* L: total number of workers */
      205  int nr_idle;  /* L: currently idle workers */
      207  struct list_head idle_list;  /* L: list of idle workers */
      211  struct timer_list mayday_timer;  /* L: SOS timer for workers */
      213  /* a worker is either on busy_hash or idle_list, or the manager */
      215  /* L: hash of busy workers */
      [all …]
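
The pool fields above also mention rescue workers and the mayday timer: a workqueue created with WQ_MEM_RECLAIM gets a dedicated rescuer thread, so its work can still make progress when a pool cannot create new workers under memory pressure. A minimal sketch of requesting that rescuer at allocation time (the workqueue name and helper are made up; WQ_MEM_RECLAIM and alloc_workqueue() are the real interface):

    #include <linux/errno.h>
    #include <linux/workqueue.h>

    /*
     * A workqueue used on the memory-reclaim path must be able to make
     * forward progress even when no new workers can be created;
     * WQ_MEM_RECLAIM gives it a rescuer that the mayday timer can summon.
     */
    static struct workqueue_struct *reclaim_wq;    /* hypothetical */

    static int setup_reclaim_wq(void)
    {
        reclaim_wq = alloc_workqueue("demo_reclaim", WQ_MEM_RECLAIM, 1);
        return reclaim_wq ? 0 : -ENOMEM;
    }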
|
| workqueue_internal.h |
       18  * The poor guys doing the actual heavy lifting. All on-duty workers are
       47  struct list_head node;  /* A: anchored at pool->workers */
|
| padata.c |
      396  /* Restrict parallel_wq workers to pd->cpumask.pcpu. */  in padata_setup_cpumasks()
      509  /* Initialize all percpu queues used by serial workers */
      894  * serial_cpumask   [RW] - cpumask for serial workers
      895  * parallel_cpumask [RW] - cpumask for parallel workers
|
| /linux/lib/ |
| test_vmalloc.c |
       27  "Number of workers to perform tests(min: 1 max: USHRT_MAX)");
      535  * A maximum number of workers is defined as hard-coded  in init_test_configuration()
      567  * Put all workers on hold.  in do_concurrent_test()
      584  * Now let the workers do their job.  in do_concurrent_test()
      589  * Sleep quietly until all workers are done with 1 second  in do_concurrent_test()
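
The do_concurrent_test() comments above outline a hold/release/wait flow for the worker threads. A kernel-style sketch of that flow, using a completion as the start gate and a one-second polling loop; this is not the file's actual implementation, and every identifier below is made up:

    #include <linux/atomic.h>
    #include <linux/completion.h>
    #include <linux/delay.h>
    #include <linux/err.h>
    #include <linux/kthread.h>

    static DECLARE_COMPLETION(start_gate);        /* workers held here until released */
    static atomic_t workers_done = ATOMIC_INIT(0);

    static int worker_fn(void *data)
    {
        /* Put on hold until the controller releases all workers at once. */
        wait_for_completion(&start_gate);

        /* ... the actual vmalloc test loop would run here ... */

        atomic_inc(&workers_done);
        return 0;
    }

    static void run_workers(int nr_workers)
    {
        int started = 0, i;

        for (i = 0; i < nr_workers; i++) {
            struct task_struct *t;

            t = kthread_run(worker_fn, NULL, "demo_worker/%d", i);
            if (IS_ERR(t))
                break;
            started++;
        }

        /* Now let the workers do their job. */
        complete_all(&start_gate);

        /* Sleep quietly, rechecking every second, until all workers are done. */
        while (atomic_read(&workers_done) != started)
            msleep(1000);
    }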
|
| test_objpool.c |
      396  /* tell worker threads to quit */  in ot_start_sync()
      399  /* wait for all worker threads to finish and quit */  in ot_start_sync()
      580  /* tell worker threads to quit */  in ot_start_async()
      586  /* wait for all worker threads to finish and quit */  in ot_start_async()
|
| /linux/include/uapi/linux/ |
| vhost.h |
       63  * virtqueue. If userspace is not able to call this for workers it has created,
       64  * the kernel will free all the device's workers when the device is closed.
      261  * - Vhost will create vhost workers as kernel threads.
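
The comments above come from the vhost worker ioctls: userspace can create extra vhost workers and attach them to individual virtqueues, and anything it fails to free is cleaned up when the device is closed. A hedged sketch of creating one worker and attaching it to virtqueue 0; the VHOST_NEW_WORKER and VHOST_ATTACH_VRING_WORKER ioctl names come from recent vhost.h, but the struct field names used here (worker_id, index) are assumptions to verify against the header you actually build with:

    #include <sys/ioctl.h>
    #include <linux/vhost.h>

    /*
     * Sketch: give virtqueue 0 of an already-open vhost device its own
     * worker thread.  Workers created this way should eventually be freed
     * with VHOST_FREE_WORKER; otherwise the kernel frees them when the
     * device file descriptor is closed, as the comment above notes.
     */
    static int attach_private_worker(int vhost_fd)
    {
        struct vhost_worker_state state = { 0 };
        struct vhost_vring_worker vring_worker = { 0 };

        if (ioctl(vhost_fd, VHOST_NEW_WORKER, &state) < 0)
            return -1;

        vring_worker.index = 0;                    /* virtqueue 0 */
        vring_worker.worker_id = state.worker_id;  /* assumed field names */
        return ioctl(vhost_fd, VHOST_ATTACH_VRING_WORKER, &vring_worker);
    }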
|
| /linux/tools/perf/trace/beauty/include/uapi/linux/ |
| vhost.h |
       63  * virtqueue. If userspace is not able to call this for workers it has created,
       64  * the kernel will free all the device's workers when the device is closed.
      261  * - Vhost will create vhost workers as kernel threads.
|
| /linux/io_uring/ |
| io-wq.c |
       89  * The list of free workers. Protected by #workers_lock
       95  * The list of all workers. Protected by #workers_lock
      319  * below the max number of workers, create one.
      325  * wasn't set up with any unbounded workers.  in io_wq_create_worker()
      328  pr_warn_once("io-wq is not configured for unbound workers");  in io_wq_create_worker()
      762  * Called when a worker is going to sleep. If there are no workers currently
     1431  * Set max number of unbounded workers, returns old value. If new_count is 0,
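
The last io-wq.c excerpt is the backend of the io_uring "max workers" knob. A hedged userspace sketch using liburing, assuming a liburing recent enough to provide io_uring_register_iowq_max_workers(); values[0] is the bounded limit, values[1] the unbounded one, and a 0 entry leaves that limit unchanged while still reporting the old value:

    #include <stdio.h>
    #include <liburing.h>

    int main(void)
    {
        struct io_uring ring;
        /* [0] = bounded workers, [1] = unbounded workers; 0 = don't change. */
        unsigned int values[2] = { 0, 4 };
        int ret;

        ret = io_uring_queue_init(8, &ring, 0);
        if (ret < 0)
            return 1;

        ret = io_uring_register_iowq_max_workers(&ring, values);
        if (ret == 0)
            printf("previous limits: bounded=%u unbounded=%u\n",
                   values[0], values[1]);

        io_uring_queue_exit(&ring);
        return 0;
    }

Link with -luring.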
|
| /linux/tools/include/uapi/linux/ |
| vhost.h |

| /linux/drivers/vhost/ |
| Kconfig |
      107  to configure the default mode for vhost workers.
|
| /linux/net/l2tp/ |
| Kconfig |
       23  with home workers to connect to their offices.
|
| /linux/tools/docs/ |
| get_abi.py |
      154  parser.add_argument("-j", "--jobs", "--max-workers", type=int, default=1,
|
| /linux/drivers/net/ethernet/intel/ice/ |
| ice_gnss.c |
      165  * Initialize GNSS structures and workers.
|