/linux/Documentation/translations/zh_CN/core-api/
  workqueue.rst
    577  pool[00] ref= 1 nice= 0 idle/workers= 4/ 4 cpu= 0
    578  pool[01] ref= 1 nice=-20 idle/workers= 2/ 2 cpu= 0
    579  pool[02] ref= 1 nice= 0 idle/workers= 4/ 4 cpu= 1
    580  pool[03] ref= 1 nice=-20 idle/workers= 2/ 2 cpu= 1
    581  pool[04] ref= 1 nice= 0 idle/workers= 4/ 4 cpu= 2
    582  pool[05] ref= 1 nice=-20 idle/workers= 2/ 2 cpu= 2
    583  pool[06] ref= 1 nice= 0 idle/workers= 3/ 3 cpu= 3
    584  pool[07] ref= 1 nice=-20 idle/workers= 2/ 2 cpu= 3
    585  pool[08] ref=42 nice= 0 idle/workers= 6/ 6 cpus=0000000f
    586  pool[09] ref=28 nice= 0 idle/workers= 3/ 3 cpus=00000003
    [all …]
|
/linux/tools/testing/selftests/powerpc/math/
  fpu_preempt.c
    24   /* Time to wait for workers to get preempted (seconds) */
    69   printf("\tWaiting for all workers to start...");  in test_preempt_fpu()
    74   printf("\tWaiting for %d seconds to let some workers get preempted...", PREEMPT_TIME);  in test_preempt_fpu()
    78   printf("\tStopping workers...");  in test_preempt_fpu()
|
  vmx_preempt.c
    24   /* Time to wait for workers to get preempted (seconds) */
    78   printf("\tWaiting for all workers to start...");  in test_preempt_vmx()
    83   printf("\tWaiting for %d seconds to let some workers get preempted...", PREEMPT_TIME);  in test_preempt_vmx()
    87   printf("\tStopping workers...");  in test_preempt_vmx()
|
  vsx_preempt.c
    23   /* Time to wait for workers to get preempted (seconds) */
    110  printf("\tWaiting for %d workers to start...", threads_starting);  in test_preempt_vsx()
    115  printf("\tWaiting for %d seconds to let some workers get preempted...", PREEMPT_TIME);  in test_preempt_vsx()
    119  printf("\tStopping workers...");  in test_preempt_vsx()
|
  fpu_signal.c
    89   printf("\tWaiting for all workers to start...");  in test_signal_fpu()
    103  printf("\tStopping workers...");  in test_signal_fpu()
|
  vmx_signal.c
    114  printf("\tWaiting for %d workers to start... %d", threads, threads_starting);  in test_signal_vmx()
    131  printf("\tKilling workers...");  in test_signal_vmx()
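The five powerpc math selftests above share one piece of scaffolding: each worker decrements a shared threads_starting counter once it is running, the parent spins until the counter reaches zero, sleeps PREEMPT_TIME seconds so the scheduler preempts some workers mid-computation, then stops them (the *_signal variants kill them instead). A minimal standalone sketch of just that scaffolding, assuming pthreads and C11 atomics; the real tests hammer FPU/VMX/VSX register state inside the worker loop:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    #define NUM_WORKERS  4
    #define PREEMPT_TIME 2      /* seconds to let some workers get preempted */

    static atomic_int threads_starting = NUM_WORKERS;
    static atomic_bool running = true;

    static void *worker(void *unused)
    {
            atomic_fetch_sub(&threads_starting, 1);   /* "I have started" */
            while (atomic_load(&running))
                    ;   /* the real tests exercise FPU/VMX/VSX state here */
            return NULL;
    }

    int main(void)
    {
            pthread_t t[NUM_WORKERS];
            int i;

            for (i = 0; i < NUM_WORKERS; i++)
                    pthread_create(&t[i], NULL, worker, NULL);

            printf("\tWaiting for all workers to start...");
            while (atomic_load(&threads_starting))
                    ;   /* parent spins until every worker has checked in */
            printf("done\n");

            printf("\tWaiting for %d seconds to let some workers get preempted...",
                   PREEMPT_TIME);
            sleep(PREEMPT_TIME);
            printf("done\n");

            printf("\tStopping workers...");
            atomic_store(&running, false);
            for (i = 0; i < NUM_WORKERS; i++)
                    pthread_join(t[i], NULL);
            printf("done\n");
            return 0;
    }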
|
/linux/tools/testing/selftests/bpf/
  test_progs.c
    536   if (verbose() && !env.workers)  in test__end_subtest()
    888   { "workers", ARG_NUM_WORKERS, "WORKERS", OPTION_ARG_OPTIONAL,
    889     "Number of workers to run in parallel, default to number of cpus." },
    1088  env->workers = atoi(arg);  in parse_arg()
    1089  if (!env->workers) {  in parse_arg()
    1094  env->workers = get_nprocs();  in parse_arg()
    1305  for (i = 0; i < env.workers; i++)  in sigint_handler()
    1683  dispatcher_threads = calloc(sizeof(pthread_t), env.workers);  in server_main()
    1684  data = calloc(sizeof(struct dispatch_data), env.workers);  in server_main()
    [all …]
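The parse_arg() hits show test_progs' convention: --workers takes an optional count, and zero or a missing argument falls back to one worker per online CPU via get_nprocs(). A standalone sketch of that fallback; parse_workers() is a hypothetical helper for illustration, not a function from test_progs.c:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/sysinfo.h>

    /* Missing or zero "workers" argument: one worker per online CPU,
     * mirroring the get_nprocs() fallback visible in parse_arg() above. */
    static int parse_workers(const char *arg)
    {
            int workers = arg ? atoi(arg) : 0;

            if (workers <= 0)
                    workers = get_nprocs();
            return workers;
    }

    int main(int argc, char **argv)
    {
            printf("running with %d workers\n",
                   parse_workers(argc > 1 ? argv[1] : NULL));
            return 0;
    }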
/linux/fs/erofs/
  Kconfig
    180  bool "EROFS per-cpu decompression kthread workers"
    183  Saying Y here enables a per-CPU kthread worker pool to carry out
    189  bool "EROFS high priority per-CPU kthread workers"
    193  This permits EROFS to configure per-CPU kthread workers to run
|
/linux/tools/workqueue/
  wq_dump.py
    28    idle     number of idle workers
    29    workers  total number of workers
    31    cpus     CPUs the workers in the pool can run on (unbound pool)
    160   print(f'idle/workers={pool.nr_idle.value_():3}/{pool.nr_workers.value_():3} ', end='')
|
  wq_monitor.py
    64   PWQ_STAT_REPATRIATED = prog['PWQ_STAT_REPATRIATED']  # unbound workers brought back into scope
|
/linux/tools/testing/selftests/mm/
  test_vmalloc.sh
    63   echo "available test cases are run by NUM_CPUS workers simultaneously."
    96   echo "# Runs 1 test(id_1), repeats it 5 times by NUM_CPUS workers"
    104  echo -n "# Runs all tests by NUM_CPUS workers, shuffled order, repeats "
|
/linux/kernel/
  workqueue.c
    66   * While associated (!DISASSOCIATED), all workers are bound to the
    70   * While DISASSOCIATED, the cpu may be offline and all workers have
    83   POOL_DISASSOCIATED = 1 << 2,  /* cpu can't serve workers */
    121  * Rescue workers are used only on emergencies and shared by
    204  int nr_workers;  /* L: total number of workers */
    205  int nr_idle;  /* L: currently idle workers */
    207  struct list_head idle_list;  /* L: list of idle workers */
    211  struct timer_list mayday_timer;  /* L: SOS timer for workers */
    213  /* a worker is either on busy_hash or idle_list, or the manager */
    215  /* L: hash of busy workers */
    [all …]
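The worker_pool hits show the pool's bookkeeping: total and idle worker counts, an idle_list, and a mayday timer, all annotated "L:" for "protected by the pool lock". Below is a purely illustrative userspace mirror of that counting discipline; worker_enter_idle()/worker_leave_idle() borrow their names from workqueue.c, but the bodies are simplified sketches, not kernel code:

    #include <assert.h>
    #include <pthread.h>
    #include <stdio.h>

    /* Simplified stand-in for worker_pool; field names follow the hits
     * above, types are reduced to plain counters. */
    struct pool {
            pthread_mutex_t lock;   /* stands in for the "L:" pool lock */
            int nr_workers;         /* total number of workers */
            int nr_idle;            /* currently idle workers */
    };

    static void worker_enter_idle(struct pool *pool)
    {
            pthread_mutex_lock(&pool->lock);
            pool->nr_idle++;
            assert(pool->nr_idle <= pool->nr_workers);  /* invariant under the lock */
            pthread_mutex_unlock(&pool->lock);
    }

    static void worker_leave_idle(struct pool *pool)
    {
            pthread_mutex_lock(&pool->lock);
            assert(pool->nr_idle > 0);
            pool->nr_idle--;
            pthread_mutex_unlock(&pool->lock);
    }

    int main(void)
    {
            struct pool pool = { PTHREAD_MUTEX_INITIALIZER, 1, 0 };

            worker_enter_idle(&pool);   /* worker parks on idle_list */
            worker_leave_idle(&pool);   /* worker picks up a work item */
            printf("idle/workers=%d/%d\n", pool.nr_idle, pool.nr_workers);
            return 0;
    }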
|
  workqueue_internal.h
    18   * The poor guys doing the actual heavy lifting. All on-duty workers are
    47   struct list_head node;  /* A: anchored at pool->workers */
|
  padata.c
    396  /* Restrict parallel_wq workers to pd->cpumask.pcpu. */  in padata_serial_worker()
    515  /* Initialize all percpu queues used by serial workers */  in padata_do_multithreaded()
    898  * serial_cpumask   [RW] - cpumask for serial workers
    899  * parallel_cpumask [RW] - cpumask for parallel workers
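Lines 898-899 document padata's sysfs cpumask knobs. As a hedged example, pcrypt instantiates padata under /sys/kernel/pcrypt/pencrypt (that exact path is an assumption about the configured instance); restricting its parallel workers to CPUs 0-3 is a plain write of a hex mask:

    #include <stdio.h>

    int main(void)
    {
            /* Path assumed from pcrypt's padata instance name; adjust
             * for the instance actually present on the system. */
            FILE *f = fopen("/sys/kernel/pcrypt/pencrypt/parallel_cpumask", "w");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            fputs("f\n", f);    /* hex cpumask: CPUs 0-3 */
            return fclose(f) ? 1 : 0;
    }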
|
/linux/lib/
  test_vmalloc.c
    27   "Number of workers to perform tests (min: 1, max: USHRT_MAX)");
    509  * A maximum number of workers is defined as hard-coded  in init_test_configuration()
    541  * Put all workers on hold.  in do_concurrent_test()
    558  * Now let the workers do their job.  in do_concurrent_test()
    563  * Sleep quietly until all workers are done with 1 second  in do_concurrent_test()
|
  test_objpool.c
    396  /* tell worker threads to quit */  in ot_start_sync()
    399  /* wait for all worker threads to finish and quit */  in ot_start_sync()
    580  /* tell worker threads to quit */  in ot_start_async()
    586  /* wait for all worker threads to finish and quit */  in ot_start_async()
|
/linux/include/uapi/linux/
  vhost.h
    63   * virtqueue. If userspace is not able to call this for workers it has created,
    64   * the kernel will free all the device's workers when the device is closed.
    261  * - Vhost will create vhost workers as kernel threads.
|
/linux/tools/perf/trace/beauty/include/uapi/linux/
  vhost.h
    63   * virtqueue. If userspace is not able to call this for workers it has created,
    64   * the kernel will free all the device's workers when the device is closed.
    261  * - Vhost will create vhost workers as kernel threads.
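Both copies of vhost.h describe the worker lifetime rule: workers userspace creates are freed explicitly, or by the kernel when the device is closed. A hedged sketch of that round trip with VHOST_NEW_WORKER and VHOST_FREE_WORKER; it assumes /dev/vhost-net is available and that VHOST_SET_OWNER is the only setup needed before creating a worker:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/vhost.h>

    int main(void)
    {
            int fd = open("/dev/vhost-net", O_RDWR);
            struct vhost_worker_state state = { 0 };

            if (fd < 0) {
                    perror("open /dev/vhost-net");
                    return 1;
            }
            if (ioctl(fd, VHOST_SET_OWNER, NULL) < 0) {  /* bind device to this process */
                    perror("VHOST_SET_OWNER");
                    goto out;
            }
            /* The kernel spawns a worker thread and returns its id. */
            if (ioctl(fd, VHOST_NEW_WORKER, &state) < 0) {
                    perror("VHOST_NEW_WORKER");
                    goto out;
            }
            printf("created vhost worker id %u\n", state.worker_id);

            /* Free it explicitly; otherwise, as the header notes, the kernel
             * frees all the device's workers when the device is closed. */
            if (ioctl(fd, VHOST_FREE_WORKER, &state) < 0)
                    perror("VHOST_FREE_WORKER");
    out:
            close(fd);
            return 0;
    }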
|
/linux/io_uring/
  io-wq.c
    89    * The list of free workers. Protected by #workers_lock
    95    * The list of all workers. Protected by #workers_lock
    319   * below the max number of workers, create one.
    325   * wasn't set up with any unbounded workers.  in io_wq_create_worker()
    328   pr_warn_once("io-wq is not configured for unbound workers");  in io_wq_create_worker()
    762   * Called when a worker is going to sleep. If there are no workers currently
    1430  * Set max number of unbounded workers, returns old value. If new_count is 0,
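The hit at line 1430 is the kernel side of a userspace knob: with liburing, io_uring_register_iowq_max_workers() caps the bounded and unbounded io-wq pools and reports the previous limits back in the same array. A short sketch; the cap of 8 is arbitrary:

    #include <stdio.h>
    #include <liburing.h>

    int main(void)
    {
            struct io_uring ring;
            /* values[0]: bounded pool cap, values[1]: unbounded pool cap;
             * 0 leaves a limit unchanged and just reads it back. */
            unsigned int values[2] = { 0, 8 };

            if (io_uring_queue_init(8, &ring, 0) < 0)
                    return 1;
            if (io_uring_register_iowq_max_workers(&ring, values) == 0)
                    printf("previous limits: bounded=%u unbounded=%u\n",
                           values[0], values[1]);
            io_uring_queue_exit(&ring);
            return 0;
    }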
|
/linux/tools/include/uapi/linux/
  vhost.h

/linux/drivers/vhost/
  Kconfig
    107  to configure the default mode for vhost workers.
|
/linux/net/l2tp/
  Kconfig
    23   with home workers to connect to their offices.
|
/linux/fs/xfs/
  xfs_icache.c
    374   * the actual reclaim workers from stomping over us while we recycle  in xfs_iget_recycle()
    545   * inodegc workers would result in deadlock. For a regular iget, the  in xfs_iget_cache_hit()
    618   * Do not wait for the workers, because the caller could hold an AGI  in xfs_iget_cache_hit()
    1594  * and inodegc workers immediately and waiting for them all to clear.
    2025  * workers and wait for them to stop. Caller must hold sb->s_umount to
    2057  * Enable the inode inactivation background workers and schedule deferred inode
    2168  * workers.
|
/linux/fs/btrfs/
  disk-io.c
    1766  /* helper to clean up workers */
    1771  btrfs_destroy_workqueue(fs_info->workers);  in btrfs_stop_all_workers()
    1964  fs_info->workers =  in btrfs_init_workqueues()
    2003  if (!(fs_info->workers &&  in btrfs_init_workqueues()
    4259  * Wait for any fixup workers to complete.  in close_ctree()
    4269  * Similar case here, we have to wait for delalloc workers before we  in close_ctree()
    4280  * the fs_info->workers queue because for async writes for data bios we  in close_ctree()
    4289  btrfs_flush_workqueue(fs_info->workers);  in close_ctree()
    4299  * workers of compressed writes.  in close_ctree()
    4360  * There might be existing delayed inode workers still running  in close_ctree()
    [all …]
|
/linux/drivers/net/ethernet/intel/ice/
  ice_gnss.c
    165  * Initialize GNSS structures and workers.