/linux/Documentation/translations/zh_CN/core-api/

  workqueue.rst
    577  pool[00] ref= 1 nice= 0 idle/workers= 4/ 4 cpu= 0
    578  pool[01] ref= 1 nice=-20 idle/workers= 2/ 2 cpu= 0
    579  pool[02] ref= 1 nice= 0 idle/workers= 4/ 4 cpu= 1
    580  pool[03] ref= 1 nice=-20 idle/workers= 2/ 2 cpu= 1
    581  pool[04] ref= 1 nice= 0 idle/workers= 4/ 4 cpu= 2
    582  pool[05] ref= 1 nice=-20 idle/workers= 2/ 2 cpu= 2
    583  pool[06] ref= 1 nice= 0 idle/workers= 3/ 3 cpu= 3
    584  pool[07] ref= 1 nice=-20 idle/workers= 2/ 2 cpu= 3
    585  pool[08] ref=42 nice= 0 idle/workers= 6/ 6 cpus=0000000f
    586  pool[09] ref=28 nice= 0 idle/workers= 3/ 3 cpus=00000003
    [all …]

/linux/Documentation/core-api/

  workqueue.rst
    34   number of workers as the number of CPUs. The kernel grew a lot of MT
    82   For threaded workqueues, special purpose threads, called [k]workers, execute
    126  number of the currently runnable workers. Generally, work items are
    130  workers on the CPU, the worker-pool doesn't start execution of a new
    133  are pending work items. This allows using a minimal number of workers
    136  Keeping idle workers around doesn't cost other than the memory space
    148  Forward progress guarantee relies on that workers can be created when
    150  through the use of rescue workers. All work items which might be used
    188  worker-pools which host workers which are not bound to any
    197  of mostly unused workers across different CPUs as the issuer
    [all …]

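The pool behavior quoted above is what a caller gets automatically through the
standard workqueue API. Below is a minimal kernel-module sketch of that API;
the "demo" names are invented for illustration. WQ_MEM_RECLAIM is the flag
that requests the rescuer thread behind the forward-progress guarantee
mentioned at lines 148-150.

    #include <linux/module.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *demo_wq;   /* hypothetical name */
    static struct work_struct demo_work;

    /* Runs in process context on one of the pool's kworkers. */
    static void demo_work_fn(struct work_struct *work)
    {
            pr_info("demo: executed on a concurrency-managed kworker\n");
    }

    static int __init demo_init(void)
    {
            /* WQ_MEM_RECLAIM requests a rescuer thread so queued work
             * can make progress even under memory pressure. */
            demo_wq = alloc_workqueue("demo_wq", WQ_MEM_RECLAIM, 0);
            if (!demo_wq)
                    return -ENOMEM;

            INIT_WORK(&demo_work, demo_work_fn);
            queue_work(demo_wq, &demo_work);
            return 0;
    }

    static void __exit demo_exit(void)
    {
            /* Flushes pending work, then releases the workqueue. */
            destroy_workqueue(demo_wq);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
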
/linux/tools/testing/selftests/bpf/

  test_progs.c
    452   if (verbose() && !env.workers)   in test__end_subtest()
    998   env->workers = atoi(arg);   in parse_arg()
    999   if (!env->workers) {   in parse_arg()
    1004  env->workers = get_nprocs();   in parse_arg()
    1188  for (i = 0; i < env.workers; i++)   in sigint_handler()
    1551  dispatcher_threads = calloc(sizeof(pthread_t), env.workers);   in server_main()
    1552  data = calloc(sizeof(struct dispatch_data), env.workers);   in server_main()
    1554  env.worker_current_test = calloc(sizeof(int), env.workers);   in server_main()
    1555  for (i = 0; i < env.workers; i++) {   in server_main()
    1568  for (i = 0; i < env.workers; i++) {   in server_main()
    [all …]

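The parse_arg() and server_main() lines above show the pattern: take a worker
count from the command line, fall back to get_nprocs() when it is zero, then
calloc() per-worker state and spawn one dispatcher thread per worker. A
self-contained userspace sketch of that pattern (all names invented):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/sysinfo.h>

    /* Hypothetical worker body; test_progs dispatches tests here instead. */
    static void *dispatcher(void *arg)
    {
            int id = *(int *)arg;
            printf("dispatcher %d running\n", id);
            return NULL;
    }

    int main(int argc, char **argv)
    {
            /* Same fallback as parse_arg(): zero or missing count means
             * one worker per online CPU. */
            int workers = argc > 1 ? atoi(argv[1]) : 0;
            if (!workers)
                    workers = get_nprocs();

            pthread_t *threads = calloc(workers, sizeof(pthread_t));
            int *ids = calloc(workers, sizeof(int));
            if (!threads || !ids)
                    return 1;

            for (int i = 0; i < workers; i++) {
                    ids[i] = i;
                    pthread_create(&threads[i], NULL, dispatcher, &ids[i]);
            }
            for (int i = 0; i < workers; i++)
                    pthread_join(threads[i], NULL);

            free(threads);
            free(ids);
            return 0;
    }
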
  test_progs.h
    129   int workers; /* number of worker process */   member

/linux/fs/erofs/

  Kconfig
    164  bool "EROFS per-cpu decompression kthread workers"
    167  Saying Y here enables per-CPU kthread workers pool to carry out
    173  bool "EROFS high priority per-CPU kthread workers"
    177  This permits EROFS to configure per-CPU kthread workers to run

/linux/net/l2tp/

  Kconfig
    23   with home workers to connect to their offices.

/linux/drivers/md/

  raid5.h
    518   struct r5worker *workers;   member

  raid5.c
    205   group->workers[0].working = true;   in raid5_wakeup_stripe_thread()
    207   queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work);   in raid5_wakeup_stripe_thread()
    212   if (group->workers[i].working == false) {   in raid5_wakeup_stripe_thread()
    213   group->workers[i].working = true;   in raid5_wakeup_stripe_thread()
    215   &group->workers[i].work);   in raid5_wakeup_stripe_thread()
    7197  kfree(old_groups[0].workers);   in raid5_store_group_thread_cnt()
    7233  struct r5worker *workers;   in alloc_thread_groups() local
    7242  workers = kcalloc(size, *group_cnt, GFP_NOIO);   in alloc_thread_groups()
    7245  if (!*worker_groups || !workers) {   in alloc_thread_groups()
    7246  kfree(workers);   in alloc_thread_groups()
    [all …]

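The raid5_wakeup_stripe_thread() matches show a pick-first-idle scheme: scan
the group for a worker not already marked working, claim it, and queue its
work item on the stripe's CPU with queue_work_on(). A simplified sketch of
that scheme follows; the struct and function names are illustrative, not the
raid5 originals, and the real code holds a lock around the ->working flags.

    #include <linux/workqueue.h>

    struct demo_worker {
            struct work_struct work;
            bool working;
    };

    struct demo_group {
            struct demo_worker *workers;
            int cnt;
    };

    /* Caller is assumed to hold the lock protecting ->working, as the
     * raid5 code does around raid5_wakeup_stripe_thread(). */
    static void demo_wake_one(struct demo_group *group,
                              struct workqueue_struct *wq, int cpu)
    {
            int i;

            for (i = 0; i < group->cnt; i++) {
                    if (group->workers[i].working == false) {
                            /* Claim the first idle worker and queue its
                             * work item on the requested CPU. */
                            group->workers[i].working = true;
                            queue_work_on(cpu, wq, &group->workers[i].work);
                            break;
                    }
            }
    }
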
/linux/drivers/block/mtip32xx/

  mtip32xx.c
    733   int do_irq_enable = 1, i, workers;   in mtip_handle_irq() local
    754   for (i = 0, workers = 0; i < MTIP_MAX_SLOT_GROUPS;   in mtip_handle_irq()
    759   workers++;   in mtip_handle_irq()
    762   atomic_set(&dd->irq_workers_active, workers);   in mtip_handle_irq()
    763   if (workers) {   in mtip_handle_irq()

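mtip_handle_irq() first counts how many slot groups have completions pending,
publishes that count with atomic_set(), and only then kicks the per-group
workers. A sketch of that count-then-publish-then-queue shape; everything
except atomic_set() and queue_work() is invented for illustration:

    #include <linux/atomic.h>
    #include <linux/types.h>
    #include <linux/workqueue.h>

    #define DEMO_MAX_GROUPS 8   /* stand-in for MTIP_MAX_SLOT_GROUPS */

    static void demo_handle_irq(u32 pending_mask, atomic_t *active,
                                struct workqueue_struct *wq,
                                struct work_struct *group_work)
    {
            int i, workers;

            /* Count the groups that need servicing. */
            for (i = 0, workers = 0; i < DEMO_MAX_GROUPS; i++)
                    if (pending_mask & (1u << i))
                            workers++;

            /* Publish the count before queueing, so the last worker to
             * finish can detect that it is last via a decrement-and-test
             * on *active. */
            atomic_set(active, workers);

            if (workers)
                    for (i = 0; i < DEMO_MAX_GROUPS; i++)
                            if (pending_mask & (1u << i))
                                    queue_work(wq, &group_work[i]);
    }
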
/linux/fs/btrfs/

  fs.h
    601   struct btrfs_workqueue *workers;   member

  bio.c
    640   btrfs_queue_work(fs_info->workers, &async->work);   in btrfs_wq_submit_bio()

  disk-io.c
    1781  btrfs_destroy_workqueue(fs_info->workers);   in btrfs_stop_all_workers()
    1972  fs_info->workers =   in btrfs_init_workqueues()
    2011  if (!(fs_info->workers &&   in btrfs_init_workqueues()

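The disk-io.c matches outline the lifecycle: btrfs_init_workqueues() allocates
fs_info->workers among others, the check at line 2011 fails the mount if any
allocation came back NULL, and btrfs_stop_all_workers() destroys them at
teardown. A sketch of the same lifecycle using the generic workqueue API;
btrfs actually wraps it in struct btrfs_workqueue / btrfs_queue_work(), and
all "demo" names here are invented:

    #include <linux/workqueue.h>

    struct demo_fs_info {
            struct workqueue_struct *workers;
            struct workqueue_struct *delalloc_workers;  /* hypothetical */
    };

    static void demo_stop_workqueues(struct demo_fs_info *fs)
    {
            if (fs->workers)
                    destroy_workqueue(fs->workers);
            if (fs->delalloc_workers)
                    destroy_workqueue(fs->delalloc_workers);
    }

    static int demo_init_workqueues(struct demo_fs_info *fs)
    {
            fs->workers = alloc_workqueue("demo-workers",
                                          WQ_MEM_RECLAIM, 0);
            fs->delalloc_workers = alloc_workqueue("demo-delalloc",
                                                   WQ_MEM_RECLAIM, 0);

            /* Mirror of the check at disk-io.c:2011 — any NULL means a
             * partial init that must be unwound. */
            if (!(fs->workers && fs->delalloc_workers)) {
                    demo_stop_workqueues(fs);
                    return -ENOMEM;
            }
            return 0;
    }
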
/linux/kernel/

  workqueue.c
    218   struct list_head workers; /* A: attached workers */   member
    582   list_for_each_entry((worker), &(pool)->workers, node) \
    2686  list_add_tail(&worker->node, &pool->workers);   in worker_attach_to_pool()
    3597  bh_worker(list_first_entry(&pool->workers, struct worker, node));   in workqueue_softirq_action()
    3624  bh_worker(list_first_entry(&pool->workers, struct worker, node));   in drain_dead_softirq_workfn()
    4753  INIT_LIST_HEAD(&pool->workers);   in init_worker_pool()

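These matches trace the whole life of the pool->workers list: init_worker_pool()
initializes the head, worker_attach_to_pool() appends with list_add_tail(), and
the macro at line 582 iterates the attached workers. A minimal sketch of that
bookkeeping with the kernel list API; field names mirror the snippets, and the
lock the "A:" annotation refers to is elided:

    #include <linux/list.h>

    struct demo_worker {
            struct list_head node;   /* anchored in pool->workers */
            int id;
    };

    struct demo_pool {
            struct list_head workers;   /* A: attached workers */
    };

    static void demo_pool_init(struct demo_pool *pool)
    {
            INIT_LIST_HEAD(&pool->workers);          /* cf. init_worker_pool() */
    }

    static void demo_attach(struct demo_pool *pool, struct demo_worker *w)
    {
            list_add_tail(&w->node, &pool->workers); /* cf. worker_attach_to_pool() */
    }

    /* Equivalent of the for_each_pool_worker() macro at line 582. */
    #define demo_for_each_worker(w, pool) \
            list_for_each_entry((w), &(pool)->workers, node)
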
/linux/Documentation/admin-guide/

  workload-tracing.rst
    126  starts specified number (N) of workers that exercise various netdevice
    264  The netdev stressor starts N workers that exercise various netdevice ioctl

  kernel-per-CPU-kthreads.rst
    258  c. As of v3.18, Christoph Lameter's on-demand vmstat workers

/linux/Documentation/dev-tools/

  kcov.rst
    248  exits (e.g. vhost workers).