Lines Matching defs:nr_running

211 	int			nr_running;
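
The definition at line 211 is a member of struct worker_pool in kernel/workqueue.c. A minimal sketch of the surrounding context (neighboring fields elided; the synchronization comment is paraphrased from the source and may differ across kernel versions):

    struct worker_pool {
            raw_spinlock_t          lock;   /* protects pool fields */

            /* ... other fields elided ... */

            /*
             * Count of concurrency-managed workers currently runnable.
             * Incremented in process context on the associated CPU with
             * preemption disabled; decremented or reset under pool->lock.
             */
            int                     nr_running;

            /* ... other fields elided ... */
    };
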
946 * Note that, because unbound workers never contribute to nr_running, this
952 return !list_empty(&pool->worklist) && !pool->nr_running;
964 return !list_empty(&pool->worklist) && (pool->nr_running <= 1);
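
Read together, the predicates at lines 952 and 964 drive concurrency management: a pool wakes another worker only when work is pending and nothing is running, and a running worker keeps consuming work only while it is at most the sole runner. A sketch of the enclosing helpers, reconstructed around the matched return statements:

    /* True when work is pending but no concurrency-managed worker runs. */
    static bool need_more_worker(struct worker_pool *pool)
    {
            return !list_empty(&pool->worklist) && !pool->nr_running;
    }

    /* True while this worker is at most the only runner; keep working. */
    static bool keep_working(struct worker_pool *pool)
    {
            return !list_empty(&pool->worklist) && (pool->nr_running <= 1);
    }
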
984 * worker_set_flags - set worker flags and adjust nr_running accordingly
988 * Set @flags in @worker->flags and adjust nr_running accordingly.
996 /* If transitioning into NOT_RUNNING, adjust nr_running. */
999 pool->nr_running--;
1006 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
1010 * Clear @flags in @worker->flags and adjust nr_running accordingly.
1022 * If transitioning out of NOT_RUNNING, increment nr_running. Note
1028 pool->nr_running++;
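
Lines 984-1028 belong to the paired helpers that keep nr_running in step with the WORKER_NOT_RUNNING flag mask. A condensed sketch reconstructed around the matched lines (both helpers run with pool->lock held):

    static inline void worker_set_flags(struct worker *worker, unsigned int flags)
    {
            struct worker_pool *pool = worker->pool;

            /* If transitioning into NOT_RUNNING, adjust nr_running. */
            if ((flags & WORKER_NOT_RUNNING) &&
                !(worker->flags & WORKER_NOT_RUNNING))
                    pool->nr_running--;

            worker->flags |= flags;
    }

    static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
    {
            struct worker_pool *pool = worker->pool;
            unsigned int oflags = worker->flags;

            worker->flags &= ~flags;

            /*
             * If transitioning out of NOT_RUNNING, increment nr_running.
             * NOT_RUNNING is a mask of several flags, so the increment
             * fires only when the last of them is cleared.
             */
            if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
                    if (!(worker->flags & WORKER_NOT_RUNNING))
                            pool->nr_running++;
    }
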
1070 /* Sanity check nr_running. */
1071 WARN_ON_ONCE(pool->nr_workers == pool->nr_idle && pool->nr_running);
1428 * and the nr_running increment below, we may ruin the nr_running reset
1429 * and leave with an unexpected pool->nr_running == 1 on the newly unbound
1434 worker->pool->nr_running++;
1477 * want to decrement nr_running after the worker is unbound
1478 * and nr_running has been reset.
1485 pool->nr_running--;
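
Lines 1428-1485 come from the scheduler hooks wq_worker_running() and wq_worker_sleeping(), whose increment and decrement must not race with the nr_running reset in unbind_workers(). A condensed sketch of both sides, reconstructed around the matched lines; details and helper names (e.g. wake_up_worker()) vary across kernel versions:

    void wq_worker_running(struct task_struct *task)
    {
            struct worker *worker = kthread_data(task);

            if (!READ_ONCE(worker->sleeping))
                    return;

            /*
             * Keep the NOT_RUNNING check and the increment in one
             * non-preemptible region so unbind_workers() cannot reset
             * nr_running in between and be left with a stale 1.
             */
            preempt_disable();
            if (!(worker->flags & WORKER_NOT_RUNNING))
                    worker->pool->nr_running++;
            preempt_enable();

            WRITE_ONCE(worker->sleeping, 0);
    }

    void wq_worker_sleeping(struct task_struct *task)
    {
            struct worker *worker = kthread_data(task);
            struct worker_pool *pool;

            if (worker->flags & WORKER_NOT_RUNNING)
                    return;

            pool = worker->pool;

            /* Already marked sleeping; don't decrement twice. */
            if (READ_ONCE(worker->sleeping))
                    return;

            WRITE_ONCE(worker->sleeping, 1);
            raw_spin_lock_irq(&pool->lock);

            /*
             * Recheck under the lock: if unbind_workers() ran meanwhile,
             * the worker is unbound and nr_running was already reset, so
             * decrementing now would corrupt the count.
             */
            if (worker->flags & WORKER_NOT_RUNNING) {
                    raw_spin_unlock_irq(&pool->lock);
                    return;
            }

            pool->nr_running--;
            if (need_more_worker(pool))
                    wake_up_worker(pool);
            raw_spin_unlock_irq(&pool->lock);
    }
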
1520 * @pool->nr_running until it wakes up. As wq_worker_sleeping() also
1521 * decrements ->nr_running, setting CPU_INTENSIVE here can lead to
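
Lines 1520-1521 are from wq_worker_tick(): if a worker is already switching out voluntarily, flagging it CPU_INTENSIVE (a NOT_RUNNING flag) would decrement nr_running on top of the decrement wq_worker_sleeping() performs. A sketch of the guard as the surrounding comment describes it:

    /*
     * If the worker is already NOT_RUNNING or is voluntarily switching
     * out (->sleeping set), setting CPU_INTENSIVE here would decrement
     * nr_running a second time; punt the decision to the next tick.
     */
    if ((worker->flags & WORKER_NOT_RUNNING) || READ_ONCE(worker->sleeping))
            return;
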
3256 * since nr_running would always be >= 1 at this point. This is used to
6696 * The handling of nr_running in sched callbacks is disabled
6697 * now. Zap nr_running. After this, nr_running stays zero and
6703 pool->nr_running = 0;
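
Lines 6696-6703 are the reset in unbind_workers(). Once every worker carries WORKER_UNBOUND (a NOT_RUNNING flag), the sched callbacks above no longer touch the counter, so it can simply be zapped. A condensed sketch of that sequence (locking and surrounding steps abbreviated):

    raw_spin_lock_irq(&pool->lock);

    /* UNBOUND is a NOT_RUNNING flag; every worker drops out of
     * concurrency management here. */
    for_each_pool_worker(worker, pool)
            worker->flags |= WORKER_UNBOUND;

    pool->flags |= POOL_DISASSOCIATED;

    /*
     * With the sched callbacks neutralized, zap the counter. nr_running
     * then stays zero, so need_more_worker() and keep_working() remain
     * true while the worklist is non-empty, and pending work drains
     * without concurrency throttling.
     */
    pool->nr_running = 0;

    raw_spin_unlock_irq(&pool->lock);
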
6755 * worker_clr_flags() or adjust nr_running. Atomically
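
Line 6755 is from rebind_workers(), which cannot call worker_clr_flags() or touch nr_running directly; instead it trades one NOT_RUNNING flag for another in a single store, so the counter needs no adjustment. The worker clears REBOUND itself via worker_clr_flags() later, which performs the increment at a safe point. A sketch of that swap:

    for_each_pool_worker(worker, pool) {
            unsigned int worker_flags = worker->flags;

            /* Replace UNBOUND with REBOUND in one plain store; the
             * worker stays NOT_RUNNING throughout, so nr_running is
             * untouched. */
            WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
            worker_flags |= WORKER_REBOUND;
            worker_flags &= ~WORKER_UNBOUND;
            WRITE_ONCE(worker->flags, worker_flags);
    }
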