/linux/tools/testing/radix-tree/regression1.c
    171  int nr_threads;    [regression1_test(), local]
    177  nr_threads = 2;    [regression1_test()]
    178  pthread_barrier_init(&worker_barrier, NULL, nr_threads);    [regression1_test()]
    180  threads = malloc(nr_threads * sizeof(*threads));    [regression1_test()]
    182  for (i = 0; i < nr_threads; i++) {    [regression1_test()]
    190  for (i = 0; i < nr_threads; i++) {    [regression1_test()]

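The regression1 test spawns a fixed number of worker threads and releases them together through a pthread barrier initialized with nr_threads. A minimal userspace sketch of that pattern (the worker body here is a placeholder, not the actual test logic):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_barrier_t worker_barrier;

    /* Placeholder worker: every thread blocks until all nr_threads arrive. */
    static void *worker_fn(void *arg)
    {
            (void)arg;
            pthread_barrier_wait(&worker_barrier);
            /* ... the work the real test races here ... */
            return NULL;
    }

    int main(void)
    {
            int nr_threads = 2;
            pthread_t *threads;
            int i;

            /* The barrier counts nr_threads waiters before releasing any of them. */
            pthread_barrier_init(&worker_barrier, NULL, nr_threads);

            threads = malloc(nr_threads * sizeof(*threads));
            if (!threads)
                    return 1;

            for (i = 0; i < nr_threads; i++)
                    pthread_create(&threads[i], NULL, worker_fn, NULL);
            for (i = 0; i < nr_threads; i++)
                    pthread_join(threads[i], NULL);

            pthread_barrier_destroy(&worker_barrier);
            free(threads);
            return 0;
    }

Build with -pthread; the barrier guarantees no worker starts its body before all of them exist.
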
/linux/fs/xfs/xfs_pwork.c
    66   unsigned int nr_threads = 0;    [xfs_pwork_init(), local]
    70   nr_threads = xfs_globals.pwork_threads;    [xfs_pwork_init()]
    72   trace_xfs_pwork_init(mp, nr_threads, current->pid);    [xfs_pwork_init()]
    75   WQ_UNBOUND | WQ_SYSFS | WQ_FREEZABLE, nr_threads, tag,    [xfs_pwork_init()]

/linux/fs/xfs/xfs_trace.h
    4453  TP_PROTO(struct xfs_mount *mp, unsigned int nr_threads, pid_t pid),
    4454  TP_ARGS(mp, nr_threads, pid),
    4457  __field(unsigned int, nr_threads)
    4462  __entry->nr_threads = nr_threads;
    4467  __entry->nr_threads, __entry->pid)

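The xfs_trace.h fragments above are pieces of a TRACE_EVENT() definition that records nr_threads in the trace ring buffer. A generic skeleton showing how TP_PROTO/TP_ARGS/__field/TP_fast_assign/TP_printk fit together (event and field names are illustrative, not the exact xfs_pwork_init event, and the usual define-trace header boilerplate is omitted):

    /* Sketch of a tracepoint carrying an nr_threads field. */
    TRACE_EVENT(sample_pwork_init,
            TP_PROTO(unsigned int nr_threads, pid_t pid),
            TP_ARGS(nr_threads, pid),
            TP_STRUCT__entry(
                    __field(unsigned int, nr_threads)
                    __field(pid_t, pid)
            ),
            TP_fast_assign(
                    __entry->nr_threads = nr_threads;
                    __entry->pid = pid;
            ),
            TP_printk("nr_threads %u pid %d", __entry->nr_threads, __entry->pid)
    );
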
/linux/tools/lib/perf/threadmap.c
    45  struct perf_thread_map *perf_thread_map__new_array(int nr_threads, pid_t *array)    [argument]
    47  struct perf_thread_map *threads = thread_map__alloc(nr_threads);    [perf_thread_map__new_array()]
    53  for (i = 0; i < nr_threads; i++)    [perf_thread_map__new_array()]
    56  threads->nr = nr_threads;    [perf_thread_map__new_array()]

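perf_thread_map__new_array() builds a libperf thread map from an array of TIDs; the map then reports its size through perf_thread_map__nr(). A hedged usage sketch (assumes the libperf headers are installed and the program is linked with -lperf; the TID values are placeholders):

    #include <perf/threadmap.h>
    #include <sys/types.h>
    #include <stdio.h>

    int main(void)
    {
            /* Placeholder TIDs; real code would pass live thread ids. */
            pid_t tids[] = { 1234, 1235 };
            struct perf_thread_map *threads;
            int i;

            threads = perf_thread_map__new_array(2, tids);
            if (!threads)
                    return 1;

            for (i = 0; i < perf_thread_map__nr(threads); i++)
                    printf("thread %d: %d\n", i, (int)perf_thread_map__pid(threads, i));

            perf_thread_map__put(threads);
            return 0;
    }
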
/linux/tools/lib/perf/evlist.c
    342  int nr_threads = perf_thread_map__nr(evlist->threads);    [perf_evlist__alloc_pollfd(), local]
    350  nfds += nr_cpus * nr_threads;    [perf_evlist__alloc_pollfd()]
    566  int nr_threads = perf_thread_map__nr(evlist->threads);    [mmap_per_thread(), local]
    572  __func__, nr_cpus, nr_threads);    [mmap_per_thread()]
    575  for (thread = 0; thread < nr_threads; thread++, idx++) {    [mmap_per_thread()]
    608  int nr_threads = perf_thread_map__nr(evlist->threads);    [mmap_per_cpu(), local]
    613  pr_debug("%s: nr cpu values %d nr threads %d\n", __func__, nr_cpus, nr_threads);    [mmap_per_cpu()]
    619  for (thread = 0; thread < nr_threads; thread++) {    [mmap_per_cpu()]

/linux/tools/testing/selftests/cgroup/test_kmem.c
    102  int nr_threads = 2 * get_nprocs();    [alloc_kmem_smp()]
    107  tinfo = calloc(nr_threads, sizeof(pthread_t));    [alloc_kmem_smp()]
    111  for (i = 0; i < nr_threads; i++) {    [alloc_kmem_smp()]
    119  for (i = 0; i < nr_threads; i++) {    [alloc_kmem_smp()]
    247  int nr_threads = 1000;    [spawn_1000_threads()]
    253  tinfo = calloc(nr_threads, sizeof(pthread_t));    [spawn_1000_threads()]
    257  for (i = 0; i < nr_threads; i++) {    [spawn_1000_threads()]
    100  int nr_threads = 2 * get_nprocs();    [alloc_kmem_smp(), local]
    245  int nr_threads = 1000;    [spawn_1000_threads(), local]

/linux/kernel/power/swap.c
    704   unsigned thr, run_threads, nr_threads;    [save_compressed_image(), local]
    717   nr_threads = num_online_cpus() - 1;    [save_compressed_image()]
    718   nr_threads = clamp_val(nr_threads, 1, CMP_THREADS);    [save_compressed_image()]
    727   data = vzalloc(array_size(nr_threads, sizeof(*data)));    [save_compressed_image()]
    744   for (thr = 0; thr < nr_threads; thr++) {    [save_compressed_image()]
    774   for (thr = 0; thr < nr_threads; thr++) {    [save_compressed_image()]
    793   pr_info("Using %u thread(s) for %s compression\n", nr_threads, hib_comp_algo);    [save_compressed_image()]
    802   for (thr = 0; thr < nr_threads; thr++) {    [save_compressed_image()]
    899   for (thr = 0; thr < nr_threads; thr++) {    [save_compressed_image()]
    1210  unsigned i, thr, run_threads, nr_threads;    [load_compressed_image(), local]
    [all …]

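save_compressed_image() sizes its compression worker pool as the number of online CPUs minus one, clamped to the range [1, CMP_THREADS], leaving a CPU free for I/O. A userspace analog of that sizing calculation (the CMP_THREADS value here is illustrative, not the kernel's constant):

    #include <stdio.h>
    #include <unistd.h>

    #define CMP_THREADS 3   /* illustrative cap, standing in for the kernel-side constant */

    int main(void)
    {
            long cpus = sysconf(_SC_NPROCESSORS_ONLN);
            long nr_threads = cpus - 1;

            /* Clamp to at least one and at most CMP_THREADS workers. */
            if (nr_threads < 1)
                    nr_threads = 1;
            if (nr_threads > CMP_THREADS)
                    nr_threads = CMP_THREADS;

            printf("Using %ld compression thread(s)\n", nr_threads);
            return 0;
    }
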
/linux/lib/test_vmalloc.c
    26   __param(int, nr_threads, 0,
    511  nr_threads = clamp(nr_threads, 1, (int) USHRT_MAX);    [init_test_configuration()]
    514  tdriver = kvcalloc(nr_threads, sizeof(*tdriver), GFP_KERNEL);    [init_test_configuration()]
    543  for (i = 0; i < nr_threads; i++) {    [do_concurrent_test()]
    570  for (i = 0; i < nr_threads; i++) {    [do_concurrent_test()]

/linux/tools/testing/selftests/bpf/benchs/run_bench_bpf_hashmap_full_update.sh
    8  nr_threads=`expr $(cat /proc/cpuinfo | grep "processor"| wc -l) - 1`
    9  summary=$($RUN_BENCH -p $nr_threads bpf-hashmap-full-update)

/linux/fs/bcachefs/tests.c
    762  unsigned nr_threads;    [member]
    788  ret = j->fn(j->c, div64_u64(j->nr, j->nr_threads));    [btree_perf_test_thread()]
    803  u64 nr, unsigned nr_threads)    [bch2_btree_perf_test(), argument]
    805  struct test_job j = { .c = c, .nr = nr, .nr_threads = nr_threads };    [bch2_btree_perf_test()]
    812  if (nr == 0 || nr_threads == 0) {    [bch2_btree_perf_test()]
    817  atomic_set(&j.ready, nr_threads);    [bch2_btree_perf_test()]
    820  atomic_set(&j.done, nr_threads);    [bch2_btree_perf_test()]
    862  if (nr_threads == 1)    [bch2_btree_perf_test()]
    865  for (i = 0; i < nr_threads; i++)    [bch2_btree_perf_test()]
    878  name_buf, nr_buf.buf, nr_threads,    [bch2_btree_perf_test()]
    [all …]

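bch2_btree_perf_test() splits nr operations evenly across nr_threads workers (div64_u64(j->nr, j->nr_threads)) and uses atomic ready/done counters as start and finish gates. A userspace sketch of that countdown pattern using C11 atomics (names and workload are illustrative, not the bcachefs code):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define NR_THREADS 4
    #define NR_OPS     1000000ULL

    static atomic_int ready = NR_THREADS;   /* counts down as workers check in */
    static atomic_int done  = NR_THREADS;   /* counts down as workers finish */

    static void *worker(void *arg)
    {
            unsigned long long per_thread = NR_OPS / NR_THREADS;    /* the div64_u64() split */
            volatile unsigned long long sum = 0;
            unsigned long long i;

            (void)arg;

            /* Start gate: spin until every worker has decremented ready. */
            atomic_fetch_sub(&ready, 1);
            while (atomic_load(&ready) > 0)
                    ;

            for (i = 0; i < per_thread; i++)
                    sum += i;       /* stand-in for the per-thread test body */

            /* Finish gate: the last worker brings done to zero. */
            atomic_fetch_sub(&done, 1);
            return NULL;
    }

    int main(void)
    {
            pthread_t tids[NR_THREADS];
            int i;

            for (i = 0; i < NR_THREADS; i++)
                    pthread_create(&tids[i], NULL, worker, NULL);
            for (i = 0; i < NR_THREADS; i++)
                    pthread_join(tids[i], NULL);

            printf("done counter after join: %d\n", atomic_load(&done));
            return 0;
    }
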
/linux/tools/testing/selftests/net/tcp_ao/lib/setup.c
    175  static unsigned int nr_threads = 1;    [variable]
    189  if (stage_threads[q] == nr_threads) {    [synchronize_threads()]
    194  while (stage_threads[q] < nr_threads)    [synchronize_threads()]
    269  nr_threads++;    [__test_init()]

/linux/arch/s390/appldata/appldata_os.c
    65   u32 nr_threads;	/* number of threads */    [member]
    101  os_data->nr_threads = nr_threads;    [appldata_get_os_data()]

/linux/arch/s390/appldata/appldata_base.c
    424  EXPORT_SYMBOL_GPL(nr_threads);

/linux/tools/testing/selftests/kvm/aarch64/vgic_lpi_stress.c
|
/linux/tools/testing/selftests/bpf/prog_tests/task_local_storage.c
    176  const int nr_threads = 32;    [test_nodeadlock(), local]
    177  pthread_t tids[nr_threads];    [test_nodeadlock()]
    208  for (i = 0; i < nr_threads; i++) {    [test_nodeadlock()]
    222  waitall(tids, nr_threads);    [test_nodeadlock()]

/linux/include/linux/sched/stat.h
    17  extern int nr_threads;

/linux/include/linux/sched/signal.h
    82   atomic_t nr_threads;    [member]
    97   int nr_threads;    [member]
    696  return task->signal->nr_threads;    [get_nr_threads()]

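get_nr_threads() returns signal->nr_threads, the per-process thread count; this is the count that surfaces to userspace as the "Threads:" line of /proc/<pid>/status. A small hedged reader for that value (the parsing is a sketch; the field name is the documented one):

    #include <stdio.h>

    /* Read the "Threads:" field of /proc/self/status, which the kernel
     * fills in from the per-process thread count (signal_struct). */
    int main(void)
    {
            char line[256];
            FILE *f = fopen("/proc/self/status", "r");

            if (!f)
                    return 1;

            while (fgets(line, sizeof(line), f)) {
                    int threads;

                    if (sscanf(line, "Threads: %d", &threads) == 1) {
                            printf("this process has %d thread(s)\n", threads);
                            break;
                    }
            }
            fclose(f);
            return 0;
    }
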
/linux/fs/proc/loadavg.c
    24  nr_running(), nr_threads,    [loadavg_proc_show()]

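loadavg_proc_show() prints nr_running()/nr_threads as the fourth field of /proc/loadavg (e.g. "2/1432": runnable tasks over total threads system-wide). A short reader for that field:

    #include <stdio.h>

    /* Parse /proc/loadavg; the fourth field is "running/total", where the
     * total comes from the kernel's global nr_threads counter. */
    int main(void)
    {
            double l1, l5, l15;
            int running, total;
            FILE *f = fopen("/proc/loadavg", "r");

            if (!f)
                    return 1;

            if (fscanf(f, "%lf %lf %lf %d/%d", &l1, &l5, &l15, &running, &total) == 5)
                    printf("%d runnable of %d total threads\n", running, total);

            fclose(f);
            return 0;
    }
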
/linux/tools/lib/perf/include/perf/threadmap.h
    11  LIBPERF_API struct perf_thread_map *perf_thread_map__new_array(int nr_threads, pid_t *array);

/linux/tools/tracing/latency/latency-collector.c
    77    static unsigned int nr_threads = DEFAULT_NR_PRINTER_THREADS;    [variable]
    1642  if (nr_threads > MAX_THREADS) {    [start_printthread()]
    1645  nr_threads, MAX_THREADS);    [start_printthread()]
    1646  nr_threads = MAX_THREADS;    [start_printthread()]
    1648  for (i = 0; i < nr_threads; i++) {    [start_printthread()]
    1931  nr_threads = value;    [scan_arguments()]
    2040  policy_name(sched_policy), sched_pri, nr_threads);    [show_params()]

/linux/init/init_task.c
    21  .nr_threads = 1,

/linux/kernel/fork.c
    136   int nr_threads;	/* The idle threads do not count.. */    [variable]
    1875  sig->nr_threads = 1;    [copy_signal()]
    2285  if (data_race(nr_threads >= max_threads))    [copy_process()]
    2607  current->signal->nr_threads++;    [copy_process()]
    2616  nr_threads++;    [copy_process()]

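copy_process() refuses to create a new task once the global nr_threads counter reaches max_threads, and bumps both the per-process and the global counter only when the new task is actually committed. A userspace analog of that accounting pattern (check against the limit before creating, increment only on success; the names and the limit are illustrative):

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    #define MAX_THREADS 8   /* illustrative limit, analogous to the kernel's max_threads */

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int nr_threads;  /* analogous to the global counter in kernel/fork.c */

    static void *worker(void *arg)
    {
            (void)arg;
            return NULL;
    }

    /* Create a thread only if the count is below the limit; bump the
     * counter only after creation actually succeeded. */
    static int create_counted_thread(pthread_t *tid)
    {
            int ret;

            pthread_mutex_lock(&lock);
            if (nr_threads >= MAX_THREADS) {
                    pthread_mutex_unlock(&lock);
                    return -EAGAIN;
            }
            ret = pthread_create(tid, NULL, worker, NULL);
            if (!ret)
                    nr_threads++;
            pthread_mutex_unlock(&lock);
            return ret ? -ret : 0;
    }

    int main(void)
    {
            pthread_t tids[MAX_THREADS + 1];
            int i, created = 0;

            for (i = 0; i < MAX_THREADS + 1; i++) {
                    if (create_counted_thread(&tids[i]))
                            break;
                    created++;
            }
            printf("created %d thread(s), limit was %d\n", created, MAX_THREADS);

            for (i = 0; i < created; i++)
                    pthread_join(tids[i], NULL);
            return 0;
    }
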
/linux/tools/lib/perf/Documentation/libperf.txt
    64  struct perf_thread_map *perf_thread_map__new_array(int nr_threads, pid_t *array);

/linux/tools/perf/util/evsel.c
    1945  static void evsel__remove_fd(struct evsel *pos, int nr_cpus, int nr_threads, int thread_idx)    [argument]
    1948  for (int thread = thread_idx; thread < nr_threads - 1; thread++)    [evsel__remove_fd()]
    1954  int nr_threads, int thread_idx)    [update_fds(), argument]
    1958  if (cpu_map_idx >= nr_cpus || thread_idx >= nr_threads)    [update_fds()]
    1964  evsel__remove_fd(pos, nr_cpus, nr_threads, thread_idx);    [update_fds()]

/linux/fs/coredump.c
    400  atomic_set(&core_state->nr_threads, nr);    [zap_threads()]