Home
last modified time | relevance | path

Searched refs: nthreads (Results 1 – 25 of 45) sorted by relevance

12

/linux/tools/testing/selftests/mm/
H A Dmigration.c30 int nthreads; in FIXTURE() local
40 self->nthreads = numa_num_task_cpus() - 1; in FIXTURE_SETUP()
54 self->threads = malloc(self->nthreads * sizeof(*self->threads)); in FIXTURE_SETUP()
56 self->pids = malloc(self->nthreads * sizeof(*self->pids)); in FIXTURE_SETUP()
129 if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
137 for (i = 0; i < self->nthreads - 1; i++)
142 for (i = 0; i < self->nthreads - 1; i++)
155 if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
163 for (i = 0; i < self->nthreads - 1; i++) {
177 for (i = 0; i < self->nthreads - 1; i++)
[all …]
H A Dgup_test.c94 int filed, i, opt, nr_pages = 1, thp = -1, write = 1, nthreads = 1, ret; in main() local
131 nthreads = atoi(optarg); in main()
202 ksft_set_plan(nthreads); in main()
246 tid = malloc(sizeof(pthread_t) * nthreads); in main()
248 for (i = 0; i < nthreads; i++) { in main()
252 for (i = 0; i < nthreads; i++) { in main()
/linux/tools/perf/bench/
H A Dfutex-requeue.c55 OPT_UINTEGER('t', "threads", &params.nthreads, "Specify amount of threads"),
79 params.nthreads, in print_summary()
134 threads_starting = params.nthreads; in block_threads()
141 for (i = 0; i < params.nthreads; i++) { in block_threads()
194 if (!params.nthreads) in bench_futex_requeue()
195 params.nthreads = perf_cpu_map__nr(cpu); in bench_futex_requeue()
197 worker = calloc(params.nthreads, sizeof(*worker)); in bench_futex_requeue()
204 if (params.nrequeue > params.nthreads) in bench_futex_requeue()
205 params.nrequeue = params.nthreads; in bench_futex_requeue()
208 params.nrequeue = params.nthreads; in bench_futex_requeue()
[all …]
H A Dbreakpoint.c22 unsigned int nthreads; member
26 .nthreads = 1,
33 OPT_UINTEGER('t', "threads", &thread_params.nthreads, "Specify amount of threads"),
91 threads = calloc(thread_params.nthreads, sizeof(threads[0])); in breakpoint_thread()
97 for (i = 0; i < thread_params.nthreads; i++) { in breakpoint_thread()
102 futex_wake(&done, thread_params.nthreads, 0); in breakpoint_thread()
103 for (i = 0; i < thread_params.nthreads; i++) in breakpoint_thread()
161 (double)result_usec / bench_repeat / thread_params.nthreads); in bench_breakpoint_thread()
164 thread_params.nthreads * thread_params.nparallel); in bench_breakpoint_thread()
200 unsigned int i, nthreads, result_usec, done = 0; in bench_breakpoint_enable() local
[all …]
H A Dfutex-wake.c55 OPT_UINTEGER('t', "threads", &params.nthreads, "Specify amount of threads"),
95 params.nthreads, in print_summary()
107 threads_starting = params.nthreads; in block_threads()
114 for (i = 0; i < params.nthreads; i++) { in block_threads()
169 if (!params.nthreads) in bench_futex_wake()
170 params.nthreads = perf_cpu_map__nr(cpu); in bench_futex_wake()
172 worker = calloc(params.nthreads, sizeof(*worker)); in bench_futex_wake()
181 getpid(), params.nthreads, params.fshared ? "shared":"private", in bench_futex_wake()
208 while (nwoken != params.nthreads) in bench_futex_wake()
219 j + 1, nwoken, params.nthreads, in bench_futex_wake()
[all …]
H A Dfutex-wake-parallel.c66 OPT_UINTEGER('t', "threads", &params.nthreads, "Specify amount of threads"),
158 threads_starting = params.nthreads; in block_threads()
165 for (i = 0; i < params.nthreads; i++) { in block_threads()
206 params.nthreads, waketime_avg / USEC_PER_MSEC, in print_run()
221 params.nthreads, in print_summary()
275 if (!params.nthreads) in bench_futex_wake_parallel()
276 params.nthreads = perf_cpu_map__nr(cpu); in bench_futex_wake_parallel()
279 if (params.nwakes > params.nthreads || in bench_futex_wake_parallel()
281 params.nwakes = params.nthreads; in bench_futex_wake_parallel()
283 if (params.nthreads % params.nwakes) in bench_futex_wake_parallel()
[all …]
H A Dfutex-lock-pi.c50 OPT_UINTEGER('t', "threads", &params.nthreads, "Specify amount of threads"),
131 threads_starting = params.nthreads; in create_threads()
137 for (i = 0; i < params.nthreads; i++) { in create_threads()
192 if (!params.nthreads) in bench_futex_lock_pi()
193 params.nthreads = perf_cpu_map__nr(cpu); in bench_futex_lock_pi()
195 worker = calloc(params.nthreads, sizeof(*worker)); in bench_futex_lock_pi()
203 getpid(), params.nthreads, params.runtime); in bench_futex_lock_pi()
211 threads_starting = params.nthreads; in bench_futex_lock_pi()
225 for (i = 0; i < params.nthreads; i++) { in bench_futex_lock_pi()
236 for (i = 0; i < params.nthreads; i++) { in bench_futex_lock_pi()
H A Dfutex-hash.c59 OPT_UINTEGER('t', "threads", &params.nthreads, "Specify amount of threads"),
159 if (!params.nthreads) /* default to the number of CPUs */ in bench_futex_hash()
160 params.nthreads = perf_cpu_map__nr(cpu); in bench_futex_hash()
162 worker = calloc(params.nthreads, sizeof(*worker)); in bench_futex_hash()
171 … getpid(), params.nthreads, params.nfutexes, params.fshared ? "shared":"private", params.runtime); in bench_futex_hash()
178 threads_starting = params.nthreads; in bench_futex_hash()
187 for (i = 0; i < params.nthreads; i++) { in bench_futex_hash()
221 for (i = 0; i < params.nthreads; i++) { in bench_futex_hash()
232 for (i = 0; i < params.nthreads; i++) { in bench_futex_hash()
H A Depoll-ctl.c36 static unsigned int nthreads = 0; variable
75 OPT_UINTEGER('t', "threads", &nthreads, "Specify amount of threads"),
240 for (i = 0; i < nthreads; i++) { in do_threads()
349 if (!nthreads) in bench_epoll_ctl()
350 nthreads = perf_cpu_map__nr(cpu); in bench_epoll_ctl()
352 worker = calloc(nthreads, sizeof(*worker)); in bench_epoll_ctl()
358 rl.rlim_cur = rl.rlim_max = nfds * nthreads * 2 + 50; in bench_epoll_ctl()
366 getpid(), nthreads, nfds, nsecs); in bench_epoll_ctl()
375 threads_starting = nthreads; in bench_epoll_ctl()
391 for (i = 0; i < nthreads; i++) { in bench_epoll_ctl()
[all …]
/linux/arch/powerpc/platforms/pseries/
H A Dhotplug-cpu.c156 static int find_cpu_id_range(unsigned int nthreads, int assigned_node, in find_cpu_id_range() argument
167 for (cpu = 0; cpu < nthreads; cpu++) in find_cpu_id_range()
196 cpumask_shift_left(*cpu_mask, *cpu_mask, nthreads); in find_cpu_id_range()
216 int len, nthreads, node, cpu, assigned_node; in pseries_add_processor() local
225 nthreads = len / sizeof(u32); in pseries_add_processor()
243 rc = find_cpu_id_range(nthreads, node, &cpu_mask); in pseries_add_processor()
249 rc = find_cpu_id_range(nthreads, NUMA_NO_NODE, &cpu_mask); in pseries_add_processor()
275 cpu, cpu + nthreads - 1); in pseries_add_processor()
299 int len, nthreads, i; in pseries_remove_processor() local
307 nthreads = len / sizeof(u32); in pseries_remove_processor()
[all …]
/linux/tools/perf/util/
H A Dcounts.c10 struct perf_counts *perf_counts__new(int ncpus, int nthreads) in perf_counts__new() argument
17 values = xyarray__new(ncpus, nthreads, sizeof(struct perf_counts_values)); in perf_counts__new()
25 values = xyarray__new(ncpus, nthreads, sizeof(bool)); in perf_counts__new()
61 int nthreads = perf_thread_map__nr(evsel->core.threads); in evsel__alloc_counts() local
63 evsel->counts = perf_counts__new(perf_cpu_map__nr(cpus), nthreads); in evsel__alloc_counts()
H A Dtool_pmu.c206 int nthreads) in evsel__tool_pmu_prepare_open() argument
212 nthreads, in evsel__tool_pmu_prepare_open()
227 int pid = -1, idx = 0, thread = 0, nthreads, err = 0, old_errno; in evsel__tool_pmu_open() local
242 nthreads = perf_thread_map__nr(threads); in evsel__tool_pmu_open()
244 for (thread = 0; thread < nthreads; thread++) { in evsel__tool_pmu_open()
301 thread = nthreads; in evsel__tool_pmu_open()
/linux/tools/testing/selftests/ublk/
H A Dtest_stress_03.sh45 ublk_io_and_remove 8G -t null -q 4 --auto_zc --nthreads 8 --per_io_tasks &
46 ublk_io_and_remove 256M -t loop -q 4 --auto_zc --nthreads 8 --per_io_tasks "${UBLK_BACKFILES[0]}" &
47 ublk_io_and_remove 256M -t stripe -q 4 --auto_zc --nthreads 8 --per_io_tasks "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" &
48 ublk_io_and_remove 8G -t null -q 4 -z --auto_zc --auto_zc_fallback --nthreads 8 --per_io_tasks &
H A Dtest_stress_04.sh44 ublk_io_and_kill_daemon 8G -t null -q 4 --auto_zc --nthreads 8 --per_io_tasks &
45 …ublk_io_and_kill_daemon 256M -t loop -q 4 --auto_zc --nthreads 8 --per_io_tasks "${UBLK_BACKFILES[…
46 …ublk_io_and_kill_daemon 256M -t stripe -q 4 --auto_zc --nthreads 8 --per_io_tasks "${UBLK_BACKFILE…
47 …ublk_io_and_kill_daemon 8G -t null -q 4 -z --auto_zc --auto_zc_fallback --nthreads 8 --per_io_task…
H A Dtest_stress_06.sh32 ublk_io_and_remove 8G -t null -q 4 -u --nthreads 8 --per_io_tasks &
33 ublk_io_and_remove 256M -t loop -q 4 -u --nthreads 8 --per_io_tasks "${UBLK_BACKFILES[0]}" &
34 ublk_io_and_remove 256M -t stripe -q 4 -u --nthreads 8 --per_io_tasks "${UBLK_BACKFILES[1]}" "${UBL…
H A Dtest_stress_07.sh32 ublk_io_and_kill_daemon 8G -t null -q 4 -u --nthreads 8 --per_io_tasks &
33 ublk_io_and_kill_daemon 256M -t loop -q 4 -u --nthreads 8 --per_io_tasks "${UBLK_BACKFILES[0]}" &
34 ublk_io_and_kill_daemon 256M -t stripe -q 4 -u --nthreads 8 --per_io_tasks "${UBLK_BACKFILES[1]}" "…
H A Dtest_stress_05.sh76 ublk_io_and_remove 8G -t null -q 4 --nthreads 8 --per_io_tasks -r 1 -i "$reissue" &
77 …ublk_io_and_remove 256M -t loop -q 4 --nthreads 8 --per_io_tasks -r 1 -i "$reissue" "${UBLK_BACKFI…
78 ublk_io_and_remove 8G -t null -q 4 --nthreads 8 --per_io_tasks -r 1 -i "$reissue" &
H A Dkublk.c539 unsigned max_nr_ios_per_thread = nr_ios / dev->nthreads; in ublk_thread_init()
540 max_nr_ios_per_thread += !!(nr_ios % dev->nthreads); in ublk_thread_init()
793 for (i = t->idx; i < nr_ios; i += t->dev->nthreads) { in ublk_submit_fetch_commands()
1101 tinfo = calloc(sizeof(struct ublk_thread_info), dev->nthreads); in ublk_start_daemon()
1115 q_thread_map = calloc(dev->nthreads, sizeof(*q_thread_map)); in ublk_start_daemon()
1120 ublk_batch_setup_map(q_thread_map, dev->nthreads, in ublk_start_daemon()
1142 for (i = 0; i < dev->nthreads; i++) { in ublk_start_daemon()
1157 if (dev->nthreads == dinfo->nr_hw_queues) in ublk_start_daemon()
1164 for (i = 0; i < dev->nthreads; i++) in ublk_start_daemon()
1190 for (i = 0; i < dev->nthreads; i++) in ublk_start_daemon()
[all …]
/linux/kernel/
H A Dscftorture.c54 torture_param(int, nthreads, -1, "# threads, defaults to -1 for all CPUs.");
160 cpu = raw_smp_processor_id() % nthreads; in scf_add_to_free_list()
191 for (i = 0; i < nthreads; i++) { in scf_torture_stats_print()
537 …verbose, holdoff, longwait, nthreads, onoff_holdoff, onoff_interval, shutdown, stat_interval, stut… in scftorture_print_module_parms()
552 if (nthreads && scf_stats_p) in scf_torture_cleanup()
553 for (i = 0; i < nthreads; i++) in scf_torture_cleanup()
662 if (nthreads < 0) in scf_torture_init()
663 nthreads = num_online_cpus(); in scf_torture_init()
664 scf_stats_p = kzalloc_objs(scf_stats_p[0], nthreads); in scf_torture_init()
671 VERBOSE_SCFTORTOUT("Starting %d smp_call_function() threads", nthreads); in scf_torture_init()
[all …]
/linux/fs/nfsd/
H A Dnfssvc.c654 int nfsd_get_nrthreads(int n, int *nthreads, struct net *net) in nfsd_get_nrthreads() argument
662 nthreads[i] = serv->sv_pools[i].sp_nrthreads; in nfsd_get_nrthreads()
679 int nfsd_set_nrthreads(int n, int *nthreads, struct net *net) in nfsd_set_nrthreads() argument
693 return svc_set_num_threads(nn->nfsd_serv, nn->min_threads, nthreads[0]); in nfsd_set_nrthreads()
701 nthreads[i] = min(nthreads[i], NFSD_MAXSERVS); in nfsd_set_nrthreads()
702 tot += nthreads[i]; in nfsd_set_nrthreads()
707 int new = nthreads[i] * NFSD_MAXSERVS / tot; in nfsd_set_nrthreads()
708 tot -= (nthreads[i] - new); in nfsd_set_nrthreads()
709 nthreads[i] = new; in nfsd_set_nrthreads()
712 nthreads[i]--; in nfsd_set_nrthreads()
[all …]
/linux/kernel/kcsan/
H A Dkcsan_test.c1388 long nthreads = (long)prev; in nthreads_gen_params() local
1390 if (nthreads < 0 || nthreads >= 32) in nthreads_gen_params()
1391 nthreads = 0; /* stop */ in nthreads_gen_params()
1392 else if (!nthreads) in nthreads_gen_params()
1393 nthreads = 2; /* initial value */ in nthreads_gen_params()
1394 else if (nthreads < 5) in nthreads_gen_params()
1395 nthreads++; in nthreads_gen_params()
1396 else if (nthreads == 5) in nthreads_gen_params()
1397 nthreads = 8; in nthreads_gen_params()
1399 nthreads *= 2; in nthreads_gen_params()
[all …]
/linux/kernel/locking/
H A Dtest-ww_mutex.c321 static int __test_cycle(struct ww_class *class, unsigned int nthreads) in __test_cycle() argument
324 unsigned int n, last = nthreads - 1; in __test_cycle()
327 cycles = kmalloc_objs(*cycles, nthreads); in __test_cycle()
331 for (n = 0; n < nthreads; n++) { in __test_cycle()
351 for (n = 0; n < nthreads; n++) in __test_cycle()
357 for (n = 0; n < nthreads; n++) { in __test_cycle()
364 n, nthreads, cycle->result); in __test_cycle()
369 for (n = 0; n < nthreads; n++) in __test_cycle()
579 static int stress(struct ww_class *class, int nlocks, int nthreads, unsigned int flags) in stress() argument
589 stress_array = kmalloc_objs(*stress_array, nthreads); in stress()
[all …]
/linux/lib/
H A Dtest_objpool.c39 atomic_t nthreads ____cacheline_aligned_in_smp;
142 atomic_set(&data->nthreads, 1); in ot_init_data()
222 atomic_inc(&test->data.nthreads); in ot_thread_worker()
235 if (atomic_dec_and_test(&test->data.nthreads)) in ot_thread_worker()
244 int cpu, nthreads = 0; in ot_perf_report() local
265 nthreads++; in ot_perf_report()
271 pr_info("ALL: \tnthreads: %d duration: %lluus\n", nthreads, duration); in ot_perf_report()
383 if (atomic_dec_and_test(&test->data.nthreads)) in ot_start_sync()
569 if (atomic_dec_and_test(&test->data.nthreads)) in ot_start_async()
/linux/tools/lib/perf/
H A Devsel.c65 int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads) in perf_evsel__alloc_fd() argument
67 evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int)); in perf_evsel__alloc_fd()
73 for (thread = 0; thread < nthreads; thread++) { in perf_evsel__alloc_fd()
85 static int perf_evsel__alloc_mmap(struct perf_evsel *evsel, int ncpus, int nthreads) in perf_evsel__alloc_mmap() argument
87 evsel->mmap = xyarray__new(ncpus, nthreads, sizeof(struct perf_mmap)); in perf_evsel__alloc_mmap()
526 int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads) in perf_evsel__alloc_id() argument
528 if (ncpus == 0 || nthreads == 0) in perf_evsel__alloc_id()
531 evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id)); in perf_evsel__alloc_id()
535 evsel->id = zalloc(ncpus * nthreads * sizeof(u64)); in perf_evsel__alloc_id()
/linux/arch/x86/kernel/cpu/
H A Dtopology_amd.c98 unsigned int nthreads = leaf.core_nthreads + 1; in parse_8000_001e() local
101 get_count_order(nthreads), nthreads); in parse_8000_001e()

12