xref: /linux/tools/perf/bench/futex-lock-pi.c (revision b3570b00dc3062c5a5e8d9602b923618d679636a)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Davidlohr Bueso.
 */
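
/*
 * Benchmark throughput of priority-inheritance futex lock/unlock pairs
 * (futex_lock_pi()/futex_unlock_pi()).
 *
 * Illustrative invocation (the options are defined below):
 *
 *	perf bench futex lock-pi -t 8 -r 10 -M
 *
 * runs 8 threads for 10 seconds, each thread locking its own futex.
 */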

/* For the CLR_() macros */
#include <string.h>
#include <pthread.h>

#include <signal.h>
#include "../util/mutex.h"
#include "../util/stat.h"
#include <subcmd/parse-options.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <errno.h>
#include <perf/cpumap.h>
#include "bench.h"
#include "futex.h"

#include <err.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/mman.h>

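/*
 * Per-thread state: worker id, the futex word this thread locks and
 * unlocks, a pthread handle, and the number of completed lock/unlock pairs.
 */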
struct worker {
	int tid;
	u_int32_t *futex;
	pthread_t thread;
	unsigned long ops;
};

static u_int32_t global_futex = 0;
static struct worker *worker;
static bool done = false;
static int futex_flag = 0;
static struct mutex thread_lock;
static unsigned int threads_starting;
static struct stats throughput_stats;
static struct cond thread_parent, thread_worker;

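/* Defaults: run for 10 seconds; nbuckets = -1 means no explicit hash-bucket count was requested. */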
static struct bench_futex_parameters params = {
	.nbuckets = -1,
	.runtime  = 10,
};

static const struct option options[] = {
	OPT_INTEGER( 'b', "buckets", &params.nbuckets, "Specify the number of hash buckets"),
	OPT_BOOLEAN( 'I', "immutable", &params.buckets_immutable, "Make the hash buckets immutable"),
	OPT_UINTEGER('t', "threads", &params.nthreads, "Specify the number of threads"),
	OPT_UINTEGER('r', "runtime", &params.runtime, "Specify runtime (in seconds)"),
	OPT_BOOLEAN( 'M', "multi",   &params.multi, "Use multiple futexes"),
	OPT_BOOLEAN( 's', "silent",  &params.silent, "Silent mode: do not display data/details"),
	OPT_BOOLEAN( 'S', "shared",  &params.fshared, "Use shared futexes instead of private ones"),
	OPT_BOOLEAN( 'm', "mlockall", &params.mlockall, "Lock all current and future memory"),
	OPT_END()
};

static const char * const bench_futex_lock_pi_usage[] = {
	"perf bench futex lock-pi <options>",
	NULL
};

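/*
 * Report the mean per-thread throughput, its relative standard deviation,
 * and (via futex_print_nbuckets()) the hash-bucket configuration used.
 */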
static void print_summary(void)
{
	unsigned long avg = avg_stats(&throughput_stats);
	double stddev = stddev_stats(&throughput_stats);

	printf("%sAveraged %lu operations/sec (+- %.2f%%), total secs = %d\n",
	       !params.silent ? "\n" : "", avg, rel_stddev_stats(stddev, avg),
	       (int)bench__runtime.tv_sec);
	futex_print_nbuckets(&params);
}

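/*
 * SIGINT handler, also called directly once the runtime expires: tell the
 * workers to stop and record how long the measurement actually ran.
 */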
static void toggle_done(int sig __maybe_unused,
			siginfo_t *info __maybe_unused,
			void *uc __maybe_unused)
{
	/* inform all threads that we're done for the day */
	done = true;
	gettimeofday(&bench__end, NULL);
	timersub(&bench__end, &bench__start, &bench__runtime);
}

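/*
 * Worker body: check in with the parent, wait for the collective start
 * signal, then repeatedly lock the PI futex, hold it for ~1us and unlock
 * it until toggle_done() fires, counting completed lock/unlock pairs.
 */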
static void *workerfn(void *arg)
{
	struct worker *w = (struct worker *) arg;
	unsigned long ops = w->ops;

	mutex_lock(&thread_lock);
	threads_starting--;
	if (!threads_starting)
		cond_signal(&thread_parent);
	cond_wait(&thread_worker, &thread_lock);
	mutex_unlock(&thread_lock);

	do {
		int ret;
	again:
		ret = futex_lock_pi(w->futex, NULL, futex_flag);

		if (ret) { /* handle lock acquisition */
			if (!params.silent)
				warn("thread %d: Could not lock pi-lock for %p (%d)",
				     w->tid, w->futex, ret);
			if (done)
				break;

			goto again;
		}

		usleep(1);
		ret = futex_unlock_pi(w->futex, futex_flag);
		if (ret && !params.silent)
			warn("thread %d: Could not unlock pi-lock for %p (%d)",
			     w->tid, w->futex, ret);
		ops++; /* account for thread's share of work */
	} while (!done);

	w->ops = ops;
	return NULL;
}

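/*
 * Spawn params.nthreads workers, pinning each one to an online CPU in
 * round-robin order. With --multi every worker gets its own futex word,
 * otherwise they all contend on global_futex.
 */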
static void create_threads(struct worker *w, struct perf_cpu_map *cpu)
{
	cpu_set_t *cpuset;
	unsigned int i;
	int nrcpus = cpu__max_cpu().cpu;
	size_t size;

	threads_starting = params.nthreads;

	cpuset = CPU_ALLOC(nrcpus);
	BUG_ON(!cpuset);
	size = CPU_ALLOC_SIZE(nrcpus);

	for (i = 0; i < params.nthreads; i++) {
		pthread_attr_t thread_attr;

		pthread_attr_init(&thread_attr);
		worker[i].tid = i;

		if (params.multi) {
			worker[i].futex = calloc(1, sizeof(u_int32_t));
			if (!worker[i].futex)
				err(EXIT_FAILURE, "calloc");
		} else {
			worker[i].futex = &global_futex;
		}

		CPU_ZERO_S(size, cpuset);
		CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);

		if (pthread_attr_setaffinity_np(&thread_attr, size, cpuset)) {
			CPU_FREE(cpuset);
			err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
		}

		if (pthread_create(&w[i].thread, &thread_attr, workerfn, &worker[i])) {
			CPU_FREE(cpuset);
			err(EXIT_FAILURE, "pthread_create");
		}
		pthread_attr_destroy(&thread_attr);
	}
	CPU_FREE(cpuset);
}

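/*
 * Benchmark entry point: parse options, install the SIGINT handler,
 * create the workers, let them run for params.runtime seconds, then
 * join them and report per-thread and averaged throughput.
 */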
int bench_futex_lock_pi(int argc, const char **argv)
{
	int ret = 0;
	unsigned int i;
	struct sigaction act;
	struct perf_cpu_map *cpu;

	argc = parse_options(argc, argv, options, bench_futex_lock_pi_usage, 0);
	if (argc)
		goto err;

	cpu = perf_cpu_map__new_online_cpus();
	if (!cpu)
		err(EXIT_FAILURE, "calloc");

	memset(&act, 0, sizeof(act));
	sigfillset(&act.sa_mask);
	act.sa_sigaction = toggle_done;
	sigaction(SIGINT, &act, NULL);

	if (params.mlockall) {
		if (mlockall(MCL_CURRENT | MCL_FUTURE))
			err(EXIT_FAILURE, "mlockall");
	}

	if (!params.nthreads)
		params.nthreads = perf_cpu_map__nr(cpu);

	worker = calloc(params.nthreads, sizeof(*worker));
	if (!worker)
		err(EXIT_FAILURE, "calloc");

	if (!params.fshared)
		futex_flag = FUTEX_PRIVATE_FLAG;

	printf("Run summary [PID %d]: %u threads doing pi lock/unlock pairing for %u secs.\n\n",
	       getpid(), params.nthreads, params.runtime);

	init_stats(&throughput_stats);
	mutex_init(&thread_lock);
	cond_init(&thread_parent);
	cond_init(&thread_worker);
	futex_set_nbuckets_param(&params);

	threads_starting = params.nthreads;
	gettimeofday(&bench__start, NULL);

	create_threads(worker, cpu);

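	/*
	 * Barrier: wait until every worker has checked in, then release
	 * them all at once so the measurement window starts together.
	 */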
	mutex_lock(&thread_lock);
	while (threads_starting)
		cond_wait(&thread_parent, &thread_lock);
	cond_broadcast(&thread_worker);
	mutex_unlock(&thread_lock);

	sleep(params.runtime);
	toggle_done(0, NULL, NULL);

	for (i = 0; i < params.nthreads; i++) {
		ret = pthread_join(worker[i].thread, NULL);
		if (ret)
			err(EXIT_FAILURE, "pthread_join");
	}

	/* cleanup & report results */
	cond_destroy(&thread_parent);
	cond_destroy(&thread_worker);
	mutex_destroy(&thread_lock);

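	/*
	 * Convert each worker's operation count into ops/sec over the
	 * measured runtime and fold it into the summary statistics.
	 */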
	for (i = 0; i < params.nthreads; i++) {
		unsigned long t = bench__runtime.tv_sec > 0 ?
			worker[i].ops / bench__runtime.tv_sec : 0;

		update_stats(&throughput_stats, t);
		if (!params.silent)
			printf("[thread %3d] futex: %p [ %lu ops/sec ]\n",
			       worker[i].tid, worker[i].futex, t);

		if (params.multi)
			zfree(&worker[i].futex);
	}

	print_summary();

	free(worker);
	perf_cpu_map__put(cpu);
	return ret;
err:
	usage_with_options(bench_futex_lock_pi_usage, options);
	exit(EXIT_FAILURE);
}