1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2013 Davidlohr Bueso <davidlohr@hp.com> 4 * 5 * futex-hash: Stress the hell out of the Linux kernel futex uaddr hashing. 6 * 7 * This program is particularly useful for measuring the kernel's futex hash 8 * table/function implementation. In order for it to make sense, use with as 9 * many threads and futexes as possible. 10 */ 11 12 /* For the CLR_() macros */ 13 #include <string.h> 14 #include <pthread.h> 15 16 #include <errno.h> 17 #include <signal.h> 18 #include <stdlib.h> 19 #include <linux/compiler.h> 20 #include <linux/kernel.h> 21 #include <linux/zalloc.h> 22 #include <sys/time.h> 23 #include <sys/mman.h> 24 #include <sys/prctl.h> 25 #include <perf/cpumap.h> 26 27 #include "../util/mutex.h" 28 #include "../util/stat.h" 29 #include <subcmd/parse-options.h> 30 #include "bench.h" 31 #include "futex.h" 32 33 #include <err.h> 34 35 static bool done = false; 36 static int futex_flag = 0; 37 38 struct timeval bench__start, bench__end, bench__runtime; 39 static struct mutex thread_lock; 40 static unsigned int threads_starting; 41 static struct stats throughput_stats; 42 static struct cond thread_parent, thread_worker; 43 44 struct worker { 45 int tid; 46 u_int32_t *futex; 47 pthread_t thread; 48 unsigned long ops; 49 }; 50 51 static struct bench_futex_parameters params = { 52 .nfutexes = 1024, 53 .runtime = 10, 54 .nbuckets = -1, 55 }; 56 57 static const struct option options[] = { 58 OPT_INTEGER( 'b', "buckets", ¶ms.nbuckets, "Specify amount of hash buckets"), 59 OPT_UINTEGER('t', "threads", ¶ms.nthreads, "Specify amount of threads"), 60 OPT_UINTEGER('r', "runtime", ¶ms.runtime, "Specify runtime (in seconds)"), 61 OPT_UINTEGER('f', "futexes", ¶ms.nfutexes, "Specify amount of futexes per threads"), 62 OPT_BOOLEAN( 's', "silent", ¶ms.silent, "Silent mode: do not display data/details"), 63 OPT_BOOLEAN( 'S', "shared", ¶ms.fshared, "Use shared futexes instead of private ones"), 64 OPT_BOOLEAN( 'm', "mlockall", ¶ms.mlockall, "Lock all current and future memory"), 65 OPT_END() 66 }; 67 68 static const char * const bench_futex_hash_usage[] = { 69 "perf bench futex hash <options>", 70 NULL 71 }; 72 73 static void *workerfn(void *arg) 74 { 75 int ret; 76 struct worker *w = (struct worker *) arg; 77 unsigned int i; 78 unsigned long ops = w->ops; /* avoid cacheline bouncing */ 79 80 mutex_lock(&thread_lock); 81 threads_starting--; 82 if (!threads_starting) 83 cond_signal(&thread_parent); 84 cond_wait(&thread_worker, &thread_lock); 85 mutex_unlock(&thread_lock); 86 87 do { 88 for (i = 0; i < params.nfutexes; i++, ops++) { 89 /* 90 * We want the futex calls to fail in order to stress 91 * the hashing of uaddr and not measure other steps, 92 * such as internal waitqueue handling, thus enlarging 93 * the critical region protected by hb->lock. 94 */ 95 ret = futex_wait(&w->futex[i], 1234, NULL, futex_flag); 96 if (!params.silent && 97 (!ret || errno != EAGAIN || errno != EWOULDBLOCK)) 98 warn("Non-expected futex return call"); 99 } 100 } while (!done); 101 102 w->ops = ops; 103 return NULL; 104 } 105 106 static void toggle_done(int sig __maybe_unused, 107 siginfo_t *info __maybe_unused, 108 void *uc __maybe_unused) 109 { 110 /* inform all threads that we're done for the day */ 111 done = true; 112 gettimeofday(&bench__end, NULL); 113 timersub(&bench__end, &bench__start, &bench__runtime); 114 } 115 116 static void print_summary(void) 117 { 118 unsigned long avg = avg_stats(&throughput_stats); 119 double stddev = stddev_stats(&throughput_stats); 120 121 printf("%sAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n", 122 !params.silent ? "\n" : "", avg, rel_stddev_stats(stddev, avg), 123 (int)bench__runtime.tv_sec); 124 futex_print_nbuckets(¶ms); 125 } 126 127 int bench_futex_hash(int argc, const char **argv) 128 { 129 int ret = 0; 130 cpu_set_t *cpuset; 131 struct sigaction act; 132 unsigned int i; 133 pthread_attr_t thread_attr; 134 struct worker *worker = NULL; 135 struct perf_cpu_map *cpu; 136 int nrcpus; 137 size_t size; 138 139 argc = parse_options(argc, argv, options, bench_futex_hash_usage, 0); 140 if (argc) { 141 usage_with_options(bench_futex_hash_usage, options); 142 exit(EXIT_FAILURE); 143 } 144 145 cpu = perf_cpu_map__new_online_cpus(); 146 if (!cpu) 147 goto errmem; 148 149 memset(&act, 0, sizeof(act)); 150 sigfillset(&act.sa_mask); 151 act.sa_sigaction = toggle_done; 152 sigaction(SIGINT, &act, NULL); 153 154 if (params.mlockall) { 155 if (mlockall(MCL_CURRENT | MCL_FUTURE)) 156 err(EXIT_FAILURE, "mlockall"); 157 } 158 159 if (!params.nthreads) /* default to the number of CPUs */ 160 params.nthreads = perf_cpu_map__nr(cpu); 161 162 worker = calloc(params.nthreads, sizeof(*worker)); 163 if (!worker) 164 goto errmem; 165 166 if (!params.fshared) 167 futex_flag = FUTEX_PRIVATE_FLAG; 168 futex_set_nbuckets_param(¶ms); 169 170 printf("Run summary [PID %d]: %d threads, each operating on %d [%s] futexes for %d secs.\n\n", 171 getpid(), params.nthreads, params.nfutexes, params.fshared ? "shared":"private", params.runtime); 172 173 init_stats(&throughput_stats); 174 mutex_init(&thread_lock); 175 cond_init(&thread_parent); 176 cond_init(&thread_worker); 177 178 threads_starting = params.nthreads; 179 pthread_attr_init(&thread_attr); 180 gettimeofday(&bench__start, NULL); 181 182 nrcpus = cpu__max_cpu().cpu; 183 cpuset = CPU_ALLOC(nrcpus); 184 BUG_ON(!cpuset); 185 size = CPU_ALLOC_SIZE(nrcpus); 186 187 for (i = 0; i < params.nthreads; i++) { 188 worker[i].tid = i; 189 worker[i].futex = calloc(params.nfutexes, sizeof(*worker[i].futex)); 190 if (!worker[i].futex) 191 goto errmem; 192 193 CPU_ZERO_S(size, cpuset); 194 195 CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset); 196 ret = pthread_attr_setaffinity_np(&thread_attr, size, cpuset); 197 if (ret) { 198 CPU_FREE(cpuset); 199 err(EXIT_FAILURE, "pthread_attr_setaffinity_np"); 200 } 201 ret = pthread_create(&worker[i].thread, &thread_attr, workerfn, 202 (void *)(struct worker *) &worker[i]); 203 if (ret) { 204 CPU_FREE(cpuset); 205 err(EXIT_FAILURE, "pthread_create"); 206 } 207 208 } 209 CPU_FREE(cpuset); 210 pthread_attr_destroy(&thread_attr); 211 212 mutex_lock(&thread_lock); 213 while (threads_starting) 214 cond_wait(&thread_parent, &thread_lock); 215 cond_broadcast(&thread_worker); 216 mutex_unlock(&thread_lock); 217 218 sleep(params.runtime); 219 toggle_done(0, NULL, NULL); 220 221 for (i = 0; i < params.nthreads; i++) { 222 ret = pthread_join(worker[i].thread, NULL); 223 if (ret) 224 err(EXIT_FAILURE, "pthread_join"); 225 } 226 227 /* cleanup & report results */ 228 cond_destroy(&thread_parent); 229 cond_destroy(&thread_worker); 230 mutex_destroy(&thread_lock); 231 232 for (i = 0; i < params.nthreads; i++) { 233 unsigned long t = bench__runtime.tv_sec > 0 ? 234 worker[i].ops / bench__runtime.tv_sec : 0; 235 update_stats(&throughput_stats, t); 236 if (!params.silent) { 237 if (params.nfutexes == 1) 238 printf("[thread %2d] futex: %p [ %ld ops/sec ]\n", 239 worker[i].tid, &worker[i].futex[0], t); 240 else 241 printf("[thread %2d] futexes: %p ... %p [ %ld ops/sec ]\n", 242 worker[i].tid, &worker[i].futex[0], 243 &worker[i].futex[params.nfutexes-1], t); 244 } 245 246 zfree(&worker[i].futex); 247 } 248 249 print_summary(); 250 251 free(worker); 252 free(cpu); 253 return ret; 254 errmem: 255 err(EXIT_FAILURE, "calloc"); 256 } 257