// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <linux/bpf.h>
#include <time.h>
#include <stdbool.h>
#include <errno.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";
struct hmap_elem {
	int counter;
	struct bpf_timer timer;
	struct bpf_spin_lock lock; /* unused */
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1000);
	__type(key, int);
	__type(value, struct hmap_elem);
} hmap SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__uint(max_entries, 1000);
	__type(key, int);
	__type(value, struct hmap_elem);
} hmap_malloc SEC(".maps");

struct elem {
	struct bpf_timer t;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 2);
	__type(key, int);
	__type(value, struct elem);
} array SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__uint(max_entries, 4);
	__type(key, int);
	__type(value, struct elem);
} lru SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} abs_timer SEC(".maps"), soft_timer_pinned SEC(".maps"), abs_timer_pinned SEC(".maps"),
	race_array SEC(".maps");

__u64 bss_data;
__u64 abs_data;
__u64 err;
__u64 ok;
__u64 callback_check = 52;
__u64 callback2_check = 52;
__u64 pinned_callback_check;
__s32 pinned_cpu;

#define ARRAY 1
#define HTAB 2
#define HTAB_MALLOC 3
#define LRU 4
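/* bpf_timer lifecycle used throughout this file (summary added for
 * readability): bpf_timer_init() must be called once on a timer embedded
 * in a map value before bpf_timer_set_callback() and
 * bpf_timer_start(timer, nsecs, flags) may be used on it.  Deleting the
 * map element (or freeing the map) cancels the timer automatically.  The
 * tests below record failures by setting distinct bits in the global
 * 'err' bitmask and mark progress via bits in 'ok'.
 */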
/* callback for array and lru timers */
static int timer_cb1(void *map, int *key, struct bpf_timer *timer)
{
	/* increment bss variable twice:
	 * once via the array timer callback and once via the lru timer callback
	 */
	bss_data += 5;

	/* *key == ARRAY (1) - the callback was called for the array timer.
	 * *key == LRU (4) - the callback was called for the lru timer.
	 */
	if (*key == ARRAY) {
		struct bpf_timer *lru_timer;
		int lru_key = LRU;

		/* rearm array timer to be called again in ~35 seconds */
		if (bpf_timer_start(timer, 1ull << 35, 0) != 0)
			err |= 1;

		lru_timer = bpf_map_lookup_elem(&lru, &lru_key);
		if (!lru_timer)
			return 0;
		bpf_timer_set_callback(lru_timer, timer_cb1);
		if (bpf_timer_start(lru_timer, 0, 0) != 0)
			err |= 2;
	} else if (*key == LRU) {
		int lru_key, i;

		for (i = LRU + 1;
		     i <= 100 /* for the current LRU eviction algorithm this
			       * number should be larger than
			       * ~ lru->max_entries * 2
			       */;
		     i++) {
			struct elem init = {};

			/* lru_key cannot be used as the loop induction
			 * variable, otherwise the loop would be unbounded.
			 */
			lru_key = i;

			/* add more elements into the lru map to push out the
			 * current element and force deletion of this timer
			 */
			bpf_map_update_elem(map, &lru_key, &init, 0);
			/* look it up to bump it into the active list */
			bpf_map_lookup_elem(map, &lru_key);

			/* keep adding until *key changes underneath us,
			 * which means the key/timer memory was reused
			 */
			if (*key != LRU)
				break;
		}

		/* check that the timer was removed */
		if (bpf_timer_cancel(timer) != -EINVAL)
			err |= 4;
		ok |= 1;
	}
	return 0;
}

SEC("fentry/bpf_fentry_test1")
int BPF_PROG2(test1, int, a)
{
	struct bpf_timer *arr_timer, *lru_timer;
	struct elem init = {};
	int lru_key = LRU;
	int array_key = ARRAY;

	arr_timer = bpf_map_lookup_elem(&array, &array_key);
	if (!arr_timer)
		return 0;
	bpf_timer_init(arr_timer, &array, CLOCK_MONOTONIC);

	bpf_map_update_elem(&lru, &lru_key, &init, 0);
	lru_timer = bpf_map_lookup_elem(&lru, &lru_key);
	if (!lru_timer)
		return 0;
	bpf_timer_init(lru_timer, &lru, CLOCK_MONOTONIC);

	bpf_timer_set_callback(arr_timer, timer_cb1);
	bpf_timer_start(arr_timer, 0 /* call timer_cb1 asap */, 0);

	/* init more timers to check that array destruction
	 * doesn't leak timer memory.
	 */
	array_key = 0;
	arr_timer = bpf_map_lookup_elem(&array, &array_key);
	if (!arr_timer)
		return 0;
	bpf_timer_init(arr_timer, &array, CLOCK_MONOTONIC);
	return 0;
}
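/* bpf_timer_cancel() return values relied on by the callbacks in this
 * file: 0 if the timer was inactive, 1 if it was active, -EDEADLK when a
 * callback tries to cancel itself, and -EINVAL when the timer is no
 * longer initialized (e.g. its map element was deleted or evicted).
 */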
/* callback for prealloc and non-prealloc hashtab timers */
static int timer_cb2(void *map, int *key, struct hmap_elem *val)
{
	if (*key == HTAB)
		callback_check--;
	else
		callback2_check--;
	if (val->counter > 0 && --val->counter) {
		/* re-arm the timer again to execute after 1 usec */
		bpf_timer_start(&val->timer, 1000, 0);
	} else if (*key == HTAB) {
		struct bpf_timer *arr_timer;
		int array_key = ARRAY;

		/* cancel arr_timer, otherwise the timer armed by the
		 * bpf_fentry_test1 prog would keep rearming forever.
		 */
		arr_timer = bpf_map_lookup_elem(&array, &array_key);
		if (!arr_timer)
			return 0;
		if (bpf_timer_cancel(arr_timer) != 1)
			/* bpf_timer_cancel should return 1 to indicate
			 * that arr_timer was active at this time
			 */
			err |= 8;

		/* try to cancel ourselves. It shouldn't deadlock. */
		if (bpf_timer_cancel(&val->timer) != -EDEADLK)
			err |= 16;

		/* delete this key and this timer anyway.
		 * It shouldn't deadlock either.
		 */
		bpf_map_delete_elem(map, key);

		/* in a preallocated hashmap both 'key' and 'val' could have
		 * been reused to store another map element (like in LRU
		 * above), but in a controlled test environment the test below
		 * works. It's not a use-after-free: the memory is owned by
		 * the map.
		 */
		if (bpf_timer_start(&val->timer, 1000, 0) != -EINVAL)
			err |= 32;
		ok |= 2;
	} else {
		if (*key != HTAB_MALLOC)
			err |= 64;

		/* try to cancel ourselves. It shouldn't deadlock. */
		if (bpf_timer_cancel(&val->timer) != -EDEADLK)
			err |= 128;

		/* delete this key and this timer anyway.
		 * It shouldn't deadlock either.
		 */
		bpf_map_delete_elem(map, key);

		ok |= 4;
	}
	return 0;
}

int bpf_timer_test(void)
{
	struct hmap_elem *val;
	int key = HTAB, key_malloc = HTAB_MALLOC;

	val = bpf_map_lookup_elem(&hmap, &key);
	if (val) {
		if (bpf_timer_init(&val->timer, &hmap, CLOCK_BOOTTIME) != 0)
			err |= 512;
		bpf_timer_set_callback(&val->timer, timer_cb2);
		bpf_timer_start(&val->timer, 1000, 0);
	}
	val = bpf_map_lookup_elem(&hmap_malloc, &key_malloc);
	if (val) {
		if (bpf_timer_init(&val->timer, &hmap_malloc, CLOCK_BOOTTIME) != 0)
			err |= 1024;
		bpf_timer_set_callback(&val->timer, timer_cb2);
		bpf_timer_start(&val->timer, 1000, 0);
	}
	return 0;
}

SEC("fentry/bpf_fentry_test2")
int BPF_PROG2(test2, int, a, int, b)
{
	struct hmap_elem init = {}, *val;
	int key = HTAB, key_malloc = HTAB_MALLOC;

	init.counter = 10; /* number of times to trigger timer_cb2 */
	bpf_map_update_elem(&hmap, &key, &init, 0);
	val = bpf_map_lookup_elem(&hmap, &key);
	if (val)
		bpf_timer_init(&val->timer, &hmap, CLOCK_BOOTTIME);
	/* update the same key to free the timer */
	bpf_map_update_elem(&hmap, &key, &init, 0);

	bpf_map_update_elem(&hmap_malloc, &key_malloc, &init, 0);
	val = bpf_map_lookup_elem(&hmap_malloc, &key_malloc);
	if (val)
		bpf_timer_init(&val->timer, &hmap_malloc, CLOCK_BOOTTIME);
	/* update the same key to free the timer */
	bpf_map_update_elem(&hmap_malloc, &key_malloc, &init, 0);

	/* init more timers to check that htab operations
	 * don't leak timer memory.
	 */
	key = 0;
	bpf_map_update_elem(&hmap, &key, &init, 0);
	val = bpf_map_lookup_elem(&hmap, &key);
	if (val)
		bpf_timer_init(&val->timer, &hmap, CLOCK_BOOTTIME);
	bpf_map_delete_elem(&hmap, &key);
	bpf_map_update_elem(&hmap, &key, &init, 0);
	val = bpf_map_lookup_elem(&hmap, &key);
	if (val)
		bpf_timer_init(&val->timer, &hmap, CLOCK_BOOTTIME);

	/* and with the non-prealloc htab */
	key_malloc = 0;
	bpf_map_update_elem(&hmap_malloc, &key_malloc, &init, 0);
	val = bpf_map_lookup_elem(&hmap_malloc, &key_malloc);
	if (val)
		bpf_timer_init(&val->timer, &hmap_malloc, CLOCK_BOOTTIME);
	bpf_map_delete_elem(&hmap_malloc, &key_malloc);
	bpf_map_update_elem(&hmap_malloc, &key_malloc, &init, 0);
	val = bpf_map_lookup_elem(&hmap_malloc, &key_malloc);
	if (val)
		bpf_timer_init(&val->timer, &hmap_malloc, CLOCK_BOOTTIME);

	return bpf_timer_test();
}

/* callback for absolute timer */
static int timer_cb3(void *map, int *key, struct bpf_timer *timer)
{
	abs_data += 6;

	if (abs_data < 12) {
		bpf_timer_start(timer, bpf_ktime_get_boot_ns() + 1000,
				BPF_F_TIMER_ABS);
	} else {
		/* re-arm the timer ~35 seconds in the future */
		bpf_timer_start(timer, bpf_ktime_get_boot_ns() + (1ull << 35),
				BPF_F_TIMER_ABS);
	}

	return 0;
}

SEC("fentry/bpf_fentry_test3")
int BPF_PROG2(test3, int, a)
{
	int key = 0;
	struct bpf_timer *timer;

	bpf_printk("test3");

	timer = bpf_map_lookup_elem(&abs_timer, &key);
	if (timer) {
		if (bpf_timer_init(timer, &abs_timer, CLOCK_BOOTTIME) != 0)
			err |= 2048;
		bpf_timer_set_callback(timer, timer_cb3);
		bpf_timer_start(timer, bpf_ktime_get_boot_ns() + 1000,
				BPF_F_TIMER_ABS);
	}

	return 0;
}
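/* Flag semantics exercised by test3 above and the pinned timers below:
 * with BPF_F_TIMER_ABS the nsecs argument of bpf_timer_start() is an
 * absolute expiry on the timer's clock (CLOCK_BOOTTIME here) rather than
 * a relative offset, and with BPF_F_TIMER_CPU_PIN the callback runs on
 * the CPU that called bpf_timer_start().
 */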
/* callback for pinned timer */
static int timer_cb_pinned(void *map, int *key, struct bpf_timer *timer)
{
	__s32 cpu = bpf_get_smp_processor_id();

	if (cpu != pinned_cpu)
		err |= 16384;

	pinned_callback_check++;
	return 0;
}

static void test_pinned_timer(bool soft)
{
	int key = 0;
	void *map;
	struct bpf_timer *timer;
	__u64 flags = BPF_F_TIMER_CPU_PIN;
	__u64 start_time;

	if (soft) {
		map = &soft_timer_pinned;
		start_time = 0;
	} else {
		map = &abs_timer_pinned;
		start_time = bpf_ktime_get_boot_ns();
		flags |= BPF_F_TIMER_ABS;
	}

	timer = bpf_map_lookup_elem(map, &key);
	if (timer) {
		if (bpf_timer_init(timer, map, CLOCK_BOOTTIME) != 0)
			err |= 4096;
		bpf_timer_set_callback(timer, timer_cb_pinned);
		pinned_cpu = bpf_get_smp_processor_id();
		bpf_timer_start(timer, start_time + 1000, flags);
	} else {
		err |= 8192;
	}
}

SEC("fentry/bpf_fentry_test4")
int BPF_PROG2(test4, int, a)
{
	bpf_printk("test4");
	test_pinned_timer(true);

	return 0;
}

SEC("fentry/bpf_fentry_test5")
int BPF_PROG2(test5, int, a)
{
	bpf_printk("test5");
	test_pinned_timer(false);

	return 0;
}

static int race_timer_callback(void *race_array, int *race_key, struct bpf_timer *timer)
{
	bpf_timer_start(timer, 1000000, 0);
	return 0;
}

SEC("syscall")
int race(void *ctx)
{
	struct bpf_timer *timer;
	int err, race_key = 0;	/* local 'err' shadows the global bitmask */
	struct elem init;

	__builtin_memset(&init, 0, sizeof(struct elem));
	bpf_map_update_elem(&race_array, &race_key, &init, BPF_ANY);

	timer = bpf_map_lookup_elem(&race_array, &race_key);
	if (!timer)
		return 1;

	err = bpf_timer_init(timer, &race_array, CLOCK_MONOTONIC);
	if (err && err != -EBUSY)
		return 1;

	bpf_timer_set_callback(timer, race_timer_callback);
	bpf_timer_start(timer, 0, 0);
	bpf_timer_cancel(timer);

	return 0;
}
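#if 0
/* Illustrative user-space driver (compiled out, not part of the BPF
 * object): a minimal sketch of how these fentry programs can be
 * exercised, assuming this object is built with clang and a libbpf
 * skeleton is generated into "timer.skel.h" (hypothetical name derived
 * from the object file).  BPF_PROG_TEST_RUN on test1 invokes
 * bpf_fentry_test1 in the kernel, which fires the fentry program above.
 */
#include <unistd.h>
#include <bpf/libbpf.h>
#include "timer.skel.h"

int main(void)
{
	LIBBPF_OPTS(bpf_test_run_opts, topts);
	struct timer *skel;
	int ret;

	skel = timer__open_and_load();
	if (!skel)
		return 1;

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test1),
				     &topts);
	if (ret || topts.retval)
		goto out;

	/* the timer callbacks run asynchronously; poll the state the BPF
	 * side exports via .bss until the lru path sets ok bit 1
	 */
	while (!(skel->bss->ok & 1))
		usleep(1000);
out:
	timer__destroy(skel);
	return ret;
}
#endif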