// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2022. Huawei Technologies Co., Ltd */
#define _GNU_SOURCE
#include <sched.h>
#include <stdbool.h>
#include <test_progs.h>
#include "htab_update.skel.h"

struct htab_update_ctx {
	int fd;
	int loop;
	bool stop;
};

static void test_reenter_update(void)
{
	struct htab_update *skel;
	void *value = NULL;
	unsigned int key, value_size;
	int err;

	skel = htab_update__open();
	if (!ASSERT_OK_PTR(skel, "htab_update__open"))
		return;

	bpf_program__set_autoload(skel->progs.bpf_obj_free_fields, true);
	err = htab_update__load(skel);
	if (!ASSERT_OK(err, "htab_update__load"))
		goto out;

	skel->bss->pid = getpid();
	err = htab_update__attach(skel);
	if (!ASSERT_OK(err, "htab_update__attach"))
		goto out;

	value_size = bpf_map__value_size(skel->maps.htab);

	value = calloc(1, value_size);
	if (!ASSERT_OK_PTR(value, "calloc value"))
		goto out;

	/*
	 * First update: plain insert. This should NOT trigger the re-entrancy
	 * path, because there is no old element to free yet.
	 */
	key = 0;
	err = bpf_map_update_elem(bpf_map__fd(skel->maps.htab), &key, value, BPF_ANY);
	if (!ASSERT_OK(err, "first update (insert)"))
		goto out;

	/*
	 * Second update: replace the existing element with the same key and
	 * trigger the re-entrancy of bpf_map_update_elem().
	 * check_and_free_fields() calls bpf_obj_free_fields() on the old
	 * value, which is where the fentry program runs and performs a nested
	 * bpf_map_update_elem(), triggering -EDEADLK.
	 */
	memset(value, 0, value_size);
	err = bpf_map_update_elem(bpf_map__fd(skel->maps.htab), &key, value, BPF_ANY);
	if (!ASSERT_OK(err, "second update (replace)"))
		goto out;

	ASSERT_EQ(skel->bss->update_err, -EDEADLK, "no reentrancy");
out:
	free(value);
	htab_update__destroy(skel);
}

static void *htab_update_thread(void *arg)
{
	struct htab_update_ctx *ctx = arg;
	cpu_set_t cpus;
	int i;

	/* Pinned on CPU 0 */
	CPU_ZERO(&cpus);
	CPU_SET(0, &cpus);
	pthread_setaffinity_np(pthread_self(), sizeof(cpus), &cpus);

	i = 0;
	while (i++ < ctx->loop && !ctx->stop) {
		unsigned int key = 0, value = 0;
		int err;

		err = bpf_map_update_elem(ctx->fd, &key, &value, 0);
		if (err) {
			ctx->stop = true;
			return (void *)(long)err;
		}
	}

	return NULL;
}
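
/*
 * Spawn several updater threads, all pinned to CPU 0, and let each one
 * update the same key in a tight loop. Every update is expected to
 * succeed; any error returned by bpf_map_update_elem() stops the other
 * threads and fails the subtest.
 */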
static void test_concurrent_update(void)
{
	struct htab_update_ctx ctx;
	struct htab_update *skel;
	unsigned int i, nr;
	pthread_t *tids;
	int err;

	skel = htab_update__open_and_load();
	if (!ASSERT_OK_PTR(skel, "htab_update__open_and_load"))
		return;

	ctx.fd = bpf_map__fd(skel->maps.htab);
	ctx.loop = 1000;
	ctx.stop = false;

	nr = 4;
	tids = calloc(nr, sizeof(*tids));
	if (!ASSERT_NEQ(tids, NULL, "no mem"))
		goto out;

	for (i = 0; i < nr; i++) {
		err = pthread_create(&tids[i], NULL, htab_update_thread, &ctx);
		if (!ASSERT_OK(err, "pthread_create")) {
			unsigned int j;

			ctx.stop = true;
			for (j = 0; j < i; j++)
				pthread_join(tids[j], NULL);
			goto out;
		}
	}

	for (i = 0; i < nr; i++) {
		void *thread_err = NULL;

		pthread_join(tids[i], &thread_err);
		ASSERT_EQ(thread_err, NULL, "update error");
	}

out:
	free(tids);
	htab_update__destroy(skel);
}

void test_htab_update(void)
{
	if (test__start_subtest("reenter_update"))
		test_reenter_update();
	if (test__start_subtest("concurrent_update"))
		test_concurrent_update();
}