/* Selftests for per-CPU kptrs (__percpu_kptr) stored in an array map:
 * allocating a percpu object, publishing it into the map, updating and
 * summing its per-CPU copies, and releasing it.
 */
#include "bpf_experimental.h"

/* Per-CPU payload allocated with bpf_percpu_obj_new(). */
struct val_t {
	long b, c, d;
};

/* Map value type: carries the percpu kptr being exercised. */
struct elem {
	long sum;
	struct val_t __percpu_kptr *pc;
};

/* Single-slot array map whose value embeds the percpu kptr. */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} array SEC(".maps");

/* Kernel kfuncs for an explicit RCU read-side critical section
 * (needed below in the sleepable program, test_array_map_10).
 */
void bpf_rcu_read_lock(void) __ksym;
void bpf_rcu_read_unlock(void) __ksym;

/* Number of CPUs to iterate; presumably set by the user-space side of
 * the test before load — confirm against the loader.
 */
const volatile int nr_cpus;

/* Initialize the percpu object */
SEC("?fentry/bpf_fentry_test1")
int BPF_PROG(test_array_map_1)
{
	struct val_t __percpu_kptr *p;
	struct elem *e;
	int index = 0;

	e = bpf_map_lookup_elem(&array, &index);
	if (!e)
		return 0;

	/* Allocate a fresh percpu object; ownership is ours until it is
	 * successfully published into the map.
	 */
	p = bpf_percpu_obj_new(struct val_t);
	if (!p)
		return 0;

	/* Publish into the map slot. bpf_kptr_xchg() hands back the old
	 * kptr (if any), which we now own and must drop to avoid a leak.
	 */
	p = bpf_kptr_xchg(&e->pc, p);
	if (p)
		bpf_percpu_obj_drop(p);

	return 0;
}

/* Update percpu data */
SEC("?fentry/bpf_fentry_test2")
int BPF_PROG(test_array_map_2)
{
	struct val_t __percpu_kptr *p;
	struct val_t *v;
	struct elem *e;
	int index = 0;

	e = bpf_map_lookup_elem(&array, &index);
	if (!e)
		return 0;

	/* Non-owning read of the kptr straight from the map value. */
	p = e->pc;
	if (!p)
		return 0;

	/* Write through CPU 0's copy only. */
	v = bpf_per_cpu_ptr(p, 0);
	if (!v)
		return 0;
	v->c = 1;
	v->d = 2;

	return 0;
}

/* Results exposed to user space: d as seen on CPU 0, and the sum of c
 * across all CPUs — presumably checked by the test harness after the
 * programs run; verify against the user-space side.
 */
int cpu0_field_d, sum_field_c;

/* Summarize percpu data */
SEC("?fentry/bpf_fentry_test3")
int BPF_PROG(test_array_map_3)
{
	struct val_t __percpu_kptr *p;
	int i, index = 0;
	struct val_t *v;
	struct elem *e;

	e = bpf_map_lookup_elem(&array, &index);
	if (!e)
		return 0;

	p = e->pc;
	if (!p)
		return 0;

	/* Walk every CPU's copy, recording CPU 0's d and accumulating c. */
	bpf_for(i, 0, nr_cpus) {
		v = bpf_per_cpu_ptr(p, i);
		if (v) {
			if (i == 0)
				cpu0_field_d = v->d;
			sum_field_c += v->c;
		}
	}

	return 0;
}

/* Explicitly free allocated percpu data */
SEC("?fentry/bpf_fentry_test4")
int BPF_PROG(test_array_map_4)
{
	struct val_t __percpu_kptr *p;
	struct elem *e;
	int index = 0;

	e = bpf_map_lookup_elem(&array, &index);
	if (!e)
		return 0;

	/* delete: swap NULL into the slot to take ownership of the old
	 * kptr, then drop it.
	 */
	p = bpf_kptr_xchg(&e->pc, NULL);
	if (p) {
		bpf_percpu_obj_drop(p);
	}

	return 0;
}

/* Combined lifecycle test in a sleepable program ("fentry.s"), which is
 * why the non-owning e->pc accesses are bracketed by an explicit RCU
 * read-side critical section.
 */
SEC("?fentry.s/bpf_fentry_test1")
int BPF_PROG(test_array_map_10)
{
	struct val_t __percpu_kptr *p, *p1;
	int i, index = 0;
	struct val_t *v;
	struct elem *e;

	e = bpf_map_lookup_elem(&array, &index);
	if (!e)
		return 0;

	bpf_rcu_read_lock();
	p = e->pc;
	if (!p) {
		/* Slot empty: allocate and try to publish our object. */
		p = bpf_percpu_obj_new(struct val_t);
		if (!p)
			goto out;

		p1 = bpf_kptr_xchg(&e->pc, p);
		if (p1) {
			/* race condition: someone else published first;
			 * drop the object we displaced.
			 */
			bpf_percpu_obj_drop(p1);
		}
	}

	/* Touch the current CPU's copy (write then reset c). */
	v = bpf_this_cpu_ptr(p);
	v->c = 3;
	v = bpf_this_cpu_ptr(p);
	v->c = 0;

	/* Seed CPU 0's copy with the values summarized below. */
	v = bpf_per_cpu_ptr(p, 0);
	if (!v)
		goto out;
	v->c = 1;
	v->d = 2;

	/* delete: take ownership back out of the map. */
	p1 = bpf_kptr_xchg(&e->pc, NULL);
	if (!p1)
		goto out;

	/* p is still safe to read here: it refers to the same object as
	 * p1 (absent a racing xchg) and the RCU section is still open.
	 */
	bpf_for(i, 0, nr_cpus) {
		v = bpf_per_cpu_ptr(p, i);
		if (v) {
			if (i == 0)
				cpu0_field_d = v->d;
			sum_field_c += v->c;
		}
	}

	/* finally release p */
	bpf_percpu_obj_drop(p1);
out:
	bpf_rcu_read_unlock();
	return 0;
}

char _license[] SEC("license") = "GPL";