// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */

#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>

#include "bpf_misc.h"
#include "task_kfunc_common.h"

char _license[] SEC("license") = "GPL";

/* Prototype for all of the program trace events below:
 *
 * TRACE_EVENT(task_newtask,
 *	TP_PROTO(struct task_struct *p, u64 clone_flags)
 */

static struct __tasks_kfunc_map_value *insert_lookup_task(struct task_struct *task)
{
	int status;

	status = tasks_kfunc_map_insert(task);
	if (status)
		return NULL;

	return tasks_kfunc_map_value_lookup(task);
}

SEC("tp_btf/task_newtask")
__failure __msg("Possibly NULL pointer passed to trusted arg0")
int BPF_PROG(task_kfunc_acquire_untrusted, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired;
	struct __tasks_kfunc_map_value *v;

	v = insert_lookup_task(task);
	if (!v)
		return 0;

	/* Can't invoke bpf_task_acquire() on an untrusted pointer. */
	acquired = bpf_task_acquire(v->task);
	if (!acquired)
		return 0;

	bpf_task_release(acquired);

	return 0;
}

SEC("tp_btf/task_newtask")
__failure __msg("arg#0 pointer type STRUCT task_struct must point")
int BPF_PROG(task_kfunc_acquire_fp, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired, *stack_task = (struct task_struct *)&clone_flags;

	/* Can't invoke bpf_task_acquire() on a random frame pointer. */
	acquired = bpf_task_acquire((struct task_struct *)&stack_task);
	if (!acquired)
		return 0;

	bpf_task_release(acquired);

	return 0;
}

SEC("kretprobe/free_task")
__failure __msg("calling kernel function bpf_task_acquire is not allowed")
int BPF_PROG(task_kfunc_acquire_unsafe_kretprobe, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired;

	/* Can't call bpf_task_acquire() or bpf_task_release() in an untrusted prog. */
	acquired = bpf_task_acquire(task);
	if (!acquired)
		return 0;
	bpf_task_release(acquired);

	return 0;
}

SEC("kretprobe/free_task")
__failure __msg("calling kernel function bpf_task_acquire is not allowed")
int BPF_PROG(task_kfunc_acquire_unsafe_kretprobe_rcu, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired;

	bpf_rcu_read_lock();
	if (!task) {
		bpf_rcu_read_unlock();
		return 0;
	}
	/* Can't call bpf_task_acquire() or bpf_task_release() in an untrusted prog. */
	acquired = bpf_task_acquire(task);
	if (acquired)
		bpf_task_release(acquired);
	bpf_rcu_read_unlock();

	return 0;
}

SEC("tp_btf/task_newtask")
__failure __msg("Possibly NULL pointer passed to trusted arg0")
int BPF_PROG(task_kfunc_acquire_null, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired;

	/* Can't invoke bpf_task_acquire() on a NULL pointer. */
	acquired = bpf_task_acquire(NULL);
	if (!acquired)
		return 0;
	bpf_task_release(acquired);

	return 0;
}

SEC("tp_btf/task_newtask")
__failure __msg("Unreleased reference")
int BPF_PROG(task_kfunc_acquire_unreleased, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired;

	acquired = bpf_task_acquire(task);

	/* Acquired task is never released. */
	__sink(acquired);

	return 0;
}
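/* Illustrative sketch, not part of the upstream failure suite: the accepted
 * counterpart to the acquire failures above -- acquire a trusted tracepoint
 * argument, NULL-check the result, and release it before exiting. The program
 * name below is hypothetical; the real positive tests live in
 * task_kfunc_success.c.
 */
SEC("tp_btf/task_newtask")
__success
int BPF_PROG(task_kfunc_acquire_release_sketch, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired;

	/* The tp_btf argument is trusted, so bpf_task_acquire() is permitted. */
	acquired = bpf_task_acquire(task);
	if (!acquired)
		return 0;

	/* Every acquired reference must be released before the program exits. */
	bpf_task_release(acquired);

	return 0;
}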
SEC("tp_btf/task_newtask")
__failure __msg("Unreleased reference")
int BPF_PROG(task_kfunc_xchg_unreleased, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *kptr;
	struct __tasks_kfunc_map_value *v;

	v = insert_lookup_task(task);
	if (!v)
		return 0;

	kptr = bpf_kptr_xchg(&v->task, NULL);
	if (!kptr)
		return 0;

	/* Kptr retrieved from map is never released. */

	return 0;
}

SEC("tp_btf/task_newtask")
__failure __msg("Possibly NULL pointer passed to trusted arg0")
int BPF_PROG(task_kfunc_acquire_release_no_null_check, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired;

	acquired = bpf_task_acquire(task);
	/* Can't invoke bpf_task_release() on an acquired task without a NULL check. */
	bpf_task_release(acquired);

	return 0;
}

SEC("tp_btf/task_newtask")
__failure __msg("Possibly NULL pointer passed to trusted arg0")
int BPF_PROG(task_kfunc_release_untrusted, struct task_struct *task, u64 clone_flags)
{
	struct __tasks_kfunc_map_value *v;

	v = insert_lookup_task(task);
	if (!v)
		return 0;

	/* Can't invoke bpf_task_release() on an untrusted pointer. */
	bpf_task_release(v->task);

	return 0;
}

SEC("tp_btf/task_newtask")
__failure __msg("arg#0 pointer type STRUCT task_struct must point")
int BPF_PROG(task_kfunc_release_fp, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired = (struct task_struct *)&clone_flags;

	/* Cannot release random frame pointer. */
	bpf_task_release(acquired);

	return 0;
}

SEC("tp_btf/task_newtask")
__failure __msg("Possibly NULL pointer passed to trusted arg0")
int BPF_PROG(task_kfunc_release_null, struct task_struct *task, u64 clone_flags)
{
	struct __tasks_kfunc_map_value local, *v;
	long status;
	struct task_struct *acquired, *old;
	s32 pid;

	status = bpf_probe_read_kernel(&pid, sizeof(pid), &task->pid);
	if (status)
		return 0;

	local.task = NULL;
	status = bpf_map_update_elem(&__tasks_kfunc_map, &pid, &local, BPF_NOEXIST);
	if (status)
		return status;

	v = bpf_map_lookup_elem(&__tasks_kfunc_map, &pid);
	if (!v)
		return -ENOENT;

	acquired = bpf_task_acquire(task);
	if (!acquired)
		return -EEXIST;

	old = bpf_kptr_xchg(&v->task, acquired);

	/* old cannot be passed to bpf_task_release() without a NULL check. */
	bpf_task_release(old);

	return 0;
}

SEC("tp_btf/task_newtask")
__failure __msg("release kernel function bpf_task_release expects")
int BPF_PROG(task_kfunc_release_unacquired, struct task_struct *task, u64 clone_flags)
{
	/* Cannot release trusted task pointer which was not acquired. */
	bpf_task_release(task);

	return 0;
}

SEC("tp_btf/task_newtask")
__failure __msg("Possibly NULL pointer passed to trusted arg0")
int BPF_PROG(task_kfunc_from_pid_no_null_check, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired;

	acquired = bpf_task_from_pid(task->pid);

	/* Releasing bpf_task_from_pid() lookup without a NULL check. */
	bpf_task_release(acquired);

	return 0;
}

SEC("tp_btf/task_newtask")
__failure __msg("Possibly NULL pointer passed to trusted arg0")
int BPF_PROG(task_kfunc_from_vpid_no_null_check, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired;

	acquired = bpf_task_from_vpid(task->pid);

	/* Releasing bpf_task_from_vpid() lookup without a NULL check. */
	bpf_task_release(acquired);

	return 0;
}
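/* Illustrative sketch, not part of the upstream failure suite: the accepted
 * bpf_task_from_pid() pattern, for contrast with the missing-NULL-check
 * failures above. The kfunc may return NULL, so the result must be checked
 * before it is released. The program name is hypothetical.
 */
SEC("tp_btf/task_newtask")
__success
int BPF_PROG(task_kfunc_from_pid_sketch, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *lookup;

	lookup = bpf_task_from_pid(task->pid);
	if (!lookup)
		return 0;

	/* The looked-up task carries a reference that must be dropped. */
	bpf_task_release(lookup);

	return 0;
}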
SEC("lsm/task_free")
__failure __msg("R1 must be a rcu pointer")
int BPF_PROG(task_kfunc_from_lsm_task_free, struct task_struct *task)
{
	struct task_struct *acquired;

	/* the argument of lsm task_free hook is untrusted. */
	acquired = bpf_task_acquire(task);
	if (!acquired)
		return 0;

	bpf_task_release(acquired);
	return 0;
}

SEC("tp_btf/task_newtask")
__failure __msg("access beyond the end of member comm")
int BPF_PROG(task_access_comm1, struct task_struct *task, u64 clone_flags)
{
	bpf_strncmp(task->comm, 17, "foo");
	return 0;
}

SEC("tp_btf/task_newtask")
__failure __msg("access beyond the end of member comm")
int BPF_PROG(task_access_comm2, struct task_struct *task, u64 clone_flags)
{
	bpf_strncmp(task->comm + 1, 16, "foo");
	return 0;
}

SEC("tp_btf/task_newtask")
__failure __msg("write into memory")
int BPF_PROG(task_access_comm3, struct task_struct *task, u64 clone_flags)
{
	bpf_probe_read_kernel(task->comm, 16, task->comm);
	return 0;
}

SEC("fentry/__set_task_comm")
__failure __msg("R1 type=ptr_ expected")
int BPF_PROG(task_access_comm4, struct task_struct *task, const char *buf, bool exec)
{
	/*
	 * task->comm is a legacy ptr_to_btf_id. The verifier cannot guarantee
	 * its safety. Hence it cannot be accessed with normal load insns.
	 */
	bpf_strncmp(task->comm, 16, "foo");
	return 0;
}

SEC("tp_btf/task_newtask")
__failure __msg("R1 must be referenced or trusted")
int BPF_PROG(task_kfunc_release_in_map, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *local;
	struct __tasks_kfunc_map_value *v;

	if (tasks_kfunc_map_insert(task))
		return 0;

	v = tasks_kfunc_map_value_lookup(task);
	if (!v)
		return 0;

	bpf_rcu_read_lock();
	local = v->task;
	if (!local) {
		bpf_rcu_read_unlock();
		return 0;
	}
	/* Can't release a kptr that's still stored in a map. */
	bpf_task_release(local);
	bpf_rcu_read_unlock();

	return 0;
}
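/* Illustrative sketch, not part of the upstream failure suite: the accepted
 * counterpart to the xchg/in-map failures above -- move the kptr out of the
 * map with bpf_kptr_xchg(), which transfers ownership to the program, then
 * NULL-check and release it. The program name is hypothetical; the real
 * positive tests live in task_kfunc_success.c.
 */
SEC("tp_btf/task_newtask")
__success
int BPF_PROG(task_kfunc_xchg_release_sketch, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *kptr;
	struct __tasks_kfunc_map_value *v;

	v = insert_lookup_task(task);
	if (!v)
		return 0;

	/* Exchanging NULL into the map hands the stored reference to us... */
	kptr = bpf_kptr_xchg(&v->task, NULL);
	if (!kptr)
		return 0;

	/* ...so it must be released before the program exits. */
	bpf_task_release(kptr);

	return 0;
}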