// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2020 Facebook
 * Copyright 2020 Google LLC.
 */

#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/bpf_local_storage.h>
#include <linux/filter.h>
#include <uapi/linux/btf.h>
#include <linux/bpf_lsm.h>
#include <linux/btf_ids.h>
#include <linux/fdtable.h>

DEFINE_BPF_STORAGE_CACHE(task_cache);

static struct bpf_local_storage __rcu **task_storage_ptr(void *owner)
{
	struct task_struct *task = owner;
	struct bpf_storage_blob *bsb;

	bsb = bpf_task(task);
	if (!bsb)
		return NULL;
	return &bsb->storage;
}

static struct bpf_local_storage_data *
task_storage_lookup(struct task_struct *task, struct bpf_map *map,
		    bool cacheit_lockit)
{
	struct bpf_local_storage *task_storage;
	struct bpf_local_storage_map *smap;
	struct bpf_storage_blob *bsb;

	bsb = bpf_task(task);
	if (!bsb)
		return NULL;

	task_storage = rcu_dereference(bsb->storage);
	if (!task_storage)
		return NULL;

	smap = (struct bpf_local_storage_map *)map;
	return bpf_local_storage_lookup(task_storage, smap, cacheit_lockit);
}

void bpf_task_storage_free(struct task_struct *task)
{
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage *local_storage;
	bool free_task_storage = false;
	struct bpf_storage_blob *bsb;
	struct hlist_node *n;

	bsb = bpf_task(task);
	if (!bsb)
		return;

	rcu_read_lock();

	local_storage = rcu_dereference(bsb->storage);
	if (!local_storage) {
		rcu_read_unlock();
		return;
	}

	/* Neither the bpf_prog nor the bpf map's syscall path can be
	 * modifying the local_storage->list now. Thus, no elem can be
	 * added to or deleted from the local_storage->list by the
	 * bpf_prog or by the bpf map's syscall.
	 *
	 * It is racing with bpf_local_storage_map_free() alone
	 * when unlinking elem from the local_storage->list and
	 * the map's bucket->list.
	 */
	raw_spin_lock_bh(&local_storage->lock);
	hlist_for_each_entry_safe(selem, n, &local_storage->list, snode) {
		/* Always unlink from map before unlinking from
		 * local_storage.
		 */
		bpf_selem_unlink_map(selem);
		free_task_storage = bpf_selem_unlink_storage_nolock(
			local_storage, selem, false);
	}
	raw_spin_unlock_bh(&local_storage->lock);
	rcu_read_unlock();

	/* free_task_storage should always be true as long as
	 * local_storage->list was non-empty.
	 */
	if (free_task_storage)
		kfree_rcu(local_storage, rcu);
}

static void *bpf_pid_task_storage_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_local_storage_data *sdata;
	struct task_struct *task;
	unsigned int f_flags;
	struct pid *pid;
	int fd, err;

	fd = *(int *)key;
	pid = pidfd_get_pid(fd, &f_flags);
	if (IS_ERR(pid))
		return ERR_CAST(pid);

	/* We should be in an RCU read-side critical section, so it
	 * should be safe to call pid_task.
	 */
	WARN_ON_ONCE(!rcu_read_lock_held());
	task = pid_task(pid, PIDTYPE_PID);
	if (!task) {
		err = -ENOENT;
		goto out;
	}

	sdata = task_storage_lookup(task, map, true);
	put_pid(pid);
	return sdata ? sdata->data : NULL;
out:
	put_pid(pid);
	return ERR_PTR(err);
}
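
/*
 * Usage sketch (illustrative, not part of this file): the syscall-side ops
 * here take a pidfd as the map key. Assuming libbpf and a map_fd for a
 * BPF_MAP_TYPE_TASK_STORAGE map with a u64 value (target_pid, map_fd and
 * the variable names are hypothetical), a user-space lookup might look
 * like:
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <bpf/bpf.h>
 *
 *	int pidfd = syscall(SYS_pidfd_open, target_pid, 0);
 *	__u64 value;
 *
 *	if (pidfd >= 0 && !bpf_map_lookup_elem(map_fd, &pidfd, &value))
 *		printf("task storage value: %llu\n", value);
 *	close(pidfd);
 */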

static int bpf_pid_task_storage_update_elem(struct bpf_map *map, void *key,
					    void *value, u64 map_flags)
{
	struct bpf_local_storage_data *sdata;
	struct task_struct *task;
	unsigned int f_flags;
	struct pid *pid;
	int fd, err;

	fd = *(int *)key;
	pid = pidfd_get_pid(fd, &f_flags);
	if (IS_ERR(pid))
		return PTR_ERR(pid);

	/* We should be in an RCU read-side critical section, so it
	 * should be safe to call pid_task.
	 */
	WARN_ON_ONCE(!rcu_read_lock_held());
	task = pid_task(pid, PIDTYPE_PID);
	if (!task || !task_storage_ptr(task)) {
		err = -ENOENT;
		goto out;
	}

	sdata = bpf_local_storage_update(
		task, (struct bpf_local_storage_map *)map, value, map_flags);

	err = PTR_ERR_OR_ZERO(sdata);
out:
	put_pid(pid);
	return err;
}

static int task_storage_delete(struct task_struct *task, struct bpf_map *map)
{
	struct bpf_local_storage_data *sdata;

	sdata = task_storage_lookup(task, map, false);
	if (!sdata)
		return -ENOENT;

	bpf_selem_unlink(SELEM(sdata));

	return 0;
}

static int bpf_pid_task_storage_delete_elem(struct bpf_map *map, void *key)
{
	struct task_struct *task;
	unsigned int f_flags;
	struct pid *pid;
	int fd, err;

	fd = *(int *)key;
	pid = pidfd_get_pid(fd, &f_flags);
	if (IS_ERR(pid))
		return PTR_ERR(pid);

	/* We should be in an RCU read-side critical section, so it
	 * should be safe to call pid_task.
	 */
	WARN_ON_ONCE(!rcu_read_lock_held());
	task = pid_task(pid, PIDTYPE_PID);
	if (!task) {
		err = -ENOENT;
		goto out;
	}

	err = task_storage_delete(task, map);
out:
	put_pid(pid);
	return err;
}

BPF_CALL_4(bpf_task_storage_get, struct bpf_map *, map, struct task_struct *,
	   task, void *, value, u64, flags)
{
	struct bpf_local_storage_data *sdata;

	if (flags & ~(BPF_LOCAL_STORAGE_GET_F_CREATE))
		return (unsigned long)NULL;

	/* Explicitly check that task_storage_ptr is not NULL:
	 * task_storage_lookup returns NULL in that case, and
	 * bpf_local_storage_update expects the owner to have a
	 * valid storage pointer.
	 */
	if (!task || !task_storage_ptr(task))
		return (unsigned long)NULL;

	sdata = task_storage_lookup(task, map, true);
	if (sdata)
		return (unsigned long)sdata->data;

	/* This helper must only be called from places where the lifetime
	 * of the task is guaranteed, either by being refcounted or by
	 * being protected by an RCU read-side critical section.
	 */
	if (flags & BPF_LOCAL_STORAGE_GET_F_CREATE) {
		sdata = bpf_local_storage_update(
			task, (struct bpf_local_storage_map *)map, value,
			BPF_NOEXIST);
		return IS_ERR(sdata) ? (unsigned long)NULL :
			(unsigned long)sdata->data;
	}

	return (unsigned long)NULL;
}

BPF_CALL_2(bpf_task_storage_delete, struct bpf_map *, map, struct task_struct *,
	   task)
{
	if (!task)
		return -EINVAL;

	/* This helper must only be called from places where the lifetime
	 * of the task is guaranteed, either by being refcounted or by
	 * being protected by an RCU read-side critical section.
	 */
	return task_storage_delete(task, map);
}
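
/*
 * Usage sketch (illustrative, not part of this file): a minimal BPF LSM
 * program exercising bpf_task_storage_get() and the F_CREATE flag handled
 * above. The map and program names are hypothetical; it assumes vmlinux.h
 * plus libbpf's bpf_helpers.h and bpf_tracing.h on the BPF side.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
 *		__uint(map_flags, BPF_F_NO_PREALLOC);
 *		__type(key, int);
 *		__type(value, __u64);
 *	} exec_cnt SEC(".maps");
 *
 *	SEC("lsm/bprm_committed_creds")
 *	int BPF_PROG(count_exec, struct linux_binprm *bprm)
 *	{
 *		__u64 *cnt;
 *
 *		cnt = bpf_task_storage_get(&exec_cnt,
 *					   bpf_get_current_task_btf(), 0,
 *					   BPF_LOCAL_STORAGE_GET_F_CREATE);
 *		if (cnt)
 *			__sync_fetch_and_add(cnt, 1);
 *		return 0;
 *	}
 */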

static int notsupp_get_next_key(struct bpf_map *map, void *key,
				void *next_key)
{
	return -ENOTSUPP;
}

static struct bpf_map *task_storage_map_alloc(union bpf_attr *attr)
{
	struct bpf_local_storage_map *smap;

	smap = bpf_local_storage_map_alloc(attr);
	if (IS_ERR(smap))
		return ERR_CAST(smap);

	smap->cache_idx = bpf_local_storage_cache_idx_get(&task_cache);
	return &smap->map;
}

static void task_storage_map_free(struct bpf_map *map)
{
	struct bpf_local_storage_map *smap;

	smap = (struct bpf_local_storage_map *)map;
	bpf_local_storage_cache_idx_free(&task_cache, smap->cache_idx);
	bpf_local_storage_map_free(smap);
}

static int task_storage_map_btf_id;
const struct bpf_map_ops task_storage_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = bpf_local_storage_map_alloc_check,
	.map_alloc = task_storage_map_alloc,
	.map_free = task_storage_map_free,
	.map_get_next_key = notsupp_get_next_key,
	.map_lookup_elem = bpf_pid_task_storage_lookup_elem,
	.map_update_elem = bpf_pid_task_storage_update_elem,
	.map_delete_elem = bpf_pid_task_storage_delete_elem,
	.map_check_btf = bpf_local_storage_map_check_btf,
	.map_btf_name = "bpf_local_storage_map",
	.map_btf_id = &task_storage_map_btf_id,
	.map_owner_storage_ptr = task_storage_ptr,
};

BTF_ID_LIST_SINGLE(bpf_task_storage_btf_ids, struct, task_struct)

const struct bpf_func_proto bpf_task_storage_get_proto = {
	.func = bpf_task_storage_get,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID,
	.arg2_btf_id = &bpf_task_storage_btf_ids[0],
	.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type = ARG_ANYTHING,
};

const struct bpf_func_proto bpf_task_storage_delete_proto = {
	.func = bpf_task_storage_delete,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID,
	.arg2_btf_id = &bpf_task_storage_btf_ids[0],
};
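
/*
 * Usage sketch (illustrative, not part of this file): the update and delete
 * ops wired up above are likewise driven by a pidfd key from user space,
 * e.g. via libbpf (map_fd and pidfd as in the lookup sketch earlier):
 *
 *	__u64 init = 0;
 *
 *	bpf_map_update_elem(map_fd, &pidfd, &init, BPF_NOEXIST);
 *	...
 *	bpf_map_delete_elem(map_fd, &pidfd);
 */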