// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
 */

#include <linux/types.h>
#include <linux/bpf.h>
#include <linux/bpf_local_storage.h>
#include <uapi/linux/btf.h>
#include <linux/btf_ids.h>

DEFINE_BPF_STORAGE_CACHE(cgroup_cache);

static DEFINE_PER_CPU(int, bpf_cgrp_storage_busy);

static void bpf_cgrp_storage_lock(void)
{
	migrate_disable();
	this_cpu_inc(bpf_cgrp_storage_busy);
}

static void bpf_cgrp_storage_unlock(void)
{
	this_cpu_dec(bpf_cgrp_storage_busy);
	migrate_enable();
}

static bool bpf_cgrp_storage_trylock(void)
{
	migrate_disable();
	if (unlikely(this_cpu_inc_return(bpf_cgrp_storage_busy) != 1)) {
		this_cpu_dec(bpf_cgrp_storage_busy);
		migrate_enable();
		return false;
	}
	return true;
}

static struct bpf_local_storage __rcu **cgroup_storage_ptr(void *owner)
{
	struct cgroup *cg = owner;

	return &cg->bpf_cgrp_storage;
}

void bpf_cgrp_storage_free(struct cgroup *cgroup)
{
	struct bpf_local_storage *local_storage;
	bool free_cgroup_storage = false;
	unsigned long flags;

	rcu_read_lock();
	local_storage = rcu_dereference(cgroup->bpf_cgrp_storage);
	if (!local_storage) {
		rcu_read_unlock();
		return;
	}

	bpf_cgrp_storage_lock();
	raw_spin_lock_irqsave(&local_storage->lock, flags);
	free_cgroup_storage = bpf_local_storage_unlink_nolock(local_storage);
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
	bpf_cgrp_storage_unlock();
	rcu_read_unlock();

	if (free_cgroup_storage)
		kfree_rcu(local_storage, rcu);
}

static struct bpf_local_storage_data *
cgroup_storage_lookup(struct cgroup *cgroup, struct bpf_map *map, bool cacheit_lockit)
{
	struct bpf_local_storage *cgroup_storage;
	struct bpf_local_storage_map *smap;

	cgroup_storage = rcu_dereference_check(cgroup->bpf_cgrp_storage,
					       bpf_rcu_lock_held());
	if (!cgroup_storage)
		return NULL;

	smap = (struct bpf_local_storage_map *)map;
	return bpf_local_storage_lookup(cgroup_storage, smap, cacheit_lockit);
}

static void *bpf_cgrp_storage_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_local_storage_data *sdata;
	struct cgroup *cgroup;
	int fd;

	fd = *(int *)key;
	cgroup = cgroup_get_from_fd(fd);
	if (IS_ERR(cgroup))
		return ERR_CAST(cgroup);

	bpf_cgrp_storage_lock();
	sdata = cgroup_storage_lookup(cgroup, map, true);
	bpf_cgrp_storage_unlock();
	cgroup_put(cgroup);
	return sdata ? sdata->data : NULL;
}

static int bpf_cgrp_storage_update_elem(struct bpf_map *map, void *key,
					void *value, u64 map_flags)
{
	struct bpf_local_storage_data *sdata;
	struct cgroup *cgroup;
	int fd;

	fd = *(int *)key;
	cgroup = cgroup_get_from_fd(fd);
	if (IS_ERR(cgroup))
		return PTR_ERR(cgroup);

	bpf_cgrp_storage_lock();
	sdata = bpf_local_storage_update(cgroup, (struct bpf_local_storage_map *)map,
					 value, map_flags, GFP_ATOMIC);
	bpf_cgrp_storage_unlock();
	cgroup_put(cgroup);
	return PTR_ERR_OR_ZERO(sdata);
}

static int cgroup_storage_delete(struct cgroup *cgroup, struct bpf_map *map)
{
	struct bpf_local_storage_data *sdata;

	sdata = cgroup_storage_lookup(cgroup, map, false);
	if (!sdata)
		return -ENOENT;

	bpf_selem_unlink(SELEM(sdata), true);
	return 0;
}

static int bpf_cgrp_storage_delete_elem(struct bpf_map *map, void *key)
{
	struct cgroup *cgroup;
	int err, fd;

	fd = *(int *)key;
	cgroup = cgroup_get_from_fd(fd);
	if (IS_ERR(cgroup))
		return PTR_ERR(cgroup);

	bpf_cgrp_storage_lock();
	err = cgroup_storage_delete(cgroup, map);
	bpf_cgrp_storage_unlock();
	cgroup_put(cgroup);
	return err;
}

static int notsupp_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	return -ENOTSUPP;
}

static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
{
	return bpf_local_storage_map_alloc(attr, &cgroup_cache);
}

static void cgroup_storage_map_free(struct bpf_map *map)
{
	bpf_local_storage_map_free(map, &cgroup_cache, NULL);
}

/* *gfp_flags* is a hidden argument provided by the verifier */
BPF_CALL_5(bpf_cgrp_storage_get, struct bpf_map *, map, struct cgroup *, cgroup,
	   void *, value, u64, flags, gfp_t, gfp_flags)
{
	struct bpf_local_storage_data *sdata;

	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (flags & ~(BPF_LOCAL_STORAGE_GET_F_CREATE))
		return (unsigned long)NULL;

	if (!cgroup)
		return (unsigned long)NULL;

	if (!bpf_cgrp_storage_trylock())
		return (unsigned long)NULL;

	sdata = cgroup_storage_lookup(cgroup, map, true);
	if (sdata)
		goto unlock;

	/* only allocate new storage, when the cgroup is refcounted */
	if (!percpu_ref_is_dying(&cgroup->self.refcnt) &&
	    (flags & BPF_LOCAL_STORAGE_GET_F_CREATE))
		sdata = bpf_local_storage_update(cgroup, (struct bpf_local_storage_map *)map,
						 value, BPF_NOEXIST, gfp_flags);

unlock:
	bpf_cgrp_storage_unlock();
	return IS_ERR_OR_NULL(sdata) ? (unsigned long)NULL : (unsigned long)sdata->data;
}

BPF_CALL_2(bpf_cgrp_storage_delete, struct bpf_map *, map, struct cgroup *, cgroup)
{
	int ret;

	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (!cgroup)
		return -EINVAL;

	if (!bpf_cgrp_storage_trylock())
		return -EBUSY;

	ret = cgroup_storage_delete(cgroup, map);
	bpf_cgrp_storage_unlock();
	return ret;
}

const struct bpf_map_ops cgrp_storage_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = bpf_local_storage_map_alloc_check,
	.map_alloc = cgroup_storage_map_alloc,
	.map_free = cgroup_storage_map_free,
	.map_get_next_key = notsupp_get_next_key,
	.map_lookup_elem = bpf_cgrp_storage_lookup_elem,
	.map_update_elem = bpf_cgrp_storage_update_elem,
	.map_delete_elem = bpf_cgrp_storage_delete_elem,
	.map_check_btf = bpf_local_storage_map_check_btf,
	.map_btf_id = &bpf_local_storage_map_btf_id[0],
	.map_owner_storage_ptr = cgroup_storage_ptr,
};

const struct bpf_func_proto bpf_cgrp_storage_get_proto = {
	.func = bpf_cgrp_storage_get,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID,
	.arg2_btf_id = &bpf_cgroup_btf_id[0],
	.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type = ARG_ANYTHING,
};

const struct bpf_func_proto bpf_cgrp_storage_delete_proto = {
	.func = bpf_cgrp_storage_delete,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID,
	.arg2_btf_id = &bpf_cgroup_btf_id[0],
};
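
/*
 * Usage sketch (illustrative only, not part of this file): a BPF program
 * can keep per-cgroup data through a BPF_MAP_TYPE_CGRP_STORAGE map and the
 * bpf_cgrp_storage_get() helper implemented above. The map and program
 * names below are made up for the example, and the task->cgroups->dfl_cgrp
 * walk assumes a tracing program type that is allowed to read it:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
 *		__uint(map_flags, BPF_F_NO_PREALLOC);
 *		__type(key, int);
 *		__type(value, long);
 *	} cgrp_events SEC(".maps");
 *
 *	SEC("tp_btf/sys_enter")
 *	int BPF_PROG(count_entry)
 *	{
 *		struct task_struct *task = bpf_get_current_task_btf();
 *		long *cnt;
 *
 *		// Look up (or create) this cgroup's storage for the map and
 *		// bump the counter; NULL means the lookup/creation failed.
 *		cnt = bpf_cgrp_storage_get(&cgrp_events,
 *					   task->cgroups->dfl_cgrp, 0,
 *					   BPF_LOCAL_STORAGE_GET_F_CREATE);
 *		if (cnt)
 *			__sync_fetch_and_add(cnt, 1);
 *		return 0;
 *	}
 */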