// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
 */

#include <linux/types.h>
#include <linux/bpf.h>
#include <linux/bpf_local_storage.h>
#include <uapi/linux/btf.h>
#include <linux/btf_ids.h>

DEFINE_BPF_STORAGE_CACHE(cgroup_cache);

static DEFINE_PER_CPU(int, bpf_cgrp_storage_busy);

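/*
 * The per-cpu "busy" counter serves as a recursion guard: if a BPF program
 * nests inside a storage operation already running on this CPU, the trylock
 * below fails and the nested operation backs off instead of deadlocking on
 * the storage bucket lock.
 */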
static void bpf_cgrp_storage_lock(void)
{
	cant_migrate();
	this_cpu_inc(bpf_cgrp_storage_busy);
}

static void bpf_cgrp_storage_unlock(void)
{
	this_cpu_dec(bpf_cgrp_storage_busy);
}

static bool bpf_cgrp_storage_trylock(void)
{
	cant_migrate();
	if (unlikely(this_cpu_inc_return(bpf_cgrp_storage_busy) != 1)) {
		this_cpu_dec(bpf_cgrp_storage_busy);
		return false;
	}
	return true;
}

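/* Return the address of the owner cgroup's local storage pointer. */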
static struct bpf_local_storage __rcu **cgroup_storage_ptr(void *owner)
{
	struct cgroup *cg = owner;

	return &cg->bpf_cgrp_storage;
}

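/* Destroy all local storage attached to @cgroup when the cgroup is freed. */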
void bpf_cgrp_storage_free(struct cgroup *cgroup)
{
	struct bpf_local_storage *local_storage;

	migrate_disable();
	rcu_read_lock();
	local_storage = rcu_dereference(cgroup->bpf_cgrp_storage);
	if (!local_storage)
		goto out;

	bpf_cgrp_storage_lock();
	bpf_local_storage_destroy(local_storage);
	bpf_cgrp_storage_unlock();
out:
	rcu_read_unlock();
	migrate_enable();
}

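/*
 * Find @map's element in @cgroup's local storage. When @cacheit_lockit is
 * true, the element found is also promoted into the storage cache slot for
 * faster subsequent lookups.
 */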
static struct bpf_local_storage_data *
cgroup_storage_lookup(struct cgroup *cgroup, struct bpf_map *map, bool cacheit_lockit)
{
	struct bpf_local_storage *cgroup_storage;
	struct bpf_local_storage_map *smap;

	cgroup_storage = rcu_dereference_check(cgroup->bpf_cgrp_storage,
					       bpf_rcu_lock_held());
	if (!cgroup_storage)
		return NULL;

	smap = (struct bpf_local_storage_map *)map;
	return bpf_local_storage_lookup(cgroup_storage, smap, cacheit_lockit);
}

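/* Syscall-side lookup: the key is a cgroup fd (v1 or v2 hierarchy). */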
static void *bpf_cgrp_storage_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_local_storage_data *sdata;
	struct cgroup *cgroup;
	int fd;

	fd = *(int *)key;
	cgroup = cgroup_v1v2_get_from_fd(fd);
	if (IS_ERR(cgroup))
		return ERR_CAST(cgroup);

	bpf_cgrp_storage_lock();
	sdata = cgroup_storage_lookup(cgroup, map, true);
	bpf_cgrp_storage_unlock();
	cgroup_put(cgroup);
	return sdata ? sdata->data : NULL;
}

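/* Syscall-side update: create or replace the element keyed by a cgroup fd. */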
static long bpf_cgrp_storage_update_elem(struct bpf_map *map, void *key,
					 void *value, u64 map_flags)
{
	struct bpf_local_storage_data *sdata;
	struct cgroup *cgroup;
	int fd;

	fd = *(int *)key;
	cgroup = cgroup_v1v2_get_from_fd(fd);
	if (IS_ERR(cgroup))
		return PTR_ERR(cgroup);

	bpf_cgrp_storage_lock();
	sdata = bpf_local_storage_update(cgroup, (struct bpf_local_storage_map *)map,
					 value, map_flags, false, GFP_ATOMIC);
	bpf_cgrp_storage_unlock();
	cgroup_put(cgroup);
	return PTR_ERR_OR_ZERO(sdata);
}

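/* Remove @map's element from @cgroup's storage; callers hold the busy lock. */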
static int cgroup_storage_delete(struct cgroup *cgroup, struct bpf_map *map)
{
	struct bpf_local_storage_data *sdata;

	sdata = cgroup_storage_lookup(cgroup, map, false);
	if (!sdata)
		return -ENOENT;

	bpf_selem_unlink(SELEM(sdata), false);
	return 0;
}

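/* Syscall-side delete: the key is a cgroup fd. */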
static long bpf_cgrp_storage_delete_elem(struct bpf_map *map, void *key)
{
	struct cgroup *cgroup;
	int err, fd;

	fd = *(int *)key;
	cgroup = cgroup_v1v2_get_from_fd(fd);
	if (IS_ERR(cgroup))
		return PTR_ERR(cgroup);

	bpf_cgrp_storage_lock();
	err = cgroup_storage_delete(cgroup, map);
	bpf_cgrp_storage_unlock();
	cgroup_put(cgroup);
	return err;
}

static int notsupp_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	return -ENOTSUPP;
}

static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
{
	return bpf_local_storage_map_alloc(attr, &cgroup_cache, true);
}

static void cgroup_storage_map_free(struct bpf_map *map)
{
	bpf_local_storage_map_free(map, &cgroup_cache, &bpf_cgrp_storage_busy);
}

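/*
 * bpf_cgrp_storage_get() helper: return a pointer to @map's storage in
 * @cgroup, optionally creating it when BPF_LOCAL_STORAGE_GET_F_CREATE is
 * set and the cgroup is not already being destroyed.
 */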
/* *gfp_flags* is a hidden argument provided by the verifier */
BPF_CALL_5(bpf_cgrp_storage_get, struct bpf_map *, map, struct cgroup *, cgroup,
	   void *, value, u64, flags, gfp_t, gfp_flags)
{
	struct bpf_local_storage_data *sdata;

	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (flags & ~(BPF_LOCAL_STORAGE_GET_F_CREATE))
		return (unsigned long)NULL;

	if (!cgroup)
		return (unsigned long)NULL;

	if (!bpf_cgrp_storage_trylock())
		return (unsigned long)NULL;

	sdata = cgroup_storage_lookup(cgroup, map, true);
	if (sdata)
		goto unlock;

	/* only allocate new storage, when the cgroup is refcounted */
	if (!percpu_ref_is_dying(&cgroup->self.refcnt) &&
	    (flags & BPF_LOCAL_STORAGE_GET_F_CREATE))
		sdata = bpf_local_storage_update(cgroup, (struct bpf_local_storage_map *)map,
						 value, BPF_NOEXIST, false, gfp_flags);

unlock:
	bpf_cgrp_storage_unlock();
	return IS_ERR_OR_NULL(sdata) ? (unsigned long)NULL : (unsigned long)sdata->data;
}

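/* bpf_cgrp_storage_delete() helper: remove @map's storage from @cgroup. */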
BPF_CALL_2(bpf_cgrp_storage_delete, struct bpf_map *, map, struct cgroup *, cgroup)
{
	int ret;

	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (!cgroup)
		return -EINVAL;

	if (!bpf_cgrp_storage_trylock())
		return -EBUSY;

	ret = cgroup_storage_delete(cgroup, map);
	bpf_cgrp_storage_unlock();
	return ret;
}

const struct bpf_map_ops cgrp_storage_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = bpf_local_storage_map_alloc_check,
	.map_alloc = cgroup_storage_map_alloc,
	.map_free = cgroup_storage_map_free,
	.map_get_next_key = notsupp_get_next_key,
	.map_lookup_elem = bpf_cgrp_storage_lookup_elem,
	.map_update_elem = bpf_cgrp_storage_update_elem,
	.map_delete_elem = bpf_cgrp_storage_delete_elem,
	.map_check_btf = bpf_local_storage_map_check_btf,
	.map_mem_usage = bpf_local_storage_map_mem_usage,
	.map_btf_id = &bpf_local_storage_map_btf_id[0],
	.map_owner_storage_ptr = cgroup_storage_ptr,
};

const struct bpf_func_proto bpf_cgrp_storage_get_proto = {
	.func = bpf_cgrp_storage_get,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
	.arg2_btf_id = &bpf_cgroup_btf_id[0],
	.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type = ARG_ANYTHING,
};

const struct bpf_func_proto bpf_cgrp_storage_delete_proto = {
	.func = bpf_cgrp_storage_delete,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
	.arg2_btf_id = &bpf_cgroup_btf_id[0],
};

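/*
 * Typical BPF-side usage, as a minimal sketch (the map and program names
 * below are illustrative, not part of this file): a tracing program keeps
 * a per-cgroup counter in the current task's default cgroup via the
 * bpf_cgrp_storage_get() helper implemented above.
 *
 *	#include "vmlinux.h"
 *	#include <bpf/bpf_helpers.h>
 *	#include <bpf/bpf_tracing.h>
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
 *		__uint(map_flags, BPF_F_NO_PREALLOC);
 *		__type(key, int);
 *		__type(value, long);
 *	} cgrp_counts SEC(".maps");
 *
 *	SEC("tp_btf/sys_enter")
 *	int BPF_PROG(count_syscalls, struct pt_regs *regs, long id)
 *	{
 *		struct task_struct *task = bpf_get_current_task_btf();
 *		long *cnt;
 *
 *		cnt = bpf_cgrp_storage_get(&cgrp_counts, task->cgroups->dfl_cgrp,
 *					   0, BPF_LOCAL_STORAGE_GET_F_CREATE);
 *		if (cnt)
 *			__sync_fetch_and_add(cnt, 1);
 *		return 0;
 *	}
 *
 *	char LICENSE[] SEC("license") = "GPL";
 *
 * From user space, the same element can be read with bpf_map_lookup_elem()
 * on the map fd, using a cgroup fd as the key (see the syscall handlers
 * above).
 */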