// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/bpf_local_storage.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate_trace.h>

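/* Cache descriptor shared with the generic bpf_local_storage code so
 * that lookups of frequently used sk storage maps can be served from
 * the owner's local storage cache.
 */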
DEFINE_BPF_STORAGE_CACHE(sk_cache);

static struct bpf_local_storage_data *
bpf_sk_storage_lookup(struct sock *sk, struct bpf_map *map, bool cacheit_lockit)
{
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_map *smap;

	sk_storage =
		rcu_dereference_check(sk->sk_bpf_storage, bpf_rcu_lock_held());
	if (!sk_storage)
		return NULL;

	smap = (struct bpf_local_storage_map *)map;
	return bpf_local_storage_lookup(sk_storage, smap, cacheit_lockit);
}

static int bpf_sk_storage_del(struct sock *sk, struct bpf_map *map)
{
	struct bpf_local_storage_data *sdata;

	sdata = bpf_sk_storage_lookup(sk, map, false);
	if (!sdata)
		return -ENOENT;

	return bpf_selem_unlink(SELEM(sdata));
}

/* Called by __sk_destruct() & bpf_sk_storage_clone() */
void bpf_sk_storage_free(struct sock *sk)
{
	struct bpf_local_storage *sk_storage;
	u32 uncharge;

	rcu_read_lock_dont_migrate();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage)
		goto out;

	uncharge = bpf_local_storage_destroy(sk_storage);
	if (uncharge)
		atomic_sub(uncharge, &sk->sk_omem_alloc);
out:
	rcu_read_unlock_migrate();
}

static void bpf_sk_storage_map_free(struct bpf_map *map)
{
	bpf_local_storage_map_free(map, &sk_cache);
}

static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
{
	return bpf_local_storage_map_alloc(attr, &sk_cache, false);
}

static int notsupp_get_next_key(struct bpf_map *map, void *key,
				void *next_key)
{
	return -ENOTSUPP;
}

static void *bpf_fd_sk_storage_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_local_storage_data *sdata;
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		sdata = bpf_sk_storage_lookup(sock->sk, map, true);
		sockfd_put(sock);
		return sdata ? sdata->data : NULL;
	}

	return ERR_PTR(err);
}

static long bpf_fd_sk_storage_update_elem(struct bpf_map *map, void *key,
					  void *value, u64 map_flags)
{
	struct bpf_local_storage_data *sdata;
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		sdata = bpf_local_storage_update(
			sock->sk, (struct bpf_local_storage_map *)map, value,
			map_flags, false, GFP_ATOMIC);
		sockfd_put(sock);
		return PTR_ERR_OR_ZERO(sdata);
	}

	return err;
}

static long bpf_fd_sk_storage_delete_elem(struct bpf_map *map, void *key)
{
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		err = bpf_sk_storage_del(sock->sk, map);
		sockfd_put(sock);
		return err;
	}

	return err;
}
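
/* A minimal userspace sketch of the syscall-side map ops above: the
 * map key is a socket fd (assumes libbpf; map_fd, sock_fd and
 * struct my_val are hypothetical names):
 *
 *	struct my_val v = { .cnt = 1 };
 *
 *	bpf_map_update_elem(map_fd, &sock_fd, &v, BPF_ANY);
 *	bpf_map_lookup_elem(map_fd, &sock_fd, &v);
 *	bpf_map_delete_elem(map_fd, &sock_fd);
 */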

static struct bpf_local_storage_elem *
bpf_sk_storage_clone_elem(struct sock *newsk,
			  struct bpf_local_storage_map *smap,
			  struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_elem *copy_selem;

	copy_selem = bpf_selem_alloc(smap, newsk, NULL, false, GFP_ATOMIC);
	if (!copy_selem)
		return NULL;

	if (btf_record_has_field(smap->map.record, BPF_SPIN_LOCK))
		copy_map_value_locked(&smap->map, SDATA(copy_selem)->data,
				      SDATA(selem)->data, true);
	else
		copy_map_value(&smap->map, SDATA(copy_selem)->data,
			       SDATA(selem)->data);

	return copy_selem;
}

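/* Clone the storages of maps created with BPF_F_CLONE from the parent
 * @sk to the child @newsk, e.g. when a new socket is created by
 * accept().
 */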
int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk)
{
	struct bpf_local_storage *new_sk_storage = NULL;
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_elem *selem;
	int ret = 0;

	RCU_INIT_POINTER(newsk->sk_bpf_storage, NULL);

	rcu_read_lock_dont_migrate();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);

	if (!sk_storage || hlist_empty(&sk_storage->list))
		goto out;

	hlist_for_each_entry_rcu(selem, &sk_storage->list, snode) {
		struct bpf_local_storage_elem *copy_selem;
		struct bpf_local_storage_map *smap;
		struct bpf_map *map;

		smap = rcu_dereference(SDATA(selem)->smap);
		if (!(smap->map.map_flags & BPF_F_CLONE))
			continue;

		/* Note that for lockless listeners, adding a new element
		 * here can race with the cleanup in
		 * bpf_local_storage_map_free.  Try to grab the map refcnt
		 * to make sure it is still alive and to prevent
		 * concurrent removal.
		 */
		map = bpf_map_inc_not_zero(&smap->map);
		if (IS_ERR(map))
			continue;

		copy_selem = bpf_sk_storage_clone_elem(newsk, smap, selem);
		if (!copy_selem) {
			ret = -ENOMEM;
			bpf_map_put(map);
			goto out;
		}

		if (new_sk_storage) {
			ret = bpf_selem_link_map(smap, new_sk_storage, copy_selem);
			if (ret) {
				bpf_selem_free(copy_selem, true);
				atomic_sub(smap->elem_size,
					   &newsk->sk_omem_alloc);
				bpf_map_put(map);
				goto out;
			}
			bpf_selem_link_storage_nolock(new_sk_storage, copy_selem);
		} else {
			ret = bpf_local_storage_alloc(newsk, smap, copy_selem, GFP_ATOMIC);
			if (ret) {
				bpf_selem_free(copy_selem, true);
				atomic_sub(smap->elem_size,
					   &newsk->sk_omem_alloc);
				bpf_map_put(map);
				goto out;
			}

			new_sk_storage =
				rcu_dereference(copy_selem->local_storage);
		}
		bpf_map_put(map);
	}

out:
	rcu_read_unlock_migrate();

	/* In case of an error, don't free anything explicitly here; the
	 * caller is responsible for calling bpf_sk_storage_free.
	 */

	return ret;
}

/* *gfp_flags* is a hidden argument provided by the verifier */
BPF_CALL_5(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk,
	   void *, value, u64, flags, gfp_t, gfp_flags)
{
	struct bpf_local_storage_data *sdata;

	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (!sk || !sk_fullsock(sk) || flags > BPF_SK_STORAGE_GET_F_CREATE)
		return (unsigned long)NULL;

	sdata = bpf_sk_storage_lookup(sk, map, true);
	if (sdata)
		return (unsigned long)sdata->data;

	if (flags == BPF_SK_STORAGE_GET_F_CREATE &&
	    /* Cannot add a new elem to a sk that is going away.
	     * Otherwise, the new elem may become a leak
	     * (and also cause other memory issues during map
	     * destruction).
	     */
	    refcount_inc_not_zero(&sk->sk_refcnt)) {
		sdata = bpf_local_storage_update(
			sk, (struct bpf_local_storage_map *)map, value,
			BPF_NOEXIST, false, gfp_flags);
		/* sk must be a fullsock (guaranteed by verifier),
		 * so sock_gen_put() is unnecessary.
		 */
		sock_put(sk);
		return IS_ERR(sdata) ?
			(unsigned long)NULL : (unsigned long)sdata->data;
	}

	return (unsigned long)NULL;
}
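
/* A minimal BPF-side sketch of calling this helper from a program,
 * e.g. a cgroup/sock or tracing program ("my_sk_storage" and
 * "struct my_val" are hypothetical names):
 *
 *	struct my_val *v;
 *
 *	v = bpf_sk_storage_get(&my_sk_storage, sk, NULL,
 *			       BPF_SK_STORAGE_GET_F_CREATE);
 *	if (v)
 *		v->cnt++;
 */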

BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk)
{
	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (!sk || !sk_fullsock(sk))
		return -EINVAL;

	if (refcount_inc_not_zero(&sk->sk_refcnt)) {
		int err;

		err = bpf_sk_storage_del(sk, map);
		sock_put(sk);
		return err;
	}

	return -ENOENT;
}

static int bpf_sk_storage_charge(struct bpf_local_storage_map *smap,
				 void *owner, u32 size)
{
	struct sock *sk = (struct sock *)owner;
	int optmem_max;

	optmem_max = READ_ONCE(sock_net(sk)->core.sysctl_optmem_max);
	/* same check as in sock_kmalloc() */
	if (size <= optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < optmem_max) {
		atomic_add(size, &sk->sk_omem_alloc);
		return 0;
	}

	return -ENOMEM;
}

static void bpf_sk_storage_uncharge(struct bpf_local_storage_map *smap,
				    void *owner, u32 size)
{
	struct sock *sk = owner;

	atomic_sub(size, &sk->sk_omem_alloc);
}

static struct bpf_local_storage __rcu **
bpf_sk_storage_ptr(void *owner)
{
	struct sock *sk = owner;

	return &sk->sk_bpf_storage;
}

const struct bpf_map_ops sk_storage_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = bpf_local_storage_map_alloc_check,
	.map_alloc = bpf_sk_storage_map_alloc,
	.map_free = bpf_sk_storage_map_free,
	.map_get_next_key = notsupp_get_next_key,
	.map_lookup_elem = bpf_fd_sk_storage_lookup_elem,
	.map_update_elem = bpf_fd_sk_storage_update_elem,
	.map_delete_elem = bpf_fd_sk_storage_delete_elem,
	.map_check_btf = bpf_local_storage_map_check_btf,
	.map_btf_id = &bpf_local_storage_map_btf_id[0],
	.map_local_storage_charge = bpf_sk_storage_charge,
	.map_local_storage_uncharge = bpf_sk_storage_uncharge,
	.map_owner_storage_ptr = bpf_sk_storage_ptr,
	.map_mem_usage = bpf_local_storage_map_mem_usage,
};

const struct bpf_func_proto bpf_sk_storage_get_proto = {
	.func		= bpf_sk_storage_get,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_BTF_ID_SOCK_COMMON,
	.arg3_type	= ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type	= ARG_ANYTHING,
};

const struct bpf_func_proto bpf_sk_storage_get_cg_sock_proto = {
	.func		= bpf_sk_storage_get,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_CTX, /* context is 'struct sock' */
	.arg3_type	= ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type	= ARG_ANYTHING,
};

const struct bpf_func_proto bpf_sk_storage_delete_proto = {
	.func		= bpf_sk_storage_delete,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_BTF_ID_SOCK_COMMON,
};

static bool bpf_sk_storage_tracing_allowed(const struct bpf_prog *prog)
{
	if (prog->aux->dst_prog)
		return false;

	/* Ensure the tracing program is not tracing
	 * any bpf_sk_storage*() function while also
	 * using the bpf_sk_storage_(get|delete) helpers.
	 */
	switch (prog->expected_attach_type) {
	case BPF_TRACE_ITER:
	case BPF_TRACE_RAW_TP:
		/* bpf_sk_storage has no tracepoint */
		return true;
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
	case BPF_TRACE_FSESSION:
		return !!strncmp(prog->aux->attach_func_name, "bpf_sk_storage",
				 strlen("bpf_sk_storage"));
	default:
		return false;
	}

	return false;
}

/* *gfp_flags* is a hidden argument provided by the verifier */
BPF_CALL_5(bpf_sk_storage_get_tracing, struct bpf_map *, map, struct sock *, sk,
	   void *, value, u64, flags, gfp_t, gfp_flags)
{
	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (in_hardirq() || in_nmi())
		return (unsigned long)NULL;

	return (unsigned long)____bpf_sk_storage_get(map, sk, value, flags,
						     gfp_flags);
}

BPF_CALL_2(bpf_sk_storage_delete_tracing, struct bpf_map *, map,
	   struct sock *, sk)
{
	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (in_hardirq() || in_nmi())
		return -EPERM;

	return ____bpf_sk_storage_delete(map, sk);
}

const struct bpf_func_proto bpf_sk_storage_get_tracing_proto = {
	.func		= bpf_sk_storage_get_tracing,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_BTF_ID_OR_NULL,
	.arg2_btf_id	= &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
	.arg3_type	= ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type	= ARG_ANYTHING,
	.allowed	= bpf_sk_storage_tracing_allowed,
};

const struct bpf_func_proto bpf_sk_storage_delete_tracing_proto = {
	.func		= bpf_sk_storage_delete_tracing,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_BTF_ID_OR_NULL,
	.arg2_btf_id	= &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
	.allowed	= bpf_sk_storage_tracing_allowed,
};

struct bpf_sk_storage_diag {
	u32 nr_maps;
	struct bpf_map *maps[];
};

/* The reply will be like:
 * INET_DIAG_BPF_SK_STORAGES (nla_nest)
 *	SK_DIAG_BPF_STORAGE (nla_nest)
 *		SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
 *		SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
 *	SK_DIAG_BPF_STORAGE (nla_nest)
 *		SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
 *		SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
 *	....
 */
static int nla_value_size(u32 value_size)
{
	/* SK_DIAG_BPF_STORAGE (nla_nest)
	 *	SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
	 *	SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
	 */
	return nla_total_size(0) + nla_total_size(sizeof(u32)) +
		nla_total_size_64bit(value_size);
}

void bpf_sk_storage_diag_free(struct bpf_sk_storage_diag *diag)
{
	u32 i;

	if (!diag)
		return;

	for (i = 0; i < diag->nr_maps; i++)
		bpf_map_put(diag->maps[i]);

	kfree(diag);
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_free);

static bool diag_check_dup(const struct bpf_sk_storage_diag *diag,
			   const struct bpf_map *map)
{
	u32 i;

	for (i = 0; i < diag->nr_maps; i++) {
		if (diag->maps[i] == map)
			return true;
	}

	return false;
}

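/* The request in *nla_stgs* is expected to carry one u32 map fd per
 * map to dump:
 * SK_DIAG_BPF_STORAGE_REQ_MAP_FD (nla_put_u32)
 * SK_DIAG_BPF_STORAGE_REQ_MAP_FD (nla_put_u32)
 * ....
 */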
struct bpf_sk_storage_diag *
bpf_sk_storage_diag_alloc(const struct nlattr *nla_stgs)
{
	struct bpf_sk_storage_diag *diag;
	struct nlattr *nla;
	u32 nr_maps = 0;
	int rem, err;

	/* bpf_local_storage_map is currently limited to CAP_SYS_ADMIN,
	 * matching the check done on the map_alloc_check() side.
	 */
	if (!bpf_capable())
		return ERR_PTR(-EPERM);

	nla_for_each_nested_type(nla, SK_DIAG_BPF_STORAGE_REQ_MAP_FD,
				 nla_stgs, rem) {
		if (nla_len(nla) != sizeof(u32))
			return ERR_PTR(-EINVAL);
		nr_maps++;
	}

	diag = kzalloc_flex(*diag, maps, nr_maps);
	if (!diag)
		return ERR_PTR(-ENOMEM);

	nla_for_each_nested_type(nla, SK_DIAG_BPF_STORAGE_REQ_MAP_FD,
				 nla_stgs, rem) {
		int map_fd = nla_get_u32(nla);
		struct bpf_map *map = bpf_map_get(map_fd);

		if (IS_ERR(map)) {
			err = PTR_ERR(map);
			goto err_free;
		}
		if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) {
			bpf_map_put(map);
			err = -EINVAL;
			goto err_free;
		}
		if (diag_check_dup(diag, map)) {
			bpf_map_put(map);
			err = -EEXIST;
			goto err_free;
		}
		diag->maps[diag->nr_maps++] = map;
	}

	return diag;

err_free:
	bpf_sk_storage_diag_free(diag);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_alloc);

static int diag_get(struct bpf_local_storage_data *sdata, struct sk_buff *skb)
{
	struct nlattr *nla_stg, *nla_value;
	struct bpf_local_storage_map *smap;

	/* It cannot exceed max nlattr's payload */
	BUILD_BUG_ON(U16_MAX - NLA_HDRLEN < BPF_LOCAL_STORAGE_MAX_VALUE_SIZE);

	nla_stg = nla_nest_start(skb, SK_DIAG_BPF_STORAGE);
	if (!nla_stg)
		return -EMSGSIZE;

	smap = rcu_dereference(sdata->smap);
	if (nla_put_u32(skb, SK_DIAG_BPF_STORAGE_MAP_ID, smap->map.id))
		goto errout;

	nla_value = nla_reserve_64bit(skb, SK_DIAG_BPF_STORAGE_MAP_VALUE,
				      smap->map.value_size,
				      SK_DIAG_BPF_STORAGE_PAD);
	if (!nla_value)
		goto errout;

	if (btf_record_has_field(smap->map.record, BPF_SPIN_LOCK))
		copy_map_value_locked(&smap->map, nla_data(nla_value),
				      sdata->data, true);
	else
		copy_map_value(&smap->map, nla_data(nla_value), sdata->data);

	nla_nest_end(skb, nla_stg);
	return 0;

errout:
	nla_nest_cancel(skb, nla_stg);
	return -EMSGSIZE;
}

static int bpf_sk_storage_diag_put_all(struct sock *sk, struct sk_buff *skb,
				       int stg_array_type,
				       unsigned int *res_diag_size)
{
	/* stg_array_type (e.g. INET_DIAG_BPF_SK_STORAGES) */
	unsigned int diag_size = nla_total_size(0);
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage_map *smap;
	struct nlattr *nla_stgs;
	unsigned int saved_len;
	int err = 0;

	rcu_read_lock();

	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage || hlist_empty(&sk_storage->list)) {
		rcu_read_unlock();
		return 0;
	}

	nla_stgs = nla_nest_start(skb, stg_array_type);
	if (!nla_stgs)
		/* Continue to learn diag_size */
		err = -EMSGSIZE;

	saved_len = skb->len;
	hlist_for_each_entry_rcu(selem, &sk_storage->list, snode) {
		smap = rcu_dereference(SDATA(selem)->smap);
		diag_size += nla_value_size(smap->map.value_size);

		if (nla_stgs && diag_get(SDATA(selem), skb))
			/* Continue to learn diag_size */
			err = -EMSGSIZE;
	}

	rcu_read_unlock();

	if (nla_stgs) {
		if (saved_len == skb->len)
			nla_nest_cancel(skb, nla_stgs);
		else
			nla_nest_end(skb, nla_stgs);
	}

	if (diag_size == nla_total_size(0)) {
		*res_diag_size = 0;
		return 0;
	}

	*res_diag_size = diag_size;
	return err;
}

int bpf_sk_storage_diag_put(struct bpf_sk_storage_diag *diag,
			    struct sock *sk, struct sk_buff *skb,
			    int stg_array_type,
			    unsigned int *res_diag_size)
{
	/* stg_array_type (e.g. INET_DIAG_BPF_SK_STORAGES) */
	unsigned int diag_size = nla_total_size(0);
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_data *sdata;
	struct nlattr *nla_stgs;
	unsigned int saved_len;
	int err = 0;
	u32 i;

	*res_diag_size = 0;

	/* No map has been specified.  Dump all. */
	if (!diag->nr_maps)
		return bpf_sk_storage_diag_put_all(sk, skb, stg_array_type,
						   res_diag_size);

	rcu_read_lock();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage || hlist_empty(&sk_storage->list)) {
		rcu_read_unlock();
		return 0;
	}

	nla_stgs = nla_nest_start(skb, stg_array_type);
	if (!nla_stgs)
		/* Continue to learn diag_size */
		err = -EMSGSIZE;

	saved_len = skb->len;
	for (i = 0; i < diag->nr_maps; i++) {
		sdata = bpf_local_storage_lookup(sk_storage,
				(struct bpf_local_storage_map *)diag->maps[i],
				false);

		if (!sdata)
			continue;

		diag_size += nla_value_size(diag->maps[i]->value_size);

		if (nla_stgs && diag_get(sdata, skb))
			/* Continue to learn diag_size */
			err = -EMSGSIZE;
	}
	rcu_read_unlock();

	if (nla_stgs) {
		if (saved_len == skb->len)
			nla_nest_cancel(skb, nla_stgs);
		else
			nla_nest_end(skb, nla_stgs);
	}

	if (diag_size == nla_total_size(0)) {
		*res_diag_size = 0;
		return 0;
	}

	*res_diag_size = diag_size;
	return err;
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_put);

struct bpf_iter_seq_sk_storage_map_info {
	struct bpf_map *map;
	unsigned int bucket_id;
	unsigned int skip_elems;
};

static struct bpf_local_storage_elem *
bpf_sk_storage_map_seq_find_next(struct bpf_iter_seq_sk_storage_map_info *info,
				 struct bpf_local_storage_elem *prev_selem)
	__acquires(RCU) __releases(RCU)
{
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_elem *selem;
	u32 skip_elems = info->skip_elems;
	struct bpf_local_storage_map *smap;
	u32 bucket_id = info->bucket_id;
	u32 i, count, n_buckets;
	struct bpf_local_storage_map_bucket *b;

	smap = (struct bpf_local_storage_map *)info->map;
	n_buckets = 1U << smap->bucket_log;
	if (bucket_id >= n_buckets)
		return NULL;

	/* try to find next selem in the same bucket */
	selem = prev_selem;
	count = 0;
	while (selem) {
		selem = hlist_entry_safe(rcu_dereference(hlist_next_rcu(&selem->map_node)),
					 struct bpf_local_storage_elem, map_node);
		if (!selem) {
			/* not found, unlock and go to the next bucket */
			b = &smap->buckets[bucket_id++];
			rcu_read_unlock();
			skip_elems = 0;
			break;
		}
		sk_storage = rcu_dereference(selem->local_storage);
		if (sk_storage) {
			info->skip_elems = skip_elems + count;
			return selem;
		}
		count++;
	}

	for (i = bucket_id; i < n_buckets; i++) {
		b = &smap->buckets[i];
		rcu_read_lock();
		count = 0;
		hlist_for_each_entry_rcu(selem, &b->list, map_node) {
			sk_storage = rcu_dereference(selem->local_storage);
			if (sk_storage && count >= skip_elems) {
				info->bucket_id = i;
				info->skip_elems = count;
				return selem;
			}
			count++;
		}
		rcu_read_unlock();
		skip_elems = 0;
	}

	info->bucket_id = i;
	info->skip_elems = 0;
	return NULL;
}

static void *bpf_sk_storage_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_local_storage_elem *selem;

	selem = bpf_sk_storage_map_seq_find_next(seq->private, NULL);
	if (!selem)
		return NULL;

	if (*pos == 0)
		++*pos;
	return selem;
}

static void *bpf_sk_storage_map_seq_next(struct seq_file *seq, void *v,
					 loff_t *pos)
{
	struct bpf_iter_seq_sk_storage_map_info *info = seq->private;

	++*pos;
	++info->skip_elems;
	return bpf_sk_storage_map_seq_find_next(seq->private, v);
}

struct bpf_iter__bpf_sk_storage_map {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct bpf_map *, map);
	__bpf_md_ptr(struct sock *, sk);
	__bpf_md_ptr(void *, value);
};
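
/* A minimal BPF-side sketch of an iterator program over this context
 * ("struct my_val" is a hypothetical value type; BPF_SEQ_PRINTF comes
 * from tools' bpf_tracing.h):
 *
 *	SEC("iter/bpf_sk_storage_map")
 *	int dump_sk_storage(struct bpf_iter__bpf_sk_storage_map *ctx)
 *	{
 *		struct my_val *v = ctx->value;
 *
 *		if (!ctx->sk || !v)
 *			return 0;
 *		BPF_SEQ_PRINTF(ctx->meta->seq, "cnt: %u\n", v->cnt);
 *		return 0;
 *	}
 */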

DEFINE_BPF_ITER_FUNC(bpf_sk_storage_map, struct bpf_iter_meta *meta,
		     struct bpf_map *map, struct sock *sk,
		     void *value)

static int __bpf_sk_storage_map_seq_show(struct seq_file *seq,
					 struct bpf_local_storage_elem *selem)
{
	struct bpf_iter_seq_sk_storage_map_info *info = seq->private;
	struct bpf_iter__bpf_sk_storage_map ctx = {};
	struct bpf_local_storage *sk_storage;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	int ret = 0;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, selem == NULL);
	if (prog) {
		ctx.meta = &meta;
		ctx.map = info->map;
		if (selem) {
			sk_storage = rcu_dereference(selem->local_storage);
			ctx.sk = sk_storage->owner;
			ctx.value = SDATA(selem)->data;
		}
		ret = bpf_iter_run_prog(prog, &ctx);
	}

	return ret;
}

static int bpf_sk_storage_map_seq_show(struct seq_file *seq, void *v)
{
	return __bpf_sk_storage_map_seq_show(seq, v);
}

static void bpf_sk_storage_map_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	if (!v)
		(void)__bpf_sk_storage_map_seq_show(seq, v);
	else
		rcu_read_unlock();
}

static int bpf_iter_init_sk_storage_map(void *priv_data,
					struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_sk_storage_map_info *seq_info = priv_data;

	bpf_map_inc_with_uref(aux->map);
	seq_info->map = aux->map;
	return 0;
}

static void bpf_iter_fini_sk_storage_map(void *priv_data)
{
	struct bpf_iter_seq_sk_storage_map_info *seq_info = priv_data;

	bpf_map_put_with_uref(seq_info->map);
}

static int bpf_iter_attach_map(struct bpf_prog *prog,
			       union bpf_iter_link_info *linfo,
			       struct bpf_iter_aux_info *aux)
{
	struct bpf_map *map;
	int err = -EINVAL;

	if (!linfo->map.map_fd)
		return -EBADF;

	map = bpf_map_get_with_uref(linfo->map.map_fd);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
		goto put_map;

	if (prog->aux->max_rdwr_access > map->value_size) {
		err = -EACCES;
		goto put_map;
	}

	aux->map = map;
	return 0;

put_map:
	bpf_map_put_with_uref(map);
	return err;
}
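
/* Userspace sketch for attaching the iterator to a specific sk storage
 * map (assumes libbpf; "prog" and "map_fd" are hypothetical):
 *
 *	union bpf_iter_link_info linfo = { .map.map_fd = map_fd };
 *	LIBBPF_OPTS(bpf_iter_attach_opts, opts,
 *		    .link_info = &linfo,
 *		    .link_info_len = sizeof(linfo));
 *
 *	link = bpf_program__attach_iter(prog, &opts);
 */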

static void bpf_iter_detach_map(struct bpf_iter_aux_info *aux)
{
	bpf_map_put_with_uref(aux->map);
}

static const struct seq_operations bpf_sk_storage_map_seq_ops = {
	.start  = bpf_sk_storage_map_seq_start,
	.next   = bpf_sk_storage_map_seq_next,
	.stop   = bpf_sk_storage_map_seq_stop,
	.show   = bpf_sk_storage_map_seq_show,
};

static const struct bpf_iter_seq_info iter_seq_info = {
	.seq_ops		= &bpf_sk_storage_map_seq_ops,
	.init_seq_private	= bpf_iter_init_sk_storage_map,
	.fini_seq_private	= bpf_iter_fini_sk_storage_map,
	.seq_priv_size		= sizeof(struct bpf_iter_seq_sk_storage_map_info),
};

static struct bpf_iter_reg bpf_sk_storage_map_reg_info = {
	.target			= "bpf_sk_storage_map",
	.attach_target		= bpf_iter_attach_map,
	.detach_target		= bpf_iter_detach_map,
	.show_fdinfo		= bpf_iter_map_show_fdinfo,
	.fill_link_info		= bpf_iter_map_fill_link_info,
	.ctx_arg_info_size	= 2,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__bpf_sk_storage_map, sk),
		  PTR_TO_BTF_ID_OR_NULL },
		{ offsetof(struct bpf_iter__bpf_sk_storage_map, value),
		  PTR_TO_BUF | PTR_MAYBE_NULL },
	},
	.seq_info		= &iter_seq_info,
};

static int __init bpf_sk_storage_map_iter_init(void)
{
	bpf_sk_storage_map_reg_info.ctx_arg_info[0].btf_id =
		btf_sock_ids[BTF_SOCK_TYPE_SOCK];
	return bpf_iter_reg_target(&bpf_sk_storage_map_reg_info);
}
late_initcall(bpf_sk_storage_map_iter_init);