net/core/bpf_sk_storage.c (Linux source, revision 015e7b0b0e8e51f7321ec2aafc1d7fc0a8a5536f)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook  */
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/bpf_local_storage.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate_trace.h>

DEFINE_BPF_STORAGE_CACHE(sk_cache);
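
/* Cache sketch (assuming the generic bpf_local_storage layout): sk_cache
 * assigns each sk-storage map one of BPF_LOCAL_STORAGE_CACHE_SIZE slot
 * indexes; a hit in sk->sk_bpf_storage->cache[smap->cache_idx] then lets
 * bpf_sk_storage_lookup() below skip the per-socket list walk.
 */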

static struct bpf_local_storage_data *
bpf_sk_storage_lookup(struct sock *sk, struct bpf_map *map, bool cacheit_lockit)
{
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_map *smap;

	sk_storage =
		rcu_dereference_check(sk->sk_bpf_storage, bpf_rcu_lock_held());
	if (!sk_storage)
		return NULL;

	smap = (struct bpf_local_storage_map *)map;
	return bpf_local_storage_lookup(sk_storage, smap, cacheit_lockit);
}

static int bpf_sk_storage_del(struct sock *sk, struct bpf_map *map)
{
	struct bpf_local_storage_data *sdata;

	sdata = bpf_sk_storage_lookup(sk, map, false);
	if (!sdata)
		return -ENOENT;

	bpf_selem_unlink(SELEM(sdata), false);

	return 0;
}

/* Called by __sk_destruct() & bpf_sk_storage_clone() */
void bpf_sk_storage_free(struct sock *sk)
{
	struct bpf_local_storage *sk_storage;

	rcu_read_lock_dont_migrate();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage)
		goto out;

	bpf_local_storage_destroy(sk_storage);
out:
	rcu_read_unlock_migrate();
}

static void bpf_sk_storage_map_free(struct bpf_map *map)
{
	bpf_local_storage_map_free(map, &sk_cache, NULL);
}

static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
{
	return bpf_local_storage_map_alloc(attr, &sk_cache, false);
}

static int notsupp_get_next_key(struct bpf_map *map, void *key,
				void *next_key)
{
	return -ENOTSUPP;
}

static void *bpf_fd_sk_storage_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_local_storage_data *sdata;
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		sdata = bpf_sk_storage_lookup(sock->sk, map, true);
		sockfd_put(sock);
		return sdata ? sdata->data : NULL;
	}

	return ERR_PTR(err);
}

static long bpf_fd_sk_storage_update_elem(struct bpf_map *map, void *key,
					  void *value, u64 map_flags)
{
	struct bpf_local_storage_data *sdata;
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		sdata = bpf_local_storage_update(
			sock->sk, (struct bpf_local_storage_map *)map, value,
			map_flags, false, GFP_ATOMIC);
		sockfd_put(sock);
		return PTR_ERR_OR_ZERO(sdata);
	}

	return err;
}

static long bpf_fd_sk_storage_delete_elem(struct bpf_map *map, void *key)
{
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		err = bpf_sk_storage_del(sock->sk, map);
		sockfd_put(sock);
		return err;
	}

	return err;
}
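
/* Syscall-side usage sketch for the three map ops above: the lookup/update/
 * delete key for a sk-storage map is a socket file descriptor, not socket
 * bytes.  Roughly, from user space with libbpf and an already-loaded map fd
 * (struct my_val is a hypothetical value type):
 *
 *	int sock_fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct my_val v = { .cnt = 1 };
 *
 *	bpf_map_update_elem(map_fd, &sock_fd, &v, BPF_ANY);
 *	bpf_map_lookup_elem(map_fd, &sock_fd, &v);
 *	bpf_map_delete_elem(map_fd, &sock_fd);
 */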

static struct bpf_local_storage_elem *
bpf_sk_storage_clone_elem(struct sock *newsk,
			  struct bpf_local_storage_map *smap,
			  struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_elem *copy_selem;

	copy_selem = bpf_selem_alloc(smap, newsk, NULL, false, GFP_ATOMIC);
	if (!copy_selem)
		return NULL;

	if (btf_record_has_field(smap->map.record, BPF_SPIN_LOCK))
		copy_map_value_locked(&smap->map, SDATA(copy_selem)->data,
				      SDATA(selem)->data, true);
	else
		copy_map_value(&smap->map, SDATA(copy_selem)->data,
			       SDATA(selem)->data);

	return copy_selem;
}

int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk)
{
	struct bpf_local_storage *new_sk_storage = NULL;
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_elem *selem;
	int ret = 0;

	RCU_INIT_POINTER(newsk->sk_bpf_storage, NULL);

	rcu_read_lock_dont_migrate();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);

	if (!sk_storage || hlist_empty(&sk_storage->list))
		goto out;

	hlist_for_each_entry_rcu(selem, &sk_storage->list, snode) {
		struct bpf_local_storage_elem *copy_selem;
		struct bpf_local_storage_map *smap;
		struct bpf_map *map;

		smap = rcu_dereference(SDATA(selem)->smap);
		if (!(smap->map.map_flags & BPF_F_CLONE))
			continue;

		/* Note that for lockless listeners, adding a new element
		 * here can race with cleanup in bpf_local_storage_map_free().
		 * Try to grab the map refcnt to make sure the map is still
		 * alive and to prevent concurrent removal.
		 */
		map = bpf_map_inc_not_zero(&smap->map);
		if (IS_ERR(map))
			continue;

		copy_selem = bpf_sk_storage_clone_elem(newsk, smap, selem);
		if (!copy_selem) {
			ret = -ENOMEM;
			bpf_map_put(map);
			goto out;
		}

		if (new_sk_storage) {
			bpf_selem_link_map(smap, copy_selem);
			bpf_selem_link_storage_nolock(new_sk_storage, copy_selem);
		} else {
			ret = bpf_local_storage_alloc(newsk, smap, copy_selem, GFP_ATOMIC);
			if (ret) {
				bpf_selem_free(copy_selem, true);
				atomic_sub(smap->elem_size,
					   &newsk->sk_omem_alloc);
				bpf_map_put(map);
				goto out;
			}

			new_sk_storage =
				rcu_dereference(copy_selem->local_storage);
		}
		bpf_map_put(map);
	}

out:
	rcu_read_unlock_migrate();

	/* In case of an error, don't free anything explicitly here; the
	 * caller is responsible for calling bpf_sk_storage_free().
	 */

	return ret;
}
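
/* BPF_F_CLONE sketch: only maps created with BPF_F_CLONE have their elements
 * copied above when a socket is cloned (e.g. a TCP listener producing a
 * child socket).  A BPF-side map definition might look like this (the map
 * name and value type are illustrative):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_SK_STORAGE);
 *		__uint(map_flags, BPF_F_NO_PREALLOC | BPF_F_CLONE);
 *		__type(key, int);
 *		__type(value, struct my_val);
 *	} sk_stg_map SEC(".maps");
 */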

/* *gfp_flags* is a hidden argument provided by the verifier */
BPF_CALL_5(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk,
	   void *, value, u64, flags, gfp_t, gfp_flags)
{
	struct bpf_local_storage_data *sdata;

	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (!sk || !sk_fullsock(sk) || flags > BPF_SK_STORAGE_GET_F_CREATE)
		return (unsigned long)NULL;

	sdata = bpf_sk_storage_lookup(sk, map, true);
	if (sdata)
		return (unsigned long)sdata->data;

	if (flags == BPF_SK_STORAGE_GET_F_CREATE &&
	    /* Cannot add a new elem to a going-away sk.
	     * Otherwise, the new elem may leak (and cause
	     * other memory issues during map destruction).
	     */
	    refcount_inc_not_zero(&sk->sk_refcnt)) {
		sdata = bpf_local_storage_update(
			sk, (struct bpf_local_storage_map *)map, value,
			BPF_NOEXIST, false, gfp_flags);
		/* sk must be a fullsock (guaranteed by the verifier),
		 * so sock_gen_put() is unnecessary.
		 */
		sock_put(sk);
		return IS_ERR(sdata) ?
			(unsigned long)NULL : (unsigned long)sdata->data;
	}

	return (unsigned long)NULL;
}

BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk)
{
	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (!sk || !sk_fullsock(sk))
		return -EINVAL;

	if (refcount_inc_not_zero(&sk->sk_refcnt)) {
		int err;

		err = bpf_sk_storage_del(sk, map);
		sock_put(sk);
		return err;
	}

	return -ENOENT;
}
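
/* BPF-side usage sketch for the two helpers above, from a program type with
 * socket access (e.g. sockops); sk_stg_map and struct my_val are the
 * illustrative names from the BPF_F_CLONE sketch earlier:
 *
 *	struct my_val *v;
 *
 *	v = bpf_sk_storage_get(&sk_stg_map, sk, NULL,
 *			       BPF_SK_STORAGE_GET_F_CREATE);
 *	if (v)
 *		v->cnt++;
 *	...
 *	bpf_sk_storage_delete(&sk_stg_map, sk);
 */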

static int bpf_sk_storage_charge(struct bpf_local_storage_map *smap,
				 void *owner, u32 size)
{
	struct sock *sk = (struct sock *)owner;
	int optmem_max;

	optmem_max = READ_ONCE(sock_net(sk)->core.sysctl_optmem_max);
	/* same check as in sock_kmalloc() */
	if (size <= optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < optmem_max) {
		atomic_add(size, &sk->sk_omem_alloc);
		return 0;
	}

	return -ENOMEM;
}

static void bpf_sk_storage_uncharge(struct bpf_local_storage_map *smap,
				    void *owner, u32 size)
{
	struct sock *sk = owner;

	atomic_sub(size, &sk->sk_omem_alloc);
}

static struct bpf_local_storage __rcu **
bpf_sk_storage_ptr(void *owner)
{
	struct sock *sk = owner;

	return &sk->sk_bpf_storage;
}

const struct bpf_map_ops sk_storage_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = bpf_local_storage_map_alloc_check,
	.map_alloc = bpf_sk_storage_map_alloc,
	.map_free = bpf_sk_storage_map_free,
	.map_get_next_key = notsupp_get_next_key,
	.map_lookup_elem = bpf_fd_sk_storage_lookup_elem,
	.map_update_elem = bpf_fd_sk_storage_update_elem,
	.map_delete_elem = bpf_fd_sk_storage_delete_elem,
	.map_check_btf = bpf_local_storage_map_check_btf,
	.map_btf_id = &bpf_local_storage_map_btf_id[0],
	.map_local_storage_charge = bpf_sk_storage_charge,
	.map_local_storage_uncharge = bpf_sk_storage_uncharge,
	.map_owner_storage_ptr = bpf_sk_storage_ptr,
	.map_mem_usage = bpf_local_storage_map_mem_usage,
};

const struct bpf_func_proto bpf_sk_storage_get_proto = {
	.func		= bpf_sk_storage_get,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_BTF_ID_SOCK_COMMON,
	.arg3_type	= ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type	= ARG_ANYTHING,
};

const struct bpf_func_proto bpf_sk_storage_get_cg_sock_proto = {
	.func		= bpf_sk_storage_get,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_CTX, /* context is 'struct sock' */
	.arg3_type	= ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type	= ARG_ANYTHING,
};

const struct bpf_func_proto bpf_sk_storage_delete_proto = {
	.func		= bpf_sk_storage_delete,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_BTF_ID_SOCK_COMMON,
};

static bool bpf_sk_storage_tracing_allowed(const struct bpf_prog *prog)
{
	if (prog->aux->dst_prog)
		return false;

	/* Ensure the tracing program is not tracing
	 * any bpf_sk_storage*() function while also
	 * using the bpf_sk_storage_(get|delete) helpers.
	 */
	switch (prog->expected_attach_type) {
	case BPF_TRACE_ITER:
	case BPF_TRACE_RAW_TP:
		/* bpf_sk_storage has no tracepoint */
		return true;
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		return !!strncmp(prog->aux->attach_func_name, "bpf_sk_storage",
				 strlen("bpf_sk_storage"));
	default:
		return false;
	}
}
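
/* Example of what the filter above permits: an fentry program on, say,
 * tcp_close() may use the tracing variants of the helpers below, while an
 * fentry program attached to a bpf_sk_storage*() function itself is
 * rejected, so the helpers cannot recurse into the storage code they are
 * tracing.  Illustrative sketch:
 *
 *	SEC("fentry/tcp_close")
 *	int BPF_PROG(trace_close, struct sock *sk)
 *	{
 *		struct my_val *v;
 *
 *		v = bpf_sk_storage_get(&sk_stg_map, sk, NULL, 0);
 *		return 0;
 *	}
 */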

/* *gfp_flags* is a hidden argument provided by the verifier */
BPF_CALL_5(bpf_sk_storage_get_tracing, struct bpf_map *, map, struct sock *, sk,
	   void *, value, u64, flags, gfp_t, gfp_flags)
{
	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (in_hardirq() || in_nmi())
		return (unsigned long)NULL;

	return (unsigned long)____bpf_sk_storage_get(map, sk, value, flags,
						     gfp_flags);
}

BPF_CALL_2(bpf_sk_storage_delete_tracing, struct bpf_map *, map,
	   struct sock *, sk)
{
	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (in_hardirq() || in_nmi())
		return -EPERM;

	return ____bpf_sk_storage_delete(map, sk);
}

const struct bpf_func_proto bpf_sk_storage_get_tracing_proto = {
	.func		= bpf_sk_storage_get_tracing,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_BTF_ID_OR_NULL,
	.arg2_btf_id	= &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
	.arg3_type	= ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type	= ARG_ANYTHING,
	.allowed	= bpf_sk_storage_tracing_allowed,
};

const struct bpf_func_proto bpf_sk_storage_delete_tracing_proto = {
	.func		= bpf_sk_storage_delete_tracing,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_BTF_ID_OR_NULL,
	.arg2_btf_id	= &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
	.allowed	= bpf_sk_storage_tracing_allowed,
};

struct bpf_sk_storage_diag {
	u32 nr_maps;
	struct bpf_map *maps[];
};

/* The reply will be like:
 * INET_DIAG_BPF_SK_STORAGES (nla_nest)
 *	SK_DIAG_BPF_STORAGE (nla_nest)
 *		SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
 *		SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
 *	SK_DIAG_BPF_STORAGE (nla_nest)
 *		SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
 *		SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
 *	....
 */
static int nla_value_size(u32 value_size)
{
	/* SK_DIAG_BPF_STORAGE (nla_nest)
	 *	SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
	 *	SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
	 */
	return nla_total_size(0) + nla_total_size(sizeof(u32)) +
		nla_total_size_64bit(value_size);
}
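
/* Worked example (assuming NLA_HDRLEN == 4): for an 8-byte map value,
 * nla_total_size(0) = 4 for the nest header, nla_total_size(sizeof(u32)) = 8
 * for the map-id attr, and nla_total_size_64bit(8) = 16 (12 for the value
 * attr plus 4 for a possible pad attr), i.e. 28 bytes per map reported.
 */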

void bpf_sk_storage_diag_free(struct bpf_sk_storage_diag *diag)
{
	u32 i;

	if (!diag)
		return;

	for (i = 0; i < diag->nr_maps; i++)
		bpf_map_put(diag->maps[i]);

	kfree(diag);
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_free);

static bool diag_check_dup(const struct bpf_sk_storage_diag *diag,
			   const struct bpf_map *map)
{
	u32 i;

	for (i = 0; i < diag->nr_maps; i++) {
		if (diag->maps[i] == map)
			return true;
	}

	return false;
}

struct bpf_sk_storage_diag *
bpf_sk_storage_diag_alloc(const struct nlattr *nla_stgs)
{
	struct bpf_sk_storage_diag *diag;
	struct nlattr *nla;
	u32 nr_maps = 0;
	int rem, err;

	/* bpf_local_storage_map is currently limited to privileged users
	 * (bpf_capable()), matching the map_alloc_check() side.
	 */
	if (!bpf_capable())
		return ERR_PTR(-EPERM);

	nla_for_each_nested_type(nla, SK_DIAG_BPF_STORAGE_REQ_MAP_FD,
				 nla_stgs, rem) {
		if (nla_len(nla) != sizeof(u32))
			return ERR_PTR(-EINVAL);
		nr_maps++;
	}

	diag = kzalloc(struct_size(diag, maps, nr_maps), GFP_KERNEL);
	if (!diag)
		return ERR_PTR(-ENOMEM);

	nla_for_each_nested_type(nla, SK_DIAG_BPF_STORAGE_REQ_MAP_FD,
				 nla_stgs, rem) {
		int map_fd = nla_get_u32(nla);
		struct bpf_map *map = bpf_map_get(map_fd);

		if (IS_ERR(map)) {
			err = PTR_ERR(map);
			goto err_free;
		}
		if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) {
			bpf_map_put(map);
			err = -EINVAL;
			goto err_free;
		}
		if (diag_check_dup(diag, map)) {
			bpf_map_put(map);
			err = -EEXIST;
			goto err_free;
		}
		diag->maps[diag->nr_maps++] = map;
	}

	return diag;

err_free:
	bpf_sk_storage_diag_free(diag);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_alloc);
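
/* Request-side sketch matching the parser above; for inet_diag the
 * enclosing nest is presumably INET_DIAG_REQ_SK_BPF_STORAGES:
 *
 *	INET_DIAG_REQ_SK_BPF_STORAGES (nla_nest)
 *		SK_DIAG_BPF_STORAGE_REQ_MAP_FD (nla_put_u32)
 *		SK_DIAG_BPF_STORAGE_REQ_MAP_FD (nla_put_u32)
 *		...
 *
 * Each u32 is a map fd in the requesting process.  With no fds at all,
 * nr_maps stays 0 and bpf_sk_storage_diag_put() falls back to dumping
 * every map (see below).
 */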

static int diag_get(struct bpf_local_storage_data *sdata, struct sk_buff *skb)
{
	struct nlattr *nla_stg, *nla_value;
	struct bpf_local_storage_map *smap;

	/* The value cannot exceed the max nlattr payload */
	BUILD_BUG_ON(U16_MAX - NLA_HDRLEN < BPF_LOCAL_STORAGE_MAX_VALUE_SIZE);

	nla_stg = nla_nest_start(skb, SK_DIAG_BPF_STORAGE);
	if (!nla_stg)
		return -EMSGSIZE;

	smap = rcu_dereference(sdata->smap);
	if (nla_put_u32(skb, SK_DIAG_BPF_STORAGE_MAP_ID, smap->map.id))
		goto errout;

	nla_value = nla_reserve_64bit(skb, SK_DIAG_BPF_STORAGE_MAP_VALUE,
				      smap->map.value_size,
				      SK_DIAG_BPF_STORAGE_PAD);
	if (!nla_value)
		goto errout;

	if (btf_record_has_field(smap->map.record, BPF_SPIN_LOCK))
		copy_map_value_locked(&smap->map, nla_data(nla_value),
				      sdata->data, true);
	else
		copy_map_value(&smap->map, nla_data(nla_value), sdata->data);

	nla_nest_end(skb, nla_stg);
	return 0;

errout:
	nla_nest_cancel(skb, nla_stg);
	return -EMSGSIZE;
}

static int bpf_sk_storage_diag_put_all(struct sock *sk, struct sk_buff *skb,
				       int stg_array_type,
				       unsigned int *res_diag_size)
{
	/* stg_array_type (e.g. INET_DIAG_BPF_SK_STORAGES) */
	unsigned int diag_size = nla_total_size(0);
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage_map *smap;
	struct nlattr *nla_stgs;
	unsigned int saved_len;
	int err = 0;

	rcu_read_lock();

	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage || hlist_empty(&sk_storage->list)) {
		rcu_read_unlock();
		return 0;
	}

	nla_stgs = nla_nest_start(skb, stg_array_type);
	if (!nla_stgs)
		/* Continue to learn diag_size */
		err = -EMSGSIZE;

	saved_len = skb->len;
	hlist_for_each_entry_rcu(selem, &sk_storage->list, snode) {
		smap = rcu_dereference(SDATA(selem)->smap);
		diag_size += nla_value_size(smap->map.value_size);

		if (nla_stgs && diag_get(SDATA(selem), skb))
			/* Continue to learn diag_size */
			err = -EMSGSIZE;
	}

	rcu_read_unlock();

	if (nla_stgs) {
		if (saved_len == skb->len)
			nla_nest_cancel(skb, nla_stgs);
		else
			nla_nest_end(skb, nla_stgs);
	}

	if (diag_size == nla_total_size(0)) {
		*res_diag_size = 0;
		return 0;
	}

	*res_diag_size = diag_size;
	return err;
}

int bpf_sk_storage_diag_put(struct bpf_sk_storage_diag *diag,
			    struct sock *sk, struct sk_buff *skb,
			    int stg_array_type,
			    unsigned int *res_diag_size)
{
	/* stg_array_type (e.g. INET_DIAG_BPF_SK_STORAGES) */
	unsigned int diag_size = nla_total_size(0);
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_data *sdata;
	struct nlattr *nla_stgs;
	unsigned int saved_len;
	int err = 0;
	u32 i;

	*res_diag_size = 0;

	/* No map has been specified.  Dump all. */
	if (!diag->nr_maps)
		return bpf_sk_storage_diag_put_all(sk, skb, stg_array_type,
						   res_diag_size);

	rcu_read_lock();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage || hlist_empty(&sk_storage->list)) {
		rcu_read_unlock();
		return 0;
	}

	nla_stgs = nla_nest_start(skb, stg_array_type);
	if (!nla_stgs)
		/* Continue to learn diag_size */
		err = -EMSGSIZE;

	saved_len = skb->len;
	for (i = 0; i < diag->nr_maps; i++) {
		sdata = bpf_local_storage_lookup(sk_storage,
				(struct bpf_local_storage_map *)diag->maps[i],
				false);

		if (!sdata)
			continue;

		diag_size += nla_value_size(diag->maps[i]->value_size);

		if (nla_stgs && diag_get(sdata, skb))
			/* Continue to learn diag_size */
			err = -EMSGSIZE;
	}
	rcu_read_unlock();

	if (nla_stgs) {
		if (saved_len == skb->len)
			nla_nest_cancel(skb, nla_stgs);
		else
			nla_nest_end(skb, nla_stgs);
	}

	if (diag_size == nla_total_size(0)) {
		*res_diag_size = 0;
		return 0;
	}

	*res_diag_size = diag_size;
	return err;
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_put);

struct bpf_iter_seq_sk_storage_map_info {
	struct bpf_map *map;
	unsigned int bucket_id;
	unsigned int skip_elems;
};

static struct bpf_local_storage_elem *
bpf_sk_storage_map_seq_find_next(struct bpf_iter_seq_sk_storage_map_info *info,
				 struct bpf_local_storage_elem *prev_selem)
	__acquires(RCU) __releases(RCU)
{
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_elem *selem;
	u32 skip_elems = info->skip_elems;
	struct bpf_local_storage_map *smap;
	u32 bucket_id = info->bucket_id;
	u32 i, count, n_buckets;
	struct bpf_local_storage_map_bucket *b;

	smap = (struct bpf_local_storage_map *)info->map;
	n_buckets = 1U << smap->bucket_log;
	if (bucket_id >= n_buckets)
		return NULL;

	/* try to find next selem in the same bucket */
	selem = prev_selem;
	count = 0;
	while (selem) {
		selem = hlist_entry_safe(rcu_dereference(hlist_next_rcu(&selem->map_node)),
					 struct bpf_local_storage_elem, map_node);
		if (!selem) {
			/* not found, unlock and go to the next bucket */
			b = &smap->buckets[bucket_id++];
			rcu_read_unlock();
			skip_elems = 0;
			break;
		}
		sk_storage = rcu_dereference(selem->local_storage);
		if (sk_storage) {
			info->skip_elems = skip_elems + count;
			return selem;
		}
		count++;
	}

	for (i = bucket_id; i < n_buckets; i++) {
		b = &smap->buckets[i];
		rcu_read_lock();
		count = 0;
		hlist_for_each_entry_rcu(selem, &b->list, map_node) {
			sk_storage = rcu_dereference(selem->local_storage);
			if (sk_storage && count >= skip_elems) {
				info->bucket_id = i;
				info->skip_elems = count;
				return selem;
			}
			count++;
		}
		rcu_read_unlock();
		skip_elems = 0;
	}

	info->bucket_id = i;
	info->skip_elems = 0;
	return NULL;
}

static void *bpf_sk_storage_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_local_storage_elem *selem;

	selem = bpf_sk_storage_map_seq_find_next(seq->private, NULL);
	if (!selem)
		return NULL;

	if (*pos == 0)
		++*pos;
	return selem;
}

static void *bpf_sk_storage_map_seq_next(struct seq_file *seq, void *v,
					 loff_t *pos)
{
	struct bpf_iter_seq_sk_storage_map_info *info = seq->private;

	++*pos;
	++info->skip_elems;
	return bpf_sk_storage_map_seq_find_next(seq->private, v);
}

struct bpf_iter__bpf_sk_storage_map {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct bpf_map *, map);
	__bpf_md_ptr(struct sock *, sk);
	__bpf_md_ptr(void *, value);
};

DEFINE_BPF_ITER_FUNC(bpf_sk_storage_map, struct bpf_iter_meta *meta,
		     struct bpf_map *map, struct sock *sk,
		     void *value)

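/* Iterator usage sketch, patterned after the selftests (names are
 * illustrative): an iter program attaches to this target with a map fd in
 * bpf_iter_link_info and is invoked once per (sk, value) pair:
 *
 *	SEC("iter/bpf_sk_storage_map")
 *	int dump_sk_storage(struct bpf_iter__bpf_sk_storage_map *ctx)
 *	{
 *		struct sock *sk = ctx->sk;
 *		struct my_val *v = ctx->value;
 *
 *		if (!sk || !v)
 *			return 0;
 *		BPF_SEQ_PRINTF(ctx->meta->seq, "%d\n", v->cnt);
 *		return 0;
 *	}
 */
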
static int __bpf_sk_storage_map_seq_show(struct seq_file *seq,
					 struct bpf_local_storage_elem *selem)
{
	struct bpf_iter_seq_sk_storage_map_info *info = seq->private;
	struct bpf_iter__bpf_sk_storage_map ctx = {};
	struct bpf_local_storage *sk_storage;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	int ret = 0;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, selem == NULL);
	if (prog) {
		ctx.meta = &meta;
		ctx.map = info->map;
		if (selem) {
			sk_storage = rcu_dereference(selem->local_storage);
			ctx.sk = sk_storage->owner;
			ctx.value = SDATA(selem)->data;
		}
		ret = bpf_iter_run_prog(prog, &ctx);
	}

	return ret;
}

static int bpf_sk_storage_map_seq_show(struct seq_file *seq, void *v)
{
	return __bpf_sk_storage_map_seq_show(seq, v);
}

static void bpf_sk_storage_map_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	if (!v)
		(void)__bpf_sk_storage_map_seq_show(seq, v);
	else
		rcu_read_unlock();
}

static int bpf_iter_init_sk_storage_map(void *priv_data,
					struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_sk_storage_map_info *seq_info = priv_data;

	bpf_map_inc_with_uref(aux->map);
	seq_info->map = aux->map;
	return 0;
}

static void bpf_iter_fini_sk_storage_map(void *priv_data)
{
	struct bpf_iter_seq_sk_storage_map_info *seq_info = priv_data;

	bpf_map_put_with_uref(seq_info->map);
}

static int bpf_iter_attach_map(struct bpf_prog *prog,
			       union bpf_iter_link_info *linfo,
			       struct bpf_iter_aux_info *aux)
{
	struct bpf_map *map;
	int err = -EINVAL;

	if (!linfo->map.map_fd)
		return -EBADF;

	map = bpf_map_get_with_uref(linfo->map.map_fd);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
		goto put_map;

	if (prog->aux->max_rdwr_access > map->value_size) {
		err = -EACCES;
		goto put_map;
	}

	aux->map = map;
	return 0;

put_map:
	bpf_map_put_with_uref(map);
	return err;
}

static void bpf_iter_detach_map(struct bpf_iter_aux_info *aux)
{
	bpf_map_put_with_uref(aux->map);
}

static const struct seq_operations bpf_sk_storage_map_seq_ops = {
	.start  = bpf_sk_storage_map_seq_start,
	.next   = bpf_sk_storage_map_seq_next,
	.stop   = bpf_sk_storage_map_seq_stop,
	.show   = bpf_sk_storage_map_seq_show,
};

static const struct bpf_iter_seq_info iter_seq_info = {
	.seq_ops		= &bpf_sk_storage_map_seq_ops,
	.init_seq_private	= bpf_iter_init_sk_storage_map,
	.fini_seq_private	= bpf_iter_fini_sk_storage_map,
	.seq_priv_size		= sizeof(struct bpf_iter_seq_sk_storage_map_info),
};

static struct bpf_iter_reg bpf_sk_storage_map_reg_info = {
	.target			= "bpf_sk_storage_map",
	.attach_target		= bpf_iter_attach_map,
	.detach_target		= bpf_iter_detach_map,
	.show_fdinfo		= bpf_iter_map_show_fdinfo,
	.fill_link_info		= bpf_iter_map_fill_link_info,
	.ctx_arg_info_size	= 2,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__bpf_sk_storage_map, sk),
		  PTR_TO_BTF_ID_OR_NULL },
		{ offsetof(struct bpf_iter__bpf_sk_storage_map, value),
		  PTR_TO_BUF | PTR_MAYBE_NULL },
	},
	.seq_info		= &iter_seq_info,
};

static int __init bpf_sk_storage_map_iter_init(void)
{
	bpf_sk_storage_map_reg_info.ctx_arg_info[0].btf_id =
		btf_sock_ids[BTF_SOCK_TYPE_SOCK];
	return bpf_iter_reg_target(&bpf_sk_storage_map_reg_info);
}
late_initcall(bpf_sk_storage_map_iter_init);