xref: /linux/kernel/bpf/bpf_local_storage.c (revision bbefef2f07080cd502a93cb1c529e1c8a6c4ac8e)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook  */
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/bpf_local_storage.h>
#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>
#include <linux/rcupdate_wait.h>

#define BPF_LOCAL_STORAGE_CREATE_FLAG_MASK (BPF_F_NO_PREALLOC | BPF_F_CLONE)

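/* Pick the bucket that serializes map-side link/unlink of a selem.  The
 * selem pointer itself is hashed, so elems spread across the map's
 * 2^bucket_log buckets and b->lock only guards b->list manipulation,
 * never the stored data.
 */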
static struct bpf_local_storage_map_bucket *
select_bucket(struct bpf_local_storage_map *smap,
	      struct bpf_local_storage_elem *selem)
{
	return &smap->buckets[hash_ptr(selem, smap->bucket_log)];
}

static int mem_charge(struct bpf_local_storage_map *smap, void *owner, u32 size)
{
	struct bpf_map *map = &smap->map;

	if (!map->ops->map_local_storage_charge)
		return 0;

	return map->ops->map_local_storage_charge(smap, owner, size);
}

static void mem_uncharge(struct bpf_local_storage_map *smap, void *owner,
			 u32 size)
{
	struct bpf_map *map = &smap->map;

	if (map->ops->map_local_storage_uncharge)
		map->ops->map_local_storage_uncharge(smap, owner, size);
}

static struct bpf_local_storage __rcu **
owner_storage(struct bpf_local_storage_map *smap, void *owner)
{
	struct bpf_map *map = &smap->map;

	return map->ops->map_owner_storage_ptr(owner);
}

static bool selem_linked_to_storage_lockless(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed_lockless(&selem->snode);
}

static bool selem_linked_to_storage(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed(&selem->snode);
}

static bool selem_linked_to_map_lockless(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed_lockless(&selem->map_node);
}

static bool selem_linked_to_map(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed(&selem->map_node);
}

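/* Allocate a zeroed selem sized for this map's value and optionally charge
 * it to the owner's memory accounting.  On allocation failure any charge
 * taken is rolled back, so a NULL return leaves no accounting imbalance.
 */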
struct bpf_local_storage_elem *
bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
		void *value, bool charge_mem, gfp_t gfp_flags)
{
	struct bpf_local_storage_elem *selem;

	if (charge_mem && mem_charge(smap, owner, smap->elem_size))
		return NULL;

	selem = bpf_map_kzalloc(&smap->map, smap->elem_size,
				gfp_flags | __GFP_NOWARN);
	if (selem) {
		if (value)
			copy_map_value(&smap->map, SDATA(selem)->data, value);
		return selem;
	}

	if (charge_mem)
		mem_uncharge(smap, owner, smap->elem_size);

	return NULL;
}

void bpf_local_storage_free_rcu(struct rcu_head *rcu)
{
	struct bpf_local_storage *local_storage;

	/* If RCU Tasks Trace grace period implies RCU grace period, do
	 * kfree(), else do kfree_rcu().
	 */
	local_storage = container_of(rcu, struct bpf_local_storage, rcu);
	if (rcu_trace_implies_rcu_gp())
		kfree(local_storage);
	else
		kfree_rcu(local_storage, rcu);
}

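/* Counterpart of bpf_local_storage_free_rcu() for a single selem: it runs
 * after an RCU Tasks Trace grace period (see the call_rcu_tasks_trace()
 * callers below) and chains a regular RCU grace period via kfree_rcu()
 * unless the former already implies the latter.
 */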
static void bpf_selem_free_rcu(struct rcu_head *rcu)
{
	struct bpf_local_storage_elem *selem;

	selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
	if (rcu_trace_implies_rcu_gp())
		kfree(selem);
	else
		kfree_rcu(selem, rcu);
}

/* local_storage->lock must be held and selem->local_storage == local_storage.
 * The caller must ensure selem->smap is still valid to be
 * dereferenced for its smap->elem_size and smap->cache_idx.
 */
static bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
					    struct bpf_local_storage_elem *selem,
					    bool uncharge_mem, bool use_trace_rcu)
{
	struct bpf_local_storage_map *smap;
	bool free_local_storage;
	void *owner;

	smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
	owner = local_storage->owner;

	/* All uncharging on the owner must be done first.
	 * The owner may be freed once the last selem is unlinked
	 * from local_storage.
	 */
	if (uncharge_mem)
		mem_uncharge(smap, owner, smap->elem_size);

	free_local_storage = hlist_is_singular_node(&selem->snode,
						    &local_storage->list);
	if (free_local_storage) {
		mem_uncharge(smap, owner, sizeof(struct bpf_local_storage));
		local_storage->owner = NULL;

		/* After this RCU_INIT, owner may be freed and cannot be used */
		RCU_INIT_POINTER(*owner_storage(smap, owner), NULL);

		/* local_storage is not freed now.  local_storage->lock is
		 * still held and raw_spin_unlock_irqrestore(&local_storage->lock)
		 * will be done by the caller.
		 *
		 * Although the unlock will be done under
		 * rcu_read_lock(), it is more intuitive to
		 * read if the freeing of the storage is done
		 * after raw_spin_unlock_irqrestore(&local_storage->lock).
		 *
		 * Hence, a "bool free_local_storage" is returned
		 * to the caller, which then frees the storage after
		 * all the RCU grace periods have expired.
		 */
	}
	hlist_del_init_rcu(&selem->snode);
	if (rcu_access_pointer(local_storage->cache[smap->cache_idx]) ==
	    SDATA(selem))
		RCU_INIT_POINTER(local_storage->cache[smap->cache_idx], NULL);

	if (use_trace_rcu)
		call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_rcu);
	else
		kfree_rcu(selem, rcu);

	return free_local_storage;
}

179 
180 static void __bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem,
181 				       bool use_trace_rcu)
182 {
183 	struct bpf_local_storage *local_storage;
184 	bool free_local_storage = false;
185 	unsigned long flags;
186 
187 	if (unlikely(!selem_linked_to_storage_lockless(selem)))
188 		/* selem has already been unlinked from sk */
189 		return;
190 
191 	local_storage = rcu_dereference_check(selem->local_storage,
192 					      bpf_rcu_lock_held());
193 	raw_spin_lock_irqsave(&local_storage->lock, flags);
194 	if (likely(selem_linked_to_storage(selem)))
195 		free_local_storage = bpf_selem_unlink_storage_nolock(
196 			local_storage, selem, true, use_trace_rcu);
197 	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
198 
199 	if (free_local_storage) {
200 		if (use_trace_rcu)
201 			call_rcu_tasks_trace(&local_storage->rcu,
202 				     bpf_local_storage_free_rcu);
203 		else
204 			kfree_rcu(local_storage, rcu);
205 	}
206 }
207 
208 void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
209 				   struct bpf_local_storage_elem *selem)
210 {
211 	RCU_INIT_POINTER(selem->local_storage, local_storage);
212 	hlist_add_head_rcu(&selem->snode, &local_storage->list);
213 }
214 
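/* Unlink selem from its map bucket under b->lock.  The lockless check is
 * only an optimization to skip the lock for an already-unlinked selem;
 * the authoritative re-check happens under b->lock, so concurrent
 * unlinkers cannot double-remove the node.
 */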
void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_map *smap;
	struct bpf_local_storage_map_bucket *b;
	unsigned long flags;

	if (unlikely(!selem_linked_to_map_lockless(selem)))
		/* selem has already been unlinked from smap */
		return;

	smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
	b = select_bucket(smap, selem);
	raw_spin_lock_irqsave(&b->lock, flags);
	if (likely(selem_linked_to_map(selem)))
		hlist_del_init_rcu(&selem->map_node);
	raw_spin_unlock_irqrestore(&b->lock, flags);
}

void bpf_selem_link_map(struct bpf_local_storage_map *smap,
			struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_map_bucket *b = select_bucket(smap, selem);
	unsigned long flags;

	raw_spin_lock_irqsave(&b->lock, flags);
	RCU_INIT_POINTER(SDATA(selem)->smap, smap);
	hlist_add_head_rcu(&selem->map_node, &b->list);
	raw_spin_unlock_irqrestore(&b->lock, flags);
}

void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool use_trace_rcu)
{
	/* Always unlink from the map before unlinking from local_storage
	 * because selem will be freed after it has been successfully
	 * unlinked from the local_storage.
	 */
	bpf_selem_unlink_map(selem);
	__bpf_selem_unlink_storage(selem, use_trace_rcu);
}

/* If cacheit_lockit is false, this lookup function is lockless */
struct bpf_local_storage_data *
bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
			 struct bpf_local_storage_map *smap,
			 bool cacheit_lockit)
{
	struct bpf_local_storage_data *sdata;
	struct bpf_local_storage_elem *selem;

	/* Fast path (cache hit) */
	sdata = rcu_dereference_check(local_storage->cache[smap->cache_idx],
				      bpf_rcu_lock_held());
	if (sdata && rcu_access_pointer(sdata->smap) == smap)
		return sdata;

	/* Slow path (cache miss) */
	hlist_for_each_entry_rcu(selem, &local_storage->list, snode,
				  rcu_read_lock_trace_held())
		if (rcu_access_pointer(SDATA(selem)->smap) == smap)
			break;

	if (!selem)
		return NULL;

	sdata = SDATA(selem);
	if (cacheit_lockit) {
		unsigned long flags;

		/* The spinlock is needed to avoid racing with a
		 * parallel delete.  Otherwise, publishing an already
		 * deleted sdata to the cache would lead to a use-after-free
		 * in the next bpf_local_storage_lookup().
		 */
		raw_spin_lock_irqsave(&local_storage->lock, flags);
		if (selem_linked_to_storage(selem))
			rcu_assign_pointer(local_storage->cache[smap->cache_idx],
					   sdata);
		raw_spin_unlock_irqrestore(&local_storage->lock, flags);
	}

	return sdata;
}
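
/* A minimal caller sketch (hypothetical, mirroring what an owner-specific
 * wrapper such as the sk_storage lookup op would do; the names here are
 * illustrative only):
 *
 *	sdata = bpf_local_storage_lookup(local_storage, smap, true);
 *	return sdata ? sdata->data : NULL;
 *
 * Passing cacheit_lockit == true lets a slow-path hit be promoted into
 * the smap->cache_idx slot so the next lookup takes the fast path.
 */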

static int check_flags(const struct bpf_local_storage_data *old_sdata,
		       u64 map_flags)
{
	if (old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
		/* elem already exists */
		return -EEXIST;

	if (!old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
		/* elem doesn't exist, cannot update it */
		return -ENOENT;

	return 0;
}
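
/* In short: BPF_NOEXIST demands a pure insert, BPF_EXIST demands a pure
 * update, and BPF_ANY (0) accepts either.  BPF_F_LOCK is masked out first
 * because it only selects the locked value-copy path; it does not change
 * the existence rules.
 */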

int bpf_local_storage_alloc(void *owner,
			    struct bpf_local_storage_map *smap,
			    struct bpf_local_storage_elem *first_selem,
			    gfp_t gfp_flags)
{
	struct bpf_local_storage *prev_storage, *storage;
	struct bpf_local_storage **owner_storage_ptr;
	int err;

	err = mem_charge(smap, owner, sizeof(*storage));
	if (err)
		return err;

	storage = bpf_map_kzalloc(&smap->map, sizeof(*storage),
				  gfp_flags | __GFP_NOWARN);
	if (!storage) {
		err = -ENOMEM;
		goto uncharge;
	}

	INIT_HLIST_HEAD(&storage->list);
	raw_spin_lock_init(&storage->lock);
	storage->owner = owner;

	bpf_selem_link_storage_nolock(storage, first_selem);
	bpf_selem_link_map(smap, first_selem);

	owner_storage_ptr =
		(struct bpf_local_storage **)owner_storage(smap, owner);
	/* Publish storage to the owner.
	 * Instead of using any lock of the kernel object (i.e. owner),
	 * cmpxchg will work with any kernel object regardless of what
	 * the running context is (bh, irq, etc.).
	 *
	 * From now on, the owner->storage pointer (e.g. sk->sk_bpf_storage)
	 * is protected by the storage->lock.  Hence, when freeing
	 * the owner->storage, the storage->lock must be held before
	 * setting owner->storage ptr to NULL.
	 */
	prev_storage = cmpxchg(owner_storage_ptr, NULL, storage);
	if (unlikely(prev_storage)) {
		bpf_selem_unlink_map(first_selem);
		err = -EAGAIN;
		goto uncharge;

		/* Note that even though first_selem was linked to smap's
		 * bucket->list, first_selem can be freed immediately
		 * (instead of kfree_rcu) because
		 * bpf_local_storage_map_free() does a
		 * synchronize_rcu_mult (waiting for both sleepable and
		 * normal programs) before walking the bucket->list.
		 * Hence, no one is accessing selem from the
		 * bucket->list under rcu_read_lock().
		 */
	}

	return 0;

uncharge:
	kfree(storage);
	mem_uncharge(smap, owner, sizeof(*storage));
	return err;
}
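
/* Only one caller can win the cmpxchg that publishes storage for a given
 * owner; a loser gets -EAGAIN back (e.g. bpf_local_storage_update()
 * returns it to its caller rather than retrying internally).
 */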

/* The owner (e.g. sk) cannot be going away while a new elem is being
 * linked to owner->storage (i.e. sk->sk_refcnt cannot be 0).  Otherwise,
 * the elem would leak (and cause other memory issues) during map
 * destruction.
 */
struct bpf_local_storage_data *
bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
			 void *value, u64 map_flags, gfp_t gfp_flags)
{
	struct bpf_local_storage_data *old_sdata = NULL;
	struct bpf_local_storage_elem *selem = NULL;
	struct bpf_local_storage *local_storage;
	unsigned long flags;
	int err;

	/* BPF_EXIST and BPF_NOEXIST cannot be both set */
	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST) ||
	    /* BPF_F_LOCK can only be used in a value with spin_lock */
	    unlikely((map_flags & BPF_F_LOCK) &&
		     !btf_record_has_field(smap->map.record, BPF_SPIN_LOCK)))
		return ERR_PTR(-EINVAL);

	if (gfp_flags == GFP_KERNEL && (map_flags & ~BPF_F_LOCK) != BPF_NOEXIST)
		return ERR_PTR(-EINVAL);

	local_storage = rcu_dereference_check(*owner_storage(smap, owner),
					      bpf_rcu_lock_held());
	if (!local_storage || hlist_empty(&local_storage->list)) {
		/* Very first elem for the owner */
		err = check_flags(NULL, map_flags);
		if (err)
			return ERR_PTR(err);

		selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
		if (!selem)
			return ERR_PTR(-ENOMEM);

		err = bpf_local_storage_alloc(owner, smap, selem, gfp_flags);
		if (err) {
			kfree(selem);
			mem_uncharge(smap, owner, smap->elem_size);
			return ERR_PTR(err);
		}

		return SDATA(selem);
	}

	if ((map_flags & BPF_F_LOCK) && !(map_flags & BPF_NOEXIST)) {
		/* Hoping to find an old_sdata to do inline update
		 * such that it can avoid taking the local_storage->lock
		 * and changing the lists.
		 */
		old_sdata =
			bpf_local_storage_lookup(local_storage, smap, false);
		err = check_flags(old_sdata, map_flags);
		if (err)
			return ERR_PTR(err);
		if (old_sdata && selem_linked_to_storage_lockless(SELEM(old_sdata))) {
			copy_map_value_locked(&smap->map, old_sdata->data,
					      value, false);
			return old_sdata;
		}
	}

	if (gfp_flags == GFP_KERNEL) {
		selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
		if (!selem)
			return ERR_PTR(-ENOMEM);
	}

	raw_spin_lock_irqsave(&local_storage->lock, flags);

	/* Recheck local_storage->list under local_storage->lock */
	if (unlikely(hlist_empty(&local_storage->list))) {
		/* A parallel del is happening and local_storage is going
		 * away.  It has just been checked before, so very
		 * unlikely.  Return instead of retry to keep things
		 * simple.
		 */
		err = -EAGAIN;
		goto unlock_err;
	}

	old_sdata = bpf_local_storage_lookup(local_storage, smap, false);
	err = check_flags(old_sdata, map_flags);
	if (err)
		goto unlock_err;

	if (old_sdata && (map_flags & BPF_F_LOCK)) {
		copy_map_value_locked(&smap->map, old_sdata->data, value,
				      false);
		selem = SELEM(old_sdata);
		goto unlock;
	}

	if (gfp_flags != GFP_KERNEL) {
		/* local_storage->lock is held.  Hence, we are sure
		 * we can unlink and uncharge the old_sdata successfully
		 * later.  Hence, instead of charging the new selem now
		 * and then uncharging the old selem later (which may cause
		 * a potential but unnecessary charge failure), avoid taking
		 * a charge at all here (the "!old_sdata" check) and the
		 * old_sdata will not be uncharged later during
		 * bpf_selem_unlink_storage_nolock().
		 */
		selem = bpf_selem_alloc(smap, owner, value, !old_sdata, gfp_flags);
		if (!selem) {
			err = -ENOMEM;
			goto unlock_err;
		}
	}

	/* First, link the new selem to the map */
	bpf_selem_link_map(smap, selem);

	/* Second, link (and publish) the new selem to local_storage */
	bpf_selem_link_storage_nolock(local_storage, selem);

	/* Third, remove the old selem, SELEM(old_sdata) */
	if (old_sdata) {
		bpf_selem_unlink_map(SELEM(old_sdata));
		bpf_selem_unlink_storage_nolock(local_storage, SELEM(old_sdata),
						false, true);
	}

unlock:
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
	return SDATA(selem);

unlock_err:
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
	if (selem) {
		mem_uncharge(smap, owner, smap->elem_size);
		kfree(selem);
	}
	return ERR_PTR(err);
}
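
/* A minimal map-op sketch (hypothetical; real owner-specific wrappers,
 * e.g. for task or sk storage, add owner refcounting and busy checks):
 *
 *	sdata = bpf_local_storage_update(owner, smap, value, map_flags,
 *					 GFP_ATOMIC);
 *	return PTR_ERR_OR_ZERO(sdata);
 *
 * GFP_KERNEL is only accepted together with BPF_NOEXIST (see the check at
 * the top) because the sleepable allocation is then done up front, before
 * local_storage->lock is taken.
 */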

static u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache)
{
	u64 min_usage = U64_MAX;
	u16 i, res = 0;

	spin_lock(&cache->idx_lock);

	for (i = 0; i < BPF_LOCAL_STORAGE_CACHE_SIZE; i++) {
		if (cache->idx_usage_counts[i] < min_usage) {
			min_usage = cache->idx_usage_counts[i];
			res = i;

			/* Found a free cache_idx */
			if (!min_usage)
				break;
		}
	}
	cache->idx_usage_counts[res]++;

	spin_unlock(&cache->idx_lock);

	return res;
}

static void bpf_local_storage_cache_idx_free(struct bpf_local_storage_cache *cache,
					     u16 idx)
{
	spin_lock(&cache->idx_lock);
	cache->idx_usage_counts[idx]--;
	spin_unlock(&cache->idx_lock);
}
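
/* Each map claims the least-used slot in the owner-side lookup cache
 * (BPF_LOCAL_STORAGE_CACHE_SIZE slots, 16 at the time of writing).  With
 * more maps than slots, slots are simply shared; correctness never
 * depends on the cache because bpf_local_storage_lookup() falls back to
 * walking local_storage->list on a miss.
 */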

int bpf_local_storage_map_alloc_check(union bpf_attr *attr)
{
	if (attr->map_flags & ~BPF_LOCAL_STORAGE_CREATE_FLAG_MASK ||
	    !(attr->map_flags & BPF_F_NO_PREALLOC) ||
	    attr->max_entries ||
	    attr->key_size != sizeof(int) || !attr->value_size ||
	    /* Enforce BTF for userspace sk dumping */
	    !attr->btf_key_type_id || !attr->btf_value_type_id)
		return -EINVAL;

	if (!bpf_capable())
		return -EPERM;

	if (attr->value_size > BPF_LOCAL_STORAGE_MAX_VALUE_SIZE)
		return -E2BIG;

	return 0;
}

static struct bpf_local_storage_map *__bpf_local_storage_map_alloc(union bpf_attr *attr)
{
	struct bpf_local_storage_map *smap;
	unsigned int i;
	u32 nbuckets;

	smap = bpf_map_area_alloc(sizeof(*smap), NUMA_NO_NODE);
	if (!smap)
		return ERR_PTR(-ENOMEM);
	bpf_map_init_from_attr(&smap->map, attr);

	nbuckets = roundup_pow_of_two(num_possible_cpus());
	/* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
	nbuckets = max_t(u32, 2, nbuckets);
	smap->bucket_log = ilog2(nbuckets);

	smap->buckets = bpf_map_kvcalloc(&smap->map, sizeof(*smap->buckets),
					 nbuckets, GFP_USER | __GFP_NOWARN);
	if (!smap->buckets) {
		bpf_map_area_free(smap);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < nbuckets; i++) {
		INIT_HLIST_HEAD(&smap->buckets[i].list);
		raw_spin_lock_init(&smap->buckets[i].lock);
	}

	smap->elem_size = offsetof(struct bpf_local_storage_elem,
				   sdata.data[attr->value_size]);

	return smap;
}
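
/* The elem_size computation reflects the inline layout: one allocation
 * holds the selem header followed by the map value itself, so
 * SDATA(selem)->data reaches the value with no extra indirection:
 *
 *	+------------------------------+ <- selem
 *	| list nodes, rcu head, ...    |
 *	| sdata.smap                   |
 *	| sdata.data[attr->value_size] | <- copy_map_value() target
 *	+------------------------------+
 */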

int bpf_local_storage_map_check_btf(const struct bpf_map *map,
				    const struct btf *btf,
				    const struct btf_type *key_type,
				    const struct btf_type *value_type)
{
	u32 int_data;

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}
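
/* The key must be exactly a plain 32-bit integer in BTF: kind INT, 32
 * bits wide, zero bit-offset.  E.g. an "int" key passes, while a 64-bit
 * or bitfield-typed key is rejected with -EINVAL.
 */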

bool bpf_local_storage_unlink_nolock(struct bpf_local_storage *local_storage)
{
	struct bpf_local_storage_elem *selem;
	bool free_storage = false;
	struct hlist_node *n;

	/* Neither the bpf_prog nor the bpf_map's syscall
	 * could be modifying the local_storage->list now.
	 * Thus, no elem can be added to or deleted from the
	 * local_storage->list by the bpf_prog or by the bpf_map's syscall.
	 *
	 * It is racing with bpf_local_storage_map_free() alone
	 * when unlinking elem from the local_storage->list and
	 * the map's bucket->list.
	 */
	hlist_for_each_entry_safe(selem, n, &local_storage->list, snode) {
		/* Always unlink from map before unlinking from
		 * local_storage.
		 */
		bpf_selem_unlink_map(selem);
		/* bpf_selem_unlink_storage_nolock() returns true only
		 * when the local_storage list has a single element.
		 * The loop removes all local storage, so the last
		 * iteration will set free_storage to true.
		 */
		free_storage = bpf_selem_unlink_storage_nolock(
			local_storage, selem, false, false);
	}

	return free_storage;
}

struct bpf_map *
bpf_local_storage_map_alloc(union bpf_attr *attr,
			    struct bpf_local_storage_cache *cache)
{
	struct bpf_local_storage_map *smap;

	smap = __bpf_local_storage_map_alloc(attr);
	if (IS_ERR(smap))
		return ERR_CAST(smap);

	smap->cache_idx = bpf_local_storage_cache_idx_get(cache);
	return &smap->map;
}
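
/* Owner-specific front ends are expected to wrap this alloc/free pair,
 * roughly (a sketch; my_owner_cache and my_busy_counter are hypothetical
 * statics that a wrapper such as the sk or task storage code would own):
 *
 *	map = bpf_local_storage_map_alloc(attr, &my_owner_cache);
 *	...
 *	bpf_local_storage_map_free(map, &my_owner_cache, &my_busy_counter);
 */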

void bpf_local_storage_map_free(struct bpf_map *map,
				struct bpf_local_storage_cache *cache,
				int __percpu *busy_counter)
{
	struct bpf_local_storage_map_bucket *b;
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage_map *smap;
	unsigned int i;

	smap = (struct bpf_local_storage_map *)map;
	bpf_local_storage_cache_idx_free(cache, smap->cache_idx);

	/* Note that this map might be concurrently cloned from
	 * bpf_sk_storage_clone. Wait for any existing bpf_sk_storage_clone
	 * RCU read section to finish before proceeding. New RCU
	 * read sections should be prevented via bpf_map_inc_not_zero.
	 */
	synchronize_rcu();

	/* bpf prog and the userspace can no longer access this map
	 * now.  No new selem (of this map) can be added
	 * to the owner->storage or to the map bucket's list.
	 *
	 * The elem of this map can be cleaned up here
	 * or when the storage is freed e.g.
	 * by bpf_sk_storage_free() during __sk_destruct().
	 */
	for (i = 0; i < (1U << smap->bucket_log); i++) {
		b = &smap->buckets[i];

		rcu_read_lock();
		/* No one is adding to b->list now */
		while ((selem = hlist_entry_safe(
				rcu_dereference_raw(hlist_first_rcu(&b->list)),
				struct bpf_local_storage_elem, map_node))) {
			if (busy_counter) {
				migrate_disable();
				this_cpu_inc(*busy_counter);
			}
			bpf_selem_unlink(selem, false);
			if (busy_counter) {
				this_cpu_dec(*busy_counter);
				migrate_enable();
			}
			cond_resched_rcu();
		}
		rcu_read_unlock();
	}

	/* While freeing the storage we may still need to access the map.
	 *
	 * e.g. when bpf_sk_storage_free() has unlinked selem from the map
	 * which then made the above while((selem = ...)) loop
	 * exit immediately.
	 *
	 * However, while freeing the storage one still needs to access the
	 * smap->elem_size to do the uncharging in
	 * bpf_selem_unlink_storage_nolock().
	 *
	 * Hence, wait another rcu grace period for the storage to be freed.
	 */
	synchronize_rcu();

	kvfree(smap->buckets);
	bpf_map_area_free(smap);
}
729