xref: /linux/kernel/bpf/bpf_local_storage.c (revision d0d106a2bd21499901299160744e5fe9f4c83ddb)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019 Facebook  */
3 #include <linux/rculist.h>
4 #include <linux/list.h>
5 #include <linux/hash.h>
6 #include <linux/types.h>
7 #include <linux/spinlock.h>
8 #include <linux/bpf.h>
9 #include <linux/btf_ids.h>
10 #include <linux/bpf_local_storage.h>
11 #include <net/sock.h>
12 #include <uapi/linux/sock_diag.h>
13 #include <uapi/linux/btf.h>
14 #include <linux/rcupdate.h>
15 #include <linux/rcupdate_trace.h>
16 #include <linux/rcupdate_wait.h>
17 
18 #define BPF_LOCAL_STORAGE_CREATE_FLAG_MASK (BPF_F_NO_PREALLOC | BPF_F_CLONE)
19 
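/* Map a selem to one of the map's hash buckets by hashing the selem
 * pointer itself; a selem stays in the same bucket for its lifetime.
 */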
20 static struct bpf_local_storage_map_bucket *
21 select_bucket(struct bpf_local_storage_map *smap,
22 	      struct bpf_local_storage_elem *selem)
23 {
24 	return &smap->buckets[hash_ptr(selem, smap->bucket_log)];
25 }
26 
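/* Charge/uncharge the owner for 'size' bytes through the map type's
 * optional map_local_storage_charge/uncharge callbacks (e.g. sk storage
 * accounts against the socket's memory). A missing callback means no
 * accounting is done.
 */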
27 static int mem_charge(struct bpf_local_storage_map *smap, void *owner, u32 size)
28 {
29 	struct bpf_map *map = &smap->map;
30 
31 	if (!map->ops->map_local_storage_charge)
32 		return 0;
33 
34 	return map->ops->map_local_storage_charge(smap, owner, size);
35 }
36 
37 static void mem_uncharge(struct bpf_local_storage_map *smap, void *owner,
38 			 u32 size)
39 {
40 	struct bpf_map *map = &smap->map;
41 
42 	if (map->ops->map_local_storage_uncharge)
43 		map->ops->map_local_storage_uncharge(smap, owner, size);
44 }
45 
46 static struct bpf_local_storage __rcu **
47 owner_storage(struct bpf_local_storage_map *smap, void *owner)
48 {
49 	struct bpf_map *map = &smap->map;
50 
51 	return map->ops->map_owner_storage_ptr(owner);
52 }
53 
54 static bool selem_linked_to_storage_lockless(const struct bpf_local_storage_elem *selem)
55 {
56 	return !hlist_unhashed_lockless(&selem->snode);
57 }
58 
59 static bool selem_linked_to_storage(const struct bpf_local_storage_elem *selem)
60 {
61 	return !hlist_unhashed(&selem->snode);
62 }
63 
64 static bool selem_linked_to_map_lockless(const struct bpf_local_storage_elem *selem)
65 {
66 	return !hlist_unhashed_lockless(&selem->map_node);
67 }
68 
69 static bool selem_linked_to_map(const struct bpf_local_storage_elem *selem)
70 {
71 	return !hlist_unhashed(&selem->map_node);
72 }
73 
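/* Allocate a new selem for @owner, optionally charging @owner for
 * smap->elem_size first. The value area is always zero initialized
 * before @value is copied in, and uptrs are moved out of @value when
 * @swap_uptrs is true. Returns NULL (after uncharging) on failure.
 */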
74 struct bpf_local_storage_elem *
75 bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
76 		void *value, bool charge_mem, bool swap_uptrs, gfp_t gfp_flags)
77 {
78 	struct bpf_local_storage_elem *selem;
79 
80 	if (charge_mem && mem_charge(smap, owner, smap->elem_size))
81 		return NULL;
82 
83 	if (smap->bpf_ma) {
84 		selem = bpf_mem_cache_alloc_flags(&smap->selem_ma, gfp_flags);
85 		if (selem)
86 			/* Keep the original bpf_map_kzalloc behavior
87 			 * from before we started using bpf_mem_cache_alloc.
88 			 *
89 			 * No need to use zero_map_value. The bpf_selem_free()
90 			 * only does bpf_mem_cache_free() when no other
91 			 * bpf prog is using the selem.
92 			 */
93 			memset(SDATA(selem)->data, 0, smap->map.value_size);
94 	} else {
95 		selem = bpf_map_kzalloc(&smap->map, smap->elem_size,
96 					gfp_flags | __GFP_NOWARN);
97 	}
98 
99 	if (selem) {
100 		if (value) {
101 			/* No need to call check_and_init_map_value as memory is zero init */
102 			copy_map_value(&smap->map, SDATA(selem)->data, value);
103 			if (swap_uptrs)
104 				bpf_obj_swap_uptrs(smap->map.record, SDATA(selem)->data, value);
105 		}
106 		return selem;
107 	}
108 
109 	if (charge_mem)
110 		mem_uncharge(smap, owner, smap->elem_size);
111 
112 	return NULL;
113 }
114 
115 /* rcu tasks trace callback for bpf_ma == false */
116 static void __bpf_local_storage_free_trace_rcu(struct rcu_head *rcu)
117 {
118 	struct bpf_local_storage *local_storage;
119 
120 	/* If RCU Tasks Trace grace period implies RCU grace period, do
121 	 * kfree(), else do kfree_rcu().
122 	 */
123 	local_storage = container_of(rcu, struct bpf_local_storage, rcu);
124 	if (rcu_trace_implies_rcu_gp())
125 		kfree(local_storage);
126 	else
127 		kfree_rcu(local_storage, rcu);
128 }
129 
130 static void bpf_local_storage_free_rcu(struct rcu_head *rcu)
131 {
132 	struct bpf_local_storage *local_storage;
133 
134 	local_storage = container_of(rcu, struct bpf_local_storage, rcu);
135 	bpf_mem_cache_raw_free(local_storage);
136 }
137 
138 static void bpf_local_storage_free_trace_rcu(struct rcu_head *rcu)
139 {
140 	if (rcu_trace_implies_rcu_gp())
141 		bpf_local_storage_free_rcu(rcu);
142 	else
143 		call_rcu(rcu, bpf_local_storage_free_rcu);
144 }
145 
146 /* Handle bpf_ma == false */
147 static void __bpf_local_storage_free(struct bpf_local_storage *local_storage,
148 				     bool vanilla_rcu)
149 {
150 	if (vanilla_rcu)
151 		kfree_rcu(local_storage, rcu);
152 	else
153 		call_rcu_tasks_trace(&local_storage->rcu,
154 				     __bpf_local_storage_free_trace_rcu);
155 }
156 
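/* Free a local_storage once its last selem is gone. Depending on bpf_ma
 * and reuse_now, the memory is either returned to the bpf_mem_alloc cache,
 * freed after an RCU grace period, or freed after an RCU tasks trace
 * grace period (to protect sleepable bpf progs).
 */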
157 static void bpf_local_storage_free(struct bpf_local_storage *local_storage,
158 				   struct bpf_local_storage_map *smap,
159 				   bool bpf_ma, bool reuse_now)
160 {
161 	if (!local_storage)
162 		return;
163 
164 	if (!bpf_ma) {
165 		__bpf_local_storage_free(local_storage, reuse_now);
166 		return;
167 	}
168 
169 	if (!reuse_now) {
170 		call_rcu_tasks_trace(&local_storage->rcu,
171 				     bpf_local_storage_free_trace_rcu);
172 		return;
173 	}
174 
175 	if (smap)
176 		bpf_mem_cache_free(&smap->storage_ma, local_storage);
177 	else
178 		/* smap could be NULL if the selem that triggered
179 		 * this 'local_storage' creation had been long gone.
180 		 * In this case, directly do call_rcu().
181 		 */
182 		call_rcu(&local_storage->rcu, bpf_local_storage_free_rcu);
183 }
184 
185 /* rcu tasks trace callback for bpf_ma == false */
186 static void __bpf_selem_free_trace_rcu(struct rcu_head *rcu)
187 {
188 	struct bpf_local_storage_elem *selem;
189 
190 	selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
191 	if (rcu_trace_implies_rcu_gp())
192 		kfree(selem);
193 	else
194 		kfree_rcu(selem, rcu);
195 }
196 
197 /* Handle bpf_ma == false */
198 static void __bpf_selem_free(struct bpf_local_storage_elem *selem,
199 			     bool vanilla_rcu)
200 {
201 	if (vanilla_rcu)
202 		kfree_rcu(selem, rcu);
203 	else
204 		call_rcu_tasks_trace(&selem->rcu, __bpf_selem_free_trace_rcu);
205 }
206 
207 static void bpf_selem_free_rcu(struct rcu_head *rcu)
208 {
209 	struct bpf_local_storage_elem *selem;
210 	struct bpf_local_storage_map *smap;
211 
212 	selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
213 	/* The bpf_local_storage_map_free will wait for rcu_barrier */
214 	smap = rcu_dereference_check(SDATA(selem)->smap, 1);
215 
216 	migrate_disable();
217 	bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);
218 	migrate_enable();
219 	bpf_mem_cache_raw_free(selem);
220 }
221 
222 static void bpf_selem_free_trace_rcu(struct rcu_head *rcu)
223 {
224 	if (rcu_trace_implies_rcu_gp())
225 		bpf_selem_free_rcu(rcu);
226 	else
227 		call_rcu(rcu, bpf_selem_free_rcu);
228 }
229 
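/* Free a selem and release its special fields (e.g. kptrs/uptrs).
 * With reuse_now, no bpf prog can still hold the selem, so it can be
 * freed (and reused) immediately; otherwise freeing is deferred until
 * an RCU tasks trace grace period has passed.
 */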
230 void bpf_selem_free(struct bpf_local_storage_elem *selem,
231 		    struct bpf_local_storage_map *smap,
232 		    bool reuse_now)
233 {
234 	if (!smap->bpf_ma) {
235 		/* Only task storage has uptrs, and task storage has already
236 		 * moved to bpf_mem_alloc (i.e. smap->bpf_ma == true for task
237 		 * storage), so this bpf_obj_free_fields() won't unpin
238 		 * any uptr.
239 		 */
240 		bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);
241 		__bpf_selem_free(selem, reuse_now);
242 		return;
243 	}
244 
245 	if (reuse_now) {
246 		/* reuse_now == true only happens when the storage owner
247 		 * (e.g. task_struct) is being destructed or the map itself
248 		 * is being destructed (i.e. map_free). In both cases,
249 		 * no bpf prog can have a hold on the selem. It is
250 		 * safe to unpin the uptrs and free the selem now.
251 		 */
252 		bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);
253 		/* Instead of using the vanilla call_rcu(),
254 		 * bpf_mem_cache_free will be able to reuse selem
255 		 * immediately.
256 		 */
257 		bpf_mem_cache_free(&smap->selem_ma, selem);
258 		return;
259 	}
260 
261 	call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_trace_rcu);
262 }
263 
264 static void bpf_selem_free_list(struct hlist_head *list, bool reuse_now)
265 {
266 	struct bpf_local_storage_elem *selem;
267 	struct bpf_local_storage_map *smap;
268 	struct hlist_node *n;
269 
270 	/* The "_safe" iteration is needed.
271 	 * Although the loop does not remove the selem from the list,
272 	 * bpf_selem_free() will use selem->rcu_head, which is in a
273 	 * union with selem->free_node.
274 	 */
275 	hlist_for_each_entry_safe(selem, n, list, free_node) {
276 		smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
277 		bpf_selem_free(selem, smap, reuse_now);
278 	}
279 }
280 
281 /* local_storage->lock must be held and selem->local_storage == local_storage.
282  * The caller must ensure selem->smap is still valid to be
283  * dereferenced for its smap->elem_size and smap->cache_idx.
284  */
285 static bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
286 					    struct bpf_local_storage_elem *selem,
287 					    bool uncharge_mem, struct hlist_head *free_selem_list)
288 {
289 	struct bpf_local_storage_map *smap;
290 	bool free_local_storage;
291 	void *owner;
292 
293 	smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
294 	owner = local_storage->owner;
295 
296 	/* All uncharging on the owner must be done first.
297 	 * The owner may be freed once the last selem is unlinked
298 	 * from local_storage.
299 	 */
300 	if (uncharge_mem)
301 		mem_uncharge(smap, owner, smap->elem_size);
302 
303 	free_local_storage = hlist_is_singular_node(&selem->snode,
304 						    &local_storage->list);
305 	if (free_local_storage) {
306 		mem_uncharge(smap, owner, sizeof(struct bpf_local_storage));
307 		local_storage->owner = NULL;
308 
309 		/* After this RCU_INIT, owner may be freed and cannot be used */
310 		RCU_INIT_POINTER(*owner_storage(smap, owner), NULL);
311 
312 		/* local_storage is not freed now.  local_storage->lock is
313 		 * still held and raw_spin_unlock_irqrestore(&local_storage->lock)
314 		 * will be done by the caller.
315 		 *
316 		 * Although the unlock will be done under
317 		 * rcu_read_lock(), it is more intuitive to
318 		 * read if the freeing of the storage is done
319 		 * after the raw_spin_unlock_irqrestore(&local_storage->lock).
320 		 *
321 		 * Hence, a "bool free_local_storage" is returned
322 		 * to the caller, which then frees the storage after
323 		 * all the RCU grace periods have expired.
324 		 */
325 	}
326 	hlist_del_init_rcu(&selem->snode);
327 	if (rcu_access_pointer(local_storage->cache[smap->cache_idx]) ==
328 	    SDATA(selem))
329 		RCU_INIT_POINTER(local_storage->cache[smap->cache_idx], NULL);
330 
331 	hlist_add_head(&selem->free_node, free_selem_list);
332 
333 	if (rcu_access_pointer(local_storage->smap) == smap)
334 		RCU_INIT_POINTER(local_storage->smap, NULL);
335 
336 	return free_local_storage;
337 }
338 
339 static bool check_storage_bpf_ma(struct bpf_local_storage *local_storage,
340 				 struct bpf_local_storage_map *storage_smap,
341 				 struct bpf_local_storage_elem *selem)
342 {
343 
344 	struct bpf_local_storage_map *selem_smap;
345 
346 	/* local_storage->smap may be NULL. If it is, get the bpf_ma
347 	 * from any selem in the local_storage->list. The bpf_ma of all
348 	 * local_storage and selem should have the same value
349 	 * for the same map type.
350 	 *
351 	 * If the local_storage->list is already empty, the caller does not
352 	 * care about the bpf_ma value because it is not responsible
353 	 * for freeing the local_storage.
354 	 */
355 
356 	if (storage_smap)
357 		return storage_smap->bpf_ma;
358 
359 	if (!selem) {
360 		struct hlist_node *n;
361 
362 		n = rcu_dereference_check(hlist_first_rcu(&local_storage->list),
363 					  bpf_rcu_lock_held());
364 		if (!n)
365 			return false;
366 
367 		selem = hlist_entry(n, struct bpf_local_storage_elem, snode);
368 	}
369 	selem_smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
370 
371 	return selem_smap->bpf_ma;
372 }
373 
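/* Unlink a selem from its owner's local_storage under local_storage->lock,
 * free it, and also free the local_storage itself if this selem was the
 * last one on the list.
 */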
374 static void bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem,
375 				     bool reuse_now)
376 {
377 	struct bpf_local_storage_map *storage_smap;
378 	struct bpf_local_storage *local_storage;
379 	bool bpf_ma, free_local_storage = false;
380 	HLIST_HEAD(selem_free_list);
381 	unsigned long flags;
382 
383 	if (unlikely(!selem_linked_to_storage_lockless(selem)))
384 		/* selem has already been unlinked from sk */
385 		return;
386 
387 	local_storage = rcu_dereference_check(selem->local_storage,
388 					      bpf_rcu_lock_held());
389 	storage_smap = rcu_dereference_check(local_storage->smap,
390 					     bpf_rcu_lock_held());
391 	bpf_ma = check_storage_bpf_ma(local_storage, storage_smap, selem);
392 
393 	raw_spin_lock_irqsave(&local_storage->lock, flags);
394 	if (likely(selem_linked_to_storage(selem)))
395 		free_local_storage = bpf_selem_unlink_storage_nolock(
396 			local_storage, selem, true, &selem_free_list);
397 	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
398 
399 	bpf_selem_free_list(&selem_free_list, reuse_now);
400 
401 	if (free_local_storage)
402 		bpf_local_storage_free(local_storage, storage_smap, bpf_ma, reuse_now);
403 }
404 
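/* Link a selem to its owner's local_storage. The caller must hold
 * local_storage->lock, or own a not-yet-published local_storage.
 */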
405 void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
406 				   struct bpf_local_storage_elem *selem)
407 {
408 	RCU_INIT_POINTER(selem->local_storage, local_storage);
409 	hlist_add_head_rcu(&selem->snode, &local_storage->list);
410 }
411 
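/* Unlink a selem from its map bucket under the bucket lock. A no-op if
 * the selem is already unhashed from the bucket.
 */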
412 static void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem)
413 {
414 	struct bpf_local_storage_map *smap;
415 	struct bpf_local_storage_map_bucket *b;
416 	unsigned long flags;
417 
418 	if (unlikely(!selem_linked_to_map_lockless(selem)))
419 		/* selem has already been unlinked from smap */
420 		return;
421 
422 	smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
423 	b = select_bucket(smap, selem);
424 	raw_spin_lock_irqsave(&b->lock, flags);
425 	if (likely(selem_linked_to_map(selem)))
426 		hlist_del_init_rcu(&selem->map_node);
427 	raw_spin_unlock_irqrestore(&b->lock, flags);
428 }
429 
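/* Link a selem to its map bucket (chosen by select_bucket()) and record
 * the owning smap, under the bucket lock.
 */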
430 void bpf_selem_link_map(struct bpf_local_storage_map *smap,
431 			struct bpf_local_storage_elem *selem)
432 {
433 	struct bpf_local_storage_map_bucket *b = select_bucket(smap, selem);
434 	unsigned long flags;
435 
436 	raw_spin_lock_irqsave(&b->lock, flags);
437 	RCU_INIT_POINTER(SDATA(selem)->smap, smap);
438 	hlist_add_head_rcu(&selem->map_node, &b->list);
439 	raw_spin_unlock_irqrestore(&b->lock, flags);
440 }
441 
442 void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool reuse_now)
443 {
444 	/* Always unlink from map before unlinking from local_storage
445 	 * because selem will be freed after successfully unlinked from
446 	 * the local_storage.
447 	 */
448 	bpf_selem_unlink_map(selem);
449 	bpf_selem_unlink_storage(selem, reuse_now);
450 }
451 
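/* Publish the selem's sdata to the owner's lookup cache slot for this map
 * (smap->cache_idx) so that the next bpf_local_storage_lookup() can avoid
 * walking local_storage->list.
 */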
452 void __bpf_local_storage_insert_cache(struct bpf_local_storage *local_storage,
453 				      struct bpf_local_storage_map *smap,
454 				      struct bpf_local_storage_elem *selem)
455 {
456 	unsigned long flags;
457 
458 	/* spinlock is needed to avoid racing with the
459 	 * parallel delete.  Otherwise, publishing an already
460 	 * deleted sdata to the cache will become a use-after-free
461 	 * problem in the next bpf_local_storage_lookup().
462 	 */
463 	raw_spin_lock_irqsave(&local_storage->lock, flags);
464 	if (selem_linked_to_storage(selem))
465 		rcu_assign_pointer(local_storage->cache[smap->cache_idx], SDATA(selem));
466 	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
467 }
468 
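/* Validate BPF_NOEXIST/BPF_EXIST in @map_flags against whether an old
 * sdata already exists for this map.
 */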
469 static int check_flags(const struct bpf_local_storage_data *old_sdata,
470 		       u64 map_flags)
471 {
472 	if (old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
473 		/* elem already exists */
474 		return -EEXIST;
475 
476 	if (!old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
477 		/* elem doesn't exist, cannot update it */
478 		return -ENOENT;
479 
480 	return 0;
481 }
482 
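/* Allocate the per-owner bpf_local_storage, link @first_selem into it and
 * publish it to the owner with cmpxchg(). Returns -EAGAIN if another CPU
 * published a storage for the owner first.
 */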
483 int bpf_local_storage_alloc(void *owner,
484 			    struct bpf_local_storage_map *smap,
485 			    struct bpf_local_storage_elem *first_selem,
486 			    gfp_t gfp_flags)
487 {
488 	struct bpf_local_storage *prev_storage, *storage;
489 	struct bpf_local_storage **owner_storage_ptr;
490 	int err;
491 
492 	err = mem_charge(smap, owner, sizeof(*storage));
493 	if (err)
494 		return err;
495 
496 	if (smap->bpf_ma)
497 		storage = bpf_mem_cache_alloc_flags(&smap->storage_ma, gfp_flags);
498 	else
499 		storage = bpf_map_kzalloc(&smap->map, sizeof(*storage),
500 					  gfp_flags | __GFP_NOWARN);
501 	if (!storage) {
502 		err = -ENOMEM;
503 		goto uncharge;
504 	}
505 
506 	RCU_INIT_POINTER(storage->smap, smap);
507 	INIT_HLIST_HEAD(&storage->list);
508 	raw_spin_lock_init(&storage->lock);
509 	storage->owner = owner;
510 
511 	bpf_selem_link_storage_nolock(storage, first_selem);
512 	bpf_selem_link_map(smap, first_selem);
513 
514 	owner_storage_ptr =
515 		(struct bpf_local_storage **)owner_storage(smap, owner);
516 	/* Publish storage to the owner.
517 	 * Instead of using any lock of the kernel object (i.e. owner),
518 	 * cmpxchg will work with any kernel object regardless of what
519 	 * the running context is (bh, irq, etc.).
520 	 *
521 	 * From now on, the owner->storage pointer (e.g. sk->sk_bpf_storage)
522 	 * is protected by the storage->lock.  Hence, when freeing
523 	 * the owner->storage, the storage->lock must be held before
524 	 * setting owner->storage ptr to NULL.
525 	 */
526 	prev_storage = cmpxchg(owner_storage_ptr, NULL, storage);
527 	if (unlikely(prev_storage)) {
528 		bpf_selem_unlink_map(first_selem);
529 		err = -EAGAIN;
530 		goto uncharge;
531 
532 		/* Note that even though first_selem was linked to smap's
533 		 * bucket->list, first_selem can be freed immediately
534 		 * (instead of kfree_rcu) because
535 		 * bpf_local_storage_map_free() does a
536 		 * synchronize_rcu_mult (waiting for both sleepable and
537 		 * normal programs) before walking the bucket->list.
538 		 * Hence, no one is accessing selem from the
539 		 * bucket->list under rcu_read_lock().
540 		 */
541 	}
542 
543 	return 0;
544 
545 uncharge:
546 	bpf_local_storage_free(storage, smap, smap->bpf_ma, true);
547 	mem_uncharge(smap, owner, sizeof(*storage));
548 	return err;
549 }
550 
551 /* The owner (e.g. sk) cannot be going away while it is linking a new elem
552  * to its storage (e.g. sk->sk_bpf_storage), i.e. sk->sk_refcnt cannot be 0.
553  * Otherwise, it would become a leak (and cause other memory issues
554  * during map destruction).
555  */
556 struct bpf_local_storage_data *
557 bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
558 			 void *value, u64 map_flags, bool swap_uptrs, gfp_t gfp_flags)
559 {
560 	struct bpf_local_storage_data *old_sdata = NULL;
561 	struct bpf_local_storage_elem *alloc_selem, *selem = NULL;
562 	struct bpf_local_storage *local_storage;
563 	HLIST_HEAD(old_selem_free_list);
564 	unsigned long flags;
565 	int err;
566 
567 	/* BPF_EXIST and BPF_NOEXIST cannot be both set */
568 	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST) ||
569 	    /* BPF_F_LOCK can only be used in a value with spin_lock */
570 	    unlikely((map_flags & BPF_F_LOCK) &&
571 		     !btf_record_has_field(smap->map.record, BPF_SPIN_LOCK)))
572 		return ERR_PTR(-EINVAL);
573 
574 	if (gfp_flags == GFP_KERNEL && (map_flags & ~BPF_F_LOCK) != BPF_NOEXIST)
575 		return ERR_PTR(-EINVAL);
576 
577 	local_storage = rcu_dereference_check(*owner_storage(smap, owner),
578 					      bpf_rcu_lock_held());
579 	if (!local_storage || hlist_empty(&local_storage->list)) {
580 		/* Very first elem for the owner */
581 		err = check_flags(NULL, map_flags);
582 		if (err)
583 			return ERR_PTR(err);
584 
585 		selem = bpf_selem_alloc(smap, owner, value, true, swap_uptrs, gfp_flags);
586 		if (!selem)
587 			return ERR_PTR(-ENOMEM);
588 
589 		err = bpf_local_storage_alloc(owner, smap, selem, gfp_flags);
590 		if (err) {
591 			bpf_selem_free(selem, smap, true);
592 			mem_uncharge(smap, owner, smap->elem_size);
593 			return ERR_PTR(err);
594 		}
595 
596 		return SDATA(selem);
597 	}
598 
599 	if ((map_flags & BPF_F_LOCK) && !(map_flags & BPF_NOEXIST)) {
600 		/* Hoping to find an old_sdata to do inline update
601 		 * such that it can avoid taking the local_storage->lock
602 		 * and changing the lists.
603 		 */
604 		old_sdata =
605 			bpf_local_storage_lookup(local_storage, smap, false);
606 		err = check_flags(old_sdata, map_flags);
607 		if (err)
608 			return ERR_PTR(err);
609 		if (old_sdata && selem_linked_to_storage_lockless(SELEM(old_sdata))) {
610 			copy_map_value_locked(&smap->map, old_sdata->data,
611 					      value, false);
612 			return old_sdata;
613 		}
614 	}
615 
616 	/* A lookup has just been done and concluded that a new selem is
617 	 * needed, so an unnecessary alloc is unlikely.
618 	 */
619 	alloc_selem = selem = bpf_selem_alloc(smap, owner, value, true, swap_uptrs, gfp_flags);
620 	if (!alloc_selem)
621 		return ERR_PTR(-ENOMEM);
622 
623 	raw_spin_lock_irqsave(&local_storage->lock, flags);
624 
625 	/* Recheck local_storage->list under local_storage->lock */
626 	if (unlikely(hlist_empty(&local_storage->list))) {
627 		/* A parallel del is happening and local_storage is going
628 		 * away.  It has just been checked before, so very
629 		 * unlikely.  Return instead of retry to keep things
630 		 * simple.
631 		 */
632 		err = -EAGAIN;
633 		goto unlock;
634 	}
635 
636 	old_sdata = bpf_local_storage_lookup(local_storage, smap, false);
637 	err = check_flags(old_sdata, map_flags);
638 	if (err)
639 		goto unlock;
640 
641 	if (old_sdata && (map_flags & BPF_F_LOCK)) {
642 		copy_map_value_locked(&smap->map, old_sdata->data, value,
643 				      false);
644 		selem = SELEM(old_sdata);
645 		goto unlock;
646 	}
647 
648 	alloc_selem = NULL;
649 	/* First, link the new selem to the map */
650 	bpf_selem_link_map(smap, selem);
651 
652 	/* Second, link (and publish) the new selem to local_storage */
653 	bpf_selem_link_storage_nolock(local_storage, selem);
654 
655 	/* Third, remove old selem, SELEM(old_sdata) */
656 	if (old_sdata) {
657 		bpf_selem_unlink_map(SELEM(old_sdata));
658 		bpf_selem_unlink_storage_nolock(local_storage, SELEM(old_sdata),
659 						true, &old_selem_free_list);
660 	}
661 
662 unlock:
663 	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
664 	bpf_selem_free_list(&old_selem_free_list, false);
665 	if (alloc_selem) {
666 		mem_uncharge(smap, owner, smap->elem_size);
667 		bpf_selem_free(alloc_selem, smap, true);
668 	}
669 	return err ? ERR_PTR(err) : SDATA(selem);
670 }
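/* Illustrative sketch (not part of this file): a map front end such as
 * bpf_sk_storage.c roughly drives the update path above like so, with
 * "sk" standing in for whatever owner object that map type uses:
 *
 *	sdata = bpf_local_storage_update(sk, smap, value, map_flags,
 *					 false, GFP_ATOMIC);
 *	return PTR_ERR_OR_ZERO(sdata);
 *
 * See the per-type files (bpf_sk_storage.c, bpf_task_storage.c, ...) for
 * the authoritative call sites and their gfp/swap_uptrs choices.
 */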
671 
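/* Pick the least used cache slot for a new map, preferring a completely
 * unused one, and bump its usage count.
 */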
672 static u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache)
673 {
674 	u64 min_usage = U64_MAX;
675 	u16 i, res = 0;
676 
677 	spin_lock(&cache->idx_lock);
678 
679 	for (i = 0; i < BPF_LOCAL_STORAGE_CACHE_SIZE; i++) {
680 		if (cache->idx_usage_counts[i] < min_usage) {
681 			min_usage = cache->idx_usage_counts[i];
682 			res = i;
683 
684 			/* Found a free cache_idx */
685 			if (!min_usage)
686 				break;
687 		}
688 	}
689 	cache->idx_usage_counts[res]++;
690 
691 	spin_unlock(&cache->idx_lock);
692 
693 	return res;
694 }
695 
696 static void bpf_local_storage_cache_idx_free(struct bpf_local_storage_cache *cache,
697 					     u16 idx)
698 {
699 	spin_lock(&cache->idx_lock);
700 	cache->idx_usage_counts[idx]--;
701 	spin_unlock(&cache->idx_lock);
702 }
703 
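/* Common map_create attribute checks for all local storage map types:
 * BPF_F_NO_PREALLOC is mandatory, the key must be a 4-byte int, BTF ids
 * for key/value must be supplied, and the value size is bounded.
 */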
704 int bpf_local_storage_map_alloc_check(union bpf_attr *attr)
705 {
706 	if (attr->map_flags & ~BPF_LOCAL_STORAGE_CREATE_FLAG_MASK ||
707 	    !(attr->map_flags & BPF_F_NO_PREALLOC) ||
708 	    attr->max_entries ||
709 	    attr->key_size != sizeof(int) || !attr->value_size ||
710 	    /* Enforce BTF for userspace sk dumping */
711 	    !attr->btf_key_type_id || !attr->btf_value_type_id)
712 		return -EINVAL;
713 
714 	if (attr->value_size > BPF_LOCAL_STORAGE_MAX_VALUE_SIZE)
715 		return -E2BIG;
716 
717 	return 0;
718 }
719 
720 int bpf_local_storage_map_check_btf(const struct bpf_map *map,
721 				    const struct btf *btf,
722 				    const struct btf_type *key_type,
723 				    const struct btf_type *value_type)
724 {
725 	u32 int_data;
726 
727 	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
728 		return -EINVAL;
729 
730 	int_data = *(u32 *)(key_type + 1);
731 	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
732 		return -EINVAL;
733 
734 	return 0;
735 }
736 
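/* Called from the owner's destruction path (e.g. bpf_sk_storage_free())
 * to unlink and free every selem still attached to this local_storage.
 */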
737 void bpf_local_storage_destroy(struct bpf_local_storage *local_storage)
738 {
739 	struct bpf_local_storage_map *storage_smap;
740 	struct bpf_local_storage_elem *selem;
741 	bool bpf_ma, free_storage = false;
742 	HLIST_HEAD(free_selem_list);
743 	struct hlist_node *n;
744 	unsigned long flags;
745 
746 	storage_smap = rcu_dereference_check(local_storage->smap, bpf_rcu_lock_held());
747 	bpf_ma = check_storage_bpf_ma(local_storage, storage_smap, NULL);
748 
749 	/* Neither the bpf_prog nor the bpf_map's syscall
750 	 * could be modifying the local_storage->list now.
751 	 * Thus, no elem can be added to or deleted from the
752 	 * local_storage->list by the bpf_prog or by the bpf_map's syscall.
753 	 *
754 	 * It is racing with bpf_local_storage_map_free() alone
755 	 * when unlinking elem from the local_storage->list and
756 	 * the map's bucket->list.
757 	 */
758 	raw_spin_lock_irqsave(&local_storage->lock, flags);
759 	hlist_for_each_entry_safe(selem, n, &local_storage->list, snode) {
760 		/* Always unlink from map before unlinking from
761 		 * local_storage.
762 		 */
763 		bpf_selem_unlink_map(selem);
764 		/* If local_storage list has only one element, the
765 		 * bpf_selem_unlink_storage_nolock() will return true.
766 		 * Otherwise, it will return false. This loop intends to remove
767 		 * all of the local storage elements, so the last iteration
768 		 * of the loop will set free_storage to true.
769 		 */
770 		free_storage = bpf_selem_unlink_storage_nolock(
771 			local_storage, selem, true, &free_selem_list);
772 	}
773 	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
774 
775 	bpf_selem_free_list(&free_selem_list, true);
776 
777 	if (free_storage)
778 		bpf_local_storage_free(local_storage, storage_smap, bpf_ma, true);
779 }
780 
781 u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map)
782 {
783 	struct bpf_local_storage_map *smap = (struct bpf_local_storage_map *)map;
784 	u64 usage = sizeof(*smap);
785 
786 	/* The dynamically allocated selems are not counted currently. */
787 	usage += sizeof(*smap->buckets) * (1ULL << smap->bucket_log);
788 	return usage;
789 }
790 
791 /* When bpf_ma == true, the bpf_mem_alloc is used to allocate and free memory.
792  * A deadlock-free allocator is useful for storage whose owner PTR_TO_BTF_ID
793  * the bpf prog can easily get a hold of in any context, e.g. bpf_get_current_task_btf().
794  * The task and cgroup storage fall into this case. The bpf_mem_alloc reuses
795  * memory immediately. To be reuse-immediate safe, the owner destruction
796  * code path needs to go through an RCU grace period before calling
797  * bpf_local_storage_destroy().
798  *
799  * When bpf_ma == false, kmalloc and kfree are used.
800  */
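/* Illustrative note (the call sites live in the per-type files; see them for
 * the authoritative choices): task and cgroup storage pass bpf_ma == true, e.g.
 *
 *	smap = bpf_local_storage_map_alloc(attr, &task_cache, true);
 *
 * while sk and inode storage pass bpf_ma == false.
 */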
801 struct bpf_map *
802 bpf_local_storage_map_alloc(union bpf_attr *attr,
803 			    struct bpf_local_storage_cache *cache,
804 			    bool bpf_ma)
805 {
806 	struct bpf_local_storage_map *smap;
807 	unsigned int i;
808 	u32 nbuckets;
809 	int err;
810 
811 	smap = bpf_map_area_alloc(sizeof(*smap), NUMA_NO_NODE);
812 	if (!smap)
813 		return ERR_PTR(-ENOMEM);
814 	bpf_map_init_from_attr(&smap->map, attr);
815 
816 	nbuckets = roundup_pow_of_two(num_possible_cpus());
817 	/* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
818 	nbuckets = max_t(u32, 2, nbuckets);
819 	smap->bucket_log = ilog2(nbuckets);
820 
821 	smap->buckets = bpf_map_kvcalloc(&smap->map, nbuckets,
822 					 sizeof(*smap->buckets), GFP_USER | __GFP_NOWARN);
823 	if (!smap->buckets) {
824 		err = -ENOMEM;
825 		goto free_smap;
826 	}
827 
828 	for (i = 0; i < nbuckets; i++) {
829 		INIT_HLIST_HEAD(&smap->buckets[i].list);
830 		raw_spin_lock_init(&smap->buckets[i].lock);
831 	}
832 
833 	smap->elem_size = offsetof(struct bpf_local_storage_elem,
834 				   sdata.data[attr->value_size]);
835 
836 	/* In PREEMPT_RT, kmalloc(GFP_ATOMIC) is still not safe in non
837 	 * preemptible context. Thus, enforce all storages to use
838 	 * bpf_mem_alloc when CONFIG_PREEMPT_RT is enabled.
839 	 */
840 	smap->bpf_ma = IS_ENABLED(CONFIG_PREEMPT_RT) ? true : bpf_ma;
841 	if (smap->bpf_ma) {
842 		err = bpf_mem_alloc_init(&smap->selem_ma, smap->elem_size, false);
843 		if (err)
844 			goto free_smap;
845 
846 		err = bpf_mem_alloc_init(&smap->storage_ma, sizeof(struct bpf_local_storage), false);
847 		if (err) {
848 			bpf_mem_alloc_destroy(&smap->selem_ma);
849 			goto free_smap;
850 		}
851 	}
852 
853 	smap->cache_idx = bpf_local_storage_cache_idx_get(cache);
854 	return &smap->map;
855 
856 free_smap:
857 	kvfree(smap->buckets);
858 	bpf_map_area_free(smap);
859 	return ERR_PTR(err);
860 }
861 
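/* Tear down a local storage map: release its cache slot, unlink every selem
 * still sitting in the buckets, then wait for the needed RCU (and RCU tasks
 * trace) grace periods before destroying the allocators and the buckets.
 */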
862 void bpf_local_storage_map_free(struct bpf_map *map,
863 				struct bpf_local_storage_cache *cache,
864 				int __percpu *busy_counter)
865 {
866 	struct bpf_local_storage_map_bucket *b;
867 	struct bpf_local_storage_elem *selem;
868 	struct bpf_local_storage_map *smap;
869 	unsigned int i;
870 
871 	smap = (struct bpf_local_storage_map *)map;
872 	bpf_local_storage_cache_idx_free(cache, smap->cache_idx);
873 
874 	/* Note that this map might be concurrently cloned from
875 	 * bpf_sk_storage_clone. Wait for any existing bpf_sk_storage_clone
876 	 * RCU read section to finish before proceeding. New RCU
877 	 * read sections should be prevented via bpf_map_inc_not_zero.
878 	 */
879 	synchronize_rcu();
880 
881 	/* bpf prog and the userspace can no longer access this map
882 	 * now.  No new selem (of this map) can be added
883 	 * to the owner->storage or to the map bucket's list.
884 	 *
885 	 * The elem of this map can be cleaned up here
886 	 * or when the storage is freed e.g.
887 	 * by bpf_sk_storage_free() during __sk_destruct().
888 	 */
889 	for (i = 0; i < (1U << smap->bucket_log); i++) {
890 		b = &smap->buckets[i];
891 
892 		rcu_read_lock();
893 		/* No one is adding to b->list now */
894 		while ((selem = hlist_entry_safe(
895 				rcu_dereference_raw(hlist_first_rcu(&b->list)),
896 				struct bpf_local_storage_elem, map_node))) {
897 			if (busy_counter)
898 				this_cpu_inc(*busy_counter);
899 			bpf_selem_unlink(selem, true);
900 			if (busy_counter)
901 				this_cpu_dec(*busy_counter);
902 			cond_resched_rcu();
903 		}
904 		rcu_read_unlock();
905 	}
906 
907 	/* While freeing the storage we may still need to access the map.
908 	 *
909 	 * e.g. when bpf_sk_storage_free() has unlinked selem from the map
910 	 * which then made the above while((selem = ...)) loop
911 	 * exit immediately.
912 	 *
913 	 * However, while freeing the storage one still needs to access the
914 	 * smap->elem_size to do the uncharging in
915 	 * bpf_selem_unlink_storage_nolock().
916 	 *
917 	 * Hence, wait another rcu grace period for the storage to be freed.
918 	 */
919 	synchronize_rcu();
920 
921 	if (smap->bpf_ma) {
922 		rcu_barrier_tasks_trace();
923 		if (!rcu_trace_implies_rcu_gp())
924 			rcu_barrier();
925 		bpf_mem_alloc_destroy(&smap->selem_ma);
926 		bpf_mem_alloc_destroy(&smap->storage_ma);
927 	}
928 	kvfree(smap->buckets);
929 	bpf_map_area_free(smap);
930 }
931