xref: /linux/include/linux/bpf_local_storage.h (revision eb71ab2bf72260054677e348498ba995a057c463)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Copyright (c) 2019 Facebook
4  * Copyright 2020 Google LLC.
5  */
6 
7 #ifndef _BPF_LOCAL_STORAGE_H
8 #define _BPF_LOCAL_STORAGE_H
9 
10 #include <linux/bpf.h>
11 #include <linux/filter.h>
12 #include <linux/rculist.h>
13 #include <linux/list.h>
14 #include <linux/hash.h>
15 #include <linux/types.h>
16 #include <linux/bpf_mem_alloc.h>
17 #include <uapi/linux/btf.h>
18 #include <asm/rqspinlock.h>
19 
20 #define BPF_LOCAL_STORAGE_CACHE_SIZE	16
21 
/* One hash bucket of a bpf_local_storage_map.  Elems are linked into
 * "list" under "lock" when they are added to / removed from the map.
 */
struct bpf_local_storage_map_bucket {
	struct hlist_head list;	/* elems hashed into this bucket */
	rqspinlock_t lock;	/* protects "list" */
};
26 
/* The map is not the primary owner of a bpf_local_storage_elem.
28  * Instead, the container object (eg. sk->sk_bpf_storage) is.
29  *
30  * The map (bpf_local_storage_map) is for two purposes
31  * 1. Define the size of the "local storage".  It is
32  *    the map's value_size.
33  *
34  * 2. Maintain a list to keep track of all elems such
35  *    that they can be cleaned up during the map destruction.
36  *
37  * When a bpf local storage is being looked up for a
38  * particular object,  the "bpf_map" pointer is actually used
39  * as the "key" to search in the list of elem in
40  * the respective bpf_local_storage owned by the object.
41  *
42  * e.g. sk->sk_bpf_storage is the mini-map with the "bpf_map" pointer
43  * as the searching key.
44  */
struct bpf_local_storage_map {
	struct bpf_map map;
	/* Lookup elem does not require accessing the map.
	 *
	 * Updating/Deleting requires a bucket lock to
	 * link/unlink the elem from the map.  Having
	 * multiple buckets to improve contention.
	 */
	struct bpf_local_storage_map_bucket *buckets;
	u32 bucket_log;		/* log2 of the number of buckets */
	u16 elem_size;		/* presumably sizeof(elem) + value size — confirm at alloc site */
	u16 cache_idx;		/* slot of bpf_local_storage->cache[] this map uses */
	bool use_kmalloc_nolock; /* NOTE(review): selects a non-sleeping alloc path — confirm */
};
59 
struct bpf_local_storage_data {
	/* smap is used as the searching key when looking up
	 * from the object's bpf_local_storage.
	 *
	 * Put it in the same cacheline as the data to minimize
	 * the number of cachelines accessed during the cache hit case.
	 */
	struct bpf_local_storage_map __rcu *smap;
	/* The map value itself; 8-byte aligned flexible array. */
	u8 data[] __aligned(8);
};
70 
71 #define SELEM_MAP_UNLINKED	(1 << 0)
72 #define SELEM_STORAGE_UNLINKED	(1 << 1)
73 #define SELEM_UNLINKED		(SELEM_MAP_UNLINKED | SELEM_STORAGE_UNLINKED)
74 #define SELEM_TOFREE		(1 << 2)
75 
/* Linked to bpf_local_storage and bpf_local_storage_map */
struct bpf_local_storage_elem {
	struct hlist_node map_node;	/* Linked to bpf_local_storage_map */
	struct hlist_node snode;	/* Linked to bpf_local_storage */
	struct bpf_local_storage __rcu *local_storage;
	union {
		struct rcu_head rcu;
		struct hlist_node free_node;	/* used to postpone
						 * bpf_selem_free
						 * after raw_spin_unlock
						 */
	};
	atomic_t state;		/* SELEM_* link/free flags defined above */
	bool use_kmalloc_nolock; /* copied from the owning map's flag — confirm */
	/* 3 bytes hole */
	/* The data is stored in another cacheline to minimize
	 * the number of cachelines access during a cache hit.
	 */
	struct bpf_local_storage_data sdata ____cacheline_aligned;
};
96 
struct bpf_local_storage {
	/* Per-slot cache of recently looked-up sdata, indexed by the
	 * map's cache_idx (see bpf_local_storage_lookup() fast path).
	 */
	struct bpf_local_storage_data __rcu *cache[BPF_LOCAL_STORAGE_CACHE_SIZE];
	struct hlist_head list; /* List of bpf_local_storage_elem */
	void *owner;		/* The object that owns the above "list" of
				 * bpf_local_storage_elem.
				 */
	struct rcu_head rcu;
	rqspinlock_t lock;	/* Protect adding/removing from the "list" */
	u64 mem_charge;		/* Copy of mem charged to owner. Protected by "lock" */
	refcount_t owner_refcnt;/* Used to pin owner when map_free is uncharging */
	bool use_kmalloc_nolock;
};
109 
110 /* U16_MAX is much more than enough for sk local storage
111  * considering a tcp_sock is ~2k.
112  */
113 #define BPF_LOCAL_STORAGE_MAX_VALUE_SIZE				       \
114 	min_t(u32,                                                             \
115 	      (KMALLOC_MAX_SIZE - MAX_BPF_STACK -                              \
116 	       sizeof(struct bpf_local_storage_elem)),                         \
117 	      (U16_MAX - sizeof(struct bpf_local_storage_elem)))
118 
/* Convert between an elem and its embedded sdata.  SELEM() relies on
 * "sdata" being a member of struct bpf_local_storage_elem.
 */
#define SELEM(_SDATA)                                                          \
	container_of((_SDATA), struct bpf_local_storage_elem, sdata)
#define SDATA(_SELEM) (&(_SELEM)->sdata)

/* BPF_LOCAL_STORAGE_CACHE_SIZE was redefined here identically to the
 * definition near the top of this header; the duplicate is removed.
 */
124 
struct bpf_local_storage_cache {
	spinlock_t idx_lock;	/* protects idx_usage_counts[] */
	/* Presumably counts how many maps are assigned to each cache
	 * slot so a least-used cache_idx can be handed out — confirm
	 * against the cache_idx get/free helpers.
	 */
	u64 idx_usage_counts[BPF_LOCAL_STORAGE_CACHE_SIZE];
};
129 
130 #define DEFINE_BPF_STORAGE_CACHE(name)				\
131 static struct bpf_local_storage_cache name = {			\
132 	.idx_lock = __SPIN_LOCK_UNLOCKED(name.idx_lock),	\
133 }
134 
135 /* Helper functions for bpf_local_storage */
136 int bpf_local_storage_map_alloc_check(union bpf_attr *attr);
137 
138 struct bpf_map *
139 bpf_local_storage_map_alloc(union bpf_attr *attr,
140 			    struct bpf_local_storage_cache *cache,
141 			    bool use_kmalloc_nolock);
142 
143 void __bpf_local_storage_insert_cache(struct bpf_local_storage *local_storage,
144 				      struct bpf_local_storage_map *smap,
145 				      struct bpf_local_storage_elem *selem);
146 /* If cacheit_lockit is false, this lookup function is lockless */
147 static inline struct bpf_local_storage_data *
bpf_local_storage_lookup(struct bpf_local_storage * local_storage,struct bpf_local_storage_map * smap,bool cacheit_lockit)148 bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
149 			 struct bpf_local_storage_map *smap,
150 			 bool cacheit_lockit)
151 {
152 	struct bpf_local_storage_data *sdata;
153 	struct bpf_local_storage_elem *selem;
154 
155 	/* Fast path (cache hit) */
156 	sdata = rcu_dereference_check(local_storage->cache[smap->cache_idx],
157 				      bpf_rcu_lock_held());
158 	if (sdata && rcu_access_pointer(sdata->smap) == smap)
159 		return sdata;
160 
161 	/* Slow path (cache miss) */
162 	hlist_for_each_entry_rcu(selem, &local_storage->list, snode,
163 				  rcu_read_lock_trace_held())
164 		if (rcu_access_pointer(SDATA(selem)->smap) == smap)
165 			break;
166 
167 	if (!selem)
168 		return NULL;
169 	if (cacheit_lockit)
170 		__bpf_local_storage_insert_cache(local_storage, smap, selem);
171 	return SDATA(selem);
172 }
173 
174 u32 bpf_local_storage_destroy(struct bpf_local_storage *local_storage);
175 
176 void bpf_local_storage_map_free(struct bpf_map *map,
177 				struct bpf_local_storage_cache *cache);
178 
179 int bpf_local_storage_map_check_btf(struct bpf_map *map,
180 				    const struct btf *btf,
181 				    const struct btf_type *key_type,
182 				    const struct btf_type *value_type);
183 
184 void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
185 				   struct bpf_local_storage_elem *selem);
186 
187 int bpf_selem_unlink(struct bpf_local_storage_elem *selem);
188 
189 int bpf_selem_link_map(struct bpf_local_storage_map *smap,
190 		       struct bpf_local_storage *local_storage,
191 		       struct bpf_local_storage_elem *selem);
192 
193 struct bpf_local_storage_elem *
194 bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner, void *value,
195 		bool swap_uptrs, gfp_t gfp_flags);
196 
197 void bpf_selem_free(struct bpf_local_storage_elem *selem,
198 		    bool reuse_now);
199 
200 int
201 bpf_local_storage_alloc(void *owner,
202 			struct bpf_local_storage_map *smap,
203 			struct bpf_local_storage_elem *first_selem,
204 			gfp_t gfp_flags);
205 
206 struct bpf_local_storage_data *
207 bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
208 			 void *value, u64 map_flags, bool swap_uptrs, gfp_t gfp_flags);
209 
210 u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map);
211 
212 #endif /* _BPF_LOCAL_STORAGE_H */
213