xref: /linux/net/core/sock_reuseport.c (revision 905e46acd3272d04566fec49afbd7ad9e2ed9ae3)
/*
 * To speed up listener socket lookup, create an array to store all sockets
 * listening on the same port.  This allows a decision to be made after finding
 * the first socket.  An optional BPF program can also be configured for
 * selecting the socket index from the array of available sockets.
 */
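
/*
 * Illustrative userspace sketch (not part of this file): a reuseport
 * group exists because several sockets bound the same address and port
 * with SO_REUSEPORT set.  The calls below are the standard socket API;
 * make_member() and the port number are arbitrary examples.
 *
 *	int make_member(void)
 *	{
 *		int fd = socket(AF_INET, SOCK_STREAM, 0);
 *		int one = 1;
 *		struct sockaddr_in addr = {
 *			.sin_family = AF_INET,
 *			.sin_port = htons(8080),
 *			.sin_addr.s_addr = htonl(INADDR_ANY),
 *		};
 *
 *		setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *		bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *		listen(fd, 128);
 *		return fd;
 *	}
 *
 * The kernel then calls reuseport_alloc() below for the first member of
 * the group and reuseport_add_sock() for each subsequent one.
 */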

#include <net/sock_reuseport.h>
#include <linux/bpf.h>
#include <linux/rcupdate.h>

#define INIT_SOCKS 128

static DEFINE_SPINLOCK(reuseport_lock);

static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks)
{
	unsigned int size = sizeof(struct sock_reuseport) +
		      sizeof(struct sock *) * max_socks;
	struct sock_reuseport *reuse = kzalloc(size, GFP_ATOMIC);

	if (!reuse)
		return NULL;

	reuse->max_socks = max_socks;

	RCU_INIT_POINTER(reuse->prog, NULL);
	return reuse;
}
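
/*
 * The size computed above is the usual header-plus-flexible-array
 * pattern: one struct sock_reuseport followed by max_socks socket
 * pointers, so with INIT_SOCKS == 128 on a 64-bit kernel the array
 * part alone is 128 * 8 = 1024 bytes.  As an aside, newer kernels
 * provide the overflow-checked struct_size() helper for this kind of
 * computation, assuming socks is declared as a flexible array member:
 *
 *	reuse = kzalloc(struct_size(reuse, socks, max_socks), GFP_ATOMIC);
 */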

int reuseport_alloc(struct sock *sk)
{
	struct sock_reuseport *reuse;

	/* The bh-safe lock variant is used because this function may be
	 * called both from the softirq receive path, where it can precede
	 * the hlist lock, and from process context via setsockopt().
	 */
	spin_lock_bh(&reuseport_lock);
	WARN_ONCE(rcu_dereference_protected(sk->sk_reuseport_cb,
					    lockdep_is_held(&reuseport_lock)),
		  "multiple allocations for the same socket");
	reuse = __reuseport_alloc(INIT_SOCKS);
	if (!reuse) {
		spin_unlock_bh(&reuseport_lock);
		return -ENOMEM;
	}

	reuse->socks[0] = sk;
	reuse->num_socks = 1;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

	spin_unlock_bh(&reuseport_lock);

	return 0;
}
EXPORT_SYMBOL(reuseport_alloc);

static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
{
	struct sock_reuseport *more_reuse;
	u32 more_socks_size, i;

	more_socks_size = reuse->max_socks * 2U;
	if (more_socks_size > U16_MAX)
		return NULL;

	more_reuse = __reuseport_alloc(more_socks_size);
	if (!more_reuse)
		return NULL;

	more_reuse->max_socks = more_socks_size;
	more_reuse->num_socks = reuse->num_socks;
	more_reuse->prog = reuse->prog;

	memcpy(more_reuse->socks, reuse->socks,
	       reuse->num_socks * sizeof(struct sock *));

	for (i = 0; i < reuse->num_socks; ++i)
		rcu_assign_pointer(reuse->socks[i]->sk_reuseport_cb,
				   more_reuse);

	/* Note: we use kfree_rcu here instead of reuseport_free_rcu so
	 * that reuse and more_reuse can temporarily share a reference
	 * to prog.
	 */
	kfree_rcu(reuse, rcu);
	return more_reuse;
}
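
/*
 * Worked capacity note, derived from the constants above: the array
 * starts at INIT_SOCKS (128) entries and doubles on demand, so its
 * size follows 128, 256, 512, ..., 32768.  The next doubling, 65536,
 * exceeds U16_MAX (65535), so reuseport_grow() refuses it and a group
 * tops out at 32768 sockets.
 */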

/**
 *  reuseport_add_sock - Add a socket to the reuseport group of another.
 *  @sk:  New socket to add to the group.
 *  @sk2: Socket belonging to the existing reuseport group.
 *  May return -ENOMEM under memory pressure, in which case the socket
 *  is not added to the group.
 */
int reuseport_add_sock(struct sock *sk, struct sock *sk2)
{
	struct sock_reuseport *reuse;

	if (!rcu_access_pointer(sk2->sk_reuseport_cb)) {
		int err = reuseport_alloc(sk2);

		if (err)
			return err;
	}

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk2->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	WARN_ONCE(rcu_dereference_protected(sk->sk_reuseport_cb,
					    lockdep_is_held(&reuseport_lock)),
		  "socket already in reuseport group");

	if (reuse->num_socks == reuse->max_socks) {
		reuse = reuseport_grow(reuse);
		if (!reuse) {
			spin_unlock_bh(&reuseport_lock);
			return -ENOMEM;
		}
	}

	reuse->socks[reuse->num_socks] = sk;
	/* paired with smp_rmb() in reuseport_select_sock() */
	smp_wmb();
	reuse->num_socks++;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

	spin_unlock_bh(&reuseport_lock);

	return 0;
}
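
/*
 * The smp_wmb()/smp_rmb() pair above and in reuseport_select_sock()
 * is the classic publish pattern for a lock-free reader: fill the new
 * array slot, then make the element visible before the count.  A
 * generic sketch of the two sides (names here are illustrative):
 *
 *	writer:				reader:
 *	array[n] = item;		c = count;
 *	smp_wmb();			smp_rmb();
 *	count = n + 1;			use(array[i]);  i < c
 *
 * Without the barriers, a reader could observe the incremented count
 * while still seeing a stale NULL in the newly filled slot.
 */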

static void reuseport_free_rcu(struct rcu_head *head)
{
	struct sock_reuseport *reuse;

	reuse = container_of(head, struct sock_reuseport, rcu);
	if (reuse->prog)
		bpf_prog_destroy(reuse->prog);
	kfree(reuse);
}

void reuseport_detach_sock(struct sock *sk)
{
	struct sock_reuseport *reuse;
	int i;

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	rcu_assign_pointer(sk->sk_reuseport_cb, NULL);

	for (i = 0; i < reuse->num_socks; i++) {
		if (reuse->socks[i] == sk) {
			reuse->socks[i] = reuse->socks[reuse->num_socks - 1];
			reuse->num_socks--;
			if (reuse->num_socks == 0)
				call_rcu(&reuse->rcu, reuseport_free_rcu);
			break;
		}
	}
	spin_unlock_bh(&reuseport_lock);
}
EXPORT_SYMBOL(reuseport_detach_sock);
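
/*
 * Detach uses the usual unordered-array removal trick: overwrite the
 * departing entry with the last one and shrink the count, O(1) once
 * the linear search has found the slot.  For example, removing s1
 * from [s0, s1, s2] leaves [s0, s2] with num_socks == 2.  Element
 * order is not preserved, which is fine because selection below is by
 * hash or BPF-returned index, never by a stable position.
 */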

static struct sock *run_bpf(struct sock_reuseport *reuse, u16 socks,
			    struct bpf_prog *prog, struct sk_buff *skb,
			    int hdr_len)
{
	struct sk_buff *nskb = NULL;
	u32 index;

	if (skb_shared(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return NULL;
		skb = nskb;
	}

	/* temporarily advance data past protocol header */
	if (!pskb_pull(skb, hdr_len)) {
		kfree_skb(nskb);
		return NULL;
	}
	index = bpf_prog_run_save_cb(prog, skb);
	__skb_push(skb, hdr_len);

	consume_skb(nskb);

	if (index >= socks)
		return NULL;

	return reuse->socks[index];
}
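
/*
 * The attached program sees the payload (the protocol header has been
 * pulled) and its return value is used as an index into socks[].  A
 * minimal classic-BPF sketch in the spirit of the kernel's reuseport
 * selftests: pick the member from the first four payload bytes, modulo
 * a caller-chosen group size (NR_SOCKS is illustrative):
 *
 *	struct sock_filter code[] = {
 *		// A = first 4 bytes of payload
 *		{ BPF_LD  | BPF_W | BPF_ABS, 0, 0, 0 },
 *		// A = A % NR_SOCKS
 *		{ BPF_ALU | BPF_MOD | BPF_K, 0, 0, NR_SOCKS },
 *		// return A as the socket index
 *		{ BPF_RET | BPF_A, 0, 0, 0 },
 *	};
 *
 * An out-of-range index makes run_bpf() return NULL, which
 * reuseport_select_sock() passes through to its caller.
 */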

/**
 *  reuseport_select_sock - Select a socket from an SO_REUSEPORT group.
 *  @sk: First socket in the group.
 *  @hash: When no BPF filter is available, use this hash to select.
 *  @skb: skb to run through BPF filter.
 *  @hdr_len: BPF filter expects skb data pointer at payload data.  If
 *    the skb does not yet point at the payload, this parameter represents
 *    how far the pointer needs to advance to reach the payload.
 *  Returns a socket that should receive the packet (or NULL on error).
 */
struct sock *reuseport_select_sock(struct sock *sk,
				   u32 hash,
				   struct sk_buff *skb,
				   int hdr_len)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *prog;
	struct sock *sk2 = NULL;
	u16 socks;

	rcu_read_lock();
	reuse = rcu_dereference(sk->sk_reuseport_cb);

	/* if memory allocation failed or add call is not yet complete */
	if (!reuse)
		goto out;

	prog = rcu_dereference(reuse->prog);
	socks = READ_ONCE(reuse->num_socks);
	if (likely(socks)) {
		/* paired with smp_wmb() in reuseport_add_sock() */
		smp_rmb();

		if (prog && skb)
			sk2 = run_bpf(reuse, socks, prog, skb, hdr_len);
		else
			sk2 = reuse->socks[reciprocal_scale(hash, socks)];
	}

out:
	rcu_read_unlock();
	return sk2;
}
EXPORT_SYMBOL(reuseport_select_sock);
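
/*
 * Without a BPF program, selection maps the flow hash onto the live
 * member count with the reciprocal_scale() helper, which computes
 * ((u64)hash * socks) >> 32: a multiply-and-shift that replaces a
 * modulo and always yields an index in [0, socks).  For example,
 * hash = 0x80000000 with socks = 4 scales to index 2.
 */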

struct bpf_prog *
reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *old_prog;

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_prog = rcu_dereference_protected(reuse->prog,
					     lockdep_is_held(&reuseport_lock));
	rcu_assign_pointer(reuse->prog, prog);
	spin_unlock_bh(&reuseport_lock);

	return old_prog;
}
EXPORT_SYMBOL(reuseport_attach_prog);
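
/*
 * Userspace reaches reuseport_attach_prog() through setsockopt() with
 * SO_ATTACH_REUSEPORT_CBPF (a classic BPF filter) or
 * SO_ATTACH_REUSEPORT_EBPF (an eBPF program fd).  A sketch of the
 * classic case, reusing the illustrative filter from the run_bpf()
 * example above:
 *
 *	struct sock_fprog fprog = {
 *		.len = sizeof(code) / sizeof(code[0]),
 *		.filter = code,
 *	};
 *
 *	if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF,
 *		       &fprog, sizeof(fprog)))
 *		perror("SO_ATTACH_REUSEPORT_CBPF");
 *
 * Any previously attached program is returned here so the caller can
 * destroy it once no readers remain.
 */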
258