// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic TIME_WAIT sockets functions
 *
 *		From code originally in TCP
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>


/**
 * inet_twsk_bind_unhash - unhash a timewait socket from bind hash
 * @tw: timewait socket
 * @hashinfo: hashinfo pointer
 *
 * unhash a timewait socket from bind hash, if hashed.
 * bind hash lock must be held by caller.
 * Note: this drops the bind hash's reference on @tw via __sock_put();
 * the caller's own inet_twsk_put() must only happen after the lock is
 * released.
 */
void inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
			   struct inet_hashinfo *hashinfo)
{
	struct inet_bind2_bucket *tb2 = tw->tw_tb2;
	struct inet_bind_bucket *tb = tw->tw_tb;

	if (!tb)
		return;

	__sk_del_bind_node((struct sock *)tw);
	tw->tw_tb = NULL;
	tw->tw_tb2 = NULL;
	inet_bind2_bucket_destroy(hashinfo->bind2_bucket_cachep, tb2);
	inet_bind_bucket_destroy(tb);

	__sock_put((struct sock *)tw);
}
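
/* Illustrative sketch of the expected caller pattern for
 * inet_twsk_bind_unhash(): take the bind hash bucket locks, unhash, drop
 * the locks, and only release the caller's own reference with
 * inet_twsk_put() once the locks are gone.  This mirrors what
 * inet_twsk_kill() below does; the bucket lookups are elided.
 *
 *	spin_lock(&bhead->lock);
 *	spin_lock(&bhead2->lock);
 *	inet_twsk_bind_unhash(tw, hashinfo);
 *	spin_unlock(&bhead2->lock);
 *	spin_unlock(&bhead->lock);
 *	inet_twsk_put(tw);
 */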

/* Must be called with locally disabled BHs. */
static void inet_twsk_kill(struct inet_timewait_sock *tw)
{
	struct inet_hashinfo *hashinfo = tw->tw_dr->hashinfo;
	spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);
	struct inet_bind_hashbucket *bhead, *bhead2;

	spin_lock(lock);
	sk_nulls_del_node_init_rcu((struct sock *)tw);
	spin_unlock(lock);

	/* Disassociate with bind bucket. */
	bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
			hashinfo->bhash_size)];
	bhead2 = inet_bhashfn_portaddr(hashinfo, (struct sock *)tw,
				       twsk_net(tw), tw->tw_num);

	spin_lock(&bhead->lock);
	spin_lock(&bhead2->lock);
	inet_twsk_bind_unhash(tw, hashinfo);
	spin_unlock(&bhead2->lock);
	spin_unlock(&bhead->lock);

	refcount_dec(&tw->tw_dr->tw_refcount);
	inet_twsk_put(tw);
}

void inet_twsk_free(struct inet_timewait_sock *tw)
{
	struct module *owner = tw->tw_prot->owner;
	twsk_destructor((struct sock *)tw);
	kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw);
	module_put(owner);
}

void inet_twsk_put(struct inet_timewait_sock *tw)
{
	if (refcount_dec_and_test(&tw->tw_refcnt))
		inet_twsk_free(tw);
}
EXPORT_SYMBOL_GPL(inet_twsk_put);

static void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
				   struct hlist_nulls_head *list)
{
	hlist_nulls_add_head_rcu(&tw->tw_node, list);
}

static void inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo)
{
	__inet_twsk_schedule(tw, timeo, false);
}

/*
 * Enter the time wait state.
 * Essentially we whip up a timewait bucket, copy the relevant info into it
 * from the SK, and mess with hash chains and list linkage.
 *
 * The caller must not access @tw anymore after this function returns.
 */
void inet_twsk_hashdance_schedule(struct inet_timewait_sock *tw,
				  struct sock *sk,
				  struct inet_hashinfo *hashinfo,
				  int timeo)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
	spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
	struct inet_bind_hashbucket *bhead, *bhead2;

	/* Step 1: Put TW into bind hash. Original socket stays there too.
	   Note that any socket with inet->num != 0 MUST be bound in
	   binding cache, even if it is closed.
	 */
	bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->inet_num,
			hashinfo->bhash_size)];
	bhead2 = inet_bhashfn_portaddr(hashinfo, sk, twsk_net(tw), inet->inet_num);

	local_bh_disable();
	spin_lock(&bhead->lock);
	spin_lock(&bhead2->lock);

	tw->tw_tb = icsk->icsk_bind_hash;
	WARN_ON(!icsk->icsk_bind_hash);

	tw->tw_tb2 = icsk->icsk_bind2_hash;
	WARN_ON(!icsk->icsk_bind2_hash);
	sk_add_bind_node((struct sock *)tw, &tw->tw_tb2->owners);

	spin_unlock(&bhead2->lock);
	spin_unlock(&bhead->lock);

	spin_lock(lock);

	/* Step 2: Hash TW into tcp ehash chain */
	inet_twsk_add_node_rcu(tw, &ehead->chain);

	/* Step 3: Remove SK from hash chain */
	if (__sk_nulls_del_node_init_rcu(sk))
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

	/* Ensure above writes are committed into memory before updating the
	 * refcount.
	 * Provides ordering vs later refcount_inc().
	 */
	smp_wmb();
	/* tw_refcnt is set to 3 because we have:
	 * - one reference for bhash chain.
	 * - one reference for ehash chain.
	 * - one reference for timer.
	 * Also note that after this point, we lost our implicit reference
	 * so we are not allowed to use tw anymore.
	 */
	refcount_set(&tw->tw_refcnt, 3);

	inet_twsk_schedule(tw, timeo);

	spin_unlock(lock);
	local_bh_enable();
}
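
/* Illustrative sketch (not part of this file) of how a protocol such as TCP
 * is expected to drive inet_twsk_alloc() below together with
 * inet_twsk_hashdance_schedule() above: allocate the timewait socket, copy
 * protocol state into it, hand it over, and never touch it again.  The
 * helper name and its death-row argument are hypothetical; error handling
 * is reduced to the NULL check.
 *
 *	static void example_enter_timewait(struct sock *sk,
 *					   struct inet_timewait_death_row *dr,
 *					   int timeo)
 *	{
 *		struct inet_timewait_sock *tw;
 *
 *		tw = inet_twsk_alloc(sk, dr, TCP_TIME_WAIT);
 *		if (!tw)
 *			return;	// over sysctl_max_tw_buckets, close sk instead
 *
 *		// protocol-specific fields (timestamps, window scale, ...)
 *		// are copied into tw by the real callers at this point.
 *		inet_twsk_hashdance_schedule(tw, sk, dr->hashinfo, timeo);
 *		// tw is now owned by the hash tables and its timer.
 *	}
 */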

static void tw_timer_handler(struct timer_list *t)
{
	struct inet_timewait_sock *tw = timer_container_of(tw, t, tw_timer);

	inet_twsk_kill(tw);
}

struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
					   struct inet_timewait_death_row *dr,
					   const int state)
{
	struct inet_timewait_sock *tw;

	if (refcount_read(&dr->tw_refcount) - 1 >=
	    READ_ONCE(dr->sysctl_max_tw_buckets))
		return NULL;

	tw = kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
			      GFP_ATOMIC);
	if (tw) {
		const struct inet_sock *inet = inet_sk(sk);

		tw->tw_dr = dr;
		/* Give us an identity. */
		tw->tw_daddr = inet->inet_daddr;
		tw->tw_rcv_saddr = inet->inet_rcv_saddr;
		tw->tw_bound_dev_if = sk->sk_bound_dev_if;
		tw->tw_tos = inet->tos;
		tw->tw_num = inet->inet_num;
		tw->tw_state = TCP_TIME_WAIT;
		tw->tw_substate = state;
		tw->tw_sport = inet->inet_sport;
		tw->tw_dport = inet->inet_dport;
		tw->tw_family = sk->sk_family;
		tw->tw_reuse = sk->sk_reuse;
		tw->tw_reuseport = sk->sk_reuseport;
		tw->tw_hash = sk->sk_hash;
		tw->tw_ipv6only = 0;
		tw->tw_transparent = inet_test_bit(TRANSPARENT, sk);
		tw->tw_prot = sk->sk_prot_creator;
		atomic64_set(&tw->tw_cookie, atomic64_read(&sk->sk_cookie));
		twsk_net_set(tw, sock_net(sk));
		timer_setup(&tw->tw_timer, tw_timer_handler, 0);
		/*
		 * Because we use RCU lookups, we should not set tw_refcnt
		 * to a non-zero value before everything is set up for this
		 * timewait socket.
		 */
		refcount_set(&tw->tw_refcnt, 0);

		__module_get(tw->tw_prot->owner);
	}

	return tw;
}
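
/* Illustrative sketch (not part of this file) of the reader side the
 * comment above protects: an RCU ehash lookup may observe this timewait
 * socket before inet_twsk_hashdance_schedule() has published its refcount,
 * so a reference may only be taken with refcount_inc_not_zero(), as the
 * established-lookup and inet_twsk_purge() code do.  Match checks elided.
 *
 *	rcu_read_lock();
 *	sk_nulls_for_each_rcu(sk, node, &head->chain) {
 *		// ... check family, addresses and ports ...
 *		if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
 *			continue;	// not yet published, or being freed
 *		// ... use the socket, then drop the reference ...
 *		sock_gen_put(sk);
 *	}
 *	rcu_read_unlock();
 */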

/* These are always called from BH context. See callers in
 * tcp_input.c to verify this.
 */

/* This is for handling early-kills of TIME_WAIT sockets.
 * Warning: this consumes the reference.
 * Caller should not access tw anymore.
 */
void inet_twsk_deschedule_put(struct inet_timewait_sock *tw)
{
	struct inet_hashinfo *hashinfo = tw->tw_dr->hashinfo;
	spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);

	/* inet_twsk_purge() walks over all sockets, including tw ones,
	 * and removes them via inet_twsk_deschedule_put() after a
	 * refcount_inc_not_zero().
	 *
	 * inet_twsk_hashdance_schedule() must (re)init the refcount before
	 * arming the timer, i.e. inet_twsk_purge() can obtain a reference to
	 * a twsk that did not yet schedule the timer.
	 *
	 * The ehash lock synchronizes these two:
	 * After acquiring the lock, the timer is always scheduled (else
	 * timer_shutdown returns false), because hashdance_schedule releases
	 * the ehash lock only after completing the timer initialization.
	 *
	 * Without grabbing the ehash lock, we get:
	 * 1) cpu x sets twsk refcount to 3
	 * 2) cpu y bumps refcount to 4
	 * 3) cpu y calls inet_twsk_deschedule_put() and shuts timer down
	 * 4) cpu x tries to start timer, but mod_timer is a noop post-shutdown
	 * -> timer refcount is never decremented.
	 */
	spin_lock(lock);
	/* Makes sure hashdance_schedule() has completed */
	spin_unlock(lock);

	if (timer_shutdown_sync(&tw->tw_timer))
		inet_twsk_kill(tw);
	inet_twsk_put(tw);
}
EXPORT_SYMBOL(inet_twsk_deschedule_put);

void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm)
{
	/* timeout := RTO * 3.5
	 *
	 * 3.5 = 1+2+0.5 to wait for two retransmits.
	 *
	 * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
	 * our ACK acking that FIN can be lost. If N subsequent retransmitted
	 * FINs (or previous segments) are lost, the probability of such an
	 * event is p^(N+1), where p is the probability to lose a single
	 * packet, and the time to detect the loss is about RTO*(2^N - 1)
	 * with exponential backoff. Normal timewait length is calculated
	 * so that we wait at least for one retransmitted FIN (maximal RTO
	 * is 120sec).
	 * [ BTW Linux, following BSD, violates this requirement waiting
	 *   only for 60sec, we should wait at least for 240 secs.
	 *   Well, 240 consumes too much of resources 8)
	 * ]
	 * This interval is not reduced to catch old duplicates and
	 * responses to our wandering segments living for two MSLs.
	 * However, if we use PAWS to detect
	 * old duplicates, we can reduce the interval to bounds required
	 * by RTO, rather than MSL. So, if the peer understands PAWS, we
	 * kill the tw bucket after 3.5*RTO (it is important that this number
	 * is greater than the TS tick!) and detect old duplicates with the
	 * help of PAWS.
	 */
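	/* For instance, the default TCP_TIMEWAIT_LEN of 60*HZ is accounted
	 * below as LINUX_MIB_TIMEWAITED, whereas a hypothetical PAWS-based
	 * 3.5*RTO timeout of, say, 3.5 * 200 ms = 700 ms falls under the
	 * 4*HZ threshold and is counted as LINUX_MIB_TIMEWAITKILLED.
	 */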

	if (!rearm) {
		bool kill = timeo <= 4*HZ;

		__NET_INC_STATS(twsk_net(tw), kill ? LINUX_MIB_TIMEWAITKILLED :
						     LINUX_MIB_TIMEWAITED);
		BUG_ON(mod_timer(&tw->tw_timer, jiffies + timeo));
		refcount_inc(&tw->tw_dr->tw_refcount);
	} else {
		mod_timer_pending(&tw->tw_timer, jiffies + timeo);
	}
}

/* Remove all non-full sockets (TIME_WAIT and NEW_SYN_RECV) for a dead netns */
void inet_twsk_purge(struct inet_hashinfo *hashinfo)
{
	struct inet_ehash_bucket *head = &hashinfo->ehash[0];
	unsigned int ehash_mask = hashinfo->ehash_mask;
	struct hlist_nulls_node *node;
	unsigned int slot;
	struct sock *sk;

	for (slot = 0; slot <= ehash_mask; slot++, head++) {
		if (hlist_nulls_empty(&head->chain))
			continue;

restart_rcu:
		cond_resched();
		rcu_read_lock();
restart:
		sk_nulls_for_each_rcu(sk, node, &head->chain) {
			int state = inet_sk_state_load(sk);

			if ((1 << state) & ~(TCPF_TIME_WAIT |
					     TCPF_NEW_SYN_RECV))
				continue;

			if (refcount_read(&sock_net(sk)->ns.count))
				continue;

			if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
				continue;

			if (refcount_read(&sock_net(sk)->ns.count)) {
				sock_gen_put(sk);
				goto restart;
			}

			rcu_read_unlock();
			local_bh_disable();
			if (state == TCP_TIME_WAIT) {
				inet_twsk_deschedule_put(inet_twsk(sk));
			} else {
				struct request_sock *req = inet_reqsk(sk);

				inet_csk_reqsk_queue_drop_and_put(req->rsk_listener,
								  req);
			}
			local_bh_enable();
			goto restart_rcu;
		}
		/* If the nulls value we got at the end of this lookup is
		 * not the expected one, we must restart the lookup.
		 * We probably met an item that was moved to another chain.
		 */
		if (get_nulls_value(node) != slot)
			goto restart;
		rcu_read_unlock();
	}
}