// SPDX-License-Identifier: GPL-2.0
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/hash.h>
#include <linux/tcp_metrics.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/genetlink.h>

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash);

struct tcp_fastopen_metrics {
	u16	mss;
	u16	syn_loss:10,		/* Recurring Fast Open SYN losses */
		try_exp:2;		/* Request w/ exp. option (once) */
	unsigned long	last_syn_loss;	/* Last Fast Open SYN loss */
	struct	tcp_fastopen_cookie	cookie;
};

/* TCP_METRIC_MAX includes 2 extra fields for userspace compatibility.
 * The kernel only stores RTT and RTTVAR in usec resolution.
 */
#define TCP_METRIC_MAX_KERNEL (TCP_METRIC_MAX - 2)
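
/* Illustration (based on tcp_metrics_fill_info() below): a stored RTT of
 * 250000 usec is dumped twice over netlink, once under the usec attribute
 * (TCP_METRIC_RTT_US) as 250000 and once, converted with max(val / 1000, 1U),
 * under the legacy msec attribute (TCP_METRIC_RTT) as 250.  Only the usec
 * values live in tcpm_vals[].
 */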

struct tcp_metrics_block {
	struct tcp_metrics_block __rcu	*tcpm_next;
	possible_net_t			tcpm_net;
	struct inetpeer_addr		tcpm_saddr;
	struct inetpeer_addr		tcpm_daddr;
	unsigned long			tcpm_stamp;
	u32				tcpm_lock;
	u32				tcpm_vals[TCP_METRIC_MAX_KERNEL + 1];
	struct tcp_fastopen_metrics	tcpm_fastopen;

	struct rcu_head			rcu_head;
};

static inline struct net *tm_net(struct tcp_metrics_block *tm)
{
	return read_pnet(&tm->tcpm_net);
}

static bool tcp_metric_locked(struct tcp_metrics_block *tm,
			      enum tcp_metric_index idx)
{
	return tm->tcpm_lock & (1 << idx);
}

static u32 tcp_metric_get(struct tcp_metrics_block *tm,
			  enum tcp_metric_index idx)
{
	return tm->tcpm_vals[idx];
}

static void tcp_metric_set(struct tcp_metrics_block *tm,
			   enum tcp_metric_index idx,
			   u32 val)
{
	tm->tcpm_vals[idx] = val;
}

static bool addr_same(const struct inetpeer_addr *a,
		      const struct inetpeer_addr *b)
{
	return inetpeer_addr_cmp(a, b) == 0;
}

struct tcpm_hash_bucket {
	struct tcp_metrics_block __rcu	*chain;
};

static struct tcpm_hash_bucket	*tcp_metrics_hash __read_mostly;
static unsigned int		tcp_metrics_hash_log __read_mostly;

static DEFINE_SPINLOCK(tcp_metrics_lock);

static void tcpm_suck_dst(struct tcp_metrics_block *tm,
			  const struct dst_entry *dst,
			  bool fastopen_clear)
{
	u32 msval;
	u32 val;

	tm->tcpm_stamp = jiffies;

	val = 0;
	if (dst_metric_locked(dst, RTAX_RTT))
		val |= 1 << TCP_METRIC_RTT;
	if (dst_metric_locked(dst, RTAX_RTTVAR))
		val |= 1 << TCP_METRIC_RTTVAR;
	if (dst_metric_locked(dst, RTAX_SSTHRESH))
		val |= 1 << TCP_METRIC_SSTHRESH;
	if (dst_metric_locked(dst, RTAX_CWND))
		val |= 1 << TCP_METRIC_CWND;
	if (dst_metric_locked(dst, RTAX_REORDERING))
		val |= 1 << TCP_METRIC_REORDERING;
	tm->tcpm_lock = val;

	msval = dst_metric_raw(dst, RTAX_RTT);
	tm->tcpm_vals[TCP_METRIC_RTT] = msval * USEC_PER_MSEC;

	msval = dst_metric_raw(dst, RTAX_RTTVAR);
	tm->tcpm_vals[TCP_METRIC_RTTVAR] = msval * USEC_PER_MSEC;
	tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
	tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
	tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
	if (fastopen_clear) {
		tm->tcpm_fastopen.mss = 0;
		tm->tcpm_fastopen.syn_loss = 0;
		tm->tcpm_fastopen.try_exp = 0;
		tm->tcpm_fastopen.cookie.exp = false;
		tm->tcpm_fastopen.cookie.len = 0;
	}
}
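
/* Unit note (illustrative): the route metrics read above (RTAX_RTT and
 * RTAX_RTTVAR) are kept in msec, while this cache stores usec, hence the
 * USEC_PER_MSEC scaling; e.g. a dst RTT of 120 ms is cached as 120000 usec.
 */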

#define TCP_METRICS_TIMEOUT		(60 * 60 * HZ)

static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
	if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
		tcpm_suck_dst(tm, dst, false);
}
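
/* An entry left untouched for more than TCP_METRICS_TIMEOUT (one hour) is
 * not freed; it is simply re-seeded from the current route metrics while
 * keeping its Fast Open state (fastopen_clear == false).
 */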

#define TCP_METRICS_RECLAIM_DEPTH	5
#define TCP_METRICS_RECLAIM_PTR		(struct tcp_metrics_block *) 0x1UL

#define deref_locked(p)	\
	rcu_dereference_protected(p, lockdep_is_held(&tcp_metrics_lock))

static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
					  struct inetpeer_addr *saddr,
					  struct inetpeer_addr *daddr,
					  unsigned int hash)
{
	struct tcp_metrics_block *tm;
	struct net *net;
	bool reclaim = false;

	spin_lock_bh(&tcp_metrics_lock);
	net = dev_net(dst->dev);

	/* While waiting for the spin-lock the cache might have been populated
	 * with this entry and so we have to check again.
	 */
	tm = __tcp_get_metrics(saddr, daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR) {
		reclaim = true;
		tm = NULL;
	}
	if (tm) {
		tcpm_check_stamp(tm, dst);
		goto out_unlock;
	}

	if (unlikely(reclaim)) {
		struct tcp_metrics_block *oldest;

		oldest = deref_locked(tcp_metrics_hash[hash].chain);
		for (tm = deref_locked(oldest->tcpm_next); tm;
		     tm = deref_locked(tm->tcpm_next)) {
			if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
				oldest = tm;
		}
		tm = oldest;
	} else {
		tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
		if (!tm)
			goto out_unlock;
	}
	write_pnet(&tm->tcpm_net, net);
	tm->tcpm_saddr = *saddr;
	tm->tcpm_daddr = *daddr;

	tcpm_suck_dst(tm, dst, true);

	if (likely(!reclaim)) {
		tm->tcpm_next = tcp_metrics_hash[hash].chain;
		rcu_assign_pointer(tcp_metrics_hash[hash].chain, tm);
	}

out_unlock:
	spin_unlock_bh(&tcp_metrics_lock);
	return tm;
}

static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
	if (tm)
		return tm;
	if (depth > TCP_METRICS_RECLAIM_DEPTH)
		return TCP_METRICS_RECLAIM_PTR;
	return NULL;
}
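
/* Reclaim protocol in brief: on a miss, __tcp_get_metrics() returns NULL for
 * a short chain but the TCP_METRICS_RECLAIM_PTR sentinel once the chain is
 * more than TCP_METRICS_RECLAIM_DEPTH entries deep.  tcpm_new() then reuses
 * the stalest entry of that bucket in place instead of growing the chain.
 */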

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash)
{
	struct tcp_metrics_block *tm;
	int depth = 0;

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, saddr) &&
		    addr_same(&tm->tcpm_daddr, daddr) &&
		    net_eq(tm_net(tm), net))
			break;
		depth++;
	}
	return tcp_get_encode(tm, depth);
}

static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
						       struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	saddr.family = req->rsk_ops->family;
	daddr.family = req->rsk_ops->family;
	switch (daddr.family) {
	case AF_INET:
		inetpeer_set_addr_v4(&saddr, inet_rsk(req)->ir_loc_addr);
		inetpeer_set_addr_v4(&daddr, inet_rsk(req)->ir_rmt_addr);
		hash = ipv4_addr_hash(inet_rsk(req)->ir_rmt_addr);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		inetpeer_set_addr_v6(&saddr, &inet_rsk(req)->ir_v6_loc_addr);
		inetpeer_set_addr_v6(&daddr, &inet_rsk(req)->ir_v6_rmt_addr);
		hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
		break;
#endif
	default:
		return NULL;
	}

	net = dev_net(dst->dev);
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, &saddr) &&
		    addr_same(&tm->tcpm_daddr, &daddr) &&
		    net_eq(tm_net(tm), net))
			break;
	}
	tcpm_check_stamp(tm, dst);
	return tm;
}

static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
						 struct dst_entry *dst,
						 bool create)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	if (sk->sk_family == AF_INET) {
		inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
		inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
		hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (sk->sk_family == AF_INET6) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
			inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
			inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
			hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
		} else {
			inetpeer_set_addr_v6(&saddr, &sk->sk_v6_rcv_saddr);
			inetpeer_set_addr_v6(&daddr, &sk->sk_v6_daddr);
			hash = ipv6_addr_hash(&sk->sk_v6_daddr);
		}
	}
#endif
	else
		return NULL;

	net = dev_net(dst->dev);
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	tm = __tcp_get_metrics(&saddr, &daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR)
		tm = NULL;
	if (!tm && create)
		tm = tcpm_new(dst, &saddr, &daddr, hash);
	else
		tcpm_check_stamp(tm, dst);

	return tm;
}
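
/* Bucket selection sketch: the destination address hash is mixed with the
 * netns (net_hash_mix()) and folded by hash_32() down to tcp_metrics_hash_log
 * bits.  E.g. with a 16K-slot table (the default on machines with enough
 * memory, see tcp_net_metrics_init()), tcp_metrics_hash_log is 14 and bucket
 * indices span 0..16383.
 */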

/* Save metrics learned by this TCP session.  This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct tcp_metrics_block *tm;
	unsigned long rtt;
	u32 val;
	int m;

	sk_dst_confirm(sk);
	if (net->ipv4.sysctl_tcp_nometrics_save || !dst)
		return;

	rcu_read_lock();
	if (icsk->icsk_backoff || !tp->srtt_us) {
		/* This session failed to estimate rtt. Why?
		 * Probably, no packets returned in time.  Reset our
		 * results.
		 */
		tm = tcp_get_metrics(sk, dst, false);
		if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
			tcp_metric_set(tm, TCP_METRIC_RTT, 0);
		goto out_unlock;
	} else
		tm = tcp_get_metrics(sk, dst, true);

	if (!tm)
		goto out_unlock;

	rtt = tcp_metric_get(tm, TCP_METRIC_RTT);
	m = rtt - tp->srtt_us;

	/* If the newly calculated RTT is larger than the stored one, store
	 * the new one.  Otherwise, use EWMA.  Remember, RTT overestimation
	 * is always better than underestimation.
	 */
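	/* Worked example (illustrative numbers): with a cached rtt of
	 * 100000 usec and tp->srtt_us == 80000, m == 20000, so the cache
	 * decays to 100000 - (20000 >> 3) == 97500.  If the new sample is
	 * the larger one (m <= 0), it is taken verbatim.
	 */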
	if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
		if (m <= 0)
			rtt = tp->srtt_us;
		else
			rtt -= (m >> 3);
		tcp_metric_set(tm, TCP_METRIC_RTT, rtt);
	}

	if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
		unsigned long var;

		if (m < 0)
			m = -m;

		/* Scale deviation to rttvar fixed point */
		m >>= 1;
		if (m < tp->mdev_us)
			m = tp->mdev_us;

		var = tcp_metric_get(tm, TCP_METRIC_RTTVAR);
		if (m >= var)
			var = m;
		else
			var -= (var - m) >> 2;

		tcp_metric_set(tm, TCP_METRIC_RTTVAR, var);
	}

	if (tcp_in_initial_slowstart(tp)) {
		/* Slow start has not finished yet. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && (tp->snd_cwnd >> 1) > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_cwnd >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			if (tp->snd_cwnd > val)
				tcp_metric_set(tm, TCP_METRIC_CWND,
					       tp->snd_cwnd);
		}
	} else if (!tcp_in_slow_start(tp) &&
		   icsk->icsk_ca_state == TCP_CA_Open) {
		/* Congestion avoidance phase, cwnd is reliable. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
			tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
				       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
		}
	} else {
		/* Else slow start did not finish; cwnd is not meaningful
		 * and ssthresh may also be invalid.
		 */
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND,
				       (val + tp->snd_ssthresh) >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && tp->snd_ssthresh > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_ssthresh);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
			val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
			if (val < tp->reordering &&
			    tp->reordering != net->ipv4.sysctl_tcp_reordering)
				tcp_metric_set(tm, TCP_METRIC_REORDERING,
					       tp->reordering);
		}
	}
	tm->tcpm_stamp = jiffies;
out_unlock:
	rcu_read_unlock();
}

/* Initialize metrics on socket. */

void tcp_init_metrics(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	u32 val, crtt = 0; /* cached RTT scaled by 8 */

	sk_dst_confirm(sk);
	if (!dst)
		goto reset;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (!tm) {
		rcu_read_unlock();
		goto reset;
	}

	if (tcp_metric_locked(tm, TCP_METRIC_CWND))
		tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

	val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
	if (val) {
		tp->snd_ssthresh = val;
		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
			tp->snd_ssthresh = tp->snd_cwnd_clamp;
	} else {
		/* ssthresh may have been reduced unnecessarily during the
		 * 3WHS.  Restore it back to its initial default.
		 */
		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	}
	val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
	if (val && tp->reordering != val) {
		tcp_disable_fack(tp);
		tp->reordering = val;
	}

	crtt = tcp_metric_get(tm, TCP_METRIC_RTT);
	rcu_read_unlock();
reset:
	/* The initial RTT measurement from the SYN/SYN-ACK is not ideal
	 * to seed the RTO for later data packets because SYN packets are
	 * small.  Use the per-dst cached values to seed the RTO but keep
	 * the RTT estimator variables intact (e.g., srtt, mdev, rttvar).
	 * Later the RTO will be updated immediately upon obtaining the first
	 * data RTT sample (tcp_rtt_estimator()).  Hence the cached RTT only
	 * influences the first RTO but not later RTT estimation.
	 *
	 * But if RTT is not available from the SYN (due to retransmits or
	 * syn cookies) or the cache, force a conservative 3secs timeout.
	 *
	 * A bit of theory.  RTT is the time that passes after a "normal"
	 * sized packet is sent until it is ACKed.  In normal circumstances
	 * sending small packets forces the peer to delay ACKs and the
	 * calculation is still correct.  The algorithm is adaptive and,
	 * provided we follow specs, it NEVER underestimates RTT.  BUT!  If
	 * the peer plays clever tricks such as "quick acks" for long enough
	 * to decrease RTT to a low value, and then abruptly stops doing so
	 * and starts delaying ACKs, expect trouble.
	 */
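	/* Worked example (illustrative, assuming HZ == 1000 and the default
	 * 200 ms tcp_rto_min()): a cached RTT of 200 ms is stored as
	 * 1600000 (usec scaled by 8).  crtt /= 8 * USEC_PER_SEC / HZ yields
	 * 200 jiffies, so icsk_rto = 200 + max(400, 200) = 600 jiffies.
	 */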
	if (crtt > tp->srtt_us) {
		/* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
		crtt /= 8 * USEC_PER_SEC / HZ;
		inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
	} else if (tp->srtt_us == 0) {
		/* RFC6298 5.7: We've failed to get a valid RTT sample from
		 * the 3WHS.  This is most likely due to retransmission,
		 * including a spurious one.  Reset the RTO back to 3secs
		 * from the more aggressive 1sec to avoid more spurious
		 * retransmissions.
		 */
		tp->rttvar_us = jiffies_to_usecs(TCP_TIMEOUT_FALLBACK);
		tp->mdev_us = tp->mdev_max_us = tp->rttvar_us;

		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
	}
	/* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
	 * retransmitted.  In light of RFC6298's more aggressive 1sec
	 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
	 * retransmission has occurred.
	 */
	if (tp->total_retrans > 1)
		tp->snd_cwnd = 1;
	else
		tp->snd_cwnd = tcp_init_cwnd(tp, dst);
	tp->snd_cwnd_stamp = tcp_jiffies32;
}

bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	bool ret;

	if (!dst)
		return false;

	rcu_read_lock();
	tm = __tcp_get_metrics_req(req, dst);
	if (tm && tcp_metric_get(tm, TCP_METRIC_RTT))
		ret = true;
	else
		ret = false;
	rcu_read_unlock();

	return ret;
}
static DEFINE_SEQLOCK(fastopen_seqlock);
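
/* The Fast Open fields are multi-word and updated outside the RCU read-side
 * protocol, so readers take this seqlock and retry if they race with a
 * writer (see the read_seqbegin()/read_seqretry() loop below).
 */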

void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
			    struct tcp_fastopen_cookie *cookie,
			    int *syn_loss, unsigned long *last_syn_loss)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			if (tfom->mss)
				*mss = tfom->mss;
			*cookie = tfom->cookie;
			if (cookie->len <= 0 && tfom->try_exp == 1)
				cookie->exp = true;
			*syn_loss = tfom->syn_loss;
			*last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
		} while (read_seqretry(&fastopen_seqlock, seq));
	}
	rcu_read_unlock();
}

void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
			    u16 try_exp)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_metrics_block *tm;

	if (!dst)
		return;
	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;

		write_seqlock_bh(&fastopen_seqlock);
		if (mss)
			tfom->mss = mss;
		if (cookie && cookie->len > 0)
			tfom->cookie = *cookie;
		else if (try_exp > tfom->try_exp &&
			 tfom->cookie.len <= 0 && !tfom->cookie.exp)
			tfom->try_exp = try_exp;
		if (syn_lost) {
			++tfom->syn_loss;
			tfom->last_syn_loss = jiffies;
		} else
			tfom->syn_loss = 0;
		write_sequnlock_bh(&fastopen_seqlock);
	}
	rcu_read_unlock();
}

static struct genl_family tcp_metrics_nl_family;

static const struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
	[TCP_METRICS_ATTR_ADDR_IPV4]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_ADDR_IPV6]	= { .type = NLA_BINARY,
					    .len = sizeof(struct in6_addr), },
	/* The following attributes are not received for GET/DEL;
	 * we keep them for reference.
	 */
#if 0
	[TCP_METRICS_ATTR_AGE]		= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_TW_TSVAL]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_TW_TS_STAMP]	= { .type = NLA_S32, },
	[TCP_METRICS_ATTR_VALS]		= { .type = NLA_NESTED, },
	[TCP_METRICS_ATTR_FOPEN_MSS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROPS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]	= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_FOPEN_COOKIE]	= { .type = NLA_BINARY,
					    .len = TCP_FASTOPEN_COOKIE_MAX, },
#endif
};
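
/* Userspace note (illustrative): iproute2 talks to this genetlink family by
 * its TCP_METRICS_GENL_NAME ("tcp_metrics"); e.g. `ip tcp_metrics show` maps
 * to TCP_METRICS_CMD_GET dumps and `ip tcp_metrics flush` to
 * TCP_METRICS_CMD_DEL.
 */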

/* Add attributes; the caller cancels its header on failure. */
static int tcp_metrics_fill_info(struct sk_buff *msg,
				 struct tcp_metrics_block *tm)
{
	struct nlattr *nest;
	int i;

	switch (tm->tcpm_daddr.family) {
	case AF_INET:
		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_ADDR_IPV4,
				    inetpeer_get_addr_v4(&tm->tcpm_daddr)) < 0)
			goto nla_put_failure;
		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_SADDR_IPV4,
				    inetpeer_get_addr_v4(&tm->tcpm_saddr)) < 0)
			goto nla_put_failure;
		break;
	case AF_INET6:
		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_ADDR_IPV6,
				     inetpeer_get_addr_v6(&tm->tcpm_daddr)) < 0)
			goto nla_put_failure;
		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_SADDR_IPV6,
				     inetpeer_get_addr_v6(&tm->tcpm_saddr)) < 0)
			goto nla_put_failure;
		break;
	default:
		return -EAFNOSUPPORT;
	}

	if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
			  jiffies - tm->tcpm_stamp,
			  TCP_METRICS_ATTR_PAD) < 0)
		goto nla_put_failure;

	{
		int n = 0;

		nest = nla_nest_start(msg, TCP_METRICS_ATTR_VALS);
		if (!nest)
			goto nla_put_failure;
		for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) {
			u32 val = tm->tcpm_vals[i];

			if (!val)
				continue;
			if (i == TCP_METRIC_RTT) {
				if (nla_put_u32(msg, TCP_METRIC_RTT_US + 1,
						val) < 0)
					goto nla_put_failure;
				n++;
				val = max(val / 1000, 1U);
			}
			if (i == TCP_METRIC_RTTVAR) {
				if (nla_put_u32(msg, TCP_METRIC_RTTVAR_US + 1,
						val) < 0)
					goto nla_put_failure;
				n++;
				val = max(val / 1000, 1U);
			}
			if (nla_put_u32(msg, i + 1, val) < 0)
				goto nla_put_failure;
			n++;
		}
		if (n)
			nla_nest_end(msg, nest);
		else
			nla_nest_cancel(msg, nest);
	}

	{
		struct tcp_fastopen_metrics tfom_copy[1], *tfom;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			tfom_copy[0] = tm->tcpm_fastopen;
		} while (read_seqretry(&fastopen_seqlock, seq));

		tfom = tfom_copy;
		if (tfom->mss &&
		    nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
				tfom->mss) < 0)
			goto nla_put_failure;
		if (tfom->syn_loss &&
		    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
				tfom->syn_loss) < 0 ||
		     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
				jiffies - tfom->last_syn_loss,
				TCP_METRICS_ATTR_PAD) < 0))
			goto nla_put_failure;
		if (tfom->cookie.len > 0 &&
		    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
			    tfom->cookie.len, tfom->cookie.val) < 0)
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int tcp_metrics_dump_info(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct tcp_metrics_block *tm)
{
	void *hdr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tcp_metrics_nl_family, NLM_F_MULTI,
			  TCP_METRICS_CMD_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (tcp_metrics_fill_info(skb, tm) < 0)
		goto nla_put_failure;

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int tcp_metrics_nl_dump(struct sk_buff *skb,
			       struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	unsigned int max_rows = 1U << tcp_metrics_hash_log;
	unsigned int row, s_row = cb->args[0];
	int s_col = cb->args[1], col = s_col;

	for (row = s_row; row < max_rows; row++, s_col = 0) {
		struct tcp_metrics_block *tm;
		struct tcpm_hash_bucket *hb = tcp_metrics_hash + row;

		rcu_read_lock();
		for (col = 0, tm = rcu_dereference(hb->chain); tm;
		     tm = rcu_dereference(tm->tcpm_next), col++) {
			if (!net_eq(tm_net(tm), net))
				continue;
			if (col < s_col)
				continue;
			if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
				rcu_read_unlock();
				goto done;
			}
		}
		rcu_read_unlock();
	}

done:
	cb->args[0] = row;
	cb->args[1] = col;
	return skb->len;
}
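
/* Dump resume note: cb->args[0] and cb->args[1] persist the (row, col)
 * position between dump passes, so a partially filled skb resumes at the
 * bucket and chain offset where the previous pass stopped.
 */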

static int __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			   unsigned int *hash, int optional, int v4, int v6)
{
	struct nlattr *a;

	a = info->attrs[v4];
	if (a) {
		inetpeer_set_addr_v4(addr, nla_get_in_addr(a));
		if (hash)
			*hash = ipv4_addr_hash(inetpeer_get_addr_v4(addr));
		return 0;
	}
	a = info->attrs[v6];
	if (a) {
		struct in6_addr in6;

		if (nla_len(a) != sizeof(struct in6_addr))
			return -EINVAL;
		in6 = nla_get_in6_addr(a);
		inetpeer_set_addr_v6(addr, &in6);
		if (hash)
			*hash = ipv6_addr_hash(inetpeer_get_addr_v6(addr));
		return 0;
	}
	return optional ? 1 : -EAFNOSUPPORT;
}

static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			 unsigned int *hash, int optional)
{
	return __parse_nl_addr(info, addr, hash, optional,
			       TCP_METRICS_ATTR_ADDR_IPV4,
			       TCP_METRICS_ATTR_ADDR_IPV6);
}

static int parse_nl_saddr(struct genl_info *info, struct inetpeer_addr *addr)
{
	return __parse_nl_addr(info, addr, NULL, 0,
			       TCP_METRICS_ATTR_SADDR_IPV4,
			       TCP_METRICS_ATTR_SADDR_IPV6);
}
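
/* Return convention for the parse helpers above: 0 means an address was
 * parsed, 1 means the (optional) attribute was absent, and a negative errno
 * reports a malformed or missing required attribute.
 */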

static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct sk_buff *msg;
	struct net *net = genl_info_net(info);
	void *reply;
	int ret;
	bool src = true;

	ret = parse_nl_addr(info, &daddr, &hash, 0);
	if (ret < 0)
		return ret;

	ret = parse_nl_saddr(info, &saddr);
	if (ret < 0)
		src = false;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
				  info->genlhdr->cmd);
	if (!reply)
		goto nla_put_failure;

	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);
	ret = -ESRCH;
	rcu_read_lock();
	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_daddr, &daddr) &&
		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
		    net_eq(tm_net(tm), net)) {
			ret = tcp_metrics_fill_info(msg, tm);
			break;
		}
	}
	rcu_read_unlock();
	if (ret < 0)
		goto out_free;

	genlmsg_end(msg, reply);
	return genlmsg_reply(msg, info);

nla_put_failure:
	ret = -EMSGSIZE;

out_free:
	nlmsg_free(msg);
	return ret;
}

static void tcp_metrics_flush_all(struct net *net)
{
	unsigned int max_rows = 1U << tcp_metrics_hash_log;
	struct tcpm_hash_bucket *hb = tcp_metrics_hash;
	struct tcp_metrics_block *tm;
	unsigned int row;

	for (row = 0; row < max_rows; row++, hb++) {
		struct tcp_metrics_block __rcu **pp;
		bool match;

		spin_lock_bh(&tcp_metrics_lock);
		pp = &hb->chain;
		for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
			match = net ? net_eq(tm_net(tm), net) :
				!atomic_read(&tm_net(tm)->count);
			if (match) {
				*pp = tm->tcpm_next;
				kfree_rcu(tm, rcu_head);
			} else {
				pp = &tm->tcpm_next;
			}
		}
		spin_unlock_bh(&tcp_metrics_lock);
	}
}
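
/* A NULL net means "flush entries of any netns whose refcount has already
 * dropped to zero"; that is the exit_batch path below, while the netlink DEL
 * command always passes a specific netns.
 */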

static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct tcpm_hash_bucket *hb;
	struct tcp_metrics_block *tm;
	struct tcp_metrics_block __rcu **pp;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net = genl_info_net(info);
	int ret;
	bool src = true, found = false;

	ret = parse_nl_addr(info, &daddr, &hash, 1);
	if (ret < 0)
		return ret;
	if (ret > 0) {
		tcp_metrics_flush_all(net);
		return 0;
	}
	ret = parse_nl_saddr(info, &saddr);
	if (ret < 0)
		src = false;

	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);
	hb = tcp_metrics_hash + hash;
	pp = &hb->chain;
	spin_lock_bh(&tcp_metrics_lock);
	for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
		if (addr_same(&tm->tcpm_daddr, &daddr) &&
		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
		    net_eq(tm_net(tm), net)) {
			*pp = tm->tcpm_next;
			kfree_rcu(tm, rcu_head);
			found = true;
		} else {
			pp = &tm->tcpm_next;
		}
	}
	spin_unlock_bh(&tcp_metrics_lock);
	if (!found)
		return -ESRCH;
	return 0;
}

static const struct genl_ops tcp_metrics_nl_ops[] = {
	{
		.cmd = TCP_METRICS_CMD_GET,
		.doit = tcp_metrics_nl_cmd_get,
		.dumpit = tcp_metrics_nl_dump,
		.policy = tcp_metrics_nl_policy,
	},
	{
		.cmd = TCP_METRICS_CMD_DEL,
		.doit = tcp_metrics_nl_cmd_del,
		.policy = tcp_metrics_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};

static struct genl_family tcp_metrics_nl_family __ro_after_init = {
	.hdrsize	= 0,
	.name		= TCP_METRICS_GENL_NAME,
	.version	= TCP_METRICS_GENL_VERSION,
	.maxattr	= TCP_METRICS_ATTR_MAX,
	.netnsok	= true,
	.module		= THIS_MODULE,
	.ops		= tcp_metrics_nl_ops,
	.n_ops		= ARRAY_SIZE(tcp_metrics_nl_ops),
};

static unsigned int tcpmhash_entries;
static int __init set_tcpmhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtouint(str, 0, &tcpmhash_entries);
	if (ret)
		return 0;

	return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);
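
/* Usage (illustrative): booting with "tcpmhash_entries=4096" on the kernel
 * command line overrides the memory-based default table sizing below.
 */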

static int __net_init tcp_net_metrics_init(struct net *net)
{
	size_t size;
	unsigned int slots;

	if (!net_eq(net, &init_net))
		return 0;

	slots = tcpmhash_entries;
	if (!slots) {
		if (totalram_pages >= 128 * 1024)
			slots = 16 * 1024;
		else
			slots = 8 * 1024;
	}

	tcp_metrics_hash_log = order_base_2(slots);
	size = sizeof(struct tcpm_hash_bucket) << tcp_metrics_hash_log;

	tcp_metrics_hash = kvzalloc(size, GFP_KERNEL);
	if (!tcp_metrics_hash)
		return -ENOMEM;

	return 0;
}
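
/* Sizing example (illustrative, assuming a 64-bit build): 16K slots give
 * tcp_metrics_hash_log == 14 and a table of 16384 pointer-sized buckets,
 * i.e. 128 KiB.  The table is global and only allocated for init_net.
 */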

static void __net_exit tcp_net_metrics_exit_batch(struct list_head *net_exit_list)
{
	tcp_metrics_flush_all(NULL);
}

static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
	.init		=	tcp_net_metrics_init,
	.exit_batch	=	tcp_net_metrics_exit_batch,
};

void __init tcp_metrics_init(void)
{
	int ret;

	ret = register_pernet_subsys(&tcp_net_metrics_ops);
	if (ret < 0)
		panic("Could not allocate the tcp_metrics hash table\n");

	ret = genl_register_family(&tcp_metrics_nl_family);
	if (ret < 0)
		panic("Could not register tcp_metrics generic netlink\n");
}