/* xref: /linux/net/ipv4/tcp_metrics.c (revision 995231c820e3bd3633cb38bf4ea6f2541e1da331) */
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/hash.h>
#include <linux/tcp_metrics.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/genetlink.h>

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash);

struct tcp_fastopen_metrics {
	u16	mss;
	u16	syn_loss:10,		/* Recurring Fast Open SYN losses */
		try_exp:2;		/* Request w/ exp. option (once) */
	unsigned long	last_syn_loss;	/* Last Fast Open SYN loss */
	struct	tcp_fastopen_cookie	cookie;
};

/* TCP_METRIC_MAX includes 2 extra fields kept for userspace compatibility;
 * the kernel itself stores RTT and RTTVAR in usec resolution.
 */
#define TCP_METRIC_MAX_KERNEL (TCP_METRIC_MAX - 2)
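/* For reference, a sketch of the uapi enum this define assumes; see
 * include/uapi/linux/tcp_metrics.h for the authoritative layout:
 *
 *	enum tcp_metric_index {
 *		TCP_METRIC_RTT,			(ms resolution)
 *		TCP_METRIC_RTTVAR,		(ms resolution)
 *		TCP_METRIC_SSTHRESH,
 *		TCP_METRIC_CWND,
 *		TCP_METRIC_REORDERING,
 *		TCP_METRIC_RTT_US,		(usec resolution)
 *		TCP_METRIC_RTTVAR_US,		(usec resolution)
 *		__TCP_METRIC_MAX,
 *	};
 *	#define TCP_METRIC_MAX	(__TCP_METRIC_MAX - 1)
 *
 * i.e. TCP_METRIC_MAX == 6 and TCP_METRIC_MAX_KERNEL == 4: tcpm_vals[]
 * below holds five slots, the RTT and RTTVAR slots are kept in usec
 * internally, and the ms-resolution values are synthesized at dump time
 * in tcp_metrics_fill_info().
 */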

struct tcp_metrics_block {
	struct tcp_metrics_block __rcu	*tcpm_next;
	possible_net_t			tcpm_net;
	struct inetpeer_addr		tcpm_saddr;
	struct inetpeer_addr		tcpm_daddr;
	unsigned long			tcpm_stamp;
	u32				tcpm_lock;
	u32				tcpm_vals[TCP_METRIC_MAX_KERNEL + 1];
	struct tcp_fastopen_metrics	tcpm_fastopen;

	struct rcu_head			rcu_head;
};

static inline struct net *tm_net(struct tcp_metrics_block *tm)
{
	return read_pnet(&tm->tcpm_net);
}

static bool tcp_metric_locked(struct tcp_metrics_block *tm,
			      enum tcp_metric_index idx)
{
	return tm->tcpm_lock & (1 << idx);
}

static u32 tcp_metric_get(struct tcp_metrics_block *tm,
			  enum tcp_metric_index idx)
{
	return tm->tcpm_vals[idx];
}

static void tcp_metric_set(struct tcp_metrics_block *tm,
			   enum tcp_metric_index idx,
			   u32 val)
{
	tm->tcpm_vals[idx] = val;
}

static bool addr_same(const struct inetpeer_addr *a,
		      const struct inetpeer_addr *b)
{
	return inetpeer_addr_cmp(a, b) == 0;
}

struct tcpm_hash_bucket {
	struct tcp_metrics_block __rcu	*chain;
};

static struct tcpm_hash_bucket	*tcp_metrics_hash __read_mostly;
static unsigned int		tcp_metrics_hash_log __read_mostly;

static DEFINE_SPINLOCK(tcp_metrics_lock);

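/* Refresh @tm from the route metrics attached to @dst: record which
 * metrics the route has locked, copy RTT/RTTVAR (converting from ms to
 * usec) together with ssthresh, cwnd and reordering, and optionally
 * clear the cached TCP Fast Open state.
 */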
static void tcpm_suck_dst(struct tcp_metrics_block *tm,
			  const struct dst_entry *dst,
			  bool fastopen_clear)
{
	u32 msval;
	u32 val;

	tm->tcpm_stamp = jiffies;

	val = 0;
	if (dst_metric_locked(dst, RTAX_RTT))
		val |= 1 << TCP_METRIC_RTT;
	if (dst_metric_locked(dst, RTAX_RTTVAR))
		val |= 1 << TCP_METRIC_RTTVAR;
	if (dst_metric_locked(dst, RTAX_SSTHRESH))
		val |= 1 << TCP_METRIC_SSTHRESH;
	if (dst_metric_locked(dst, RTAX_CWND))
		val |= 1 << TCP_METRIC_CWND;
	if (dst_metric_locked(dst, RTAX_REORDERING))
		val |= 1 << TCP_METRIC_REORDERING;
	tm->tcpm_lock = val;

	msval = dst_metric_raw(dst, RTAX_RTT);
	tm->tcpm_vals[TCP_METRIC_RTT] = msval * USEC_PER_MSEC;

	msval = dst_metric_raw(dst, RTAX_RTTVAR);
	tm->tcpm_vals[TCP_METRIC_RTTVAR] = msval * USEC_PER_MSEC;
	tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
	tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
	tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
	if (fastopen_clear) {
		tm->tcpm_fastopen.mss = 0;
		tm->tcpm_fastopen.syn_loss = 0;
		tm->tcpm_fastopen.try_exp = 0;
		tm->tcpm_fastopen.cookie.exp = false;
		tm->tcpm_fastopen.cookie.len = 0;
	}
}

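/* An entry is considered stale and is re-seeded from the route (see
 * tcpm_check_stamp()) after one hour without an update.
 */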
#define TCP_METRICS_TIMEOUT		(60 * 60 * HZ)

static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
	if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
		tcpm_suck_dst(tm, dst, false);
}

#define TCP_METRICS_RECLAIM_DEPTH	5
#define TCP_METRICS_RECLAIM_PTR		(struct tcp_metrics_block *) 0x1UL
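/* Lookups return TCP_METRICS_RECLAIM_PTR, a non-NULL sentinel distinct
 * from any real entry, when a chain is walked more than
 * TCP_METRICS_RECLAIM_DEPTH deep without a match; tcpm_new() then
 * recycles the oldest entry in the bucket instead of growing the chain.
 */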

#define deref_locked(p)	\
	rcu_dereference_protected(p, lockdep_is_held(&tcp_metrics_lock))

static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
					  struct inetpeer_addr *saddr,
					  struct inetpeer_addr *daddr,
					  unsigned int hash)
{
	struct tcp_metrics_block *tm;
	struct net *net;
	bool reclaim = false;

	spin_lock_bh(&tcp_metrics_lock);
	net = dev_net(dst->dev);

	/* While waiting for the spinlock, the cache might have been
	 * populated with this entry, so we must check again.
	 */
	tm = __tcp_get_metrics(saddr, daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR) {
		reclaim = true;
		tm = NULL;
	}
	if (tm) {
		tcpm_check_stamp(tm, dst);
		goto out_unlock;
	}

	if (unlikely(reclaim)) {
		struct tcp_metrics_block *oldest;

		oldest = deref_locked(tcp_metrics_hash[hash].chain);
		for (tm = deref_locked(oldest->tcpm_next); tm;
		     tm = deref_locked(tm->tcpm_next)) {
			if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
				oldest = tm;
		}
		tm = oldest;
	} else {
		tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
		if (!tm)
			goto out_unlock;
	}
	write_pnet(&tm->tcpm_net, net);
	tm->tcpm_saddr = *saddr;
	tm->tcpm_daddr = *daddr;

	tcpm_suck_dst(tm, dst, true);

	if (likely(!reclaim)) {
		tm->tcpm_next = tcp_metrics_hash[hash].chain;
		rcu_assign_pointer(tcp_metrics_hash[hash].chain, tm);
	}

out_unlock:
	spin_unlock_bh(&tcp_metrics_lock);
	return tm;
}

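/* Encode the result of a chain walk: a found entry is returned as-is,
 * a miss on an overlong chain maps to TCP_METRICS_RECLAIM_PTR, and a
 * miss on a short chain stays NULL.
 */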
static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
	if (tm)
		return tm;
	if (depth > TCP_METRICS_RECLAIM_DEPTH)
		return TCP_METRICS_RECLAIM_PTR;
	return NULL;
}

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash)
{
	struct tcp_metrics_block *tm;
	int depth = 0;

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, saddr) &&
		    addr_same(&tm->tcpm_daddr, daddr) &&
		    net_eq(tm_net(tm), net))
			break;
		depth++;
	}
	return tcp_get_encode(tm, depth);
}

static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
						       struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	saddr.family = req->rsk_ops->family;
	daddr.family = req->rsk_ops->family;
	switch (daddr.family) {
	case AF_INET:
		inetpeer_set_addr_v4(&saddr, inet_rsk(req)->ir_loc_addr);
		inetpeer_set_addr_v4(&daddr, inet_rsk(req)->ir_rmt_addr);
		hash = ipv4_addr_hash(inet_rsk(req)->ir_rmt_addr);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		inetpeer_set_addr_v6(&saddr, &inet_rsk(req)->ir_v6_loc_addr);
		inetpeer_set_addr_v6(&daddr, &inet_rsk(req)->ir_v6_rmt_addr);
		hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
		break;
#endif
	default:
		return NULL;
	}

	net = dev_net(dst->dev);
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, &saddr) &&
		    addr_same(&tm->tcpm_daddr, &daddr) &&
		    net_eq(tm_net(tm), net))
			break;
	}
	tcpm_check_stamp(tm, dst);
	return tm;
}

static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
						 struct dst_entry *dst,
						 bool create)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	if (sk->sk_family == AF_INET) {
		inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
		inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
		hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (sk->sk_family == AF_INET6) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
			inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
			inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
			hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
		} else {
			inetpeer_set_addr_v6(&saddr, &sk->sk_v6_rcv_saddr);
			inetpeer_set_addr_v6(&daddr, &sk->sk_v6_daddr);
			hash = ipv6_addr_hash(&sk->sk_v6_daddr);
		}
	}
#endif
	else
		return NULL;

	net = dev_net(dst->dev);
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	tm = __tcp_get_metrics(&saddr, &daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR)
		tm = NULL;
	if (!tm && create)
		tm = tcpm_new(dst, &saddr, &daddr, hash);
	else
		tcpm_check_stamp(tm, dst);

	return tm;
}

/* Save metrics learned by this TCP session.  This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct tcp_metrics_block *tm;
	unsigned long rtt;
	u32 val;
	int m;

	sk_dst_confirm(sk);
	if (net->ipv4.sysctl_tcp_nometrics_save || !dst)
		return;

	rcu_read_lock();
	if (icsk->icsk_backoff || !tp->srtt_us) {
		/* This session failed to estimate an RTT; most likely no
		 * packets were ACKed in time.  Reset our results.
		 */
		tm = tcp_get_metrics(sk, dst, false);
		if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
			tcp_metric_set(tm, TCP_METRIC_RTT, 0);
		goto out_unlock;
	} else
		tm = tcp_get_metrics(sk, dst, true);

	if (!tm)
		goto out_unlock;

	rtt = tcp_metric_get(tm, TCP_METRIC_RTT);
	m = rtt - tp->srtt_us;

	/* If the newly calculated rtt is larger than the stored one,
	 * store the new value.  Otherwise, use an EWMA.  Remember, rtt
	 * overestimation is always better than underestimation.
	 */
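	/* Worked example (illustrative numbers): with a cached rtt of 100
	 * and a new srtt_us of 60, m = 40 and the cache decays to
	 * 100 - (40 >> 3) = 95, i.e. new = 7/8 * old + 1/8 * srtt.  With a
	 * new srtt_us of 120 (m <= 0), the cache jumps straight to 120.
	 */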
	if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
		if (m <= 0)
			rtt = tp->srtt_us;
		else
			rtt -= (m >> 3);
		tcp_metric_set(tm, TCP_METRIC_RTT, rtt);
	}

	if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
		unsigned long var;

		if (m < 0)
			m = -m;

		/* Scale deviation to rttvar fixed point */
		m >>= 1;
		if (m < tp->mdev_us)
			m = tp->mdev_us;

		var = tcp_metric_get(tm, TCP_METRIC_RTTVAR);
		if (m >= var)
			var = m;
		else
			var -= (var - m) >> 2;

		tcp_metric_set(tm, TCP_METRIC_RTTVAR, var);
	}

	if (tcp_in_initial_slowstart(tp)) {
		/* Slow start still did not finish. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && (tp->snd_cwnd >> 1) > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_cwnd >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			if (tp->snd_cwnd > val)
				tcp_metric_set(tm, TCP_METRIC_CWND,
					       tp->snd_cwnd);
		}
	} else if (!tcp_in_slow_start(tp) &&
		   icsk->icsk_ca_state == TCP_CA_Open) {
		/* Cong. avoidance phase, cwnd is reliable. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
			tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
				       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
		}
	} else {
		/* Slow start did not finish, so cwnd is meaningless and
		 * ssthresh may be invalid as well.
		 */
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND,
				       (val + tp->snd_ssthresh) >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && tp->snd_ssthresh > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_ssthresh);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
			val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
			if (val < tp->reordering &&
			    tp->reordering != net->ipv4.sysctl_tcp_reordering)
				tcp_metric_set(tm, TCP_METRIC_REORDERING,
					       tp->reordering);
		}
	}
	tm->tcpm_stamp = jiffies;
out_unlock:
	rcu_read_unlock();
}

/* Initialize metrics on socket. */

void tcp_init_metrics(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	u32 val, crtt = 0; /* cached RTT scaled by 8 */

	sk_dst_confirm(sk);
	if (!dst)
		goto reset;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (!tm) {
		rcu_read_unlock();
		goto reset;
	}

	if (tcp_metric_locked(tm, TCP_METRIC_CWND))
		tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

	val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
	if (val) {
		tp->snd_ssthresh = val;
		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
			tp->snd_ssthresh = tp->snd_cwnd_clamp;
	} else {
		/* ssthresh may have been reduced unnecessarily during the
		 * 3WHS.  Restore it to its initial default.
		 */
		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	}
	val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
	if (val && tp->reordering != val) {
		tcp_disable_fack(tp);
		tp->reordering = val;
	}

	crtt = tcp_metric_get(tm, TCP_METRIC_RTT);
	rcu_read_unlock();
reset:
	/* The initial RTT measurement from the SYN/SYN-ACK is not ideal
	 * for seeding the RTO for later data packets because SYN packets
	 * are small.  Use the per-dst cached values to seed the RTO but
	 * keep the RTT estimator variables intact (e.g., srtt, mdev,
	 * rttvar).  Later the RTO will be updated immediately upon
	 * obtaining the first data RTT sample (tcp_rtt_estimator()).
	 * Hence the cached RTT only influences the first RTO but not
	 * later RTT estimation.
	 *
	 * But if RTT is not available from the SYN (due to retransmits or
	 * syn cookies) or the cache, force a conservative 3 second timeout.
	 *
	 * A bit of theory.  RTT is the time that passes after a "normal"
	 * sized packet is sent until it is ACKed.  In normal circumstances
	 * sending small packets forces the peer to delay ACKs, so the
	 * calculation is correct there too.  The algorithm is adaptive and,
	 * provided we follow the specs, it NEVER underestimates RTT.  BUT!
	 * If a peer plays "quick ACK" tricks long enough to drive the
	 * measured RTT down, and then abruptly starts delaying ACKs,
	 * expect trouble.
	 */
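	/* Worked example (illustrative; assumes HZ == 1000 and the default
	 * tcp_rto_min() of TCP_RTO_MIN): a cached RTT of 200 ms is stored
	 * as 200000 << 3 usec, so crtt /= 8 * USEC_PER_SEC / HZ yields
	 * 200 jiffies, and icsk_rto becomes 200 + max(2 * 200, 200) = 600
	 * jiffies, i.e. 600 ms.
	 */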
	if (crtt > tp->srtt_us) {
		/* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
		crtt /= 8 * USEC_PER_SEC / HZ;
		inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
	} else if (tp->srtt_us == 0) {
		/* RFC6298 5.7: We've failed to get a valid RTT sample from
		 * the 3WHS.  This is most likely due to retransmission,
		 * including spurious ones.  Reset the RTO back to 3 seconds
		 * from the more aggressive 1 second to avoid more spurious
		 * retransmission.
		 */
		tp->rttvar_us = jiffies_to_usecs(TCP_TIMEOUT_FALLBACK);
		tp->mdev_us = tp->mdev_max_us = tp->rttvar_us;

		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
	}
	/* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
	 * retransmitted.  In light of RFC6298's more aggressive 1 second
	 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
	 * retransmission has occurred.
	 */
	if (tp->total_retrans > 1)
		tp->snd_cwnd = 1;
	else
		tp->snd_cwnd = tcp_init_cwnd(tp, dst);
	tp->snd_cwnd_stamp = tcp_jiffies32;
}

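/* Return true when a cached RTT sample already exists for the peer that
 * @req refers to, i.e. the peer has previously completed a TCP session
 * with us that produced a usable RTT estimate.
 */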
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	bool ret;

	if (!dst)
		return false;

	rcu_read_lock();
	tm = __tcp_get_metrics_req(req, dst);
	if (tm && tcp_metric_get(tm, TCP_METRIC_RTT))
		ret = true;
	else
		ret = false;
	rcu_read_unlock();

	return ret;
}

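/* Fast Open metrics are updated outside of tcp_metrics_lock; this global
 * seqlock lets readers take a consistent snapshot of the multi-field
 * tcp_fastopen_metrics without blocking the writer.
 */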
static DEFINE_SEQLOCK(fastopen_seqlock);

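/* Copy the cached Fast Open state for this destination into the output
 * parameters under the seqlock.  Note that *mss is written only when a
 * value is cached, so callers are expected to pre-initialize it.
 */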
void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
			    struct tcp_fastopen_cookie *cookie,
			    int *syn_loss, unsigned long *last_syn_loss)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			if (tfom->mss)
				*mss = tfom->mss;
			*cookie = tfom->cookie;
			if (cookie->len <= 0 && tfom->try_exp == 1)
				cookie->exp = true;
			*syn_loss = tfom->syn_loss;
			*last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
		} while (read_seqretry(&fastopen_seqlock, seq));
	}
	rcu_read_unlock();
}

void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
			    u16 try_exp)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_metrics_block *tm;

	if (!dst)
		return;
	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;

		write_seqlock_bh(&fastopen_seqlock);
		if (mss)
			tfom->mss = mss;
		if (cookie && cookie->len > 0)
			tfom->cookie = *cookie;
		else if (try_exp > tfom->try_exp &&
			 tfom->cookie.len <= 0 && !tfom->cookie.exp)
			tfom->try_exp = try_exp;
		if (syn_lost) {
			++tfom->syn_loss;
			tfom->last_syn_loss = jiffies;
		} else
			tfom->syn_loss = 0;
		write_sequnlock_bh(&fastopen_seqlock);
	}
	rcu_read_unlock();
}

static struct genl_family tcp_metrics_nl_family;

static const struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
	[TCP_METRICS_ATTR_ADDR_IPV4]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_ADDR_IPV6]	= { .type = NLA_BINARY,
					    .len = sizeof(struct in6_addr), },
	/* The following attributes are not received for GET/DEL;
	 * we keep them for reference.
	 */
#if 0
	[TCP_METRICS_ATTR_AGE]		= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_TW_TSVAL]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_TW_TS_STAMP]	= { .type = NLA_S32, },
	[TCP_METRICS_ATTR_VALS]		= { .type = NLA_NESTED, },
	[TCP_METRICS_ATTR_FOPEN_MSS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROPS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]	= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_FOPEN_COOKIE]	= { .type = NLA_BINARY,
					    .len = TCP_FASTOPEN_COOKIE_MAX, },
#endif
};

/* Add attributes; the caller cancels its header on failure. */
static int tcp_metrics_fill_info(struct sk_buff *msg,
				 struct tcp_metrics_block *tm)
{
	struct nlattr *nest;
	int i;

	switch (tm->tcpm_daddr.family) {
	case AF_INET:
		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_ADDR_IPV4,
				    inetpeer_get_addr_v4(&tm->tcpm_daddr)) < 0)
			goto nla_put_failure;
		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_SADDR_IPV4,
				    inetpeer_get_addr_v4(&tm->tcpm_saddr)) < 0)
			goto nla_put_failure;
		break;
	case AF_INET6:
		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_ADDR_IPV6,
				     inetpeer_get_addr_v6(&tm->tcpm_daddr)) < 0)
			goto nla_put_failure;
		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_SADDR_IPV6,
				     inetpeer_get_addr_v6(&tm->tcpm_saddr)) < 0)
			goto nla_put_failure;
		break;
	default:
		return -EAFNOSUPPORT;
	}

	if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
			  jiffies - tm->tcpm_stamp,
			  TCP_METRICS_ATTR_PAD) < 0)
		goto nla_put_failure;

	{
		int n = 0;

		nest = nla_nest_start(msg, TCP_METRICS_ATTR_VALS);
		if (!nest)
			goto nla_put_failure;
		for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) {
			u32 val = tm->tcpm_vals[i];

			if (!val)
				continue;
			if (i == TCP_METRIC_RTT) {
				if (nla_put_u32(msg, TCP_METRIC_RTT_US + 1,
						val) < 0)
					goto nla_put_failure;
				n++;
				val = max(val / 1000, 1U);
			}
			if (i == TCP_METRIC_RTTVAR) {
				if (nla_put_u32(msg, TCP_METRIC_RTTVAR_US + 1,
						val) < 0)
					goto nla_put_failure;
				n++;
				val = max(val / 1000, 1U);
			}
			if (nla_put_u32(msg, i + 1, val) < 0)
				goto nla_put_failure;
			n++;
		}
		if (n)
			nla_nest_end(msg, nest);
		else
			nla_nest_cancel(msg, nest);
	}

	{
		struct tcp_fastopen_metrics tfom_copy[1], *tfom;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			tfom_copy[0] = tm->tcpm_fastopen;
		} while (read_seqretry(&fastopen_seqlock, seq));

		tfom = tfom_copy;
		if (tfom->mss &&
		    nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
				tfom->mss) < 0)
			goto nla_put_failure;
		if (tfom->syn_loss &&
		    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
				tfom->syn_loss) < 0 ||
		     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
				jiffies - tfom->last_syn_loss,
				TCP_METRICS_ATTR_PAD) < 0))
			goto nla_put_failure;
		if (tfom->cookie.len > 0 &&
		    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
			    tfom->cookie.len, tfom->cookie.val) < 0)
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int tcp_metrics_dump_info(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct tcp_metrics_block *tm)
{
	void *hdr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tcp_metrics_nl_family, NLM_F_MULTI,
			  TCP_METRICS_CMD_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (tcp_metrics_fill_info(skb, tm) < 0)
		goto nla_put_failure;

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int tcp_metrics_nl_dump(struct sk_buff *skb,
			       struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	unsigned int max_rows = 1U << tcp_metrics_hash_log;
	unsigned int row, s_row = cb->args[0];
	int s_col = cb->args[1], col = s_col;

	for (row = s_row; row < max_rows; row++, s_col = 0) {
		struct tcp_metrics_block *tm;
		struct tcpm_hash_bucket *hb = tcp_metrics_hash + row;

		rcu_read_lock();
		for (col = 0, tm = rcu_dereference(hb->chain); tm;
		     tm = rcu_dereference(tm->tcpm_next), col++) {
			if (!net_eq(tm_net(tm), net))
				continue;
			if (col < s_col)
				continue;
			if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
				rcu_read_unlock();
				goto done;
			}
		}
		rcu_read_unlock();
	}

done:
	cb->args[0] = row;
	cb->args[1] = col;
	return skb->len;
}

static int __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			   unsigned int *hash, int optional, int v4, int v6)
{
	struct nlattr *a;

	a = info->attrs[v4];
	if (a) {
		inetpeer_set_addr_v4(addr, nla_get_in_addr(a));
		if (hash)
			*hash = ipv4_addr_hash(inetpeer_get_addr_v4(addr));
		return 0;
	}
	a = info->attrs[v6];
	if (a) {
		struct in6_addr in6;

		if (nla_len(a) != sizeof(struct in6_addr))
			return -EINVAL;
		in6 = nla_get_in6_addr(a);
		inetpeer_set_addr_v6(addr, &in6);
		if (hash)
			*hash = ipv6_addr_hash(inetpeer_get_addr_v6(addr));
		return 0;
	}
	return optional ? 1 : -EAFNOSUPPORT;
}

static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			 unsigned int *hash, int optional)
{
	return __parse_nl_addr(info, addr, hash, optional,
			       TCP_METRICS_ATTR_ADDR_IPV4,
			       TCP_METRICS_ATTR_ADDR_IPV6);
}

static int parse_nl_saddr(struct genl_info *info, struct inetpeer_addr *addr)
{
	return __parse_nl_addr(info, addr, NULL, 0,
			       TCP_METRICS_ATTR_SADDR_IPV4,
			       TCP_METRICS_ATTR_SADDR_IPV6);
}

static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct sk_buff *msg;
	struct net *net = genl_info_net(info);
	void *reply;
	int ret;
	bool src = true;

	ret = parse_nl_addr(info, &daddr, &hash, 0);
	if (ret < 0)
		return ret;

	ret = parse_nl_saddr(info, &saddr);
	if (ret < 0)
		src = false;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
				  info->genlhdr->cmd);
	if (!reply)
		goto nla_put_failure;

	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);
	ret = -ESRCH;
	rcu_read_lock();
	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_daddr, &daddr) &&
		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
		    net_eq(tm_net(tm), net)) {
			ret = tcp_metrics_fill_info(msg, tm);
			break;
		}
	}
	rcu_read_unlock();
	if (ret < 0)
		goto out_free;

	genlmsg_end(msg, reply);
	return genlmsg_reply(msg, info);

nla_put_failure:
	ret = -EMSGSIZE;

out_free:
	nlmsg_free(msg);
	return ret;
}

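/* Remove every entry belonging to @net or, when @net is NULL (netns
 * teardown via tcp_net_metrics_exit_batch()), every entry whose netns
 * refcount has already dropped to zero.
 */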
static void tcp_metrics_flush_all(struct net *net)
{
	unsigned int max_rows = 1U << tcp_metrics_hash_log;
	struct tcpm_hash_bucket *hb = tcp_metrics_hash;
	struct tcp_metrics_block *tm;
	unsigned int row;

	for (row = 0; row < max_rows; row++, hb++) {
		struct tcp_metrics_block __rcu **pp;
		bool match;

		spin_lock_bh(&tcp_metrics_lock);
		pp = &hb->chain;
		for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
			match = net ? net_eq(tm_net(tm), net) :
				!atomic_read(&tm_net(tm)->count);
			if (match) {
				*pp = tm->tcpm_next;
				kfree_rcu(tm, rcu_head);
			} else {
				pp = &tm->tcpm_next;
			}
		}
		spin_unlock_bh(&tcp_metrics_lock);
	}
}

static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct tcpm_hash_bucket *hb;
	struct tcp_metrics_block *tm;
	struct tcp_metrics_block __rcu **pp;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net = genl_info_net(info);
	int ret;
	bool src = true, found = false;

	ret = parse_nl_addr(info, &daddr, &hash, 1);
	if (ret < 0)
		return ret;
	if (ret > 0) {
		tcp_metrics_flush_all(net);
		return 0;
	}
	ret = parse_nl_saddr(info, &saddr);
	if (ret < 0)
		src = false;

	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);
	hb = tcp_metrics_hash + hash;
	pp = &hb->chain;
	spin_lock_bh(&tcp_metrics_lock);
	for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
		if (addr_same(&tm->tcpm_daddr, &daddr) &&
		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
		    net_eq(tm_net(tm), net)) {
			*pp = tm->tcpm_next;
			kfree_rcu(tm, rcu_head);
			found = true;
		} else {
			pp = &tm->tcpm_next;
		}
	}
	spin_unlock_bh(&tcp_metrics_lock);
	if (!found)
		return -ESRCH;
	return 0;
}

static const struct genl_ops tcp_metrics_nl_ops[] = {
	{
		.cmd = TCP_METRICS_CMD_GET,
		.doit = tcp_metrics_nl_cmd_get,
		.dumpit = tcp_metrics_nl_dump,
		.policy = tcp_metrics_nl_policy,
	},
	{
		.cmd = TCP_METRICS_CMD_DEL,
		.doit = tcp_metrics_nl_cmd_del,
		.policy = tcp_metrics_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};

static struct genl_family tcp_metrics_nl_family __ro_after_init = {
	.hdrsize	= 0,
	.name		= TCP_METRICS_GENL_NAME,
	.version	= TCP_METRICS_GENL_VERSION,
	.maxattr	= TCP_METRICS_ATTR_MAX,
	.netnsok	= true,
	.module		= THIS_MODULE,
	.ops		= tcp_metrics_nl_ops,
	.n_ops		= ARRAY_SIZE(tcp_metrics_nl_ops),
};

static unsigned int tcpmhash_entries;
static int __init set_tcpmhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtouint(str, 0, &tcpmhash_entries);
	if (ret)
		return 0;

	return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);
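/* Example (hypothetical command line): booting with "tcpmhash_entries=4096"
 * requests 4096 hash slots; tcp_net_metrics_init() below rounds the value
 * to a power of two via order_base_2().
 */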

static int __net_init tcp_net_metrics_init(struct net *net)
{
	size_t size;
	unsigned int slots;

	if (!net_eq(net, &init_net))
		return 0;

	slots = tcpmhash_entries;
	if (!slots) {
		if (totalram_pages >= 128 * 1024)
			slots = 16 * 1024;
		else
			slots = 8 * 1024;
	}

	tcp_metrics_hash_log = order_base_2(slots);
	size = sizeof(struct tcpm_hash_bucket) << tcp_metrics_hash_log;

	tcp_metrics_hash = kvzalloc(size, GFP_KERNEL);
	if (!tcp_metrics_hash)
		return -ENOMEM;

	return 0;
}

static void __net_exit tcp_net_metrics_exit_batch(struct list_head *net_exit_list)
{
	tcp_metrics_flush_all(NULL);
}

static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
	.init		=	tcp_net_metrics_init,
	.exit_batch	=	tcp_net_metrics_exit_batch,
};

void __init tcp_metrics_init(void)
{
	int ret;

	ret = register_pernet_subsys(&tcp_net_metrics_ops);
	if (ret < 0)
		panic("Could not allocate the tcp_metrics hash table\n");

	ret = genl_register_family(&tcp_metrics_nl_family);
	if (ret < 0)
		panic("Could not register tcp_metrics generic netlink\n");
}