xref: /linux/net/ipv4/tcp_ao.c (revision 3fd6c59042dbba50391e30862beac979491145fe)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * INET		An implementation of the TCP Authentication Option (TCP-AO).
4  *		See RFC5925.
5  *
6  * Authors:	Dmitry Safonov <dima@arista.com>
7  *		Francesco Ruggeri <fruggeri@arista.com>
8  *		Salam Noureddine <noureddine@arista.com>
9  */
10 #define pr_fmt(fmt) "TCP: " fmt
11 
12 #include <crypto/hash.h>
13 #include <linux/inetdevice.h>
14 #include <linux/tcp.h>
15 
16 #include <net/tcp.h>
17 #include <net/ipv6.h>
18 #include <net/icmp.h>
19 #include <trace/events/tcp.h>
20 
21 DEFINE_STATIC_KEY_DEFERRED_FALSE(tcp_ao_needed, HZ);
22 
/* Derive a traffic key from Master Key Tuple @mkt over the KDF input
 * block @ctx (@len bytes), writing the result into @key.
 * On any crypto failure the output buffer is zeroed so that stale or
 * partial key material can never be used.
 * Returns 0 on success, 1 on failure (callers treat non-zero as error).
 */
int tcp_ao_calc_traffic_key(struct tcp_ao_key *mkt, u8 *key, void *ctx,
			    unsigned int len, struct tcp_sigpool *hp)
{
	struct scatterlist sg;
	int ret;

	if (crypto_ahash_setkey(crypto_ahash_reqtfm(hp->req),
				mkt->key, mkt->keylen))
		goto clear_hash;

	ret = crypto_ahash_init(hp->req);
	if (ret)
		goto clear_hash;

	sg_init_one(&sg, ctx, len);
	ahash_request_set_crypt(hp->req, &sg, key, len);
	/* Previously the update result was ignored; an update failure
	 * must clear the key just like init/final failures do.
	 */
	ret = crypto_ahash_update(hp->req);
	if (ret)
		goto clear_hash;

	ret = crypto_ahash_final(hp->req);
	if (ret)
		goto clear_hash;

	return 0;
clear_hash:
	memset(key, 0, tcp_ao_digest_size(mkt));
	return 1;
}
50 
/* Decide whether an incoming ICMP "hard error" aimed at @sk must be
 * ignored because the connection uses TCP-AO (RFC 5925, 7.8).
 * Returns true when the ICMP should be dropped; also bumps the
 * per-netns MIB counter and the per-socket dropped_icmp counter.
 */
bool tcp_ao_ignore_icmp(const struct sock *sk, int family, int type, int code)
{
	bool ignore_icmp = false;
	struct tcp_ao_info *ao;

	/* Fast path: no TCP-AO users in the system at all */
	if (!static_branch_unlikely(&tcp_ao_needed.key))
		return false;

	/* RFC5925, 7.8:
	 * >> A TCP-AO implementation MUST default to ignore incoming ICMPv4
	 * messages of Type 3 (destination unreachable), Codes 2-4 (protocol
	 * unreachable, port unreachable, and fragmentation needed -- ’hard
	 * errors’), and ICMPv6 Type 1 (destination unreachable), Code 1
	 * (administratively prohibited) and Code 4 (port unreachable) intended
	 * for connections in synchronized states (ESTABLISHED, FIN-WAIT-1, FIN-
	 * WAIT-2, CLOSE-WAIT, CLOSING, LAST-ACK, TIME-WAIT) that match MKTs.
	 */
	if (family == AF_INET) {
		if (type != ICMP_DEST_UNREACH)
			return false;
		if (code < ICMP_PROT_UNREACH || code > ICMP_FRAG_NEEDED)
			return false;
	} else {
		if (type != ICMPV6_DEST_UNREACH)
			return false;
		if (code != ICMPV6_ADM_PROHIBITED && code != ICMPV6_PORT_UNREACH)
			return false;
	}

	rcu_read_lock();
	switch (sk->sk_state) {
	case TCP_TIME_WAIT:
		/* timewait sockets keep ao_info in tcp_timewait_sock */
		ao = rcu_dereference(tcp_twsk(sk)->ao_info);
		break;
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
	case TCP_LISTEN:
	case TCP_NEW_SYN_RECV:
		/* RFC5925 specifies to ignore ICMPs *only* on connections
		 * in synchronized states.
		 */
		rcu_read_unlock();
		return false;
	default:
		ao = rcu_dereference(tcp_sk(sk)->ao_info);
	}

	/* accept_icmps is an opt-in escape hatch set via setsockopt */
	if (ao && !ao->accept_icmps) {
		ignore_icmp = true;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAODROPPEDICMPS);
		atomic64_inc(&ao->counters.dropped_icmp);
	}
	rcu_read_unlock();

	return ignore_icmp;
}
107 
108 /* Optimized version of tcp_ao_do_lookup(): only for sockets for which
109  * it's known that the keys in ao_info are matching peer's
110  * family/address/VRF/etc.
111  */
tcp_ao_established_key(const struct sock * sk,struct tcp_ao_info * ao,int sndid,int rcvid)112 struct tcp_ao_key *tcp_ao_established_key(const struct sock *sk,
113 					  struct tcp_ao_info *ao,
114 					  int sndid, int rcvid)
115 {
116 	struct tcp_ao_key *key;
117 
118 	hlist_for_each_entry_rcu(key, &ao->head, node, lockdep_sock_is_held(sk)) {
119 		if ((sndid >= 0 && key->sndid != sndid) ||
120 		    (rcvid >= 0 && key->rcvid != rcvid))
121 			continue;
122 		return key;
123 	}
124 
125 	return NULL;
126 }
127 
ipv4_prefix_cmp(const struct in_addr * addr1,const struct in_addr * addr2,unsigned int prefixlen)128 static int ipv4_prefix_cmp(const struct in_addr *addr1,
129 			   const struct in_addr *addr2,
130 			   unsigned int prefixlen)
131 {
132 	__be32 mask = inet_make_mask(prefixlen);
133 	__be32 a1 = addr1->s_addr & mask;
134 	__be32 a2 = addr2->s_addr & mask;
135 
136 	if (a1 == a2)
137 		return 0;
138 	return memcmp(&a1, &a2, sizeof(a1));
139 }
140 
/* Order/match a key against a lookup tuple; returns 0 on match,
 * otherwise -1/1 giving a stable ordering.  Comparison precedence:
 * sndid, rcvid, L3 device (only if the key is bound to one), family,
 * then address under the requested prefix.  Negative sndid/rcvid and
 * AF_UNSPEC act as wildcards; wildcard (ANY) addresses match anything.
 */
static int __tcp_ao_key_cmp(const struct tcp_ao_key *key, int l3index,
			    const union tcp_ao_addr *addr, u8 prefixlen,
			    int family, int sndid, int rcvid)
{
	if (sndid >= 0 && key->sndid != sndid)
		return (key->sndid > sndid) ? 1 : -1;
	if (rcvid >= 0 && key->rcvid != rcvid)
		return (key->rcvid > rcvid) ? 1 : -1;
	/* Only keys explicitly bound to a device (TCP_AO_KEYF_IFINDEX)
	 * participate in L3 (VRF) matching.
	 */
	if (l3index >= 0 && (key->keyflags & TCP_AO_KEYF_IFINDEX)) {
		if (key->l3index != l3index)
			return (key->l3index > l3index) ? 1 : -1;
	}

	if (family == AF_UNSPEC)
		return 0;
	if (key->family != family)
		return (key->family > family) ? 1 : -1;

	if (family == AF_INET) {
		/* INADDR_ANY on either side is a wildcard match */
		if (ntohl(key->addr.a4.s_addr) == INADDR_ANY)
			return 0;
		if (ntohl(addr->a4.s_addr) == INADDR_ANY)
			return 0;
		return ipv4_prefix_cmp(&key->addr.a4, &addr->a4, prefixlen);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		if (ipv6_addr_any(&key->addr.a6) || ipv6_addr_any(&addr->a6))
			return 0;
		if (ipv6_prefix_equal(&key->addr.a6, &addr->a6, prefixlen))
			return 0;
		return memcmp(&key->addr.a6, &addr->a6, sizeof(addr->a6));
#endif
	}
	/* family != AF_INET and IPv6 not compiled in: cannot match */
	return -1;
}
176 
/* Wrapper over __tcp_ao_key_cmp() that unwraps IPv4-mapped IPv6
 * addresses (::ffff:a.b.c.d) into a plain AF_INET comparison.
 * Note: @addr4 must stay in scope for the duration of the call,
 * hence the early return inside the branch.
 */
static int tcp_ao_key_cmp(const struct tcp_ao_key *key, int l3index,
			  const union tcp_ao_addr *addr, u8 prefixlen,
			  int family, int sndid, int rcvid)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6 && ipv6_addr_v4mapped(&addr->a6)) {
		/* last 32 bits of a v4-mapped address hold the IPv4 address */
		__be32 addr4 = addr->a6.s6_addr32[3];

		return __tcp_ao_key_cmp(key, l3index,
					(union tcp_ao_addr *)&addr4,
					prefixlen, AF_INET, sndid, rcvid);
	}
#endif
	return __tcp_ao_key_cmp(key, l3index, addr,
				prefixlen, family, sndid, rcvid);
}
193 
__tcp_ao_do_lookup(const struct sock * sk,int l3index,const union tcp_ao_addr * addr,int family,u8 prefix,int sndid,int rcvid)194 static struct tcp_ao_key *__tcp_ao_do_lookup(const struct sock *sk, int l3index,
195 		const union tcp_ao_addr *addr, int family, u8 prefix,
196 		int sndid, int rcvid)
197 {
198 	struct tcp_ao_key *key;
199 	struct tcp_ao_info *ao;
200 
201 	if (!static_branch_unlikely(&tcp_ao_needed.key))
202 		return NULL;
203 
204 	ao = rcu_dereference_check(tcp_sk(sk)->ao_info,
205 				   lockdep_sock_is_held(sk));
206 	if (!ao)
207 		return NULL;
208 
209 	hlist_for_each_entry_rcu(key, &ao->head, node, lockdep_sock_is_held(sk)) {
210 		u8 prefixlen = min(prefix, key->prefixlen);
211 
212 		if (!tcp_ao_key_cmp(key, l3index, addr, prefixlen,
213 				    family, sndid, rcvid))
214 			return key;
215 	}
216 	return NULL;
217 }
218 
/* Public lookup entry point: same as __tcp_ao_do_lookup() with the
 * prefix cap disabled (U8_MAX, i.e. each key's own prefixlen is used).
 */
struct tcp_ao_key *tcp_ao_do_lookup(const struct sock *sk, int l3index,
				    const union tcp_ao_addr *addr,
				    int family, int sndid, int rcvid)
{
	return __tcp_ao_do_lookup(sk, l3index, addr, family, U8_MAX, sndid, rcvid);
}
225 
/* Allocate and initialize a zeroed tcp_ao_info with an empty key list
 * and a reference count of one.  Returns NULL on allocation failure.
 */
static struct tcp_ao_info *tcp_ao_alloc_info(gfp_t flags)
{
	struct tcp_ao_info *ao = kzalloc(sizeof(*ao), flags);

	if (ao) {
		INIT_HLIST_HEAD(&ao->head);
		refcount_set(&ao->refcnt, 1);
	}

	return ao;
}
238 
/* Insert @mkt at the head of @ao's RCU-protected key list */
static void tcp_ao_link_mkt(struct tcp_ao_info *ao, struct tcp_ao_key *mkt)
{
	hlist_add_head_rcu(&mkt->node, &ao->head);
}
243 
tcp_ao_copy_key(struct sock * sk,struct tcp_ao_key * key)244 static struct tcp_ao_key *tcp_ao_copy_key(struct sock *sk,
245 					  struct tcp_ao_key *key)
246 {
247 	struct tcp_ao_key *new_key;
248 
249 	new_key = sock_kmalloc(sk, tcp_ao_sizeof_key(key),
250 			       GFP_ATOMIC);
251 	if (!new_key)
252 		return NULL;
253 
254 	*new_key = *key;
255 	INIT_HLIST_NODE(&new_key->node);
256 	tcp_sigpool_get(new_key->tcp_sigpool_id);
257 	atomic64_set(&new_key->pkt_good, 0);
258 	atomic64_set(&new_key->pkt_bad, 0);
259 
260 	return new_key;
261 }
262 
/* RCU callback: drop the key's sigpool reference and free it,
 * scrubbing the memory since it holds secret key material.
 */
static void tcp_ao_key_free_rcu(struct rcu_head *head)
{
	struct tcp_ao_key *key = container_of(head, struct tcp_ao_key, rcu);

	tcp_sigpool_release(key->tcp_sigpool_id);
	kfree_sensitive(key);
}
270 
tcp_ao_info_free_rcu(struct rcu_head * head)271 static void tcp_ao_info_free_rcu(struct rcu_head *head)
272 {
273 	struct tcp_ao_info *ao = container_of(head, struct tcp_ao_info, rcu);
274 	struct tcp_ao_key *key;
275 	struct hlist_node *n;
276 
277 	hlist_for_each_entry_safe(key, n, &ao->head, node) {
278 		hlist_del(&key->node);
279 		tcp_sigpool_release(key->tcp_sigpool_id);
280 		kfree_sensitive(key);
281 	}
282 	kfree(ao);
283 	static_branch_slow_dec_deferred(&tcp_ao_needed);
284 }
285 
tcp_ao_sk_omem_free(struct sock * sk,struct tcp_ao_info * ao)286 static void tcp_ao_sk_omem_free(struct sock *sk, struct tcp_ao_info *ao)
287 {
288 	size_t total_ao_sk_mem = 0;
289 	struct tcp_ao_key *key;
290 
291 	hlist_for_each_entry(key,  &ao->head, node)
292 		total_ao_sk_mem += tcp_ao_sizeof_key(key);
293 	atomic_sub(total_ao_sk_mem, &sk->sk_omem_alloc);
294 }
295 
/* Detach and release @sk's ao_info.  @twsk selects where the pointer
 * lives (timewait vs full socket).  The info is only freed (via RCU)
 * when the last reference is dropped; omem is uncharged only for full
 * sockets, since timewait sockets never carried the charge.
 */
void tcp_ao_destroy_sock(struct sock *sk, bool twsk)
{
	struct tcp_ao_info *ao;

	if (twsk) {
		ao = rcu_dereference_protected(tcp_twsk(sk)->ao_info, 1);
		rcu_assign_pointer(tcp_twsk(sk)->ao_info, NULL);
	} else {
		ao = rcu_dereference_protected(tcp_sk(sk)->ao_info, 1);
		rcu_assign_pointer(tcp_sk(sk)->ao_info, NULL);
	}

	if (!ao || !refcount_dec_and_test(&ao->refcnt))
		return;

	if (!twsk)
		tcp_ao_sk_omem_free(sk, ao);
	call_rcu(&ao->rcu, tcp_ao_info_free_rcu);
}
315 
tcp_ao_time_wait(struct tcp_timewait_sock * tcptw,struct tcp_sock * tp)316 void tcp_ao_time_wait(struct tcp_timewait_sock *tcptw, struct tcp_sock *tp)
317 {
318 	struct tcp_ao_info *ao_info = rcu_dereference_protected(tp->ao_info, 1);
319 
320 	if (ao_info) {
321 		struct tcp_ao_key *key;
322 		struct hlist_node *n;
323 		int omem = 0;
324 
325 		hlist_for_each_entry_safe(key, n, &ao_info->head, node) {
326 			omem += tcp_ao_sizeof_key(key);
327 		}
328 
329 		refcount_inc(&ao_info->refcnt);
330 		atomic_sub(omem, &(((struct sock *)tp)->sk_omem_alloc));
331 		rcu_assign_pointer(tcptw->ao_info, ao_info);
332 	} else {
333 		tcptw->ao_info = NULL;
334 	}
335 }
336 
337 /* 4 tuple and ISNs are expected in NBO */
tcp_v4_ao_calc_key(struct tcp_ao_key * mkt,u8 * key,__be32 saddr,__be32 daddr,__be16 sport,__be16 dport,__be32 sisn,__be32 disn)338 static int tcp_v4_ao_calc_key(struct tcp_ao_key *mkt, u8 *key,
339 			      __be32 saddr, __be32 daddr,
340 			      __be16 sport, __be16 dport,
341 			      __be32 sisn,  __be32 disn)
342 {
343 	/* See RFC5926 3.1.1 */
344 	struct kdf_input_block {
345 		u8                      counter;
346 		u8                      label[6];
347 		struct tcp4_ao_context	ctx;
348 		__be16                  outlen;
349 	} __packed * tmp;
350 	struct tcp_sigpool hp;
351 	int err;
352 
353 	err = tcp_sigpool_start(mkt->tcp_sigpool_id, &hp);
354 	if (err)
355 		return err;
356 
357 	tmp = hp.scratch;
358 	tmp->counter	= 1;
359 	memcpy(tmp->label, "TCP-AO", 6);
360 	tmp->ctx.saddr	= saddr;
361 	tmp->ctx.daddr	= daddr;
362 	tmp->ctx.sport	= sport;
363 	tmp->ctx.dport	= dport;
364 	tmp->ctx.sisn	= sisn;
365 	tmp->ctx.disn	= disn;
366 	tmp->outlen	= htons(tcp_ao_digest_size(mkt) * 8); /* in bits */
367 
368 	err = tcp_ao_calc_traffic_key(mkt, key, tmp, sizeof(*tmp), &hp);
369 	tcp_sigpool_end(&hp);
370 
371 	return err;
372 }
373 
tcp_v4_ao_calc_key_sk(struct tcp_ao_key * mkt,u8 * key,const struct sock * sk,__be32 sisn,__be32 disn,bool send)374 int tcp_v4_ao_calc_key_sk(struct tcp_ao_key *mkt, u8 *key,
375 			  const struct sock *sk,
376 			  __be32 sisn, __be32 disn, bool send)
377 {
378 	if (send)
379 		return tcp_v4_ao_calc_key(mkt, key, sk->sk_rcv_saddr,
380 					  sk->sk_daddr, htons(sk->sk_num),
381 					  sk->sk_dport, sisn, disn);
382 	else
383 		return tcp_v4_ao_calc_key(mkt, key, sk->sk_daddr,
384 					  sk->sk_rcv_saddr, sk->sk_dport,
385 					  htons(sk->sk_num), disn, sisn);
386 }
387 
tcp_ao_calc_key_sk(struct tcp_ao_key * mkt,u8 * key,const struct sock * sk,__be32 sisn,__be32 disn,bool send)388 static int tcp_ao_calc_key_sk(struct tcp_ao_key *mkt, u8 *key,
389 			      const struct sock *sk,
390 			      __be32 sisn, __be32 disn, bool send)
391 {
392 	if (mkt->family == AF_INET)
393 		return tcp_v4_ao_calc_key_sk(mkt, key, sk, sisn, disn, send);
394 #if IS_ENABLED(CONFIG_IPV6)
395 	else if (mkt->family == AF_INET6)
396 		return tcp_v6_ao_calc_key_sk(mkt, key, sk, sisn, disn, send);
397 #endif
398 	else
399 		return -EOPNOTSUPP;
400 }
401 
tcp_v4_ao_calc_key_rsk(struct tcp_ao_key * mkt,u8 * key,struct request_sock * req)402 int tcp_v4_ao_calc_key_rsk(struct tcp_ao_key *mkt, u8 *key,
403 			   struct request_sock *req)
404 {
405 	struct inet_request_sock *ireq = inet_rsk(req);
406 
407 	return tcp_v4_ao_calc_key(mkt, key,
408 				  ireq->ir_loc_addr, ireq->ir_rmt_addr,
409 				  htons(ireq->ir_num), ireq->ir_rmt_port,
410 				  htonl(tcp_rsk(req)->snt_isn),
411 				  htonl(tcp_rsk(req)->rcv_isn));
412 }
413 
tcp_v4_ao_calc_key_skb(struct tcp_ao_key * mkt,u8 * key,const struct sk_buff * skb,__be32 sisn,__be32 disn)414 static int tcp_v4_ao_calc_key_skb(struct tcp_ao_key *mkt, u8 *key,
415 				  const struct sk_buff *skb,
416 				  __be32 sisn, __be32 disn)
417 {
418 	const struct iphdr *iph = ip_hdr(skb);
419 	const struct tcphdr *th = tcp_hdr(skb);
420 
421 	return tcp_v4_ao_calc_key(mkt, key, iph->saddr, iph->daddr,
422 				  th->source, th->dest, sisn, disn);
423 }
424 
tcp_ao_calc_key_skb(struct tcp_ao_key * mkt,u8 * key,const struct sk_buff * skb,__be32 sisn,__be32 disn,int family)425 static int tcp_ao_calc_key_skb(struct tcp_ao_key *mkt, u8 *key,
426 			       const struct sk_buff *skb,
427 			       __be32 sisn, __be32 disn, int family)
428 {
429 	if (family == AF_INET)
430 		return tcp_v4_ao_calc_key_skb(mkt, key, skb, sisn, disn);
431 #if IS_ENABLED(CONFIG_IPV6)
432 	else if (family == AF_INET6)
433 		return tcp_v6_ao_calc_key_skb(mkt, key, skb, sisn, disn);
434 #endif
435 	return -EAFNOSUPPORT;
436 }
437 
tcp_v4_ao_hash_pseudoheader(struct tcp_sigpool * hp,__be32 daddr,__be32 saddr,int nbytes)438 static int tcp_v4_ao_hash_pseudoheader(struct tcp_sigpool *hp,
439 				       __be32 daddr, __be32 saddr,
440 				       int nbytes)
441 {
442 	struct tcp4_pseudohdr *bp;
443 	struct scatterlist sg;
444 
445 	bp = hp->scratch;
446 	bp->saddr = saddr;
447 	bp->daddr = daddr;
448 	bp->pad = 0;
449 	bp->protocol = IPPROTO_TCP;
450 	bp->len = cpu_to_be16(nbytes);
451 
452 	sg_init_one(&sg, bp, sizeof(*bp));
453 	ahash_request_set_crypt(hp->req, &sg, NULL, sizeof(*bp));
454 	return crypto_ahash_update(hp->req);
455 }
456 
tcp_ao_hash_pseudoheader(unsigned short int family,const struct sock * sk,const struct sk_buff * skb,struct tcp_sigpool * hp,int nbytes)457 static int tcp_ao_hash_pseudoheader(unsigned short int family,
458 				    const struct sock *sk,
459 				    const struct sk_buff *skb,
460 				    struct tcp_sigpool *hp, int nbytes)
461 {
462 	const struct tcphdr *th = tcp_hdr(skb);
463 
464 	/* TODO: Can we rely on checksum being zero to mean outbound pkt? */
465 	if (!th->check) {
466 		if (family == AF_INET)
467 			return tcp_v4_ao_hash_pseudoheader(hp, sk->sk_daddr,
468 					sk->sk_rcv_saddr, skb->len);
469 #if IS_ENABLED(CONFIG_IPV6)
470 		else if (family == AF_INET6)
471 			return tcp_v6_ao_hash_pseudoheader(hp, &sk->sk_v6_daddr,
472 					&sk->sk_v6_rcv_saddr, skb->len);
473 #endif
474 		else
475 			return -EAFNOSUPPORT;
476 	}
477 
478 	if (family == AF_INET) {
479 		const struct iphdr *iph = ip_hdr(skb);
480 
481 		return tcp_v4_ao_hash_pseudoheader(hp, iph->daddr,
482 				iph->saddr, skb->len);
483 #if IS_ENABLED(CONFIG_IPV6)
484 	} else if (family == AF_INET6) {
485 		const struct ipv6hdr *iph = ipv6_hdr(skb);
486 
487 		return tcp_v6_ao_hash_pseudoheader(hp, &iph->daddr,
488 				&iph->saddr, skb->len);
489 #endif
490 	}
491 	return -EAFNOSUPPORT;
492 }
493 
/* Compute the Sequence Number Extension (RFC 5925, 6.2) for @seq,
 * given the SNE @next_sne that corresponds to @next_seq.
 * before() is wraparound-aware while the plain </> comparisons are
 * not, so the mixed tests detect a 2^32 rollover between the two
 * sequence numbers and adjust the SNE by one in either direction.
 */
u32 tcp_ao_compute_sne(u32 next_sne, u32 next_seq, u32 seq)
{
	u32 sne = next_sne;

	if (before(seq, next_seq)) {
		/* seq logically earlier but numerically larger:
		 * next_seq already wrapped past seq
		 */
		if (seq > next_seq)
			sne--;
	} else {
		/* seq logically later but numerically smaller:
		 * seq is past a wrap that next_seq hasn't reached
		 */
		if (seq < next_seq)
			sne++;
	}

	return sne;
}
508 
509 /* tcp_ao_hash_sne(struct tcp_sigpool *hp)
510  * @hp	- used for hashing
511  * @sne - sne value
512  */
tcp_ao_hash_sne(struct tcp_sigpool * hp,u32 sne)513 static int tcp_ao_hash_sne(struct tcp_sigpool *hp, u32 sne)
514 {
515 	struct scatterlist sg;
516 	__be32 *bp;
517 
518 	bp = (__be32 *)hp->scratch;
519 	*bp = htonl(sne);
520 
521 	sg_init_one(&sg, bp, sizeof(*bp));
522 	ahash_request_set_crypt(hp->req, &sg, NULL, sizeof(*bp));
523 	return crypto_ahash_update(hp->req);
524 }
525 
tcp_ao_hash_header(struct tcp_sigpool * hp,const struct tcphdr * th,bool exclude_options,u8 * hash,int hash_offset,int hash_len)526 static int tcp_ao_hash_header(struct tcp_sigpool *hp,
527 			      const struct tcphdr *th,
528 			      bool exclude_options, u8 *hash,
529 			      int hash_offset, int hash_len)
530 {
531 	struct scatterlist sg;
532 	u8 *hdr = hp->scratch;
533 	int err, len;
534 
535 	/* We are not allowed to change tcphdr, make a local copy */
536 	if (exclude_options) {
537 		len = sizeof(*th) + sizeof(struct tcp_ao_hdr) + hash_len;
538 		memcpy(hdr, th, sizeof(*th));
539 		memcpy(hdr + sizeof(*th),
540 		       (u8 *)th + hash_offset - sizeof(struct tcp_ao_hdr),
541 		       sizeof(struct tcp_ao_hdr));
542 		memset(hdr + sizeof(*th) + sizeof(struct tcp_ao_hdr),
543 		       0, hash_len);
544 		((struct tcphdr *)hdr)->check = 0;
545 	} else {
546 		len = th->doff << 2;
547 		memcpy(hdr, th, len);
548 		/* zero out tcp-ao hash */
549 		((struct tcphdr *)hdr)->check = 0;
550 		memset(hdr + hash_offset, 0, hash_len);
551 	}
552 
553 	sg_init_one(&sg, hdr, len);
554 	ahash_request_set_crypt(hp->req, &sg, NULL, len);
555 	err = crypto_ahash_update(hp->req);
556 	WARN_ON_ONCE(err != 0);
557 	return err;
558 }
559 
tcp_ao_hash_hdr(unsigned short int family,char * ao_hash,struct tcp_ao_key * key,const u8 * tkey,const union tcp_ao_addr * daddr,const union tcp_ao_addr * saddr,const struct tcphdr * th,u32 sne)560 int tcp_ao_hash_hdr(unsigned short int family, char *ao_hash,
561 		    struct tcp_ao_key *key, const u8 *tkey,
562 		    const union tcp_ao_addr *daddr,
563 		    const union tcp_ao_addr *saddr,
564 		    const struct tcphdr *th, u32 sne)
565 {
566 	int tkey_len = tcp_ao_digest_size(key);
567 	int hash_offset = ao_hash - (char *)th;
568 	struct tcp_sigpool hp;
569 	void *hash_buf = NULL;
570 
571 	hash_buf = kmalloc(tkey_len, GFP_ATOMIC);
572 	if (!hash_buf)
573 		goto clear_hash_noput;
574 
575 	if (tcp_sigpool_start(key->tcp_sigpool_id, &hp))
576 		goto clear_hash_noput;
577 
578 	if (crypto_ahash_setkey(crypto_ahash_reqtfm(hp.req), tkey, tkey_len))
579 		goto clear_hash;
580 
581 	if (crypto_ahash_init(hp.req))
582 		goto clear_hash;
583 
584 	if (tcp_ao_hash_sne(&hp, sne))
585 		goto clear_hash;
586 	if (family == AF_INET) {
587 		if (tcp_v4_ao_hash_pseudoheader(&hp, daddr->a4.s_addr,
588 						saddr->a4.s_addr, th->doff * 4))
589 			goto clear_hash;
590 #if IS_ENABLED(CONFIG_IPV6)
591 	} else if (family == AF_INET6) {
592 		if (tcp_v6_ao_hash_pseudoheader(&hp, &daddr->a6,
593 						&saddr->a6, th->doff * 4))
594 			goto clear_hash;
595 #endif
596 	} else {
597 		WARN_ON_ONCE(1);
598 		goto clear_hash;
599 	}
600 	if (tcp_ao_hash_header(&hp, th,
601 			       !!(key->keyflags & TCP_AO_KEYF_EXCLUDE_OPT),
602 			       ao_hash, hash_offset, tcp_ao_maclen(key)))
603 		goto clear_hash;
604 	ahash_request_set_crypt(hp.req, NULL, hash_buf, 0);
605 	if (crypto_ahash_final(hp.req))
606 		goto clear_hash;
607 
608 	memcpy(ao_hash, hash_buf, tcp_ao_maclen(key));
609 	tcp_sigpool_end(&hp);
610 	kfree(hash_buf);
611 	return 0;
612 
613 clear_hash:
614 	tcp_sigpool_end(&hp);
615 clear_hash_noput:
616 	memset(ao_hash, 0, tcp_ao_maclen(key));
617 	kfree(hash_buf);
618 	return 1;
619 }
620 
tcp_ao_hash_skb(unsigned short int family,char * ao_hash,struct tcp_ao_key * key,const struct sock * sk,const struct sk_buff * skb,const u8 * tkey,int hash_offset,u32 sne)621 int tcp_ao_hash_skb(unsigned short int family,
622 		    char *ao_hash, struct tcp_ao_key *key,
623 		    const struct sock *sk, const struct sk_buff *skb,
624 		    const u8 *tkey, int hash_offset, u32 sne)
625 {
626 	const struct tcphdr *th = tcp_hdr(skb);
627 	int tkey_len = tcp_ao_digest_size(key);
628 	struct tcp_sigpool hp;
629 	void *hash_buf = NULL;
630 
631 	hash_buf = kmalloc(tkey_len, GFP_ATOMIC);
632 	if (!hash_buf)
633 		goto clear_hash_noput;
634 
635 	if (tcp_sigpool_start(key->tcp_sigpool_id, &hp))
636 		goto clear_hash_noput;
637 
638 	if (crypto_ahash_setkey(crypto_ahash_reqtfm(hp.req), tkey, tkey_len))
639 		goto clear_hash;
640 
641 	/* For now use sha1 by default. Depends on alg in tcp_ao_key */
642 	if (crypto_ahash_init(hp.req))
643 		goto clear_hash;
644 
645 	if (tcp_ao_hash_sne(&hp, sne))
646 		goto clear_hash;
647 	if (tcp_ao_hash_pseudoheader(family, sk, skb, &hp, skb->len))
648 		goto clear_hash;
649 	if (tcp_ao_hash_header(&hp, th,
650 			       !!(key->keyflags & TCP_AO_KEYF_EXCLUDE_OPT),
651 			       ao_hash, hash_offset, tcp_ao_maclen(key)))
652 		goto clear_hash;
653 	if (tcp_sigpool_hash_skb_data(&hp, skb, th->doff << 2))
654 		goto clear_hash;
655 	ahash_request_set_crypt(hp.req, NULL, hash_buf, 0);
656 	if (crypto_ahash_final(hp.req))
657 		goto clear_hash;
658 
659 	memcpy(ao_hash, hash_buf, tcp_ao_maclen(key));
660 	tcp_sigpool_end(&hp);
661 	kfree(hash_buf);
662 	return 0;
663 
664 clear_hash:
665 	tcp_sigpool_end(&hp);
666 clear_hash_noput:
667 	memset(ao_hash, 0, tcp_ao_maclen(key));
668 	kfree(hash_buf);
669 	return 1;
670 }
671 
tcp_v4_ao_hash_skb(char * ao_hash,struct tcp_ao_key * key,const struct sock * sk,const struct sk_buff * skb,const u8 * tkey,int hash_offset,u32 sne)672 int tcp_v4_ao_hash_skb(char *ao_hash, struct tcp_ao_key *key,
673 		       const struct sock *sk, const struct sk_buff *skb,
674 		       const u8 *tkey, int hash_offset, u32 sne)
675 {
676 	return tcp_ao_hash_skb(AF_INET, ao_hash, key, sk, skb,
677 			       tkey, hash_offset, sne);
678 }
679 
tcp_v4_ao_synack_hash(char * ao_hash,struct tcp_ao_key * ao_key,struct request_sock * req,const struct sk_buff * skb,int hash_offset,u32 sne)680 int tcp_v4_ao_synack_hash(char *ao_hash, struct tcp_ao_key *ao_key,
681 			  struct request_sock *req, const struct sk_buff *skb,
682 			  int hash_offset, u32 sne)
683 {
684 	void *hash_buf = NULL;
685 	int err;
686 
687 	hash_buf = kmalloc(tcp_ao_digest_size(ao_key), GFP_ATOMIC);
688 	if (!hash_buf)
689 		return -ENOMEM;
690 
691 	err = tcp_v4_ao_calc_key_rsk(ao_key, hash_buf, req);
692 	if (err)
693 		goto out;
694 
695 	err = tcp_ao_hash_skb(AF_INET, ao_hash, ao_key, req_to_sk(req), skb,
696 			      hash_buf, hash_offset, sne);
697 out:
698 	kfree(hash_buf);
699 	return err;
700 }
701 
tcp_v4_ao_lookup_rsk(const struct sock * sk,struct request_sock * req,int sndid,int rcvid)702 struct tcp_ao_key *tcp_v4_ao_lookup_rsk(const struct sock *sk,
703 					struct request_sock *req,
704 					int sndid, int rcvid)
705 {
706 	struct inet_request_sock *ireq = inet_rsk(req);
707 	union tcp_ao_addr *addr = (union tcp_ao_addr *)&ireq->ir_rmt_addr;
708 	int l3index;
709 
710 	l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif);
711 	return tcp_ao_do_lookup(sk, l3index, addr, AF_INET, sndid, rcvid);
712 }
713 
tcp_v4_ao_lookup(const struct sock * sk,struct sock * addr_sk,int sndid,int rcvid)714 struct tcp_ao_key *tcp_v4_ao_lookup(const struct sock *sk, struct sock *addr_sk,
715 				    int sndid, int rcvid)
716 {
717 	int l3index = l3mdev_master_ifindex_by_index(sock_net(sk),
718 						     addr_sk->sk_bound_dev_if);
719 	union tcp_ao_addr *addr = (union tcp_ao_addr *)&addr_sk->sk_daddr;
720 
721 	return tcp_ao_do_lookup(sk, l3index, addr, AF_INET, sndid, rcvid);
722 }
723 
tcp_ao_prepare_reset(const struct sock * sk,struct sk_buff * skb,const struct tcp_ao_hdr * aoh,int l3index,u32 seq,struct tcp_ao_key ** key,char ** traffic_key,bool * allocated_traffic_key,u8 * keyid,u32 * sne)724 int tcp_ao_prepare_reset(const struct sock *sk, struct sk_buff *skb,
725 			 const struct tcp_ao_hdr *aoh, int l3index, u32 seq,
726 			 struct tcp_ao_key **key, char **traffic_key,
727 			 bool *allocated_traffic_key, u8 *keyid, u32 *sne)
728 {
729 	const struct tcphdr *th = tcp_hdr(skb);
730 	struct tcp_ao_info *ao_info;
731 
732 	*allocated_traffic_key = false;
733 	/* If there's no socket - than initial sisn/disn are unknown.
734 	 * Drop the segment. RFC5925 (7.7) advises to require graceful
735 	 * restart [RFC4724]. Alternatively, the RFC5925 advises to
736 	 * save/restore traffic keys before/after reboot.
737 	 * Linux TCP-AO support provides TCP_AO_ADD_KEY and TCP_AO_REPAIR
738 	 * options to restore a socket post-reboot.
739 	 */
740 	if (!sk)
741 		return -ENOTCONN;
742 
743 	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV)) {
744 		unsigned int family = READ_ONCE(sk->sk_family);
745 		union tcp_ao_addr *addr;
746 		__be32 disn, sisn;
747 
748 		if (sk->sk_state == TCP_NEW_SYN_RECV) {
749 			struct request_sock *req = inet_reqsk(sk);
750 
751 			sisn = htonl(tcp_rsk(req)->rcv_isn);
752 			disn = htonl(tcp_rsk(req)->snt_isn);
753 			*sne = tcp_ao_compute_sne(0, tcp_rsk(req)->snt_isn, seq);
754 		} else {
755 			sisn = th->seq;
756 			disn = 0;
757 		}
758 		if (IS_ENABLED(CONFIG_IPV6) && family == AF_INET6)
759 			addr = (union tcp_md5_addr *)&ipv6_hdr(skb)->saddr;
760 		else
761 			addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
762 #if IS_ENABLED(CONFIG_IPV6)
763 		if (family == AF_INET6 && ipv6_addr_v4mapped(&sk->sk_v6_daddr))
764 			family = AF_INET;
765 #endif
766 
767 		sk = sk_const_to_full_sk(sk);
768 		ao_info = rcu_dereference(tcp_sk(sk)->ao_info);
769 		if (!ao_info)
770 			return -ENOENT;
771 		*key = tcp_ao_do_lookup(sk, l3index, addr, family,
772 					-1, aoh->rnext_keyid);
773 		if (!*key)
774 			return -ENOENT;
775 		*traffic_key = kmalloc(tcp_ao_digest_size(*key), GFP_ATOMIC);
776 		if (!*traffic_key)
777 			return -ENOMEM;
778 		*allocated_traffic_key = true;
779 		if (tcp_ao_calc_key_skb(*key, *traffic_key, skb,
780 					sisn, disn, family))
781 			return -1;
782 		*keyid = (*key)->rcvid;
783 	} else {
784 		struct tcp_ao_key *rnext_key;
785 		u32 snd_basis;
786 
787 		if (sk->sk_state == TCP_TIME_WAIT) {
788 			ao_info = rcu_dereference(tcp_twsk(sk)->ao_info);
789 			snd_basis = tcp_twsk(sk)->tw_snd_nxt;
790 		} else {
791 			ao_info = rcu_dereference(tcp_sk(sk)->ao_info);
792 			snd_basis = tcp_sk(sk)->snd_una;
793 		}
794 		if (!ao_info)
795 			return -ENOENT;
796 
797 		*key = tcp_ao_established_key(sk, ao_info, aoh->rnext_keyid, -1);
798 		if (!*key)
799 			return -ENOENT;
800 		*traffic_key = snd_other_key(*key);
801 		rnext_key = READ_ONCE(ao_info->rnext_key);
802 		*keyid = rnext_key->rcvid;
803 		*sne = tcp_ao_compute_sne(READ_ONCE(ao_info->snd_sne),
804 					  snd_basis, seq);
805 	}
806 	return 0;
807 }
808 
/* Sign an outgoing segment: pick the traffic key (deriving a fresh one
 * for SYN/SYN-ACK, where send keys aren't established yet), compute the
 * SNE for this sequence number and write the MAC at @hash_location.
 * Returns 0 on success or -ENOMEM.
 */
int tcp_ao_transmit_skb(struct sock *sk, struct sk_buff *skb,
			struct tcp_ao_key *key, struct tcphdr *th,
			__u8 *hash_location)
{
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_ao_info *ao;
	void *tkey_buf = NULL;
	u8 *traffic_key;
	u32 sne;

	ao = rcu_dereference_protected(tcp_sk(sk)->ao_info,
				       lockdep_sock_is_held(sk));
	traffic_key = snd_other_key(key);
	if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) {
		__be32 disn;

		if (!(tcb->tcp_flags & TCPHDR_ACK)) {
			/* Pure SYN: peer ISN unknown, derive into a
			 * temporary buffer so the established key slot
			 * isn't clobbered (e.g. on retransmits).
			 */
			disn = 0;
			tkey_buf = kmalloc(tcp_ao_digest_size(key), GFP_ATOMIC);
			if (!tkey_buf)
				return -ENOMEM;
			traffic_key = tkey_buf;
		} else {
			/* SYN-ACK: peer ISN already known */
			disn = ao->risn;
		}
		tp->af_specific->ao_calc_key_sk(key, traffic_key,
						sk, ao->lisn, disn, true);
	}
	sne = tcp_ao_compute_sne(READ_ONCE(ao->snd_sne), READ_ONCE(tp->snd_una),
				 ntohl(th->seq));
	tp->af_specific->calc_ao_hash(hash_location, key, sk, skb, traffic_key,
				      hash_location - (u8 *)th, sne);
	/* kfree(NULL) is a no-op for the non-SYN path */
	kfree(tkey_buf);
	return 0;
}
845 
tcp_ao_inbound_lookup(unsigned short int family,const struct sock * sk,const struct sk_buff * skb,int sndid,int rcvid,int l3index)846 static struct tcp_ao_key *tcp_ao_inbound_lookup(unsigned short int family,
847 		const struct sock *sk, const struct sk_buff *skb,
848 		int sndid, int rcvid, int l3index)
849 {
850 	if (family == AF_INET) {
851 		const struct iphdr *iph = ip_hdr(skb);
852 
853 		return tcp_ao_do_lookup(sk, l3index,
854 					(union tcp_ao_addr *)&iph->saddr,
855 					AF_INET, sndid, rcvid);
856 	} else {
857 		const struct ipv6hdr *iph = ipv6_hdr(skb);
858 
859 		return tcp_ao_do_lookup(sk, l3index,
860 					(union tcp_ao_addr *)&iph->saddr,
861 					AF_INET6, sndid, rcvid);
862 	}
863 }
864 
/* Record TCP-AO state on a syncookie-created request sock: if the SYN
 * carried an AO option and a matching MKT exists, remember the peer's
 * keyid/rnext so the SYN-ACK can be signed.  Otherwise the request
 * proceeds without TCP-AO.
 */
void tcp_ao_syncookie(struct sock *sk, const struct sk_buff *skb,
		      struct request_sock *req, unsigned short int family)
{
	struct tcp_request_sock *treq = tcp_rsk(req);
	const struct tcphdr *th = tcp_hdr(skb);
	const struct tcp_ao_hdr *aoh;
	struct tcp_ao_key *key;
	int l3index;

	/* treq->af_specific is used to perform TCP_AO lookup
	 * in tcp_create_openreq_child().
	 */
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		treq->af_specific = &tcp_request_sock_ipv6_ops;
	else
#endif
		treq->af_specific = &tcp_request_sock_ipv4_ops;

	treq->used_tcp_ao = false;

	if (tcp_parse_auth_options(th, NULL, &aoh) || !aoh)
		return;

	l3index = l3mdev_master_ifindex_by_index(sock_net(sk), inet_rsk(req)->ir_iif);
	/* match our RecvID against the peer's advertised rnext_keyid */
	key = tcp_ao_inbound_lookup(family, sk, skb, -1, aoh->keyid, l3index);
	if (!key)
		/* Key not found, continue without TCP-AO */
		return;

	treq->ao_rcv_next = aoh->keyid;
	treq->ao_keyid = aoh->rnext_keyid;
	treq->used_tcp_ao = true;
}
899 
900 static enum skb_drop_reason
tcp_ao_verify_hash(const struct sock * sk,const struct sk_buff * skb,unsigned short int family,struct tcp_ao_info * info,const struct tcp_ao_hdr * aoh,struct tcp_ao_key * key,u8 * traffic_key,u8 * phash,u32 sne,int l3index)901 tcp_ao_verify_hash(const struct sock *sk, const struct sk_buff *skb,
902 		   unsigned short int family, struct tcp_ao_info *info,
903 		   const struct tcp_ao_hdr *aoh, struct tcp_ao_key *key,
904 		   u8 *traffic_key, u8 *phash, u32 sne, int l3index)
905 {
906 	const struct tcphdr *th = tcp_hdr(skb);
907 	u8 maclen = tcp_ao_hdr_maclen(aoh);
908 	void *hash_buf = NULL;
909 
910 	if (maclen != tcp_ao_maclen(key)) {
911 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOBAD);
912 		atomic64_inc(&info->counters.pkt_bad);
913 		atomic64_inc(&key->pkt_bad);
914 		trace_tcp_ao_wrong_maclen(sk, skb, aoh->keyid,
915 					  aoh->rnext_keyid, maclen);
916 		return SKB_DROP_REASON_TCP_AOFAILURE;
917 	}
918 
919 	hash_buf = kmalloc(tcp_ao_digest_size(key), GFP_ATOMIC);
920 	if (!hash_buf)
921 		return SKB_DROP_REASON_NOT_SPECIFIED;
922 
923 	/* XXX: make it per-AF callback? */
924 	tcp_ao_hash_skb(family, hash_buf, key, sk, skb, traffic_key,
925 			(phash - (u8 *)th), sne);
926 	if (memcmp(phash, hash_buf, maclen)) {
927 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOBAD);
928 		atomic64_inc(&info->counters.pkt_bad);
929 		atomic64_inc(&key->pkt_bad);
930 		trace_tcp_ao_mismatch(sk, skb, aoh->keyid,
931 				      aoh->rnext_keyid, maclen);
932 		kfree(hash_buf);
933 		return SKB_DROP_REASON_TCP_AOFAILURE;
934 	}
935 	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOGOOD);
936 	atomic64_inc(&info->counters.pkt_good);
937 	atomic64_inc(&key->pkt_good);
938 	kfree(hash_buf);
939 	return SKB_NOT_DROPPED_YET;
940 }
941 
/* Main inbound TCP-AO verification entry: picks the right MKT and traffic
 * key for the socket's current state and checks the segment's MAC.
 * Fast path: established sockets use cached traffic keys and may honour a
 * peer-requested RNext key rotation. Slow path: listeners, syncookies and
 * handshake states derive the traffic key on the fly from the ISNs.
 * Returns SKB_NOT_DROPPED_YET or an skb drop reason.
 */
enum skb_drop_reason
tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb,
		    unsigned short int family, const struct request_sock *req,
		    int l3index, const struct tcp_ao_hdr *aoh)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u8 maclen = tcp_ao_hdr_maclen(aoh);
	u8 *phash = (u8 *)(aoh + 1); /* hash goes just after the header */
	struct tcp_ao_info *info;
	enum skb_drop_reason ret;
	struct tcp_ao_key *key;
	__be32 sisn, disn;
	u8 *traffic_key;
	int state;
	u32 sne = 0;

	info = rcu_dereference(tcp_sk(sk)->ao_info);
	if (!info) {
		/* Segment is AO-signed but this socket has no AO state */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOKEYNOTFOUND);
		trace_tcp_ao_key_not_found(sk, skb, aoh->keyid,
					   aoh->rnext_keyid, maclen);
		return SKB_DROP_REASON_TCP_AOUNEXPECTED;
	}

	/* For a SYN the peer's ISN is the segment's own seq; the other
	 * direction's ISN is unknown (zero) at this point.
	 */
	if (unlikely(th->syn)) {
		sisn = th->seq;
		disn = 0;
	}

	state = READ_ONCE(sk->sk_state);
	/* Fast-path */
	if (likely((1 << state) & TCP_AO_ESTABLISHED)) {
		enum skb_drop_reason err;
		struct tcp_ao_key *current_key;

		/* Check if this socket's rnext_key matches the keyid in the
		 * packet. If not we lookup the key based on the keyid
		 * matching the rcvid in the mkt.
		 */
		key = READ_ONCE(info->rnext_key);
		if (key->rcvid != aoh->keyid) {
			key = tcp_ao_established_key(sk, info, -1, aoh->keyid);
			if (!key)
				goto key_not_found;
		}

		/* Delayed retransmitted SYN */
		if (unlikely(th->syn && !th->ack))
			goto verify_hash;

		sne = tcp_ao_compute_sne(info->rcv_sne, tcp_sk(sk)->rcv_nxt,
					 ntohl(th->seq));
		/* Established socket, traffic key are cached */
		traffic_key = rcv_other_key(key);
		err = tcp_ao_verify_hash(sk, skb, family, info, aoh, key,
					 traffic_key, phash, sne, l3index);
		if (err)
			return err;
		current_key = READ_ONCE(info->current_key);
		/* Key rotation: the peer asks us to use new key (RNext) */
		if (unlikely(aoh->rnext_keyid != current_key->sndid)) {
			trace_tcp_ao_rnext_request(sk, skb, current_key->sndid,
						   aoh->rnext_keyid,
						   tcp_ao_hdr_maclen(aoh));
			/* If the key is not found we do nothing. */
			key = tcp_ao_established_key(sk, info, aoh->rnext_keyid, -1);
			if (key)
				/* pairs with tcp_ao_del_cmd */
				WRITE_ONCE(info->current_key, key);
		}
		return SKB_NOT_DROPPED_YET;
	}

	if (unlikely(state == TCP_CLOSE))
		return SKB_DROP_REASON_TCP_CLOSE;

	/* Lookup key based on peer address and keyid.
	 * current_key and rnext_key must not be used on tcp listen
	 * sockets as otherwise:
	 * - request sockets would race on those key pointers
	 * - tcp_ao_del_cmd() allows async key removal
	 */
	key = tcp_ao_inbound_lookup(family, sk, skb, -1, aoh->keyid, l3index);
	if (!key)
		goto key_not_found;

	if (th->syn && !th->ack)
		goto verify_hash;

	if ((1 << state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV)) {
		/* Make the initial syn the likely case here */
		if (unlikely(req)) {
			sne = tcp_ao_compute_sne(0, tcp_rsk(req)->rcv_isn,
						 ntohl(th->seq));
			sisn = htonl(tcp_rsk(req)->rcv_isn);
			disn = htonl(tcp_rsk(req)->snt_isn);
		} else if (unlikely(th->ack && !th->syn)) {
			/* Possible syncookie packet: reconstruct both ISNs
			 * from seq/ack_seq of the handshake-completing ACK.
			 */
			sisn = htonl(ntohl(th->seq) - 1);
			disn = htonl(ntohl(th->ack_seq) - 1);
			sne = tcp_ao_compute_sne(0, ntohl(sisn),
						 ntohl(th->seq));
		} else if (unlikely(!th->syn)) {
			/* no way to figure out initial sisn/disn - drop */
			return SKB_DROP_REASON_TCP_FLAGS;
		}
	} else if ((1 << state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		disn = info->lisn;
		if (th->syn || th->rst)
			sisn = th->seq;
		else
			sisn = info->risn;
	} else {
		WARN_ONCE(1, "TCP-AO: Unexpected sk_state %d", state);
		return SKB_DROP_REASON_TCP_AOFAILURE;
	}
verify_hash:
	/* Slow path: derive the traffic key from ISNs for this one segment */
	traffic_key = kmalloc(tcp_ao_digest_size(key), GFP_ATOMIC);
	if (!traffic_key)
		return SKB_DROP_REASON_NOT_SPECIFIED;
	tcp_ao_calc_key_skb(key, traffic_key, skb, sisn, disn, family);
	ret = tcp_ao_verify_hash(sk, skb, family, info, aoh, key,
				 traffic_key, phash, sne, l3index);
	kfree(traffic_key);
	return ret;

key_not_found:
	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOKEYNOTFOUND);
	atomic64_inc(&info->counters.key_not_found);
	trace_tcp_ao_key_not_found(sk, skb, aoh->keyid,
				   aoh->rnext_keyid, maclen);
	return SKB_DROP_REASON_TCP_AOKEYNOTFOUND;
}
1075 
tcp_ao_cache_traffic_keys(const struct sock * sk,struct tcp_ao_info * ao,struct tcp_ao_key * ao_key)1076 static int tcp_ao_cache_traffic_keys(const struct sock *sk,
1077 				     struct tcp_ao_info *ao,
1078 				     struct tcp_ao_key *ao_key)
1079 {
1080 	u8 *traffic_key = snd_other_key(ao_key);
1081 	int ret;
1082 
1083 	ret = tcp_ao_calc_key_sk(ao_key, traffic_key, sk,
1084 				 ao->lisn, ao->risn, true);
1085 	if (ret)
1086 		return ret;
1087 
1088 	traffic_key = rcv_other_key(ao_key);
1089 	ret = tcp_ao_calc_key_sk(ao_key, traffic_key, sk,
1090 				 ao->lisn, ao->risn, false);
1091 	return ret;
1092 }
1093 
/* Prepare TCP-AO state for an outgoing connect(): drop every MKT that
 * doesn't match the (now known) peer, pick default current/rnext keys if
 * the user didn't set them, reserve TCP option space for the AO option
 * and record the local ISN. Called with the socket lock held.
 */
void tcp_ao_connect_init(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_ao_info *ao_info;
	struct hlist_node *next;
	union tcp_ao_addr *addr;
	struct tcp_ao_key *key;
	int family, l3index;

	ao_info = rcu_dereference_protected(tp->ao_info,
					    lockdep_sock_is_held(sk));
	if (!ao_info)
		return;

	/* Remove all keys that don't match the peer */
	family = sk->sk_family;
	if (family == AF_INET)
		addr = (union tcp_ao_addr *)&sk->sk_daddr;
#if IS_ENABLED(CONFIG_IPV6)
	else if (family == AF_INET6)
		addr = (union tcp_ao_addr *)&sk->sk_v6_daddr;
#endif
	else
		return;
	l3index = l3mdev_master_ifindex_by_index(sock_net(sk),
						 sk->sk_bound_dev_if);

	hlist_for_each_entry_safe(key, next, &ao_info->head, node) {
		if (!tcp_ao_key_cmp(key, l3index, addr, key->prefixlen, family, -1, -1))
			continue;

		/* Key doesn't match the peer: unlink it, clearing any
		 * current/rnext pointer to it, and free it after a grace
		 * period (readers may still hold references via RCU).
		 */
		if (key == ao_info->current_key)
			ao_info->current_key = NULL;
		if (key == ao_info->rnext_key)
			ao_info->rnext_key = NULL;
		hlist_del_rcu(&key->node);
		atomic_sub(tcp_ao_sizeof_key(key), &sk->sk_omem_alloc);
		call_rcu(&key->rcu, tcp_ao_key_free_rcu);
	}

	key = tp->af_specific->ao_lookup(sk, sk, -1, -1);
	if (key) {
		/* if current_key or rnext_key were not provided,
		 * use the first key matching the peer
		 */
		if (!ao_info->current_key)
			ao_info->current_key = key;
		if (!ao_info->rnext_key)
			ao_info->rnext_key = key;
		tp->tcp_header_len += tcp_ao_len_aligned(key);

		ao_info->lisn = htonl(tp->write_seq);
		ao_info->snd_sne = 0;
	} else {
		/* Can't happen: tcp_connect() verifies that there's
		 * at least one tcp-ao key that matches the remote peer.
		 */
		WARN_ON_ONCE(1);
		rcu_assign_pointer(tp->ao_info, NULL);
		kfree(ao_info);
	}
}
1156 
tcp_ao_established(struct sock * sk)1157 void tcp_ao_established(struct sock *sk)
1158 {
1159 	struct tcp_ao_info *ao;
1160 	struct tcp_ao_key *key;
1161 
1162 	ao = rcu_dereference_protected(tcp_sk(sk)->ao_info,
1163 				       lockdep_sock_is_held(sk));
1164 	if (!ao)
1165 		return;
1166 
1167 	hlist_for_each_entry_rcu(key, &ao->head, node, lockdep_sock_is_held(sk))
1168 		tcp_ao_cache_traffic_keys(sk, ao, key);
1169 }
1170 
tcp_ao_finish_connect(struct sock * sk,struct sk_buff * skb)1171 void tcp_ao_finish_connect(struct sock *sk, struct sk_buff *skb)
1172 {
1173 	struct tcp_ao_info *ao;
1174 	struct tcp_ao_key *key;
1175 
1176 	ao = rcu_dereference_protected(tcp_sk(sk)->ao_info,
1177 				       lockdep_sock_is_held(sk));
1178 	if (!ao)
1179 		return;
1180 
1181 	WRITE_ONCE(ao->risn, tcp_hdr(skb)->seq);
1182 	ao->rcv_sne = 0;
1183 
1184 	hlist_for_each_entry_rcu(key, &ao->head, node, lockdep_sock_is_held(sk))
1185 		tcp_ao_cache_traffic_keys(sk, ao, key);
1186 }
1187 
/* Clone all peer-matching MKTs from listener @sk onto child @newsk when
 * accepting a TCP-AO signed connection. Allocates a fresh tcp_ao_info,
 * copies matching keys with freshly cached traffic keys, sets
 * current/rnext from the request-sock's negotiated key ids (falling back
 * to the first copied key) and takes a tcp_ao_needed static-key reference.
 * Returns 0 on success (or when the child isn't TCP-AO), negative errno
 * otherwise; all allocations are unwound on failure.
 */
int tcp_ao_copy_all_matching(const struct sock *sk, struct sock *newsk,
			     struct request_sock *req, struct sk_buff *skb,
			     int family)
{
	struct tcp_ao_key *key, *new_key, *first_key;
	struct tcp_ao_info *new_ao, *ao;
	struct hlist_node *key_head;
	int l3index, ret = -ENOMEM;
	union tcp_ao_addr *addr;
	bool match = false;

	ao = rcu_dereference(tcp_sk(sk)->ao_info);
	if (!ao)
		return 0;

	/* New socket without TCP-AO on it */
	if (!tcp_rsk_used_ao(req))
		return 0;

	new_ao = tcp_ao_alloc_info(GFP_ATOMIC);
	if (!new_ao)
		return -ENOMEM;
	/* Child inherits ISNs from the request sock and policy flags
	 * from the listener.
	 */
	new_ao->lisn = htonl(tcp_rsk(req)->snt_isn);
	new_ao->risn = htonl(tcp_rsk(req)->rcv_isn);
	new_ao->ao_required = ao->ao_required;
	new_ao->accept_icmps = ao->accept_icmps;

	if (family == AF_INET) {
		addr = (union tcp_ao_addr *)&newsk->sk_daddr;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (family == AF_INET6) {
		addr = (union tcp_ao_addr *)&newsk->sk_v6_daddr;
#endif
	} else {
		ret = -EAFNOSUPPORT;
		goto free_ao;
	}
	l3index = l3mdev_master_ifindex_by_index(sock_net(newsk),
						 newsk->sk_bound_dev_if);

	/* Copy every listener key matching the child's peer address */
	hlist_for_each_entry_rcu(key, &ao->head, node) {
		if (tcp_ao_key_cmp(key, l3index, addr, key->prefixlen, family, -1, -1))
			continue;

		new_key = tcp_ao_copy_key(newsk, key);
		if (!new_key)
			goto free_and_exit;

		tcp_ao_cache_traffic_keys(newsk, new_ao, new_key);
		tcp_ao_link_mkt(new_ao, new_key);
		match = true;
	}

	if (!match) {
		/* RFC5925 (7.4.1) specifies that the TCP-AO status
		 * of a connection is determined on the initial SYN.
		 * At this point the connection was TCP-AO enabled, so
		 * it can't switch to being unsigned if peer's key
		 * disappears on the listening socket.
		 */
		ret = -EKEYREJECTED;
		goto free_and_exit;
	}

	if (!static_key_fast_inc_not_disabled(&tcp_ao_needed.key.key)) {
		ret = -EUSERS;
		goto free_and_exit;
	}

	key_head = rcu_dereference(hlist_first_rcu(&new_ao->head));
	first_key = hlist_entry_safe(key_head, struct tcp_ao_key, node);

	key = tcp_ao_established_key(req_to_sk(req), new_ao, tcp_rsk(req)->ao_keyid, -1);
	if (key)
		new_ao->current_key = key;
	else
		new_ao->current_key = first_key;

	/* set rnext_key */
	key = tcp_ao_established_key(req_to_sk(req), new_ao, -1, tcp_rsk(req)->ao_rcv_next);
	if (key)
		new_ao->rnext_key = key;
	else
		new_ao->rnext_key = first_key;

	sk_gso_disable(newsk);
	rcu_assign_pointer(tcp_sk(newsk)->ao_info, new_ao);

	return 0;

free_and_exit:
	/* Unwind: child isn't published yet, so plain hlist_del is safe */
	hlist_for_each_entry_safe(key, key_head, &new_ao->head, node) {
		hlist_del(&key->node);
		tcp_sigpool_release(key->tcp_sigpool_id);
		atomic_sub(tcp_ao_sizeof_key(key), &newsk->sk_omem_alloc);
		kfree_sensitive(key);
	}
free_ao:
	kfree(new_ao);
	return ret;
}
1289 
tcp_ao_can_set_current_rnext(struct sock * sk)1290 static bool tcp_ao_can_set_current_rnext(struct sock *sk)
1291 {
1292 	/* There aren't current/rnext keys on TCP_LISTEN sockets */
1293 	if (sk->sk_state == TCP_LISTEN)
1294 		return false;
1295 	return true;
1296 }
1297 
tcp_ao_verify_ipv4(struct sock * sk,struct tcp_ao_add * cmd,union tcp_ao_addr ** addr)1298 static int tcp_ao_verify_ipv4(struct sock *sk, struct tcp_ao_add *cmd,
1299 			      union tcp_ao_addr **addr)
1300 {
1301 	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd->addr;
1302 	struct inet_sock *inet = inet_sk(sk);
1303 
1304 	if (sin->sin_family != AF_INET)
1305 		return -EINVAL;
1306 
1307 	/* Currently matching is not performed on port (or port ranges) */
1308 	if (sin->sin_port != 0)
1309 		return -EINVAL;
1310 
1311 	/* Check prefix and trailing 0's in addr */
1312 	if (cmd->prefix != 0) {
1313 		__be32 mask;
1314 
1315 		if (ntohl(sin->sin_addr.s_addr) == INADDR_ANY)
1316 			return -EINVAL;
1317 		if (cmd->prefix > 32)
1318 			return -EINVAL;
1319 
1320 		mask = inet_make_mask(cmd->prefix);
1321 		if (sin->sin_addr.s_addr & ~mask)
1322 			return -EINVAL;
1323 
1324 		/* Check that MKT address is consistent with socket */
1325 		if (ntohl(inet->inet_daddr) != INADDR_ANY &&
1326 		    (inet->inet_daddr & mask) != sin->sin_addr.s_addr)
1327 			return -EINVAL;
1328 	} else {
1329 		if (ntohl(sin->sin_addr.s_addr) != INADDR_ANY)
1330 			return -EINVAL;
1331 	}
1332 
1333 	*addr = (union tcp_ao_addr *)&sin->sin_addr;
1334 	return 0;
1335 }
1336 
tcp_ao_parse_crypto(struct tcp_ao_add * cmd,struct tcp_ao_key * key)1337 static int tcp_ao_parse_crypto(struct tcp_ao_add *cmd, struct tcp_ao_key *key)
1338 {
1339 	unsigned int syn_tcp_option_space;
1340 	bool is_kdf_aes_128_cmac = false;
1341 	struct crypto_ahash *tfm;
1342 	struct tcp_sigpool hp;
1343 	void *tmp_key = NULL;
1344 	int err;
1345 
1346 	/* RFC5926, 3.1.1.2. KDF_AES_128_CMAC */
1347 	if (!strcmp("cmac(aes128)", cmd->alg_name)) {
1348 		strscpy(cmd->alg_name, "cmac(aes)", sizeof(cmd->alg_name));
1349 		is_kdf_aes_128_cmac = (cmd->keylen != 16);
1350 		tmp_key = kmalloc(cmd->keylen, GFP_KERNEL);
1351 		if (!tmp_key)
1352 			return -ENOMEM;
1353 	}
1354 
1355 	key->maclen = cmd->maclen ?: 12; /* 12 is the default in RFC5925 */
1356 
1357 	/* Check: maclen + tcp-ao header <= (MAX_TCP_OPTION_SPACE - mss
1358 	 *					- tstamp (including sackperm)
1359 	 *					- wscale),
1360 	 * see tcp_syn_options(), tcp_synack_options(), commit 33ad798c924b.
1361 	 *
1362 	 * In order to allow D-SACK with TCP-AO, the header size should be:
1363 	 * (MAX_TCP_OPTION_SPACE - TCPOLEN_TSTAMP_ALIGNED
1364 	 *			- TCPOLEN_SACK_BASE_ALIGNED
1365 	 *			- 2 * TCPOLEN_SACK_PERBLOCK) = 8 (maclen = 4),
1366 	 * see tcp_established_options().
1367 	 *
1368 	 * RFC5925, 2.2:
1369 	 * Typical MACs are 96-128 bits (12-16 bytes), but any length
1370 	 * that fits in the header of the segment being authenticated
1371 	 * is allowed.
1372 	 *
1373 	 * RFC5925, 7.6:
1374 	 * TCP-AO continues to consume 16 bytes in non-SYN segments,
1375 	 * leaving a total of 24 bytes for other options, of which
1376 	 * the timestamp consumes 10.  This leaves 14 bytes, of which 10
1377 	 * are used for a single SACK block. When two SACK blocks are used,
1378 	 * such as to handle D-SACK, a smaller TCP-AO MAC would be required
1379 	 * to make room for the additional SACK block (i.e., to leave 18
1380 	 * bytes for the D-SACK variant of the SACK option) [RFC2883].
1381 	 * Note that D-SACK is not supportable in TCP MD5 in the presence
1382 	 * of timestamps, because TCP MD5’s MAC length is fixed and too
1383 	 * large to leave sufficient option space.
1384 	 */
1385 	syn_tcp_option_space = MAX_TCP_OPTION_SPACE;
1386 	syn_tcp_option_space -= TCPOLEN_MSS_ALIGNED;
1387 	syn_tcp_option_space -= TCPOLEN_TSTAMP_ALIGNED;
1388 	syn_tcp_option_space -= TCPOLEN_WSCALE_ALIGNED;
1389 	if (tcp_ao_len_aligned(key) > syn_tcp_option_space) {
1390 		err = -EMSGSIZE;
1391 		goto err_kfree;
1392 	}
1393 
1394 	key->keylen = cmd->keylen;
1395 	memcpy(key->key, cmd->key, cmd->keylen);
1396 
1397 	err = tcp_sigpool_start(key->tcp_sigpool_id, &hp);
1398 	if (err)
1399 		goto err_kfree;
1400 
1401 	tfm = crypto_ahash_reqtfm(hp.req);
1402 	if (is_kdf_aes_128_cmac) {
1403 		void *scratch = hp.scratch;
1404 		struct scatterlist sg;
1405 
1406 		memcpy(tmp_key, cmd->key, cmd->keylen);
1407 		sg_init_one(&sg, tmp_key, cmd->keylen);
1408 
1409 		/* Using zero-key of 16 bytes as described in RFC5926 */
1410 		memset(scratch, 0, 16);
1411 		err = crypto_ahash_setkey(tfm, scratch, 16);
1412 		if (err)
1413 			goto err_pool_end;
1414 
1415 		err = crypto_ahash_init(hp.req);
1416 		if (err)
1417 			goto err_pool_end;
1418 
1419 		ahash_request_set_crypt(hp.req, &sg, key->key, cmd->keylen);
1420 		err = crypto_ahash_update(hp.req);
1421 		if (err)
1422 			goto err_pool_end;
1423 
1424 		err |= crypto_ahash_final(hp.req);
1425 		if (err)
1426 			goto err_pool_end;
1427 		key->keylen = 16;
1428 	}
1429 
1430 	err = crypto_ahash_setkey(tfm, key->key, key->keylen);
1431 	if (err)
1432 		goto err_pool_end;
1433 
1434 	tcp_sigpool_end(&hp);
1435 	kfree_sensitive(tmp_key);
1436 
1437 	if (tcp_ao_maclen(key) > key->digest_size)
1438 		return -EINVAL;
1439 
1440 	return 0;
1441 
1442 err_pool_end:
1443 	tcp_sigpool_end(&hp);
1444 err_kfree:
1445 	kfree_sensitive(tmp_key);
1446 	return err;
1447 }
1448 
#if IS_ENABLED(CONFIG_IPV6)
/* Validate the IPv6 address/prefix of a TCP_AO_ADD_KEY request.
 * IPv4-mapped addresses are demoted to AF_INET matching (prefix <= 32,
 * *family rewritten); native IPv6 addresses must have no bits set beyond
 * the prefix and must cover the socket's connected peer; a zero prefix
 * requires the unspecified (any) address. Returns 0 or -EINVAL.
 */
static int tcp_ao_verify_ipv6(struct sock *sk, struct tcp_ao_add *cmd,
			      union tcp_ao_addr **paddr,
			      unsigned short int *family)
{
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd->addr;
	struct in6_addr *addr = &sin6->sin6_addr;
	u8 prefix = cmd->prefix;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	/* Currently matching is not performed on port (or port ranges) */
	if (sin6->sin6_port != 0)
		return -EINVAL;

	/* Check prefix and trailing 0's in addr */
	if (cmd->prefix != 0 && ipv6_addr_v4mapped(addr)) {
		/* v4-mapped: match on the embedded IPv4 address */
		__be32 addr4 = addr->s6_addr32[3];
		__be32 mask;

		if (prefix > 32 || ntohl(addr4) == INADDR_ANY)
			return -EINVAL;

		mask = inet_make_mask(prefix);
		if (addr4 & ~mask)
			return -EINVAL;

		/* Check that MKT address is consistent with socket */
		if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
			__be32 daddr4 = sk->sk_v6_daddr.s6_addr32[3];

			if (!ipv6_addr_v4mapped(&sk->sk_v6_daddr))
				return -EINVAL;
			if ((daddr4 & mask) != addr4)
				return -EINVAL;
		}

		*paddr = (union tcp_ao_addr *)&addr->s6_addr32[3];
		*family = AF_INET;
		return 0;
	} else if (cmd->prefix != 0) {
		struct in6_addr pfx;

		if (ipv6_addr_any(addr) || prefix > 128)
			return -EINVAL;

		/* Address must equal its own prefix: no host bits set */
		ipv6_addr_prefix(&pfx, addr, prefix);
		if (ipv6_addr_cmp(&pfx, addr))
			return -EINVAL;

		/* Check that MKT address is consistent with socket */
		if (!ipv6_addr_any(&sk->sk_v6_daddr) &&
		    !ipv6_prefix_equal(&sk->sk_v6_daddr, addr, prefix))

			return -EINVAL;
	} else {
		if (!ipv6_addr_any(addr))
			return -EINVAL;
	}

	*paddr = (union tcp_ao_addr *)addr;
	return 0;
}
#else
/* !CONFIG_IPV6 stub: IPv6 MKTs can't be added */
static int tcp_ao_verify_ipv6(struct sock *sk, struct tcp_ao_add *cmd,
			      union tcp_ao_addr **paddr,
			      unsigned short int *family)
{
	return -EOPNOTSUPP;
}
#endif
1521 
setsockopt_ao_info(struct sock * sk)1522 static struct tcp_ao_info *setsockopt_ao_info(struct sock *sk)
1523 {
1524 	if (sk_fullsock(sk)) {
1525 		return rcu_dereference_protected(tcp_sk(sk)->ao_info,
1526 						 lockdep_sock_is_held(sk));
1527 	} else if (sk->sk_state == TCP_TIME_WAIT) {
1528 		return rcu_dereference_protected(tcp_twsk(sk)->ao_info,
1529 						 lockdep_sock_is_held(sk));
1530 	}
1531 	return ERR_PTR(-ESOCKTNOSUPPORT);
1532 }
1533 
getsockopt_ao_info(struct sock * sk)1534 static struct tcp_ao_info *getsockopt_ao_info(struct sock *sk)
1535 {
1536 	if (sk_fullsock(sk))
1537 		return rcu_dereference(tcp_sk(sk)->ao_info);
1538 	else if (sk->sk_state == TCP_TIME_WAIT)
1539 		return rcu_dereference(tcp_twsk(sk)->ao_info);
1540 
1541 	return ERR_PTR(-ESOCKTNOSUPPORT);
1542 }
1543 
1544 #define TCP_AO_KEYF_ALL (TCP_AO_KEYF_IFINDEX | TCP_AO_KEYF_EXCLUDE_OPT)
1545 #define TCP_AO_GET_KEYF_VALID	(TCP_AO_KEYF_IFINDEX)
1546 
tcp_ao_key_alloc(struct sock * sk,struct tcp_ao_add * cmd)1547 static struct tcp_ao_key *tcp_ao_key_alloc(struct sock *sk,
1548 					   struct tcp_ao_add *cmd)
1549 {
1550 	const char *algo = cmd->alg_name;
1551 	unsigned int digest_size;
1552 	struct crypto_ahash *tfm;
1553 	struct tcp_ao_key *key;
1554 	struct tcp_sigpool hp;
1555 	int err, pool_id;
1556 	size_t size;
1557 
1558 	/* Force null-termination of alg_name */
1559 	cmd->alg_name[ARRAY_SIZE(cmd->alg_name) - 1] = '\0';
1560 
1561 	/* RFC5926, 3.1.1.2. KDF_AES_128_CMAC */
1562 	if (!strcmp("cmac(aes128)", algo))
1563 		algo = "cmac(aes)";
1564 
1565 	/* Full TCP header (th->doff << 2) should fit into scratch area,
1566 	 * see tcp_ao_hash_header().
1567 	 */
1568 	pool_id = tcp_sigpool_alloc_ahash(algo, 60);
1569 	if (pool_id < 0)
1570 		return ERR_PTR(pool_id);
1571 
1572 	err = tcp_sigpool_start(pool_id, &hp);
1573 	if (err)
1574 		goto err_free_pool;
1575 
1576 	tfm = crypto_ahash_reqtfm(hp.req);
1577 	digest_size = crypto_ahash_digestsize(tfm);
1578 	tcp_sigpool_end(&hp);
1579 
1580 	size = sizeof(struct tcp_ao_key) + (digest_size << 1);
1581 	key = sock_kmalloc(sk, size, GFP_KERNEL);
1582 	if (!key) {
1583 		err = -ENOMEM;
1584 		goto err_free_pool;
1585 	}
1586 
1587 	key->tcp_sigpool_id = pool_id;
1588 	key->digest_size = digest_size;
1589 	return key;
1590 
1591 err_free_pool:
1592 	tcp_sigpool_release(pool_id);
1593 	return ERR_PTR(err);
1594 }
1595 
tcp_ao_add_cmd(struct sock * sk,unsigned short int family,sockptr_t optval,int optlen)1596 static int tcp_ao_add_cmd(struct sock *sk, unsigned short int family,
1597 			  sockptr_t optval, int optlen)
1598 {
1599 	struct tcp_ao_info *ao_info;
1600 	union tcp_ao_addr *addr;
1601 	struct tcp_ao_key *key;
1602 	struct tcp_ao_add cmd;
1603 	int ret, l3index = 0;
1604 	bool first = false;
1605 
1606 	if (optlen < sizeof(cmd))
1607 		return -EINVAL;
1608 
1609 	ret = copy_struct_from_sockptr(&cmd, sizeof(cmd), optval, optlen);
1610 	if (ret)
1611 		return ret;
1612 
1613 	if (cmd.keylen > TCP_AO_MAXKEYLEN)
1614 		return -EINVAL;
1615 
1616 	if (cmd.reserved != 0 || cmd.reserved2 != 0)
1617 		return -EINVAL;
1618 
1619 	if (family == AF_INET)
1620 		ret = tcp_ao_verify_ipv4(sk, &cmd, &addr);
1621 	else
1622 		ret = tcp_ao_verify_ipv6(sk, &cmd, &addr, &family);
1623 	if (ret)
1624 		return ret;
1625 
1626 	if (cmd.keyflags & ~TCP_AO_KEYF_ALL)
1627 		return -EINVAL;
1628 
1629 	if (cmd.set_current || cmd.set_rnext) {
1630 		if (!tcp_ao_can_set_current_rnext(sk))
1631 			return -EINVAL;
1632 	}
1633 
1634 	if (cmd.ifindex && !(cmd.keyflags & TCP_AO_KEYF_IFINDEX))
1635 		return -EINVAL;
1636 
1637 	/* For cmd.tcp_ifindex = 0 the key will apply to the default VRF */
1638 	if (cmd.keyflags & TCP_AO_KEYF_IFINDEX && cmd.ifindex) {
1639 		int bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
1640 		struct net_device *dev;
1641 
1642 		rcu_read_lock();
1643 		dev = dev_get_by_index_rcu(sock_net(sk), cmd.ifindex);
1644 		if (dev && netif_is_l3_master(dev))
1645 			l3index = dev->ifindex;
1646 		rcu_read_unlock();
1647 
1648 		if (!dev || !l3index)
1649 			return -EINVAL;
1650 
1651 		if (!bound_dev_if || bound_dev_if != cmd.ifindex) {
1652 			/* tcp_ao_established_key() doesn't expect having
1653 			 * non peer-matching key on an established TCP-AO
1654 			 * connection.
1655 			 */
1656 			if (!((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)))
1657 				return -EINVAL;
1658 		}
1659 
1660 		/* It's still possible to bind after adding keys or even
1661 		 * re-bind to a different dev (with CAP_NET_RAW).
1662 		 * So, no reason to return error here, rather try to be
1663 		 * nice and warn the user.
1664 		 */
1665 		if (bound_dev_if && bound_dev_if != cmd.ifindex)
1666 			net_warn_ratelimited("AO key ifindex %d != sk bound ifindex %d\n",
1667 					     cmd.ifindex, bound_dev_if);
1668 	}
1669 
1670 	/* Don't allow keys for peers that have a matching TCP-MD5 key */
1671 	if (cmd.keyflags & TCP_AO_KEYF_IFINDEX) {
1672 		/* Non-_exact version of tcp_md5_do_lookup() will
1673 		 * as well match keys that aren't bound to a specific VRF
1674 		 * (that will make them match AO key with
1675 		 * sysctl_tcp_l3dev_accept = 1
1676 		 */
1677 		if (tcp_md5_do_lookup(sk, l3index, addr, family))
1678 			return -EKEYREJECTED;
1679 	} else {
1680 		if (tcp_md5_do_lookup_any_l3index(sk, addr, family))
1681 			return -EKEYREJECTED;
1682 	}
1683 
1684 	ao_info = setsockopt_ao_info(sk);
1685 	if (IS_ERR(ao_info))
1686 		return PTR_ERR(ao_info);
1687 
1688 	if (!ao_info) {
1689 		ao_info = tcp_ao_alloc_info(GFP_KERNEL);
1690 		if (!ao_info)
1691 			return -ENOMEM;
1692 		first = true;
1693 	} else {
1694 		/* Check that neither RecvID nor SendID match any
1695 		 * existing key for the peer, RFC5925 3.1:
1696 		 * > The IDs of MKTs MUST NOT overlap where their
1697 		 * > TCP connection identifiers overlap.
1698 		 */
1699 		if (__tcp_ao_do_lookup(sk, l3index, addr, family, cmd.prefix, -1, cmd.rcvid))
1700 			return -EEXIST;
1701 		if (__tcp_ao_do_lookup(sk, l3index, addr, family,
1702 				       cmd.prefix, cmd.sndid, -1))
1703 			return -EEXIST;
1704 	}
1705 
1706 	key = tcp_ao_key_alloc(sk, &cmd);
1707 	if (IS_ERR(key)) {
1708 		ret = PTR_ERR(key);
1709 		goto err_free_ao;
1710 	}
1711 
1712 	INIT_HLIST_NODE(&key->node);
1713 	memcpy(&key->addr, addr, (family == AF_INET) ? sizeof(struct in_addr) :
1714 						       sizeof(struct in6_addr));
1715 	key->prefixlen	= cmd.prefix;
1716 	key->family	= family;
1717 	key->keyflags	= cmd.keyflags;
1718 	key->sndid	= cmd.sndid;
1719 	key->rcvid	= cmd.rcvid;
1720 	key->l3index	= l3index;
1721 	atomic64_set(&key->pkt_good, 0);
1722 	atomic64_set(&key->pkt_bad, 0);
1723 
1724 	ret = tcp_ao_parse_crypto(&cmd, key);
1725 	if (ret < 0)
1726 		goto err_free_sock;
1727 
1728 	if (!((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) {
1729 		tcp_ao_cache_traffic_keys(sk, ao_info, key);
1730 		if (first) {
1731 			ao_info->current_key = key;
1732 			ao_info->rnext_key = key;
1733 		}
1734 	}
1735 
1736 	tcp_ao_link_mkt(ao_info, key);
1737 	if (first) {
1738 		if (!static_branch_inc(&tcp_ao_needed.key)) {
1739 			ret = -EUSERS;
1740 			goto err_free_sock;
1741 		}
1742 		sk_gso_disable(sk);
1743 		rcu_assign_pointer(tcp_sk(sk)->ao_info, ao_info);
1744 	}
1745 
1746 	if (cmd.set_current)
1747 		WRITE_ONCE(ao_info->current_key, key);
1748 	if (cmd.set_rnext)
1749 		WRITE_ONCE(ao_info->rnext_key, key);
1750 	return 0;
1751 
1752 err_free_sock:
1753 	atomic_sub(tcp_ao_sizeof_key(key), &sk->sk_omem_alloc);
1754 	tcp_sigpool_release(key->tcp_sigpool_id);
1755 	kfree_sensitive(key);
1756 err_free_ao:
1757 	if (first)
1758 		kfree(ao_info);
1759 	return ret;
1760 }
1761 
tcp_ao_delete_key(struct sock * sk,struct tcp_ao_info * ao_info,bool del_async,struct tcp_ao_key * key,struct tcp_ao_key * new_current,struct tcp_ao_key * new_rnext)1762 static int tcp_ao_delete_key(struct sock *sk, struct tcp_ao_info *ao_info,
1763 			     bool del_async, struct tcp_ao_key *key,
1764 			     struct tcp_ao_key *new_current,
1765 			     struct tcp_ao_key *new_rnext)
1766 {
1767 	int err;
1768 
1769 	hlist_del_rcu(&key->node);
1770 
1771 	/* Support for async delete on listening sockets: as they don't
1772 	 * need current_key/rnext_key maintaining, we don't need to check
1773 	 * them and we can just free all resources in RCU fashion.
1774 	 */
1775 	if (del_async) {
1776 		atomic_sub(tcp_ao_sizeof_key(key), &sk->sk_omem_alloc);
1777 		call_rcu(&key->rcu, tcp_ao_key_free_rcu);
1778 		return 0;
1779 	}
1780 
1781 	/* At this moment another CPU could have looked this key up
1782 	 * while it was unlinked from the list. Wait for RCU grace period,
1783 	 * after which the key is off-list and can't be looked up again;
1784 	 * the rx path [just before RCU came] might have used it and set it
1785 	 * as current_key (very unlikely).
1786 	 * Free the key with next RCU grace period (in case it was
1787 	 * current_key before tcp_ao_current_rnext() might have
1788 	 * changed it in forced-delete).
1789 	 */
1790 	synchronize_rcu();
1791 	if (new_current)
1792 		WRITE_ONCE(ao_info->current_key, new_current);
1793 	if (new_rnext)
1794 		WRITE_ONCE(ao_info->rnext_key, new_rnext);
1795 
1796 	if (unlikely(READ_ONCE(ao_info->current_key) == key ||
1797 		     READ_ONCE(ao_info->rnext_key) == key)) {
1798 		err = -EBUSY;
1799 		goto add_key;
1800 	}
1801 
1802 	atomic_sub(tcp_ao_sizeof_key(key), &sk->sk_omem_alloc);
1803 	call_rcu(&key->rcu, tcp_ao_key_free_rcu);
1804 
1805 	return 0;
1806 add_key:
1807 	hlist_add_head_rcu(&key->node, &ao_info->head);
1808 	return err;
1809 }
1810 
1811 #define TCP_AO_DEL_KEYF_ALL (TCP_AO_KEYF_IFINDEX)
/* tcp_ao_del_cmd() - setsockopt(TCP_AO_DEL_KEY) handler.
 * Removes the MKT matching <addr, prefix, sndid, rcvid, l3index>,
 * optionally switching current_key/rnext_key to other keys first.
 * A key that is (or would become) current/rnext can't be removed.
 * Returns 0 on success or a negative errno.
 */
static int tcp_ao_del_cmd(struct sock *sk, unsigned short int family,
			  sockptr_t optval, int optlen)
{
	struct tcp_ao_key *key, *new_current = NULL, *new_rnext = NULL;
	int err, addr_len, l3index = 0;
	struct tcp_ao_info *ao_info;
	union tcp_ao_addr *addr;
	struct tcp_ao_del cmd;
	__u8 prefix;
	u16 port;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	err = copy_struct_from_sockptr(&cmd, sizeof(cmd), optval, optlen);
	if (err)
		return err;

	if (cmd.reserved != 0 || cmd.reserved2 != 0)
		return -EINVAL;

	if (cmd.set_current || cmd.set_rnext) {
		if (!tcp_ao_can_set_current_rnext(sk))
			return -EINVAL;
	}

	if (cmd.keyflags & ~TCP_AO_DEL_KEYF_ALL)
		return -EINVAL;

	/* No sanity check for TCP_AO_KEYF_IFINDEX as if a VRF
	 * was destroyed, there still should be a way to delete keys,
	 * that were bound to that l3intf. So, fail late at lookup stage
	 * if there is no key for that ifindex.
	 */
	if (cmd.ifindex && !(cmd.keyflags & TCP_AO_KEYF_IFINDEX))
		return -EINVAL;

	/* Select keys bound to the given l3intf (or, without the flag,
	 * only keys that aren't bound to any VRF).  Without this
	 * assignment l3index would stay 0 and the lookup below could
	 * never match a key that was added with TCP_AO_KEYF_IFINDEX.
	 */
	if (cmd.keyflags & TCP_AO_KEYF_IFINDEX)
		l3index = cmd.ifindex;

	ao_info = setsockopt_ao_info(sk);
	if (IS_ERR(ao_info))
		return PTR_ERR(ao_info);
	if (!ao_info)
		return -ENOENT;

	/* For sockets in TCP_CLOSED it's possible set keys that aren't
	 * matching the future peer (address/VRF/etc),
	 * tcp_ao_connect_init() will choose a correct matching MKT
	 * if there's any.
	 */
	if (cmd.set_current) {
		new_current = tcp_ao_established_key(sk, ao_info, cmd.current_key, -1);
		if (!new_current)
			return -ENOENT;
	}
	if (cmd.set_rnext) {
		new_rnext = tcp_ao_established_key(sk, ao_info, -1, cmd.rnext);
		if (!new_rnext)
			return -ENOENT;
	}
	/* Asynchronous (RCU-deferred) removal only makes sense for listeners */
	if (cmd.del_async && sk->sk_state != TCP_LISTEN)
		return -EINVAL;

	if (family == AF_INET) {
		struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.addr;

		addr = (union tcp_ao_addr *)&sin->sin_addr;
		addr_len = sizeof(struct in_addr);
		port = ntohs(sin->sin_port);
	} else {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.addr;
		struct in6_addr *addr6 = &sin6->sin6_addr;

		if (ipv6_addr_v4mapped(addr6)) {
			/* Keys for v4-mapped peers are stored as IPv4 */
			addr = (union tcp_ao_addr *)&addr6->s6_addr32[3];
			addr_len = sizeof(struct in_addr);
			family = AF_INET;
		} else {
			addr = (union tcp_ao_addr *)addr6;
			addr_len = sizeof(struct in6_addr);
		}
		port = ntohs(sin6->sin6_port);
	}
	prefix = cmd.prefix;

	/* Currently matching is not performed on port (or port ranges) */
	if (port != 0)
		return -EINVAL;

	/* We could choose random present key here for current/rnext
	 * but that's less predictable. Let's be strict and don't
	 * allow removing a key that's in use. RFC5925 doesn't
	 * specify how-to coordinate key removal, but says:
	 * "It is presumed that an MKT affecting a particular
	 * connection cannot be destroyed during an active connection"
	 */
	hlist_for_each_entry_rcu(key, &ao_info->head, node,
				 lockdep_sock_is_held(sk)) {
		if (cmd.sndid != key->sndid ||
		    cmd.rcvid != key->rcvid)
			continue;

		if (family != key->family ||
		    prefix != key->prefixlen ||
		    memcmp(addr, &key->addr, addr_len))
			continue;

		if ((cmd.keyflags & TCP_AO_KEYF_IFINDEX) !=
		    (key->keyflags & TCP_AO_KEYF_IFINDEX))
			continue;

		if (key->l3index != l3index)
			continue;

		if (key == new_current || key == new_rnext)
			continue;

		return tcp_ao_delete_key(sk, ao_info, cmd.del_async, key,
					 new_current, new_rnext);
	}
	return -ENOENT;
}
1932 
1933 /* cmd.ao_required makes a socket TCP-AO only.
1934  * Don't allow any md5 keys for any l3intf on the socket together with it.
1935  * Restricting it early in setsockopt() removes a check for
1936  * ao_info->ao_required on inbound tcp segment fast-path.
1937  */
/* Returns 1 if the socket has any TCP-MD5 key installed (on any l3intf),
 * 0 otherwise.  Used to reject ao_required on sockets that already do MD5.
 * Always 0 when TCP-MD5 is compiled out.
 */
static int tcp_ao_required_verify(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	const struct tcp_md5sig_info *info;

	/* Fast-path out: no MD5 users anywhere in the system */
	if (static_branch_unlikely(&tcp_md5_needed.key)) {
		info = rcu_dereference_check(tcp_sk(sk)->md5sig_info,
					     lockdep_sock_is_held(sk));
		/* Non-empty key list means MD5 is in use on this socket */
		if (info &&
		    rcu_dereference_check(hlist_first_rcu(&info->head),
					  lockdep_sock_is_held(sk)))
			return 1;
	}
#endif
	return 0;
}
1957 
/* tcp_ao_info_cmd() - setsockopt(TCP_AO_INFO) handler.
 * Sets per-socket TCP-AO properties (::ao_required, ::accept_icmps),
 * optionally switches current/rnext keys and overwrites the per-socket
 * packet counters.  Allocates the socket's tcp_ao_info on first use,
 * which is only allowed in TCP_LISTEN/TCP_CLOSE states.
 * Note: @family is unused in this handler.
 * Returns 0 on success or a negative errno.
 */
static int tcp_ao_info_cmd(struct sock *sk, unsigned short int family,
			   sockptr_t optval, int optlen)
{
	struct tcp_ao_key *new_current = NULL, *new_rnext = NULL;
	struct tcp_ao_info *ao_info;
	struct tcp_ao_info_opt cmd;
	bool first = false;
	int err;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	/* Larger optlen from newer userspace is fine as long as the
	 * trailing bytes are zero (copy_struct_from_sockptr() checks).
	 */
	err = copy_struct_from_sockptr(&cmd, sizeof(cmd), optval, optlen);
	if (err)
		return err;

	if (cmd.set_current || cmd.set_rnext) {
		if (!tcp_ao_can_set_current_rnext(sk))
			return -EINVAL;
	}

	if (cmd.reserved != 0 || cmd.reserved2 != 0)
		return -EINVAL;

	ao_info = setsockopt_ao_info(sk);
	if (IS_ERR(ao_info))
		return PTR_ERR(ao_info);
	if (!ao_info) {
		/* First TCP_AO_INFO on this socket: allocate the state */
		if (!((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)))
			return -EINVAL;
		ao_info = tcp_ao_alloc_info(GFP_KERNEL);
		if (!ao_info)
			return -ENOMEM;
		first = true;
	}

	/* ao_required can't coexist with installed TCP-MD5 keys,
	 * see tcp_ao_required_verify().
	 */
	if (cmd.ao_required && tcp_ao_required_verify(sk)) {
		err = -EKEYREJECTED;
		goto out;
	}

	/* For sockets in TCP_CLOSED it's possible set keys that aren't
	 * matching the future peer (address/port/VRF/etc),
	 * tcp_ao_connect_init() will choose a correct matching MKT
	 * if there's any.
	 */
	if (cmd.set_current) {
		new_current = tcp_ao_established_key(sk, ao_info, cmd.current_key, -1);
		if (!new_current) {
			err = -ENOENT;
			goto out;
		}
	}
	if (cmd.set_rnext) {
		new_rnext = tcp_ao_established_key(sk, ao_info, -1, cmd.rnext);
		if (!new_rnext) {
			err = -ENOENT;
			goto out;
		}
	}
	if (cmd.set_counters) {
		atomic64_set(&ao_info->counters.pkt_good, cmd.pkt_good);
		atomic64_set(&ao_info->counters.pkt_bad, cmd.pkt_bad);
		atomic64_set(&ao_info->counters.key_not_found, cmd.pkt_key_not_found);
		atomic64_set(&ao_info->counters.ao_required, cmd.pkt_ao_required);
		atomic64_set(&ao_info->counters.dropped_icmp, cmd.pkt_dropped_icmp);
	}

	ao_info->ao_required = cmd.ao_required;
	ao_info->accept_icmps = cmd.accept_icmps;
	/* WRITE_ONCE(): these pointers have lockless readers */
	if (new_current)
		WRITE_ONCE(ao_info->current_key, new_current);
	if (new_rnext)
		WRITE_ONCE(ao_info->rnext_key, new_rnext);
	if (first) {
		/* static_branch_inc() fails when the reference count
		 * would saturate, hence -EUSERS.
		 */
		if (!static_branch_inc(&tcp_ao_needed.key)) {
			err = -EUSERS;
			goto out;
		}
		sk_gso_disable(sk);
		/* Publish only after the state is fully set up */
		rcu_assign_pointer(tcp_sk(sk)->ao_info, ao_info);
	}
	return 0;
out:
	/* Plain kfree() is safe only for a fresh ao_info that was never
	 * published via rcu_assign_pointer() above.
	 */
	if (first)
		kfree(ao_info);
	return err;
}
2046 
/* Dispatch a TCP-AO setsockopt() to the per-command handler.
 * Only AF_INET/AF_INET6 are supported; anything else is a caller bug.
 */
int tcp_parse_ao(struct sock *sk, int cmd, unsigned short int family,
		 sockptr_t optval, int optlen)
{
	if (WARN_ON_ONCE(family != AF_INET && family != AF_INET6))
		return -EAFNOSUPPORT;

	if (cmd == TCP_AO_ADD_KEY)
		return tcp_ao_add_cmd(sk, family, optval, optlen);
	if (cmd == TCP_AO_DEL_KEY)
		return tcp_ao_del_cmd(sk, family, optval, optlen);
	if (cmd == TCP_AO_INFO)
		return tcp_ao_info_cmd(sk, family, optval, optlen);

	/* Callers pass only the three commands above */
	WARN_ON_ONCE(1);
	return -EINVAL;
}
2065 
/* IPv4 entry point for TCP-AO setsockopt() commands: dispatch with AF_INET */
int tcp_v4_parse_ao(struct sock *sk, int cmd, sockptr_t optval, int optlen)
{
	return tcp_parse_ao(sk, cmd, AF_INET, optval, optlen);
}
2070 
2071 /* tcp_ao_copy_mkts_to_user(ao_info, optval, optlen)
2072  *
2073  * @ao_info:	struct tcp_ao_info on the socket that
2074  *		socket getsockopt(TCP_AO_GET_KEYS) is executed on
2075  * @optval:	pointer to array of tcp_ao_getsockopt structures in user space.
2076  *		Must be != NULL.
2077  * @optlen:	pointer to size of tcp_ao_getsockopt structure.
2078  *		Must be != NULL.
2079  *
2080  * Return value: 0 on success, a negative error number otherwise.
2081  *
2082  * optval points to an array of tcp_ao_getsockopt structures in user space.
2083  * optval[0] is used as both input and output to getsockopt. It determines
2084  * which keys are returned by the kernel.
2085  * optval[0].nkeys is the size of the array in user space. On return it contains
2086  * the number of keys matching the search criteria.
2087  * If tcp_ao_getsockopt::get_all is set, then all keys in the socket are
2088  * returned, otherwise only keys matching <addr, prefix, sndid, rcvid>
2089  * in optval[0] are returned.
2090  * optlen is also used as both input and output. The user provides the size
2091  * of struct tcp_ao_getsockopt in user space, and the kernel returns the size
2092  * of the structure in kernel space.
2093  * The size of struct tcp_ao_getsockopt may differ between user and kernel.
2094  * There are three cases to consider:
2095  *  * If usize == ksize, then keys are copied verbatim.
2096  *  * If usize < ksize, then the userspace has passed an old struct to a
2097  *    newer kernel. The rest of the trailing bytes in optval[0]
2098  *    (ksize - usize) are interpreted as 0 by the kernel.
2099  *  * If usize > ksize, then the userspace has passed a new struct to an
2100  *    older kernel. The trailing bytes unknown to the kernel (usize - ksize)
2101  *    are checked to ensure they are zeroed, otherwise -E2BIG is returned.
2102  * On return the kernel fills in min(usize, ksize) in each entry of the array.
2103  * The layout of the fields in the user and kernel structures is expected to
2104  * be the same (including in the 32bit vs 64bit case).
2105  */
/* Dump MKTs to userspace for getsockopt(TCP_AO_GET_KEYS); the full
 * input/output contract is in the comment block above this function.
 */
static int tcp_ao_copy_mkts_to_user(const struct sock *sk,
				    struct tcp_ao_info *ao_info,
				    sockptr_t optval, sockptr_t optlen)
{
	struct tcp_ao_getsockopt opt_in, opt_out;
	struct tcp_ao_key *key, *current_key;
	bool do_address_matching = true;
	union tcp_ao_addr *addr = NULL;
	int err, l3index, user_len;
	unsigned int max_keys;	/* maximum number of keys to copy to user */
	size_t out_offset = 0;
	size_t bytes_to_write;	/* number of bytes to write to user level */
	u32 matched_keys;	/* keys from ao_info matched so far */
	int optlen_out;
	__be16 port = 0;

	/* user_len is the userspace sizeof(struct tcp_ao_getsockopt),
	 * which may differ from the kernel's (see comment above).
	 */
	if (copy_from_sockptr(&user_len, optlen, sizeof(int)))
		return -EFAULT;

	if (user_len <= 0)
		return -EINVAL;

	/* Trailing bytes unknown to the kernel (user_len > ksize) must
	 * be zero, otherwise copy_struct_from_sockptr() returns -E2BIG.
	 */
	memset(&opt_in, 0, sizeof(struct tcp_ao_getsockopt));
	err = copy_struct_from_sockptr(&opt_in, sizeof(opt_in),
				       optval, user_len);
	if (err < 0)
		return err;

	/* ::pkt_good/::pkt_bad are output-only counters */
	if (opt_in.pkt_good || opt_in.pkt_bad)
		return -EINVAL;
	if (opt_in.keyflags & ~TCP_AO_GET_KEYF_VALID)
		return -EINVAL;
	if (opt_in.ifindex && !(opt_in.keyflags & TCP_AO_KEYF_IFINDEX))
		return -EINVAL;

	if (opt_in.reserved != 0)
		return -EINVAL;

	max_keys = opt_in.nkeys;
	/* l3index == -1 means "don't match on l3index" in tcp_ao_key_cmp() */
	l3index = (opt_in.keyflags & TCP_AO_KEYF_IFINDEX) ? opt_in.ifindex : -1;

	/* get_all/is_current/is_rnext select keys without address matching;
	 * get_all can't be combined with the other two.
	 */
	if (opt_in.get_all || opt_in.is_current || opt_in.is_rnext) {
		if (opt_in.get_all && (opt_in.is_current || opt_in.is_rnext))
			return -EINVAL;
		do_address_matching = false;
	}

	switch (opt_in.addr.ss_family) {
	case AF_INET: {
		struct sockaddr_in *sin;
		__be32 mask;

		sin = (struct sockaddr_in *)&opt_in.addr;
		port = sin->sin_port;
		addr = (union tcp_ao_addr *)&sin->sin_addr;

		if (opt_in.prefix > 32)
			return -EINVAL;

		/* INADDR_ANY is only valid together with a zero prefix */
		if (ntohl(sin->sin_addr.s_addr) == INADDR_ANY &&
		    opt_in.prefix != 0)
			return -EINVAL;

		/* The address must not have bits set outside its prefix */
		mask = inet_make_mask(opt_in.prefix);
		if (sin->sin_addr.s_addr & ~mask)
			return -EINVAL;

		break;
	}
	case AF_INET6: {
		struct sockaddr_in6 *sin6;
		struct in6_addr *addr6;

		sin6 = (struct sockaddr_in6 *)&opt_in.addr;
		addr = (union tcp_ao_addr *)&sin6->sin6_addr;
		addr6 = &sin6->sin6_addr;
		port = sin6->sin6_port;

		/* We don't have to change family and @addr here if
		 * ipv6_addr_v4mapped() like in key adding:
		 * tcp_ao_key_cmp() does it. Do the sanity checks though.
		 */
		if (opt_in.prefix != 0) {
			if (ipv6_addr_v4mapped(addr6)) {
				__be32 mask, addr4 = addr6->s6_addr32[3];

				/* Same checks as the AF_INET case above,
				 * applied to the embedded IPv4 address.
				 */
				if (opt_in.prefix > 32 ||
				    ntohl(addr4) == INADDR_ANY)
					return -EINVAL;
				mask = inet_make_mask(opt_in.prefix);
				if (addr4 & ~mask)
					return -EINVAL;
			} else {
				struct in6_addr pfx;

				if (ipv6_addr_any(addr6) ||
				    opt_in.prefix > 128)
					return -EINVAL;

				/* No bits allowed outside the prefix */
				ipv6_addr_prefix(&pfx, addr6, opt_in.prefix);
				if (ipv6_addr_cmp(&pfx, addr6))
					return -EINVAL;
			}
		} else if (!ipv6_addr_any(addr6)) {
			return -EINVAL;
		}
		break;
	}
	case 0:
		/* Zeroed address is fine when no address matching is asked */
		if (!do_address_matching)
			break;
		fallthrough;
	default:
		return -EAFNOSUPPORT;
	}

	if (!do_address_matching) {
		/* We could just ignore those, but let's do stricter checks */
		if (addr || port)
			return -EINVAL;
		if (opt_in.prefix || opt_in.sndid || opt_in.rcvid)
			return -EINVAL;
	}

	/* Each entry gets min(usize, ksize) bytes, see comment above */
	bytes_to_write = min_t(int, user_len, sizeof(struct tcp_ao_getsockopt));
	matched_keys = 0;
	/* May change in RX, while we're dumping, pre-fetch it */
	current_key = READ_ONCE(ao_info->current_key);

	hlist_for_each_entry_rcu(key, &ao_info->head, node,
				 lockdep_sock_is_held(sk)) {
		if (opt_in.get_all)
			goto match;

		if (opt_in.is_current || opt_in.is_rnext) {
			if (opt_in.is_current && key == current_key)
				goto match;
			if (opt_in.is_rnext && key == ao_info->rnext_key)
				goto match;
			continue;
		}

		if (tcp_ao_key_cmp(key, l3index, addr, opt_in.prefix,
				   opt_in.addr.ss_family,
				   opt_in.sndid, opt_in.rcvid) != 0)
			continue;
match:
		/* Keep counting matches past max_keys: the total is
		 * reported back in ::nkeys so userspace can size its
		 * array accordingly and retry.
		 */
		matched_keys++;
		if (matched_keys > max_keys)
			continue;

		memset(&opt_out, 0, sizeof(struct tcp_ao_getsockopt));

		if (key->family == AF_INET) {
			struct sockaddr_in *sin_out = (struct sockaddr_in *)&opt_out.addr;

			sin_out->sin_family = key->family;
			sin_out->sin_port = 0;
			memcpy(&sin_out->sin_addr, &key->addr, sizeof(struct in_addr));
		} else {
			struct sockaddr_in6 *sin6_out = (struct sockaddr_in6 *)&opt_out.addr;

			sin6_out->sin6_family = key->family;
			sin6_out->sin6_port = 0;
			memcpy(&sin6_out->sin6_addr, &key->addr, sizeof(struct in6_addr));
		}
		opt_out.sndid = key->sndid;
		opt_out.rcvid = key->rcvid;
		opt_out.prefix = key->prefixlen;
		opt_out.keyflags = key->keyflags;
		opt_out.is_current = (key == current_key);
		opt_out.is_rnext = (key == ao_info->rnext_key);
		opt_out.nkeys = 0;
		opt_out.maclen = key->maclen;
		opt_out.keylen = key->keylen;
		opt_out.ifindex = key->l3index;
		opt_out.pkt_good = atomic64_read(&key->pkt_good);
		opt_out.pkt_bad = atomic64_read(&key->pkt_bad);
		memcpy(&opt_out.key, key->key, key->keylen);
		tcp_sigpool_algo(key->tcp_sigpool_id, opt_out.alg_name, 64);

		/* Copy key to user */
		if (copy_to_sockptr_offset(optval, out_offset,
					   &opt_out, bytes_to_write))
			return -EFAULT;
		/* Advance by the *user's* struct size to keep their stride */
		out_offset += user_len;
	}

	/* Report the kernel's struct size back through optlen */
	optlen_out = (int)sizeof(struct tcp_ao_getsockopt);
	if (copy_to_sockptr(optlen, &optlen_out, sizeof(int)))
		return -EFAULT;

	/* Report the total number of matched keys in optval[0].nkeys */
	out_offset = offsetof(struct tcp_ao_getsockopt, nkeys);
	if (copy_to_sockptr_offset(optval, out_offset,
				   &matched_keys, sizeof(u32)))
		return -EFAULT;

	return 0;
}
2305 
/* getsockopt(TCP_AO_GET_KEYS): fetch the socket's AO state and dump MKTs.
 * Returns -ENOENT when TCP-AO was never set up on this socket.
 */
int tcp_ao_get_mkts(struct sock *sk, sockptr_t optval, sockptr_t optlen)
{
	struct tcp_ao_info *ao_info = setsockopt_ao_info(sk);

	if (IS_ERR(ao_info))
		return PTR_ERR(ao_info);
	if (!ao_info)
		return -ENOENT;

	return tcp_ao_copy_mkts_to_user(sk, ao_info, optval, optlen);
}
2318 
/* tcp_ao_get_sock_info() - getsockopt(TCP_AO_INFO) handler.
 * Fills (struct tcp_ao_info_opt) with the socket's AO properties,
 * packet counters and the sndid/rcvid of the current/rnext keys.
 * Returns 0 on success or a negative errno.
 */
int tcp_ao_get_sock_info(struct sock *sk, sockptr_t optval, sockptr_t optlen)
{
	struct tcp_ao_key *current_key, *rnext_key;
	struct tcp_ao_info_opt out, in = {};
	struct tcp_ao_info *ao;
	int err, len;

	if (copy_from_sockptr(&len, optlen, sizeof(int)))
		return -EFAULT;

	if (len <= 0)
		return -EINVAL;

	/* Copying this "in" only to check ::reserved, ::reserved2,
	 * that may be needed to extend (struct tcp_ao_info_opt) and
	 * what getsockopt() provides in future.
	 */
	err = copy_struct_from_sockptr(&in, sizeof(in), optval, len);
	if (err)
		return err;

	if (in.reserved != 0 || in.reserved2 != 0)
		return -EINVAL;

	ao = setsockopt_ao_info(sk);
	if (IS_ERR(ao))
		return PTR_ERR(ao);
	if (!ao)
		return -ENOENT;

	memset(&out, 0, sizeof(out));
	out.ao_required		= ao->ao_required;
	out.accept_icmps	= ao->accept_icmps;
	out.pkt_good		= atomic64_read(&ao->counters.pkt_good);
	out.pkt_bad		= atomic64_read(&ao->counters.pkt_bad);
	out.pkt_key_not_found	= atomic64_read(&ao->counters.key_not_found);
	out.pkt_ao_required	= atomic64_read(&ao->counters.ao_required);
	out.pkt_dropped_icmp	= atomic64_read(&ao->counters.dropped_icmp);

	/* Snapshot both pointers with READ_ONCE(): writers use WRITE_ONCE()
	 * (see tcp_ao_info_cmd()), and a plain re-read between the NULL
	 * check and the dereference could observe a different pointer.
	 */
	current_key = READ_ONCE(ao->current_key);
	if (current_key) {
		out.set_current = 1;
		out.current_key = current_key->sndid;
	}
	rnext_key = READ_ONCE(ao->rnext_key);
	if (rnext_key) {
		out.set_rnext = 1;
		out.rnext = rnext_key->rcvid;
	}

	if (copy_to_sockptr(optval, &out, min_t(int, len, sizeof(out))))
		return -EFAULT;

	return 0;
}
2373 
/* tcp_ao_set_repair() - setsockopt(TCP_AO_REPAIR) handler.
 * Restores TCP-AO sequence state (local/remote ISNs and SNEs) on a socket
 * in TCP_REPAIR mode, e.g. during checkpoint/restore, and re-derives the
 * cached traffic keys for every MKT afterwards.
 * Returns 0 on success or a negative errno.
 */
int tcp_ao_set_repair(struct sock *sk, sockptr_t optval, unsigned int optlen)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_ao_repair cmd;
	struct tcp_ao_key *key;
	struct tcp_ao_info *ao;
	int err;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	err = copy_struct_from_sockptr(&cmd, sizeof(cmd), optval, optlen);
	if (err)
		return err;

	/* Only sockets in repair mode may have their AO state overwritten */
	if (!tp->repair)
		return -EPERM;

	ao = setsockopt_ao_info(sk);
	if (IS_ERR(ao))
		return PTR_ERR(ao);
	if (!ao)
		return -ENOENT;

	/* WRITE_ONCE(): lockless readers exist, see tcp_ao_get_repair() */
	WRITE_ONCE(ao->lisn, cmd.snt_isn);
	WRITE_ONCE(ao->risn, cmd.rcv_isn);
	WRITE_ONCE(ao->snd_sne, cmd.snd_sne);
	WRITE_ONCE(ao->rcv_sne, cmd.rcv_sne);

	/* The cached traffic keys are derived from the ISNs (RFC5925),
	 * so they must be recomputed after the update above.
	 */
	hlist_for_each_entry_rcu(key, &ao->head, node, lockdep_sock_is_held(sk))
		tcp_ao_cache_traffic_keys(sk, ao, key);

	return 0;
}
2408 
/* tcp_ao_get_repair() - getsockopt(TCP_AO_REPAIR) handler.
 * Dumps TCP-AO sequence state (local/remote ISNs and SNEs) from a socket
 * in TCP_REPAIR mode, the counterpart of tcp_ao_set_repair().
 * Returns 0 on success or a negative errno.
 */
int tcp_ao_get_repair(struct sock *sk, sockptr_t optval, sockptr_t optlen)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_ao_repair opt;
	struct tcp_ao_info *ao;
	int len;

	if (copy_from_sockptr(&len, optlen, sizeof(int)))
		return -EFAULT;

	if (len <= 0)
		return -EINVAL;

	if (!tp->repair)
		return -EPERM;

	rcu_read_lock();
	ao = getsockopt_ao_info(sk);
	if (IS_ERR_OR_NULL(ao)) {
		rcu_read_unlock();
		return ao ? PTR_ERR(ao) : -ENOENT;
	}

	/* READ_ONCE() on all four fields: this runs under rcu_read_lock()
	 * only and pairs with the WRITE_ONCE() writers in
	 * tcp_ao_set_repair() — the ISN reads need the same annotation
	 * as the SNE reads below.
	 */
	opt.snt_isn	= READ_ONCE(ao->lisn);
	opt.rcv_isn	= READ_ONCE(ao->risn);
	opt.snd_sne	= READ_ONCE(ao->snd_sne);
	opt.rcv_sne	= READ_ONCE(ao->rcv_sne);
	rcu_read_unlock();

	if (copy_to_sockptr(optval, &opt, min_t(int, len, sizeof(opt))))
		return -EFAULT;
	return 0;
}
2442