xref: /linux/net/ipv4/esp4.c (revision ef2233850edc4cc0d5fc6136fcdb004a1ddfa7db)
1 // SPDX-License-Identifier: GPL-2.0-only
2 #define pr_fmt(fmt) "IPsec: " fmt
3 
4 #include <crypto/aead.h>
5 #include <crypto/authenc.h>
6 #include <linux/err.h>
7 #include <linux/module.h>
8 #include <net/ip.h>
9 #include <net/xfrm.h>
10 #include <net/esp.h>
11 #include <linux/scatterlist.h>
12 #include <linux/kernel.h>
13 #include <linux/pfkeyv2.h>
14 #include <linux/rtnetlink.h>
15 #include <linux/slab.h>
16 #include <linux/spinlock.h>
17 #include <linux/in6.h>
18 #include <net/icmp.h>
19 #include <net/protocol.h>
20 #include <net/udp.h>
21 #include <net/tcp.h>
22 #include <net/espintcp.h>
23 #include <linux/skbuff_ref.h>
24 
25 #include <linux/highmem.h>
26 
27 struct esp_skb_cb {
28 	struct xfrm_skb_cb xfrm;
29 	void *tmp;
30 };
31 
32 struct esp_output_extra {
33 	__be32 seqhi;
34 	u32 esphoff;
35 };
36 
37 #define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
38 
39 /*
40  * Allocate an AEAD request structure with extra space for SG and IV.
41  *
42  * For alignment considerations the IV is placed at the front, followed
43  * by the request and finally the SG list.
44  *
45  * TODO: Use spare space in skb for this where possible.
46  */
47 static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int extralen)
48 {
49 	unsigned int len;
50 
51 	len = extralen;
52 
53 	len += crypto_aead_ivsize(aead);
54 
55 	if (len) {
56 		len += crypto_aead_alignmask(aead) &
57 		       ~(crypto_tfm_ctx_alignment() - 1);
58 		len = ALIGN(len, crypto_tfm_ctx_alignment());
59 	}
60 
61 	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
62 	len = ALIGN(len, __alignof__(struct scatterlist));
63 
64 	len += sizeof(struct scatterlist) * nfrags;
65 
66 	return kmalloc(len, GFP_ATOMIC);
67 }
68 
69 static inline void *esp_tmp_extra(void *tmp)
70 {
71 	return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
72 }
73 
74 static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int extralen)
75 {
76 	return crypto_aead_ivsize(aead) ?
77 	       PTR_ALIGN((u8 *)tmp + extralen,
78 			 crypto_aead_alignmask(aead) + 1) : tmp + extralen;
79 }
80 
81 static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
82 {
83 	struct aead_request *req;
84 
85 	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
86 				crypto_tfm_ctx_alignment());
87 	aead_request_set_tfm(req, aead);
88 	return req;
89 }
90 
91 static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
92 					     struct aead_request *req)
93 {
94 	return (void *)ALIGN((unsigned long)(req + 1) +
95 			     crypto_aead_reqsize(aead),
96 			     __alignof__(struct scatterlist));
97 }
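
/* Illustrative sketch (not a definitive API) of how callers carve up the
 * buffer returned by esp_alloc_tmp(), assuming a non-ESN SA (extralen == 0):
 *
 *	void *tmp = esp_alloc_tmp(aead, nfrags, 0);
 *	u8 *iv = esp_tmp_iv(aead, tmp, 0);
 *	struct aead_request *req = esp_tmp_req(aead, iv);
 *	struct scatterlist *sg = esp_req_sg(aead, req);
 *
 * i.e. the layout is [extra][IV][aead_request + request ctx][sg[nfrags]],
 * with each region aligned as required by the helpers above.
 */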
98 
99 static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb)
100 {
101 	struct crypto_aead *aead = x->data;
102 	int extralen = 0;
103 	u8 *iv;
104 	struct aead_request *req;
105 	struct scatterlist *sg;
106 
107 	if (x->props.flags & XFRM_STATE_ESN)
108 		extralen += sizeof(struct esp_output_extra);
109 
110 	iv = esp_tmp_iv(aead, tmp, extralen);
111 	req = esp_tmp_req(aead, iv);
112 
113 	/* Unref skb_frag_pages in the src scatterlist if necessary.
114 	 * Skip the first sg which comes from skb->data.
115 	 */
116 	if (req->src != req->dst)
117 		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
118 			skb_page_unref(page_to_netmem(sg_page(sg)),
119 				       skb->pp_recycle);
120 }
121 
122 #ifdef CONFIG_INET_ESPINTCP
123 static struct sock *esp_find_tcp_sk(struct xfrm_state *x)
124 {
125 	struct xfrm_encap_tmpl *encap = x->encap;
126 	struct net *net = xs_net(x);
127 	__be16 sport, dport;
128 	struct sock *sk;
129 
130 	spin_lock_bh(&x->lock);
131 	sport = encap->encap_sport;
132 	dport = encap->encap_dport;
133 	spin_unlock_bh(&x->lock);
134 
135 	sk = inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, x->id.daddr.a4,
136 				     dport, x->props.saddr.a4, sport, 0);
137 	if (!sk)
138 		return ERR_PTR(-ENOENT);
139 
140 	if (!tcp_is_ulp_esp(sk)) {
141 		sock_put(sk);
142 		return ERR_PTR(-EINVAL);
143 	}
144 
145 	return sk;
146 }
147 
148 static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)
149 {
150 	struct sock *sk;
151 	int err;
152 
153 	rcu_read_lock();
154 
155 	sk = esp_find_tcp_sk(x);
156 	err = PTR_ERR_OR_ZERO(sk);
157 	if (err) {
158 		kfree_skb(skb);
159 		goto out;
160 	}
161 
162 	bh_lock_sock(sk);
163 	if (sock_owned_by_user(sk))
164 		err = espintcp_queue_out(sk, skb);
165 	else
166 		err = espintcp_push_skb(sk, skb);
167 	bh_unlock_sock(sk);
168 
169 	sock_put(sk);
170 
171 out:
172 	rcu_read_unlock();
173 	return err;
174 }
175 
176 static int esp_output_tcp_encap_cb(struct net *net, struct sock *sk,
177 				   struct sk_buff *skb)
178 {
179 	struct dst_entry *dst = skb_dst(skb);
180 	struct xfrm_state *x = dst->xfrm;
181 
182 	return esp_output_tcp_finish(x, skb);
183 }
184 
185 static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
186 {
187 	int err;
188 
189 	local_bh_disable();
190 	err = xfrm_trans_queue_net(xs_net(x), skb, esp_output_tcp_encap_cb);
191 	local_bh_enable();
192 
193 	/* EINPROGRESS just happens to do the right thing.  It
194 	 * actually means that the skb has been consumed and
195 	 * isn't coming back.
196 	 */
197 	return err ?: -EINPROGRESS;
198 }
199 #else
200 static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
201 {
202 	WARN_ON(1);
203 	return -EOPNOTSUPP;
204 }
205 #endif
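
/* Rough sketch of the userspace side that the TCP encapsulation code above
 * relies on (RFC 8229), shown only for orientation; the exact setup is done
 * by the keying daemon and the names below (fd, local_port, peer_port) are
 * hypothetical.  The daemon establishes a TCP connection to the peer,
 * attaches the "espintcp" ULP, and installs the SA with a matching encap
 * template:
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_ULP, "espintcp", sizeof("espintcp"));
 *
 *	struct xfrm_encap_tmpl encap = {
 *		.encap_type  = TCP_ENCAP_ESPINTCP,
 *		.encap_sport = local_port,	// ports of that TCP connection
 *		.encap_dport = peer_port,
 *	};
 *
 * esp_find_tcp_sk() then looks up the established socket from the SA's
 * addresses plus the encap ports and verifies the ULP is attached.
 */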
206 
207 static void esp_output_done(void *data, int err)
208 {
209 	struct sk_buff *skb = data;
210 	struct xfrm_offload *xo = xfrm_offload(skb);
211 	void *tmp;
212 	struct xfrm_state *x;
213 
214 	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
215 		struct sec_path *sp = skb_sec_path(skb);
216 
217 		x = sp->xvec[sp->len - 1];
218 	} else {
219 		x = skb_dst(skb)->xfrm;
220 	}
221 
222 	tmp = ESP_SKB_CB(skb)->tmp;
223 	esp_ssg_unref(x, tmp, skb);
224 	kfree(tmp);
225 
226 	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
227 		if (err) {
228 			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
229 			kfree_skb(skb);
230 			return;
231 		}
232 
233 		skb_push(skb, skb->data - skb_mac_header(skb));
234 		secpath_reset(skb);
235 		xfrm_dev_resume(skb);
236 	} else {
237 		if (!err &&
238 		    x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
239 			esp_output_tail_tcp(x, skb);
240 		else
241 			xfrm_output_resume(skb_to_full_sk(skb), skb, err);
242 	}
243 }
244 
245 /* Move ESP header back into place. */
246 static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
247 {
248 	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
249 	void *tmp = ESP_SKB_CB(skb)->tmp;
250 	__be32 *seqhi = esp_tmp_extra(tmp);
251 
252 	esph->seq_no = esph->spi;
253 	esph->spi = *seqhi;
254 }
255 
256 static void esp_output_restore_header(struct sk_buff *skb)
257 {
258 	void *tmp = ESP_SKB_CB(skb)->tmp;
259 	struct esp_output_extra *extra = esp_tmp_extra(tmp);
260 
261 	esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
262 				sizeof(__be32));
263 }
264 
265 static struct ip_esp_hdr *esp_output_set_extra(struct sk_buff *skb,
266 					       struct xfrm_state *x,
267 					       struct ip_esp_hdr *esph,
268 					       struct esp_output_extra *extra)
269 {
270 	/* For ESN we move the header forward by 4 bytes to
271 	 * accommodate the high bits.  We will move it back after
272 	 * encryption.
273 	 */
274 	if ((x->props.flags & XFRM_STATE_ESN)) {
275 		__u32 seqhi;
276 		struct xfrm_offload *xo = xfrm_offload(skb);
277 
278 		if (xo)
279 			seqhi = xo->seq.hi;
280 		else
281 			seqhi = XFRM_SKB_CB(skb)->seq.output.hi;
282 
283 		extra->esphoff = (unsigned char *)esph -
284 				 skb_transport_header(skb);
285 		esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
286 		extra->seqhi = esph->spi;
287 		esph->seq_no = htonl(seqhi);
288 	}
289 
290 	esph->spi = x->id.spi;
291 
292 	return esph;
293 }
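
/* Illustrative before/after of the ESN shift above (seq_lo was already
 * written by esp_output(); 32-bit words shown around the original header
 * position):
 *
 *	before:  [ .... ][ spi    ][ seq_lo ] ...payload...
 *	after:   [ spi  ][ seq_hi ][ seq_lo ] ...payload...
 *
 * Moving the header start back four bytes makes spi, seq_hi and seq_lo
 * contiguous so the AAD (assoclen includes the extra __be32) can cover all
 * three.  esp_output_restore_header() undoes the shift after encryption,
 * restoring the saved word from extra->seqhi.
 */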
294 
295 static void esp_output_done_esn(void *data, int err)
296 {
297 	struct sk_buff *skb = data;
298 
299 	esp_output_restore_header(skb);
300 	esp_output_done(data, err);
301 }
302 
303 static struct ip_esp_hdr *esp_output_udp_encap(struct sk_buff *skb,
304 					       int encap_type,
305 					       struct esp_info *esp,
306 					       __be16 sport,
307 					       __be16 dport)
308 {
309 	struct udphdr *uh;
310 	unsigned int len;
311 	struct xfrm_offload *xo = xfrm_offload(skb);
312 
313 	len = skb->len + esp->tailen - skb_transport_offset(skb);
314 	if (len + sizeof(struct iphdr) > IP_MAX_MTU)
315 		return ERR_PTR(-EMSGSIZE);
316 
317 	uh = (struct udphdr *)esp->esph;
318 	uh->source = sport;
319 	uh->dest = dport;
320 	uh->len = htons(len);
321 	uh->check = 0;
322 
323 	/* For IPv4 ESP with UDP encapsulation, if xo is not null, the skb is in the crypto offload
324 	 * data path, which means that esp_output_udp_encap is called outside of the XFRM stack.
325 	 * In this case, the mac header doesn't point to the IPv4 protocol field, so don't set it.
326 	 */
327 	if (!xo || encap_type != UDP_ENCAP_ESPINUDP)
328 		*skb_mac_header(skb) = IPPROTO_UDP;
329 
330 	return (struct ip_esp_hdr *)(uh + 1);
331 }
332 
333 #ifdef CONFIG_INET_ESPINTCP
334 static struct ip_esp_hdr *esp_output_tcp_encap(struct xfrm_state *x,
335 						    struct sk_buff *skb,
336 						    struct esp_info *esp)
337 {
338 	__be16 *lenp = (void *)esp->esph;
339 	struct ip_esp_hdr *esph;
340 	unsigned int len;
341 	struct sock *sk;
342 
343 	len = skb->len + esp->tailen - skb_transport_offset(skb);
344 	if (len > IP_MAX_MTU)
345 		return ERR_PTR(-EMSGSIZE);
346 
347 	rcu_read_lock();
348 	sk = esp_find_tcp_sk(x);
349 	rcu_read_unlock();
350 
351 	if (IS_ERR(sk))
352 		return ERR_CAST(sk);
353 
354 	sock_put(sk);
355 
356 	*lenp = htons(len);
357 	esph = (struct ip_esp_hdr *)(lenp + 1);
358 
359 	return esph;
360 }
361 #else
362 static struct ip_esp_hdr *esp_output_tcp_encap(struct xfrm_state *x,
363 						    struct sk_buff *skb,
364 						    struct esp_info *esp)
365 {
366 	return ERR_PTR(-EOPNOTSUPP);
367 }
368 #endif
369 
370 static int esp_output_encap(struct xfrm_state *x, struct sk_buff *skb,
371 			    struct esp_info *esp)
372 {
373 	struct xfrm_encap_tmpl *encap = x->encap;
374 	struct ip_esp_hdr *esph;
375 	__be16 sport, dport;
376 	int encap_type;
377 
378 	spin_lock_bh(&x->lock);
379 	sport = encap->encap_sport;
380 	dport = encap->encap_dport;
381 	encap_type = encap->encap_type;
382 	spin_unlock_bh(&x->lock);
383 
384 	switch (encap_type) {
385 	default:
386 	case UDP_ENCAP_ESPINUDP:
387 		esph = esp_output_udp_encap(skb, encap_type, esp, sport, dport);
388 		break;
389 	case TCP_ENCAP_ESPINTCP:
390 		esph = esp_output_tcp_encap(x, skb, esp);
391 		break;
392 	}
393 
394 	if (IS_ERR(esph))
395 		return PTR_ERR(esph);
396 
397 	esp->esph = esph;
398 
399 	return 0;
400 }
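
/* Minimal sketch of the encap template a NAT-T SA carries, typically
 * installed by the keying daemon (port values here are only an example):
 *
 *	struct xfrm_encap_tmpl encap = {
 *		.encap_type  = UDP_ENCAP_ESPINUDP,
 *		.encap_sport = htons(4500),
 *		.encap_dport = htons(4500),
 *	};
 *
 * With that in place, esp_output_encap() prepends a UDP header using those
 * ports and places the ESP header right behind it.
 */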
401 
402 int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
403 {
404 	u8 *tail;
405 	int nfrags;
406 	int esph_offset;
407 	struct page *page;
408 	struct sk_buff *trailer;
409 	int tailen = esp->tailen;
410 
411 	/* this is non-NULL only with TCP/UDP Encapsulation */
412 	if (x->encap) {
413 		int err = esp_output_encap(x, skb, esp);
414 
415 		if (err < 0)
416 			return err;
417 	}
418 
419 	if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE ||
420 	    ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE)
421 		goto cow;
422 
423 	if (!skb_cloned(skb)) {
424 		if (tailen <= skb_tailroom(skb)) {
425 			nfrags = 1;
426 			trailer = skb;
427 			tail = skb_tail_pointer(trailer);
428 
429 			goto skip_cow;
430 		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
431 			   && !skb_has_frag_list(skb)) {
432 			int allocsize;
433 			struct sock *sk = skb->sk;
434 			struct page_frag *pfrag = &x->xfrag;
435 
436 			esp->inplace = false;
437 
438 			allocsize = ALIGN(tailen, L1_CACHE_BYTES);
439 
440 			spin_lock_bh(&x->lock);
441 
442 			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
443 				spin_unlock_bh(&x->lock);
444 				goto cow;
445 			}
446 
447 			page = pfrag->page;
448 			get_page(page);
449 
450 			tail = page_address(page) + pfrag->offset;
451 
452 			esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
453 
454 			nfrags = skb_shinfo(skb)->nr_frags;
455 
456 			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
457 					     tailen);
458 			skb_shinfo(skb)->nr_frags = ++nfrags;
459 
460 			pfrag->offset = pfrag->offset + allocsize;
461 
462 			spin_unlock_bh(&x->lock);
463 
464 			nfrags++;
465 
466 			skb_len_add(skb, tailen);
467 			if (sk && sk_fullsock(sk))
468 				refcount_add(tailen, &sk->sk_wmem_alloc);
469 
470 			goto out;
471 		}
472 	}
473 
474 cow:
475 	esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb);
476 
477 	nfrags = skb_cow_data(skb, tailen, &trailer);
478 	if (nfrags < 0)
479 		goto out;
480 	tail = skb_tail_pointer(trailer);
481 	esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);
482 
483 skip_cow:
484 	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
485 	pskb_put(skb, trailer, tailen);
486 
487 out:
488 	return nfrags;
489 }
490 EXPORT_SYMBOL_GPL(esp_output_head);
491 
492 int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
493 {
494 	u8 *iv;
495 	int alen;
496 	void *tmp;
497 	int ivlen;
498 	int assoclen;
499 	int extralen;
500 	struct page *page;
501 	struct ip_esp_hdr *esph;
502 	struct crypto_aead *aead;
503 	struct aead_request *req;
504 	struct scatterlist *sg, *dsg;
505 	struct esp_output_extra *extra;
506 	int err = -ENOMEM;
507 
508 	assoclen = sizeof(struct ip_esp_hdr);
509 	extralen = 0;
510 
511 	if (x->props.flags & XFRM_STATE_ESN) {
512 		extralen += sizeof(*extra);
513 		assoclen += sizeof(__be32);
514 	}
515 
516 	aead = x->data;
517 	alen = crypto_aead_authsize(aead);
518 	ivlen = crypto_aead_ivsize(aead);
519 
520 	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, extralen);
521 	if (!tmp)
522 		goto error;
523 
524 	extra = esp_tmp_extra(tmp);
525 	iv = esp_tmp_iv(aead, tmp, extralen);
526 	req = esp_tmp_req(aead, iv);
527 	sg = esp_req_sg(aead, req);
528 
529 	if (esp->inplace)
530 		dsg = sg;
531 	else
532 		dsg = &sg[esp->nfrags];
533 
534 	esph = esp_output_set_extra(skb, x, esp->esph, extra);
535 	esp->esph = esph;
536 
537 	sg_init_table(sg, esp->nfrags);
538 	err = skb_to_sgvec(skb, sg,
539 		           (unsigned char *)esph - skb->data,
540 		           assoclen + ivlen + esp->clen + alen);
541 	if (unlikely(err < 0))
542 		goto error_free;
543 
544 	if (!esp->inplace) {
545 		int allocsize;
546 		struct page_frag *pfrag = &x->xfrag;
547 
548 		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);
549 
550 		spin_lock_bh(&x->lock);
551 		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
552 			spin_unlock_bh(&x->lock);
553 			goto error_free;
554 		}
555 
556 		skb_shinfo(skb)->nr_frags = 1;
557 
558 		page = pfrag->page;
559 		get_page(page);
560 		/* replace page frags in skb with new page */
561 		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
562 		pfrag->offset = pfrag->offset + allocsize;
563 		spin_unlock_bh(&x->lock);
564 
565 		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
566 		err = skb_to_sgvec(skb, dsg,
567 			           (unsigned char *)esph - skb->data,
568 			           assoclen + ivlen + esp->clen + alen);
569 		if (unlikely(err < 0))
570 			goto error_free;
571 	}
572 
573 	if ((x->props.flags & XFRM_STATE_ESN))
574 		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
575 	else
576 		aead_request_set_callback(req, 0, esp_output_done, skb);
577 
578 	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
579 	aead_request_set_ad(req, assoclen);
580 
581 	memset(iv, 0, ivlen);
582 	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
583 	       min(ivlen, 8));
584 
585 	ESP_SKB_CB(skb)->tmp = tmp;
586 	err = crypto_aead_encrypt(req);
587 
588 	switch (err) {
589 	case -EINPROGRESS:
590 		goto error;
591 
592 	case -ENOSPC:
593 		err = NET_XMIT_DROP;
594 		break;
595 
596 	case 0:
597 		if ((x->props.flags & XFRM_STATE_ESN))
598 			esp_output_restore_header(skb);
599 	}
600 
601 	if (sg != dsg)
602 		esp_ssg_unref(x, tmp, skb);
603 
604 	if (!err && x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
605 		err = esp_output_tail_tcp(x, skb);
606 
607 error_free:
608 	kfree(tmp);
609 error:
610 	return err;
611 }
612 EXPORT_SYMBOL_GPL(esp_output_tail);
613 
614 static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
615 {
616 	int alen;
617 	int blksize;
618 	struct ip_esp_hdr *esph;
619 	struct crypto_aead *aead;
620 	struct esp_info esp;
621 
622 	esp.inplace = true;
623 
624 	esp.proto = *skb_mac_header(skb);
625 	*skb_mac_header(skb) = IPPROTO_ESP;
626 
627 	/* skb is pure payload to encrypt */
628 
629 	aead = x->data;
630 	alen = crypto_aead_authsize(aead);
631 
632 	esp.tfclen = 0;
633 	if (x->tfcpad) {
634 		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
635 		u32 padto;
636 
637 		padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
638 		if (skb->len < padto)
639 			esp.tfclen = padto - skb->len;
640 	}
641 	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
642 	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
643 	esp.plen = esp.clen - skb->len - esp.tfclen;
644 	esp.tailen = esp.tfclen + esp.plen + alen;
645 
646 	esp.esph = ip_esp_hdr(skb);
647 
648 	esp.nfrags = esp_output_head(x, skb, &esp);
649 	if (esp.nfrags < 0)
650 		return esp.nfrags;
651 
652 	esph = esp.esph;
653 	esph->spi = x->id.spi;
654 
655 	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
656 	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
657 				 ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));
658 
659 	skb_push(skb, -skb_network_offset(skb));
660 
661 	return esp_output_tail(x, skb, &esp);
662 }
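
/* Worked example of the length computation above, assuming AES-GCM
 * (crypto_aead_blocksize() == 1, 16-byte ICV) and a 1000-byte payload with
 * no TFC padding:
 *
 *	blksize = ALIGN(1, 4)            =    4
 *	clen    = ALIGN(1000 + 2 + 0, 4) = 1004	// padded length incl. pad len + next hdr
 *	plen    = 1004 - 1000 - 0        =    4	// 2 pad bytes + pad len + next hdr
 *	tailen  = 0 + 4 + 16             =   20	// trailer + ICV appended to the skb
 */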
663 
664 static inline int esp_remove_trailer(struct sk_buff *skb)
665 {
666 	struct xfrm_state *x = xfrm_input_state(skb);
667 	struct crypto_aead *aead = x->data;
668 	int alen, hlen, elen;
669 	int padlen, trimlen;
670 	__wsum csumdiff;
671 	u8 nexthdr[2];
672 	int ret;
673 
674 	alen = crypto_aead_authsize(aead);
675 	hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
676 	elen = skb->len - hlen;
677 
678 	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
679 		BUG();
680 
681 	ret = -EINVAL;
682 	padlen = nexthdr[0];
683 	if (padlen + 2 + alen >= elen) {
684 		net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
685 				    padlen + 2, elen - alen);
686 		goto out;
687 	}
688 
689 	trimlen = alen + padlen + 2;
690 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
691 		csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
692 		skb->csum = csum_block_sub(skb->csum, csumdiff,
693 					   skb->len - trimlen);
694 	}
695 	ret = pskb_trim(skb, skb->len - trimlen);
696 	if (unlikely(ret))
697 		return ret;
698 
699 	ret = nexthdr[1];
700 
701 out:
702 	return ret;
703 }
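
/* Illustrative on-the-wire trailer that esp_remove_trailer() strips, for a
 * hypothetical packet with 2 bytes of padding and a 16-byte ICV:
 *
 *	... payload | 01 02 | 02 | 04 | ICV (16 bytes)
 *	             padding  padlen  next header (IPPROTO_IPIP here)
 *
 * nexthdr[0] is the pad length (2), nexthdr[1] the next header, and
 * trimlen = alen + padlen + 2 = 16 + 2 + 2 = 20 bytes are trimmed off.
 */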
704 
705 int esp_input_done2(struct sk_buff *skb, int err)
706 {
707 	const struct iphdr *iph;
708 	struct xfrm_state *x = xfrm_input_state(skb);
709 	struct xfrm_offload *xo = xfrm_offload(skb);
710 	struct crypto_aead *aead = x->data;
711 	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
712 	int ihl;
713 
714 	if (!xo || !(xo->flags & CRYPTO_DONE))
715 		kfree(ESP_SKB_CB(skb)->tmp);
716 
717 	if (unlikely(err))
718 		goto out;
719 
720 	err = esp_remove_trailer(skb);
721 	if (unlikely(err < 0))
722 		goto out;
723 
724 	iph = ip_hdr(skb);
725 	ihl = iph->ihl * 4;
726 
727 	if (x->encap) {
728 		struct xfrm_encap_tmpl *encap = x->encap;
729 		struct tcphdr *th = (void *)(skb_network_header(skb) + ihl);
730 		struct udphdr *uh = (void *)(skb_network_header(skb) + ihl);
731 		__be16 source;
732 
733 		switch (x->encap->encap_type) {
734 		case TCP_ENCAP_ESPINTCP:
735 			source = th->source;
736 			break;
737 		case UDP_ENCAP_ESPINUDP:
738 			source = uh->source;
739 			break;
740 		default:
741 			WARN_ON_ONCE(1);
742 			err = -EINVAL;
743 			goto out;
744 		}
745 
746 		/*
747 		 * 1) if the NAT-T peer's IP or port changed then
748 		 *    advertise the change to the keying daemon.
749 		 *    This is an inbound SA, so just compare
750 		 *    SRC ports.
751 		 */
752 		if (iph->saddr != x->props.saddr.a4 ||
753 		    source != encap->encap_sport) {
754 			xfrm_address_t ipaddr;
755 
756 			ipaddr.a4 = iph->saddr;
757 			km_new_mapping(x, &ipaddr, source);
758 
759 			/* XXX: perhaps add an extra
760 			 * policy check here, to see
761 			 * if we should allow or
762 			 * reject a packet from a
763 			 * different source
764 			 * address/port.
765 			 */
766 		}
767 
768 		/*
769 		 * 2) ignore UDP/TCP checksums in case
770 		 *    of NAT-T in Transport Mode, or
771 		 *    perform other post-processing fixes
772 		 *    as per draft-ietf-ipsec-udp-encaps-06,
773 		 *    section 3.1.2
774 		 */
775 		if (x->props.mode == XFRM_MODE_TRANSPORT)
776 			skb->ip_summed = CHECKSUM_UNNECESSARY;
777 	}
778 
779 	skb_pull_rcsum(skb, hlen);
780 	if (x->props.mode == XFRM_MODE_TUNNEL ||
781 	    x->props.mode == XFRM_MODE_IPTFS)
782 		skb_reset_transport_header(skb);
783 	else
784 		skb_set_transport_header(skb, -ihl);
785 
786 	/* RFC4303: Drop dummy packets without any error */
787 	if (err == IPPROTO_NONE)
788 		err = -EINVAL;
789 
790 out:
791 	return err;
792 }
793 EXPORT_SYMBOL_GPL(esp_input_done2);
794 
795 static void esp_input_done(void *data, int err)
796 {
797 	struct sk_buff *skb = data;
798 
799 	xfrm_input_resume(skb, esp_input_done2(skb, err));
800 }
801 
802 static void esp_input_restore_header(struct sk_buff *skb)
803 {
804 	esp_restore_header(skb, 0);
805 	__skb_pull(skb, 4);
806 }
807 
808 static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
809 {
810 	struct xfrm_state *x = xfrm_input_state(skb);
811 	struct ip_esp_hdr *esph;
812 
813 	/* For ESN we move the header forward by 4 bytes to
814 	 * accommodate the high bits.  We will move it back after
815 	 * decryption.
816 	 */
817 	if ((x->props.flags & XFRM_STATE_ESN)) {
818 		esph = skb_push(skb, 4);
819 		*seqhi = esph->spi;
820 		esph->spi = esph->seq_no;
821 		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
822 	}
823 }
824 
825 static void esp_input_done_esn(void *data, int err)
826 {
827 	struct sk_buff *skb = data;
828 
829 	esp_input_restore_header(skb);
830 	esp_input_done(data, err);
831 }
832 
833 /*
834  * Note: detecting truncated vs. non-truncated authentication data is very
835  * expensive, so we only support truncated data, which is the recommended
836  * and common case.
837  */
838 static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
839 {
840 	struct crypto_aead *aead = x->data;
841 	struct aead_request *req;
842 	struct sk_buff *trailer;
843 	int ivlen = crypto_aead_ivsize(aead);
844 	int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
845 	int nfrags;
846 	int assoclen;
847 	int seqhilen;
848 	__be32 *seqhi;
849 	void *tmp;
850 	u8 *iv;
851 	struct scatterlist *sg;
852 	int err = -EINVAL;
853 
854 	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen))
855 		goto out;
856 
857 	if (elen <= 0)
858 		goto out;
859 
860 	assoclen = sizeof(struct ip_esp_hdr);
861 	seqhilen = 0;
862 
863 	if (x->props.flags & XFRM_STATE_ESN) {
864 		seqhilen += sizeof(__be32);
865 		assoclen += seqhilen;
866 	}
867 
868 	if (!skb_cloned(skb)) {
869 		if (!skb_is_nonlinear(skb)) {
870 			nfrags = 1;
871 
872 			goto skip_cow;
873 		} else if (!skb_has_frag_list(skb)) {
874 			nfrags = skb_shinfo(skb)->nr_frags;
875 			nfrags++;
876 
877 			goto skip_cow;
878 		}
879 	}
880 
881 	err = skb_cow_data(skb, 0, &trailer);
882 	if (err < 0)
883 		goto out;
884 
885 	nfrags = err;
886 
887 skip_cow:
888 	err = -ENOMEM;
889 	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
890 	if (!tmp)
891 		goto out;
892 
893 	ESP_SKB_CB(skb)->tmp = tmp;
894 	seqhi = esp_tmp_extra(tmp);
895 	iv = esp_tmp_iv(aead, tmp, seqhilen);
896 	req = esp_tmp_req(aead, iv);
897 	sg = esp_req_sg(aead, req);
898 
899 	esp_input_set_header(skb, seqhi);
900 
901 	sg_init_table(sg, nfrags);
902 	err = skb_to_sgvec(skb, sg, 0, skb->len);
903 	if (unlikely(err < 0)) {
904 		kfree(tmp);
905 		goto out;
906 	}
907 
908 	skb->ip_summed = CHECKSUM_NONE;
909 
910 	if ((x->props.flags & XFRM_STATE_ESN))
911 		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
912 	else
913 		aead_request_set_callback(req, 0, esp_input_done, skb);
914 
915 	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
916 	aead_request_set_ad(req, assoclen);
917 
918 	err = crypto_aead_decrypt(req);
919 	if (err == -EINPROGRESS)
920 		goto out;
921 
922 	if ((x->props.flags & XFRM_STATE_ESN))
923 		esp_input_restore_header(skb);
924 
925 	err = esp_input_done2(skb, err);
926 
927 out:
928 	return err;
929 }
930 
931 static int esp4_err(struct sk_buff *skb, u32 info)
932 {
933 	struct net *net = dev_net(skb->dev);
934 	const struct iphdr *iph = (const struct iphdr *)skb->data;
935 	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
936 	struct xfrm_state *x;
937 
938 	switch (icmp_hdr(skb)->type) {
939 	case ICMP_DEST_UNREACH:
940 		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
941 			return 0;
942 		break;
943 	case ICMP_REDIRECT:
944 		break;
945 	default:
946 		return 0;
947 	}
948 
949 	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
950 			      esph->spi, IPPROTO_ESP, AF_INET);
951 	if (!x)
952 		return 0;
953 
954 	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
955 		ipv4_update_pmtu(skb, net, info, 0, IPPROTO_ESP);
956 	else
957 		ipv4_redirect(skb, net, 0, IPPROTO_ESP);
958 	xfrm_state_put(x);
959 
960 	return 0;
961 }
962 
963 static void esp_destroy(struct xfrm_state *x)
964 {
965 	struct crypto_aead *aead = x->data;
966 
967 	if (!aead)
968 		return;
969 
970 	crypto_free_aead(aead);
971 }
972 
973 static int esp_init_aead(struct xfrm_state *x, struct netlink_ext_ack *extack)
974 {
975 	char aead_name[CRYPTO_MAX_ALG_NAME];
976 	struct crypto_aead *aead;
977 	int err;
978 
979 	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
980 		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME) {
981 		NL_SET_ERR_MSG(extack, "Algorithm name is too long");
982 		return -ENAMETOOLONG;
983 	}
984 
985 	aead = crypto_alloc_aead(aead_name, 0, 0);
986 	err = PTR_ERR(aead);
987 	if (IS_ERR(aead))
988 		goto error;
989 
990 	x->data = aead;
991 
992 	err = crypto_aead_setkey(aead, x->aead->alg_key,
993 				 (x->aead->alg_key_len + 7) / 8);
994 	if (err)
995 		goto error;
996 
997 	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
998 	if (err)
999 		goto error;
1000 
1001 	return 0;
1002 
1003 error:
1004 	NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
1005 	return err;
1006 }
1007 
1008 static int esp_init_authenc(struct xfrm_state *x,
1009 			    struct netlink_ext_ack *extack)
1010 {
1011 	struct crypto_aead *aead;
1012 	struct crypto_authenc_key_param *param;
1013 	struct rtattr *rta;
1014 	char *key;
1015 	char *p;
1016 	char authenc_name[CRYPTO_MAX_ALG_NAME];
1017 	unsigned int keylen;
1018 	int err;
1019 
1020 	err = -ENAMETOOLONG;
1021 
1022 	if ((x->props.flags & XFRM_STATE_ESN)) {
1023 		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
1024 			     "%s%sauthencesn(%s,%s)%s",
1025 			     x->geniv ?: "", x->geniv ? "(" : "",
1026 			     x->aalg ? x->aalg->alg_name : "digest_null",
1027 			     x->ealg->alg_name,
1028 			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME) {
1029 			NL_SET_ERR_MSG(extack, "Algorithm name is too long");
1030 			goto error;
1031 		}
1032 	} else {
1033 		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
1034 			     "%s%sauthenc(%s,%s)%s",
1035 			     x->geniv ?: "", x->geniv ? "(" : "",
1036 			     x->aalg ? x->aalg->alg_name : "digest_null",
1037 			     x->ealg->alg_name,
1038 			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME) {
1039 			NL_SET_ERR_MSG(extack, "Algorithm name is too long");
1040 			goto error;
1041 		}
1042 	}
1043 
1044 	aead = crypto_alloc_aead(authenc_name, 0, 0);
1045 	err = PTR_ERR(aead);
1046 	if (IS_ERR(aead)) {
1047 		NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
1048 		goto error;
1049 	}
1050 
1051 	x->data = aead;
1052 
1053 	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
1054 		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
1055 	err = -ENOMEM;
1056 	key = kmalloc(keylen, GFP_KERNEL);
1057 	if (!key)
1058 		goto error;
1059 
1060 	p = key;
1061 	rta = (void *)p;
1062 	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
1063 	rta->rta_len = RTA_LENGTH(sizeof(*param));
1064 	param = RTA_DATA(rta);
1065 	p += RTA_SPACE(sizeof(*param));
1066 
1067 	if (x->aalg) {
1068 		struct xfrm_algo_desc *aalg_desc;
1069 
1070 		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
1071 		p += (x->aalg->alg_key_len + 7) / 8;
1072 
1073 		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
1074 		BUG_ON(!aalg_desc);
1075 
1076 		err = -EINVAL;
1077 		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
1078 		    crypto_aead_authsize(aead)) {
1079 			NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
1080 			goto free_key;
1081 		}
1082 
1083 		err = crypto_aead_setauthsize(
1084 			aead, x->aalg->alg_trunc_len / 8);
1085 		if (err) {
1086 			NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
1087 			goto free_key;
1088 		}
1089 	}
1090 
1091 	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
1092 	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);
1093 
1094 	err = crypto_aead_setkey(aead, key, keylen);
1095 
1096 free_key:
1097 	kfree_sensitive(key);
1098 
1099 error:
1100 	return err;
1101 }
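
/* Example crypto template names the snprintf() calls above can produce,
 * e.g. for HMAC-SHA256 + AES-CBC with the "echainiv" IV generator (the
 * concrete geniv depends on the negotiated encryption algorithm):
 *
 *	non-ESN:  "echainiv(authenc(hmac(sha256),cbc(aes)))"
 *	ESN:      "echainiv(authencesn(hmac(sha256),cbc(aes)))"
 *
 * The key handed to crypto_aead_setkey() is an rtattr carrying enckeylen,
 * followed by the authentication key and then the encryption key.
 */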
1102 
1103 static int esp_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
1104 {
1105 	struct crypto_aead *aead;
1106 	u32 align;
1107 	int err;
1108 
1109 	x->data = NULL;
1110 
1111 	if (x->aead) {
1112 		err = esp_init_aead(x, extack);
1113 	} else if (x->ealg) {
1114 		err = esp_init_authenc(x, extack);
1115 	} else {
1116 		NL_SET_ERR_MSG(extack, "ESP: AEAD or CRYPT must be provided");
1117 		err = -EINVAL;
1118 	}
1119 
1120 	if (err)
1121 		goto error;
1122 
1123 	aead = x->data;
1124 
1125 	x->props.header_len = sizeof(struct ip_esp_hdr) +
1126 			      crypto_aead_ivsize(aead);
1127 	if (x->props.mode == XFRM_MODE_TUNNEL)
1128 		x->props.header_len += sizeof(struct iphdr);
1129 	else if (x->props.mode == XFRM_MODE_BEET && x->sel.family != AF_INET6)
1130 		x->props.header_len += IPV4_BEET_PHMAXLEN;
1131 	if (x->encap) {
1132 		struct xfrm_encap_tmpl *encap = x->encap;
1133 
1134 		switch (encap->encap_type) {
1135 		default:
1136 			NL_SET_ERR_MSG(extack, "Unsupported encapsulation type for ESP");
1137 			err = -EINVAL;
1138 			goto error;
1139 		case UDP_ENCAP_ESPINUDP:
1140 			x->props.header_len += sizeof(struct udphdr);
1141 			break;
1142 #ifdef CONFIG_INET_ESPINTCP
1143 		case TCP_ENCAP_ESPINTCP:
1144 			/* only the length field, TCP encap is done by
1145 			 * the socket
1146 			 */
1147 			x->props.header_len += 2;
1148 			break;
1149 #endif
1150 		}
1151 	}
1152 
1153 	align = ALIGN(crypto_aead_blocksize(aead), 4);
1154 	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);
1155 
1156 error:
1157 	return err;
1158 }
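
/* Worked example of the lengths reserved above, assuming AES-GCM (8-byte
 * per-packet IV, 128-bit ICV, blocksize 1) in tunnel mode with UDP
 * encapsulation:
 *
 *	header_len  = 8 (ESP) + 8 (IV) + 20 (outer iphdr) + 8 (udphdr) = 44
 *	trailer_len = ALIGN(1, 4) + 1 + 16 = 21	// worst-case pad + 2 + ICV
 */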
1159 
1160 static int esp4_rcv_cb(struct sk_buff *skb, int err)
1161 {
1162 	return 0;
1163 }
1164 
1165 static const struct xfrm_type esp_type =
1166 {
1167 	.owner		= THIS_MODULE,
1168 	.proto	     	= IPPROTO_ESP,
1169 	.flags		= XFRM_TYPE_REPLAY_PROT,
1170 	.init_state	= esp_init_state,
1171 	.destructor	= esp_destroy,
1172 	.input		= esp_input,
1173 	.output		= esp_output,
1174 };
1175 
1176 static struct xfrm4_protocol esp4_protocol = {
1177 	.handler	=	xfrm4_rcv,
1178 	.input_handler	=	xfrm_input,
1179 	.cb_handler	=	esp4_rcv_cb,
1180 	.err_handler	=	esp4_err,
1181 	.priority	=	0,
1182 };
1183 
1184 static int __init esp4_init(void)
1185 {
1186 	if (xfrm_register_type(&esp_type, AF_INET) < 0) {
1187 		pr_info("%s: can't add xfrm type\n", __func__);
1188 		return -EAGAIN;
1189 	}
1190 	if (xfrm4_protocol_register(&esp4_protocol, IPPROTO_ESP) < 0) {
1191 		pr_info("%s: can't add protocol\n", __func__);
1192 		xfrm_unregister_type(&esp_type, AF_INET);
1193 		return -EAGAIN;
1194 	}
1195 	return 0;
1196 }
1197 
1198 static void __exit esp4_fini(void)
1199 {
1200 	if (xfrm4_protocol_deregister(&esp4_protocol, IPPROTO_ESP) < 0)
1201 		pr_info("%s: can't remove protocol\n", __func__);
1202 	xfrm_unregister_type(&esp_type, AF_INET);
1203 }
1204 
1205 module_init(esp4_init);
1206 module_exit(esp4_fini);
1207 MODULE_DESCRIPTION("IPv4 ESP transformation library");
1208 MODULE_LICENSE("GPL");
1209 MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP);
1210