// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "IPsec: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in6.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/udp.h>
#include <net/tcp.h>
#include <net/espintcp.h>

#include <linux/highmem.h>

struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

struct esp_output_extra {
	__be32 seqhi;
	u32 esphoff;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the IV is placed at the front, followed
 * by the request and finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int extralen)
{
	unsigned int len;

	len = extralen;

	len += crypto_aead_ivsize(aead);

	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}

static inline void *esp_tmp_extra(void *tmp)
{
	return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int extralen)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + extralen,
			 crypto_aead_alignmask(aead) + 1) : tmp + extralen;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}

static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
{
	struct crypto_aead *aead = x->data;
	int extralen = 0;
	u8 *iv;
	struct aead_request *req;
	struct scatterlist *sg;

	if (x->props.flags & XFRM_STATE_ESN)
		extralen += sizeof(struct esp_output_extra);

	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);

	/* Unref skb_frag_pages in the src scatterlist if necessary.
	 * Skip the first sg which comes from skb->data.
	 */
	if (req->src != req->dst)
		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
			put_page(sg_page(sg));
}
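/* Illustrative layout of the esp_alloc_tmp() scratch buffer as walked
 * by the esp_tmp_*() helpers above (region sizes depend on the
 * algorithm; alignment padding between regions omitted):
 *
 *   tmp -> [esp_output_extra, ESN only]
 *          [IV, crypto_aead_ivsize() bytes]
 *          [struct aead_request + crypto_aead_reqsize() context]
 *          [struct scatterlist * nfrags]
 *
 * Offsets are recomputed from tmp on every use (see esp_ssg_unref())
 * rather than stored, so the buffer carries no bookkeeping of its own.
 */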
#ifdef CONFIG_INET_ESPINTCP
struct esp_tcp_sk {
	struct sock *sk;
	struct rcu_head rcu;
};

static void esp_free_tcp_sk(struct rcu_head *head)
{
	struct esp_tcp_sk *esk = container_of(head, struct esp_tcp_sk, rcu);

	sock_put(esk->sk);
	kfree(esk);
}

static struct sock *esp_find_tcp_sk(struct xfrm_state *x)
{
	struct xfrm_encap_tmpl *encap = x->encap;
	struct esp_tcp_sk *esk;
	__be16 sport, dport;
	struct sock *nsk;
	struct sock *sk;

	sk = rcu_dereference(x->encap_sk);
	if (sk && sk->sk_state == TCP_ESTABLISHED)
		return sk;

	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	nsk = rcu_dereference_protected(x->encap_sk,
					lockdep_is_held(&x->lock));
	if (sk && sk == nsk) {
		esk = kmalloc(sizeof(*esk), GFP_ATOMIC);
		if (!esk) {
			spin_unlock_bh(&x->lock);
			return ERR_PTR(-ENOMEM);
		}
		RCU_INIT_POINTER(x->encap_sk, NULL);
		esk->sk = sk;
		call_rcu(&esk->rcu, esp_free_tcp_sk);
	}
	spin_unlock_bh(&x->lock);

	sk = inet_lookup_established(xs_net(x), &tcp_hashinfo, x->id.daddr.a4,
				     dport, x->props.saddr.a4, sport, 0);
	if (!sk)
		return ERR_PTR(-ENOENT);

	if (!tcp_is_ulp_esp(sk)) {
		sock_put(sk);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_bh(&x->lock);
	nsk = rcu_dereference_protected(x->encap_sk,
					lockdep_is_held(&x->lock));
	if (encap->encap_sport != sport ||
	    encap->encap_dport != dport) {
		sock_put(sk);
		sk = nsk ?: ERR_PTR(-EREMCHG);
	} else if (sk == nsk) {
		sock_put(sk);
	} else {
		rcu_assign_pointer(x->encap_sk, sk);
	}
	spin_unlock_bh(&x->lock);

	return sk;
}

static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)
{
	struct sock *sk;
	int err;

	rcu_read_lock();

	sk = esp_find_tcp_sk(x);
	err = PTR_ERR_OR_ZERO(sk);
	if (err)
		goto out;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		err = espintcp_queue_out(sk, skb);
	else
		err = espintcp_push_skb(sk, skb);
	bh_unlock_sock(sk);

out:
	rcu_read_unlock();
	return err;
}

static int esp_output_tcp_encap_cb(struct net *net, struct sock *sk,
				   struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_state *x = dst->xfrm;

	return esp_output_tcp_finish(x, skb);
}

static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;

	local_bh_disable();
	err = xfrm_trans_queue_net(xs_net(x), skb, esp_output_tcp_encap_cb);
	local_bh_enable();

	/* EINPROGRESS just happens to do the right thing.  It
	 * actually means that the skb has been consumed and
	 * isn't coming back.
	 */
	return err ?: -EINPROGRESS;
}
#else
static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
	kfree_skb(skb);

	return -EOPNOTSUPP;
}
#endif
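/* Completion handler for asynchronous AEAD encryption.  It runs once
 * the crypto layer finishes a request that returned -EINPROGRESS from
 * esp_output_tail(): the scratch buffer is released, then the packet
 * resumes either on the device-offload path (XFRM_DEV_RESUME) or on
 * the regular xfrm output path.
 */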
static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct xfrm_offload *xo = xfrm_offload(skb);
	void *tmp;
	struct xfrm_state *x;

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		struct sec_path *sp = skb_sec_path(skb);

		x = sp->xvec[sp->len - 1];
	} else {
		x = skb_dst(skb)->xfrm;
	}

	tmp = ESP_SKB_CB(skb)->tmp;
	esp_ssg_unref(x, tmp);
	kfree(tmp);

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		if (err) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));
		secpath_reset(skb);
		xfrm_dev_resume(skb);
	} else {
		if (!err &&
		    x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
			esp_output_tail_tcp(x, skb);
		else
			xfrm_output_resume(skb->sk, skb, err);
	}
}

/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
	void *tmp = ESP_SKB_CB(skb)->tmp;
	__be32 *seqhi = esp_tmp_extra(tmp);

	esph->seq_no = esph->spi;
	esph->spi = *seqhi;
}

static void esp_output_restore_header(struct sk_buff *skb)
{
	void *tmp = ESP_SKB_CB(skb)->tmp;
	struct esp_output_extra *extra = esp_tmp_extra(tmp);

	esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
				sizeof(__be32));
}
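/* Sketch of the ESN header shuffle done by esp_output_set_extra()
 * below and undone by esp_restore_header() above.  During encryption
 * the header starts 4 bytes earlier so that the high sequence bits
 * are authenticated but never transmitted:
 *
 *   on the wire:      ...[ SPI ][ seq lo ]
 *   during crypto:  ...[ SPI ][ seq hi ][ seq lo ]
 *
 * The word clobbered in front of the header is parked in
 * esp_output_extra::seqhi and written back after encryption.
 */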
static struct ip_esp_hdr *esp_output_set_extra(struct sk_buff *skb,
					       struct xfrm_state *x,
					       struct ip_esp_hdr *esph,
					       struct esp_output_extra *extra)
{
	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * encryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		__u32 seqhi;
		struct xfrm_offload *xo = xfrm_offload(skb);

		if (xo)
			seqhi = xo->seq.hi;
		else
			seqhi = XFRM_SKB_CB(skb)->seq.output.hi;

		extra->esphoff = (unsigned char *)esph -
				 skb_transport_header(skb);
		esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
		extra->seqhi = esph->spi;
		esph->seq_no = htonl(seqhi);
	}

	esph->spi = x->id.spi;

	return esph;
}

static void esp_output_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_output_restore_header(skb);
	esp_output_done(base, err);
}

static struct ip_esp_hdr *esp_output_udp_encap(struct sk_buff *skb,
					       int encap_type,
					       struct esp_info *esp,
					       __be16 sport,
					       __be16 dport)
{
	struct udphdr *uh;
	__be32 *udpdata32;
	unsigned int len;

	len = skb->len + esp->tailen - skb_transport_offset(skb);
	if (len + sizeof(struct iphdr) > IP_MAX_MTU)
		return ERR_PTR(-EMSGSIZE);

	uh = (struct udphdr *)esp->esph;
	uh->source = sport;
	uh->dest = dport;
	uh->len = htons(len);
	uh->check = 0;

	*skb_mac_header(skb) = IPPROTO_UDP;

	if (encap_type == UDP_ENCAP_ESPINUDP_NON_IKE) {
		udpdata32 = (__be32 *)(uh + 1);
		udpdata32[0] = udpdata32[1] = 0;
		return (struct ip_esp_hdr *)(udpdata32 + 2);
	}

	return (struct ip_esp_hdr *)(uh + 1);
}

#ifdef CONFIG_INET_ESPINTCP
static struct ip_esp_hdr *esp_output_tcp_encap(struct xfrm_state *x,
					       struct sk_buff *skb,
					       struct esp_info *esp)
{
	__be16 *lenp = (void *)esp->esph;
	struct ip_esp_hdr *esph;
	unsigned int len;
	struct sock *sk;

	len = skb->len + esp->tailen - skb_transport_offset(skb);
	if (len > IP_MAX_MTU)
		return ERR_PTR(-EMSGSIZE);

	rcu_read_lock();
	sk = esp_find_tcp_sk(x);
	rcu_read_unlock();

	if (IS_ERR(sk))
		return ERR_CAST(sk);

	*lenp = htons(len);
	esph = (struct ip_esp_hdr *)(lenp + 1);

	return esph;
}
#else
static struct ip_esp_hdr *esp_output_tcp_encap(struct xfrm_state *x,
					       struct sk_buff *skb,
					       struct esp_info *esp)
{
	return ERR_PTR(-EOPNOTSUPP);
}
#endif

static int esp_output_encap(struct xfrm_state *x, struct sk_buff *skb,
			    struct esp_info *esp)
{
	struct xfrm_encap_tmpl *encap = x->encap;
	struct ip_esp_hdr *esph;
	__be16 sport, dport;
	int encap_type;

	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	encap_type = encap->encap_type;
	spin_unlock_bh(&x->lock);

	switch (encap_type) {
	default:
	case UDP_ENCAP_ESPINUDP:
	case UDP_ENCAP_ESPINUDP_NON_IKE:
		esph = esp_output_udp_encap(skb, encap_type, esp, sport, dport);
		break;
	case TCP_ENCAP_ESPINTCP:
		esph = esp_output_tcp_encap(x, skb, esp);
		break;
	}

	if (IS_ERR(esph))
		return PTR_ERR(esph);

	esp->esph = esph;

	return 0;
}
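/* Headers placed in front of the ESP header for each encap_type
 * handled above (UDP encapsulation per RFC 3948, the non-IKE marker
 * per the legacy NAT-T drafts, TCP encapsulation per RFC 8229):
 *
 *   UDP_ENCAP_ESPINUDP:          [UDP][ESP]
 *   UDP_ENCAP_ESPINUDP_NON_IKE:  [UDP][8 zero bytes][ESP]
 *   TCP_ENCAP_ESPINTCP:          [2-byte length][ESP]
 *                                (TCP framing is done by the
 *                                 espintcp socket)
 */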
int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *tail;
	int nfrags;
	int esph_offset;
	struct page *page;
	struct sk_buff *trailer;
	int tailen = esp->tailen;

	/* this is non-NULL only with TCP/UDP Encapsulation */
	if (x->encap) {
		int err = esp_output_encap(x, skb, esp);

		if (err < 0)
			return err;
	}

	if (!skb_cloned(skb)) {
		if (tailen <= skb_tailroom(skb)) {
			nfrags = 1;
			trailer = skb;
			tail = skb_tail_pointer(trailer);

			goto skip_cow;
		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
			   && !skb_has_frag_list(skb)) {
			int allocsize;
			struct sock *sk = skb->sk;
			struct page_frag *pfrag = &x->xfrag;

			esp->inplace = false;

			allocsize = ALIGN(tailen, L1_CACHE_BYTES);

			spin_lock_bh(&x->lock);

			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
				spin_unlock_bh(&x->lock);
				goto cow;
			}

			page = pfrag->page;
			get_page(page);

			tail = page_address(page) + pfrag->offset;

			esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

			nfrags = skb_shinfo(skb)->nr_frags;

			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
					     tailen);
			skb_shinfo(skb)->nr_frags = ++nfrags;

			pfrag->offset = pfrag->offset + allocsize;

			spin_unlock_bh(&x->lock);

			nfrags++;

			skb->len += tailen;
			skb->data_len += tailen;
			skb->truesize += tailen;
			if (sk && sk_fullsock(sk))
				refcount_add(tailen, &sk->sk_wmem_alloc);

			goto out;
		}
	}

cow:
	esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb);

	nfrags = skb_cow_data(skb, tailen, &trailer);
	if (nfrags < 0)
		goto out;
	tail = skb_tail_pointer(trailer);
	esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);

skip_cow:
	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
	pskb_put(skb, trailer, tailen);

out:
	return nfrags;
}
EXPORT_SYMBOL_GPL(esp_output_head);
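/* The trailer written by esp_output_fill_trailer() above follows the
 * RFC 4303 layout: optional TFC padding, then self-describing pad
 * bytes 1, 2, 3, ..., then the two trailer octets.  The ICV is
 * appended later by the AEAD transform:
 *
 *   [TFC padding][1, 2, ..., padlen][pad length][next header]
 */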
int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *iv;
	int alen;
	void *tmp;
	int ivlen;
	int assoclen;
	int extralen;
	struct page *page;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct aead_request *req;
	struct scatterlist *sg, *dsg;
	struct esp_output_extra *extra;
	int err = -ENOMEM;

	assoclen = sizeof(struct ip_esp_hdr);
	extralen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		extralen += sizeof(*extra);
		assoclen += sizeof(__be32);
	}

	aead = x->data;
	alen = crypto_aead_authsize(aead);
	ivlen = crypto_aead_ivsize(aead);

	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, extralen);
	if (!tmp)
		goto error;

	extra = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	if (esp->inplace)
		dsg = sg;
	else
		dsg = &sg[esp->nfrags];

	esph = esp_output_set_extra(skb, x, esp->esph, extra);
	esp->esph = esph;

	sg_init_table(sg, esp->nfrags);
	err = skb_to_sgvec(skb, sg,
			   (unsigned char *)esph - skb->data,
			   assoclen + ivlen + esp->clen + alen);
	if (unlikely(err < 0))
		goto error_free;

	if (!esp->inplace) {
		int allocsize;
		struct page_frag *pfrag = &x->xfrag;

		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

		spin_lock_bh(&x->lock);
		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
			spin_unlock_bh(&x->lock);
			goto error_free;
		}

		skb_shinfo(skb)->nr_frags = 1;

		page = pfrag->page;
		get_page(page);
		/* replace page frags in skb with new page */
		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
		pfrag->offset = pfrag->offset + allocsize;
		spin_unlock_bh(&x->lock);

		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
		err = skb_to_sgvec(skb, dsg,
				   (unsigned char *)esph - skb->data,
				   assoclen + ivlen + esp->clen + alen);
		if (unlikely(err < 0))
			goto error_free;
	}

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_output_done, skb);

	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
	aead_request_set_ad(req, assoclen);

	memset(iv, 0, ivlen);
	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
	       min(ivlen, 8));

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_encrypt(req);

	switch (err) {
	case -EINPROGRESS:
		goto error;

	case -ENOSPC:
		err = NET_XMIT_DROP;
		break;

	case 0:
		if ((x->props.flags & XFRM_STATE_ESN))
			esp_output_restore_header(skb);
	}

	if (sg != dsg)
		esp_ssg_unref(x, tmp);

	if (!err && x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
		err = esp_output_tail_tcp(x, skb);

error_free:
	kfree(tmp);
error:
	return err;
}
EXPORT_SYMBOL_GPL(esp_output_tail);

static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int alen;
	int blksize;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;

	esp.inplace = true;

	esp.proto = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, __xfrm_state_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			esp.tfclen = padto - skb->len;
	}
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	esp.nfrags = esp_output_head(x, skb, &esp);
	if (esp.nfrags < 0)
		return esp.nfrags;

	esph = esp.esph;
	esph->spi = x->id.spi;

	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
				((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

	skb_push(skb, -skb_network_offset(skb));

	return esp_output_tail(x, skb, &esp);
}
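/* Worked example of the length arithmetic above, with illustrative
 * numbers: an AEAD with block size 1 (e.g. GCM) is rounded up to a
 * 4-byte alignment, so a 100-byte payload with no TFC padding gives
 *
 *   clen   = ALIGN(100 + 2, 4) = 104   (padded payload + trailer)
 *   plen   = 104 - 100 - 0     = 4     (2 pad bytes + pad length
 *                                       byte + next header byte)
 *   tailen = 0 + 4 + alen              (trailer plus ICV)
 */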
static inline int esp_remove_trailer(struct sk_buff *skb)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int alen, hlen, elen;
	int padlen, trimlen;
	__wsum csumdiff;
	u8 nexthdr[2];
	int ret;

	alen = crypto_aead_authsize(aead);
	hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	elen = skb->len - hlen;

	if (xo && (xo->flags & XFRM_ESP_NO_TRAILER)) {
		ret = xo->proto;
		goto out;
	}

	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
		BUG();

	ret = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen) {
		net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
				    padlen + 2, elen - alen);
		goto out;
	}

	trimlen = alen + padlen + 2;
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
		skb->csum = csum_block_sub(skb->csum, csumdiff,
					   skb->len - trimlen);
	}
	pskb_trim(skb, skb->len - trimlen);

	ret = nexthdr[1];

out:
	return ret;
}

int esp_input_done2(struct sk_buff *skb, int err)
{
	const struct iphdr *iph;
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int ihl;

	if (!xo || !(xo->flags & CRYPTO_DONE))
		kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	err = esp_remove_trailer(skb);
	if (unlikely(err < 0))
		goto out;

	iph = ip_hdr(skb);
	ihl = iph->ihl * 4;

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct tcphdr *th = (void *)(skb_network_header(skb) + ihl);
		struct udphdr *uh = (void *)(skb_network_header(skb) + ihl);
		__be16 source;

		switch (x->encap->encap_type) {
		case TCP_ENCAP_ESPINTCP:
			source = th->source;
			break;
		case UDP_ENCAP_ESPINUDP:
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			source = uh->source;
			break;
		default:
			WARN_ON_ONCE(1);
			err = -EINVAL;
			goto out;
		}

		/*
		 * 1) if the NAT-T peer's IP or port changed then
		 *    advertise the change to the keying daemon.
		 *    This is an inbound SA, so just compare
		 *    SRC ports.
		 */
		if (iph->saddr != x->props.saddr.a4 ||
		    source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			ipaddr.a4 = iph->saddr;
			km_new_mapping(x, &ipaddr, source);

			/* XXX: perhaps add an extra
			 * policy check here, to see
			 * if we should allow or
			 * reject a packet from a
			 * different source
			 * address/port.
			 */
		}

		/*
		 * 2) ignore UDP/TCP checksums in case
		 *    of NAT-T in Transport Mode, or
		 *    perform other post-processing fixes
		 *    as per draft-ietf-ipsec-udp-encaps-06,
		 *    section 3.1.2
		 */
		if (x->props.mode == XFRM_MODE_TRANSPORT)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb_pull_rcsum(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -ihl);

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}
EXPORT_SYMBOL_GPL(esp_input_done2);

static void esp_input_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	xfrm_input_resume(skb, esp_input_done2(skb, err));
}

static void esp_input_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, 0);
	__skb_pull(skb, 4);
}

static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct ip_esp_hdr *esph;

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * decryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		esph = skb_push(skb, 4);
		*seqhi = esph->spi;
		esph->spi = esph->seq_no;
		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
	}
}

static void esp_input_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_input_restore_header(skb);
	esp_input_done(base, err);
}
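/* The input-side ESN handling above mirrors the output side:
 * esp_input_set_header() pushes 4 bytes so the AAD seen by the AEAD
 * is [SPI][seq hi][seq lo], and esp_input_restore_header() pops them
 * again once the (possibly asynchronous) decryption completes.
 */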
/*
 * Note: detecting truncated vs. non-truncated authentication data is very
 * expensive, so we only support truncated data, which is the recommended
 * and common case.
 */
static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct aead_request *req;
	struct sk_buff *trailer;
	int ivlen = crypto_aead_ivsize(aead);
	int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
	int nfrags;
	int assoclen;
	int seqhilen;
	__be32 *seqhi;
	void *tmp;
	u8 *iv;
	struct scatterlist *sg;
	int err = -EINVAL;

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen))
		goto out;

	if (elen <= 0)
		goto out;

	assoclen = sizeof(struct ip_esp_hdr);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	if (!skb_cloned(skb)) {
		if (!skb_is_nonlinear(skb)) {
			nfrags = 1;

			goto skip_cow;
		} else if (!skb_has_frag_list(skb)) {
			nfrags = skb_shinfo(skb)->nr_frags;
			nfrags++;

			goto skip_cow;
		}
	}

	err = skb_cow_data(skb, 0, &trailer);
	if (err < 0)
		goto out;

	nfrags = err;

skip_cow:
	err = -ENOMEM;
	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	esp_input_set_header(skb, seqhi);

	sg_init_table(sg, nfrags);
	err = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(err < 0)) {
		kfree(tmp);
		goto out;
	}

	skb->ip_summed = CHECKSUM_NONE;

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_input_done, skb);

	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
	aead_request_set_ad(req, assoclen);

	err = crypto_aead_decrypt(req);
	if (err == -EINPROGRESS)
		goto out;

	if ((x->props.flags & XFRM_STATE_ESN))
		esp_input_restore_header(skb);

	err = esp_input_done2(skb, err);

out:
	return err;
}

static int esp4_err(struct sk_buff *skb, u32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + (iph->ihl << 2));
	struct xfrm_state *x;

	switch (icmp_hdr(skb)->type) {
	case ICMP_DEST_UNREACH:
		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
			return 0;
		break;
	case ICMP_REDIRECT:
		break;
	default:
		return 0;
	}

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET);
	if (!x)
		return 0;

	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
		ipv4_update_pmtu(skb, net, info, 0, IPPROTO_ESP);
	else
		ipv4_redirect(skb, net, 0, IPPROTO_ESP);
	xfrm_state_put(x);

	return 0;
}

static void esp_destroy(struct xfrm_state *x)
{
	struct crypto_aead *aead = x->data;

	if (!aead)
		return;

	crypto_free_aead(aead);
}
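/* Example of the algorithm name built by esp_init_aead() below: for
 * instance, a geniv of "seqiv" wrapping an alg_name of
 * "rfc4106(gcm(aes))" yields the template string
 * "seqiv(rfc4106(gcm(aes)))", which the crypto layer resolves to the
 * ESP GCM AEAD.
 */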
static int esp_init_aead(struct xfrm_state *x)
{
	char aead_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *aead;
	int err;

	err = -ENAMETOOLONG;
	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
		goto error;

	aead = crypto_alloc_aead(aead_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

error:
	return err;
}

static int esp_init_authenc(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -EINVAL;
	if (!x->ealg)
		goto error;

	err = -ENAMETOOLONG;

	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthencesn(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthenc(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	}

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			pr_info("ESP: %s digestsize %u != %u\n",
				x->aalg->alg_name,
				crypto_aead_authsize(aead),
				aalg_desc->uinfo.auth.icv_fullbits / 8);
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err)
			goto free_key;
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}
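/* Layout of the authenc() key blob assembled above, as parsed by the
 * authenc template in the crypto layer (an rtattr-framed parameter
 * block followed by the two raw keys, back to back):
 *
 *   [rtattr CRYPTO_AUTHENC_KEYA_PARAM: enckeylen, big endian]
 *   [authentication key, (aalg_key_len + 7) / 8 bytes]
 *   [encryption key, (ealg_key_len + 7) / 8 bytes]
 */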
static int esp_init_state(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	u32 align;
	int err;

	x->data = NULL;

	if (x->aead)
		err = esp_init_aead(x);
	else
		err = esp_init_authenc(x);

	if (err)
		goto error;

	aead = x->data;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	else if (x->props.mode == XFRM_MODE_BEET && x->sel.family != AF_INET6)
		x->props.header_len += IPV4_BEET_PHMAXLEN;
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;

		switch (encap->encap_type) {
		default:
			err = -EINVAL;
			goto error;
		case UDP_ENCAP_ESPINUDP:
			x->props.header_len += sizeof(struct udphdr);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
			break;
#ifdef CONFIG_INET_ESPINTCP
		case TCP_ENCAP_ESPINTCP:
			/* only the length field, TCP encap is done by
			 * the socket
			 */
			x->props.header_len += 2;
			break;
#endif
		}
	}

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
	return err;
}

static int esp4_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}

static const struct xfrm_type esp_type =
{
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp_init_state,
	.destructor	= esp_destroy,
	.input		= esp_input,
	.output		= esp_output,
};

static struct xfrm4_protocol esp4_protocol = {
	.handler	= xfrm4_rcv,
	.input_handler	= xfrm_input,
	.cb_handler	= esp4_rcv_cb,
	.err_handler	= esp4_err,
	.priority	= 0,
};

static int __init esp4_init(void)
{
	if (xfrm_register_type(&esp_type, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (xfrm4_protocol_register(&esp4_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp_type, AF_INET);
		return -EAGAIN;
	}
	return 0;
}

static void __exit esp4_fini(void)
{
	if (xfrm4_protocol_deregister(&esp4_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	xfrm_unregister_type(&esp_type, AF_INET);
}

module_init(esp4_init);
module_exit(esp4_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP);