#define pr_fmt(fmt) "IPsec: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in6.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/udp.h>

struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the ESN sequence-number high bits (if any)
 * and the IV are placed at the front, followed by the request and finally
 * the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen)
{
	unsigned int len;

	len = seqhilen;

	len += crypto_aead_ivsize(aead);

	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}

static inline __be32 *esp_tmp_seqhi(void *tmp)
{
	return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + seqhilen,
			 crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}
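
/*
 * A sketch of how the esp_tmp_*() helpers above carve up the buffer
 * returned by esp_alloc_tmp() (the exact gaps depend on the algorithm's
 * alignmask and the tfm context alignment, so this is illustrative only):
 *
 *   tmp -> [ seqhi (ESN high bits, if any) ]
 *          [ IV, aligned to the AEAD alignmask ]
 *          [ struct aead_request + crypto_aead_reqsize(aead) bytes ]
 *          [ struct scatterlist[nfrags] ]
 */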

static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	kfree(ESP_SKB_CB(skb)->tmp);
	xfrm_output_resume(skb, err);
}

/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
	void *tmp = ESP_SKB_CB(skb)->tmp;
	__be32 *seqhi = esp_tmp_seqhi(tmp);

	esph->seq_no = esph->spi;
	esph->spi = *seqhi;
}

static void esp_output_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, skb_transport_offset(skb) - sizeof(__be32));
}

static void esp_output_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_output_restore_header(skb);
	esp_output_done(base, err);
}

static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct aead_request *req;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	void *tmp;
	u8 *iv;
	u8 *tail;
	int blksize;
	int clen;
	int alen;
	int plen;
	int ivlen;
	int tfclen;
	int nfrags;
	int assoclen;
	int seqhilen;
	__be32 *seqhi;
	__be64 seqno;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);
	ivlen = crypto_aead_ivsize(aead);

	tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, esp4_get_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			tfclen = padto - skb->len;
	}
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	clen = ALIGN(skb->len + 2 + tfclen, blksize);
	plen = clen - skb->len - tfclen;

	err = skb_cow_data(skb, tfclen + plen + alen, &trailer);
	if (err < 0)
		goto error;
	nfrags = err;

	assoclen = sizeof(*esph);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp) {
		err = -ENOMEM;
		goto error;
	}

	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);
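
	/*
	 * The block below writes the ESP trailer (RFC 4303) into the tail
	 * room made writable by skb_cow_data().  A sketch of the resulting
	 * packet tail; the ICV itself is written by the AEAD transform
	 * during encryption:
	 *
	 *   payload | TFC pad (zeros) | pad 1, 2, ..., n | pad length | next header | ICV
	 */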
	/* Fill padding... */
	tail = skb_tail_pointer(trailer);
	if (tfclen) {
		memset(tail, 0, tfclen);
		tail += tfclen;
	}
	do {
		int i;
		for (i = 0; i < plen - 2; i++)
			tail[i] = i + 1;
	} while (0);
	tail[plen - 2] = plen - 2;
	tail[plen - 1] = *skb_mac_header(skb);
	pskb_put(skb, trailer, clen - skb->len + alen);

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	/* this is non-NULL only with UDP Encapsulation */
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct udphdr *uh;
		__be32 *udpdata32;
		__be16 sport, dport;
		int encap_type;

		spin_lock_bh(&x->lock);
		sport = encap->encap_sport;
		dport = encap->encap_dport;
		encap_type = encap->encap_type;
		spin_unlock_bh(&x->lock);

		uh = (struct udphdr *)esph;
		uh->source = sport;
		uh->dest = dport;
		uh->len = htons(skb->len - skb_transport_offset(skb));
		uh->check = 0;

		switch (encap_type) {
		default:
		case UDP_ENCAP_ESPINUDP:
			esph = (struct ip_esp_hdr *)(uh + 1);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			/* 8 zero bytes (non-IKE marker) between UDP and ESP headers */
			udpdata32 = (__be32 *)(uh + 1);
			udpdata32[0] = udpdata32[1] = 0;
			esph = (struct ip_esp_hdr *)(udpdata32 + 2);
			break;
		}

		*skb_mac_header(skb) = IPPROTO_UDP;
	}

	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	aead_request_set_callback(req, 0, esp_output_done, skb);

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * encryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		esph = (void *)(skb_transport_header(skb) - sizeof(__be32));
		*seqhi = esph->spi;
		esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
	}

	esph->spi = x->id.spi;

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg,
		     (unsigned char *)esph - skb->data,
		     assoclen + ivlen + clen + alen);

	aead_request_set_crypt(req, sg, sg, ivlen + clen, iv);
	aead_request_set_ad(req, assoclen);

	seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
			    ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));
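
	/*
	 * Seed the request IV from the 64-bit (possibly extended) sequence
	 * number: the IV is zeroed and the low-order min(ivlen, 8) bytes of
	 * the big-endian seqno are copied into its tail.  As an illustration
	 * of the code below (not an extra code path): an 8-byte IV carries
	 * the whole sequence number, while a larger IV keeps its leading
	 * bytes zero.  The geniv template selected via x->geniv in
	 * esp_init_aead()/esp_init_authenc() uses this seed when generating
	 * the on-wire IV.
	 */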
	memset(iv, 0, ivlen);
	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&seqno + 8 - min(ivlen, 8),
	       min(ivlen, 8));

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_encrypt(req);

	switch (err) {
	case -EINPROGRESS:
		goto error;

	case -EBUSY:
		err = NET_XMIT_DROP;
		break;

	case 0:
		if ((x->props.flags & XFRM_STATE_ESN))
			esp_output_restore_header(skb);
	}

	kfree(tmp);

error:
	return err;
}

static int esp_input_done2(struct sk_buff *skb, int err)
{
	const struct iphdr *iph;
	struct xfrm_state *x = xfrm_input_state(skb);
	struct crypto_aead *aead = x->data;
	int alen = crypto_aead_authsize(aead);
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int elen = skb->len - hlen;
	int ihl;
	u8 nexthdr[2];
	int padlen;

	kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
		BUG();

	err = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen)
		goto out;

	/* ... check padding bits here. Silly. :-) */

	iph = ip_hdr(skb);
	ihl = iph->ihl * 4;

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct udphdr *uh = (void *)(skb_network_header(skb) + ihl);

		/*
		 * 1) if the NAT-T peer's IP or port changed then
		 *    advertise the change to the keying daemon.
		 *    This is an inbound SA, so just compare
		 *    SRC ports.
		 */
		if (iph->saddr != x->props.saddr.a4 ||
		    uh->source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			ipaddr.a4 = iph->saddr;
			km_new_mapping(x, &ipaddr, uh->source);

			/* XXX: perhaps add an extra
			 * policy check here, to see
			 * if we should allow or
			 * reject a packet from a
			 * different source
			 * address/port.
			 */
		}

		/*
		 * 2) ignore UDP/TCP checksums in case
		 *    of NAT-T in Transport Mode, or
		 *    perform other post-processing fixes
		 *    as per draft-ietf-ipsec-udp-encaps-06,
		 *    section 3.1.2
		 */
		if (x->props.mode == XFRM_MODE_TRANSPORT)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	pskb_trim(skb, skb->len - alen - padlen - 2);
	__skb_pull(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -ihl);

	err = nexthdr[1];

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}

static void esp_input_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	xfrm_input_resume(skb, esp_input_done2(skb, err));
}

static void esp_input_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, 0);
	__skb_pull(skb, 4);
}

static void esp_input_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_input_restore_header(skb);
	esp_input_done(base, err);
}

/*
 * Note: detecting truncated vs. non-truncated authentication data is very
 * expensive, so we only support truncated data, which is the recommended
 * and common case.
 */
static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead = x->data;
	struct aead_request *req;
	struct sk_buff *trailer;
	int ivlen = crypto_aead_ivsize(aead);
	int elen = skb->len - sizeof(*esph) - ivlen;
	int nfrags;
	int assoclen;
	int seqhilen;
	__be32 *seqhi;
	void *tmp;
	u8 *iv;
	struct scatterlist *sg;
	int err = -EINVAL;

	if (!pskb_may_pull(skb, sizeof(*esph) + ivlen))
		goto out;

	if (elen <= 0)
		goto out;

	err = skb_cow_data(skb, 0, &trailer);
	if (err < 0)
		goto out;

	nfrags = err;

	assoclen = sizeof(*esph);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	err = -ENOMEM;
	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	skb->ip_summed = CHECKSUM_NONE;

	esph = (struct ip_esp_hdr *)skb->data;

	aead_request_set_callback(req, 0, esp_input_done, skb);

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * decryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		esph = (void *)skb_push(skb, 4);
		*seqhi = esph->spi;
		esph->spi = esph->seq_no;
		esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.input.hi);
		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
	}

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg, 0, skb->len);

	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
	aead_request_set_ad(req, assoclen);

	err = crypto_aead_decrypt(req);
	if (err == -EINPROGRESS)
		goto out;

	if ((x->props.flags & XFRM_STATE_ESN))
		esp_input_restore_header(skb);

	err = esp_input_done2(skb, err);

out:
	return err;
}
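
/*
 * esp4_get_mtu() below reports the largest payload that still fits within
 * the given MTU once ESP overhead is added.  A worked example with assumed
 * parameters (illustrative only): transport mode (net_adj = 20), header_len
 * of 24 (8-byte ESP header + 16-byte IV), a 12-byte truncated ICV, a
 * 16-byte cipher block size and mtu = 1500 yields
 *
 *   ((1500 - 24 - 12 - 20) & ~15) + 20 - 2 = 1440 + 18 = 1458
 *
 * where the trailing "- 2" reserves the pad-length and next-header bytes of
 * the ESP trailer.
 */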
static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
{
	struct crypto_aead *aead = x->data;
	u32 blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	unsigned int net_adj;

	switch (x->props.mode) {
	case XFRM_MODE_TRANSPORT:
	case XFRM_MODE_BEET:
		net_adj = sizeof(struct iphdr);
		break;
	case XFRM_MODE_TUNNEL:
		net_adj = 0;
		break;
	default:
		BUG();
	}

	return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
		 net_adj) & ~(blksize - 1)) + net_adj - 2;
}

static int esp4_err(struct sk_buff *skb, u32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + (iph->ihl << 2));
	struct xfrm_state *x;

	switch (icmp_hdr(skb)->type) {
	case ICMP_DEST_UNREACH:
		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
			return 0;
		/* fall through */
	case ICMP_REDIRECT:
		break;
	default:
		return 0;
	}

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET);
	if (!x)
		return 0;

	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
		ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
	else
		ipv4_redirect(skb, net, 0, 0, IPPROTO_ESP, 0);
	xfrm_state_put(x);

	return 0;
}

static void esp_destroy(struct xfrm_state *x)
{
	struct crypto_aead *aead = x->data;

	if (!aead)
		return;

	crypto_free_aead(aead);
}

static int esp_init_aead(struct xfrm_state *x)
{
	char aead_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *aead;
	int err;

	err = -ENAMETOOLONG;
	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
		goto error;

	aead = crypto_alloc_aead(aead_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

error:
	return err;
}

static int esp_init_authenc(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -EINVAL;
	if (!x->ealg)
		goto error;

	err = -ENAMETOOLONG;

	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthencesn(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthenc(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	}

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;
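
	/*
	 * The key blob handed to crypto_aead_setkey() below is laid out in
	 * the format the authenc/authencesn templates expect (a sketch;
	 * sizes depend on the configured algorithms):
	 *
	 *   key -> [ struct rtattr (CRYPTO_AUTHENC_KEYA_PARAM) + enckeylen ]
	 *          [ authentication key (omitted when x->aalg is NULL)     ]
	 *          [ encryption key                                        ]
	 */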
	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			pr_info("ESP: %s digestsize %u != %hu\n",
				x->aalg->alg_name,
				crypto_aead_authsize(aead),
				aalg_desc->uinfo.auth.icv_fullbits / 8);
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err)
			goto free_key;
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}

static int esp_init_state(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	u32 align;
	int err;

	x->data = NULL;

	if (x->aead)
		err = esp_init_aead(x);
	else
		err = esp_init_authenc(x);

	if (err)
		goto error;

	aead = x->data;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	else if (x->props.mode == XFRM_MODE_BEET && x->sel.family != AF_INET6)
		x->props.header_len += IPV4_BEET_PHMAXLEN;
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;

		switch (encap->encap_type) {
		default:
			err = -EINVAL;
			goto error;
		case UDP_ENCAP_ESPINUDP:
			x->props.header_len += sizeof(struct udphdr);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
			break;
		}
	}

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
	return err;
}

static int esp4_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}

static const struct xfrm_type esp_type =
{
	.description	= "ESP4",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp_init_state,
	.destructor	= esp_destroy,
	.get_mtu	= esp4_get_mtu,
	.input		= esp_input,
	.output		= esp_output
};

static struct xfrm4_protocol esp4_protocol = {
	.handler	= xfrm4_rcv,
	.input_handler	= xfrm_input,
	.cb_handler	= esp4_rcv_cb,
	.err_handler	= esp4_err,
	.priority	= 0,
};

static int __init esp4_init(void)
{
	if (xfrm_register_type(&esp_type, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (xfrm4_protocol_register(&esp4_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp_type, AF_INET);
		return -EAGAIN;
	}
	return 0;
}

static void __exit esp4_fini(void)
{
	if (xfrm4_protocol_deregister(&esp4_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	if (xfrm_unregister_type(&esp_type, AF_INET) < 0)
		pr_info("%s: can't remove xfrm type\n", __func__);
}

module_init(esp4_init);
module_exit(esp4_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP);