// SPDX-License-Identifier: GPL-2.0
/*
 * xfrm_input.c
 *
 * Changes:
 *	YOSHIFUJI Hideaki @USAGI
 *		Split up af-specific portion
 *
 */

#include <linux/bottom_half.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/percpu.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ip_tunnels.h>
#include <net/ip6_tunnel.h>
#include <net/dst_metadata.h>

#include "xfrm_inout.h"

struct xfrm_trans_tasklet {
	struct work_struct work;
	spinlock_t queue_lock;
	struct sk_buff_head queue;
};

struct xfrm_trans_cb {
	union {
		struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
		struct inet6_skb_parm	h6;
#endif
	} header;
	int (*finish)(struct net *net, struct sock *sk, struct sk_buff *skb);
	struct net *net;
};

#define XFRM_TRANS_SKB_CB(__skb) ((struct xfrm_trans_cb *)&((__skb)->cb[0]))

static DEFINE_SPINLOCK(xfrm_input_afinfo_lock);
static struct xfrm_input_afinfo const __rcu *xfrm_input_afinfo[2][AF_INET6 + 1];

static struct gro_cells gro_cells;
static struct net_device xfrm_napi_dev;

static DEFINE_PER_CPU(struct xfrm_trans_tasklet, xfrm_trans_tasklet);

int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo)
{
	int err = 0;

	if (WARN_ON(afinfo->family > AF_INET6))
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_input_afinfo_lock);
	if (unlikely(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family]))
		err = -EEXIST;
	else
		rcu_assign_pointer(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family], afinfo);
	spin_unlock_bh(&xfrm_input_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_input_register_afinfo);

int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo)
{
	int err = 0;

	spin_lock_bh(&xfrm_input_afinfo_lock);
	if (likely(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family])) {
		if (unlikely(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family] != afinfo))
			err = -EINVAL;
		else
			RCU_INIT_POINTER(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family], NULL);
	}
	spin_unlock_bh(&xfrm_input_afinfo_lock);
	synchronize_rcu();
	return err;
}
EXPORT_SYMBOL(xfrm_input_unregister_afinfo);

static const struct xfrm_input_afinfo *xfrm_input_get_afinfo(u8 family, bool is_ipip)
{
	const struct xfrm_input_afinfo *afinfo;

	if (WARN_ON_ONCE(family > AF_INET6))
		return NULL;

	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_input_afinfo[is_ipip][family]);
	if (unlikely(!afinfo))
		rcu_read_unlock();
	return afinfo;
}

static int xfrm_rcv_cb(struct sk_buff *skb, unsigned int family, u8 protocol,
		       int err)
{
	bool is_ipip = (protocol == IPPROTO_IPIP || protocol == IPPROTO_IPV6);
	const struct xfrm_input_afinfo *afinfo;
	int ret;

	afinfo = xfrm_input_get_afinfo(family, is_ipip);
	if (!afinfo)
		return -EAFNOSUPPORT;

	ret = afinfo->callback(skb, protocol, err);
	rcu_read_unlock();

	return ret;
}

struct sec_path *secpath_set(struct sk_buff *skb)
{
	struct sec_path *sp, *tmp = skb_ext_find(skb, SKB_EXT_SEC_PATH);

	sp = skb_ext_add(skb, SKB_EXT_SEC_PATH);
	if (!sp)
		return NULL;

	if (tmp) /* reused existing one (was COW'd if needed) */
		return sp;

	/* allocated new secpath */
	memset(sp->ovec, 0, sizeof(sp->ovec));
	sp->olen = 0;
	sp->len = 0;

	return sp;
}
EXPORT_SYMBOL(secpath_set);
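/*
 * A minimal usage sketch (not a real caller; error handling and stats
 * elided): protocol handlers install a secpath before recording each
 * decapsulating state, exactly as xfrm_input() does below:
 *
 *	struct sec_path *sp = secpath_set(skb);
 *
 *	if (!sp)
 *		goto drop;
 *	sp->xvec[sp->len++] = x;	// x: the xfrm_state just looked up
 */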
/* Fetch spi and seq from ipsec header */

int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq)
{
	int offset, offset_seq;
	int hlen;

	switch (nexthdr) {
	case IPPROTO_AH:
		hlen = sizeof(struct ip_auth_hdr);
		offset = offsetof(struct ip_auth_hdr, spi);
		offset_seq = offsetof(struct ip_auth_hdr, seq_no);
		break;
	case IPPROTO_ESP:
		hlen = sizeof(struct ip_esp_hdr);
		offset = offsetof(struct ip_esp_hdr, spi);
		offset_seq = offsetof(struct ip_esp_hdr, seq_no);
		break;
	case IPPROTO_COMP:
		if (!pskb_may_pull(skb, sizeof(struct ip_comp_hdr)))
			return -EINVAL;
		*spi = htonl(ntohs(*(__be16 *)(skb_transport_header(skb) + 2)));
		*seq = 0;
		return 0;
	default:
		return 1;
	}

	if (!pskb_may_pull(skb, hlen))
		return -EINVAL;

	*spi = *(__be32 *)(skb_transport_header(skb) + offset);
	*seq = *(__be32 *)(skb_transport_header(skb) + offset_seq);
	return 0;
}
EXPORT_SYMBOL(xfrm_parse_spi);
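/*
 * Note on the IPPROTO_COMP case above: IPComp carries no 32-bit SPI,
 * only a 16-bit CPI at offset 2 of its header.  The CPI is
 * zero-extended and put back into network byte order, so a CPI of
 * 0x1234 produces the lookup key htonl(0x00001234).
 */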
static int xfrm4_remove_beet_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct iphdr *iph;
	int optlen = 0;
	int err = -EINVAL;

	if (unlikely(XFRM_MODE_SKB_CB(skb)->protocol == IPPROTO_BEETPH)) {
		struct ip_beet_phdr *ph;
		int phlen;

		if (!pskb_may_pull(skb, sizeof(*ph)))
			goto out;

		ph = (struct ip_beet_phdr *)skb->data;

		phlen = sizeof(*ph) + ph->padlen;
		optlen = ph->hdrlen * 8 + (IPV4_BEET_PHMAXLEN - phlen);
		if (optlen < 0 || optlen & 3 || optlen > 250)
			goto out;

		XFRM_MODE_SKB_CB(skb)->protocol = ph->nexthdr;

		if (!pskb_may_pull(skb, phlen))
			goto out;
		__skb_pull(skb, phlen);
	}

	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);

	xfrm4_beet_make_header(skb);

	iph = ip_hdr(skb);

	iph->ihl += optlen / 4;
	iph->tot_len = htons(skb->len);
	iph->daddr = x->sel.daddr.a4;
	iph->saddr = x->sel.saddr.a4;
	iph->check = 0;
	iph->check = ip_fast_csum(skb_network_header(skb), iph->ihl);
	err = 0;
out:
	return err;
}

static void ipip_ecn_decapsulate(struct sk_buff *skb)
{
	struct iphdr *inner_iph = ipip_hdr(skb);

	if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos))
		IP_ECN_set_ce(inner_iph);
}

static int xfrm4_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	int err = -EINVAL;

	if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPIP)
		goto out;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;

	err = skb_unclone(skb, GFP_ATOMIC);
	if (err)
		goto out;

	if (x->props.flags & XFRM_STATE_DECAP_DSCP)
		ipv4_copy_dscp(XFRM_MODE_SKB_CB(skb)->tos, ipip_hdr(skb));
	if (!(x->props.flags & XFRM_STATE_NOECN))
		ipip_ecn_decapsulate(skb);

	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);
	if (skb->mac_len)
		eth_hdr(skb)->h_proto = skb->protocol;

	err = 0;

out:
	return err;
}

static void ipip6_ecn_decapsulate(struct sk_buff *skb)
{
	struct ipv6hdr *inner_iph = ipipv6_hdr(skb);

	if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos))
		IP6_ECN_set_ce(skb, inner_iph);
}

static int xfrm6_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	int err = -EINVAL;

	if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPV6)
		goto out;
	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		goto out;

	err = skb_unclone(skb, GFP_ATOMIC);
	if (err)
		goto out;

	if (x->props.flags & XFRM_STATE_DECAP_DSCP)
		ipv6_copy_dscp(ipv6_get_dsfield(ipv6_hdr(skb)),
			       ipipv6_hdr(skb));
	if (!(x->props.flags & XFRM_STATE_NOECN))
		ipip6_ecn_decapsulate(skb);

	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);
	if (skb->mac_len)
		eth_hdr(skb)->h_proto = skb->protocol;

	err = 0;

out:
	return err;
}

static int xfrm6_remove_beet_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipv6hdr *ip6h;
	int size = sizeof(struct ipv6hdr);
	int err;

	err = skb_cow_head(skb, size + skb->mac_len);
	if (err)
		goto out;

	__skb_push(skb, size);
	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);

	xfrm6_beet_make_header(skb);

	ip6h = ipv6_hdr(skb);
	ip6h->payload_len = htons(skb->len - size);
	ip6h->daddr = x->sel.daddr.in6;
	ip6h->saddr = x->sel.saddr.in6;
	err = 0;
out:
	return err;
}

/* Remove encapsulation header.
 *
 * The IP header will be moved over the top of the encapsulation
 * header.
 *
 * On entry, the transport header shall point to where the IP header
 * should be and the network header shall be set to where the IP
 * header currently is.  skb->data shall point to the start of the
 * payload.
 */
static int
xfrm_inner_mode_encap_remove(struct xfrm_state *x,
			     const struct xfrm_mode *inner_mode,
			     struct sk_buff *skb)
{
	switch (inner_mode->encap) {
	case XFRM_MODE_BEET:
		if (inner_mode->family == AF_INET)
			return xfrm4_remove_beet_encap(x, skb);
		if (inner_mode->family == AF_INET6)
			return xfrm6_remove_beet_encap(x, skb);
		break;
	case XFRM_MODE_TUNNEL:
		if (inner_mode->family == AF_INET)
			return xfrm4_remove_tunnel_encap(x, skb);
		if (inner_mode->family == AF_INET6)
			return xfrm6_remove_tunnel_encap(x, skb);
		break;
	}

	WARN_ON_ONCE(1);
	return -EOPNOTSUPP;
}

static int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb)
{
	const struct xfrm_mode *inner_mode = &x->inner_mode;

	switch (x->outer_mode.family) {
	case AF_INET:
		xfrm4_extract_header(skb);
		break;
	case AF_INET6:
		xfrm6_extract_header(skb);
		break;
	default:
		WARN_ON_ONCE(1);
		return -EAFNOSUPPORT;
	}

	if (x->sel.family == AF_UNSPEC) {
		inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
		if (!inner_mode)
			return -EAFNOSUPPORT;
	}

	switch (inner_mode->family) {
	case AF_INET:
		skb->protocol = htons(ETH_P_IP);
		break;
	case AF_INET6:
		skb->protocol = htons(ETH_P_IPV6);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return xfrm_inner_mode_encap_remove(x, inner_mode, skb);
}
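/*
 * Note: an AF_UNSPEC selector family above marks a state that can
 * carry either address family inside (an inter-family tunnel), so
 * the effective inner mode must be chosen per packet from the outer
 * protocol field (IPPROTO_IPIP vs IPPROTO_IPV6) via
 * xfrm_ip2inner_mode().
 */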
/* Remove encapsulation header.
 *
 * The IP header will be moved over the top of the encapsulation header.
 *
 * On entry, skb_transport_header() shall point to where the IP header
 * should be and skb_network_header() shall be set to where the IP header
 * currently is.  skb->data shall point to the start of the payload.
 */
static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
{
	int ihl = skb->data - skb_transport_header(skb);

	if (skb->transport_header != skb->network_header) {
		memmove(skb_transport_header(skb),
			skb_network_header(skb), ihl);
		skb->network_header = skb->transport_header;
	}
	ip_hdr(skb)->tot_len = htons(skb->len + ihl);
	skb_reset_transport_header(skb);
	return 0;
}

static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
	int ihl = skb->data - skb_transport_header(skb);

	if (skb->transport_header != skb->network_header) {
		memmove(skb_transport_header(skb),
			skb_network_header(skb), ihl);
		skb->network_header = skb->transport_header;
	}
	ipv6_hdr(skb)->payload_len = htons(skb->len + ihl -
					   sizeof(struct ipv6hdr));
	skb_reset_transport_header(skb);
	return 0;
#else
	WARN_ON_ONCE(1);
	return -EAFNOSUPPORT;
#endif
}

static int xfrm_inner_mode_input(struct xfrm_state *x,
				 const struct xfrm_mode *inner_mode,
				 struct sk_buff *skb)
{
	switch (inner_mode->encap) {
	case XFRM_MODE_BEET:
	case XFRM_MODE_TUNNEL:
		return xfrm_prepare_input(x, skb);
	case XFRM_MODE_TRANSPORT:
		if (inner_mode->family == AF_INET)
			return xfrm4_transport_input(x, skb);
		if (inner_mode->family == AF_INET6)
			return xfrm6_transport_input(x, skb);
		break;
	case XFRM_MODE_ROUTEOPTIMIZATION:
		WARN_ON_ONCE(1);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return -EOPNOTSUPP;
}

int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
{
	const struct xfrm_state_afinfo *afinfo;
	struct net *net = dev_net(skb->dev);
	const struct xfrm_mode *inner_mode;
	int err;
	__be32 seq;
	__be32 seq_hi;
	struct xfrm_state *x = NULL;
	xfrm_address_t *daddr;
	u32 mark = skb->mark;
	unsigned int family = AF_UNSPEC;
	int decaps = 0;
	int async = 0;
	bool xfrm_gro = false;
	bool crypto_done = false;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp;
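	/*
	 * encap_type convention, as used below: 0 is plain IPsec
	 * input, positive values name a NAT-T style encapsulation
	 * (checked against x->encap once the state is locked), -1 is
	 * an asynchronous crypto resumption, and anything below -1 is
	 * a GRO call.
	 */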
	if (encap_type < 0) {
		x = xfrm_input_state(skb);

		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			if (x->km.state == XFRM_STATE_ACQ)
				XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
			else
				XFRM_INC_STATS(net,
					       LINUX_MIB_XFRMINSTATEINVALID);

			if (encap_type == -1)
				dev_put(skb->dev);
			goto drop;
		}

		family = x->outer_mode.family;

		/* An encap_type of -1 indicates async resumption. */
		if (encap_type == -1) {
			async = 1;
			seq = XFRM_SKB_CB(skb)->seq.input.low;
			goto resume;
		}

		/* encap_type < -1 indicates a GRO call. */
		encap_type = 0;
		seq = XFRM_SPI_SKB_CB(skb)->seq;

		if (xo && (xo->flags & CRYPTO_DONE)) {
			crypto_done = true;
			family = XFRM_SPI_SKB_CB(skb)->family;

			if (!(xo->status & CRYPTO_SUCCESS)) {
				if (xo->status &
				    (CRYPTO_TRANSPORT_AH_AUTH_FAILED |
				     CRYPTO_TRANSPORT_ESP_AUTH_FAILED |
				     CRYPTO_TUNNEL_AH_AUTH_FAILED |
				     CRYPTO_TUNNEL_ESP_AUTH_FAILED)) {

					xfrm_audit_state_icvfail(x, skb,
								 x->type->proto);
					x->stats.integrity_failed++;
					XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
					goto drop;
				}

				if (xo->status & CRYPTO_INVALID_PROTOCOL) {
					XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
					goto drop;
				}

				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
				goto drop;
			}

			if (xfrm_parse_spi(skb, nexthdr, &spi, &seq)) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
				goto drop;
			}
		}

		goto lock;
	}

	family = XFRM_SPI_SKB_CB(skb)->family;

	/* if tunnel is present override skb->mark value with tunnel i_key */
	switch (family) {
	case AF_INET:
		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
			mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4->parms.i_key);
		break;
	case AF_INET6:
		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
			mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6->parms.i_key);
		break;
	}

	sp = secpath_set(skb);
	if (!sp) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
		goto drop;
	}

	seq = 0;
	if (!spi && xfrm_parse_spi(skb, nexthdr, &spi, &seq)) {
		secpath_reset(skb);
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
		goto drop;
	}

	daddr = (xfrm_address_t *)(skb_network_header(skb) +
				   XFRM_SPI_SKB_CB(skb)->daddroff);
	do {
		sp = skb_sec_path(skb);

		if (sp->len == XFRM_MAX_DEPTH) {
			secpath_reset(skb);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
			goto drop;
		}

		x = xfrm_state_lookup(net, mark, daddr, spi, nexthdr, family);
		if (x == NULL) {
			secpath_reset(skb);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
			xfrm_audit_state_notfound(skb, family, spi, seq);
			goto drop;
		}

		skb->mark = xfrm_smark_get(skb->mark, x);

		sp->xvec[sp->len++] = x;

		skb_dst_force(skb);
		if (!skb_dst(skb)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
			goto drop;
		}
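		/*
		 * The GRO/crypto-offload branch above jumps straight
		 * here via "goto lock" with x already attached to the
		 * skb; the normal lookup path falls through on every
		 * pass of the loop.
		 */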
lock:
		spin_lock(&x->lock);

		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			if (x->km.state == XFRM_STATE_ACQ)
				XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
			else
				XFRM_INC_STATS(net,
					       LINUX_MIB_XFRMINSTATEINVALID);
			goto drop_unlock;
		}

		if ((x->encap ? x->encap->encap_type : 0) != encap_type) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
			goto drop_unlock;
		}

		if (xfrm_replay_check(x, skb, seq)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
			goto drop_unlock;
		}

		if (xfrm_state_check_expire(x)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEEXPIRED);
			goto drop_unlock;
		}

		spin_unlock(&x->lock);

		if (xfrm_tunnel_check(skb, x, family)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
			goto drop;
		}

		seq_hi = htonl(xfrm_replay_seqhi(x, seq));

		XFRM_SKB_CB(skb)->seq.input.low = seq;
		XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;

		dev_hold(skb->dev);

		if (crypto_done)
			nexthdr = x->type_offload->input_tail(x, skb);
		else
			nexthdr = x->type->input(x, skb);

		if (nexthdr == -EINPROGRESS)
			return 0;
resume:
		dev_put(skb->dev);

		spin_lock(&x->lock);
		if (nexthdr < 0) {
			if (nexthdr == -EBADMSG) {
				xfrm_audit_state_icvfail(x, skb,
							 x->type->proto);
				x->stats.integrity_failed++;
			}
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
			goto drop_unlock;
		}

		/* only the first xfrm gets the encap type */
		encap_type = 0;

		if (xfrm_replay_recheck(x, skb, seq)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
			goto drop_unlock;
		}

		xfrm_replay_advance(x, seq);

		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock(&x->lock);

		XFRM_MODE_SKB_CB(skb)->protocol = nexthdr;

		inner_mode = &x->inner_mode;

		if (x->sel.family == AF_UNSPEC) {
			inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
			if (inner_mode == NULL) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
				goto drop;
			}
		}

		if (xfrm_inner_mode_input(x, inner_mode, skb)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
			goto drop;
		}

		if (x->outer_mode.flags & XFRM_MODE_FLAG_TUNNEL) {
			decaps = 1;
			break;
		}

		/*
		 * We need the inner address.  However, we only get here for
		 * transport mode so the outer address is identical.
		 */
		daddr = &x->id.daddr;
		family = x->outer_mode.family;

		err = xfrm_parse_spi(skb, nexthdr, &spi, &seq);
		if (err < 0) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
			goto drop;
		}
		crypto_done = false;
	} while (!err);

	err = xfrm_rcv_cb(skb, family, x->type->proto, 0);
	if (err)
		goto drop;

	nf_reset_ct(skb);
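	/*
	 * Two exits from here: fully decapsulated tunnel-mode packets
	 * are reinjected through the GRO cells, while transport-mode
	 * packets continue in the af-specific transport_finish()
	 * handler (also reinjected via GRO cells when they arrived
	 * through the GRO path).
	 */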
	if (decaps) {
		sp = skb_sec_path(skb);
		if (sp)
			sp->olen = 0;
		if (skb_valid_dst(skb))
			skb_dst_drop(skb);
		gro_cells_receive(&gro_cells, skb);
		return 0;
	} else {
		xo = xfrm_offload(skb);
		if (xo)
			xfrm_gro = xo->flags & XFRM_GRO;

		err = -EAFNOSUPPORT;
		rcu_read_lock();
		afinfo = xfrm_state_afinfo_get_rcu(x->inner_mode.family);
		if (likely(afinfo))
			err = afinfo->transport_finish(skb, xfrm_gro || async);
		rcu_read_unlock();
		if (xfrm_gro) {
			sp = skb_sec_path(skb);
			if (sp)
				sp->olen = 0;
			if (skb_valid_dst(skb))
				skb_dst_drop(skb);
			gro_cells_receive(&gro_cells, skb);
			return err;
		}

		return err;
	}

drop_unlock:
	spin_unlock(&x->lock);
drop:
	xfrm_rcv_cb(skb, family, x && x->type ? x->type->proto : nexthdr, -1);
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(xfrm_input);

int xfrm_input_resume(struct sk_buff *skb, int nexthdr)
{
	return xfrm_input(skb, nexthdr, 0, -1);
}
EXPORT_SYMBOL(xfrm_input_resume);

static void xfrm_trans_reinject(struct work_struct *work)
{
	struct xfrm_trans_tasklet *trans = container_of(work, struct xfrm_trans_tasklet, work);
	struct sk_buff_head queue;
	struct sk_buff *skb;

	__skb_queue_head_init(&queue);
	spin_lock_bh(&trans->queue_lock);
	skb_queue_splice_init(&trans->queue, &queue);
	spin_unlock_bh(&trans->queue_lock);

	local_bh_disable();
	while ((skb = __skb_dequeue(&queue)))
		XFRM_TRANS_SKB_CB(skb)->finish(XFRM_TRANS_SKB_CB(skb)->net,
					       NULL, skb);
	local_bh_enable();
}

int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
			 int (*finish)(struct net *, struct sock *,
				       struct sk_buff *))
{
	struct xfrm_trans_tasklet *trans;

	trans = this_cpu_ptr(&xfrm_trans_tasklet);

	if (skb_queue_len(&trans->queue) >= READ_ONCE(netdev_max_backlog))
		return -ENOBUFS;

	BUILD_BUG_ON(sizeof(struct xfrm_trans_cb) > sizeof(skb->cb));

	XFRM_TRANS_SKB_CB(skb)->finish = finish;
	XFRM_TRANS_SKB_CB(skb)->net = net;
	spin_lock_bh(&trans->queue_lock);
	__skb_queue_tail(&trans->queue, skb);
	spin_unlock_bh(&trans->queue_lock);
	schedule_work(&trans->work);
	return 0;
}
EXPORT_SYMBOL(xfrm_trans_queue_net);

int xfrm_trans_queue(struct sk_buff *skb,
		     int (*finish)(struct net *, struct sock *,
				   struct sk_buff *))
{
	return xfrm_trans_queue_net(dev_net(skb->dev), skb, finish);
}
EXPORT_SYMBOL(xfrm_trans_queue);

void __init xfrm_input_init(void)
{
	int err;
	int i;

	init_dummy_netdev(&xfrm_napi_dev);
	err = gro_cells_init(&gro_cells, &xfrm_napi_dev);
	if (err)
		gro_cells.cells = NULL;

	for_each_possible_cpu(i) {
		struct xfrm_trans_tasklet *trans;

		trans = &per_cpu(xfrm_trans_tasklet, i);
		spin_lock_init(&trans->queue_lock);
		__skb_queue_head_init(&trans->queue);
		INIT_WORK(&trans->work, xfrm_trans_reinject);
	}
}
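/*
 * Usage sketch for xfrm_trans_queue_net() (hypothetical finish
 * callback; the real callers live in the af-specific input paths):
 * defer the remainder of input processing to the per-CPU worker,
 * e.g. to avoid recursing back into the stack from IPsec input:
 *
 *	static int example_finish(struct net *net, struct sock *sk,
 *				  struct sk_buff *skb)
 *	{
 *		return netif_rx(skb);
 *	}
 *
 *	if (xfrm_trans_queue_net(net, skb, example_finish) < 0)
 *		kfree_skb(skb);
 */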