// SPDX-License-Identifier: GPL-2.0
/*
 * xfrm_input.c
 *
 * Changes:
 *	YOSHIFUJI Hideaki @USAGI
 *		Split up af-specific portion
 *
 */

#include <linux/bottom_half.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/percpu.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ip_tunnels.h>
#include <net/ip6_tunnel.h>
#include <net/dst_metadata.h>

#include "xfrm_inout.h"

struct xfrm_trans_tasklet {
	struct work_struct work;
	spinlock_t queue_lock;
	struct sk_buff_head queue;
};

struct xfrm_trans_cb {
	union {
		struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
		struct inet6_skb_parm	h6;
#endif
	} header;
	int (*finish)(struct net *net, struct sock *sk, struct sk_buff *skb);
	struct net *net;
};

#define XFRM_TRANS_SKB_CB(__skb) ((struct xfrm_trans_cb *)&((__skb)->cb[0]))

static DEFINE_SPINLOCK(xfrm_input_afinfo_lock);
static struct xfrm_input_afinfo const __rcu *xfrm_input_afinfo[2][AF_INET6 + 1];

static struct gro_cells gro_cells;
static struct net_device xfrm_napi_dev;

static DEFINE_PER_CPU(struct xfrm_trans_tasklet, xfrm_trans_tasklet);

int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo)
{
	int err = 0;

	if (WARN_ON(afinfo->family > AF_INET6))
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_input_afinfo_lock);
	if (unlikely(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family]))
		err = -EEXIST;
	else
		rcu_assign_pointer(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family], afinfo);
	spin_unlock_bh(&xfrm_input_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_input_register_afinfo);

int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo)
{
	int err = 0;

	spin_lock_bh(&xfrm_input_afinfo_lock);
	if (likely(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family])) {
		if (unlikely(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family] != afinfo))
			err = -EINVAL;
		else
			RCU_INIT_POINTER(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family], NULL);
	}
	spin_unlock_bh(&xfrm_input_afinfo_lock);
	synchronize_rcu();
	return err;
}
EXPORT_SYMBOL(xfrm_input_unregister_afinfo);

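/* Note the asymmetric locking contract: on success this returns with
 * rcu_read_lock() held and the caller must release it once done with
 * the afinfo (xfrm_rcv_cb() below unlocks after invoking the callback);
 * on failure the lock has already been dropped and NULL is returned.
 */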
static const struct xfrm_input_afinfo *xfrm_input_get_afinfo(u8 family, bool is_ipip)
{
	const struct xfrm_input_afinfo *afinfo;

	if (WARN_ON_ONCE(family > AF_INET6))
		return NULL;

	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_input_afinfo[is_ipip][family]);
	if (unlikely(!afinfo))
		rcu_read_unlock();
	return afinfo;
}

static int xfrm_rcv_cb(struct sk_buff *skb, unsigned int family, u8 protocol,
		       int err)
{
	bool is_ipip = (protocol == IPPROTO_IPIP || protocol == IPPROTO_IPV6);
	const struct xfrm_input_afinfo *afinfo;
	int ret;

	afinfo = xfrm_input_get_afinfo(family, is_ipip);
	if (!afinfo)
		return -EAFNOSUPPORT;

	ret = afinfo->callback(skb, protocol, err);
	rcu_read_unlock();

	return ret;
}

struct sec_path *secpath_set(struct sk_buff *skb)
{
	struct sec_path *sp, *tmp = skb_ext_find(skb, SKB_EXT_SEC_PATH);

	sp = skb_ext_add(skb, SKB_EXT_SEC_PATH);
	if (!sp)
		return NULL;

	if (tmp) /* reused existing one (was COW'd if needed) */
		return sp;

	/* allocated new secpath */
	memset(sp->ovec, 0, sizeof(sp->ovec));
	sp->olen = 0;
	sp->len = 0;

	return sp;
}
EXPORT_SYMBOL(secpath_set);

/* Fetch the SPI and sequence number from the IPsec header */

int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq)
{
	int offset, offset_seq;
	int hlen;

	switch (nexthdr) {
	case IPPROTO_AH:
		hlen = sizeof(struct ip_auth_hdr);
		offset = offsetof(struct ip_auth_hdr, spi);
		offset_seq = offsetof(struct ip_auth_hdr, seq_no);
		break;
	case IPPROTO_ESP:
		hlen = sizeof(struct ip_esp_hdr);
		offset = offsetof(struct ip_esp_hdr, spi);
		offset_seq = offsetof(struct ip_esp_hdr, seq_no);
		break;
	case IPPROTO_COMP:
		if (!pskb_may_pull(skb, sizeof(struct ip_comp_hdr)))
			return -EINVAL;
		/* IPComp carries a 16-bit CPI rather than a 32-bit SPI */
		*spi = htonl(ntohs(*(__be16 *)(skb_transport_header(skb) + 2)));
		*seq = 0;
		return 0;
	default:
		return 1;
	}

	if (!pskb_may_pull(skb, hlen))
		return -EINVAL;

	*spi = *(__be32 *)(skb_transport_header(skb) + offset);
	*seq = *(__be32 *)(skb_transport_header(skb) + offset_seq);
	return 0;
}
EXPORT_SYMBOL(xfrm_parse_spi);

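/* BEET does not carry the inner addresses on the wire; the inner IPv4
 * header is rebuilt from the state's selector addresses. An optional
 * pseudo header (IPPROTO_BEETPH) conveys the length of any IPv4 options
 * that follow it and is stripped here.
 */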
static int xfrm4_remove_beet_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct iphdr *iph;
	int optlen = 0;
	int err = -EINVAL;

	if (unlikely(XFRM_MODE_SKB_CB(skb)->protocol == IPPROTO_BEETPH)) {
		struct ip_beet_phdr *ph;
		int phlen;

		if (!pskb_may_pull(skb, sizeof(*ph)))
			goto out;

		ph = (struct ip_beet_phdr *)skb->data;

		phlen = sizeof(*ph) + ph->padlen;
		optlen = ph->hdrlen * 8 + (IPV4_BEET_PHMAXLEN - phlen);
		if (optlen < 0 || optlen & 3 || optlen > 250)
			goto out;

		XFRM_MODE_SKB_CB(skb)->protocol = ph->nexthdr;

		if (!pskb_may_pull(skb, phlen))
			goto out;
		__skb_pull(skb, phlen);
	}

	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);

	xfrm4_beet_make_header(skb);

	iph = ip_hdr(skb);

	iph->ihl += optlen / 4;
	iph->tot_len = htons(skb->len);
	iph->daddr = x->sel.daddr.a4;
	iph->saddr = x->sel.saddr.a4;
	iph->check = 0;
	iph->check = ip_fast_csum(skb_network_header(skb), iph->ihl);
	err = 0;
out:
	return err;
}

static void ipip_ecn_decapsulate(struct sk_buff *skb)
{
	struct iphdr *inner_iph = ipip_hdr(skb);

	if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos))
		IP_ECN_set_ce(inner_iph);
}

static int xfrm4_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	int err = -EINVAL;

	if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPIP)
		goto out;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;

	err = skb_unclone(skb, GFP_ATOMIC);
	if (err)
		goto out;

	if (x->props.flags & XFRM_STATE_DECAP_DSCP)
		ipv4_copy_dscp(XFRM_MODE_SKB_CB(skb)->tos, ipip_hdr(skb));
	if (!(x->props.flags & XFRM_STATE_NOECN))
		ipip_ecn_decapsulate(skb);

	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);
	if (skb->mac_len)
		eth_hdr(skb)->h_proto = skb->protocol;

	err = 0;

out:
	return err;
}

static void ipip6_ecn_decapsulate(struct sk_buff *skb)
{
	struct ipv6hdr *inner_iph = ipipv6_hdr(skb);

	if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos))
		IP6_ECN_set_ce(skb, inner_iph);
}

static int xfrm6_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	int err = -EINVAL;

	if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPV6)
		goto out;
	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		goto out;

	err = skb_unclone(skb, GFP_ATOMIC);
	if (err)
		goto out;

	if (x->props.flags & XFRM_STATE_DECAP_DSCP)
		ipv6_copy_dscp(XFRM_MODE_SKB_CB(skb)->tos, ipipv6_hdr(skb));
	if (!(x->props.flags & XFRM_STATE_NOECN))
		ipip6_ecn_decapsulate(skb);

	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);
	if (skb->mac_len)
		eth_hdr(skb)->h_proto = skb->protocol;

	err = 0;

out:
	return err;
}

static int xfrm6_remove_beet_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipv6hdr *ip6h;
	int size = sizeof(struct ipv6hdr);
	int err;

	err = skb_cow_head(skb, size + skb->mac_len);
	if (err)
		goto out;

	__skb_push(skb, size);
	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);

	xfrm6_beet_make_header(skb);

	ip6h = ipv6_hdr(skb);
	ip6h->payload_len = htons(skb->len - size);
	ip6h->daddr = x->sel.daddr.in6;
	ip6h->saddr = x->sel.saddr.in6;
	err = 0;
out:
	return err;
}

/* Remove encapsulation header.
 *
 * The IP header will be moved over the top of the encapsulation
 * header.
 *
 * On entry, the transport header shall point to where the IP header
 * should be and the network header shall be set to where the IP
 * header currently is.  skb->data shall point to the start of the
 * payload.
 */
static int
xfrm_inner_mode_encap_remove(struct xfrm_state *x,
			     const struct xfrm_mode *inner_mode,
			     struct sk_buff *skb)
{
	switch (inner_mode->encap) {
	case XFRM_MODE_BEET:
		if (inner_mode->family == AF_INET)
			return xfrm4_remove_beet_encap(x, skb);
		if (inner_mode->family == AF_INET6)
			return xfrm6_remove_beet_encap(x, skb);
		break;
	case XFRM_MODE_TUNNEL:
		if (inner_mode->family == AF_INET)
			return xfrm4_remove_tunnel_encap(x, skb);
		if (inner_mode->family == AF_INET6)
			return xfrm6_remove_tunnel_encap(x, skb);
		break;
	}

	WARN_ON_ONCE(1);
	return -EOPNOTSUPP;
}

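/* Prepare the packet for the inner mode: record the outer header fields
 * in the skb control block, set skb->protocol from the inner family, and
 * strip the tunnel/BEET encapsulation. With an AF_UNSPEC selector
 * (inter-family tunnels), the inner mode is derived from the protocol
 * found in the decapsulated header.
 */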
static int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb)
{
	const struct xfrm_mode *inner_mode = &x->inner_mode;

	switch (x->outer_mode.family) {
	case AF_INET:
		xfrm4_extract_header(skb);
		break;
	case AF_INET6:
		xfrm6_extract_header(skb);
		break;
	default:
		WARN_ON_ONCE(1);
		return -EAFNOSUPPORT;
	}

	if (x->sel.family == AF_UNSPEC) {
		inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
		if (!inner_mode)
			return -EAFNOSUPPORT;
	}

	switch (inner_mode->family) {
	case AF_INET:
		skb->protocol = htons(ETH_P_IP);
		break;
	case AF_INET6:
		skb->protocol = htons(ETH_P_IPV6);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return xfrm_inner_mode_encap_remove(x, inner_mode, skb);
}

/* Remove encapsulation header.
 *
 * The IP header will be moved over the top of the encapsulation header.
 *
 * On entry, skb_transport_header() shall point to where the IP header
 * should be and skb_network_header() shall be set to where the IP header
 * currently is.  skb->data shall point to the start of the payload.
 */
static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
{
	int ihl = skb->data - skb_transport_header(skb);

	if (skb->transport_header != skb->network_header) {
		memmove(skb_transport_header(skb),
			skb_network_header(skb), ihl);
		skb->network_header = skb->transport_header;
	}
	ip_hdr(skb)->tot_len = htons(skb->len + ihl);
	skb_reset_transport_header(skb);
	return 0;
}

static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
	int ihl = skb->data - skb_transport_header(skb);

	if (skb->transport_header != skb->network_header) {
		memmove(skb_transport_header(skb),
			skb_network_header(skb), ihl);
		skb->network_header = skb->transport_header;
	}
	ipv6_hdr(skb)->payload_len = htons(skb->len + ihl -
					   sizeof(struct ipv6hdr));
	skb_reset_transport_header(skb);
	return 0;
#else
	WARN_ON_ONCE(1);
	return -EAFNOSUPPORT;
#endif
}

static int xfrm_inner_mode_input(struct xfrm_state *x,
				 const struct xfrm_mode *inner_mode,
				 struct sk_buff *skb)
{
	switch (inner_mode->encap) {
	case XFRM_MODE_BEET:
	case XFRM_MODE_TUNNEL:
		return xfrm_prepare_input(x, skb);
	case XFRM_MODE_TRANSPORT:
		if (inner_mode->family == AF_INET)
			return xfrm4_transport_input(x, skb);
		if (inner_mode->family == AF_INET6)
			return xfrm6_transport_input(x, skb);
		break;
	case XFRM_MODE_ROUTEOPTIMIZATION:
		WARN_ON_ONCE(1);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return -EOPNOTSUPP;
}

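/* Main receive path for IPsec packets.
 *
 * encap_type encodes how we were called: 0 is the plain protocol-handler
 * path, a positive value is an encapsulation type (e.g. ESP-in-UDP) that
 * must match the state's configured x->encap, -1 is resumption after
 * asynchronous crypto, and values below -1 indicate a call from the GRO
 * codepath.
 */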
int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
{
	const struct xfrm_state_afinfo *afinfo;
	struct net *net = dev_net(skb->dev);
	const struct xfrm_mode *inner_mode;
	int err;
	__be32 seq;
	__be32 seq_hi;
	struct xfrm_state *x = NULL;
	xfrm_address_t *daddr;
	u32 mark = skb->mark;
	unsigned int family = AF_UNSPEC;
	int decaps = 0;
	int async = 0;
	bool xfrm_gro = false;
	bool crypto_done = false;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp;

	if (encap_type < 0) {
		x = xfrm_input_state(skb);

		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			if (x->km.state == XFRM_STATE_ACQ)
				XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
			else
				XFRM_INC_STATS(net,
					       LINUX_MIB_XFRMINSTATEINVALID);

			if (encap_type == -1)
				dev_put(skb->dev);
			goto drop;
		}

		family = x->outer_mode.family;

		/* An encap_type of -1 indicates async resumption. */
		if (encap_type == -1) {
			async = 1;
			seq = XFRM_SKB_CB(skb)->seq.input.low;
			goto resume;
		}

		/* encap_type < -1 indicates a GRO call. */
		encap_type = 0;
		seq = XFRM_SPI_SKB_CB(skb)->seq;

		if (xo && (xo->flags & CRYPTO_DONE)) {
			crypto_done = true;
			family = XFRM_SPI_SKB_CB(skb)->family;

			if (!(xo->status & CRYPTO_SUCCESS)) {
				if (xo->status &
				    (CRYPTO_TRANSPORT_AH_AUTH_FAILED |
				     CRYPTO_TRANSPORT_ESP_AUTH_FAILED |
				     CRYPTO_TUNNEL_AH_AUTH_FAILED |
				     CRYPTO_TUNNEL_ESP_AUTH_FAILED)) {

					xfrm_audit_state_icvfail(x, skb,
								 x->type->proto);
					x->stats.integrity_failed++;
					XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
					goto drop;
				}

				if (xo->status & CRYPTO_INVALID_PROTOCOL) {
					XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
					goto drop;
				}

				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
				goto drop;
			}

			if (xfrm_parse_spi(skb, nexthdr, &spi, &seq)) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
				goto drop;
			}
		}

		goto lock;
	}

	family = XFRM_SPI_SKB_CB(skb)->family;

	/* If a tunnel is present, override skb->mark with the tunnel's i_key */
	switch (family) {
	case AF_INET:
		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
			mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4->parms.i_key);
		break;
	case AF_INET6:
		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
			mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6->parms.i_key);
		break;
	}

	sp = secpath_set(skb);
	if (!sp) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
		goto drop;
	}

	seq = 0;
	if (!spi && xfrm_parse_spi(skb, nexthdr, &spi, &seq)) {
		secpath_reset(skb);
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
		goto drop;
	}

	daddr = (xfrm_address_t *)(skb_network_header(skb) +
				   XFRM_SPI_SKB_CB(skb)->daddroff);
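	/* Walk the chain of nested transforms: look up the state by
	 * (mark, daddr, spi, proto, family), record it on the secpath
	 * (bounded by XFRM_MAX_DEPTH), run the transform, and repeat
	 * until a tunnel-mode state terminates the chain or parsing
	 * the next header yields no further SPI.
	 */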
	do {
		sp = skb_sec_path(skb);

		if (sp->len == XFRM_MAX_DEPTH) {
			secpath_reset(skb);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
			goto drop;
		}

		x = xfrm_state_lookup(net, mark, daddr, spi, nexthdr, family);
		if (x == NULL) {
			secpath_reset(skb);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
			xfrm_audit_state_notfound(skb, family, spi, seq);
			goto drop;
		}

		skb->mark = xfrm_smark_get(skb->mark, x);

		sp->xvec[sp->len++] = x;

		skb_dst_force(skb);
		if (!skb_dst(skb)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
			goto drop;
		}

lock:
		spin_lock(&x->lock);

		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			if (x->km.state == XFRM_STATE_ACQ)
				XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
			else
				XFRM_INC_STATS(net,
					       LINUX_MIB_XFRMINSTATEINVALID);
			goto drop_unlock;
		}

		if ((x->encap ? x->encap->encap_type : 0) != encap_type) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
			goto drop_unlock;
		}

		if (xfrm_replay_check(x, skb, seq)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
			goto drop_unlock;
		}

		if (xfrm_state_check_expire(x)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEEXPIRED);
			goto drop_unlock;
		}

		spin_unlock(&x->lock);

		if (xfrm_tunnel_check(skb, x, family)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
			goto drop;
		}

		seq_hi = htonl(xfrm_replay_seqhi(x, seq));

		XFRM_SKB_CB(skb)->seq.input.low = seq;
		XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;

		dev_hold(skb->dev);

		if (crypto_done)
			nexthdr = x->type_offload->input_tail(x, skb);
		else
			nexthdr = x->type->input(x, skb);

		if (nexthdr == -EINPROGRESS)
			return 0;
resume:
		dev_put(skb->dev);

		spin_lock(&x->lock);
		if (nexthdr < 0) {
			if (nexthdr == -EBADMSG) {
				xfrm_audit_state_icvfail(x, skb,
							 x->type->proto);
				x->stats.integrity_failed++;
			}
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
			goto drop_unlock;
		}

		/* only the first xfrm gets the encap type */
		encap_type = 0;

		if (xfrm_replay_recheck(x, skb, seq)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
			goto drop_unlock;
		}

		xfrm_replay_advance(x, seq);

		x->curlft.bytes += skb->len;
		x->curlft.packets++;
		x->lastused = ktime_get_real_seconds();

		spin_unlock(&x->lock);

		XFRM_MODE_SKB_CB(skb)->protocol = nexthdr;

		inner_mode = &x->inner_mode;

		if (x->sel.family == AF_UNSPEC) {
			inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
			if (inner_mode == NULL) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
				goto drop;
			}
		}

		if (xfrm_inner_mode_input(x, inner_mode, skb)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
			goto drop;
		}

		if (x->outer_mode.flags & XFRM_MODE_FLAG_TUNNEL) {
			decaps = 1;
			break;
		}

		/*
		 * We need the inner address.  However, we only get here for
		 * transport mode so the outer address is identical.
		 */
		daddr = &x->id.daddr;
		family = x->outer_mode.family;

		err = xfrm_parse_spi(skb, nexthdr, &spi, &seq);
		if (err < 0) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
			goto drop;
		}
		crypto_done = false;
	} while (!err);

	err = xfrm_rcv_cb(skb, family, x->type->proto, 0);
	if (err)
		goto drop;

	nf_reset_ct(skb);

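	/* Fully decapsulated tunnel-mode packets re-enter the stack through
	 * the GRO cells, which resubmit them via NAPI as if freshly
	 * received; transport mode hands the packet to the af-specific
	 * transport_finish() instead, unless GRO owns it, in which case it
	 * too goes back through the GRO cells.
	 */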
	if (decaps) {
		sp = skb_sec_path(skb);
		if (sp)
			sp->olen = 0;
		if (skb_valid_dst(skb))
			skb_dst_drop(skb);
		gro_cells_receive(&gro_cells, skb);
		return 0;
	} else {
		xo = xfrm_offload(skb);
		if (xo)
			xfrm_gro = xo->flags & XFRM_GRO;

		err = -EAFNOSUPPORT;
		rcu_read_lock();
		afinfo = xfrm_state_afinfo_get_rcu(x->inner_mode.family);
		if (likely(afinfo))
			err = afinfo->transport_finish(skb, xfrm_gro || async);
		rcu_read_unlock();
		if (xfrm_gro) {
			sp = skb_sec_path(skb);
			if (sp)
				sp->olen = 0;
			if (skb_valid_dst(skb))
				skb_dst_drop(skb);
			gro_cells_receive(&gro_cells, skb);
			return err;
		}

		return err;
	}

drop_unlock:
	spin_unlock(&x->lock);
drop:
	xfrm_rcv_cb(skb, family, x && x->type ? x->type->proto : nexthdr, -1);
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(xfrm_input);

int xfrm_input_resume(struct sk_buff *skb, int nexthdr)
{
	return xfrm_input(skb, nexthdr, 0, -1);
}
EXPORT_SYMBOL(xfrm_input_resume);

static void xfrm_trans_reinject(struct work_struct *work)
{
	struct xfrm_trans_tasklet *trans = container_of(work, struct xfrm_trans_tasklet, work);
	struct sk_buff_head queue;
	struct sk_buff *skb;

	__skb_queue_head_init(&queue);
	spin_lock_bh(&trans->queue_lock);
	skb_queue_splice_init(&trans->queue, &queue);
	spin_unlock_bh(&trans->queue_lock);

	local_bh_disable();
	while ((skb = __skb_dequeue(&queue)))
		XFRM_TRANS_SKB_CB(skb)->finish(XFRM_TRANS_SKB_CB(skb)->net,
					       NULL, skb);
	local_bh_enable();
}

int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
			 int (*finish)(struct net *, struct sock *,
				       struct sk_buff *))
{
	struct xfrm_trans_tasklet *trans;

	trans = this_cpu_ptr(&xfrm_trans_tasklet);

	if (skb_queue_len(&trans->queue) >= READ_ONCE(netdev_max_backlog))
		return -ENOBUFS;

	BUILD_BUG_ON(sizeof(struct xfrm_trans_cb) > sizeof(skb->cb));

	XFRM_TRANS_SKB_CB(skb)->finish = finish;
	XFRM_TRANS_SKB_CB(skb)->net = net;
	spin_lock_bh(&trans->queue_lock);
	__skb_queue_tail(&trans->queue, skb);
	spin_unlock_bh(&trans->queue_lock);
	schedule_work(&trans->work);
	return 0;
}
EXPORT_SYMBOL(xfrm_trans_queue_net);

int xfrm_trans_queue(struct sk_buff *skb,
		     int (*finish)(struct net *, struct sock *,
				   struct sk_buff *))
{
	return xfrm_trans_queue_net(dev_net(skb->dev), skb, finish);
}
EXPORT_SYMBOL(xfrm_trans_queue);

void __init xfrm_input_init(void)
{
	int err;
	int i;

	init_dummy_netdev(&xfrm_napi_dev);
	err = gro_cells_init(&gro_cells, &xfrm_napi_dev);
	if (err)
		gro_cells.cells = NULL;

	for_each_possible_cpu(i) {
		struct xfrm_trans_tasklet *trans;

		trans = &per_cpu(xfrm_trans_tasklet, i);
		spin_lock_init(&trans->queue_lock);
		__skb_queue_head_init(&trans->queue);
		INIT_WORK(&trans->work, xfrm_trans_reinject);
	}
}