// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * xfrm_output.c - Common IPsec encapsulation code.
 *
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_route.h>
#include <net/ipv6_stubs.h>
#endif

#include "xfrm_inout.h"

static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb);
static int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);

static int xfrm_skb_check_space(struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);
        int nhead = dst->header_len + LL_RESERVED_SPACE(dst->dev)
                - skb_headroom(skb);
        int ntail = dst->dev->needed_tailroom - skb_tailroom(skb);

        if (nhead <= 0) {
                if (ntail <= 0)
                        return 0;
                nhead = 0;
        } else if (ntail < 0)
                ntail = 0;

        return pskb_expand_head(skb, nhead, ntail, GFP_ATOMIC);
}

/* Children define the path of the packet through the Linux networking
 * stack. Thus, destinations are stackable.
 */

static struct dst_entry *skb_dst_pop(struct sk_buff *skb)
{
        struct dst_entry *child = dst_clone(xfrm_dst_child(skb_dst(skb)));

        skb_dst_drop(skb);
        return child;
}

/* Add encapsulation header.
 *
 * The IP header will be moved forward to make space for the encapsulation
 * header.
 */
static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb)
{
        struct iphdr *iph = ip_hdr(skb);
        int ihl = iph->ihl * 4;

        skb_set_inner_transport_header(skb, skb_transport_offset(skb));

        skb_set_network_header(skb, -x->props.header_len);
        skb->mac_header = skb->network_header +
                          offsetof(struct iphdr, protocol);
        skb->transport_header = skb->network_header + ihl;
        __skb_pull(skb, ihl);
        memmove(skb_network_header(skb), iph, ihl);
        return 0;
}

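/* Sketch of the buffer after xfrm4_transport_output(), assuming an ESP
 * transform that reserved x->props.header_len bytes:
 *
 *      before:  [ IP hdr ][ payload ]
 *                         ^ skb->data
 *      after:   [ IP hdr ][ header_len gap ][ payload ]
 *                         ^ transport hdr   ^ skb->data
 *
 * The gap between the moved IP header and the payload is where
 * x->type->output() (e.g. esp_output()) builds the encapsulation header.
 */
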
#if IS_ENABLED(CONFIG_IPV6_MIP6)
static int mip6_rthdr_offset(struct sk_buff *skb, u8 **nexthdr, int type)
{
        const unsigned char *nh = skb_network_header(skb);
        unsigned int offset = sizeof(struct ipv6hdr);
        unsigned int packet_len;
        int found_rhdr = 0;

        packet_len = skb_tail_pointer(skb) - nh;
        *nexthdr = &ipv6_hdr(skb)->nexthdr;

        while (offset <= packet_len) {
                struct ipv6_opt_hdr *exthdr;

                switch (**nexthdr) {
                case NEXTHDR_HOP:
                        break;
                case NEXTHDR_ROUTING:
                        if (type == IPPROTO_ROUTING && offset + 3 <= packet_len) {
                                struct ipv6_rt_hdr *rt;

                                rt = (struct ipv6_rt_hdr *)(nh + offset);
                                if (rt->type != 0)
                                        return offset;
                        }
                        found_rhdr = 1;
                        break;
                case NEXTHDR_DEST:
                        /* HAO MUST NOT appear more than once.
                         * XXX: It would be better to scan to the end of the
                         * XXX: packet to check whether a HAO exists.
                         */
                        if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0) {
                                net_dbg_ratelimited("mip6: hao exists already, override\n");
                                return offset;
                        }

                        if (found_rhdr)
                                return offset;

                        break;
                default:
                        return offset;
                }

                if (offset + sizeof(struct ipv6_opt_hdr) > packet_len)
                        return -EINVAL;

                exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
                                                 offset);
                offset += ipv6_optlen(exthdr);
                if (offset > IPV6_MAXPLEN)
                        return -EINVAL;
                *nexthdr = &exthdr->nexthdr;
        }

        return -EINVAL;
}
#endif

#if IS_ENABLED(CONFIG_IPV6)
static int xfrm6_hdr_offset(struct xfrm_state *x, struct sk_buff *skb, u8 **prevhdr)
{
        switch (x->type->proto) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
        case IPPROTO_DSTOPTS:
        case IPPROTO_ROUTING:
                return mip6_rthdr_offset(skb, prevhdr, x->type->proto);
#endif
        default:
                break;
        }

        return ip6_find_1stfragopt(skb, prevhdr);
}
#endif

/* Add encapsulation header.
 *
 * The IP header and mutable extension headers will be moved forward to make
 * space for the encapsulation header.
 */
static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
        struct ipv6hdr *iph;
        u8 *prevhdr;
        int hdr_len;

        iph = ipv6_hdr(skb);
        skb_set_inner_transport_header(skb, skb_transport_offset(skb));

        hdr_len = xfrm6_hdr_offset(x, skb, &prevhdr);
        if (hdr_len < 0)
                return hdr_len;
        skb_set_mac_header(skb,
                           (prevhdr - x->props.header_len) - skb->data);
        skb_set_network_header(skb, -x->props.header_len);
        skb->transport_header = skb->network_header + hdr_len;
        __skb_pull(skb, hdr_len);
        memmove(ipv6_hdr(skb), iph, hdr_len);
        return 0;
#else
        WARN_ON_ONCE(1);
        return -EAFNOSUPPORT;
#endif
}

/* Add route optimization header space.
 *
 * The IP header and mutable extension headers will be moved forward to make
 * space for the route optimization header.
 */
static int xfrm6_ro_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
        struct ipv6hdr *iph;
        u8 *prevhdr;
        int hdr_len;

        iph = ipv6_hdr(skb);

        hdr_len = xfrm6_hdr_offset(x, skb, &prevhdr);
        if (hdr_len < 0)
                return hdr_len;
        skb_set_mac_header(skb,
                           (prevhdr - x->props.header_len) - skb->data);
        skb_set_network_header(skb, -x->props.header_len);
        skb->transport_header = skb->network_header + hdr_len;
        __skb_pull(skb, hdr_len);
        memmove(ipv6_hdr(skb), iph, hdr_len);

        return 0;
#else
        WARN_ON_ONCE(1);
        return -EAFNOSUPPORT;
#endif
}

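/* Note that xfrm6_ro_output() above is the Mobile IPv6 route-optimization
 * twin of xfrm6_transport_output(); the two differ only in that the
 * transport variant also records the inner transport header offset, which
 * is only of interest to the checksum/segmentation offload paths.
 */
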
/* Add encapsulation header.
 *
 * The top IP header will be constructed per draft-nikander-esp-beet-mode-06.txt.
 */
static int xfrm4_beet_encap_add(struct xfrm_state *x, struct sk_buff *skb)
{
        struct ip_beet_phdr *ph;
        struct iphdr *top_iph;
        int hdrlen, optlen;

        hdrlen = 0;
        optlen = XFRM_MODE_SKB_CB(skb)->optlen;
        if (unlikely(optlen))
                hdrlen += IPV4_BEET_PHMAXLEN - (optlen & 4);

        skb_set_network_header(skb, -x->props.header_len - hdrlen +
                               (XFRM_MODE_SKB_CB(skb)->ihl - sizeof(*top_iph)));
        if (x->sel.family != AF_INET6)
                skb->network_header += IPV4_BEET_PHMAXLEN;
        skb->mac_header = skb->network_header +
                          offsetof(struct iphdr, protocol);
        skb->transport_header = skb->network_header + sizeof(*top_iph);

        xfrm4_beet_make_header(skb);

        ph = __skb_pull(skb, XFRM_MODE_SKB_CB(skb)->ihl - hdrlen);

        top_iph = ip_hdr(skb);

        if (unlikely(optlen)) {
                if (WARN_ON(optlen < 0))
                        return -EINVAL;

                ph->padlen = 4 - (optlen & 4);
                ph->hdrlen = optlen / 8;
                ph->nexthdr = top_iph->protocol;
                if (ph->padlen)
                        memset(ph + 1, IPOPT_NOP, ph->padlen);

                top_iph->protocol = IPPROTO_BEETPH;
                top_iph->ihl = sizeof(struct iphdr) / 4;
        }

        top_iph->saddr = x->props.saddr.a4;
        top_iph->daddr = x->id.daddr.a4;

        return 0;
}

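/* Rough layout of the pseudo header built above when the inner IPv4
 * header carried options (optlen != 0):
 *
 *      [ outer IP hdr, 20 bytes, protocol = IPPROTO_BEETPH ]
 *      [ struct ip_beet_phdr: nexthdr, hdrlen, padlen      ]
 *      [ padlen bytes of IPOPT_NOP padding                 ]
 *      [ original IPv4 options                             ]
 *
 * This preserves BEET mode's fixed-size outer header while still
 * carrying variable-length inner options.
 */
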
/* Add encapsulation header.
 *
 * The top IP header will be constructed per RFC 2401.
 */
static int xfrm4_tunnel_encap_add(struct xfrm_state *x, struct sk_buff *skb)
{
        bool small_ipv6 = (skb->protocol == htons(ETH_P_IPV6)) && (skb->len <= IPV6_MIN_MTU);
        struct dst_entry *dst = skb_dst(skb);
        struct iphdr *top_iph;
        int flags;

        skb_set_inner_network_header(skb, skb_network_offset(skb));
        skb_set_inner_transport_header(skb, skb_transport_offset(skb));

        skb_set_network_header(skb, -x->props.header_len);
        skb->mac_header = skb->network_header +
                          offsetof(struct iphdr, protocol);
        skb->transport_header = skb->network_header + sizeof(*top_iph);
        top_iph = ip_hdr(skb);

        top_iph->ihl = 5;
        top_iph->version = 4;

        top_iph->protocol = xfrm_af2proto(skb_dst(skb)->ops->family);

        /* DS disclosure depends on XFRM_SA_XFLAG_DONT_ENCAP_DSCP */
        if (x->props.extra_flags & XFRM_SA_XFLAG_DONT_ENCAP_DSCP)
                top_iph->tos = 0;
        else
                top_iph->tos = XFRM_MODE_SKB_CB(skb)->tos;
        top_iph->tos = INET_ECN_encapsulate(top_iph->tos,
                                            XFRM_MODE_SKB_CB(skb)->tos);

        flags = x->props.flags;
        if (flags & XFRM_STATE_NOECN)
                IP_ECN_clear(top_iph);

        top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) || small_ipv6 ?
                            0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF));

        top_iph->ttl = ip4_dst_hoplimit(xfrm_dst_child(dst));

        top_iph->saddr = x->props.saddr.a4;
        top_iph->daddr = x->id.daddr.a4;
        ip_select_ident(dev_net(dst->dev), skb, NULL);

        return 0;
}

#if IS_ENABLED(CONFIG_IPV6)
static int xfrm6_tunnel_encap_add(struct xfrm_state *x, struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);
        struct ipv6hdr *top_iph;
        int dsfield;

        skb_set_inner_network_header(skb, skb_network_offset(skb));
        skb_set_inner_transport_header(skb, skb_transport_offset(skb));

        skb_set_network_header(skb, -x->props.header_len);
        skb->mac_header = skb->network_header +
                          offsetof(struct ipv6hdr, nexthdr);
        skb->transport_header = skb->network_header + sizeof(*top_iph);
        top_iph = ipv6_hdr(skb);

        top_iph->version = 6;

        memcpy(top_iph->flow_lbl, XFRM_MODE_SKB_CB(skb)->flow_lbl,
               sizeof(top_iph->flow_lbl));
        top_iph->nexthdr = xfrm_af2proto(skb_dst(skb)->ops->family);

        if (x->props.extra_flags & XFRM_SA_XFLAG_DONT_ENCAP_DSCP)
                dsfield = 0;
        else
                dsfield = XFRM_MODE_SKB_CB(skb)->tos;
        dsfield = INET_ECN_encapsulate(dsfield, XFRM_MODE_SKB_CB(skb)->tos);
        if (x->props.flags & XFRM_STATE_NOECN)
                dsfield &= ~INET_ECN_MASK;
        ipv6_change_dsfield(top_iph, 0, dsfield);
        top_iph->hop_limit = ip6_dst_hoplimit(xfrm_dst_child(dst));
        top_iph->saddr = *(struct in6_addr *)&x->props.saddr;
        top_iph->daddr = *(struct in6_addr *)&x->id.daddr;
        return 0;
}

static int xfrm6_beet_encap_add(struct xfrm_state *x, struct sk_buff *skb)
{
        struct ipv6hdr *top_iph;
        struct ip_beet_phdr *ph;
        int optlen, hdr_len;

        hdr_len = 0;
        optlen = XFRM_MODE_SKB_CB(skb)->optlen;
        if (unlikely(optlen))
                hdr_len += IPV4_BEET_PHMAXLEN - (optlen & 4);

        skb_set_network_header(skb, -x->props.header_len - hdr_len);
        if (x->sel.family != AF_INET6)
                skb->network_header += IPV4_BEET_PHMAXLEN;
        skb->mac_header = skb->network_header +
                          offsetof(struct ipv6hdr, nexthdr);
        skb->transport_header = skb->network_header + sizeof(*top_iph);
        ph = __skb_pull(skb, XFRM_MODE_SKB_CB(skb)->ihl - hdr_len);

        xfrm6_beet_make_header(skb);

        top_iph = ipv6_hdr(skb);
        if (unlikely(optlen)) {
                if (WARN_ON(optlen < 0))
                        return -EINVAL;

                ph->padlen = 4 - (optlen & 4);
                ph->hdrlen = optlen / 8;
                ph->nexthdr = top_iph->nexthdr;
                if (ph->padlen)
                        memset(ph + 1, IPOPT_NOP, ph->padlen);

                top_iph->nexthdr = IPPROTO_BEETPH;
        }

        top_iph->saddr = *(struct in6_addr *)&x->props.saddr;
        top_iph->daddr = *(struct in6_addr *)&x->id.daddr;
        return 0;
}
#endif

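/* DSCP/ECN policy shared by the tunnel encap functions above: the outer
 * DS field is inherited from the inner header unless
 * XFRM_SA_XFLAG_DONT_ENCAP_DSCP zeroes it, the outer ECN bits are derived
 * from the inner ones by INET_ECN_encapsulate() (an inner CE mark is
 * encapsulated as ECT(0) rather than leaked as-is), and XFRM_STATE_NOECN
 * clears the outer ECN field entirely.
 */
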
/* Add encapsulation header.
 *
 * On exit, the transport header will be set to the start of the
 * encapsulation header to be filled in by x->type->output and the mac
 * header will be set to the nextheader (protocol for IPv4) field of the
 * extension header directly preceding the encapsulation header, or in
 * its absence, that of the top IP header.
 * The value of the network header will always point to the top IP header
 * while skb->data will point to the payload.
 */
static int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
{
        int err;

        err = xfrm_inner_extract_output(x, skb);
        if (err)
                return err;

        IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;
        skb->protocol = htons(ETH_P_IP);

        switch (x->outer_mode.encap) {
        case XFRM_MODE_BEET:
                return xfrm4_beet_encap_add(x, skb);
        case XFRM_MODE_TUNNEL:
                return xfrm4_tunnel_encap_add(x, skb);
        }

        WARN_ON_ONCE(1);
        return -EOPNOTSUPP;
}

static int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
        int err;

        err = xfrm_inner_extract_output(x, skb);
        if (err)
                return err;

        skb->ignore_df = 1;
        skb->protocol = htons(ETH_P_IPV6);

        switch (x->outer_mode.encap) {
        case XFRM_MODE_BEET:
                return xfrm6_beet_encap_add(x, skb);
        case XFRM_MODE_TUNNEL:
                return xfrm6_tunnel_encap_add(x, skb);
        default:
                WARN_ON_ONCE(1);
                return -EOPNOTSUPP;
        }
#endif
        WARN_ON_ONCE(1);
        return -EAFNOSUPPORT;
}

static int xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb)
{
        switch (x->outer_mode.encap) {
        case XFRM_MODE_BEET:
        case XFRM_MODE_TUNNEL:
                if (x->outer_mode.family == AF_INET)
                        return xfrm4_prepare_output(x, skb);
                if (x->outer_mode.family == AF_INET6)
                        return xfrm6_prepare_output(x, skb);
                break;
        case XFRM_MODE_TRANSPORT:
                if (x->outer_mode.family == AF_INET)
                        return xfrm4_transport_output(x, skb);
                if (x->outer_mode.family == AF_INET6)
                        return xfrm6_transport_output(x, skb);
                break;
        case XFRM_MODE_ROUTEOPTIMIZATION:
                if (x->outer_mode.family == AF_INET6)
                        return xfrm6_ro_output(x, skb);
                WARN_ON_ONCE(1);
                break;
        default:
                WARN_ON_ONCE(1);
                break;
        }

        return -EOPNOTSUPP;
}

#if IS_ENABLED(CONFIG_NET_PKTGEN)
int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb)
{
        return xfrm_outer_mode_output(x, skb);
}
EXPORT_SYMBOL_GPL(pktgen_xfrm_outer_mode_output);
#endif

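/* Apply the transforms of the stacked xfrm states to the packet.
 * Roughly, for each non-tunnel state on the dst stack:
 *
 *      ensure head/tail room           (xfrm_skb_check_space)
 *      shift headers for the mode      (xfrm_outer_mode_output)
 *      validate state/lifetime/replay  (under x->lock)
 *      run the transform               (x->type->output, e.g. ESP)
 *
 * An async transform may return -EINPROGRESS; its completion callback
 * then re-enters the loop via xfrm_output_resume(), which jumps to the
 * "resume" label below with the transform's final error code.
 */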
static int xfrm_output_one(struct sk_buff *skb, int err)
{
        struct dst_entry *dst = skb_dst(skb);
        struct xfrm_state *x = dst->xfrm;
        struct net *net = xs_net(x);

        if (err <= 0 || x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
                goto resume;

        do {
                err = xfrm_skb_check_space(skb);
                if (err) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
                        goto error_nolock;
                }

                skb->mark = xfrm_smark_get(skb->mark, x);

                err = xfrm_outer_mode_output(x, skb);
                if (err) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEMODEERROR);
                        goto error_nolock;
                }

                spin_lock_bh(&x->lock);

                if (unlikely(x->km.state != XFRM_STATE_VALID)) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEINVALID);
                        err = -EINVAL;
                        goto error;
                }

                err = xfrm_state_check_expire(x);
                if (err) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEEXPIRED);
                        goto error;
                }

                err = xfrm_replay_overflow(x, skb);
                if (err) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATESEQERROR);
                        goto error;
                }

                x->curlft.bytes += skb->len;
                x->curlft.packets++;
                x->lastused = ktime_get_real_seconds();

                spin_unlock_bh(&x->lock);

                skb_dst_force(skb);
                if (!skb_dst(skb)) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
                        err = -EHOSTUNREACH;
                        goto error_nolock;
                }

                if (xfrm_offload(skb)) {
                        x->type_offload->encap(x, skb);
                } else {
                        /* Inner headers are invalid now. */
                        skb->encapsulation = 0;

                        err = x->type->output(x, skb);
                        if (err == -EINPROGRESS)
                                goto out;
                }

resume:
                if (err) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEPROTOERROR);
                        goto error_nolock;
                }

                dst = skb_dst_pop(skb);
                if (!dst) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
                        err = -EHOSTUNREACH;
                        goto error_nolock;
                }
                skb_dst_set(skb, dst);
                x = dst->xfrm;
        } while (x && !(x->outer_mode.flags & XFRM_MODE_FLAG_TUNNEL));

        return 0;

error:
        spin_unlock_bh(&x->lock);
error_nolock:
        kfree_skb(skb);
out:
        return err;
}

int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err)
{
        struct net *net = xs_net(skb_dst(skb)->xfrm);

        while (likely((err = xfrm_output_one(skb, err)) == 0)) {
                nf_reset_ct(skb);

                err = skb_dst(skb)->ops->local_out(net, sk, skb);
                if (unlikely(err != 1))
                        goto out;

                if (!skb_dst(skb)->xfrm)
                        return dst_output(net, sk, skb);

                err = nf_hook(skb_dst(skb)->ops->family,
                              NF_INET_POST_ROUTING, net, sk, skb,
                              NULL, skb_dst(skb)->dev, xfrm_output2);
                if (unlikely(err != 1))
                        goto out;
        }

        if (err == -EINPROGRESS)
                err = 0;

out:
        return err;
}
EXPORT_SYMBOL_GPL(xfrm_output_resume);

static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        return xfrm_output_resume(sk, skb, 1);
}

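/* A GSO packet cannot be transformed in one piece: every segment needs
 * its own encapsulation header and trailer. Segment in software first,
 * then feed each resulting skb through xfrm_output2() on its own.
 */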
static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        struct sk_buff *segs, *nskb;

        BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_GSO_CB_OFFSET);
        BUILD_BUG_ON(sizeof(*IP6CB(skb)) > SKB_GSO_CB_OFFSET);
        segs = skb_gso_segment(skb, 0);
        kfree_skb(skb);
        if (IS_ERR(segs))
                return PTR_ERR(segs);
        if (segs == NULL)
                return -EINVAL;

        skb_list_walk_safe(segs, segs, nskb) {
                int err;

                skb_mark_not_on_list(segs);
                err = xfrm_output2(net, sk, segs);

                if (unlikely(err)) {
                        kfree_skb_list(nskb);
                        return err;
                }
        }

        return 0;
}

/* For partial checksum offload, the outer header checksum is calculated
 * by software and the inner header checksum is calculated by hardware.
 * This requires hardware to know the inner packet type to calculate
 * the inner header checksum. Save the inner IP protocol here to avoid
 * traversing the packet in the vendor's xmit code.
 * For IPsec tunnel mode, save the IP protocol from the IP header of the
 * plain text packet. Otherwise, if skb->inner_protocol_type is
 * ENCAP_TYPE_IPPROTO, just save skb->inner_ipproto; in any other case,
 * get the IP protocol from the inner IP header.
 */
static void xfrm_get_inner_ipproto(struct sk_buff *skb, struct xfrm_state *x)
{
        struct xfrm_offload *xo = xfrm_offload(skb);
        const struct ethhdr *eth;

        if (!xo)
                return;

        if (x->outer_mode.encap == XFRM_MODE_TUNNEL) {
                switch (x->outer_mode.family) {
                case AF_INET:
                        xo->inner_ipproto = ip_hdr(skb)->protocol;
                        break;
                case AF_INET6:
                        xo->inner_ipproto = ipv6_hdr(skb)->nexthdr;
                        break;
                default:
                        break;
                }

                return;
        }

        /* non-Tunnel Mode */
        if (!skb->encapsulation)
                return;

        if (skb->inner_protocol_type == ENCAP_TYPE_IPPROTO) {
                xo->inner_ipproto = skb->inner_ipproto;
                return;
        }

        if (skb->inner_protocol_type != ENCAP_TYPE_ETHER)
                return;

        eth = (struct ethhdr *)skb_inner_mac_header(skb);

        switch (ntohs(eth->h_proto)) {
        case ETH_P_IPV6:
                xo->inner_ipproto = inner_ipv6_hdr(skb)->nexthdr;
                break;
        case ETH_P_IP:
                xo->inner_ipproto = inner_ip_hdr(skb)->protocol;
                break;
        }
}

int xfrm_output(struct sock *sk, struct sk_buff *skb)
{
        struct net *net = dev_net(skb_dst(skb)->dev);
        struct xfrm_state *x = skb_dst(skb)->xfrm;
        int err;

        switch (x->outer_mode.family) {
        case AF_INET:
                memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
                IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
                break;
        case AF_INET6:
                memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));

                IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
                break;
        }

        if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET) {
                if (!xfrm_dev_offload_ok(skb, x)) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
                        kfree_skb(skb);
                        return -EHOSTUNREACH;
                }

                return xfrm_output_resume(sk, skb, 0);
        }

        secpath_reset(skb);

        if (xfrm_dev_offload_ok(skb, x)) {
                struct sec_path *sp;

                sp = secpath_set(skb);
                if (!sp) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
                        kfree_skb(skb);
                        return -ENOMEM;
                }

                sp->olen++;
                sp->xvec[sp->len++] = x;
                xfrm_state_hold(x);

                xfrm_get_inner_ipproto(skb, x);
                skb->encapsulation = 1;

                if (skb_is_gso(skb)) {
                        if (skb->inner_protocol)
                                return xfrm_output_gso(net, sk, skb);

                        skb_shinfo(skb)->gso_type |= SKB_GSO_ESP;
                        goto out;
                }

                if (x->xso.dev && x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM)
                        goto out;
        } else {
                if (skb_is_gso(skb))
                        return xfrm_output_gso(net, sk, skb);
        }

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                err = skb_checksum_help(skb);
                if (err) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
                        kfree_skb(skb);
                        return err;
                }
        }

out:
        return xfrm_output2(net, sk, skb);
}
EXPORT_SYMBOL_GPL(xfrm_output);

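/* Tunnel-mode PMTU check: if the packet carries DF and it (or any of its
 * GSO segments) would exceed the route MTU, it is rejected with -EMSGSIZE
 * and the MTU is reported back, via the socket error queue
 * (xfrm_local_error) for locally generated traffic, or an ICMP
 * Fragmentation Needed message otherwise.
 */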
static int xfrm4_tunnel_check_size(struct sk_buff *skb)
{
        int mtu, ret = 0;

        if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE)
                goto out;

        if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->ignore_df)
                goto out;

        mtu = dst_mtu(skb_dst(skb));
        if ((!skb_is_gso(skb) && skb->len > mtu) ||
            (skb_is_gso(skb) &&
             !skb_gso_validate_network_len(skb, ip_skb_dst_mtu(skb->sk, skb)))) {
                skb->protocol = htons(ETH_P_IP);

                if (skb->sk)
                        xfrm_local_error(skb, mtu);
                else
                        icmp_send(skb, ICMP_DEST_UNREACH,
                                  ICMP_FRAG_NEEDED, htonl(mtu));
                ret = -EMSGSIZE;
        }
out:
        return ret;
}

static int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb)
{
        int err;

        if (x->outer_mode.encap == XFRM_MODE_BEET &&
            ip_is_fragment(ip_hdr(skb))) {
                net_warn_ratelimited("BEET mode doesn't support inner IPv4 fragments\n");
                return -EAFNOSUPPORT;
        }

        err = xfrm4_tunnel_check_size(skb);
        if (err)
                return err;

        XFRM_MODE_SKB_CB(skb)->protocol = ip_hdr(skb)->protocol;

        xfrm4_extract_header(skb);
        return 0;
}

#if IS_ENABLED(CONFIG_IPV6)
static int xfrm6_tunnel_check_size(struct sk_buff *skb)
{
        int mtu, ret = 0;
        struct dst_entry *dst = skb_dst(skb);

        if (skb->ignore_df)
                goto out;

        mtu = dst_mtu(dst);
        if (mtu < IPV6_MIN_MTU)
                mtu = IPV6_MIN_MTU;

        if ((!skb_is_gso(skb) && skb->len > mtu) ||
            (skb_is_gso(skb) &&
             !skb_gso_validate_network_len(skb, ip6_skb_dst_mtu(skb)))) {
                skb->dev = dst->dev;
                skb->protocol = htons(ETH_P_IPV6);

                if (xfrm6_local_dontfrag(skb->sk))
                        ipv6_stub->xfrm6_local_rxpmtu(skb, mtu);
                else if (skb->sk)
                        xfrm_local_error(skb, mtu);
                else
                        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
                ret = -EMSGSIZE;
        }
out:
        return ret;
}
#endif

static int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
        int err;

        err = xfrm6_tunnel_check_size(skb);
        if (err)
                return err;

        XFRM_MODE_SKB_CB(skb)->protocol = ipv6_hdr(skb)->nexthdr;

        xfrm6_extract_header(skb);
        return 0;
#else
        WARN_ON_ONCE(1);
        return -EAFNOSUPPORT;
#endif
}

static int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb)
{
        const struct xfrm_mode *inner_mode;

        if (x->sel.family == AF_UNSPEC)
                inner_mode = xfrm_ip2inner_mode(x,
                                xfrm_af2proto(skb_dst(skb)->ops->family));
        else
                inner_mode = &x->inner_mode;

        if (inner_mode == NULL)
                return -EAFNOSUPPORT;

        switch (inner_mode->family) {
        case AF_INET:
                return xfrm4_extract_output(x, skb);
        case AF_INET6:
                return xfrm6_extract_output(x, skb);
        }

        return -EAFNOSUPPORT;
}

void xfrm_local_error(struct sk_buff *skb, int mtu)
{
        unsigned int proto;
        struct xfrm_state_afinfo *afinfo;

        if (skb->protocol == htons(ETH_P_IP))
                proto = AF_INET;
        else if (skb->protocol == htons(ETH_P_IPV6) &&
                 skb->sk->sk_family == AF_INET6)
                proto = AF_INET6;
        else
                return;

        afinfo = xfrm_state_get_afinfo(proto);
        if (afinfo) {
                afinfo->local_error(skb, mtu);
                rcu_read_unlock();
        }
}
EXPORT_SYMBOL_GPL(xfrm_local_error);