// SPDX-License-Identifier: GPL-2.0-only
/*
 * IPV6 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * ESP GRO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/gro.h>
#include <net/gso.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_route.h>
#include <net/ipv6.h>
#include <linux/icmpv6.h>

static __u16 esp6_nexthdr_esp_offset(struct ipv6hdr *ipv6_hdr, int nhlen)
{
	int off = sizeof(struct ipv6hdr);
	struct ipv6_opt_hdr *exthdr;

	/* ESP or ESPINUDP */
	if (likely(ipv6_hdr->nexthdr == NEXTHDR_ESP ||
		   ipv6_hdr->nexthdr == NEXTHDR_UDP))
		return offsetof(struct ipv6hdr, nexthdr);

	while (off < nhlen) {
		exthdr = (void *)ipv6_hdr + off;
		if (exthdr->nexthdr == NEXTHDR_ESP)
			return off;

		off += ipv6_optlen(exthdr);
	}

	return 0;
}

static struct sk_buff *esp6_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	int offset = skb_gro_offset(skb);
	struct xfrm_offload *xo;
	struct xfrm_state *x;
	int encap_type = 0;
	__be32 seq;
	__be32 spi;
	int nhoff;

	if (NAPI_GRO_CB(skb)->proto == IPPROTO_UDP)
		encap_type = UDP_ENCAP_ESPINUDP;

	if (!pskb_pull(skb, offset))
		return NULL;

	if (xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq) != 0)
		goto out;

	xo = xfrm_offload(skb);
	if (!xo || !(xo->flags & CRYPTO_DONE)) {
		struct sec_path *sp = secpath_set(skb);

		if (!sp)
			goto out;

		if (sp->len == XFRM_MAX_DEPTH)
			goto out_reset;

		x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
				      (xfrm_address_t *)&ipv6_hdr(skb)->daddr,
				      spi, IPPROTO_ESP, AF_INET6);

		if (unlikely(x && x->dir && x->dir != XFRM_SA_DIR_IN)) {
			/* non-offload path will record the error and audit log */
			xfrm_state_put(x);
			x = NULL;
		}

		if (!x)
			goto out_reset;

		skb->mark = xfrm_smark_get(skb->mark, x);

		sp->xvec[sp->len++] = x;
		sp->olen++;

		xo = xfrm_offload(skb);
		if (!xo)
			goto out_reset;
	}

	xo->flags |= XFRM_GRO;

	nhoff = esp6_nexthdr_esp_offset(ipv6_hdr(skb), offset);
	if (!nhoff)
		goto out;

	IP6CB(skb)->nhoff = nhoff;
	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
	XFRM_SPI_SKB_CB(skb)->family = AF_INET6;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
	XFRM_SPI_SKB_CB(skb)->seq = seq;

	/* We don't need to handle errors from xfrm_input, it does all
	 * the error handling and frees the resources on error.
	 */
	xfrm_input(skb, IPPROTO_ESP, spi, encap_type);

	return ERR_PTR(-EINPROGRESS);
out_reset:
	secpath_reset(skb);
out:
	skb_push(skb, offset);
	NAPI_GRO_CB(skb)->same_flow = 0;
	NAPI_GRO_CB(skb)->flush = 1;

	return NULL;
}
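/*
 * Note on the receive path above (a reader's summary, not upstream
 * documentation): once xfrm_input() is handed the skb, ownership passes
 * to the xfrm layer, so esp6_gro_receive() returns ERR_PTR(-EINPROGRESS)
 * to tell the GRO core the packet was consumed and must not be merged or
 * passed up again.  The error labels undo the pskb_pull() and set
 * same_flow = 0 / flush = 1 so GRO flushes the packet through the normal
 * (non-offload) receive path instead of aggregating it.
 */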
static void esp6_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	u8 proto = iph->nexthdr;

	skb_push(skb, -skb_network_offset(skb));

	if (x->outer_mode.encap == XFRM_MODE_TRANSPORT) {
		__be16 frag;

		ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &proto, &frag);
	}

	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	xo->proto = proto;
}

static struct sk_buff *xfrm6_tunnel_gso_segment(struct xfrm_state *x,
						struct sk_buff *skb,
						netdev_features_t features)
{
	__be16 type = x->inner_mode.family == AF_INET ? htons(ETH_P_IP)
						      : htons(ETH_P_IPV6);

	return skb_eth_gso_segment(skb, features, type);
}

static struct sk_buff *xfrm6_transport_gso_segment(struct xfrm_state *x,
						   struct sk_buff *skb,
						   netdev_features_t features)
{
	const struct net_offload *ops;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct xfrm_offload *xo = xfrm_offload(skb);

	skb->transport_header += x->props.header_len;
	ops = rcu_dereference(inet6_offloads[xo->proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}

static struct sk_buff *xfrm6_beet_gso_segment(struct xfrm_state *x,
					      struct sk_buff *skb,
					      netdev_features_t features)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	const struct net_offload *ops;
	u8 proto = xo->proto;

	skb->transport_header += x->props.header_len;

	if (x->sel.family != AF_INET6) {
		skb->transport_header -=
			(sizeof(struct ipv6hdr) - sizeof(struct iphdr));

		if (proto == IPPROTO_BEETPH) {
			struct ip_beet_phdr *ph =
				(struct ip_beet_phdr *)skb->data;

			skb->transport_header += ph->hdrlen * 8;
			proto = ph->nexthdr;
		} else {
			skb->transport_header -= IPV4_BEET_PHMAXLEN;
		}

		if (proto == IPPROTO_TCP)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
	} else {
		__be16 frag;

		skb->transport_header +=
			ipv6_skip_exthdr(skb, 0, &proto, &frag);
	}

	if (proto == IPPROTO_IPIP)
		skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6;

	__skb_pull(skb, skb_transport_offset(skb));
	ops = rcu_dereference(inet6_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}

static struct sk_buff *xfrm6_outer_mode_gso_segment(struct xfrm_state *x,
						    struct sk_buff *skb,
						    netdev_features_t features)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		return xfrm6_tunnel_gso_segment(x, skb, features);
	case XFRM_MODE_TRANSPORT:
		return xfrm6_transport_gso_segment(x, skb, features);
	case XFRM_MODE_BEET:
		return xfrm6_beet_gso_segment(x, skb, features);
	}

	return ERR_PTR(-EOPNOTSUPP);
}
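/*
 * Mode dispatch overview (a summary of the helpers above, not upstream
 * documentation): tunnel mode re-segments the inner frame as a complete
 * IPv4 or IPv6 packet via skb_eth_gso_segment(); transport mode skips
 * the space reserved for the ESP header (x->props.header_len) and defers
 * to the inner protocol's gso_segment callback; BEET additionally
 * corrects for the IPv4/IPv6 header size difference and an optional
 * pseudo header (IPPROTO_BEETPH) before doing the same.  Any other
 * encap mode fails with -EOPNOTSUPP.
 */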
static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct xfrm_state *x;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp;

	if (!xo)
		return ERR_PTR(-EINVAL);

	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
		return ERR_PTR(-EINVAL);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	aead = x->data;
	esph = ip_esp_hdr(skb);

	if (esph->spi != x->id.spi)
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

	skb->encap_hdr_csum = 1;

	if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev)
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK |
					    NETIF_F_SCTP_CRC);
	else if (!(features & NETIF_F_HW_ESP_TX_CSUM))
		esp_features = features & ~(NETIF_F_CSUM_MASK |
					    NETIF_F_SCTP_CRC);

	xo->flags |= XFRM_GSO_SEGMENT;

	return xfrm6_outer_mode_gso_segment(x, skb, esp_features);
}

static int esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
		return -EINVAL;

	if (!(xo->flags & CRYPTO_DONE))
		skb->ip_summed = CHECKSUM_NONE;

	return esp6_input_done2(skb, 0);
}
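/*
 * esp6_xmit() below serves both true hardware offload and the software
 * fallback (a reader's summary): if the device lacks NETIF_F_HW_ESP or
 * the state is bound to a different device, CRYPTO_FALLBACK is set and
 * the packet is encrypted in software via esp6_output_head() and
 * esp6_output_tail().  Sequence number accounting advances xo->seq.low
 * once per GSO segment and carries any wrap into xo->seq.hi, from which
 * the 64-bit esp.seqno is assembled.  payload_len is set to 0 when the
 * length exceeds IPV6_MAXPLEN, following the jumbogram convention.
 */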
static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
	int len;
	int err;
	int alen;
	int blksize;
	struct xfrm_offload *xo;
	struct crypto_aead *aead;
	struct esp_info esp;
	bool hw_offload = true;
	__u32 seq;

	esp.inplace = true;

	xo = xfrm_offload(skb);

	if (!xo)
		return -EINVAL;

	if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev) {
		xo->flags |= CRYPTO_FALLBACK;
		hw_offload = false;
	}

	esp.proto = xo->proto;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	/* XXX: Add support for tfc padding here. */

	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	if (!hw_offload || !skb_is_gso(skb)) {
		esp.nfrags = esp6_output_head(x, skb, &esp);
		if (esp.nfrags < 0)
			return esp.nfrags;
	}

	seq = xo->seq.low;

	esp.esph = ip_esp_hdr(skb);
	esp.esph->spi = x->id.spi;

	skb_push(skb, -skb_network_offset(skb));

	if (xo->flags & XFRM_GSO_SEGMENT) {
		esp.esph->seq_no = htonl(seq);

		if (!skb_is_gso(skb))
			xo->seq.low++;
		else
			xo->seq.low += skb_shinfo(skb)->gso_segs;
	}

	if (xo->seq.low < seq)
		xo->seq.hi++;

	esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));

	len = skb->len - sizeof(struct ipv6hdr);
	if (len > IPV6_MAXPLEN)
		len = 0;

	ipv6_hdr(skb)->payload_len = htons(len);

	if (hw_offload) {
		if (!skb_ext_add(skb, SKB_EXT_SEC_PATH))
			return -ENOMEM;

		xo = xfrm_offload(skb);
		if (!xo)
			return -EINVAL;

		xo->flags |= XFRM_XMIT;
		return 0;
	}

	err = esp6_output_tail(x, skb, &esp);
	if (err)
		return err;

	secpath_reset(skb);

	if (skb_needs_linearize(skb, skb->dev->features) &&
	    __skb_linearize(skb))
		return -ENOMEM;
	return 0;
}

static const struct net_offload esp6_offload = {
	.callbacks = {
		.gro_receive = esp6_gro_receive,
		.gso_segment = esp6_gso_segment,
	},
};

static const struct xfrm_type_offload esp6_type_offload = {
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.input_tail	= esp6_input_tail,
	.xmit		= esp6_xmit,
	.encap		= esp6_gso_encap,
};

static int __init esp6_offload_init(void)
{
	if (xfrm_register_type_offload(&esp6_type_offload, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type offload\n", __func__);
		return -EAGAIN;
	}

	return inet6_add_offload(&esp6_offload, IPPROTO_ESP);
}

static void __exit esp6_offload_exit(void)
{
	xfrm_unregister_type_offload(&esp6_type_offload, AF_INET6);
	inet6_del_offload(&esp6_offload, IPPROTO_ESP);
}

module_init(esp6_offload_init);
module_exit(esp6_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET6, XFRM_PROTO_ESP);
MODULE_DESCRIPTION("IPV6 GSO/GRO offload support");