// SPDX-License-Identifier: GPL-2.0-only
/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * ESP GRO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/gro.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/udp.h>

static struct sk_buff *esp4_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	int offset = skb_gro_offset(skb);
	struct xfrm_offload *xo;
	struct xfrm_state *x;
	__be32 seq;
	__be32 spi;

	if (!pskb_pull(skb, offset))
		return NULL;

	if (xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq) != 0)
		goto out;

	xo = xfrm_offload(skb);
	if (!xo || !(xo->flags & CRYPTO_DONE)) {
		struct sec_path *sp = secpath_set(skb);

		if (!sp)
			goto out;

		if (sp->len == XFRM_MAX_DEPTH)
			goto out_reset;

		x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
				      (xfrm_address_t *)&ip_hdr(skb)->daddr,
				      spi, IPPROTO_ESP, AF_INET);
		if (!x)
			goto out_reset;

		skb->mark = xfrm_smark_get(skb->mark, x);

		sp->xvec[sp->len++] = x;
		sp->olen++;

		xo = xfrm_offload(skb);
		if (!xo)
			goto out_reset;
	}

	xo->flags |= XFRM_GRO;

	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
	XFRM_SPI_SKB_CB(skb)->seq = seq;

	/* We don't need to handle errors from xfrm_input, it does all
	 * the error handling and frees the resources on error. */
	xfrm_input(skb, IPPROTO_ESP, spi, -2);

	return ERR_PTR(-EINPROGRESS);
out_reset:
	secpath_reset(skb);
out:
	skb_push(skb, offset);
	NAPI_GRO_CB(skb)->same_flow = 0;
	NAPI_GRO_CB(skb)->flush = 1;

	return NULL;
}

static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct iphdr *iph = ip_hdr(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	int proto = iph->protocol;

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	xo->proto = proto;
}

static struct sk_buff *xfrm4_tunnel_gso_segment(struct xfrm_state *x,
						struct sk_buff *skb,
						netdev_features_t features)
{
	__be16 type = x->inner_mode.family == AF_INET6 ? htons(ETH_P_IPV6)
						       : htons(ETH_P_IP);

	return skb_eth_gso_segment(skb, features, type);
}

static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x,
						   struct sk_buff *skb,
						   netdev_features_t features)
{
	const struct net_offload *ops;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct xfrm_offload *xo = xfrm_offload(skb);

	skb->transport_header += x->props.header_len;
	ops = rcu_dereference(inet_offloads[xo->proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}

static struct sk_buff *xfrm4_beet_gso_segment(struct xfrm_state *x,
					      struct sk_buff *skb,
					      netdev_features_t features)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	const struct net_offload *ops;
	u8 proto = xo->proto;

	skb->transport_header += x->props.header_len;

	if (x->sel.family != AF_INET6) {
		if (proto == IPPROTO_BEETPH) {
			struct ip_beet_phdr *ph =
				(struct ip_beet_phdr *)skb->data;

			skb->transport_header += ph->hdrlen * 8;
			proto = ph->nexthdr;
		} else {
			skb->transport_header -= IPV4_BEET_PHMAXLEN;
		}
	} else {
		__be16 frag;

		skb->transport_header +=
			ipv6_skip_exthdr(skb, 0, &proto, &frag);
		if (proto == IPPROTO_TCP)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
	}

	if (proto == IPPROTO_IPV6)
		skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;

	__skb_pull(skb, skb_transport_offset(skb));
	ops = rcu_dereference(inet_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}

static struct sk_buff *xfrm4_outer_mode_gso_segment(struct xfrm_state *x,
						    struct sk_buff *skb,
						    netdev_features_t features)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		return xfrm4_tunnel_gso_segment(x, skb, features);
	case XFRM_MODE_TRANSPORT:
		return xfrm4_transport_gso_segment(x, skb, features);
	case XFRM_MODE_BEET:
		return xfrm4_beet_gso_segment(x, skb, features);
	}

	return ERR_PTR(-EOPNOTSUPP);
}

static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct xfrm_state *x;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp;

	if (!xo)
		return ERR_PTR(-EINVAL);

	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
		return ERR_PTR(-EINVAL);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	aead = x->data;
	esph = ip_esp_hdr(skb);

	if (esph->spi != x->id.spi)
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

	skb->encap_hdr_csum = 1;

	if ((!(skb->dev->gso_partial_features & NETIF_F_HW_ESP) &&
	     !(features & NETIF_F_HW_ESP)) || x->xso.dev != skb->dev)
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK |
					    NETIF_F_SCTP_CRC);
	else if (!(features & NETIF_F_HW_ESP_TX_CSUM) &&
		 !(skb->dev->gso_partial_features & NETIF_F_HW_ESP_TX_CSUM))
		esp_features = features & ~(NETIF_F_CSUM_MASK |
					    NETIF_F_SCTP_CRC);

	xo->flags |= XFRM_GSO_SEGMENT;

	return xfrm4_outer_mode_gso_segment(x, skb, esp_features);
}

static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
		return -EINVAL;

	if (!(xo->flags & CRYPTO_DONE))
		skb->ip_summed = CHECKSUM_NONE;

	return esp_input_done2(skb, 0);
}

static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
	int err;
	int alen;
	int blksize;
	struct xfrm_offload *xo;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;
	bool hw_offload = true;
	__u32 seq;

	esp.inplace = true;

	xo = xfrm_offload(skb);

	if (!xo)
		return -EINVAL;

	if ((!(features & NETIF_F_HW_ESP) &&
	     !(skb->dev->gso_partial_features & NETIF_F_HW_ESP)) ||
	    x->xso.dev != skb->dev) {
		xo->flags |= CRYPTO_FALLBACK;
		hw_offload = false;
	}

	esp.proto = xo->proto;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	/* XXX: Add support for tfc padding here. */

	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	if (!hw_offload || !skb_is_gso(skb)) {
		esp.nfrags = esp_output_head(x, skb, &esp);
		if (esp.nfrags < 0)
			return esp.nfrags;
	}

	seq = xo->seq.low;

	esph = esp.esph;
	esph->spi = x->id.spi;

	skb_push(skb, -skb_network_offset(skb));

	if (xo->flags & XFRM_GSO_SEGMENT) {
		esph->seq_no = htonl(seq);

		if (!skb_is_gso(skb))
			xo->seq.low++;
		else
			xo->seq.low += skb_shinfo(skb)->gso_segs;
	}

	if (xo->seq.low < seq)
		xo->seq.hi++;

	esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));

	ip_hdr(skb)->tot_len = htons(skb->len);
	ip_send_check(ip_hdr(skb));

	if (hw_offload) {
		if (!skb_ext_add(skb, SKB_EXT_SEC_PATH))
			return -ENOMEM;

		xo = xfrm_offload(skb);
		if (!xo)
			return -EINVAL;

		xo->flags |= XFRM_XMIT;
		return 0;
	}

	err = esp_output_tail(x, skb, &esp);
	if (err)
		return err;

	secpath_reset(skb);

	return 0;
}

static const struct net_offload esp4_offload = {
	.callbacks = {
		.gro_receive = esp4_gro_receive,
		.gso_segment = esp4_gso_segment,
	},
};

static const struct xfrm_type_offload esp_type_offload = {
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.input_tail	= esp_input_tail,
	.xmit		= esp_xmit,
	.encap		= esp4_gso_encap,
};

static int __init esp4_offload_init(void)
{
	if (xfrm_register_type_offload(&esp_type_offload, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type offload\n", __func__);
		return -EAGAIN;
	}

	return inet_add_offload(&esp4_offload, IPPROTO_ESP);
}

static void __exit esp4_offload_exit(void)
{
	xfrm_unregister_type_offload(&esp_type_offload, AF_INET);
	inet_del_offload(&esp4_offload, IPPROTO_ESP);
}

module_init(esp4_offload_init);
module_exit(esp4_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP);
MODULE_DESCRIPTION("IPV4 GSO/GRO offload support");