/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * ESP GRO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/udp.h>

static struct sk_buff **esp4_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	int offset = skb_gro_offset(skb);
	struct xfrm_offload *xo;
	struct xfrm_state *x;
	__be32 seq;
	__be32 spi;
	int err;

	skb_pull(skb, offset);

	err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq);
	if (err)
		goto out;

	xo = xfrm_offload(skb);
	if (!xo || !(xo->flags & CRYPTO_DONE)) {
		err = secpath_set(skb);
		if (err)
			goto out;

		if (skb->sp->len == XFRM_MAX_DEPTH)
			goto out;

		x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
				      (xfrm_address_t *)&ip_hdr(skb)->daddr,
				      spi, IPPROTO_ESP, AF_INET);
		if (!x)
			goto out;

		skb->sp->xvec[skb->sp->len++] = x;
		skb->sp->olen++;

		xo = xfrm_offload(skb);
		if (!xo) {
			xfrm_state_put(x);
			goto out;
		}
	}

	xo->flags |= XFRM_GRO;

	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
	XFRM_SPI_SKB_CB(skb)->seq = seq;

	/* We don't need to handle errors from xfrm_input, it does all
	 * the error handling and frees the resources on error. */
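	/* An encap_type below -1 tells xfrm_input() that this is a GRO
	 * call, so it resumes from the GRO receive path rather than
	 * treating the value as a UDP encapsulation type.
	 */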
	xfrm_input(skb, IPPROTO_ESP, spi, -2);

	return ERR_PTR(-EINPROGRESS);
out:
	skb_push(skb, offset);
	NAPI_GRO_CB(skb)->same_flow = 0;
	NAPI_GRO_CB(skb)->flush = 1;

	return NULL;
}

static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct iphdr *iph = ip_hdr(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	int proto = iph->protocol;

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	xo->proto = proto;
}

static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct xfrm_state *x;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!xo)
		return ERR_PTR(-EINVAL);

	x = skb->sp->xvec[skb->sp->len - 1];
	aead = x->data;
	esph = ip_esp_hdr(skb);

	if (esph->spi != x->id.spi)
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

	skb->encap_hdr_csum = 1;

	/* This packet cannot be offloaded to the device, so strip the
	 * SG and checksum features and let the software fallback
	 * segment it fully.
	 */
	if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
	    (x->xso.dev != skb->dev))
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);

	xo->flags |= XFRM_GSO_SEGMENT;

	return x->outer_mode->gso_segment(x, skb, esp_features);
}

static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
		return -EINVAL;

	if (!(xo->flags & CRYPTO_DONE))
		skb->ip_summed = CHECKSUM_NONE;

	return esp_input_done2(skb, 0);
}

static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
	int err;
	int alen;
	int blksize;
	struct xfrm_offload *xo;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;
	bool hw_offload = true;
	__u32 seq;

	esp.inplace = true;

	xo = xfrm_offload(skb);

	if (!xo)
		return -EINVAL;

	if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
	    (x->xso.dev != skb->dev)) {
		xo->flags |= CRYPTO_FALLBACK;
		hw_offload = false;
	}

	esp.proto = xo->proto;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	/* XXX: Add support for tfc padding here. */
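	/* ESP trailer layout per RFC 4303: padding, pad-length byte,
	 * next-header byte, then the ICV (alen bytes).  clen is the
	 * payload length including the two trailer bytes, padded to the
	 * cipher block size (rounded up to the mandatory 4-byte alignment).
	 */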
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	if (!hw_offload || !skb_is_gso(skb)) {
		esp.nfrags = esp_output_head(x, skb, &esp);
		if (esp.nfrags < 0)
			return esp.nfrags;
	}

	seq = xo->seq.low;

	esph = esp.esph;
	esph->spi = x->id.spi;

	skb_push(skb, -skb_network_offset(skb));

	if (xo->flags & XFRM_GSO_SEGMENT) {
		esph->seq_no = htonl(seq);

		if (!skb_is_gso(skb))
			xo->seq.low++;
		else
			xo->seq.low += skb_shinfo(skb)->gso_segs;
	}

	esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));

	ip_hdr(skb)->tot_len = htons(skb->len);
	ip_send_check(ip_hdr(skb));

	if (hw_offload)
		return 0;

	err = esp_output_tail(x, skb, &esp);
	if (err)
		return err;

	secpath_reset(skb);

	return 0;
}

static const struct net_offload esp4_offload = {
	.callbacks = {
		.gro_receive = esp4_gro_receive,
		.gso_segment = esp4_gso_segment,
	},
};

static const struct xfrm_type_offload esp_type_offload = {
	.description	= "ESP4 OFFLOAD",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.input_tail	= esp_input_tail,
	.xmit		= esp_xmit,
	.encap		= esp4_gso_encap,
};

static int __init esp4_offload_init(void)
{
	if (xfrm_register_type_offload(&esp_type_offload, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type offload\n", __func__);
		return -EAGAIN;
	}

	return inet_add_offload(&esp4_offload, IPPROTO_ESP);
}

static void __exit esp4_offload_exit(void)
{
	if (xfrm_unregister_type_offload(&esp_type_offload, AF_INET) < 0)
		pr_info("%s: can't remove xfrm type offload\n", __func__);

	inet_del_offload(&esp4_offload, IPPROTO_ESP);
}

module_init(esp4_offload_init);
module_exit(esp4_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP);