/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	GRE GSO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <net/gre.h>

static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	netdev_features_t enc_features;
	int ghl;
	struct gre_base_hdr *greh;
	u16 mac_offset = skb->mac_header;
	int mac_len = skb->mac_len;
	__be16 protocol = skb->protocol;
	int tnl_hlen;
	bool csum;

	if (unlikely(skb_shinfo(skb)->gso_type &
				~(SKB_GSO_TCPV4 |
				  SKB_GSO_TCPV6 |
				  SKB_GSO_UDP |
				  SKB_GSO_DODGY |
				  SKB_GSO_TCP_ECN |
				  SKB_GSO_GRE |
				  SKB_GSO_GRE_CSUM |
				  SKB_GSO_IPIP |
				  SKB_GSO_SIT)))
		goto out;

	if (!skb->encapsulation)
		goto out;

	if (unlikely(!pskb_may_pull(skb, sizeof(*greh))))
		goto out;

	greh = (struct gre_base_hdr *)skb_transport_header(skb);

	ghl = skb_inner_mac_header(skb) - skb_transport_header(skb);
	if (unlikely(ghl < sizeof(*greh)))
		goto out;

	csum = !!(greh->flags & GRE_CSUM);
	if (csum)
		skb->encap_hdr_csum = 1;

	/* setup inner skb. */
	skb->protocol = greh->protocol;
	skb->encapsulation = 0;

	if (unlikely(!pskb_may_pull(skb, ghl)))
		goto out;

	__skb_pull(skb, ghl);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);

	/* segment inner packet. */
	enc_features = skb->dev->hw_enc_features & features;
	segs = skb_mac_gso_segment(skb, enc_features);
	if (IS_ERR_OR_NULL(segs)) {
		skb_gso_error_unwind(skb, protocol, ghl, mac_offset, mac_len);
		goto out;
	}

	skb = segs;
	tnl_hlen = skb_tnl_header_len(skb);
	do {
		__skb_push(skb, ghl);
		if (csum) {
			__be32 *pcsum;

			if (skb_has_shared_frag(skb)) {
				int err;

				err = __skb_linearize(skb);
				if (err) {
					kfree_skb_list(segs);
					segs = ERR_PTR(err);
					goto out;
				}
			}

			skb_reset_transport_header(skb);

			greh = (struct gre_base_hdr *)
			    skb_transport_header(skb);
			pcsum = (__be32 *)(greh + 1);
			*pcsum = 0;
			*(__sum16 *)pcsum = gso_make_checksum(skb, 0);
		}
		__skb_push(skb, tnl_hlen - ghl);

		skb_reset_inner_headers(skb);
		skb->encapsulation = 1;

		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
		skb->mac_len = mac_len;
		skb->protocol = protocol;
	} while ((skb = skb->next));
out:
	return segs;
}

static struct sk_buff **gre_gro_receive(struct sk_buff **head,
					struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	const struct gre_base_hdr *greh;
	unsigned int hlen, grehlen;
	unsigned int off;
	int flush = 1;
	struct packet_offload *ptype;
	__be16 type;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*greh);
	greh = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		greh = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!greh))
			goto out;
	}

	/* Only support version 0 and K (key), C (csum) flags. Note that
	 * although the support for the S (seq#) flag can be added easily
	 * for GRO, this is problematic for GSO hence can not be enabled
	 * here because a GRO pkt may end up in the forwarding path, thus
	 * requiring GSO support to break it up correctly.
	 */
	if ((greh->flags & ~(GRE_KEY|GRE_CSUM)) != 0)
		goto out;

	type = greh->protocol;

	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);
	if (!ptype)
		goto out_unlock;

	grehlen = GRE_HEADER_SECTION;

	if (greh->flags & GRE_KEY)
		grehlen += GRE_HEADER_SECTION;

	if (greh->flags & GRE_CSUM)
		grehlen += GRE_HEADER_SECTION;

	hlen = off + grehlen;
	if (skb_gro_header_hard(skb, hlen)) {
		greh = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!greh))
			goto out_unlock;
	}

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if ((greh->flags & GRE_CSUM) && !NAPI_GRO_CB(skb)->flush) {
		if (skb_gro_checksum_simple_validate(skb))
			goto out_unlock;

		skb_gro_checksum_try_convert(skb, IPPROTO_GRE, 0,
					     null_compute_pseudo);
	}

	flush = 0;

	for (p = *head; p; p = p->next) {
		const struct gre_base_hdr *greh2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		/* The following checks are needed to ensure only pkts
		 * from the same tunnel are considered for aggregation.
		 * The criteria for "the same tunnel" includes:
		 * 1) same version (we only support version 0 here)
		 * 2) same protocol (we only support ETH_P_IP for now)
		 * 3) same set of flags
		 * 4) same key if the key field is present.
		 */
		greh2 = (struct gre_base_hdr *)(p->data + off);

		if (greh2->flags != greh->flags ||
		    greh2->protocol != greh->protocol) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
		if (greh->flags & GRE_KEY) {
			/* compare keys */
			if (*(__be32 *)(greh2+1) != *(__be32 *)(greh+1)) {
				NAPI_GRO_CB(p)->same_flow = 0;
				continue;
			}
		}
	}

	skb_gro_pull(skb, grehlen);

	/* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull() */
	skb_gro_postpull_rcsum(skb, greh, grehlen);

	pp = ptype->callbacks.gro_receive(head, skb);

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}

static int gre_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct gre_base_hdr *greh = (struct gre_base_hdr *)(skb->data + nhoff);
	struct packet_offload *ptype;
	unsigned int grehlen = sizeof(*greh);
	int err = -ENOENT;
	__be16 type;

	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type = SKB_GSO_GRE;

	type = greh->protocol;
	if (greh->flags & GRE_KEY)
		grehlen += GRE_HEADER_SECTION;

	if (greh->flags & GRE_CSUM)
		grehlen += GRE_HEADER_SECTION;

	rcu_read_lock();
	ptype = gro_find_complete_by_type(type);
	if (ptype)
		err = ptype->callbacks.gro_complete(skb, nhoff + grehlen);

	rcu_read_unlock();

	skb_set_inner_mac_header(skb, nhoff + grehlen);

	return err;
}

static const struct net_offload gre_offload = {
	.callbacks = {
		.gso_segment = gre_gso_segment,
		.gro_receive = gre_gro_receive,
		.gro_complete = gre_gro_complete,
	},
};

static int __init gre_offload_init(void)
{
	return inet_add_offload(&gre_offload, IPPROTO_GRE);
}
device_initcall(gre_offload_init);
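
/*
 * Illustrative only: the block below is a minimal, standalone userspace sketch,
 * guarded out with #if 0 so it is never built as part of this file. It mirrors
 * the grehlen computation used by gre_gro_receive() and gre_gro_complete()
 * above, assuming the RFC 2784/2890 layout: a 4-byte base header followed by
 * optional 4-byte checksum and key sections. All SKETCH_* names are this
 * sketch's own illustrative definitions, not identifiers from <net/gre.h>.
 */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define SKETCH_GRE_CSUM		htons(0x8000)	/* C bit: checksum section present */
#define SKETCH_GRE_KEY		htons(0x2000)	/* K bit: key section present */
#define SKETCH_GRE_SECTION	4		/* each optional section is 4 bytes */

/* Matches the wire layout of the fixed part of a GRE header. */
struct sketch_gre_base_hdr {
	uint16_t flags;		/* C/K/S bits and version, network byte order */
	uint16_t protocol;	/* EtherType of the encapsulated payload */
};

/* Return the full GRE header length implied by the flags in the base header. */
static unsigned int sketch_gre_hlen(const struct sketch_gre_base_hdr *greh)
{
	unsigned int hlen = sizeof(*greh);

	if (greh->flags & SKETCH_GRE_CSUM)
		hlen += SKETCH_GRE_SECTION;
	if (greh->flags & SKETCH_GRE_KEY)
		hlen += SKETCH_GRE_SECTION;
	return hlen;
}

int main(void)
{
	struct sketch_gre_base_hdr h = {
		.flags = SKETCH_GRE_CSUM | SKETCH_GRE_KEY,
		.protocol = htons(0x0800),	/* ETH_P_IP */
	};

	/* 4 (base) + 4 (checksum) + 4 (key) = 12 bytes */
	printf("GRE header length: %u\n", sketch_gre_hlen(&h));
	return 0;
}
#endif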