// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *      IPV4 GSO/GRO offload support
 *      Linux INET implementation
 *
 *      GRE GSO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <net/gre.h>

static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
                                       netdev_features_t features)
{
        int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
        bool need_csum, offload_csum, gso_partial, need_ipsec;
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        u16 mac_offset = skb->mac_header;
        __be16 protocol = skb->protocol;
        u16 mac_len = skb->mac_len;
        int gre_offset, outer_hlen;

        if (!skb->encapsulation)
                goto out;

        if (unlikely(tnl_hlen < sizeof(struct gre_base_hdr)))
                goto out;

        if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
                goto out;

        /* setup inner skb. */
        skb->encapsulation = 0;
        SKB_GSO_CB(skb)->encap_level = 0;
        __skb_pull(skb, tnl_hlen);
        skb_reset_mac_header(skb);
        skb_set_network_header(skb, skb_inner_network_offset(skb));
        skb->mac_len = skb_inner_network_offset(skb);
        skb->protocol = skb->inner_protocol;

        need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM);
        skb->encap_hdr_csum = need_csum;

        features &= skb->dev->hw_enc_features;
        if (need_csum)
                features &= ~NETIF_F_SCTP_CRC;

        need_ipsec = skb_dst(skb) && dst_xfrm(skb_dst(skb));
        /* Try to offload checksum if possible */
        offload_csum = !!(need_csum && !need_ipsec &&
                          (skb->dev->features & NETIF_F_HW_CSUM));

        /* segment inner packet. */
        segs = skb_mac_gso_segment(skb, features);
        if (IS_ERR_OR_NULL(segs)) {
                skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
                                     mac_len);
                goto out;
        }

        gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);

        outer_hlen = skb_tnl_header_len(skb);
        gre_offset = outer_hlen - tnl_hlen;
        skb = segs;
        do {
                struct gre_base_hdr *greh;
                __sum16 *pcsum;

                /* Set up inner headers if we are offloading inner checksum */
                if (skb->ip_summed == CHECKSUM_PARTIAL) {
                        skb_reset_inner_headers(skb);
                        skb->encapsulation = 1;
                }

                skb->mac_len = mac_len;
                skb->protocol = protocol;

                __skb_push(skb, outer_hlen);
                skb_reset_mac_header(skb);
                skb_set_network_header(skb, mac_len);
                skb_set_transport_header(skb, gre_offset);

                if (!need_csum)
                        continue;

                greh = (struct gre_base_hdr *)skb_transport_header(skb);
                pcsum = (__sum16 *)(greh + 1);

                if (gso_partial && skb_is_gso(skb)) {
                        unsigned int partial_adj;

                        /* Adjust checksum to account for the fact that
                         * the partial checksum is based on actual size
                         * whereas headers should be based on MSS size.
                         */
                        partial_adj = skb->len + skb_headroom(skb) -
                                      SKB_GSO_CB(skb)->data_offset -
                                      skb_shinfo(skb)->gso_size;
                        *pcsum = ~csum_fold((__force __wsum)htonl(partial_adj));
                } else {
                        *pcsum = 0;
                }

                *(pcsum + 1) = 0;
                if (skb->encapsulation || !offload_csum) {
                        *pcsum = gso_make_checksum(skb, 0);
                } else {
                        skb->ip_summed = CHECKSUM_PARTIAL;
                        skb->csum_start = skb_transport_header(skb) - skb->head;
                        skb->csum_offset = sizeof(*greh);
                }
        } while ((skb = skb->next));
out:
        return segs;
}
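
/* Layout sketch of the GRE header handled above (illustration only; the
 * kernel defines just struct gre_base_hdr in <net/gre.h> and reaches the
 * optional fields by pointer arithmetic). Per RFC 2784/2890, the 16-bit
 * checksum sits immediately after the 4-byte base header, followed by a
 * 16-bit reserved word, with the optional key after that; this is why
 * gre_gso_segment() can locate the checksum as pcsum = (__sum16 *)(greh + 1)
 * and clear the reserved word with *(pcsum + 1) = 0.
 *
 *       0             15 16            31
 *      +---------------+---------------+
 *      | flags/version |   protocol    |  <- struct gre_base_hdr
 *      +---------------+---------------+
 *      |   checksum    |   reserved1   |  <- present iff GRE_CSUM
 *      +---------------+---------------+
 *      |              key              |  <- present iff GRE_KEY
 *      +-------------------------------+
 */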

static struct sk_buff *gre_gro_receive(struct list_head *head,
                                       struct sk_buff *skb)
{
        struct sk_buff *pp = NULL;
        struct sk_buff *p;
        const struct gre_base_hdr *greh;
        unsigned int hlen, grehlen;
        unsigned int off;
        int flush = 1;
        struct packet_offload *ptype;
        __be16 type;

        if (NAPI_GRO_CB(skb)->encap_mark)
                goto out;

        NAPI_GRO_CB(skb)->encap_mark = 1;

        off = skb_gro_offset(skb);
        hlen = off + sizeof(*greh);
        greh = skb_gro_header_fast(skb, off);
        if (skb_gro_header_hard(skb, hlen)) {
                greh = skb_gro_header_slow(skb, hlen, off);
                if (unlikely(!greh))
                        goto out;
        }

        /* Only support version 0 and the K (key) and C (csum) flags. Note
         * that although support for the S (seq#) flag could easily be added
         * for GRO, it cannot be enabled here because it is problematic for
         * GSO: a GRO'd packet may end up in the forwarding path and would
         * then require GSO support to break it up correctly.
         */
        if ((greh->flags & ~(GRE_KEY|GRE_CSUM)) != 0)
                goto out;

        /* We can only support GRE_CSUM if we can track the location of
         * the GRE header. In the case of FOU/GUE we cannot because the
         * outer UDP header displaces the GRE header, leaving us in a state
         * of limbo.
         */
        if ((greh->flags & GRE_CSUM) && NAPI_GRO_CB(skb)->is_fou)
                goto out;

        type = greh->protocol;

        rcu_read_lock();
        ptype = gro_find_receive_by_type(type);
        if (!ptype)
                goto out_unlock;

        grehlen = GRE_HEADER_SECTION;

        if (greh->flags & GRE_KEY)
                grehlen += GRE_HEADER_SECTION;

        if (greh->flags & GRE_CSUM)
                grehlen += GRE_HEADER_SECTION;

        hlen = off + grehlen;
        if (skb_gro_header_hard(skb, hlen)) {
                greh = skb_gro_header_slow(skb, hlen, off);
                if (unlikely(!greh))
                        goto out_unlock;
        }

        /* Don't bother verifying checksum if we're going to flush anyway. */
        if ((greh->flags & GRE_CSUM) && !NAPI_GRO_CB(skb)->flush) {
                if (skb_gro_checksum_simple_validate(skb))
                        goto out_unlock;

                skb_gro_checksum_try_convert(skb, IPPROTO_GRE,
                                             null_compute_pseudo);
        }

        list_for_each_entry(p, head, list) {
                const struct gre_base_hdr *greh2;

                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;

                /* The following checks are needed to ensure that only
                 * packets from the same tunnel are considered for
                 * aggregation. The criteria for "the same tunnel" are:
                 * 1) same version (we only support version 0 here)
                 * 2) same protocol (we only support ETH_P_IP for now)
                 * 3) same set of flags
                 * 4) same key if the key field is present.
                 */
                greh2 = (struct gre_base_hdr *)(p->data + off);

                if (greh2->flags != greh->flags ||
                    greh2->protocol != greh->protocol) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }
                if (greh->flags & GRE_KEY) {
                        /* compare keys */
                        if (*(__be32 *)(greh2 + 1) != *(__be32 *)(greh + 1)) {
                                NAPI_GRO_CB(p)->same_flow = 0;
                                continue;
                        }
                }
        }

        skb_gro_pull(skb, grehlen);

        /* Adjust NAPI_GRO_CB(skb)->csum after skb_gro_pull() */
        skb_gro_postpull_rcsum(skb, greh, grehlen);

        pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
        flush = 0;

out_unlock:
        rcu_read_unlock();
out:
        skb_gro_flush_final(skb, pp, flush);

        return pp;
}
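
/* A minimal sketch (gre_flags_hlen is hypothetical, not a kernel helper):
 * the flag-dependent header length computed step by step in
 * gre_gro_receive() above, and again in gre_gro_complete() below, is
 * equivalent to the following, given that GRE_HEADER_SECTION is 4 bytes:
 *
 *      static unsigned int gre_flags_hlen(const struct gre_base_hdr *greh)
 *      {
 *              unsigned int hlen = GRE_HEADER_SECTION; // 4-byte base header
 *
 *              if (greh->flags & GRE_KEY)
 *                      hlen += GRE_HEADER_SECTION;     // 4-byte key
 *              if (greh->flags & GRE_CSUM)
 *                      hlen += GRE_HEADER_SECTION;     // csum + reserved1
 *              return hlen;
 *      }
 */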

static int gre_gro_complete(struct sk_buff *skb, int nhoff)
{
        struct gre_base_hdr *greh = (struct gre_base_hdr *)(skb->data + nhoff);
        struct packet_offload *ptype;
        unsigned int grehlen = sizeof(*greh);
        int err = -ENOENT;
        __be16 type;

        skb->encapsulation = 1;
        skb_shinfo(skb)->gso_type = SKB_GSO_GRE;

        type = greh->protocol;
        if (greh->flags & GRE_KEY)
                grehlen += GRE_HEADER_SECTION;

        if (greh->flags & GRE_CSUM)
                grehlen += GRE_HEADER_SECTION;

        rcu_read_lock();
        ptype = gro_find_complete_by_type(type);
        if (ptype)
                err = ptype->callbacks.gro_complete(skb, nhoff + grehlen);

        rcu_read_unlock();

        skb_set_inner_mac_header(skb, nhoff + grehlen);

        return err;
}

static const struct net_offload gre_offload = {
        .callbacks = {
                .gso_segment = gre_gso_segment,
                .gro_receive = gre_gro_receive,
                .gro_complete = gre_gro_complete,
        },
};

static int __init gre_offload_init(void)
{
        int err;

        err = inet_add_offload(&gre_offload, IPPROTO_GRE);
#if IS_ENABLED(CONFIG_IPV6)
        if (err)
                return err;

        err = inet6_add_offload(&gre_offload, IPPROTO_GRE);
        if (err)
                inet_del_offload(&gre_offload, IPPROTO_GRE);
#endif

        return err;
}
device_initcall(gre_offload_init);
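
/* Usage note (descriptive, based on the generic offload infrastructure):
 * inet_add_offload() and inet6_add_offload() store the entry in per-family
 * tables indexed by IP protocol number (inet_offloads[] / inet6_offloads[]),
 * where the GRO/GSO core looks up these callbacks for IPPROTO_GRE packets.
 * The #if IS_ENABLED(CONFIG_IPV6) block above unwinds the IPv4 registration
 * when the IPv6 one fails, so on IPv6-enabled kernels the initcall either
 * registers both families or neither.
 */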