/* SPDX-License-Identifier: GPL-2.0-or-later */

#ifndef _NET_GSO_H
#define _NET_GSO_H

#include <linux/skbuff.h>

/* Keeps track of mac header offset relative to skb->head.
 * It is useful for TSO of Tunneling protocol. e.g. GRE.
 * For non-tunnel skb it points to skb_mac_header() and for
 * tunnel skb it points to outer mac header.
 * Keeps track of level of encapsulation of network headers.
 */
struct skb_gso_cb {
	union {
		int	mac_offset;
		int	data_offset;
	};
	int	encap_level;
	__wsum	csum;
	__u16	csum_start;
};

/* NOTE(review): offset into skb->cb[] where the GSO control block lives;
 * presumably chosen so it does not clash with earlier users of the cb
 * area during segmentation — confirm against skb->cb layout.
 */
#define SKB_GSO_CB_OFFSET	32
#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_GSO_CB_OFFSET))

/* Number of bytes between the saved outer mac header offset
 * (SKB_GSO_CB()->mac_offset) and the current (inner) mac header,
 * i.e. the length of the tunnel headers in front of the inner frame.
 */
static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
{
	return (skb_mac_header(inner_skb) - inner_skb->head) -
	       SKB_GSO_CB(inner_skb)->mac_offset;
}

/* Expand skb headroom by @extra bytes via pskb_expand_head() and adjust
 * SKB_GSO_CB()->mac_offset by the headroom delta so the saved outer mac
 * offset stays valid after skb->head is reallocated.
 * Returns 0 on success or the negative error from pskb_expand_head().
 */
static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
{
	int new_headroom, headroom;
	int ret;

	headroom = skb_headroom(skb);
	ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
	if (ret)
		return ret;

	new_headroom = skb_headroom(skb);
	SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
	return 0;
}

/* Reset the GSO checksum state to @res, recording the current checksum
 * start offset (relative to skb->head) alongside it.
 */
static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
{
	/* Do not update partial checksums if remote checksum is enabled. */
	if (skb->remcsum_offload)
		return;

	SKB_GSO_CB(skb)->csum = res;
	SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
}

/* Compute the checksum for a gso segment. First compute the checksum value
 * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and
 * then add in skb->csum (checksum from csum_start to end of packet).
 * skb->csum and csum_start are then updated to reflect the checksum of the
 * resultant packet starting from the transport header -- the resultant
 * checksum is in the res argument (i.e. normally zero or ~ of checksum of
 * a pseudo header).
 */
static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
{
	unsigned char *csum_start = skb_transport_header(skb);
	int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
	__wsum partial = SKB_GSO_CB(skb)->csum;

	SKB_GSO_CB(skb)->csum = res;
	SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;

	return csum_fold(csum_partial(csum_start, plen, partial));
}

struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
				  netdev_features_t features, bool tx_path);

/* Segment @skb on the transmit path (calls __skb_gso_segment() with
 * tx_path == true).
 */
static inline struct sk_buff *skb_gso_segment(struct sk_buff *skb,
					      netdev_features_t features)
{
	return __skb_gso_segment(skb, features, true);
}

struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb,
				    netdev_features_t features, __be16 type);

struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features);

bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);

bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);

/* Undo header adjustments made before a failed segmentation attempt:
 * restore @protocol, mark the skb as encapsulated, push the @pulled_hlen
 * bytes back onto the head, and rewrite the transport/mac/network header
 * offsets and mac_len from @mac_offset and @mac_len.
 */
static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
					int pulled_hlen, u16 mac_offset,
					int mac_len)
{
	skb->protocol = protocol;
	skb->encapsulation = 1;
	skb_push(skb, pulled_hlen);
	skb_reset_transport_header(skb);
	/* skb->mac_header is an offset from skb->head, so a raw store of
	 * the saved offset is equivalent to skb_set_mac_header().
	 */
	skb->mac_header = mac_offset;
	skb->network_header = skb->mac_header + mac_len;
	skb->mac_len = mac_len;
}

#endif /* _NET_GSO_H */