Lines Matching full:skb

20 			/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
36 /* This indicates where we are processing relative to skb->data. */
39 /* This is non-zero if the packet cannot be merged with the new skb. */
71 /* Free the skb? */
100 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
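Every helper below goes through this accessor: the per-packet GRO state lives in skb->cb for the duration of the receive pass. A minimal sketch (kernel context, <net/gro.h>; my_gro_receive() and its wiring are hypothetical) of a handler touching that state:

static struct sk_buff *my_gro_receive(struct list_head *head,
                                      struct sk_buff *skb)
{
        /* Per-packet GRO state is carved out of skb->cb. */
        if (NAPI_GRO_CB(skb)->encap_mark)
                NAPI_GRO_CB(skb)->flush |= 1;   /* refuse nested encapsulation */

        return NULL;                            /* nothing completed yet */
}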
103 static inline int gro_recursion_inc_test(struct sk_buff *skb)
105 return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
111 struct sk_buff *skb)
113 if (unlikely(gro_recursion_inc_test(skb))) {
114 NAPI_GRO_CB(skb)->flush |= 1;
118 return cb(head, skb);
126 struct sk_buff *skb)
128 if (unlikely(gro_recursion_inc_test(skb))) {
129 NAPI_GRO_CB(skb)->flush |= 1;
133 return cb(sk, head, skb);
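The two wrappers above (call_gro_receive() and its socket-taking variant in the mainline header) bound the nesting depth of encapsulated gro_receive handlers: each hop bumps recursion_counter, and once GRO_RECURSION_LIMIT is reached the packet is flagged for flush instead of being handed to the inner callback. A hedged sketch of a tunnel handler delegating through the wrapper (the mytun/inner names and the fixed 8-byte header length are hypothetical; header presence checks omitted):

#define MYTUN_HLEN      8       /* hypothetical fixed tunnel header length */

/* Hypothetical inner protocol handler, defined elsewhere. */
struct sk_buff *inner_gro_receive(struct list_head *head, struct sk_buff *skb);

static struct sk_buff *mytun_gro_receive(struct list_head *head,
                                         struct sk_buff *skb)
{
        skb_gro_pull(skb, MYTUN_HLEN);  /* step past the outer header */

        /* Returns NULL and sets the flush bit if the recursion limit has
         * been hit; otherwise calls the inner handler.
         */
        return call_gro_receive(inner_gro_receive, head, skb);
}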
136 static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
138 return NAPI_GRO_CB(skb)->data_offset;
141 static inline unsigned int skb_gro_len(const struct sk_buff *skb)
143 return skb->len - NAPI_GRO_CB(skb)->data_offset;
146 static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
148 NAPI_GRO_CB(skb)->data_offset += len;
151 static inline void *skb_gro_header_fast(const struct sk_buff *skb,
154 return NAPI_GRO_CB(skb)->frag0 + offset;
157 static inline bool skb_gro_may_pull(const struct sk_buff *skb,
160 return likely(hlen <= NAPI_GRO_CB(skb)->frag0_len);
163 static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
166 if (!pskb_may_pull(skb, hlen))
169 return skb->data + offset;
172 static inline void *skb_gro_header(struct sk_buff *skb, unsigned int hlen,
177 ptr = skb_gro_header_fast(skb, offset);
178 if (!skb_gro_may_pull(skb, hlen))
179 ptr = skb_gro_header_slow(skb, hlen, offset);
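skb_gro_header() wraps the common two-step access pattern: skb_gro_header_fast() returns a pointer into frag0 when skb_gro_may_pull() says the whole header fits there, and skb_gro_header_slow() falls back to pskb_may_pull() otherwise. A hedged sketch of the usual call site in a gro_receive handler (struct myprotohdr is hypothetical; advancing past the header is shown in the sketch after skb_gro_postpull_rcsum() below):

struct myprotohdr {             /* hypothetical header layout */
        __be16  type;
        __be16  len;
};

static struct sk_buff *myproto_gro_receive(struct list_head *head,
                                           struct sk_buff *skb)
{
        unsigned int off = skb_gro_offset(skb);
        unsigned int hlen = off + sizeof(struct myprotohdr);
        struct myprotohdr *mh;

        /* frag0 fast path if possible, pskb_may_pull() fallback if not;
         * NULL means the packet is too short for our header.
         */
        mh = skb_gro_header(skb, hlen, off);
        if (unlikely(!mh)) {
                NAPI_GRO_CB(skb)->flush = 1;
                return NULL;
        }

        /* ... parse mh, look up a matching held packet, etc. ... */
        return NULL;
}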
183 static inline int skb_gro_receive_network_offset(const struct sk_buff *skb)
185 return NAPI_GRO_CB(skb)->network_offsets[NAPI_GRO_CB(skb)->encap_mark];
188 static inline void *skb_gro_network_header(const struct sk_buff *skb)
190 if (skb_gro_may_pull(skb, skb_gro_offset(skb)))
191 return skb_gro_header_fast(skb, skb_gro_receive_network_offset(skb));
193 return skb->data + skb_gro_receive_network_offset(skb);
196 static inline __wsum inet_gro_compute_pseudo(const struct sk_buff *skb,
199 const struct iphdr *iph = skb_gro_network_header(skb);
202 skb_gro_len(skb), proto, 0);
205 static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
208 if (NAPI_GRO_CB(skb)->csum_valid)
209 NAPI_GRO_CB(skb)->csum = wsum_negate(csum_partial(start, len,
210 wsum_negate(NAPI_GRO_CB(skb)->csum)));
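Continuing the sketch above: once the header has been parsed, the handler advances the GRO cursor and, when a CHECKSUM_COMPLETE value is being tracked (csum_valid), strips that header's contribution from the running checksum so NAPI_GRO_CB(skb)->csum still matches the bytes after the new offset.

        /* mh is the pointer returned by skb_gro_header() above. */
        skb_gro_pull(skb, sizeof(*mh));                 /* advance data_offset */
        skb_gro_postpull_rcsum(skb, mh, sizeof(*mh));   /* keep csum in sync */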
218 __sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
220 static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
222 return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
225 static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
229 return ((skb->ip_summed != CHECKSUM_PARTIAL ||
230 skb_checksum_start_offset(skb) <
231 skb_gro_offset(skb)) &&
232 !skb_at_gro_remcsum_start(skb) &&
233 NAPI_GRO_CB(skb)->csum_cnt == 0 &&
237 static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
240 if (NAPI_GRO_CB(skb)->csum_valid &&
241 !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
244 NAPI_GRO_CB(skb)->csum = psum;
246 return __skb_gro_checksum_complete(skb);
249 static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
251 if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
253 NAPI_GRO_CB(skb)->csum_cnt--;
255 /* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
259 __skb_incr_checksum_unnecessary(skb);
263 #define __skb_gro_checksum_validate(skb, proto, zero_okay, check, \
267 if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \
268 __ret = __skb_gro_checksum_validate_complete(skb, \
269 compute_pseudo(skb, proto)); \
271 skb_gro_incr_csum_unnecessary(skb); \
275 #define skb_gro_checksum_validate(skb, proto, compute_pseudo) \
276 __skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)
278 #define skb_gro_checksum_validate_zero_check(skb, proto, check, \
280 __skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)
282 #define skb_gro_checksum_simple_validate(skb) \
283 __skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
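The validation macros fold the checksum in software only when it is actually needed: not when a CHECKSUM_PARTIAL start already covers the data from the current offset, not at a remote-checksum start, and not while hardware-validated levels (csum_cnt) remain; whenever validation succeeds or is unnecessary, skb_gro_incr_csum_unnecessary() records one more validated level. A hedged sketch of the simple variant in an encapsulation handler (myencap is hypothetical):

static struct sk_buff *myencap_gro_receive(struct list_head *head,
                                           struct sk_buff *skb)
{
        /* Non-zero return: a software fold was required and failed, so
         * stop merging and let the normal receive path see the packet.
         */
        if (skb_gro_checksum_simple_validate(skb)) {
                NAPI_GRO_CB(skb)->flush = 1;
                return NULL;
        }

        /* ... parse the encapsulation header and recurse via
         * call_gro_receive() as in the earlier sketch ...
         */
        return NULL;
}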
285 static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
287 return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
288 !NAPI_GRO_CB(skb)->csum_valid);
291 static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
294 NAPI_GRO_CB(skb)->csum = ~pseudo;
295 NAPI_GRO_CB(skb)->csum_valid = 1;
298 #define skb_gro_checksum_try_convert(skb, proto, compute_pseudo) \
300 if (__skb_gro_checksum_convert_check(skb)) \
301 __skb_gro_checksum_convert(skb, \
302 compute_pseudo(skb, proto)); \
316 static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
325 BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
328 NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
332 ptr = skb_gro_header(skb, off + plen, off);
336 delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
339 /* Adjust skb->csum since we changed the packet */
340 NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
348 static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
357 ptr = skb_gro_header(skb, plen, grc->offset);
365 static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
368 NAPI_GRO_CB(skb)->flush |= flush;
370 static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
376 NAPI_GRO_CB(skb)->flush |= flush;
377 skb_gro_remcsum_cleanup(skb, grc);
378 skb->remcsum_offload = 0;
382 static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
384 NAPI_GRO_CB(skb)->flush |= flush;
386 static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
391 NAPI_GRO_CB(skb)->flush |= flush;
392 skb_gro_remcsum_cleanup(skb, grc);
393 skb->remcsum_offload = 0;
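skb_gro_flush_final() and its remcsum variant are meant for the single exit point of a tunnel gro_receive handler; in the mainline header the first pair of definitions sits under CONFIG_XFRM_OFFLOAD, which additionally skips the update when pp is an -EINPROGRESS error pointer. A hedged sketch of the surrounding lifecycle, loosely modeled on the vxlan handler (mytun names hypothetical):

static struct sk_buff *mytun_gro_receive(struct list_head *head,
                                         struct sk_buff *skb)
{
        struct sk_buff *pp = NULL;
        struct gro_remcsum grc;
        int flush = 1;

        skb_gro_remcsum_init(&grc);     /* offset = 0, delta = 0 */

        /* ... fetch the tunnel header; when remote checksum offload is in
         * use, skb_gro_remcsum_process() patches the inner checksum and
         * records what it changed in grc; on success clear flush and
         * recurse into the inner handler ...
         */

        /* Single exit: record the flush decision, undo any remcsum patch
         * still recorded in grc and clear skb->remcsum_offload.
         */
        skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
        return pp;
}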
412 #define indirect_call_gro_receive_inet(cb, f2, f1, head, skb) \
414 unlikely(gro_recursion_inc_test(skb)) ? \
415 NAPI_GRO_CB(skb)->flush |= 1, NULL : \
416 INDIRECT_CALL_INET(cb, f2, f1, head, skb); \
419 struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
421 int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);
423 static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
428 off = skb_gro_offset(skb);
430 uh = skb_gro_header(skb, hlen, off);
435 static inline __wsum ip6_gro_compute_pseudo(const struct sk_buff *skb,
438 const struct ipv6hdr *iph = skb_gro_network_header(skb);
441 skb_gro_len(skb), proto, 0));
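The UDP pieces compose roughly as in the mainline udp4_gro_receive() (simplified and reproduced from memory, so treat it as a sketch): fetch the UDP header, validate against the IPv4 pseudo-header while accepting a zero checksum, convert a verified checksum into a CHECKSUM_COMPLETE-style value for inner layers, then hand over to the generic udp_gro_receive(); the IPv6 flavour is the same with ip6_gro_compute_pseudo().

static struct sk_buff *my_udp4_gro_receive(struct list_head *head,
                                           struct sk_buff *skb)
{
        struct udphdr *uh = udp_gro_udphdr(skb);

        if (unlikely(!uh))
                goto flush;

        /* Don't bother validating if the packet is flushed anyway. */
        if (NAPI_GRO_CB(skb)->flush)
                goto skip;

        if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check,
                                                 inet_gro_compute_pseudo))
                goto flush;
        else if (uh->check)
                skb_gro_checksum_try_convert(skb, IPPROTO_UDP,
                                             inet_gro_compute_pseudo);
skip:
        /* The real handler now calls udp_gro_receive() with the looked-up
         * socket; omitted here to keep the sketch self-contained.
         */
        return NULL;

flush:
        NAPI_GRO_CB(skb)->flush = 1;
        return NULL;
}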
502 int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
503 int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb);
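skb_gro_receive() is the merge primitive: given a packet p already held in the GRO bucket and the newly arrived skb, it appends the new payload (as frags or a frag_list); a non-zero return means the two could not be coalesced and p has to be flushed. skb_gro_receive_list() is the frag-list GRO counterpart. A hedged fragment of the call site inside a protocol's flow-matching loop, loosely following tcp_gro_receive():

        /* p is the held packet whose flow matched the new skb. */
        if (flush || skb_gro_receive(p, skb)) {
                pp = p;         /* can't merge: flush p up the stack */
                goto out;
        }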
535 /* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded, pass the whole batch up to the stack. */
538 static inline void gro_normal_one(struct gro_node *gro, struct sk_buff *skb,
541 list_add_tail(&skb->list, &gro->rx_list);
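gro_normal_one() batches fully processed packets on gro->rx_list and passes the whole list up the stack once gro_normal_batch segments have accumulated. A hedged fragment of how the GRO core accounts for segments when flushing a held super-packet (call site paraphrased from memory):

        /* gro is the struct gro_node for the current NAPI context.  A
         * coalesced super-packet is queued with the number of wire
         * segments it carries, so the batching threshold keeps counting
         * real packets; an unmerged packet would use segs == 1 instead.
         */
        gro_normal_one(gro, skb, NAPI_GRO_CB(skb)->count);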
553 * The caller must verify skb_valid_dst(skb) is false and skb->dev is initialized.
556 static inline void inet_get_iif_sdif(const struct sk_buff *skb, int *iif, int *sdif)
558 *iif = inet_iif(skb) ?: skb->dev->ifindex;
562 if (netif_is_l3_slave(skb->dev)) {
563 struct net_device *master = netdev_master_upper_dev_get_rcu(skb->dev);
574 * The caller must verify skb_valid_dst(skb) is false and skb->dev is initialized.
577 static inline void inet6_get_iif_sdif(const struct sk_buff *skb, int *iif, int *sdif)
579 /* using skb->dev->ifindex because skb_dst(skb) is not initialized */
580 *iif = skb->dev->ifindex;
584 if (netif_is_l3_slave(skb->dev)) {
585 struct net_device *master = netdev_master_upper_dev_get_rcu(skb->dev);
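Both iif/sdif helpers exist because skb_dst() is not populated yet in the GRO path, so the ingress interface (and the L3 master for VRF slaves) has to be recovered from skb->dev. A hedged sketch of how a UDP GRO socket lookup uses the IPv4 variant, in the style of udp4_gro_lookup_skb() in net/ipv4/udp_offload.c (the __udp4_lib_lookup() call is reproduced from memory and is an assumption):

static struct sock *my_udp4_gro_lookup(struct sk_buff *skb,
                                       __be16 sport, __be16 dport)
{
        const struct iphdr *iph = skb_gro_network_header(skb);
        struct net *net = dev_net(skb->dev);
        int iif, sdif;

        inet_get_iif_sdif(skb, &iif, &sdif);

        return __udp4_lib_lookup(net, iph->saddr, sport, iph->daddr, dport,
                                 iif, sdif, net->ipv4.udp_table, NULL);
}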