/* SPDX-License-Identifier: GPL-2.0-or-later */

#ifndef _NET_IPV6_GRO_H
#define _NET_IPV6_GRO_H

#include <linux/indirect_call_wrapper.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/skbuff.h>
#include <net/udp.h>

struct napi_gro_cb {
	union {
		struct {
			/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
			void	*frag0;

			/* Length of frag0. */
			unsigned int frag0_len;
		};

		struct {
			/* used in skb_gro_receive() slow path */
			struct sk_buff *last;

			/* jiffies when first packet was created/queued */
			unsigned long age;
		};
	};

	/* This indicates where we are processing relative to skb->data. */
	int	data_offset;

	/* This is non-zero if the packet cannot be merged with the new skb. */
	u16	flush;

	/* Save the IP ID here and check when we get to the transport layer */
	u16	flush_id;

	/* Number of segments aggregated. */
	u16	count;

	/* Used in ipv6_gro_receive(), foo-over-udp, and esp-in-udp */
	u16	proto;

/* Used in napi_gro_cb::free */
#define NAPI_GRO_FREE             1
#define NAPI_GRO_FREE_STOLEN_HEAD 2
	/* portion of the cb set to zero at every gro iteration */
	struct_group(zeroed,

		/* Start offset for remote checksum offload */
		u16	gro_remcsum_start;

		/* This is non-zero if the packet may be of the same flow. */
		u8	same_flow:1;

		/* Used in tunnel GRO receive */
		u8	encap_mark:1;

		/* GRO checksum is valid */
		u8	csum_valid:1;

		/* Number of checksums via CHECKSUM_UNNECESSARY */
		u8	csum_cnt:3;

		/* Free the skb? */
		u8	free:2;

		/* Used in foo-over-udp, set in udp[46]_gro_receive */
		u8	is_ipv6:1;

		/* Used in GRE, set in fou/gue_gro_receive */
		u8	is_fou:1;

		/* Used to determine if flush_id can be ignored */
		u8	is_atomic:1;

		/* Number of gro_receive callbacks this packet already went through */
		u8	recursion_counter:4;

		/* GRO is done by frag_list pointer chaining. */
		u8	is_flist:1;
	);

	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
	__wsum	csum;
};

#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
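
/*
 * Illustrative use (a sketch, not new API): protocol gro_receive
 * handlers read and update per-packet GRO state through this macro,
 * e.g.
 *
 *	NAPI_GRO_CB(skb)->flush |= 1;
 *
 * Note that struct napi_gro_cb lives in the 48-byte skb->cb[] area and
 * must not outgrow it.
 */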

#define GRO_RECURSION_LIMIT 15
static inline int gro_recursion_inc_test(struct sk_buff *skb)
{
	return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
}

typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *);
static inline struct sk_buff *call_gro_receive(gro_receive_t cb,
					       struct list_head *head,
					       struct sk_buff *skb)
{
	if (unlikely(gro_recursion_inc_test(skb))) {
		NAPI_GRO_CB(skb)->flush |= 1;
		return NULL;
	}

	return cb(head, skb);
}
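
/*
 * Illustrative caller (a sketch): an encapsulation gro_receive handler
 * chains to the next protocol layer through this wrapper so that
 * recursion_counter bounds the nesting depth, e.g. as gre_gro_receive()
 * does:
 *
 *	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 */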

typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *,
					    struct sk_buff *);
static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
						  struct sock *sk,
						  struct list_head *head,
						  struct sk_buff *skb)
{
	if (unlikely(gro_recursion_inc_test(skb))) {
		NAPI_GRO_CB(skb)->flush |= 1;
		return NULL;
	}

	return cb(sk, head, skb);
}

static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->data_offset;
}

static inline unsigned int skb_gro_len(const struct sk_buff *skb)
{
	return skb->len - NAPI_GRO_CB(skb)->data_offset;
}

static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
{
	NAPI_GRO_CB(skb)->data_offset += len;
}

static inline void *skb_gro_header_fast(const struct sk_buff *skb,
					unsigned int offset)
{
	return NAPI_GRO_CB(skb)->frag0 + offset;
}

static inline bool skb_gro_may_pull(const struct sk_buff *skb,
				    unsigned int hlen)
{
	return likely(hlen <= NAPI_GRO_CB(skb)->frag0_len);
}

static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
					unsigned int offset)
{
	if (!pskb_may_pull(skb, hlen))
		return NULL;

	return skb->data + offset;
}

static inline void *skb_gro_header(struct sk_buff *skb, unsigned int hlen,
				   unsigned int offset)
{
	void *ptr;

	ptr = skb_gro_header_fast(skb, offset);
	if (!skb_gro_may_pull(skb, hlen))
		ptr = skb_gro_header_slow(skb, hlen, offset);
	return ptr;
}
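
/*
 * Typical header lookup in a gro_receive handler (a sketch; the same
 * pattern appears in udp_gro_udphdr() below): try the frag0 fast path
 * first, fall back to pskb_may_pull(), and flush on failure:
 *
 *	off  = skb_gro_offset(skb);
 *	hlen = off + sizeof(*uh);
 *	uh   = skb_gro_header(skb, hlen, off);
 *	if (unlikely(!uh))
 *		goto flush;
 */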

static inline void *skb_gro_network_header(const struct sk_buff *skb)
{
	if (skb_gro_may_pull(skb, skb_gro_offset(skb)))
		return skb_gro_header_fast(skb, skb_network_offset(skb));

	return skb_network_header(skb);
}

static inline __wsum inet_gro_compute_pseudo(const struct sk_buff *skb,
					     int proto)
{
	const struct iphdr *iph = skb_gro_network_header(skb);

	return csum_tcpudp_nofold(iph->saddr, iph->daddr,
				  skb_gro_len(skb), proto, 0);
}

static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
					const void *start, unsigned int len)
{
	if (NAPI_GRO_CB(skb)->csum_valid)
		NAPI_GRO_CB(skb)->csum = wsum_negate(csum_partial(start, len,
						wsum_negate(NAPI_GRO_CB(skb)->csum)));
}
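
/*
 * Illustrative caller (a sketch, cf. ipv6_gro_receive()): after a
 * header is pulled out of the GRO window, subtract it from the running
 * CHECKSUM_COMPLETE value so the checksum stays coherent:
 *
 *	skb_gro_pull(skb, hlen);
 *	skb_gro_postpull_rcsum(skb, hdr, hlen);
 */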

/* GRO checksum functions. These are logical equivalents of the normal
 * checksum functions (in skbuff.h) except that they operate on the GRO
 * offsets and fields in sk_buff.
 */

__sum16 __skb_gro_checksum_complete(struct sk_buff *skb);

static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
}

static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
						      bool zero_okay,
						      __sum16 check)
{
	return ((skb->ip_summed != CHECKSUM_PARTIAL ||
		skb_checksum_start_offset(skb) <
		 skb_gro_offset(skb)) &&
		!skb_at_gro_remcsum_start(skb) &&
		NAPI_GRO_CB(skb)->csum_cnt == 0 &&
		(!zero_okay || check));
}

static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
							   __wsum psum)
{
	if (NAPI_GRO_CB(skb)->csum_valid &&
	    !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
		return 0;

	NAPI_GRO_CB(skb)->csum = psum;

	return __skb_gro_checksum_complete(skb);
}

static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
{
	if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
		/* Consume a checksum from CHECKSUM_UNNECESSARY */
		NAPI_GRO_CB(skb)->csum_cnt--;
	} else {
		/* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
		 * verified a new top level checksum or an encapsulated one
		 * during GRO. This saves work if we fall back to the normal
		 * path.
		 */
		__skb_incr_checksum_unnecessary(skb);
	}
}

#define __skb_gro_checksum_validate(skb, proto, zero_okay, check,	\
				    compute_pseudo)			\
({									\
	__sum16 __ret = 0;						\
	if (__skb_gro_checksum_validate_needed(skb, zero_okay, check))	\
		__ret = __skb_gro_checksum_validate_complete(skb,	\
				compute_pseudo(skb, proto));		\
	if (!__ret)							\
		skb_gro_incr_csum_unnecessary(skb);			\
	__ret;								\
})

#define skb_gro_checksum_validate(skb, proto, compute_pseudo)		\
	__skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)

#define skb_gro_checksum_validate_zero_check(skb, proto, check,		\
					     compute_pseudo)		\
	__skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)

#define skb_gro_checksum_simple_validate(skb)				\
	__skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
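
/*
 * Illustrative caller (a sketch, cf. tcp4_gro_receive()): validate the
 * transport checksum against a freshly computed pseudo-header before
 * attempting any aggregation:
 *
 *	if (!NAPI_GRO_CB(skb)->flush &&
 *	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
 *				      inet_gro_compute_pseudo))
 *		goto flush;
 */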

static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
		!NAPI_GRO_CB(skb)->csum_valid);
}

static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
					      __wsum pseudo)
{
	NAPI_GRO_CB(skb)->csum = ~pseudo;
	NAPI_GRO_CB(skb)->csum_valid = 1;
}

#define skb_gro_checksum_try_convert(skb, proto, compute_pseudo)	\
do {									\
	if (__skb_gro_checksum_convert_check(skb))			\
		__skb_gro_checksum_convert(skb,				\
					   compute_pseudo(skb, proto));	\
} while (0)
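
/*
 * Illustrative caller (a sketch, cf. udp4_gro_receive()): if no
 * checksum information is available, opportunistically derive a
 * CHECKSUM_COMPLETE-style value so later tunnel layers can validate
 * cheaply:
 *
 *	skb_gro_checksum_try_convert(skb, IPPROTO_UDP,
 *				     inet_gro_compute_pseudo);
 */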

struct gro_remcsum {
	int offset;
	__wsum delta;
};

static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
{
	grc->offset = 0;
	grc->delta = 0;
}

static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
					    unsigned int off, size_t hdrlen,
					    int start, int offset,
					    struct gro_remcsum *grc,
					    bool nopartial)
{
	__wsum delta;
	size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);

	BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);

	if (!nopartial) {
		NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
		return ptr;
	}

	ptr = skb_gro_header(skb, off + plen, off);
	if (!ptr)
		return NULL;

	delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
			       start, offset);

	/* Adjust skb->csum since we changed the packet */
	NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);

	grc->offset = off + hdrlen + offset;
	grc->delta = delta;

	return ptr;
}

static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
					   struct gro_remcsum *grc)
{
	void *ptr;
	size_t plen = grc->offset + sizeof(u16);

	if (!grc->delta)
		return;

	ptr = skb_gro_header(skb, plen, grc->offset);
	if (!ptr)
		return;

	remcsum_unadjust((__sum16 *)ptr, grc->delta);
}
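
/*
 * Illustrative pairing (a sketch, cf. vxlan_gro_receive()): a remote
 * checksum offload user initializes the context up front and undoes
 * the adjustment on the flush path:
 *
 *	struct gro_remcsum grc;
 *
 *	skb_gro_remcsum_init(&grc);
 *	...
 *	skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
 */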

#ifdef CONFIG_XFRM_OFFLOAD
static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
{
	if (PTR_ERR(pp) != -EINPROGRESS)
		NAPI_GRO_CB(skb)->flush |= flush;
}
static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
					       struct sk_buff *pp,
					       int flush,
					       struct gro_remcsum *grc)
{
	if (PTR_ERR(pp) != -EINPROGRESS) {
		NAPI_GRO_CB(skb)->flush |= flush;
		skb_gro_remcsum_cleanup(skb, grc);
		skb->remcsum_offload = 0;
	}
}
#else
static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
{
	NAPI_GRO_CB(skb)->flush |= flush;
}
static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
					       struct sk_buff *pp,
					       int flush,
					       struct gro_remcsum *grc)
{
	NAPI_GRO_CB(skb)->flush |= flush;
	skb_gro_remcsum_cleanup(skb, grc);
	skb->remcsum_offload = 0;
}
#endif

INDIRECT_CALLABLE_DECLARE(struct sk_buff *ipv6_gro_receive(struct list_head *,
							   struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *inet_gro_receive(struct list_head *,
							   struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int));

INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp4_gro_receive(struct list_head *,
							   struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int udp4_gro_complete(struct sk_buff *, int));

INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp6_gro_receive(struct list_head *,
							   struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int));

#define indirect_call_gro_receive_inet(cb, f2, f1, head, skb)	\
({								\
	unlikely(gro_recursion_inc_test(skb)) ?			\
		NAPI_GRO_CB(skb)->flush |= 1, NULL :		\
		INDIRECT_CALL_INET(cb, f2, f1, head, skb);	\
})
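
/*
 * Illustrative use (a sketch, cf. gue_gro_receive()): dispatch to the
 * inner IPv4/IPv6 handler while avoiding a retpoline-era indirect call
 * for the two common cases:
 *
 *	pp = indirect_call_gro_receive_inet(ops->callbacks.gro_receive,
 *					    ipv6_gro_receive,
 *					    inet_gro_receive, head, skb);
 */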

struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
				struct udphdr *uh, struct sock *sk);
int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);

static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
{
	struct udphdr *uh;
	unsigned int hlen, off;

	off  = skb_gro_offset(skb);
	hlen = off + sizeof(*uh);
	uh   = skb_gro_header(skb, hlen, off);

	return uh;
}

static inline __wsum ip6_gro_compute_pseudo(const struct sk_buff *skb,
					    int proto)
{
	const struct ipv6hdr *iph = skb_gro_network_header(skb);

	return ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr,
					    skb_gro_len(skb), proto, 0));
}

int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);

/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
static inline void gro_normal_list(struct napi_struct *napi)
{
	if (!napi->rx_count)
		return;
	netif_receive_skb_list_internal(&napi->rx_list);
	INIT_LIST_HEAD(&napi->rx_list);
	napi->rx_count = 0;
}

/* Queue one GRO_NORMAL SKB up for list processing. If the batch size is
 * exceeded, pass the whole batch up to the stack.
 */
static inline void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int segs)
{
	list_add_tail(&skb->list, &napi->rx_list);
	napi->rx_count += segs;
	if (napi->rx_count >= READ_ONCE(gro_normal_batch))
		gro_normal_list(napi);
}
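
/*
 * Illustrative callers (a sketch): the GRO core queues a plain skb as
 * gro_normal_one(napi, skb, 1) and a coalesced super-packet as
 * gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count), so the batch
 * limit reflects the original segment count.
 */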

/* This helper is an alternative to the 'inet_iif' and 'inet_sdif'
 * functions for the cases where we cannot rely on the fields of IPCB.
 *
 * The caller must verify that skb_valid_dst(skb) is false and that
 * skb->dev is initialized.
 * The caller must hold the RCU read lock.
 */
static inline void inet_get_iif_sdif(const struct sk_buff *skb, int *iif, int *sdif)
{
	*iif = inet_iif(skb) ?: skb->dev->ifindex;
	*sdif = 0;

#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (netif_is_l3_slave(skb->dev)) {
		struct net_device *master = netdev_master_upper_dev_get_rcu(skb->dev);

		*sdif = *iif;
		*iif = master ? master->ifindex : 0;
	}
#endif
}

/* This helper is an alternative to the 'inet6_iif' and 'inet6_sdif'
 * functions for the cases where we cannot rely on the fields of IP6CB.
 *
 * The caller must verify that skb_valid_dst(skb) is false and that
 * skb->dev is initialized.
 * The caller must hold the RCU read lock.
 */
static inline void inet6_get_iif_sdif(const struct sk_buff *skb, int *iif, int *sdif)
{
	/* using skb->dev->ifindex because skb_dst(skb) is not initialized */
	*iif = skb->dev->ifindex;
	*sdif = 0;

#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (netif_is_l3_slave(skb->dev)) {
		struct net_device *master = netdev_master_upper_dev_get_rcu(skb->dev);

		*sdif = *iif;
		*iif = master ? master->ifindex : 0;
	}
#endif
}

extern struct list_head offload_base;

#endif /* _NET_IPV6_GRO_H */
499