xref: /linux/net/core/gro.c (revision 1a9239bb4253f9076b5b4b2a1a4e8d7defd77a95)
// SPDX-License-Identifier: GPL-2.0-or-later
#include <net/gro.h>
#include <net/dst_metadata.h>
#include <net/busy_poll.h>
#include <trace/events/net.h>
#include <linux/skbuff_ref.h>

#define MAX_GRO_SKBS 8

static DEFINE_SPINLOCK(offload_lock);

/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&packet_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all CPUs
 *	that are in the middle of receiving packets will see the new offload
 *	handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &net_hotdata.offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);
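/*
 * Example (illustrative sketch, not part of this file): a protocol block
 * registers its GRO callbacks once at init time through dev_add_offload().
 * The foo_* names and ETH_P_FOO are hypothetical; see ipv4_offload_init()
 * in net/ipv4/af_inet.c for a real caller.
 *
 *	static struct packet_offload foo_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_FOO),
 *		.callbacks = {
 *			.gro_receive	= foo_gro_receive,
 *			.gro_complete	= foo_gro_complete,
 *		},
 *	};
 *
 *	static int __init foo_offload_init(void)
 *	{
 *		dev_add_offload(&foo_offload);
 *		return 0;
 *	}
 */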

/**
 *	__dev_remove_offload - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &packet_offload
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers and must not be
 *	freed until after all the CPUs have gone through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &net_hotdata.offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &packet_offload is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
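/*
 * Example (illustrative sketch, continuing the hypothetical foo_offload
 * above): teardown at module exit.  Because dev_remove_offload() calls
 * synchronize_net(), the structure may be freed or reused as soon as the
 * call returns.
 *
 *	static void __exit foo_offload_exit(void)
 *	{
 *		dev_remove_offload(&foo_offload);
 *	}
 */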
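/* Merge @skb into the already-held packet @p: depending on where @skb's
 * payload lives, either steal its page fragments, turn its linear head
 * into an extra fragment, or fall back to chaining the whole skb onto
 * @p's frag_list, updating length and truesize bookkeeping accordingly.
 */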
int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
{
	struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
	unsigned int offset = skb_gro_offset(skb);
	unsigned int headlen = skb_headlen(skb);
	unsigned int len = skb_gro_len(skb);
	unsigned int delta_truesize;
	unsigned int new_truesize;
	struct sk_buff *lp;
	int segs;

	/* Do not splice page pool based packets w/ non-page pool
	 * packets. This can result in reference count issues as page
	 * pool pages will not decrement the reference count and will
	 * instead be immediately returned to the pool or have frag
	 * count decremented.
	 */
	if (p->pp_recycle != skb->pp_recycle)
		return -ETOOMANYREFS;

	if (unlikely(p->len + len >= netif_get_gro_max_size(p->dev, p) ||
		     NAPI_GRO_CB(skb)->flush))
		return -E2BIG;

	if (unlikely(p->len + len >= GRO_LEGACY_MAX_SIZE)) {
		if (NAPI_GRO_CB(skb)->proto != IPPROTO_TCP ||
		    (p->protocol == htons(ETH_P_IPV6) &&
		     skb_headroom(p) < sizeof(struct hop_jumbo_hdr)) ||
		    p->encapsulation)
			return -E2BIG;
	}

	segs = NAPI_GRO_CB(skb)->count;
	lp = NAPI_GRO_CB(p)->last;
	pinfo = skb_shinfo(lp);

	if (headlen <= offset) {
		skb_frag_t *frag;
		skb_frag_t *frag2;
		int i = skbinfo->nr_frags;
		int nr_frags = pinfo->nr_frags + i;

		if (nr_frags > MAX_SKB_FRAGS)
			goto merge;

		offset -= headlen;
		pinfo->nr_frags = nr_frags;
		skbinfo->nr_frags = 0;

		frag = pinfo->frags + nr_frags;
		frag2 = skbinfo->frags + i;
		do {
			*--frag = *--frag2;
		} while (--i);

		skb_frag_off_add(frag, offset);
		skb_frag_size_sub(frag, offset);

		/* all fragments' truesize: remove (head size + sk_buff) */
		new_truesize = SKB_TRUESIZE(skb_end_offset(skb));
		delta_truesize = skb->truesize - new_truesize;

		skb->truesize = new_truesize;
		skb->len -= skb->data_len;
		skb->data_len = 0;

		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
		goto done;
	} else if (skb->head_frag) {
		int nr_frags = pinfo->nr_frags;
		skb_frag_t *frag = pinfo->frags + nr_frags;
		struct page *page = virt_to_head_page(skb->head);
		unsigned int first_size = headlen - offset;
		unsigned int first_offset;

		if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
			goto merge;

		first_offset = skb->data -
			       (unsigned char *)page_address(page) +
			       offset;

		pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;

		skb_frag_fill_page_desc(frag, page, first_offset, first_size);

		memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
		/* We don't need to clear skbinfo->nr_frags here */

		new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff));
		delta_truesize = skb->truesize - new_truesize;
		skb->truesize = new_truesize;
		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
		goto done;
	}

merge:
	/* sk ownership - if any - completely transferred to the aggregated packet */
	skb->destructor = NULL;
	skb->sk = NULL;
	delta_truesize = skb->truesize;
	if (offset > headlen) {
		unsigned int eat = offset - headlen;

		skb_frag_off_add(&skbinfo->frags[0], eat);
		skb_frag_size_sub(&skbinfo->frags[0], eat);
		skb->data_len -= eat;
		skb->len -= eat;
		offset = headlen;
	}

	__skb_pull(skb, offset);

	if (NAPI_GRO_CB(p)->last == p)
		skb_shinfo(p)->frag_list = skb;
	else
		NAPI_GRO_CB(p)->last->next = skb;
	NAPI_GRO_CB(p)->last = skb;
	__skb_header_release(skb);
	lp = p;

done:
	NAPI_GRO_CB(p)->count += segs;
	p->data_len += len;
	p->truesize += delta_truesize;
	p->len += len;
	if (lp != p) {
		lp->data_len += len;
		lp->truesize += delta_truesize;
		lp->len += len;
	}
	NAPI_GRO_CB(skb)->same_flow = 1;
	return 0;
}

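/* Fraglist variant of skb_gro_receive(): chain @skb onto @p's frag_list
 * without merging any payload, so the original packets stay intact (used
 * by the L4 fraglist GRO paths).
 */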
int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
{
	if (unlikely(p->len + skb->len >= 65536))
		return -E2BIG;

	if (NAPI_GRO_CB(p)->last == p)
		skb_shinfo(p)->frag_list = skb;
	else
		NAPI_GRO_CB(p)->last->next = skb;

	skb_pull(skb, skb_gro_offset(skb));

	NAPI_GRO_CB(p)->last = skb;
	NAPI_GRO_CB(p)->count++;
	p->data_len += skb->len;

	/* sk ownership - if any - completely transferred to the aggregated packet */
	skb->destructor = NULL;
	skb->sk = NULL;
	p->truesize += skb->truesize;
	p->len += skb->len;

	NAPI_GRO_CB(skb)->same_flow = 1;

	return 0;
}

static void gro_complete(struct gro_node *gro, struct sk_buff *skb)
{
	struct list_head *head = &net_hotdata.offload_base;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
					 ipv6_gro_complete, inet_gro_complete,
					 skb, 0);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return;
	}

out:
	gro_normal_one(gro, skb, NAPI_GRO_CB(skb)->count);
}

static void __gro_flush_chain(struct gro_node *gro, u32 index, bool flush_old)
{
	struct list_head *head = &gro->hash[index].list;
	struct sk_buff *skb, *p;

	list_for_each_entry_safe_reverse(skb, p, head, list) {
		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;
		skb_list_del_init(skb);
		gro_complete(gro, skb);
		gro->hash[index].count--;
	}

	if (!gro->hash[index].count)
		__clear_bit(index, &gro->bitmask);
}

/*
 * gro->hash[].list contains packets ordered by age, with the youngest
 * packets at the head of the list.
 * Complete skbs in reverse order to reduce latencies.
 */
void __gro_flush(struct gro_node *gro, bool flush_old)
{
	unsigned long bitmask = gro->bitmask;
	unsigned int i, base = ~0U;

	while ((i = ffs(bitmask)) != 0) {
		bitmask >>= i;
		base += i;
		__gro_flush_chain(gro, base, flush_old);
	}
}
EXPORT_SYMBOL(__gro_flush);
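/*
 * Worked example of the walk in __gro_flush() (editorial illustration):
 * with gro->bitmask == 0b1010 (buckets 1 and 3 populated), ffs() returns 2,
 * so base goes from ~0U to 1 and bucket 1 is flushed; the shifted bitmask
 * 0b10 yields ffs() == 2 again, base becomes 3 and bucket 3 is flushed,
 * after which the loop stops on a zero bitmask.
 */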

static unsigned long gro_list_prepare_tc_ext(const struct sk_buff *skb,
					     const struct sk_buff *p,
					     unsigned long diffs)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	struct tc_skb_ext *skb_ext;
	struct tc_skb_ext *p_ext;

	skb_ext = skb_ext_find(skb, TC_SKB_EXT);
	p_ext = skb_ext_find(p, TC_SKB_EXT);

	diffs |= (!!p_ext) ^ (!!skb_ext);
	if (!diffs && unlikely(skb_ext))
		diffs |= p_ext->chain ^ skb_ext->chain;
#endif
	return diffs;
}

static void gro_list_prepare(const struct list_head *head,
			     const struct sk_buff *skb)
{
	unsigned int maclen = skb->dev->hard_header_len;
	u32 hash = skb_get_hash_raw(skb);
	struct sk_buff *p;

	list_for_each_entry(p, head, list) {
		unsigned long diffs;

		if (hash != skb_get_hash_raw(p)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= p->vlan_all ^ skb->vlan_all;
		diffs |= skb_metadata_differs(p, skb);
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_mac_header(skb),
				       maclen);

		/* In the most common scenarios 'slow_gro' is 0, otherwise
		 * we are already on some slower path: either skip all the
		 * infrequent tests altogether or avoid trying too hard to
		 * skip each of them individually.
		 */
		if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) {
			diffs |= p->sk != skb->sk;
			diffs |= skb_metadata_dst_cmp(p, skb);
			diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);

			diffs |= gro_list_prepare_tc_ext(skb, p, diffs);
		}

		NAPI_GRO_CB(p)->same_flow = !diffs;
	}
}

static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
{
	const struct skb_shared_info *pinfo;
	const skb_frag_t *frag0;
	unsigned int headlen;

	NAPI_GRO_CB(skb)->network_offset = 0;
	NAPI_GRO_CB(skb)->data_offset = 0;
	headlen = skb_headlen(skb);
	NAPI_GRO_CB(skb)->frag0 = skb->data;
	NAPI_GRO_CB(skb)->frag0_len = headlen;
	if (headlen)
		return;

	pinfo = skb_shinfo(skb);
	frag0 = &pinfo->frags[0];

	if (pinfo->nr_frags && skb_frag_page(frag0) &&
	    !PageHighMem(skb_frag_page(frag0)) &&
	    (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
						    skb_frag_size(frag0),
						    skb->end - skb->tail);
	}
}

static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
	struct skb_shared_info *pinfo = skb_shinfo(skb);

	BUG_ON(skb->end - skb->tail < grow);

	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

	skb->data_len -= grow;
	skb->tail += grow;

	skb_frag_off_add(&pinfo->frags[0], grow);
	skb_frag_size_sub(&pinfo->frags[0], grow);

	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
		skb_frag_unref(skb, 0);
		memmove(pinfo->frags, pinfo->frags + 1,
			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
	}
}

static void gro_try_pull_from_frag0(struct sk_buff *skb)
{
	int grow = skb_gro_offset(skb) - skb_headlen(skb);

	if (grow > 0)
		gro_pull_from_frag0(skb, grow);
}

static void gro_flush_oldest(struct gro_node *gro, struct list_head *head)
{
	struct sk_buff *oldest;

	oldest = list_last_entry(head, struct sk_buff, list);

	/* We are called with the list length >= MAX_GRO_SKBS, so this is
	 * impossible.
	 */
	if (WARN_ON_ONCE(!oldest))
		return;

	/* Do not adjust gro->hash[].count, the caller is adding a new
	 * SKB to the chain.
	 */
	skb_list_del_init(oldest);
	gro_complete(gro, oldest);
}

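/* Core GRO entry point: match @skb against the packets already held in the
 * hash bucket for its flow, run the matching protocol's gro_receive
 * callback, and either hold the skb for further aggregation, complete a
 * finished chain, or fall back to delivering it individually (GRO_NORMAL).
 */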
static enum gro_result dev_gro_receive(struct gro_node *gro,
				       struct sk_buff *skb)
{
	u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
	struct list_head *head = &net_hotdata.offload_base;
	struct gro_list *gro_list = &gro->hash[bucket];
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct sk_buff *pp = NULL;
	enum gro_result ret;
	int same_flow;

	if (netif_elide_gro(skb->dev))
		goto normal;

	gro_list_prepare(&gro_list->list, skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type == type && ptype->callbacks.gro_receive)
			goto found_ptype;
	}
	rcu_read_unlock();
	goto normal;

found_ptype:
	skb_set_network_header(skb, skb_gro_offset(skb));
	skb_reset_mac_len(skb);
	BUILD_BUG_ON(sizeof_field(struct napi_gro_cb, zeroed) != sizeof(u32));
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct napi_gro_cb, zeroed),
					sizeof(u32))); /* Avoid slow unaligned acc */
	*(u32 *)&NAPI_GRO_CB(skb)->zeroed = 0;
	NAPI_GRO_CB(skb)->flush = skb_has_frag_list(skb);
	NAPI_GRO_CB(skb)->count = 1;
	if (unlikely(skb_is_gso(skb))) {
		NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;
		/* Only support TCP and non-DODGY users. */
		if (!skb_is_gso_tcp(skb) ||
		    (skb_shinfo(skb)->gso_type & SKB_GSO_DODGY))
			NAPI_GRO_CB(skb)->flush = 1;
	}

	/* Setup for GRO checksum validation */
	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		NAPI_GRO_CB(skb)->csum = skb->csum;
		NAPI_GRO_CB(skb)->csum_valid = 1;
		break;
	case CHECKSUM_UNNECESSARY:
		NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
		break;
	}

	pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
				ipv6_gro_receive, inet_gro_receive,
				&gro_list->list, skb);

	rcu_read_unlock();

	if (PTR_ERR(pp) == -EINPROGRESS) {
		ret = GRO_CONSUMED;
		goto ok;
	}

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		skb_list_del_init(pp);
		gro_complete(gro, pp);
		gro_list->count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush)
		goto normal;

	if (unlikely(gro_list->count >= MAX_GRO_SKBS))
		gro_flush_oldest(gro, &gro_list->list);
	else
		gro_list->count++;

	/* Must be called before setting NAPI_GRO_CB(skb)->{age|last} */
	gro_try_pull_from_frag0(skb);
	NAPI_GRO_CB(skb)->age = jiffies;
	NAPI_GRO_CB(skb)->last = skb;
	if (!skb_is_gso(skb))
		skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	list_add(&skb->list, &gro_list->list);
	ret = GRO_HELD;
ok:
	if (gro_list->count) {
		if (!test_bit(bucket, &gro->bitmask))
			__set_bit(bucket, &gro->bitmask);
	} else if (test_bit(bucket, &gro->bitmask)) {
		__clear_bit(bucket, &gro->bitmask);
	}

	return ret;

normal:
	ret = GRO_NORMAL;
	gro_try_pull_from_frag0(skb);
	goto ok;
}

struct packet_offload *gro_find_receive_by_type(__be16 type)
{
	struct list_head *offload_head = &net_hotdata.offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
	struct list_head *offload_head = &net_hotdata.offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);
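/*
 * Example (illustrative sketch only): tunnel and encapsulation offloads use
 * the two lookups above from their own gro_receive/gro_complete callbacks to
 * chain into the inner protocol's handlers, roughly:
 *
 *	struct packet_offload *ptype;
 *	struct sk_buff *pp = NULL;
 *
 *	ptype = gro_find_receive_by_type(inner_proto);
 *	if (ptype)
 *		pp = call_gro_receive(ptype->callbacks.gro_receive,
 *				      head, skb);
 *
 * where inner_proto is a hypothetical __be16 such as htons(ETH_P_TEB);
 * see geneve_gro_receive() in drivers/net/geneve.c for a real caller.
 */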

static gro_result_t gro_skb_finish(struct gro_node *gro, struct sk_buff *skb,
				   gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
		gro_normal_one(gro, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
			__kfree_skb(skb);
		else
			__napi_kfree_skb(skb, SKB_CONSUMED);
		break;

	case GRO_HELD:
	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

gro_result_t gro_receive_skb(struct gro_node *gro, struct sk_buff *skb)
{
	gro_result_t ret;

	__skb_mark_napi_id(skb, gro);
	trace_napi_gro_receive_entry(skb);

	skb_gro_reset_offset(skb, 0);

	ret = gro_skb_finish(gro, skb, dev_gro_receive(gro, skb));
	trace_napi_gro_receive_exit(ret);

	return ret;
}
EXPORT_SYMBOL(gro_receive_skb);
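/*
 * Example (illustrative sketch only): NAPI drivers normally reach this
 * function through the napi_gro_receive() wrapper from their poll callback.
 * The foo_* names below are hypothetical.
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct sk_buff *skb;
 *		int done = 0;
 *
 *		while (done < budget && (skb = foo_rx_next_skb(napi))) {
 *			skb->protocol = eth_type_trans(skb, napi->dev);
 *			napi_gro_receive(napi, skb);
 *			done++;
 *		}
 *		if (done < budget)
 *			napi_complete_done(napi, done);
 *		return done;
 *	}
 */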

static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	if (unlikely(skb->pfmemalloc)) {
		consume_skb(skb);
		return;
	}
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	__vlan_hwaccel_clear_tag(skb);
	skb->dev = napi->dev;
	skb->skb_iif = 0;

	/* eth_type_trans() assumes pkt_type is PACKET_HOST */
	skb->pkt_type = PACKET_HOST;

	skb->encapsulation = 0;
	skb->ip_summed = CHECKSUM_NONE;
	skb_shinfo(skb)->gso_type = 0;
	skb_shinfo(skb)->gso_size = 0;
	if (unlikely(skb->slow_gro)) {
		skb_orphan(skb);
		skb_ext_reset(skb);
		nf_reset_ct(skb);
		skb->slow_gro = 0;
	}

	napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
		if (skb) {
			napi->skb = skb;
			skb_mark_napi_id(skb, napi);
		}
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);

static gro_result_t napi_frags_finish(struct napi_struct *napi,
				      struct sk_buff *skb,
				      gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		__skb_push(skb, ETH_HLEN);
		skb->protocol = eth_type_trans(skb, skb->dev);
		if (ret == GRO_NORMAL)
			gro_normal_one(&napi->gro, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else
			napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

/* The upper GRO stack assumes the network header starts at gro_offset=0.
 * Drivers could call both napi_gro_frags() and napi_gro_receive(), so we
 * copy the Ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	const struct ethhdr *eth;
	unsigned int hlen = sizeof(*eth);

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb, hlen);

	if (unlikely(!skb_gro_may_pull(skb, hlen))) {
		eth = skb_gro_header_slow(skb, hlen, 0);
		if (unlikely(!eth)) {
			net_warn_ratelimited("%s: dropping impossible skb from %s\n",
					     __func__, napi->dev->name);
			napi_reuse_skb(napi, skb);
			return NULL;
		}
	} else {
		eth = (const struct ethhdr *)skb->data;

		if (NAPI_GRO_CB(skb)->frag0 != skb->data)
			gro_pull_from_frag0(skb, hlen);

		NAPI_GRO_CB(skb)->frag0 += hlen;
		NAPI_GRO_CB(skb)->frag0_len -= hlen;
	}
	__skb_pull(skb, hlen);

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.
	 * We'll fix it up properly in napi_frags_finish()
	 */
	skb->protocol = eth->h_proto;

	return skb;
}

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	gro_result_t ret;
	struct sk_buff *skb = napi_frags_skb(napi);

	trace_napi_gro_frags_entry(skb);

	ret = napi_frags_finish(napi, skb, dev_gro_receive(&napi->gro, skb));
	trace_napi_gro_frags_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_frags);
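/*
 * Example (illustrative sketch only): a driver using the frag-based API
 * attaches pages holding the complete frame (including the Ethernet header,
 * which the core pulls itself in napi_frags_skb() above) to the skb cached
 * by napi_get_frags(), then hands it over with napi_gro_frags().  The
 * foo_rx_* helpers are hypothetical.
 *
 *	struct sk_buff *skb = napi_get_frags(napi);
 *
 *	if (unlikely(!skb))
 *		return;
 *	skb_add_rx_frag(skb, 0, foo_rx_page(ring), 0, foo_rx_len(ring),
 *			PAGE_SIZE);
 *	napi_gro_frags(napi);
 */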

/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
{
	__wsum wsum;
	__sum16 sum;

	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
	/* See comments in __skb_checksum_complete(). */
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev, skb);
	}

	NAPI_GRO_CB(skb)->csum = wsum;
	NAPI_GRO_CB(skb)->csum_valid = 1;

	return sum;
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);
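/*
 * Example (illustrative sketch only): protocol gro_receive handlers do not
 * usually call the helper above directly; they go through the
 * skb_gro_checksum_validate() macros from <net/gro.h>, which fall back to it
 * only when no hardware checksum can be reused, roughly:
 *
 *	if (!NAPI_GRO_CB(skb)->flush &&
 *	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
 *				      inet_gro_compute_pseudo))
 *		goto flush;
 *
 * as done by the TCP/IPv4 offload code.
 */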

void gro_init(struct gro_node *gro)
{
	for (u32 i = 0; i < GRO_HASH_BUCKETS; i++) {
		INIT_LIST_HEAD(&gro->hash[i].list);
		gro->hash[i].count = 0;
	}

	gro->bitmask = 0;
	gro->cached_napi_id = 0;

	INIT_LIST_HEAD(&gro->rx_list);
	gro->rx_count = 0;
}

void gro_cleanup(struct gro_node *gro)
{
	struct sk_buff *skb, *n;

	for (u32 i = 0; i < GRO_HASH_BUCKETS; i++) {
		list_for_each_entry_safe(skb, n, &gro->hash[i].list, list)
			kfree_skb(skb);

		gro->hash[i].count = 0;
	}

	gro->bitmask = 0;
	gro->cached_napi_id = 0;

	list_for_each_entry_safe(skb, n, &gro->rx_list, list)
		kfree_skb(skb);

	gro->rx_count = 0;
}