// SPDX-License-Identifier: GPL-2.0-or-later
#include <net/gro.h>
#include <net/dst_metadata.h>
#include <net/busy_poll.h>
#include <trace/events/net.h>

#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(offload_lock);

/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&packet_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &net_hotdata.offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 *	__dev_remove_offload	 - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed
 *	&packet_offload is removed from the kernel lists and can be freed or
 *	reused once this function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &net_hotdata.offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload	 - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &packet_offload is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
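
/*
 * Usage sketch (not part of this file): a protocol registers its offload
 * handlers once at init time and removes them again on exit; the removal
 * sleeps in synchronize_net().  ETH_P_FOO and the foo_*() callbacks are
 * hypothetical - see the ETH_P_IP registration in net/ipv4/af_inet.c for
 * a real one.
 *
 *	static struct packet_offload foo_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_FOO),
 *		.callbacks = {
 *			.gro_receive	= foo_gro_receive,
 *			.gro_complete	= foo_gro_complete,
 *		},
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		dev_add_offload(&foo_offload);
 *		return 0;
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		dev_remove_offload(&foo_offload);
 *	}
 */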

int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
{
	struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
	unsigned int offset = skb_gro_offset(skb);
	unsigned int headlen = skb_headlen(skb);
	unsigned int len = skb_gro_len(skb);
	unsigned int delta_truesize;
	unsigned int gro_max_size;
	unsigned int new_truesize;
	struct sk_buff *lp;
	int segs;

	/* Do not splice page-pool-based packets with non-page-pool
	 * packets. This can result in reference count issues, as page
	 * pool pages will not decrement the reference count and will
	 * instead be immediately returned to the pool or have their
	 * frag count decremented.
	 */
	if (p->pp_recycle != skb->pp_recycle)
		return -ETOOMANYREFS;

	/* pairs with WRITE_ONCE() in netif_set_gro(_ipv4)_max_size() */
	gro_max_size = p->protocol == htons(ETH_P_IPV6) ?
			READ_ONCE(p->dev->gro_max_size) :
			READ_ONCE(p->dev->gro_ipv4_max_size);

	if (unlikely(p->len + len >= gro_max_size || NAPI_GRO_CB(skb)->flush))
		return -E2BIG;

	if (unlikely(p->len + len >= GRO_LEGACY_MAX_SIZE)) {
		if (NAPI_GRO_CB(skb)->proto != IPPROTO_TCP ||
		    (p->protocol == htons(ETH_P_IPV6) &&
		     skb_headroom(p) < sizeof(struct hop_jumbo_hdr)) ||
		    p->encapsulation)
			return -E2BIG;
	}

	segs = NAPI_GRO_CB(skb)->count;
	lp = NAPI_GRO_CB(p)->last;
	pinfo = skb_shinfo(lp);
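
	/* Fast path: the linear head holds nothing past the GRO offset,
	 * i.e. all payload is in page fragments.  Transplant skb's frags
	 * onto p (via lp) and shrink skb down to an empty shell.
	 */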
	if (headlen <= offset) {
		skb_frag_t *frag;
		skb_frag_t *frag2;
		int i = skbinfo->nr_frags;
		int nr_frags = pinfo->nr_frags + i;

		if (nr_frags > MAX_SKB_FRAGS)
			goto merge;

		offset -= headlen;
		pinfo->nr_frags = nr_frags;
		skbinfo->nr_frags = 0;

		frag = pinfo->frags + nr_frags;
		frag2 = skbinfo->frags + i;
		do {
			*--frag = *--frag2;
		} while (--i);

		skb_frag_off_add(frag, offset);
		skb_frag_size_sub(frag, offset);

		/* frags were transplanted to p: skb->truesize now covers
		 * only the head area plus the sk_buff overhead
		 */
		new_truesize = SKB_TRUESIZE(skb_end_offset(skb));
		delta_truesize = skb->truesize - new_truesize;

		skb->truesize = new_truesize;
		skb->len -= skb->data_len;
		skb->data_len = 0;

		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
		goto done;
	} else if (skb->head_frag) {
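		/* The payload starts in skb's linear head, but the head
		 * itself lives in a page (head_frag): expose the unread
		 * head bytes as one more fragment of p instead of copying.
		 */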
		int nr_frags = pinfo->nr_frags;
		skb_frag_t *frag = pinfo->frags + nr_frags;
		struct page *page = virt_to_head_page(skb->head);
		unsigned int first_size = headlen - offset;
		unsigned int first_offset;

		if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
			goto merge;

		first_offset = skb->data -
			       (unsigned char *)page_address(page) +
			       offset;

		pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;

		skb_frag_fill_page_desc(frag, page, first_offset, first_size);

		memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
		/* We don't need to clear skbinfo->nr_frags here */

		new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff));
		delta_truesize = skb->truesize - new_truesize;
		skb->truesize = new_truesize;
		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
		goto done;
	}

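/* Fallback: neither a frag transplant nor head stealing was possible.
 * Chain the whole skb behind p through the frag_list; new segments are
 * linked after NAPI_GRO_CB(p)->last so appending stays O(1).
 */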
merge:
	/* sk ownership - if any - is completely transferred to the aggregated packet */
	skb->destructor = NULL;
	delta_truesize = skb->truesize;
	if (offset > headlen) {
		unsigned int eat = offset - headlen;

		skb_frag_off_add(&skbinfo->frags[0], eat);
		skb_frag_size_sub(&skbinfo->frags[0], eat);
		skb->data_len -= eat;
		skb->len -= eat;
		offset = headlen;
	}

	__skb_pull(skb, offset);

	if (NAPI_GRO_CB(p)->last == p)
		skb_shinfo(p)->frag_list = skb;
	else
		NAPI_GRO_CB(p)->last->next = skb;
	NAPI_GRO_CB(p)->last = skb;
	__skb_header_release(skb);
	lp = p;

done:
	NAPI_GRO_CB(p)->count += segs;
	p->data_len += len;
	p->truesize += delta_truesize;
	p->len += len;
	if (lp != p) {
		lp->data_len += len;
		lp->truesize += delta_truesize;
		lp->len += len;
	}
	NAPI_GRO_CB(skb)->same_flow = 1;
	return 0;
}

static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
{
	struct list_head *head = &net_hotdata.offload_base;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
					 ipv6_gro_complete, inet_gro_complete,
					 skb, 0);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return;
	}

out:
	gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
}

static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
				   bool flush_old)
{
	struct list_head *head = &napi->gro_hash[index].list;
	struct sk_buff *skb, *p;

	list_for_each_entry_safe_reverse(skb, p, head, list) {
		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;
		skb_list_del_init(skb);
		napi_gro_complete(napi, skb);
		napi->gro_hash[index].count--;
	}

	if (!napi->gro_hash[index].count)
		__clear_bit(index, &napi->gro_bitmask);
}

/* napi->gro_hash[].list contains packets ordered by age,
 * youngest packets at the head of it.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	unsigned long bitmask = napi->gro_bitmask;
	unsigned int i, base = ~0U;

	while ((i = ffs(bitmask)) != 0) {
		bitmask >>= i;
		base += i;
		__napi_gro_flush_chain(napi, base, flush_old);
	}
}
EXPORT_SYMBOL(napi_gro_flush);
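
/* Worked example of the ffs() walk above: for gro_bitmask == 0b101
 * (buckets 0 and 2 non-empty), the first iteration sees ffs() == 1,
 * so base wraps from ~0U to 0 and bucket 0 is flushed; the shifted
 * mask is then 0b10, so ffs() == 2 makes base == 2 and bucket 2 is
 * flushed; the mask is now empty and the loop stops.
 */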

static unsigned long gro_list_prepare_tc_ext(const struct sk_buff *skb,
					     const struct sk_buff *p,
					     unsigned long diffs)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	struct tc_skb_ext *skb_ext;
	struct tc_skb_ext *p_ext;

	skb_ext = skb_ext_find(skb, TC_SKB_EXT);
	p_ext = skb_ext_find(p, TC_SKB_EXT);

	diffs |= (!!p_ext) ^ (!!skb_ext);
	if (!diffs && unlikely(skb_ext))
		diffs |= p_ext->chain ^ skb_ext->chain;
#endif
	return diffs;
}

static void gro_list_prepare(const struct list_head *head,
			     const struct sk_buff *skb)
{
	unsigned int maclen = skb->dev->hard_header_len;
	u32 hash = skb_get_hash_raw(skb);
	struct sk_buff *p;

	list_for_each_entry(p, head, list) {
		unsigned long diffs;

		NAPI_GRO_CB(p)->flush = 0;

		if (hash != skb_get_hash_raw(p)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= p->vlan_all ^ skb->vlan_all;
		diffs |= skb_metadata_differs(p, skb);
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_mac_header(skb),
				       maclen);

		/* In the most common scenarios 'slow_gro' is 0;
		 * otherwise we are already on a slower path, so either
		 * skip all the infrequent tests altogether or avoid
		 * trying too hard to skip each of them individually
		 */
		if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) {
			diffs |= p->sk != skb->sk;
			diffs |= skb_metadata_dst_cmp(p, skb);
			diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);

			diffs |= gro_list_prepare_tc_ext(skb, p, diffs);
		}

		NAPI_GRO_CB(p)->same_flow = !diffs;
	}
}

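/* Prime the GRO header fast path: point NAPI_GRO_CB(skb)->frag0 at the
 * bytes GRO will parse first.  A non-empty linear head wins; otherwise
 * use the first page fragment, provided it is directly addressable
 * (not highmem) and satisfies the NET_IP_ALIGN alignment.  frag0_len is
 * also capped by the tailroom available should the headers later be
 * pulled into the linear area (see gro_pull_from_frag0()).
 */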
static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
{
	const struct skb_shared_info *pinfo;
	const skb_frag_t *frag0;
	unsigned int headlen;

	NAPI_GRO_CB(skb)->data_offset = 0;
	headlen = skb_headlen(skb);
	NAPI_GRO_CB(skb)->frag0 = skb->data;
	NAPI_GRO_CB(skb)->frag0_len = headlen;
	if (headlen)
		return;

	pinfo = skb_shinfo(skb);
	frag0 = &pinfo->frags[0];

	if (pinfo->nr_frags && !PageHighMem(skb_frag_page(frag0)) &&
	    (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
						    skb_frag_size(frag0),
						    skb->end - skb->tail);
	}
}

static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
	struct skb_shared_info *pinfo = skb_shinfo(skb);

	BUG_ON(skb->end - skb->tail < grow);

	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

	skb->data_len -= grow;
	skb->tail += grow;

	skb_frag_off_add(&pinfo->frags[0], grow);
	skb_frag_size_sub(&pinfo->frags[0], grow);

	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
		skb_frag_unref(skb, 0);
		memmove(pinfo->frags, pinfo->frags + 1,
			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
	}
}

static void gro_try_pull_from_frag0(struct sk_buff *skb)
{
	int grow = skb_gro_offset(skb) - skb_headlen(skb);

	if (grow > 0)
		gro_pull_from_frag0(skb, grow);
}

static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
{
	struct sk_buff *oldest;

	oldest = list_last_entry(head, struct sk_buff, list);

	/* We are only called when the chain already holds at least
	 * MAX_GRO_SKBS packets, so an empty list is impossible.
	 */
	if (WARN_ON_ONCE(!oldest))
		return;

	/* Do not adjust napi->gro_hash[].count, caller is adding a new
	 * SKB to the chain.
	 */
	skb_list_del_init(oldest);
	napi_gro_complete(napi, oldest);
}

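/* Core GRO entry point: look up the protocol's gro_receive handler and
 * try to aggregate skb with the in-flight flows held in its per-NAPI
 * hash bucket.  Returns GRO_MERGED/GRO_MERGED_FREE when skb was
 * aggregated into an existing flow, GRO_HELD when it starts a new one,
 * GRO_NORMAL when it must go up the stack unmodified, or GRO_CONSUMED
 * when a callback took ownership of it.
 */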
static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
	struct gro_list *gro_list = &napi->gro_hash[bucket];
	struct list_head *head = &net_hotdata.offload_base;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct sk_buff *pp = NULL;
	enum gro_result ret;
	int same_flow;

	if (netif_elide_gro(skb->dev))
		goto normal;

	gro_list_prepare(&gro_list->list, skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type == type && ptype->callbacks.gro_receive)
			goto found_ptype;
	}
	rcu_read_unlock();
	goto normal;

found_ptype:
	skb_set_network_header(skb, skb_gro_offset(skb));
	skb_reset_mac_len(skb);
	BUILD_BUG_ON(sizeof_field(struct napi_gro_cb, zeroed) != sizeof(u32));
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct napi_gro_cb, zeroed),
					sizeof(u32))); /* Avoid slow unaligned access */
	*(u32 *)&NAPI_GRO_CB(skb)->zeroed = 0;
	NAPI_GRO_CB(skb)->flush = skb_has_frag_list(skb);
	NAPI_GRO_CB(skb)->is_atomic = 1;
	NAPI_GRO_CB(skb)->count = 1;
	if (unlikely(skb_is_gso(skb))) {
		NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;
		/* Only support TCP, and only non-DODGY users. */
		if (!skb_is_gso_tcp(skb) ||
		    (skb_shinfo(skb)->gso_type & SKB_GSO_DODGY))
			NAPI_GRO_CB(skb)->flush = 1;
	}

	/* Setup for GRO checksum validation */
	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		NAPI_GRO_CB(skb)->csum = skb->csum;
		NAPI_GRO_CB(skb)->csum_valid = 1;
		break;
	case CHECKSUM_UNNECESSARY:
		NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
		break;
	}

	pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
				ipv6_gro_receive, inet_gro_receive,
				&gro_list->list, skb);

	rcu_read_unlock();

	if (PTR_ERR(pp) == -EINPROGRESS) {
		ret = GRO_CONSUMED;
		goto ok;
	}

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		skb_list_del_init(pp);
		napi_gro_complete(napi, pp);
		gro_list->count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush)
		goto normal;

	if (unlikely(gro_list->count >= MAX_GRO_SKBS))
		gro_flush_oldest(napi, &gro_list->list);
	else
		gro_list->count++;

	/* Must be called before setting NAPI_GRO_CB(skb)->{age|last} */
	gro_try_pull_from_frag0(skb);
	NAPI_GRO_CB(skb)->age = jiffies;
	NAPI_GRO_CB(skb)->last = skb;
	if (!skb_is_gso(skb))
		skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	list_add(&skb->list, &gro_list->list);
	ret = GRO_HELD;
ok:
	if (gro_list->count) {
		if (!test_bit(bucket, &napi->gro_bitmask))
			__set_bit(bucket, &napi->gro_bitmask);
	} else if (test_bit(bucket, &napi->gro_bitmask)) {
		__clear_bit(bucket, &napi->gro_bitmask);
	}

	return ret;

normal:
	ret = GRO_NORMAL;
	gro_try_pull_from_frag0(skb);
	goto ok;
}

struct packet_offload *gro_find_receive_by_type(__be16 type)
{
	struct list_head *offload_head = &net_hotdata.offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
	struct list_head *offload_head = &net_hotdata.offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);
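
/*
 * Usage sketch (hypothetical encapsulation code, modelled on
 * eth_gro_receive()): a tunnel's gro_receive handler looks up the inner
 * protocol's offload by type and hands the inner headers on.  The GRO
 * path already runs under rcu_read_lock(), which these lookups rely on.
 * 'inner_proto', 'head' and 'skb' are illustrative parameters.
 *
 *	struct packet_offload *ptype;
 *	struct sk_buff *pp = NULL;
 *
 *	ptype = gro_find_receive_by_type(inner_proto);
 *	if (!ptype) {
 *		NAPI_GRO_CB(skb)->flush = 1;
 *		return NULL;
 *	}
 *	pp = ptype->callbacks.gro_receive(head, skb);
 */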

static gro_result_t napi_skb_finish(struct napi_struct *napi,
				    struct sk_buff *skb,
				    gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
		gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
			__kfree_skb(skb);
		else
			__napi_kfree_skb(skb, SKB_CONSUMED);
		break;

	case GRO_HELD:
	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	gro_result_t ret;

	skb_mark_napi_id(skb, napi);
	trace_napi_gro_receive_entry(skb);

	skb_gro_reset_offset(skb, 0);

	ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_receive_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_receive);
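
/*
 * Usage sketch (hypothetical driver): napi_gro_receive() is the usual way
 * a NAPI poll loop feeds received packets into GRO.  foo_poll() and
 * foo_rx_next() are illustrative only.
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct sk_buff *skb;
 *		int work = 0;
 *
 *		while (work < budget && (skb = foo_rx_next(napi))) {
 *			skb->protocol = eth_type_trans(skb, napi->dev);
 *			napi_gro_receive(napi, skb);
 *			work++;
 *		}
 *		if (work < budget)
 *			napi_complete_done(napi, work);
 *		return work;
 *	}
 */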

static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	if (unlikely(skb->pfmemalloc)) {
		consume_skb(skb);
		return;
	}
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	__vlan_hwaccel_clear_tag(skb);
	skb->dev = napi->dev;
	skb->skb_iif = 0;

	/* eth_type_trans() assumes pkt_type is PACKET_HOST */
	skb->pkt_type = PACKET_HOST;

	skb->encapsulation = 0;
	skb_shinfo(skb)->gso_type = 0;
	skb_shinfo(skb)->gso_size = 0;
	if (unlikely(skb->slow_gro)) {
		skb_orphan(skb);
		skb_ext_reset(skb);
		nf_reset_ct(skb);
		skb->slow_gro = 0;
	}

	napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
		if (skb) {
			napi->skb = skb;
			skb_mark_napi_id(skb, napi);
		}
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);

static gro_result_t napi_frags_finish(struct napi_struct *napi,
				      struct sk_buff *skb,
				      gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		__skb_push(skb, ETH_HLEN);
		skb->protocol = eth_type_trans(skb, skb->dev);
		if (ret == GRO_NORMAL)
			gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else
			napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

/* The upper GRO stack assumes the network header starts at gro_offset=0.
 * Drivers could call both napi_gro_frags() and napi_gro_receive(), so
 * we copy the ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	const struct ethhdr *eth;
	unsigned int hlen = sizeof(*eth);

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb, hlen);

	if (unlikely(!skb_gro_may_pull(skb, hlen))) {
		eth = skb_gro_header_slow(skb, hlen, 0);
		if (unlikely(!eth)) {
			net_warn_ratelimited("%s: dropping impossible skb from %s\n",
					     __func__, napi->dev->name);
			napi_reuse_skb(napi, skb);
			return NULL;
		}
	} else {
		eth = (const struct ethhdr *)skb->data;

		if (NAPI_GRO_CB(skb)->frag0 != skb->data)
			gro_pull_from_frag0(skb, hlen);

		NAPI_GRO_CB(skb)->frag0 += hlen;
		NAPI_GRO_CB(skb)->frag0_len -= hlen;
	}
	__skb_pull(skb, hlen);

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.
	 * We'll fix it up properly in napi_frags_finish().
	 */
	skb->protocol = eth->h_proto;

	return skb;
}

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	gro_result_t ret;
	struct sk_buff *skb = napi_frags_skb(napi);

	if (!skb)
		return GRO_NORMAL;

	trace_napi_gro_frags_entry(skb);

	ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_frags_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_frags);
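
/*
 * Usage sketch (hypothetical driver): drivers that receive directly into
 * pages pair napi_get_frags() with napi_gro_frags(); the payload never
 * touches the skb's linear area, and napi_gro_frags() consumes napi->skb.
 * 'page', 'offset', 'len' and 'truesize' come from the illustrative
 * driver's RX descriptor.
 *
 *	skb = napi_get_frags(napi);
 *	if (!skb)
 *		return;
 *	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
 *			offset, len, truesize);
 *	napi_gro_frags(napi);
 */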

/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
{
	__wsum wsum;
	__sum16 sum;

	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
	/* See comments in __skb_checksum_complete(). */
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev, skb);
	}

	NAPI_GRO_CB(skb)->csum = wsum;
	NAPI_GRO_CB(skb)->csum_valid = 1;

	return sum;
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);
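
/*
 * Usage note: protocol gro_receive handlers normally do not call
 * __skb_gro_checksum_complete() directly; they use the
 * skb_gro_checksum_validate*() helpers from <net/gro.h>, which fall back
 * to a full checksum computation only when no valid checksum information
 * is already available.  A sketch modelled on the TCP/IPv4 path:
 *
 *	if (skb_gro_checksum_validate(skb, IPPROTO_TCP,
 *				      inet_gro_compute_pseudo))
 *		goto flush;
 */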