xref: /linux/net/core/gro.c (revision 1e15510b71c99c6e49134d756df91069f7d18141)
// SPDX-License-Identifier: GPL-2.0-or-later
#include <net/gro.h>
#include <net/dst_metadata.h>
#include <net/busy_poll.h>
#include <trace/events/net.h>
#include <linux/skbuff_ref.h>

#define MAX_GRO_SKBS 8

static DEFINE_SPINLOCK(offload_lock);

/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &net_hotdata.offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);
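
/* Illustrative sketch (not part of this file): a protocol module typically
 * registers its offload callbacks once at init time. ETH_P_FOO,
 * foo_gro_receive and foo_gro_complete are hypothetical placeholders for a
 * real protocol's EtherType and callbacks.
 *
 *	static struct packet_offload foo_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_FOO),
 *		.callbacks = {
 *			.gro_receive	= foo_gro_receive,
 *			.gro_complete	= foo_gro_complete,
 *		},
 *	};
 *
 *	static int __init foo_offload_init(void)
 *	{
 *		dev_add_offload(&foo_offload);
 *		return 0;
 *	}
 */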

/**
 *	__dev_remove_offload	 - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &offload_type
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &net_hotdata.offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload	 - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &offload_type is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
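
/* Illustrative sketch (not part of this file): the unregister side of the
 * registration example above. Because dev_remove_offload() ends with
 * synchronize_net(), the hypothetical foo_offload structure may be freed or
 * reused (and the owning module unloaded) as soon as it returns.
 *
 *	static void __exit foo_offload_exit(void)
 *	{
 *		dev_remove_offload(&foo_offload);
 *	}
 */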


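/* skb_gro_receive() merges @skb into the already-held packet @p. Depending
 * on the layout of @skb it either appends its page fragments to @p, turns
 * the linear head of @skb into one more page fragment (stolen head), or,
 * as a fallback, chains the whole skb onto @p's frag_list. On success the
 * aggregate length, truesize and segment count of @p are updated and @skb
 * is marked as belonging to the same flow.
 */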
int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
{
	struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
	unsigned int offset = skb_gro_offset(skb);
	unsigned int headlen = skb_headlen(skb);
	unsigned int len = skb_gro_len(skb);
	unsigned int delta_truesize;
	unsigned int new_truesize;
	struct sk_buff *lp;
	int segs;

	/* Do not splice page pool based packets w/ non-page pool
	 * packets. This can result in reference count issues as page
	 * pool pages will not decrement the reference count and will
	 * instead be immediately returned to the pool or have frag
	 * count decremented.
	 */
	if (p->pp_recycle != skb->pp_recycle)
		return -ETOOMANYREFS;

	if (unlikely(p->len + len >= netif_get_gro_max_size(p->dev, p) ||
		     NAPI_GRO_CB(skb)->flush))
		return -E2BIG;

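	/* Growing past the legacy 64KB limit is only allowed for BIG TCP:
	 * plain (non-encapsulated) TCP, and for IPv6 only when @p has enough
	 * headroom to take the hop-by-hop jumbogram header later on.
	 */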
	if (unlikely(p->len + len >= GRO_LEGACY_MAX_SIZE)) {
		if (NAPI_GRO_CB(skb)->proto != IPPROTO_TCP ||
		    (p->protocol == htons(ETH_P_IPV6) &&
		     skb_headroom(p) < sizeof(struct hop_jumbo_hdr)) ||
		    p->encapsulation)
			return -E2BIG;
	}

	segs = NAPI_GRO_CB(skb)->count;
	lp = NAPI_GRO_CB(p)->last;
	pinfo = skb_shinfo(lp);

	if (headlen <= offset) {
		skb_frag_t *frag;
		skb_frag_t *frag2;
		int i = skbinfo->nr_frags;
		int nr_frags = pinfo->nr_frags + i;

		if (nr_frags > MAX_SKB_FRAGS)
			goto merge;

		offset -= headlen;
		pinfo->nr_frags = nr_frags;
		skbinfo->nr_frags = 0;

		frag = pinfo->frags + nr_frags;
		frag2 = skbinfo->frags + i;
		do {
			*--frag = *--frag2;
		} while (--i);

		skb_frag_off_add(frag, offset);
		skb_frag_size_sub(frag, offset);

		/* all fragments truesize : remove (head size + sk_buff) */
		new_truesize = SKB_TRUESIZE(skb_end_offset(skb));
		delta_truesize = skb->truesize - new_truesize;

		skb->truesize = new_truesize;
		skb->len -= skb->data_len;
		skb->data_len = 0;

		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
		goto done;
	} else if (skb->head_frag) {
		int nr_frags = pinfo->nr_frags;
		skb_frag_t *frag = pinfo->frags + nr_frags;
		struct page *page = virt_to_head_page(skb->head);
		unsigned int first_size = headlen - offset;
		unsigned int first_offset;

		if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
			goto merge;

		first_offset = skb->data -
			       (unsigned char *)page_address(page) +
			       offset;

		pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;

		skb_frag_fill_page_desc(frag, page, first_offset, first_size);

		memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
		/* We don't need to clear skbinfo->nr_frags here */

		new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff));
		delta_truesize = skb->truesize - new_truesize;
		skb->truesize = new_truesize;
		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
		goto done;
	}

merge:
	/* sk ownership - if any - completely transferred to the aggregated packet */
	skb->destructor = NULL;
	skb->sk = NULL;
	delta_truesize = skb->truesize;
	if (offset > headlen) {
		unsigned int eat = offset - headlen;

		skb_frag_off_add(&skbinfo->frags[0], eat);
		skb_frag_size_sub(&skbinfo->frags[0], eat);
		skb->data_len -= eat;
		skb->len -= eat;
		offset = headlen;
	}

	__skb_pull(skb, offset);

	if (NAPI_GRO_CB(p)->last == p)
		skb_shinfo(p)->frag_list = skb;
	else
		NAPI_GRO_CB(p)->last->next = skb;
	NAPI_GRO_CB(p)->last = skb;
	__skb_header_release(skb);
	lp = p;

done:
	NAPI_GRO_CB(p)->count += segs;
	p->data_len += len;
	p->truesize += delta_truesize;
	p->len += len;
	if (lp != p) {
		lp->data_len += len;
		lp->truesize += delta_truesize;
		lp->len += len;
	}
	NAPI_GRO_CB(skb)->same_flow = 1;
	return 0;
}

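/* skb_gro_receive_list() is the frag_list flavour of GRO merging: instead
 * of coalescing payload, it simply chains @skb onto @p's frag_list and
 * updates the aggregate counters, keeping the individual segments intact.
 */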
int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
{
	if (unlikely(p->len + skb->len >= 65536))
		return -E2BIG;

	if (NAPI_GRO_CB(p)->last == p)
		skb_shinfo(p)->frag_list = skb;
	else
		NAPI_GRO_CB(p)->last->next = skb;

	skb_pull(skb, skb_gro_offset(skb));

	NAPI_GRO_CB(p)->last = skb;
	NAPI_GRO_CB(p)->count++;
	p->data_len += skb->len;

	/* sk ownership - if any - completely transferred to the aggregated packet */
	skb->destructor = NULL;
	skb->sk = NULL;
	p->truesize += skb->truesize;
	p->len += skb->len;

	NAPI_GRO_CB(skb)->same_flow = 1;

	return 0;
}


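/* Hand a held GRO packet to the regular receive path: let the matching
 * protocol's gro_complete callback fix up the headers of the aggregate,
 * then queue it via gro_normal_one(). Single-segment packets skip the
 * callback and only have their gso_size cleared.
 */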
static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
{
	struct list_head *head = &net_hotdata.offload_base;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
					 ipv6_gro_complete, inet_gro_complete,
					 skb, 0);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return;
	}

out:
	gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
}

static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
				   bool flush_old)
{
	struct list_head *head = &napi->gro_hash[index].list;
	struct sk_buff *skb, *p;

	list_for_each_entry_safe_reverse(skb, p, head, list) {
		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;
		skb_list_del_init(skb);
		napi_gro_complete(napi, skb);
		napi->gro_hash[index].count--;
	}

	if (!napi->gro_hash[index].count)
		__clear_bit(index, &napi->gro_bitmask);
}

/* napi->gro_hash[].list contains packets ordered by age, with the
 * youngest packets at its head.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	unsigned long bitmask = napi->gro_bitmask;
	unsigned int i, base = ~0U;

	while ((i = ffs(bitmask)) != 0) {
		bitmask >>= i;
		base += i;
		__napi_gro_flush_chain(napi, base, flush_old);
	}
}
EXPORT_SYMBOL(napi_gro_flush);

static unsigned long gro_list_prepare_tc_ext(const struct sk_buff *skb,
					     const struct sk_buff *p,
					     unsigned long diffs)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	struct tc_skb_ext *skb_ext;
	struct tc_skb_ext *p_ext;

	skb_ext = skb_ext_find(skb, TC_SKB_EXT);
	p_ext = skb_ext_find(p, TC_SKB_EXT);

	diffs |= (!!p_ext) ^ (!!skb_ext);
	if (!diffs && unlikely(skb_ext))
		diffs |= p_ext->chain ^ skb_ext->chain;
#endif
	return diffs;
}

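/* Mark which packets already held in this hash bucket belong to the same
 * flow as @skb: compare the rx hash, device, vlan tag, metadata and MAC
 * header, and only fall back to the more expensive comparisons (socket,
 * dst metadata, conntrack, tc ext) when either skb is flagged slow_gro.
 */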
static void gro_list_prepare(const struct list_head *head,
			     const struct sk_buff *skb)
{
	unsigned int maclen = skb->dev->hard_header_len;
	u32 hash = skb_get_hash_raw(skb);
	struct sk_buff *p;

	list_for_each_entry(p, head, list) {
		unsigned long diffs;

		if (hash != skb_get_hash_raw(p)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= p->vlan_all ^ skb->vlan_all;
		diffs |= skb_metadata_differs(p, skb);
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_mac_header(skb),
				       maclen);

		/* In the most common scenarios 'slow_gro' is 0; otherwise
		 * we are already on a slower path, so either skip all the
		 * infrequent tests altogether or avoid trying too hard to
		 * skip each of them individually.
		 */
		if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) {
			diffs |= p->sk != skb->sk;
			diffs |= skb_metadata_dst_cmp(p, skb);
			diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);

			diffs |= gro_list_prepare_tc_ext(skb, p, diffs);
		}

		NAPI_GRO_CB(p)->same_flow = !diffs;
	}
}

static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
{
	const struct skb_shared_info *pinfo;
	const skb_frag_t *frag0;
	unsigned int headlen;

	NAPI_GRO_CB(skb)->network_offset = 0;
	NAPI_GRO_CB(skb)->data_offset = 0;
	headlen = skb_headlen(skb);
	NAPI_GRO_CB(skb)->frag0 = skb->data;
	NAPI_GRO_CB(skb)->frag0_len = headlen;
	if (headlen)
		return;

	pinfo = skb_shinfo(skb);
	frag0 = &pinfo->frags[0];

	if (pinfo->nr_frags && skb_frag_page(frag0) &&
	    !PageHighMem(skb_frag_page(frag0)) &&
	    (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
						    skb_frag_size(frag0),
						    skb->end - skb->tail);
	}
}

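/* Copy @grow bytes from the frag0 area into the skb linear header and
 * shrink the first fragment accordingly, dropping it entirely if it
 * becomes empty. Used when the headers under inspection extend past the
 * data already present in skb->data.
 */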
static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
	struct skb_shared_info *pinfo = skb_shinfo(skb);

	BUG_ON(skb->end - skb->tail < grow);

	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

	skb->data_len -= grow;
	skb->tail += grow;

	skb_frag_off_add(&pinfo->frags[0], grow);
	skb_frag_size_sub(&pinfo->frags[0], grow);

	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
		skb_frag_unref(skb, 0);
		memmove(pinfo->frags, pinfo->frags + 1,
			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
	}
}

static void gro_try_pull_from_frag0(struct sk_buff *skb)
{
	int grow = skb_gro_offset(skb) - skb_headlen(skb);

	if (grow > 0)
		gro_pull_from_frag0(skb, grow);
}

static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
{
	struct sk_buff *oldest;

	oldest = list_last_entry(head, struct sk_buff, list);

	/* We are called with head length >= MAX_GRO_SKBS, so this is
	 * impossible.
	 */
	if (WARN_ON_ONCE(!oldest))
		return;

	/* Do not adjust napi->gro_hash[].count, caller is adding a new
	 * SKB to the chain.
	 */
	skb_list_del_init(oldest);
	napi_gro_complete(napi, oldest);
}

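/* Core GRO entry point: classify @skb against the packets already held in
 * its hash bucket, hand it to the matching protocol's gro_receive callback,
 * and decide whether it was merged, must be flushed, or should be held on
 * the bucket list waiting for more segments of the same flow.
 */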
static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
	struct gro_list *gro_list = &napi->gro_hash[bucket];
	struct list_head *head = &net_hotdata.offload_base;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct sk_buff *pp = NULL;
	enum gro_result ret;
	int same_flow;

	if (netif_elide_gro(skb->dev))
		goto normal;

	gro_list_prepare(&gro_list->list, skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type == type && ptype->callbacks.gro_receive)
			goto found_ptype;
	}
	rcu_read_unlock();
	goto normal;

found_ptype:
	skb_set_network_header(skb, skb_gro_offset(skb));
	skb_reset_mac_len(skb);
	BUILD_BUG_ON(sizeof_field(struct napi_gro_cb, zeroed) != sizeof(u32));
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct napi_gro_cb, zeroed),
					sizeof(u32))); /* Avoid slow unaligned acc */
	*(u32 *)&NAPI_GRO_CB(skb)->zeroed = 0;
	NAPI_GRO_CB(skb)->flush = skb_has_frag_list(skb);
	NAPI_GRO_CB(skb)->count = 1;
	if (unlikely(skb_is_gso(skb))) {
		NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;
		/* Only support TCP and non DODGY users. */
		if (!skb_is_gso_tcp(skb) ||
		    (skb_shinfo(skb)->gso_type & SKB_GSO_DODGY))
			NAPI_GRO_CB(skb)->flush = 1;
	}

	/* Setup for GRO checksum validation */
	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		NAPI_GRO_CB(skb)->csum = skb->csum;
		NAPI_GRO_CB(skb)->csum_valid = 1;
		break;
	case CHECKSUM_UNNECESSARY:
		NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
		break;
	}

	pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
				ipv6_gro_receive, inet_gro_receive,
				&gro_list->list, skb);

	rcu_read_unlock();

	if (PTR_ERR(pp) == -EINPROGRESS) {
		ret = GRO_CONSUMED;
		goto ok;
	}

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		skb_list_del_init(pp);
		napi_gro_complete(napi, pp);
		gro_list->count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush)
		goto normal;

	if (unlikely(gro_list->count >= MAX_GRO_SKBS))
		gro_flush_oldest(napi, &gro_list->list);
	else
		gro_list->count++;

	/* Must be called before setting NAPI_GRO_CB(skb)->{age|last} */
	gro_try_pull_from_frag0(skb);
	NAPI_GRO_CB(skb)->age = jiffies;
	NAPI_GRO_CB(skb)->last = skb;
	if (!skb_is_gso(skb))
		skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	list_add(&skb->list, &gro_list->list);
	ret = GRO_HELD;
ok:
	if (gro_list->count) {
		if (!test_bit(bucket, &napi->gro_bitmask))
			__set_bit(bucket, &napi->gro_bitmask);
	} else if (test_bit(bucket, &napi->gro_bitmask)) {
		__clear_bit(bucket, &napi->gro_bitmask);
	}

	return ret;

normal:
	ret = GRO_NORMAL;
	gro_try_pull_from_frag0(skb);
	goto ok;
}

struct packet_offload *gro_find_receive_by_type(__be16 type)
{
	struct list_head *offload_head = &net_hotdata.offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
	struct list_head *offload_head = &net_hotdata.offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);

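/* Translate the dev_gro_receive() verdict for a packet fed in via
 * napi_gro_receive(): pass GRO_NORMAL packets up the stack, free skbs
 * that were merged into another packet, and leave held or consumed
 * packets alone.
 */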
static gro_result_t napi_skb_finish(struct napi_struct *napi,
				    struct sk_buff *skb,
				    gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
		gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
			__kfree_skb(skb);
		else
			__napi_kfree_skb(skb, SKB_CONSUMED);
		break;

	case GRO_HELD:
	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	gro_result_t ret;

	skb_mark_napi_id(skb, napi);
	trace_napi_gro_receive_entry(skb);

	skb_gro_reset_offset(skb, 0);

	ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_receive_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_receive);
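
/* Illustrative sketch (not part of this file): a typical driver feeds
 * received buffers into GRO from its NAPI poll routine. foo_rx_poll(),
 * foo_build_skb() and the ring/budget handling are hypothetical.
 *
 *	static int foo_rx_poll(struct napi_struct *napi, int budget)
 *	{
 *		int done = 0;
 *
 *		while (done < budget) {
 *			struct sk_buff *skb = foo_build_skb(napi);
 *
 *			if (!skb)
 *				break;
 *			napi_gro_receive(napi, skb);
 *			done++;
 *		}
 *		if (done < budget)
 *			napi_complete_done(napi, done);
 *		return done;
 *	}
 */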

static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	if (unlikely(skb->pfmemalloc)) {
		consume_skb(skb);
		return;
	}
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	__vlan_hwaccel_clear_tag(skb);
	skb->dev = napi->dev;
	skb->skb_iif = 0;

	/* eth_type_trans() assumes pkt_type is PACKET_HOST */
	skb->pkt_type = PACKET_HOST;

	skb->encapsulation = 0;
	skb->ip_summed = CHECKSUM_NONE;
	skb_shinfo(skb)->gso_type = 0;
	skb_shinfo(skb)->gso_size = 0;
	if (unlikely(skb->slow_gro)) {
		skb_orphan(skb);
		skb_ext_reset(skb);
		nf_reset_ct(skb);
		skb->slow_gro = 0;
	}

	napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
		if (skb) {
			napi->skb = skb;
			skb_mark_napi_id(skb, napi);
		}
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);

static gro_result_t napi_frags_finish(struct napi_struct *napi,
				      struct sk_buff *skb,
				      gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		__skb_push(skb, ETH_HLEN);
		skb->protocol = eth_type_trans(skb, skb->dev);
		if (ret == GRO_NORMAL)
			gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else
			napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

/* The upper GRO stack assumes the network header starts at gro_offset=0.
 * Drivers may call both napi_gro_frags() and napi_gro_receive(), so we
 * copy the ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	const struct ethhdr *eth;
	unsigned int hlen = sizeof(*eth);

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb, hlen);

	if (unlikely(!skb_gro_may_pull(skb, hlen))) {
		eth = skb_gro_header_slow(skb, hlen, 0);
		if (unlikely(!eth)) {
			net_warn_ratelimited("%s: dropping impossible skb from %s\n",
					     __func__, napi->dev->name);
			napi_reuse_skb(napi, skb);
			return NULL;
		}
	} else {
		eth = (const struct ethhdr *)skb->data;

		if (NAPI_GRO_CB(skb)->frag0 != skb->data)
			gro_pull_from_frag0(skb, hlen);

		NAPI_GRO_CB(skb)->frag0 += hlen;
		NAPI_GRO_CB(skb)->frag0_len -= hlen;
	}
	__skb_pull(skb, hlen);

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.
	 * We'll fix it up properly in napi_frags_finish()
	 */
	skb->protocol = eth->h_proto;

	return skb;
}

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	gro_result_t ret;
	struct sk_buff *skb = napi_frags_skb(napi);

	trace_napi_gro_frags_entry(skb);

	ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_frags_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_frags);
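
/* Illustrative sketch (not part of this file): drivers that receive into
 * pages rather than building their own skb use the napi_get_frags() /
 * napi_gro_frags() pair: get the preallocated skb, attach the received
 * page as a fragment, update the length accounting, then hand it to GRO.
 * page, off and len stand for hypothetical per-descriptor values; on a
 * NULL return from napi_get_frags() the frame is simply dropped.
 *
 *	struct sk_buff *skb = napi_get_frags(napi);
 *
 *	if (!skb)
 *		return;
 *	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, off, len);
 *	skb->len += len;
 *	skb->data_len += len;
 *	skb->truesize += PAGE_SIZE;
 *	napi_gro_frags(napi);
 */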

/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
{
	__wsum wsum;
	__sum16 sum;

	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
	/* See comments in __skb_checksum_complete(). */
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev, skb);
	}

	NAPI_GRO_CB(skb)->csum = wsum;
	NAPI_GRO_CB(skb)->csum_valid = 1;

	return sum;
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);
795