// SPDX-License-Identifier: GPL-2.0-or-later
#include <net/gro.h>
#include <net/dst_metadata.h>
#include <net/busy_poll.h>
#include <trace/events/net.h>
#include <linux/skbuff_ref.h>

#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(offload_lock);

/**
 * dev_add_offload - register offload handlers
 * @po: protocol offload declaration
 *
 * Add protocol offload handlers to the networking stack. The passed
 * &packet_offload is linked into kernel lists and may not be freed until
 * it has been removed from the kernel lists.
 *
 * This call does not sleep, therefore it cannot guarantee that all
 * CPUs currently in the middle of receiving packets will see the new
 * offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &net_hotdata.offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);
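/*
 * Illustrative sketch (not part of this file): a hypothetical module that
 * registers GRO callbacks for a made-up EtherType. The names foo_offload,
 * foo_gro_receive, foo_gro_complete and ETH_P_FOO are assumptions for the
 * example only; real users (e.g. the IPv4/IPv6 offloads) follow the same
 * pattern and pair dev_add_offload() with dev_remove_offload() on exit.
 *
 *	static struct packet_offload foo_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_FOO),
 *		.callbacks = {
 *			.gro_receive  = foo_gro_receive,
 *			.gro_complete = foo_gro_complete,
 *		},
 *	};
 *
 *	static int __init foo_offload_init(void)
 *	{
 *		dev_add_offload(&foo_offload);
 *		return 0;
 *	}
 *
 *	static void __exit foo_offload_exit(void)
 *	{
 *		dev_remove_offload(&foo_offload);
 *	}
 */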

/**
 * __dev_remove_offload - remove offload handler
 * @po: packet offload declaration
 *
 * Remove a protocol offload handler that was previously added to the
 * kernel offload handlers by dev_add_offload(). The passed &packet_offload
 * is removed from the kernel lists and can be freed or reused once this
 * function returns.
 *
 * The packet type might still be in use by receivers
 * and must not be freed until after all the CPUs have gone
 * through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &net_hotdata.offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 * dev_remove_offload - remove packet offload handler
 * @po: packet offload declaration
 *
 * Remove a packet offload handler that was previously added to the kernel
 * offload handlers by dev_add_offload(). The passed &packet_offload is
 * removed from the kernel lists and can be freed or reused once this
 * function returns.
 *
 * This call sleeps to guarantee that no CPU is looking at the packet
 * type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);


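/* Attempt to merge @skb into the already-held GRO packet @p. Three
 * strategies are used, in order of preference:
 *  - if all of @skb's data past the GRO offset is in page fragments,
 *    the fragments are moved over to @p;
 *  - else, if @skb's linear head is itself page-backed (head_frag),
 *    the head is appended to @p as one more fragment;
 *  - otherwise @skb is chained onto @p's frag_list.
 * With the first two strategies @skb's payload is absorbed by @p and
 * @skb is marked for freeing via NAPI_GRO_CB(skb)->free; with the last
 * one @skb stays linked behind @p.
 */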
int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
{
	struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
	unsigned int offset = skb_gro_offset(skb);
	unsigned int headlen = skb_headlen(skb);
	unsigned int len = skb_gro_len(skb);
	unsigned int delta_truesize;
	unsigned int gro_max_size;
	unsigned int new_truesize;
	struct sk_buff *lp;
	int segs;

	/* Do not splice page pool based packets w/ non-page pool
	 * packets. This can result in reference count issues as page
	 * pool pages will not decrement the reference count and will
	 * instead be immediately returned to the pool or have frag
	 * count decremented.
	 */
	if (p->pp_recycle != skb->pp_recycle)
		return -ETOOMANYREFS;

	/* pairs with WRITE_ONCE() in netif_set_gro(_ipv4)_max_size() */
	gro_max_size = p->protocol == htons(ETH_P_IPV6) ?
			READ_ONCE(p->dev->gro_max_size) :
			READ_ONCE(p->dev->gro_ipv4_max_size);

	if (unlikely(p->len + len >= gro_max_size || NAPI_GRO_CB(skb)->flush))
		return -E2BIG;

	if (unlikely(p->len + len >= GRO_LEGACY_MAX_SIZE)) {
		if (NAPI_GRO_CB(skb)->proto != IPPROTO_TCP ||
		    (p->protocol == htons(ETH_P_IPV6) &&
		     skb_headroom(p) < sizeof(struct hop_jumbo_hdr)) ||
		    p->encapsulation)
			return -E2BIG;
	}

	segs = NAPI_GRO_CB(skb)->count;
	lp = NAPI_GRO_CB(p)->last;
	pinfo = skb_shinfo(lp);

	if (headlen <= offset) {
		skb_frag_t *frag;
		skb_frag_t *frag2;
		int i = skbinfo->nr_frags;
		int nr_frags = pinfo->nr_frags + i;

		if (nr_frags > MAX_SKB_FRAGS)
			goto merge;

		offset -= headlen;
		pinfo->nr_frags = nr_frags;
		skbinfo->nr_frags = 0;

		frag = pinfo->frags + nr_frags;
		frag2 = skbinfo->frags + i;
		do {
			*--frag = *--frag2;
		} while (--i);

		skb_frag_off_add(frag, offset);
		skb_frag_size_sub(frag, offset);

		/* all fragments truesize: remove (head size + sk_buff) */
		new_truesize = SKB_TRUESIZE(skb_end_offset(skb));
		delta_truesize = skb->truesize - new_truesize;

		skb->truesize = new_truesize;
		skb->len -= skb->data_len;
		skb->data_len = 0;

		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
		goto done;
	} else if (skb->head_frag) {
		int nr_frags = pinfo->nr_frags;
		skb_frag_t *frag = pinfo->frags + nr_frags;
		struct page *page = virt_to_head_page(skb->head);
		unsigned int first_size = headlen - offset;
		unsigned int first_offset;

		if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
			goto merge;

		first_offset = skb->data -
			       (unsigned char *)page_address(page) +
			       offset;

		pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;

		skb_frag_fill_page_desc(frag, page, first_offset, first_size);

		memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
		/* We don't need to clear skbinfo->nr_frags here */

		new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff));
		delta_truesize = skb->truesize - new_truesize;
		skb->truesize = new_truesize;
		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
		goto done;
	}

merge:
	/* sk ownership - if any - completely transferred to the aggregated packet */
	skb->destructor = NULL;
	skb->sk = NULL;
	delta_truesize = skb->truesize;
	if (offset > headlen) {
		unsigned int eat = offset - headlen;

		skb_frag_off_add(&skbinfo->frags[0], eat);
		skb_frag_size_sub(&skbinfo->frags[0], eat);
		skb->data_len -= eat;
		skb->len -= eat;
		offset = headlen;
	}

	__skb_pull(skb, offset);

	if (NAPI_GRO_CB(p)->last == p)
		skb_shinfo(p)->frag_list = skb;
	else
		NAPI_GRO_CB(p)->last->next = skb;
	NAPI_GRO_CB(p)->last = skb;
	__skb_header_release(skb);
	lp = p;

done:
	NAPI_GRO_CB(p)->count += segs;
	p->data_len += len;
	p->truesize += delta_truesize;
	p->len += len;
	if (lp != p) {
		lp->data_len += len;
		lp->truesize += delta_truesize;
		lp->len += len;
	}
	NAPI_GRO_CB(skb)->same_flow = 1;
	return 0;
}

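/* List-based variant of skb_gro_receive(): @skb is chained onto @p's
 * frag_list as-is (no fragment merging), with the aggregate size capped
 * below 64KB. Used by fraglist GRO, where the original packets must be
 * preserved so they can be split apart again on output.
 */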
int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
{
	if (unlikely(p->len + skb->len >= 65536))
		return -E2BIG;

	if (NAPI_GRO_CB(p)->last == p)
		skb_shinfo(p)->frag_list = skb;
	else
		NAPI_GRO_CB(p)->last->next = skb;

	skb_pull(skb, skb_gro_offset(skb));

	NAPI_GRO_CB(p)->last = skb;
	NAPI_GRO_CB(p)->count++;
	p->data_len += skb->len;

	/* sk ownership - if any - completely transferred to the aggregated packet */
	skb->destructor = NULL;
	skb->sk = NULL;
	p->truesize += skb->truesize;
	p->len += skb->len;

	NAPI_GRO_CB(skb)->same_flow = 1;

	return 0;
}


static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
{
	struct list_head *head = &net_hotdata.offload_base;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
					 ipv6_gro_complete, inet_gro_complete,
					 skb, 0);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return;
	}

out:
	gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
}

static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
				   bool flush_old)
{
	struct list_head *head = &napi->gro_hash[index].list;
	struct sk_buff *skb, *p;

	list_for_each_entry_safe_reverse(skb, p, head, list) {
		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;
		skb_list_del_init(skb);
		napi_gro_complete(napi, skb);
		napi->gro_hash[index].count--;
	}

	if (!napi->gro_hash[index].count)
		__clear_bit(index, &napi->gro_bitmask);
}

/* napi->gro_hash[].list contains packets ordered by age,
 * youngest packets at the head of it.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	unsigned long bitmask = napi->gro_bitmask;
	unsigned int i, base = ~0U;

	while ((i = ffs(bitmask)) != 0) {
		bitmask >>= i;
		base += i;
		__napi_gro_flush_chain(napi, base, flush_old);
	}
}
EXPORT_SYMBOL(napi_gro_flush);

static unsigned long gro_list_prepare_tc_ext(const struct sk_buff *skb,
					     const struct sk_buff *p,
					     unsigned long diffs)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	struct tc_skb_ext *skb_ext;
	struct tc_skb_ext *p_ext;

	skb_ext = skb_ext_find(skb, TC_SKB_EXT);
	p_ext = skb_ext_find(p, TC_SKB_EXT);

	diffs |= (!!p_ext) ^ (!!skb_ext);
	if (!diffs && unlikely(skb_ext))
		diffs |= p_ext->chain ^ skb_ext->chain;
#endif
	return diffs;
}

static void gro_list_prepare(const struct list_head *head,
			     const struct sk_buff *skb)
{
	unsigned int maclen = skb->dev->hard_header_len;
	u32 hash = skb_get_hash_raw(skb);
	struct sk_buff *p;

	list_for_each_entry(p, head, list) {
		unsigned long diffs;

		if (hash != skb_get_hash_raw(p)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= p->vlan_all ^ skb->vlan_all;
		diffs |= skb_metadata_differs(p, skb);
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_mac_header(skb),
				       maclen);

		/* In the most common scenarios 'slow_gro' is 0; otherwise we
		 * are already on a slower path, so either skip all the
		 * infrequent tests altogether or avoid trying too hard to
		 * skip each of them individually.
		 */
		if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) {
			diffs |= p->sk != skb->sk;
			diffs |= skb_metadata_dst_cmp(p, skb);
			diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);

			diffs |= gro_list_prepare_tc_ext(skb, p, diffs);
		}

		NAPI_GRO_CB(p)->same_flow = !diffs;
	}
}

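/* Prime the GRO control block for a freshly received skb. When the skb has
 * no linear data (headers still live in the first page fragment), point
 * frag0/frag0_len at that fragment so header access during GRO can avoid
 * pulling data into the linear area; gro_pull_from_frag0() later copies
 * those headers back into the linear area before the skb leaves GRO.
 */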
static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
{
	const struct skb_shared_info *pinfo;
	const skb_frag_t *frag0;
	unsigned int headlen;

	NAPI_GRO_CB(skb)->network_offset = 0;
	NAPI_GRO_CB(skb)->data_offset = 0;
	headlen = skb_headlen(skb);
	NAPI_GRO_CB(skb)->frag0 = skb->data;
	NAPI_GRO_CB(skb)->frag0_len = headlen;
	if (headlen)
		return;

	pinfo = skb_shinfo(skb);
	frag0 = &pinfo->frags[0];

	if (pinfo->nr_frags && skb_frag_page(frag0) &&
	    !PageHighMem(skb_frag_page(frag0)) &&
	    (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
						    skb_frag_size(frag0),
						    skb->end - skb->tail);
	}
}

static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
	struct skb_shared_info *pinfo = skb_shinfo(skb);

	BUG_ON(skb->end - skb->tail < grow);

	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

	skb->data_len -= grow;
	skb->tail += grow;

	skb_frag_off_add(&pinfo->frags[0], grow);
	skb_frag_size_sub(&pinfo->frags[0], grow);

	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
		skb_frag_unref(skb, 0);
		memmove(pinfo->frags, pinfo->frags + 1,
			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
	}
}

static void gro_try_pull_from_frag0(struct sk_buff *skb)
{
	int grow = skb_gro_offset(skb) - skb_headlen(skb);

	if (grow > 0)
		gro_pull_from_frag0(skb, grow);
}

static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
{
	struct sk_buff *oldest;

	oldest = list_last_entry(head, struct sk_buff, list);

	/* We are called with head length >= MAX_GRO_SKBS, so this is
	 * impossible.
	 */
	if (WARN_ON_ONCE(!oldest))
		return;

	/* Do not adjust napi->gro_hash[].count, caller is adding a new
	 * SKB to the chain.
	 */
	skb_list_del_init(oldest);
	napi_gro_complete(napi, oldest);
}

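/* Core GRO entry point: look up the offload handler for skb->protocol,
 * initialise NAPI_GRO_CB(skb), let the protocol's gro_receive callback try
 * to merge the skb into one of the flows held in the per-NAPI hash, and
 * report whether the skb was merged, is now held for later flushing, or
 * must be passed up the stack unchanged.
 */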
static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
	struct gro_list *gro_list = &napi->gro_hash[bucket];
	struct list_head *head = &net_hotdata.offload_base;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct sk_buff *pp = NULL;
	enum gro_result ret;
	int same_flow;

	if (netif_elide_gro(skb->dev))
		goto normal;

	gro_list_prepare(&gro_list->list, skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type == type && ptype->callbacks.gro_receive)
			goto found_ptype;
	}
	rcu_read_unlock();
	goto normal;

found_ptype:
	skb_set_network_header(skb, skb_gro_offset(skb));
	skb_reset_mac_len(skb);
	BUILD_BUG_ON(sizeof_field(struct napi_gro_cb, zeroed) != sizeof(u32));
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct napi_gro_cb, zeroed),
				 sizeof(u32))); /* Avoid slow unaligned acc */
	*(u32 *)&NAPI_GRO_CB(skb)->zeroed = 0;
	NAPI_GRO_CB(skb)->flush = skb_has_frag_list(skb);
	NAPI_GRO_CB(skb)->count = 1;
	if (unlikely(skb_is_gso(skb))) {
		NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;
		/* Only support TCP and non DODGY users. */
		if (!skb_is_gso_tcp(skb) ||
		    (skb_shinfo(skb)->gso_type & SKB_GSO_DODGY))
			NAPI_GRO_CB(skb)->flush = 1;
	}

	/* Setup for GRO checksum validation */
	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		NAPI_GRO_CB(skb)->csum = skb->csum;
		NAPI_GRO_CB(skb)->csum_valid = 1;
		break;
	case CHECKSUM_UNNECESSARY:
		NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
		break;
	}

	pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
				ipv6_gro_receive, inet_gro_receive,
				&gro_list->list, skb);

	rcu_read_unlock();

	if (PTR_ERR(pp) == -EINPROGRESS) {
		ret = GRO_CONSUMED;
		goto ok;
	}

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		skb_list_del_init(pp);
		napi_gro_complete(napi, pp);
		gro_list->count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush)
		goto normal;

	if (unlikely(gro_list->count >= MAX_GRO_SKBS))
		gro_flush_oldest(napi, &gro_list->list);
	else
		gro_list->count++;

	/* Must be called before setting NAPI_GRO_CB(skb)->{age|last} */
	gro_try_pull_from_frag0(skb);
	NAPI_GRO_CB(skb)->age = jiffies;
	NAPI_GRO_CB(skb)->last = skb;
	if (!skb_is_gso(skb))
		skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	list_add(&skb->list, &gro_list->list);
	ret = GRO_HELD;
ok:
	if (gro_list->count) {
		if (!test_bit(bucket, &napi->gro_bitmask))
			__set_bit(bucket, &napi->gro_bitmask);
	} else if (test_bit(bucket, &napi->gro_bitmask)) {
		__clear_bit(bucket, &napi->gro_bitmask);
	}

	return ret;

normal:
	ret = GRO_NORMAL;
	gro_try_pull_from_frag0(skb);
	goto ok;
}

struct packet_offload *gro_find_receive_by_type(__be16 type)
{
	struct list_head *offload_head = &net_hotdata.offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
	struct list_head *offload_head = &net_hotdata.offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);

static gro_result_t napi_skb_finish(struct napi_struct *napi,
				    struct sk_buff *skb,
				    gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
		gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
			__kfree_skb(skb);
		else
			__napi_kfree_skb(skb, SKB_CONSUMED);
		break;

	case GRO_HELD:
	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	gro_result_t ret;

	skb_mark_napi_id(skb, napi);
	trace_napi_gro_receive_entry(skb);

	skb_gro_reset_offset(skb, 0);

	ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_receive_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_receive);
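/*
 * Illustrative sketch (not part of this file): the usual way a driver feeds
 * received packets into GRO from its NAPI poll routine. my_poll(),
 * build_rx_skb() and my_rx_done() are hypothetical driver helpers;
 * napi_gro_receive(), napi_complete_done() and the budget convention are
 * the real NAPI API.
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work = 0;
 *
 *		while (work < budget) {
 *			struct sk_buff *skb = build_rx_skb(napi);
 *
 *			if (!skb)
 *				break;
 *			skb->protocol = eth_type_trans(skb, napi->dev);
 *			napi_gro_receive(napi, skb);
 *			work++;
 *		}
 *		if (work < budget && napi_complete_done(napi, work))
 *			my_rx_done(napi);	// e.g. re-enable RX interrupts
 *		return work;
 *	}
 */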

static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	if (unlikely(skb->pfmemalloc)) {
		consume_skb(skb);
		return;
	}
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	__vlan_hwaccel_clear_tag(skb);
	skb->dev = napi->dev;
	skb->skb_iif = 0;

	/* eth_type_trans() assumes pkt_type is PACKET_HOST */
	skb->pkt_type = PACKET_HOST;

	skb->encapsulation = 0;
	skb_shinfo(skb)->gso_type = 0;
	skb_shinfo(skb)->gso_size = 0;
	if (unlikely(skb->slow_gro)) {
		skb_orphan(skb);
		skb_ext_reset(skb);
		nf_reset_ct(skb);
		skb->slow_gro = 0;
	}

	napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
		if (skb) {
			napi->skb = skb;
			skb_mark_napi_id(skb, napi);
		}
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);

static gro_result_t napi_frags_finish(struct napi_struct *napi,
				      struct sk_buff *skb,
				      gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		__skb_push(skb, ETH_HLEN);
		skb->protocol = eth_type_trans(skb, skb->dev);
		if (ret == GRO_NORMAL)
			gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else
			napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

/* Upper GRO stack assumes network header starts at gro_offset=0.
 * Drivers could call both napi_gro_frags() and napi_gro_receive().
 * We copy the ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	const struct ethhdr *eth;
	unsigned int hlen = sizeof(*eth);

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb, hlen);

	if (unlikely(!skb_gro_may_pull(skb, hlen))) {
		eth = skb_gro_header_slow(skb, hlen, 0);
		if (unlikely(!eth)) {
			net_warn_ratelimited("%s: dropping impossible skb from %s\n",
					     __func__, napi->dev->name);
			napi_reuse_skb(napi, skb);
			return NULL;
		}
	} else {
		eth = (const struct ethhdr *)skb->data;

		if (NAPI_GRO_CB(skb)->frag0 != skb->data)
			gro_pull_from_frag0(skb, hlen);

		NAPI_GRO_CB(skb)->frag0 += hlen;
		NAPI_GRO_CB(skb)->frag0_len -= hlen;
	}
	__skb_pull(skb, hlen);

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.
	 * We'll fix it up properly in napi_frags_finish()
	 */
	skb->protocol = eth->h_proto;

	return skb;
}

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	gro_result_t ret;
	struct sk_buff *skb = napi_frags_skb(napi);

	trace_napi_gro_frags_entry(skb);

	ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_frags_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_frags);
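/*
 * Illustrative sketch (not part of this file): a driver using the frags API,
 * where the skb is preallocated by the stack and the driver only attaches
 * page fragments before calling napi_gro_frags(). my_receive_frag() and its
 * page/off/len arguments are hypothetical; napi_get_frags(),
 * skb_fill_page_desc() and napi_gro_frags() are the real API. The Ethernet
 * header stays in the first fragment; napi_frags_skb() above pulls it out.
 *
 *	static void my_receive_frag(struct napi_struct *napi, struct page *page,
 *				    unsigned int off, unsigned int len)
 *	{
 *		struct sk_buff *skb = napi_get_frags(napi);
 *
 *		if (!skb)
 *			return;		// out of memory, drop the frame
 *		skb_fill_page_desc(skb, 0, page, off, len);
 *		skb->len += len;
 *		skb->data_len += len;
 *		skb->truesize += PAGE_SIZE;
 *		napi_gro_frags(napi);
 *	}
 */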

/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
{
	__wsum wsum;
	__sum16 sum;

	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
	/* See comments in __skb_checksum_complete(). */
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev, skb);
	}

	NAPI_GRO_CB(skb)->csum = wsum;
	NAPI_GRO_CB(skb)->csum_valid = 1;

	return sum;
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);