Lines Matching full:skb
19 * Ray VanTassle : Fixed --skb->lock in free
86 #include <trace/events/skb.h>
195 * @skb: buffer
205 static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr, in skb_panic() argument
209 msg, addr, skb->len, sz, skb->head, skb->data, in skb_panic()
210 (unsigned long)skb->tail, (unsigned long)skb->end, in skb_panic()
211 skb->dev ? skb->dev->name : "<NULL>"); in skb_panic()
215 static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr) in skb_over_panic() argument
217 skb_panic(skb, sz, addr, __func__); in skb_over_panic()
220 static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr) in skb_under_panic() argument
222 skb_panic(skb, sz, addr, __func__); in skb_under_panic()
280 struct sk_buff *skb; in napi_skb_cache_get() local
294 skb = nc->skb_cache[--nc->skb_count]; in napi_skb_cache_get()
296 kasan_mempool_unpoison_object(skb, kmem_cache_size(net_hotdata.skbuff_cache)); in napi_skb_cache_get()
298 return skb; in napi_skb_cache_get()
302 * napi_skb_cache_get_bulk - obtain a number of zeroed skb heads from the cache
303 * @skbs: pointer to an at least @n-sized array to fill with skb pointers
363 static inline void __finalize_skb_around(struct sk_buff *skb, void *data, in __finalize_skb_around() argument
370 /* Assumes caller memset cleared SKB */ in __finalize_skb_around()
371 skb->truesize = SKB_TRUESIZE(size); in __finalize_skb_around()
372 refcount_set(&skb->users, 1); in __finalize_skb_around()
373 skb->head = data; in __finalize_skb_around()
374 skb->data = data; in __finalize_skb_around()
375 skb_reset_tail_pointer(skb); in __finalize_skb_around()
376 skb_set_end_offset(skb, size); in __finalize_skb_around()
377 skb->mac_header = (typeof(skb->mac_header))~0U; in __finalize_skb_around()
378 skb->transport_header = (typeof(skb->transport_header))~0U; in __finalize_skb_around()
379 skb->alloc_cpu = raw_smp_processor_id(); in __finalize_skb_around()
381 shinfo = skb_shinfo(skb); in __finalize_skb_around()
385 skb_set_kcov_handle(skb, kcov_common_handle()); in __finalize_skb_around()
412 struct sk_buff *skb; in slab_build_skb() local
415 skb = kmem_cache_alloc(net_hotdata.skbuff_cache, in slab_build_skb()
417 if (unlikely(!skb)) in slab_build_skb()
420 memset(skb, 0, offsetof(struct sk_buff, tail)); in slab_build_skb()
422 __finalize_skb_around(skb, data, size); in slab_build_skb()
424 return skb; in slab_build_skb()
428 /* Caller must provide SKB that is memset cleared */
429 static void __build_skb_around(struct sk_buff *skb, void *data, in __build_skb_around() argument
440 __finalize_skb_around(skb, data, size); in __build_skb_around()
453 * The return value is the new skb buffer.
465 struct sk_buff *skb; in __build_skb() local
467 skb = kmem_cache_alloc(net_hotdata.skbuff_cache, in __build_skb()
469 if (unlikely(!skb)) in __build_skb()
472 memset(skb, 0, offsetof(struct sk_buff, tail)); in __build_skb()
473 __build_skb_around(skb, data, frag_size); in __build_skb()
475 return skb; in __build_skb()
479 * takes care of skb->head and skb->pfmemalloc
483 struct sk_buff *skb = __build_skb(data, frag_size); in build_skb() local
485 if (likely(skb && frag_size)) { in build_skb()
486 skb->head_frag = 1; in build_skb()
487 skb_propagate_pfmemalloc(virt_to_head_page(data), skb); in build_skb()
489 return skb; in build_skb()
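Taken together with the comment above, build_skb() wraps an already-filled data buffer (typically a page fragment the driver DMA'd into) without copying. A minimal sketch of that pattern follows; the function name, the NET_SKB_PAD headroom layout and the receive path are illustrative assumptions, not taken from this listing.

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

/* Hypothetical RX path: "buf" is a page fragment of frag_size bytes
 * (headroom + frame + room for skb_shared_info), with the received
 * frame assumed to start at buf + NET_SKB_PAD.
 */
static void example_rx_build_skb(struct net_device *dev, void *buf,
                                 unsigned int frag_size, unsigned int pkt_len)
{
        struct sk_buff *skb = build_skb(buf, frag_size);

        if (unlikely(!skb))
                return;                 /* on failure the driver still owns buf */

        skb_reserve(skb, NET_SKB_PAD);  /* skip the headroom left by the driver */
        skb_put(skb, pkt_len);          /* mark the received bytes as data */
        skb->protocol = eth_type_trans(skb, dev);
        netif_receive_skb(skb);
}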
494 * build_skb_around - build a network buffer around provided skb
495 * @skb: sk_buff provided by caller, must be memset cleared
499 struct sk_buff *build_skb_around(struct sk_buff *skb, in build_skb_around() argument
502 if (unlikely(!skb)) in build_skb_around()
505 __build_skb_around(skb, data, frag_size); in build_skb_around()
508 skb->head_frag = 1; in build_skb_around()
509 skb_propagate_pfmemalloc(virt_to_head_page(data), skb); in build_skb_around()
511 return skb; in build_skb_around()
527 struct sk_buff *skb; in __napi_build_skb() local
529 skb = napi_skb_cache_get(); in __napi_build_skb()
530 if (unlikely(!skb)) in __napi_build_skb()
533 memset(skb, 0, offsetof(struct sk_buff, tail)); in __napi_build_skb()
534 __build_skb_around(skb, data, frag_size); in __napi_build_skb()
536 return skb; in __napi_build_skb()
544 * Version of __napi_build_skb() that takes care of skb->head_frag
545 * and skb->pfmemalloc when the data is a page or page fragment.
551 struct sk_buff *skb = __napi_build_skb(data, frag_size); in napi_build_skb() local
553 if (likely(skb) && frag_size) { in napi_build_skb()
554 skb->head_frag = 1; in napi_build_skb()
555 skb_propagate_pfmemalloc(virt_to_head_page(data), skb); in napi_build_skb()
558 return skb; in napi_build_skb()
629 * instead of head cache and allocate a cloned (child) skb.
645 struct sk_buff *skb; in __alloc_skb() local
658 skb = napi_skb_cache_get(); in __alloc_skb()
660 skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node); in __alloc_skb()
661 if (unlikely(!skb)) in __alloc_skb()
663 prefetchw(skb); in __alloc_skb()
668 * Both skb->head and skb_shared_info are cache line aligned. in __alloc_skb()
684 memset(skb, 0, offsetof(struct sk_buff, tail)); in __alloc_skb()
685 __build_skb_around(skb, data, size); in __alloc_skb()
686 skb->pfmemalloc = pfmemalloc; in __alloc_skb()
691 fclones = container_of(skb, struct sk_buff_fclones, skb1); in __alloc_skb()
693 skb->fclone = SKB_FCLONE_ORIG; in __alloc_skb()
697 return skb; in __alloc_skb()
700 kmem_cache_free(cache, skb); in __alloc_skb()
722 struct sk_buff *skb; in __netdev_alloc_skb() local
729 * we use kmalloc() for skb->head allocation. in __netdev_alloc_skb()
734 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE); in __netdev_alloc_skb()
735 if (!skb) in __netdev_alloc_skb()
764 skb = __build_skb(data, len); in __netdev_alloc_skb()
765 if (unlikely(!skb)) { in __netdev_alloc_skb()
771 skb->pfmemalloc = 1; in __netdev_alloc_skb()
772 skb->head_frag = 1; in __netdev_alloc_skb()
775 skb_reserve(skb, NET_SKB_PAD); in __netdev_alloc_skb()
776 skb->dev = dev; in __netdev_alloc_skb()
779 return skb; in __netdev_alloc_skb()
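For contrast with the page-fragment path above, a hedged sketch of the simple copy-based RX path built on netdev_alloc_skb(); the copybreak idea, names and sizes are illustrative.

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

/* Hypothetical: copy a small received frame out of a driver-owned buffer. */
static void example_rx_copybreak(struct net_device *dev,
                                 const void *dma_buf, unsigned int len)
{
        struct sk_buff *skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);

        if (unlikely(!skb)) {
                dev->stats.rx_dropped++;
                return;
        }

        skb_reserve(skb, NET_IP_ALIGN);         /* align the IP header */
        skb_put_data(skb, dma_buf, len);        /* copy the frame, advance tail/len */
        skb->protocol = eth_type_trans(skb, dev);
        netif_receive_skb(skb);
}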
799 struct sk_buff *skb; in napi_alloc_skb() local
807 * we use kmalloc() for skb->head allocation. in napi_alloc_skb()
812 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI, in napi_alloc_skb()
814 if (!skb) in napi_alloc_skb()
834 skb = __napi_build_skb(data, len); in napi_alloc_skb()
835 if (unlikely(!skb)) { in napi_alloc_skb()
841 skb->pfmemalloc = 1; in napi_alloc_skb()
842 skb->head_frag = 1; in napi_alloc_skb()
845 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); in napi_alloc_skb()
846 skb->dev = napi->dev; in napi_alloc_skb()
849 return skb; in napi_alloc_skb()
853 void skb_add_rx_frag_netmem(struct sk_buff *skb, int i, netmem_ref netmem, in skb_add_rx_frag_netmem() argument
858 skb_fill_netmem_desc(skb, i, netmem, off, size); in skb_add_rx_frag_netmem()
859 skb->len += size; in skb_add_rx_frag_netmem()
860 skb->data_len += size; in skb_add_rx_frag_netmem()
861 skb->truesize += truesize; in skb_add_rx_frag_netmem()
865 void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size, in skb_coalesce_rx_frag() argument
868 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_coalesce_rx_frag()
873 skb->len += size; in skb_coalesce_rx_frag()
874 skb->data_len += size; in skb_coalesce_rx_frag()
875 skb->truesize += truesize; in skb_coalesce_rx_frag()
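The three increments visible above (len, data_len, truesize) are the bookkeeping every fragment append performs. A hedged sketch using the page-based skb_add_rx_frag() wrapper; the helper name and the full-page truesize accounting are assumptions for illustration.

#include <linux/skbuff.h>
#include <linux/mm.h>

/* Hypothetical: attach one received page fragment to an existing skb.
 * Assumes the caller has verified nr_frags < MAX_SKB_FRAGS.
 */
static void example_append_frag(struct sk_buff *skb, struct page *page,
                                unsigned int off, unsigned int size)
{
        int i = skb_shinfo(skb)->nr_frags;

        get_page(page);         /* extra reference if the driver keeps reusing the page */
        skb_add_rx_frag(skb, i, page, off, size, PAGE_SIZE);
        /* skb->len, skb->data_len and skb->truesize have all been bumped,
         * with truesize charging a full PAGE_SIZE for the buffer.
         */
}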
885 static inline void skb_drop_fraglist(struct sk_buff *skb) in skb_drop_fraglist() argument
887 skb_drop_list(&skb_shinfo(skb)->frag_list); in skb_drop_fraglist()
890 static void skb_clone_fraglist(struct sk_buff *skb) in skb_clone_fraglist() argument
894 skb_walk_frags(skb, list) in skb_clone_fraglist()
903 struct sk_buff *skb = *pskb, *nskb; in skb_pp_cow_data() local
908 * the skb. in skb_pp_cow_data()
910 if (skb_has_frag_list(skb)) in skb_pp_cow_data()
914 if (skb->len > max_head_size + MAX_SKB_FRAGS * PAGE_SIZE) in skb_pp_cow_data()
917 size = min_t(u32, skb->len, max_head_size); in skb_pp_cow_data()
930 skb_copy_header(nskb, skb); in skb_pp_cow_data()
933 err = skb_copy_bits(skb, 0, nskb->data, size); in skb_pp_cow_data()
940 head_off = skb_headroom(nskb) - skb_headroom(skb); in skb_pp_cow_data()
944 len = skb->len - off; in skb_pp_cow_data()
945 for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) { in skb_pp_cow_data()
959 err = skb_copy_bits(skb, off, page_address(page) + page_off, in skb_pp_cow_data()
970 consume_skb(skb); in skb_pp_cow_data()
1005 static bool skb_pp_recycle(struct sk_buff *skb, void *data) in skb_pp_recycle() argument
1007 if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle) in skb_pp_recycle()
1013 * skb_pp_frag_ref() - Increase fragment references of a page pool aware skb
1014 * @skb: page pool aware skb
1016 * Increase the fragment reference count (pp_ref_count) of a skb. This is
1018 * i.e. when skb->pp_recycle is true, and not for fragments in a
1019 * non-pp-recycling skb. It has a fallback to increase references on normal
1022 static int skb_pp_frag_ref(struct sk_buff *skb) in skb_pp_frag_ref() argument
1028 if (!skb->pp_recycle) in skb_pp_frag_ref()
1031 shinfo = skb_shinfo(skb); in skb_pp_frag_ref()
1051 static void skb_free_head(struct sk_buff *skb) in skb_free_head() argument
1053 unsigned char *head = skb->head; in skb_free_head()
1055 if (skb->head_frag) { in skb_free_head()
1056 if (skb_pp_recycle(skb, head)) in skb_free_head()
1060 skb_kfree_head(head, skb_end_offset(skb)); in skb_free_head()
1064 static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason) in skb_release_data() argument
1066 struct skb_shared_info *shinfo = skb_shinfo(skb); in skb_release_data()
1069 if (!skb_data_unref(skb, shinfo)) in skb_release_data()
1072 if (skb_zcopy(skb)) { in skb_release_data()
1075 skb_zcopy_clear(skb, true); in skb_release_data()
1081 __skb_frag_unref(&shinfo->frags[i], skb->pp_recycle); in skb_release_data()
1087 skb_free_head(skb); in skb_release_data()
1089 /* When we clone an SKB we copy the recycling bit. The pp_recycle in skb_release_data()
1092 * to make one SKB responsible for triggering the recycle path. in skb_release_data()
1093 * So disable the recycling bit if an SKB is cloned and we have in skb_release_data()
1094 * additional references to the fragmented part of the SKB. in skb_release_data()
1095 * Eventually the last SKB will have the recycling bit set and it's in skb_release_data()
1098 skb->pp_recycle = 0; in skb_release_data()
1104 static void kfree_skbmem(struct sk_buff *skb) in kfree_skbmem() argument
1108 switch (skb->fclone) { in kfree_skbmem()
1110 kmem_cache_free(net_hotdata.skbuff_cache, skb); in kfree_skbmem()
1114 fclones = container_of(skb, struct sk_buff_fclones, skb1); in kfree_skbmem()
1116 /* We usually free the clone (TX completion) before original skb in kfree_skbmem()
1125 fclones = container_of(skb, struct sk_buff_fclones, skb2); in kfree_skbmem()
1134 void skb_release_head_state(struct sk_buff *skb) in skb_release_head_state() argument
1136 skb_dst_drop(skb); in skb_release_head_state()
1137 if (skb->destructor) { in skb_release_head_state()
1139 skb->destructor(skb); in skb_release_head_state()
1142 nf_conntrack_put(skb_nfct(skb)); in skb_release_head_state()
1144 skb_ext_put(skb); in skb_release_head_state()
1148 static void skb_release_all(struct sk_buff *skb, enum skb_drop_reason reason) in skb_release_all() argument
1150 skb_release_head_state(skb); in skb_release_all()
1151 if (likely(skb->head)) in skb_release_all()
1152 skb_release_data(skb, reason); in skb_release_all()
1157 * @skb: buffer
1164 void __kfree_skb(struct sk_buff *skb) in __kfree_skb() argument
1166 skb_release_all(skb, SKB_DROP_REASON_NOT_SPECIFIED); in __kfree_skb()
1167 kfree_skbmem(skb); in __kfree_skb()
1172 bool __sk_skb_reason_drop(struct sock *sk, struct sk_buff *skb, in __sk_skb_reason_drop() argument
1175 if (unlikely(!skb_unref(skb))) in __sk_skb_reason_drop()
1184 trace_consume_skb(skb, __builtin_return_address(0)); in __sk_skb_reason_drop()
1186 trace_kfree_skb(skb, __builtin_return_address(0), reason, sk); in __sk_skb_reason_drop()
1192 * @sk: the socket to receive @skb, or NULL if not applicable
1193 * @skb: buffer to free
1194 * @reason: reason why this skb is dropped
1201 sk_skb_reason_drop(struct sock *sk, struct sk_buff *skb, enum skb_drop_reason reason) in sk_skb_reason_drop() argument
1203 if (__sk_skb_reason_drop(sk, skb, reason)) in sk_skb_reason_drop()
1204 __kfree_skb(skb); in sk_skb_reason_drop()
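These are the backends behind kfree_skb_reason(); the reason code ends up in the kfree_skb tracepoint, which is what dropwatch and drop monitors consume. A hedged sketch of a typed drop in a receive handler; the handler and its length check are invented.

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/ip.h>

/* Hypothetical input handler: report runts with an explicit drop reason. */
static int example_proto_rcv(struct sk_buff *skb)
{
        if (!pskb_may_pull(skb, sizeof(struct iphdr))) {
                kfree_skb_reason(skb, SKB_DROP_REASON_PKT_TOO_SMALL);
                return NET_RX_DROP;
        }

        /* ... real processing would go here ... */
        consume_skb(skb);
        return NET_RX_SUCCESS;
}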
1215 static void kfree_skb_add_bulk(struct sk_buff *skb, in kfree_skb_add_bulk() argument
1219 /* if SKB is a clone, don't handle this case */ in kfree_skb_add_bulk()
1220 if (unlikely(skb->fclone != SKB_FCLONE_UNAVAILABLE)) { in kfree_skb_add_bulk()
1221 __kfree_skb(skb); in kfree_skb_add_bulk()
1225 skb_release_all(skb, reason); in kfree_skb_add_bulk()
1226 sa->skb_array[sa->skb_count++] = skb; in kfree_skb_add_bulk()
1258 /* Dump skb information and contents.
1264 void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt) in skb_dump() argument
1266 struct skb_shared_info *sh = skb_shinfo(skb); in skb_dump()
1267 struct net_device *dev = skb->dev; in skb_dump()
1268 struct sock *sk = skb->sk; in skb_dump()
1275 len = skb->len; in skb_dump()
1277 len = min_t(int, skb->len, MAX_HEADER + 128); in skb_dump()
1279 headroom = skb_headroom(skb); in skb_dump()
1280 tailroom = skb_tailroom(skb); in skb_dump()
1282 has_mac = skb_mac_header_was_set(skb); in skb_dump()
1283 has_trans = skb_transport_header_was_set(skb); in skb_dump()
1292 level, skb->len, headroom, skb_headlen(skb), tailroom, in skb_dump()
1293 has_mac ? skb->mac_header : -1, in skb_dump()
1294 has_mac ? skb_mac_header_len(skb) : -1, in skb_dump()
1295 skb->mac_len, in skb_dump()
1296 skb->network_header, in skb_dump()
1297 has_trans ? skb_network_header_len(skb) : -1, in skb_dump()
1298 has_trans ? skb->transport_header : -1, in skb_dump()
1301 skb->csum, skb->csum_start, skb->csum_offset, skb->ip_summed, in skb_dump()
1302 skb->csum_complete_sw, skb->csum_valid, skb->csum_level, in skb_dump()
1303 skb->hash, skb->sw_hash, skb->l4_hash, in skb_dump()
1304 ntohs(skb->protocol), skb->pkt_type, skb->skb_iif, in skb_dump()
1305 skb->priority, skb->mark, skb->alloc_cpu, skb->vlan_all, in skb_dump()
1306 skb->encapsulation, skb->inner_protocol, skb->inner_mac_header, in skb_dump()
1307 skb->inner_network_header, skb->inner_transport_header); in skb_dump()
1317 print_hex_dump(level, "skb headroom: ", DUMP_PREFIX_OFFSET, in skb_dump()
1318 16, 1, skb->head, headroom, false); in skb_dump()
1320 seg_len = min_t(int, skb_headlen(skb), len); in skb_dump()
1322 print_hex_dump(level, "skb linear: ", DUMP_PREFIX_OFFSET, in skb_dump()
1323 16, 1, skb->data, seg_len, false); in skb_dump()
1327 print_hex_dump(level, "skb tailroom: ", DUMP_PREFIX_OFFSET, in skb_dump()
1328 16, 1, skb_tail_pointer(skb), tailroom, false); in skb_dump()
1330 for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) { in skb_dump()
1331 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_dump()
1349 print_hex_dump(level, "skb frag: ", in skb_dump()
1359 if (full_pkt && skb_has_frag_list(skb)) { in skb_dump()
1360 printk("skb fraglist:\n"); in skb_dump()
1361 skb_walk_frags(skb, list_skb) in skb_dump()
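skb_dump() is meant for ad-hoc debugging; a minimal hedged sketch (the rate limiting and the caller are suggestions, not part of the API):

#include <linux/skbuff.h>
#include <linux/net.h>

/* Hypothetical debug hook: summarise a suspicious skb, rate limited. */
static void example_debug_skb(const struct sk_buff *skb)
{
        if (net_ratelimit())
                skb_dump(KERN_WARNING, skb, false);     /* false: truncated hex dump */
}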
1369 * @skb: buffer that triggered an error
1371 * Report xmit error if a device callback is tracking this skb.
1372 * skb must be freed afterwards.
1374 void skb_tx_error(struct sk_buff *skb) in skb_tx_error() argument
1376 if (skb) { in skb_tx_error()
1377 skb_zcopy_downgrade_managed(skb); in skb_tx_error()
1378 skb_zcopy_clear(skb, true); in skb_tx_error()
1386 * @skb: buffer to free
1392 void consume_skb(struct sk_buff *skb) in consume_skb() argument
1394 if (!skb_unref(skb)) in consume_skb()
1397 trace_consume_skb(skb, __builtin_return_address(0)); in consume_skb()
1398 __kfree_skb(skb); in consume_skb()
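The point of consume_skb() versus kfree_skb() is visible above: the freeing work is identical, but consume_skb() fires trace_consume_skb, so drop monitors do not count it. A hedged TX-completion sketch (from hard-IRQ context a driver would normally use dev_consume_skb_any()/dev_kfree_skb_any() instead):

#include <linux/skbuff.h>

/* Hypothetical TX completion: a transmitted frame is "consumed", not "dropped". */
static void example_tx_complete(struct sk_buff *skb, bool sent_ok)
{
        if (sent_ok)
                consume_skb(skb);       /* trace_consume_skb */
        else
                kfree_skb(skb);         /* trace_kfree_skb, counted as a drop */
}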
1405 * @skb: buffer to free
1408 * skb reference and all the head states have already been dropped
1410 void __consume_stateless_skb(struct sk_buff *skb) in __consume_stateless_skb() argument
1412 trace_consume_skb(skb, __builtin_return_address(0)); in __consume_stateless_skb()
1413 skb_release_data(skb, SKB_CONSUMED); in __consume_stateless_skb()
1414 kfree_skbmem(skb); in __consume_stateless_skb()
1417 static void napi_skb_cache_put(struct sk_buff *skb) in napi_skb_cache_put() argument
1422 if (!kasan_mempool_poison_object(skb)) in napi_skb_cache_put()
1426 nc->skb_cache[nc->skb_count++] = skb; in napi_skb_cache_put()
1440 void __napi_kfree_skb(struct sk_buff *skb, enum skb_drop_reason reason) in __napi_kfree_skb() argument
1442 skb_release_all(skb, reason); in __napi_kfree_skb()
1443 napi_skb_cache_put(skb); in __napi_kfree_skb()
1446 void napi_skb_free_stolen_head(struct sk_buff *skb) in napi_skb_free_stolen_head() argument
1448 if (unlikely(skb->slow_gro)) { in napi_skb_free_stolen_head()
1449 nf_reset_ct(skb); in napi_skb_free_stolen_head()
1450 skb_dst_drop(skb); in napi_skb_free_stolen_head()
1451 skb_ext_put(skb); in napi_skb_free_stolen_head()
1452 skb_orphan(skb); in napi_skb_free_stolen_head()
1453 skb->slow_gro = 0; in napi_skb_free_stolen_head()
1455 napi_skb_cache_put(skb); in napi_skb_free_stolen_head()
1458 void napi_consume_skb(struct sk_buff *skb, int budget) in napi_consume_skb() argument
1462 dev_consume_skb_any(skb); in napi_consume_skb()
1468 if (!skb_unref(skb)) in napi_consume_skb()
1471 /* if we reach here, the SKB is ready to free */ in napi_consume_skb()
1472 trace_consume_skb(skb, __builtin_return_address(0)); in napi_consume_skb()
1474 /* if SKB is a clone, don't handle this case */ in napi_consume_skb()
1475 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) { in napi_consume_skb()
1476 __kfree_skb(skb); in napi_consume_skb()
1480 skb_release_all(skb, SKB_CONSUMED); in napi_consume_skb()
1481 napi_skb_cache_put(skb); in napi_consume_skb()
1541 static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) in __skb_clone() argument
1543 #define C(x) n->x = skb->x in __skb_clone()
1547 __copy_skb_header(n, skb); in __skb_clone()
1552 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; in __skb_clone()
1567 atomic_inc(&(skb_shinfo(skb)->dataref)); in __skb_clone()
1568 skb->cloned = 1; in __skb_clone()
1600 * skb_morph - morph one skb into another
1601 * @dst: the skb to receive the contents
1602 * @src: the skb to supply the contents
1604 * This is identical to skb_clone except that the target skb is
1607 * The target skb is returned upon exit.
1663 struct sk_buff *skb; in msg_zerocopy_alloc() local
1667 skb = sock_omalloc(sk, 0, GFP_KERNEL); in msg_zerocopy_alloc()
1668 if (!skb) in msg_zerocopy_alloc()
1671 BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb)); in msg_zerocopy_alloc()
1672 uarg = (void *)skb->cb; in msg_zerocopy_alloc()
1676 kfree_skb(skb); in msg_zerocopy_alloc()
1720 /* TCP can create new skb to attach new uarg */ in msg_zerocopy_realloc()
1748 static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len) in skb_zerocopy_notify_extend() argument
1750 struct sock_exterr_skb *serr = SKB_EXT_ERR(skb); in skb_zerocopy_notify_extend()
1770 struct sk_buff *tail, *skb = skb_from_uarg(uarg); in __msg_zerocopy_callback() local
1772 struct sock *sk = skb->sk; in __msg_zerocopy_callback()
1792 serr = SKB_EXT_ERR(skb); in __msg_zerocopy_callback()
1806 __skb_queue_tail(q, skb); in __msg_zerocopy_callback()
1807 skb = NULL; in __msg_zerocopy_callback()
1814 consume_skb(skb); in __msg_zerocopy_callback()
1818 static void msg_zerocopy_complete(struct sk_buff *skb, struct ubuf_info *uarg, in msg_zerocopy_complete() argument
1846 int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, in skb_zerocopy_iter_stream() argument
1851 int err, orig_len = skb->len; in skb_zerocopy_iter_stream()
1854 err = uarg->ops->link_skb(skb, uarg); in skb_zerocopy_iter_stream()
1858 struct ubuf_info *orig_uarg = skb_zcopy(skb); in skb_zerocopy_iter_stream()
1860 /* An skb can only point to one uarg. This edge case happens in skb_zerocopy_iter_stream()
1861 * when TCP appends to an skb, but zerocopy_realloc triggered in skb_zerocopy_iter_stream()
1868 err = __zerocopy_sg_from_iter(msg, sk, skb, &msg->msg_iter, len, in skb_zerocopy_iter_stream()
1870 if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) { in skb_zerocopy_iter_stream()
1871 struct sock *save_sk = skb->sk; in skb_zerocopy_iter_stream()
1873 /* Streams do not free skb on error. Reset to prev state. */ in skb_zerocopy_iter_stream()
1874 iov_iter_revert(&msg->msg_iter, skb->len - orig_len); in skb_zerocopy_iter_stream()
1875 skb->sk = sk; in skb_zerocopy_iter_stream()
1876 ___pskb_trim(skb, orig_len); in skb_zerocopy_iter_stream()
1877 skb->sk = save_sk; in skb_zerocopy_iter_stream()
1881 skb_zcopy_set(skb, uarg, NULL); in skb_zerocopy_iter_stream()
1882 return skb->len - orig_len; in skb_zerocopy_iter_stream()
1886 void __skb_zcopy_downgrade_managed(struct sk_buff *skb) in __skb_zcopy_downgrade_managed() argument
1890 skb_shinfo(skb)->flags &= ~SKBFL_MANAGED_FRAG_REFS; in __skb_zcopy_downgrade_managed()
1891 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in __skb_zcopy_downgrade_managed()
1892 skb_frag_ref(skb, i); in __skb_zcopy_downgrade_managed()
1917 * skb_copy_ubufs - copy userspace skb frags buffers to kernel
1918 * @skb: the skb to modify
1921 * This must be called on skb with SKBFL_ZEROCOPY_ENABLE.
1931 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) in skb_copy_ubufs() argument
1933 int num_frags = skb_shinfo(skb)->nr_frags; in skb_copy_ubufs()
1938 if (skb_shared(skb) || skb_unclone(skb, gfp_mask)) in skb_copy_ubufs()
1941 if (!skb_frags_readable(skb)) in skb_copy_ubufs()
1951 while ((PAGE_SIZE << order) * MAX_SKB_FRAGS < __skb_pagelen(skb)) in skb_copy_ubufs()
1955 new_frags = (__skb_pagelen(skb) + psize - 1) >> (PAGE_SHIFT + order); in skb_copy_ubufs()
1973 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; in skb_copy_ubufs()
1998 /* skb frags release userspace buffers */ in skb_copy_ubufs()
2000 skb_frag_unref(skb, i); in skb_copy_ubufs()
2002 /* skb frags point to kernel buffers */ in skb_copy_ubufs()
2004 __skb_fill_netmem_desc(skb, i, page_to_netmem(head), 0, psize); in skb_copy_ubufs()
2007 __skb_fill_netmem_desc(skb, new_frags - 1, page_to_netmem(head), 0, in skb_copy_ubufs()
2009 skb_shinfo(skb)->nr_frags = new_frags; in skb_copy_ubufs()
2012 skb_zcopy_clear(skb, false); in skb_copy_ubufs()
2019 * @skb: buffer to clone
2031 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) in skb_clone() argument
2033 struct sk_buff_fclones *fclones = container_of(skb, in skb_clone()
2038 if (skb_orphan_frags(skb, gfp_mask)) in skb_clone()
2041 if (skb->fclone == SKB_FCLONE_ORIG && in skb_clone()
2047 if (skb_pfmemalloc(skb)) in skb_clone()
2057 return __skb_clone(n, skb); in skb_clone()
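A hedged sketch of the usual clone pattern: skb_clone() shares the payload (dataref is bumped, both buffers see cloned=1), so it suits read-only consumers such as taps; a consumer that needs to write would use skb_copy() or pskb_copy() instead. The mirror hook here is invented.

#include <linux/skbuff.h>

/* Hypothetical mirror path: hand a read-only duplicate to a monitor hook. */
static void example_mirror(struct sk_buff *skb,
                           void (*monitor)(struct sk_buff *))
{
        struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

        if (clone)
                monitor(clone);         /* the hook owns the clone and must free it */
        /* the original skb continues down its normal path untouched */
}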
2061 void skb_headers_offset_update(struct sk_buff *skb, int off) in skb_headers_offset_update() argument
2064 if (skb->ip_summed == CHECKSUM_PARTIAL) in skb_headers_offset_update()
2065 skb->csum_start += off; in skb_headers_offset_update()
2066 /* {transport,network,mac}_header and tail are relative to skb->head */ in skb_headers_offset_update()
2067 skb->transport_header += off; in skb_headers_offset_update()
2068 skb->network_header += off; in skb_headers_offset_update()
2069 if (skb_mac_header_was_set(skb)) in skb_headers_offset_update()
2070 skb->mac_header += off; in skb_headers_offset_update()
2071 skb->inner_transport_header += off; in skb_headers_offset_update()
2072 skb->inner_network_header += off; in skb_headers_offset_update()
2073 skb->inner_mac_header += off; in skb_headers_offset_update()
2087 static inline int skb_alloc_rx_flag(const struct sk_buff *skb) in skb_alloc_rx_flag() argument
2089 if (skb_pfmemalloc(skb)) in skb_alloc_rx_flag()
2096 * @skb: buffer to copy
2111 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) in skb_copy() argument
2117 if (!skb_frags_readable(skb)) in skb_copy()
2120 if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)) in skb_copy()
2123 headerlen = skb_headroom(skb); in skb_copy()
2124 size = skb_end_offset(skb) + skb->data_len; in skb_copy()
2126 skb_alloc_rx_flag(skb), NUMA_NO_NODE); in skb_copy()
2133 skb_put(n, skb->len); in skb_copy()
2135 BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)); in skb_copy()
2137 skb_copy_header(n, skb); in skb_copy()
2144 * @skb: buffer to copy
2145 * @headroom: headroom of new skb
2147 * @fclone: if true allocate the copy of the skb from the fclone
2159 struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, in __pskb_copy_fclone() argument
2162 unsigned int size = skb_headlen(skb) + headroom; in __pskb_copy_fclone()
2163 int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0); in __pskb_copy_fclone()
2172 skb_put(n, skb_headlen(skb)); in __pskb_copy_fclone()
2174 skb_copy_from_linear_data(skb, n->data, n->len); in __pskb_copy_fclone()
2176 n->truesize += skb->data_len; in __pskb_copy_fclone()
2177 n->data_len = skb->data_len; in __pskb_copy_fclone()
2178 n->len = skb->len; in __pskb_copy_fclone()
2180 if (skb_shinfo(skb)->nr_frags) { in __pskb_copy_fclone()
2183 if (skb_orphan_frags(skb, gfp_mask) || in __pskb_copy_fclone()
2184 skb_zerocopy_clone(n, skb, gfp_mask)) { in __pskb_copy_fclone()
2189 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_copy_fclone()
2190 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; in __pskb_copy_fclone()
2191 skb_frag_ref(skb, i); in __pskb_copy_fclone()
2196 if (skb_has_frag_list(skb)) { in __pskb_copy_fclone()
2197 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; in __pskb_copy_fclone()
2201 skb_copy_header(n, skb); in __pskb_copy_fclone()
2209 * @skb: buffer to reallocate
2215 * header of @skb. &sk_buff itself is not changed. &sk_buff MUST have
2219 * All the pointers pointing into skb header may change and must be
2223 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, in pskb_expand_head() argument
2226 unsigned int osize = skb_end_offset(skb); in pskb_expand_head()
2234 BUG_ON(skb_shared(skb)); in pskb_expand_head()
2236 skb_zcopy_downgrade_managed(skb); in pskb_expand_head()
2238 if (skb_pfmemalloc(skb)) in pskb_expand_head()
2249 memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head); in pskb_expand_head()
2252 skb_shinfo(skb), in pskb_expand_head()
2253 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); in pskb_expand_head()
2260 if (skb_cloned(skb)) { in pskb_expand_head()
2261 if (skb_orphan_frags(skb, gfp_mask)) in pskb_expand_head()
2263 if (skb_zcopy(skb)) in pskb_expand_head()
2264 refcount_inc(&skb_uarg(skb)->refcnt); in pskb_expand_head()
2265 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in pskb_expand_head()
2266 skb_frag_ref(skb, i); in pskb_expand_head()
2268 if (skb_has_frag_list(skb)) in pskb_expand_head()
2269 skb_clone_fraglist(skb); in pskb_expand_head()
2271 skb_release_data(skb, SKB_CONSUMED); in pskb_expand_head()
2273 skb_free_head(skb); in pskb_expand_head()
2275 off = (data + nhead) - skb->head; in pskb_expand_head()
2277 skb->head = data; in pskb_expand_head()
2278 skb->head_frag = 0; in pskb_expand_head()
2279 skb->data += off; in pskb_expand_head()
2281 skb_set_end_offset(skb, size); in pskb_expand_head()
2285 skb->tail += off; in pskb_expand_head()
2286 skb_headers_offset_update(skb, nhead); in pskb_expand_head()
2287 skb->cloned = 0; in pskb_expand_head()
2288 skb->hdr_len = 0; in pskb_expand_head()
2289 skb->nohdr = 0; in pskb_expand_head()
2290 atomic_set(&skb_shinfo(skb)->dataref, 1); in pskb_expand_head()
2292 skb_metadata_clear(skb); in pskb_expand_head()
2294 /* It is not generally safe to change skb->truesize. in pskb_expand_head()
2296 * when skb is orphaned (not attached to a socket). in pskb_expand_head()
2298 if (!skb->sk || skb->destructor == sock_edemux) in pskb_expand_head()
2299 skb->truesize += size - osize; in pskb_expand_head()
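The caveat spelled out above is that every cached pointer into the header is stale once the head has been reallocated. A hedged sketch of the common caller pattern via skb_cow_head(), which only falls back to pskb_expand_head() when there is not enough private headroom; the encapsulation header is invented.

#include <linux/skbuff.h>

struct example_tunnel_hdr {     /* hypothetical 8-byte encap header */
        __be32 id;
        __be32 flags;
};

static int example_push_encap(struct sk_buff *skb, __be32 id)
{
        struct example_tunnel_hdr *h;
        int err;

        /* Ensure writable headroom; skb->head may be reallocated here,
         * so no pointer into the old header survives this call.
         */
        err = skb_cow_head(skb, sizeof(*h));
        if (err)
                return err;

        h = skb_push(skb, sizeof(*h));  /* derive pointers only after the cow */
        h->id = id;
        h->flags = 0;
        return 0;
}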
2310 /* Make private copy of skb with writable head and some headroom */
2312 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) in skb_realloc_headroom() argument
2315 int delta = headroom - skb_headroom(skb); in skb_realloc_headroom()
2318 skb2 = pskb_copy(skb, GFP_ATOMIC); in skb_realloc_headroom()
2320 skb2 = skb_clone(skb, GFP_ATOMIC); in skb_realloc_headroom()
2332 int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri) in __skb_unclone_keeptruesize() argument
2338 saved_end_offset = skb_end_offset(skb); in __skb_unclone_keeptruesize()
2339 saved_truesize = skb->truesize; in __skb_unclone_keeptruesize()
2341 res = pskb_expand_head(skb, 0, 0, pri); in __skb_unclone_keeptruesize()
2345 skb->truesize = saved_truesize; in __skb_unclone_keeptruesize()
2347 if (likely(skb_end_offset(skb) == saved_end_offset)) in __skb_unclone_keeptruesize()
2350 /* We cannot change skb->end if the original or new value in __skb_unclone_keeptruesize()
2354 skb_end_offset(skb) == SKB_SMALL_HEAD_HEADROOM) { in __skb_unclone_keeptruesize()
2359 saved_end_offset, skb_end_offset(skb)); in __skb_unclone_keeptruesize()
2364 shinfo = skb_shinfo(skb); in __skb_unclone_keeptruesize()
2366 /* We are about to change back skb->end, in __skb_unclone_keeptruesize()
2369 memmove(skb->head + saved_end_offset, in __skb_unclone_keeptruesize()
2373 skb_set_end_offset(skb, saved_end_offset); in __skb_unclone_keeptruesize()
2380 * @skb: buffer to reallocate
2383 * Unlike skb_realloc_headroom, this one does not allocate a new skb
2384 * if possible; copies skb->sk to new skb as needed
2385 * and frees the original skb on failure.
2390 struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom) in skb_expand_head() argument
2392 int delta = headroom - skb_headroom(skb); in skb_expand_head()
2393 int osize = skb_end_offset(skb); in skb_expand_head()
2394 struct sock *sk = skb->sk; in skb_expand_head()
2398 return skb; in skb_expand_head()
2401 /* pskb_expand_head() might crash if skb is shared. */ in skb_expand_head()
2402 if (skb_shared(skb) || !is_skb_wmem(skb)) { in skb_expand_head()
2403 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); in skb_expand_head()
2410 consume_skb(skb); in skb_expand_head()
2411 skb = nskb; in skb_expand_head()
2413 if (pskb_expand_head(skb, delta, 0, GFP_ATOMIC)) in skb_expand_head()
2416 if (sk && is_skb_wmem(skb)) { in skb_expand_head()
2417 delta = skb_end_offset(skb) - osize; in skb_expand_head()
2419 skb->truesize += delta; in skb_expand_head()
2421 return skb; in skb_expand_head()
2424 kfree_skb(skb); in skb_expand_head()
2431 * @skb: buffer to copy
2447 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, in skb_copy_expand() argument
2458 if (!skb_frags_readable(skb)) in skb_copy_expand()
2461 if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)) in skb_copy_expand()
2464 oldheadroom = skb_headroom(skb); in skb_copy_expand()
2465 n = __alloc_skb(newheadroom + skb->len + newtailroom, in skb_copy_expand()
2466 gfp_mask, skb_alloc_rx_flag(skb), in skb_copy_expand()
2474 skb_put(n, skb->len); in skb_copy_expand()
2484 BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, in skb_copy_expand()
2485 skb->len + head_copy_len)); in skb_copy_expand()
2487 skb_copy_header(n, skb); in skb_copy_expand()
2496 * __skb_pad - zero pad the tail of an skb
2497 * @skb: buffer to pad
2505 * May return error in out of memory cases. The skb is freed on error
2509 int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error) in __skb_pad() argument
2515 if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { in __skb_pad()
2516 memset(skb->data+skb->len, 0, pad); in __skb_pad()
2520 ntail = skb->data_len + pad - (skb->end - skb->tail); in __skb_pad()
2521 if (likely(skb_cloned(skb) || ntail > 0)) { in __skb_pad()
2522 err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); in __skb_pad()
2527 /* FIXME: The use of this function with non-linear skbs really needs in __skb_pad()
2530 err = skb_linearize(skb); in __skb_pad()
2534 memset(skb->data + skb->len, 0, pad); in __skb_pad()
2539 kfree_skb(skb); in __skb_pad()
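__skb_pad() is the backend behind wrappers such as skb_put_padto(); a hedged sketch of padding a short frame to the Ethernet minimum before transmit. Note the ownership rule from the kernel-doc above: on error the skb has already been freed.

#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/errno.h>

/* Hypothetical xmit prologue: guarantee at least ETH_ZLEN bytes on the wire. */
static int example_pad_for_xmit(struct sk_buff *skb)
{
        if (skb_put_padto(skb, ETH_ZLEN))
                return -ENOMEM;         /* skb was already freed on failure */

        /* skb->len >= ETH_ZLEN now; the padding bytes are zeroed */
        return 0;
}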
2546 * @skb: start of the buffer to use
2551 * fragmented buffer. @tail must be the last fragment of @skb -- or
2552 * @skb itself. If this would exceed the total buffer size the kernel
2557 void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) in pskb_put() argument
2559 if (tail != skb) { in pskb_put()
2560 skb->data_len += len; in pskb_put()
2561 skb->len += len; in pskb_put()
2569 * @skb: buffer to use
2576 void *skb_put(struct sk_buff *skb, unsigned int len) in skb_put() argument
2578 void *tmp = skb_tail_pointer(skb); in skb_put()
2579 SKB_LINEAR_ASSERT(skb); in skb_put()
2580 skb->tail += len; in skb_put()
2581 skb->len += len; in skb_put()
2582 if (unlikely(skb->tail > skb->end)) in skb_put()
2583 skb_over_panic(skb, len, __builtin_return_address(0)); in skb_put()
2590 * @skb: buffer to use
2597 void *skb_push(struct sk_buff *skb, unsigned int len) in skb_push() argument
2599 skb->data -= len; in skb_push()
2600 skb->len += len; in skb_push()
2601 if (unlikely(skb->data < skb->head)) in skb_push()
2602 skb_under_panic(skb, len, __builtin_return_address(0)); in skb_push()
2603 return skb->data; in skb_push()
2609 * @skb: buffer to use
2617 void *skb_pull(struct sk_buff *skb, unsigned int len) in skb_pull() argument
2619 return skb_pull_inline(skb, len); in skb_pull()
2626 * @skb: buffer to use
2634 void *skb_pull_data(struct sk_buff *skb, size_t len) in skb_pull_data() argument
2636 void *data = skb->data; in skb_pull_data()
2638 if (skb->len < len) in skb_pull_data()
2641 skb_pull(skb, len); in skb_pull_data()
2649 * @skb: buffer to alter
2654 * The skb must be linear.
2656 void skb_trim(struct sk_buff *skb, unsigned int len) in skb_trim() argument
2658 if (skb->len > len) in skb_trim()
2659 __skb_trim(skb, len); in skb_trim()
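Taken together, skb_reserve(), skb_put(), skb_push() and skb_pull()/skb_trim() are just checked moves of skb->data and skb->tail within head..end. A hedged sketch that builds a frame payload-first and then prepends a header; sizes, header choice and the helper name are illustrative.

#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/string.h>

/* Hypothetical: reserve room for headers, append payload, push headers. */
static struct sk_buff *example_build_frame(const void *payload, unsigned int plen)
{
        unsigned int hdr_room = ETH_HLEN + sizeof(struct iphdr);
        struct sk_buff *skb = alloc_skb(hdr_room + plen, GFP_ATOMIC);
        struct iphdr *iph;

        if (!skb)
                return NULL;

        skb_reserve(skb, hdr_room);             /* headroom to push headers later */
        skb_put_data(skb, payload, plen);       /* append payload, advances tail/len */

        iph = skb_push(skb, sizeof(*iph));      /* prepend IP header, moves data back */
        memset(iph, 0, sizeof(*iph));           /* a real caller fills this in */
        skb_reset_network_header(skb);

        /* an Ethernet header would be pushed the same way (ETH_HLEN bytes) */
        return skb;
}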
2663 /* Trims skb to length len. It can change skb pointers.
2666 int ___pskb_trim(struct sk_buff *skb, unsigned int len) in ___pskb_trim() argument
2670 int offset = skb_headlen(skb); in ___pskb_trim()
2671 int nfrags = skb_shinfo(skb)->nr_frags; in ___pskb_trim()
2675 if (skb_cloned(skb) && in ___pskb_trim()
2676 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) in ___pskb_trim()
2684 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); in ___pskb_trim()
2691 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); in ___pskb_trim()
2694 skb_shinfo(skb)->nr_frags = i; in ___pskb_trim()
2697 skb_frag_unref(skb, i); in ___pskb_trim()
2699 if (skb_has_frag_list(skb)) in ___pskb_trim()
2700 skb_drop_fraglist(skb); in ___pskb_trim()
2704 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); in ___pskb_trim()
2736 if (len > skb_headlen(skb)) { in ___pskb_trim()
2737 skb->data_len -= skb->len - len; in ___pskb_trim()
2738 skb->len = len; in ___pskb_trim()
2740 skb->len = len; in ___pskb_trim()
2741 skb->data_len = 0; in ___pskb_trim()
2742 skb_set_tail_pointer(skb, len); in ___pskb_trim()
2745 if (!skb->sk || skb->destructor == sock_edemux) in ___pskb_trim()
2746 skb_condense(skb); in ___pskb_trim()
2753 int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len) in pskb_trim_rcsum_slow() argument
2755 if (skb->ip_summed == CHECKSUM_COMPLETE) { in pskb_trim_rcsum_slow()
2756 int delta = skb->len - len; in pskb_trim_rcsum_slow()
2758 skb->csum = csum_block_sub(skb->csum, in pskb_trim_rcsum_slow()
2759 skb_checksum(skb, len, delta, 0), in pskb_trim_rcsum_slow()
2761 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { in pskb_trim_rcsum_slow()
2762 int hdlen = (len > skb_headlen(skb)) ? skb_headlen(skb) : len; in pskb_trim_rcsum_slow()
2763 int offset = skb_checksum_start_offset(skb) + skb->csum_offset; in pskb_trim_rcsum_slow()
2768 return __pskb_trim(skb, len); in pskb_trim_rcsum_slow()
2773 * __pskb_pull_tail - advance tail of skb header
2774 * @skb: buffer to reallocate
2784 * or value of new tail of skb in the case of success.
2786 * All the pointers pointing into skb header may change and must be
2790 /* Moves tail of skb head forward, copying data from fragmented part,
2793 * 2. It may change skb pointers.
2797 void *__pskb_pull_tail(struct sk_buff *skb, int delta) in __pskb_pull_tail() argument
2799 /* If the skb does not have enough free space at the tail, get a new one in __pskb_pull_tail()
2801 * room at tail, reallocate without expansion only if skb is cloned. in __pskb_pull_tail()
2803 int i, k, eat = (skb->tail + delta) - skb->end; in __pskb_pull_tail()
2805 if (!skb_frags_readable(skb)) in __pskb_pull_tail()
2808 if (eat > 0 || skb_cloned(skb)) { in __pskb_pull_tail()
2809 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, in __pskb_pull_tail()
2814 BUG_ON(skb_copy_bits(skb, skb_headlen(skb), in __pskb_pull_tail()
2815 skb_tail_pointer(skb), delta)); in __pskb_pull_tail()
2820 if (!skb_has_frag_list(skb)) in __pskb_pull_tail()
2825 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_pull_tail()
2826 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in __pskb_pull_tail()
2834 * Certainly, it is possible to add an offset to skb data, in __pskb_pull_tail()
2837 * further bloating skb head and crucify ourselves here instead. in __pskb_pull_tail()
2841 struct sk_buff *list = skb_shinfo(skb)->frag_list; in __pskb_pull_tail()
2853 if (skb_is_gso(skb) && !list->head_frag && in __pskb_pull_tail()
2855 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; in __pskb_pull_tail()
2878 while ((list = skb_shinfo(skb)->frag_list) != insp) { in __pskb_pull_tail()
2879 skb_shinfo(skb)->frag_list = list->next; in __pskb_pull_tail()
2885 skb_shinfo(skb)->frag_list = clone; in __pskb_pull_tail()
2888 /* Success! Now we may commit changes to skb data. */ in __pskb_pull_tail()
2893 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_pull_tail()
2894 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in __pskb_pull_tail()
2897 skb_frag_unref(skb, i); in __pskb_pull_tail()
2900 skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; in __pskb_pull_tail()
2902 *frag = skb_shinfo(skb)->frags[i]; in __pskb_pull_tail()
2913 skb_shinfo(skb)->nr_frags = k; in __pskb_pull_tail()
2916 skb->tail += delta; in __pskb_pull_tail()
2917 skb->data_len -= delta; in __pskb_pull_tail()
2919 if (!skb->data_len) in __pskb_pull_tail()
2920 skb_zcopy_clear(skb, false); in __pskb_pull_tail()
2922 return skb_tail_pointer(skb); in __pskb_pull_tail()
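Callers rarely reach for __pskb_pull_tail() directly; the usual entry point is pskb_may_pull(), which linearizes just enough header bytes before they are dereferenced. A hedged sketch (the IPv4 choice and the assumption that the network header offset is already set are illustrative):

#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/errno.h>

/* Hypothetical: make sure the IPv4 header is in the linear area before use. */
static int example_check_ipv4(struct sk_buff *skb)
{
        const struct iphdr *iph;

        if (!pskb_may_pull(skb, sizeof(*iph)))
                return -EINVAL;                 /* truncated packet */

        iph = ip_hdr(skb);                      /* re-read: skb->head may have moved */
        if (iph->ihl < 5)
                return -EINVAL;

        if (!pskb_may_pull(skb, iph->ihl * 4))  /* pull IP options too, if any */
                return -EINVAL;

        return 0;
}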
2927 * skb_copy_bits - copy bits from skb to kernel buffer
2928 * @skb: source skb
2933 * Copy the specified number of bytes from the source skb to the
2941 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) in skb_copy_bits() argument
2943 int start = skb_headlen(skb); in skb_copy_bits()
2947 if (offset > (int)skb->len - len) in skb_copy_bits()
2954 skb_copy_from_linear_data_offset(skb, offset, to, copy); in skb_copy_bits()
2961 if (!skb_frags_readable(skb)) in skb_copy_bits()
2964 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_copy_bits()
2966 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; in skb_copy_bits()
2995 skb_walk_frags(skb, frag_iter) { in skb_copy_bits()
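skb_copy_bits() is the generic gather helper: as the loop above shows, it walks the linear area, the page frags and the frag list. A hedged sketch copying a header that may straddle fragments into a caller buffer; the UDP choice is arbitrary.

#include <linux/skbuff.h>
#include <linux/udp.h>

/* Hypothetical: fetch a UDP header regardless of where its bytes live. */
static int example_peek_udp(const struct sk_buff *skb, int offset,
                            struct udphdr *uh)
{
        /* Returns 0 on success or -EFAULT if the range is out of bounds;
         * the destination is a plain kernel buffer, no skb layout assumed.
         */
        return skb_copy_bits(skb, offset, uh, sizeof(*uh));
}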
3124 * Map linear and fragment data from the skb to spd. It reports true if the
3127 static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, in __skb_splice_bits() argument
3135 * If skb->head_frag is set, this 'linear' part is backed by a in __skb_splice_bits()
3139 if (__splice_segment(virt_to_page(skb->data), in __skb_splice_bits()
3140 (unsigned long) skb->data & (PAGE_SIZE - 1), in __skb_splice_bits()
3141 skb_headlen(skb), in __skb_splice_bits()
3143 skb_head_is_locked(skb), in __skb_splice_bits()
3150 if (!skb_frags_readable(skb)) in __skb_splice_bits()
3153 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { in __skb_splice_bits()
3154 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; in __skb_splice_bits()
3165 skb_walk_frags(skb, iter) { in __skb_splice_bits()
3182 * Map data from the skb to a pipe. Should handle both the linear part,
3185 int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, in skb_splice_bits() argument
3200 __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk); in skb_splice_bits()
3233 static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, in __skb_send_sock() argument
3238 struct sk_buff *head = skb; in __skb_send_sock()
3245 while (offset < skb_headlen(skb) && len) { in __skb_send_sock()
3249 slen = min_t(int, len, skb_headlen(skb) - offset); in __skb_send_sock()
3250 kv.iov_base = skb->data + offset; in __skb_send_sock()
3267 /* All the data was skb head? */ in __skb_send_sock()
3272 offset -= skb_headlen(skb); in __skb_send_sock()
3275 for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { in __skb_send_sock()
3276 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; in __skb_send_sock()
3284 for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { in __skb_send_sock()
3285 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; in __skb_send_sock()
3319 if (skb == head) { in __skb_send_sock()
3320 if (skb_has_frag_list(skb)) { in __skb_send_sock()
3321 skb = skb_shinfo(skb)->frag_list; in __skb_send_sock()
3324 } else if (skb->next) { in __skb_send_sock()
3325 skb = skb->next; in __skb_send_sock()
3337 /* Send skb data on a socket. Socket must be locked. */
3338 int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, in skb_send_sock_locked() argument
3341 return __skb_send_sock(sk, skb, offset, len, sendmsg_locked, 0); in skb_send_sock_locked()
3345 int skb_send_sock_locked_with_flags(struct sock *sk, struct sk_buff *skb, in skb_send_sock_locked_with_flags() argument
3348 return __skb_send_sock(sk, skb, offset, len, sendmsg_locked, flags); in skb_send_sock_locked_with_flags()
3352 /* Send skb data on a socket. Socket must be unlocked. */
3353 int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len) in skb_send_sock() argument
3355 return __skb_send_sock(sk, skb, offset, len, sendmsg_unlocked, 0); in skb_send_sock()
3359 * skb_store_bits - store bits from kernel buffer to skb
3360 * @skb: destination buffer
3366 * destination skb. This function handles all the messy bits of
3370 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) in skb_store_bits() argument
3372 int start = skb_headlen(skb); in skb_store_bits()
3376 if (offset > (int)skb->len - len) in skb_store_bits()
3382 skb_copy_to_linear_data_offset(skb, offset, from, copy); in skb_store_bits()
3389 if (!skb_frags_readable(skb)) in skb_store_bits()
3392 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_store_bits()
3393 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_store_bits()
3423 skb_walk_frags(skb, frag_iter) { in skb_store_bits()
3450 /* Checksum skb data. */
3451 __wsum skb_checksum(const struct sk_buff *skb, int offset, int len, __wsum csum) in skb_checksum() argument
3453 int start = skb_headlen(skb); in skb_checksum()
3462 csum = csum_partial(skb->data + offset, copy, csum); in skb_checksum()
3469 if (WARN_ON_ONCE(!skb_frags_readable(skb))) in skb_checksum()
3472 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_checksum()
3474 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_checksum()
3505 skb_walk_frags(skb, frag_iter) { in skb_checksum()
3533 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, in skb_copy_and_csum_bits() argument
3536 int start = skb_headlen(skb); in skb_copy_and_csum_bits()
3546 csum = csum_partial_copy_nocheck(skb->data + offset, to, in skb_copy_and_csum_bits()
3555 if (!skb_frags_readable(skb)) in skb_copy_and_csum_bits()
3558 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_copy_and_csum_bits()
3563 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); in skb_copy_and_csum_bits()
3565 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_copy_and_csum_bits()
3594 skb_walk_frags(skb, frag_iter) { in skb_copy_and_csum_bits()
3622 u32 skb_crc32c(const struct sk_buff *skb, int offset, int len, u32 crc) in skb_crc32c() argument
3624 int start = skb_headlen(skb); in skb_crc32c()
3630 crc = crc32c(crc, skb->data + offset, copy); in skb_crc32c()
3637 if (WARN_ON_ONCE(!skb_frags_readable(skb))) in skb_crc32c()
3640 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_crc32c()
3642 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_crc32c()
3669 skb_walk_frags(skb, frag_iter) { in skb_crc32c()
3693 __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len) in __skb_checksum_complete_head() argument
3697 sum = csum_fold(skb_checksum(skb, 0, len, skb->csum)); in __skb_checksum_complete_head()
3700 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && in __skb_checksum_complete_head()
3701 !skb->csum_complete_sw) in __skb_checksum_complete_head()
3702 netdev_rx_csum_fault(skb->dev, skb); in __skb_checksum_complete_head()
3704 if (!skb_shared(skb)) in __skb_checksum_complete_head()
3705 skb->csum_valid = !sum; in __skb_checksum_complete_head()
3710 /* This function assumes skb->csum already holds pseudo header's checksum,
3712 * __skb_checksum_validate_complete(). And, the original skb->csum must
3716 * zero. The new checksum is stored back into skb->csum unless the skb is
3719 __sum16 __skb_checksum_complete(struct sk_buff *skb) in __skb_checksum_complete() argument
3724 csum = skb_checksum(skb, 0, skb->len, 0); in __skb_checksum_complete()
3726 sum = csum_fold(csum_add(skb->csum, csum)); in __skb_checksum_complete()
3730 * between the original skb->csum and skb_checksum(). This means either in __skb_checksum_complete()
3731 * the original hardware checksum is incorrect or we screw up skb->csum in __skb_checksum_complete()
3732 * when moving skb->data around. in __skb_checksum_complete()
3735 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && in __skb_checksum_complete()
3736 !skb->csum_complete_sw) in __skb_checksum_complete()
3737 netdev_rx_csum_fault(skb->dev, skb); in __skb_checksum_complete()
3740 if (!skb_shared(skb)) { in __skb_checksum_complete()
3742 skb->csum = csum; in __skb_checksum_complete()
3743 skb->ip_summed = CHECKSUM_COMPLETE; in __skb_checksum_complete()
3744 skb->csum_complete_sw = 1; in __skb_checksum_complete()
3745 skb->csum_valid = !sum; in __skb_checksum_complete()
3756 * Calculates the amount of linear headroom needed in the 'to' skb passed
3780 * skb_zerocopy - Zero copy skb to skb
3795 * -EFAULT: skb_copy_bits() found some problem with skb geometry
3801 int plen = 0; /* length of skb->head fragment */ in skb_zerocopy()
3857 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) in skb_copy_and_csum_dev() argument
3862 if (skb->ip_summed == CHECKSUM_PARTIAL) in skb_copy_and_csum_dev()
3863 csstart = skb_checksum_start_offset(skb); in skb_copy_and_csum_dev()
3865 csstart = skb_headlen(skb); in skb_copy_and_csum_dev()
3867 BUG_ON(csstart > skb_headlen(skb)); in skb_copy_and_csum_dev()
3869 skb_copy_from_linear_data(skb, to, csstart); in skb_copy_and_csum_dev()
3872 if (csstart != skb->len) in skb_copy_and_csum_dev()
3873 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, in skb_copy_and_csum_dev()
3874 skb->len - csstart); in skb_copy_and_csum_dev()
3876 if (skb->ip_summed == CHECKSUM_PARTIAL) { in skb_copy_and_csum_dev()
3877 long csstuff = csstart + skb->csum_offset; in skb_copy_and_csum_dev()
3954 * skb_rbtree_purge - empty a skb rbtree
3969 struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode); in skb_rbtree_purge() local
3972 rb_erase(&skb->rbnode, root); in skb_rbtree_purge()
3973 sum += skb->truesize; in skb_rbtree_purge()
3974 kfree_skb(skb); in skb_rbtree_purge()
3981 struct sk_buff *skb, *next; in skb_errqueue_purge() local
3988 skb_queue_walk_safe(list, skb, next) { in skb_errqueue_purge()
3989 if (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ZEROCOPY || in skb_errqueue_purge()
3990 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) in skb_errqueue_purge()
3992 __skb_unlink(skb, list); in skb_errqueue_purge()
3993 __skb_queue_tail(&kill, skb); in skb_errqueue_purge()
4044 * @skb: buffer to remove
4050 * You must know what list the SKB is on.
4052 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) in skb_unlink() argument
4057 __skb_unlink(skb, list); in skb_unlink()
4082 static inline void skb_split_inside_header(struct sk_buff *skb, in skb_split_inside_header() argument
4088 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), in skb_split_inside_header()
4091 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in skb_split_inside_header()
4092 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; in skb_split_inside_header()
4094 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; in skb_split_inside_header()
4095 skb1->unreadable = skb->unreadable; in skb_split_inside_header()
4096 skb_shinfo(skb)->nr_frags = 0; in skb_split_inside_header()
4097 skb1->data_len = skb->data_len; in skb_split_inside_header()
4099 skb->data_len = 0; in skb_split_inside_header()
4100 skb->len = len; in skb_split_inside_header()
4101 skb_set_tail_pointer(skb, len); in skb_split_inside_header()
4104 static inline void skb_split_no_header(struct sk_buff *skb, in skb_split_no_header() argument
4109 const int nfrags = skb_shinfo(skb)->nr_frags; in skb_split_no_header()
4111 skb_shinfo(skb)->nr_frags = 0; in skb_split_no_header()
4112 skb1->len = skb1->data_len = skb->len - len; in skb_split_no_header()
4113 skb->len = len; in skb_split_no_header()
4114 skb->data_len = len - pos; in skb_split_no_header()
4117 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in skb_split_no_header()
4120 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; in skb_split_no_header()
4131 skb_frag_ref(skb, i); in skb_split_no_header()
4134 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); in skb_split_no_header()
4135 skb_shinfo(skb)->nr_frags++; in skb_split_no_header()
4139 skb_shinfo(skb)->nr_frags++; in skb_split_no_header()
4144 skb1->unreadable = skb->unreadable; in skb_split_no_header()
4148 * skb_split - Split fragmented skb to two parts at length len.
4149 * @skb: the buffer to split
4151 * @len: new length for skb
4153 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) in skb_split() argument
4155 int pos = skb_headlen(skb); in skb_split()
4158 skb_zcopy_downgrade_managed(skb); in skb_split()
4160 skb_shinfo(skb1)->flags |= skb_shinfo(skb)->flags & zc_flags; in skb_split()
4161 skb_zerocopy_clone(skb1, skb, 0); in skb_split()
4163 skb_split_inside_header(skb, skb1, len, pos); in skb_split()
4165 skb_split_no_header(skb, skb1, len, pos); in skb_split()
4169 /* Shifting from/to a cloned skb is a no-go.
4173 static int skb_prepare_for_shift(struct sk_buff *skb) in skb_prepare_for_shift() argument
4175 return skb_unclone_keeptruesize(skb, GFP_ATOMIC); in skb_prepare_for_shift()
4179 * skb_shift - Shifts paged data partially from skb to another
4181 * @skb: buffer from which the paged data comes from
4185 * the length of the skb, from skb to tgt. Returns number bytes shifted.
4186 * It's up to caller to free skb if everything was shifted.
4190 * Skb cannot include anything else but paged data while tgt is allowed
4194 * specialized skb free'er to handle frags without up-to-date nr_frags.
4196 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) in skb_shift() argument
4201 BUG_ON(shiftlen > skb->len); in skb_shift()
4203 if (skb_headlen(skb)) in skb_shift()
4205 if (skb_zcopy(tgt) || skb_zcopy(skb)) in skb_shift()
4208 DEBUG_NET_WARN_ON_ONCE(tgt->pp_recycle != skb->pp_recycle); in skb_shift()
4209 DEBUG_NET_WARN_ON_ONCE(skb_cmp_decrypted(tgt, skb)); in skb_shift()
4214 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
4227 if (skb_prepare_for_shift(skb) || in skb_shift()
4232 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
4245 /* Skip full, not-fitting skb to avoid expensive operations */ in skb_shift()
4246 if ((shiftlen == skb->len) && in skb_shift()
4247 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) in skb_shift()
4250 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) in skb_shift()
4253 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { in skb_shift()
4257 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
4285 fragfrom = &skb_shinfo(skb)->frags[0]; in skb_shift()
4289 __skb_frag_unref(fragfrom, skb->pp_recycle); in skb_shift()
4292 /* Reposition in the original skb */ in skb_shift()
4294 while (from < skb_shinfo(skb)->nr_frags) in skb_shift()
4295 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; in skb_shift()
4296 skb_shinfo(skb)->nr_frags = to; in skb_shift()
4298 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); in skb_shift()
4301 /* Most likely the tgt won't ever need its checksum anymore, skb on in skb_shift()
4305 skb->ip_summed = CHECKSUM_PARTIAL; in skb_shift()
4307 skb_len_add(skb, -shiftlen); in skb_shift()
4314 * skb_prepare_seq_read - Prepare a sequential read of skb data
4315 * @skb: the buffer to read
4323 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, in skb_prepare_seq_read() argument
4328 st->root_skb = st->cur_skb = skb; in skb_prepare_seq_read()
4336 * skb_seq_read - Sequentially read skb data
4341 * Reads a block of skb data at @consumed relative to the
4344 * of the block or 0 if the end of the skb data or the upper
4448 * skb_abort_seq_read - Abort a sequential read of skb data
4511 * skb_find_text - Find a text pattern in skb data
4512 * @skb: the buffer to look in
4517 * Finds a pattern in the skb data according to the specified
4522 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, in skb_find_text() argument
4534 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state)); in skb_find_text()
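skb_find_text() above is itself a user of the sequential-read API: prepare, read blocks, abort on early exit. A hedged sketch of that loop; the per-block scanner callback is invented.

#include <linux/skbuff.h>

/* Hypothetical: feed every byte of [from, to) to a scanner, block by block. */
static void example_scan_skb(struct sk_buff *skb, unsigned int from,
                             unsigned int to,
                             void (*scan)(const u8 *data, unsigned int len))
{
        struct skb_seq_state st;
        const u8 *data;
        unsigned int consumed = 0, len;

        skb_prepare_seq_read(skb, from, to, &st);
        while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
                scan(data, len);        /* one linear block at a time, no copy */
                consumed += len;
        }
        /* a return of 0 already cleaned up the iterator state;
         * skb_abort_seq_read(&st) is only needed when stopping early
         */
}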
4541 int skb_append_pagefrags(struct sk_buff *skb, struct page *page, in skb_append_pagefrags() argument
4544 int i = skb_shinfo(skb)->nr_frags; in skb_append_pagefrags()
4546 if (skb_can_coalesce(skb, i, page, offset)) { in skb_append_pagefrags()
4547 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size); in skb_append_pagefrags()
4549 skb_zcopy_downgrade_managed(skb); in skb_append_pagefrags()
4551 skb_fill_page_desc_noacc(skb, i, page, offset, size); in skb_append_pagefrags()
4561 * skb_pull_rcsum - pull skb and update receive checksum
4562 * @skb: buffer to update
4571 void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) in skb_pull_rcsum() argument
4573 unsigned char *data = skb->data; in skb_pull_rcsum()
4575 BUG_ON(len > skb->len); in skb_pull_rcsum()
4576 __skb_pull(skb, len); in skb_pull_rcsum()
4577 skb_postpull_rcsum(skb, data, len); in skb_pull_rcsum()
4578 return skb->data; in skb_pull_rcsum()
4594 struct sk_buff *skb_segment_list(struct sk_buff *skb, in skb_segment_list() argument
4598 struct sk_buff *list_skb = skb_shinfo(skb)->frag_list; in skb_segment_list()
4599 unsigned int tnl_hlen = skb_tnl_header_len(skb); in skb_segment_list()
4606 skb_push(skb, -skb_network_offset(skb) + offset); in skb_segment_list()
4609 err = skb_unclone(skb, GFP_ATOMIC); in skb_segment_list()
4613 skb_shinfo(skb)->frag_list = NULL; in skb_segment_list()
4633 skb->next = nskb; in skb_segment_list()
4649 len_diff = skb_network_header_len(nskb) - skb_network_header_len(skb); in skb_segment_list()
4650 __copy_skb_header(nskb, skb); in skb_segment_list()
4652 skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb)); in skb_segment_list()
4654 skb_copy_from_linear_data_offset(skb, -tnl_hlen, in skb_segment_list()
4663 skb->truesize = skb->truesize - delta_truesize; in skb_segment_list()
4664 skb->data_len = skb->data_len - delta_len; in skb_segment_list()
4665 skb->len = skb->len - delta_len; in skb_segment_list()
4667 skb_gso_reset(skb); in skb_segment_list()
4669 skb->prev = tail; in skb_segment_list()
4671 if (skb_needs_linearize(skb, features) && in skb_segment_list()
4672 __skb_linearize(skb)) in skb_segment_list()
4675 skb_get(skb); in skb_segment_list()
4677 return skb; in skb_segment_list()
4680 kfree_skb_list(skb->next); in skb_segment_list()
4681 skb->next = NULL; in skb_segment_list()
4687 * skb_segment - Perform protocol segmentation on skb.
4691 * This function performs segmentation on the given skb. It returns
4756 * Try to split the SKB to multiple GSO SKBs in skb_segment()
5099 /* The SKB kmem_cache slab is critical for network performance. Never
5125 * struct skb_shared_info is located at the end of skb->head, in skb_init()
5139 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len, in __skb_to_sgvec() argument
5142 int start = skb_headlen(skb); in __skb_to_sgvec()
5153 sg_set_buf(sg, skb->data + offset, copy); in __skb_to_sgvec()
5160 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __skb_to_sgvec()
5165 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); in __skb_to_sgvec()
5167 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in __skb_to_sgvec()
5183 skb_walk_frags(skb, frag_iter) { in __skb_to_sgvec()
5212 * @skb: Socket buffer containing the buffers to be mapped
5222 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) in skb_to_sgvec() argument
5224 int nsg = __skb_to_sgvec(skb, sg, offset, len, 0); in skb_to_sgvec()
5235 /* Compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to the given
5236 * sglist without marking the sg which contains the last skb data as the end.
5254 int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, in skb_to_sgvec_nomark() argument
5257 return __skb_to_sgvec(skb, sg, offset, len, 0); in skb_to_sgvec_nomark()
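A sketch of the usual consumer pattern (crypto-style): map the skb payload into a caller-provided scatterlist. The helper name and the assumption that @nents entries are sufficient are illustrative.

#include <linux/scatterlist.h>
#include <linux/skbuff.h>

/* Map the whole skb payload into @sg for a subsequent hash/cipher request.
 * The caller must size the table (e.g. from skb_cow_data()'s return value);
 * skb_to_sgvec() returns the number of entries used or a negative errno.
 */
static int my_map_skb(struct sk_buff *skb, struct scatterlist *sg,
                      unsigned int nents)
{
        sg_init_table(sg, nents);
        return skb_to_sgvec(skb, sg, 0, skb->len);
}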
5265 * @skb: The socket buffer to check.
5267 * @trailer: Returned pointer to the skb where the @tailbits space begins
5275 * set to point to the skb in which this space begins.
5280 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) in skb_cow_data() argument
5286 /* If skb is cloned or its head is paged, reallocate in skb_cow_data()
5290 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && in skb_cow_data()
5291 !__pskb_pull_tail(skb, __skb_pagelen(skb))) in skb_cow_data()
5295 if (!skb_has_frag_list(skb)) { in skb_cow_data()
5301 if (skb_tailroom(skb) < tailbits && in skb_cow_data()
5302 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) in skb_cow_data()
5306 *trailer = skb; in skb_cow_data()
5313 skb_p = &skb_shinfo(skb)->frag_list; in skb_cow_data()
5326 /* If the skb is the last, worry about trailer. */ in skb_cow_data()
5357 * OK, link new skb, drop old one */ in skb_cow_data()
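A sketch of the IPsec-style calling convention: request private data plus tailroom for a trailer. The 16-byte trailer size and helper name are assumptions.

#include <linux/skbuff.h>

/* Make sure @skb is writable end to end and that 16 bytes of trailer room
 * exist (ESP output does essentially this before appending padding/ICV).
 * On success the return value is the number of buffers the data spans,
 * which is exactly what a scatterlist for the packet needs.
 */
static int my_make_trailer_room(struct sk_buff *skb, struct sk_buff **trailer)
{
        int nfrags = skb_cow_data(skb, 16, trailer);

        if (nfrags < 0)
                return nfrags;

        /* *trailer now points at the skb whose tailroom holds the 16 bytes */
        return nfrags;
}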
5373 static void sock_rmem_free(struct sk_buff *skb) in sock_rmem_free() argument
5375 struct sock *sk = skb->sk; in sock_rmem_free()
5377 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); in sock_rmem_free()
5380 static void skb_set_err_queue(struct sk_buff *skb) in skb_set_err_queue() argument
5385 skb->pkt_type = PACKET_OUTGOING; in skb_set_err_queue()
5392 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) in sock_queue_err_skb() argument
5394 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= in sock_queue_err_skb()
5398 skb_orphan(skb); in sock_queue_err_skb()
5399 skb->sk = sk; in sock_queue_err_skb()
5400 skb->destructor = sock_rmem_free; in sock_queue_err_skb()
5401 atomic_add(skb->truesize, &sk->sk_rmem_alloc); in sock_queue_err_skb()
5402 skb_set_err_queue(skb); in sock_queue_err_skb()
5405 skb_dst_force(skb); in sock_queue_err_skb()
5407 skb_queue_tail(&sk->sk_error_queue, skb); in sock_queue_err_skb()
5414 static bool is_icmp_err_skb(const struct sk_buff *skb) in is_icmp_err_skb() argument
5416 return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP || in is_icmp_err_skb()
5417 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6); in is_icmp_err_skb()
5423 struct sk_buff *skb, *skb_next = NULL; in sock_dequeue_err_skb() local
5431 skb = __skb_dequeue(q); in sock_dequeue_err_skb()
5432 if (skb && (skb_next = skb_peek(q))) { in sock_dequeue_err_skb()
5439 if (is_icmp_err_skb(skb) && !icmp_next) in sock_dequeue_err_skb()
5445 return skb; in sock_dequeue_err_skb()
5450 * skb_clone_sk - create clone of skb, and take reference to socket
5451 * @skb: the skb to clone
5462 struct sk_buff *skb_clone_sk(struct sk_buff *skb) in skb_clone_sk() argument
5464 struct sock *sk = skb->sk; in skb_clone_sk()
5470 clone = skb_clone(skb, GFP_ATOMIC); in skb_clone_sk()
5483 static void __skb_complete_tx_timestamp(struct sk_buff *skb, in __skb_complete_tx_timestamp() argument
5491 BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb)); in __skb_complete_tx_timestamp()
5493 serr = SKB_EXT_ERR(skb); in __skb_complete_tx_timestamp()
5499 serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0; in __skb_complete_tx_timestamp()
5501 serr->ee.ee_data = skb_shinfo(skb)->tskey; in __skb_complete_tx_timestamp()
5506 err = sock_queue_err_skb(sk, skb); in __skb_complete_tx_timestamp()
5509 kfree_skb(skb); in __skb_complete_tx_timestamp()
5526 void skb_complete_tx_timestamp(struct sk_buff *skb, in skb_complete_tx_timestamp() argument
5529 struct sock *sk = skb->sk; in skb_complete_tx_timestamp()
5538 *skb_hwtstamps(skb) = *hwtstamps; in skb_complete_tx_timestamp()
5539 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false); in skb_complete_tx_timestamp()
5545 kfree_skb(skb); in skb_complete_tx_timestamp()
5549 static bool skb_tstamp_tx_report_so_timestamping(struct sk_buff *skb, in skb_tstamp_tx_report_so_timestamping() argument
5555 return skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP; in skb_tstamp_tx_report_so_timestamping()
5557 return skb_shinfo(skb)->tx_flags & (hwtstamps ? SKBTX_HW_TSTAMP_NOBPF : in skb_tstamp_tx_report_so_timestamping()
5560 return TCP_SKB_CB(skb)->txstamp_ack & TSTAMP_ACK_SK; in skb_tstamp_tx_report_so_timestamping()
5562 return skb_shinfo(skb)->tx_flags & SKBTX_COMPLETION_TSTAMP; in skb_tstamp_tx_report_so_timestamping()
5568 static void skb_tstamp_tx_report_bpf_timestamping(struct sk_buff *skb, in skb_tstamp_tx_report_bpf_timestamping() argument
5582 *skb_hwtstamps(skb) = *hwtstamps; in skb_tstamp_tx_report_bpf_timestamping()
5594 bpf_skops_tx_timestamping(sk, skb, op); in skb_tstamp_tx_report_bpf_timestamping()
5602 struct sk_buff *skb; in __skb_tstamp_tx() local
5629 skb = tcp_get_timestamping_opt_stats(sk, orig_skb, in __skb_tstamp_tx()
5634 skb = alloc_skb(0, GFP_ATOMIC); in __skb_tstamp_tx()
5636 skb = skb_clone(orig_skb, GFP_ATOMIC); in __skb_tstamp_tx()
5638 if (skb_orphan_frags_rx(skb, GFP_ATOMIC)) { in __skb_tstamp_tx()
5639 kfree_skb(skb); in __skb_tstamp_tx()
5643 if (!skb) in __skb_tstamp_tx()
5647 skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags & in __skb_tstamp_tx()
5649 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey; in __skb_tstamp_tx()
5653 *skb_hwtstamps(skb) = *hwtstamps; in __skb_tstamp_tx()
5655 __net_timestamp(skb); in __skb_tstamp_tx()
5657 __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats); in __skb_tstamp_tx()
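A sketch of how a driver's TX-completion path typically feeds this machinery through the exported skb_tstamp_tx() wrapper; the raw-nanoseconds parameter and helper name are illustrative.

#include <linux/ktime.h>
#include <linux/skbuff.h>

/* On TX completion, report the hardware timestamp if one was requested.
 * skb_tstamp_tx() clones the skb and queues the clone on the socket
 * error queue via __skb_tstamp_tx()/__skb_complete_tx_timestamp().
 */
static void my_tx_complete(struct sk_buff *skb, u64 hw_ns)
{
        if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
                struct skb_shared_hwtstamps hwts = {
                        .hwtstamp = ns_to_ktime(hw_ns),
                };

                skb_tstamp_tx(skb, &hwts);
        }

        consume_skb(skb);
}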
5670 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) in skb_complete_wifi_ack() argument
5672 struct sock *sk = skb->sk; in skb_complete_wifi_ack()
5676 skb->wifi_acked_valid = 1; in skb_complete_wifi_ack()
5677 skb->wifi_acked = acked; in skb_complete_wifi_ack()
5679 serr = SKB_EXT_ERR(skb); in skb_complete_wifi_ack()
5688 err = sock_queue_err_skb(sk, skb); in skb_complete_wifi_ack()
5692 kfree_skb(skb); in skb_complete_wifi_ack()
5699 * @skb: the skb to set
5700 * @start: the number of bytes after skb->data to start checksumming.
5704 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
5706 * This function checks and sets those values and skb->ip_summed: if this
5709 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) in skb_partial_csum_set() argument
5712 u32 csum_start = skb_headroom(skb) + (u32)start; in skb_partial_csum_set()
5714 if (unlikely(csum_start >= U16_MAX || csum_end > skb_headlen(skb))) { in skb_partial_csum_set()
5716 start, off, skb_headroom(skb), skb_headlen(skb)); in skb_partial_csum_set()
5719 skb->ip_summed = CHECKSUM_PARTIAL; in skb_partial_csum_set()
5720 skb->csum_start = csum_start; in skb_partial_csum_set()
5721 skb->csum_offset = off; in skb_partial_csum_set()
5722 skb->transport_header = csum_start; in skb_partial_csum_set()
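A sketch of the virtio-net-style receive fix-up: take checksum start/offset from a device-provided header (here plain parameters) and switch the skb to CHECKSUM_PARTIAL. The helper name is hypothetical.

#include <linux/errno.h>
#include <linux/skbuff.h>

/* Mark @skb CHECKSUM_PARTIAL using offsets supplied by the device.
 * skb_partial_csum_set() rejects offsets that do not fit in the linear
 * head, so a malicious or buggy header cannot make the stack oops.
 */
static int my_set_partial_csum(struct sk_buff *skb, u16 csum_start,
                               u16 csum_offset)
{
        if (!skb_partial_csum_set(skb, csum_start, csum_offset))
                return -EINVAL;

        return 0;
}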
5727 static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len, in skb_maybe_pull_tail() argument
5730 if (skb_headlen(skb) >= len) in skb_maybe_pull_tail()
5736 if (max > skb->len) in skb_maybe_pull_tail()
5737 max = skb->len; in skb_maybe_pull_tail()
5739 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) in skb_maybe_pull_tail()
5742 if (skb_headlen(skb) < len) in skb_maybe_pull_tail()
5750 static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb, in skb_checksum_setup_ip() argument
5758 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr), in skb_checksum_setup_ip()
5760 if (!err && !skb_partial_csum_set(skb, off, in skb_checksum_setup_ip()
5764 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check; in skb_checksum_setup_ip()
5767 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr), in skb_checksum_setup_ip()
5769 if (!err && !skb_partial_csum_set(skb, off, in skb_checksum_setup_ip()
5773 return err ? ERR_PTR(err) : &udp_hdr(skb)->check; in skb_checksum_setup_ip()
5784 static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate) in skb_checksum_setup_ipv4() argument
5793 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv4()
5799 if (ip_is_fragment(ip_hdr(skb))) in skb_checksum_setup_ipv4()
5802 off = ip_hdrlen(skb); in skb_checksum_setup_ipv4()
5809 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off); in skb_checksum_setup_ipv4()
5814 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, in skb_checksum_setup_ipv4()
5815 ip_hdr(skb)->daddr, in skb_checksum_setup_ipv4()
5816 skb->len - off, in skb_checksum_setup_ipv4()
5817 ip_hdr(skb)->protocol, 0); in skb_checksum_setup_ipv4()
5829 #define OPT_HDR(type, skb, off) \ argument
5830 (type *)(skb_network_header(skb) + (off))
5832 static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate) in skb_checksum_setup_ipv6() argument
5847 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); in skb_checksum_setup_ipv6()
5851 nexthdr = ipv6_hdr(skb)->nexthdr; in skb_checksum_setup_ipv6()
5853 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); in skb_checksum_setup_ipv6()
5861 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv6()
5868 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); in skb_checksum_setup_ipv6()
5876 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv6()
5883 hp = OPT_HDR(struct ip_auth_hdr, skb, off); in skb_checksum_setup_ipv6()
5891 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv6()
5898 hp = OPT_HDR(struct frag_hdr, skb, off); in skb_checksum_setup_ipv6()
5918 csum = skb_checksum_setup_ip(skb, nexthdr, off); in skb_checksum_setup_ipv6()
5923 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, in skb_checksum_setup_ipv6()
5924 &ipv6_hdr(skb)->daddr, in skb_checksum_setup_ipv6()
5925 skb->len - off, nexthdr, 0); in skb_checksum_setup_ipv6()
5934 * @skb: the skb to set up
5937 int skb_checksum_setup(struct sk_buff *skb, bool recalculate) in skb_checksum_setup() argument
5941 switch (skb->protocol) { in skb_checksum_setup()
5943 err = skb_checksum_setup_ipv4(skb, recalculate); in skb_checksum_setup()
5947 err = skb_checksum_setup_ipv6(skb, recalculate); in skb_checksum_setup()
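A sketch of the backend-style use (Xen netback, virtio hosts): for CHECKSUM_PARTIAL frames coming from a guest, rebuild csum_start/csum_offset from the IP and TCP/UDP headers. The wrapper name is illustrative.

#include <linux/skbuff.h>

/* Re-derive the partial-checksum offsets of a guest-supplied frame.
 * recalculate=true also rewrites the pseudo-header checksum field.
 */
static int my_fixup_guest_csum(struct sk_buff *skb)
{
        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return 0;

        return skb_checksum_setup(skb, true);
}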
5960 * skb_checksum_maybe_trim - maybe trims the given skb
5961 * @skb: the skb to check
5964 * Checks whether the given skb has data beyond the given transport length.
5965 * If so, returns a cloned skb trimmed to this transport length.
5966 * Otherwise returns the provided skb. Returns NULL in error cases
5967 * (e.g. transport_len exceeds skb length or out-of-memory).
5969 * Caller needs to set the skb transport header and free any returned skb if it
5970 * differs from the provided skb.
5972 static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb, in skb_checksum_maybe_trim() argument
5976 unsigned int len = skb_transport_offset(skb) + transport_len; in skb_checksum_maybe_trim()
5979 if (skb->len < len) in skb_checksum_maybe_trim()
5981 else if (skb->len == len) in skb_checksum_maybe_trim()
5982 return skb; in skb_checksum_maybe_trim()
5984 skb_chk = skb_clone(skb, GFP_ATOMIC); in skb_checksum_maybe_trim()
5998 * skb_checksum_trimmed - validate checksum of an skb
5999 * @skb: the skb to check
6003 * Applies the given checksum function skb_chkf to the provided skb.
6004 * Returns a checked and maybe trimmed skb. Returns NULL on error.
6006 * If the skb has data beyond the given transport length, then a
6007 * trimmed & cloned skb is checked and returned.
6009 * Caller needs to set the skb transport header and free any returned skb if it
6010 * differs from the provided skb.
6012 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, in skb_checksum_trimmed() argument
6014 __sum16(*skb_chkf)(struct sk_buff *skb)) in skb_checksum_trimmed() argument
6017 unsigned int offset = skb_transport_offset(skb); in skb_checksum_trimmed()
6020 skb_chk = skb_checksum_maybe_trim(skb, transport_len); in skb_checksum_trimmed()
6037 if (skb_chk && skb_chk != skb) in skb_checksum_trimmed()
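A sketch of the trim-and-validate pattern used by IGMP/MLD-style receivers; the checksum callback is passed in, and the trimmed clone (if any) is freed by the caller as the comment above requires. The helper name is hypothetical.

#include <linux/errno.h>
#include <linux/skbuff.h>

/* Validate a message whose header announces its own length: trim anything
 * beyond @transport_len, run @chkf over the result, and drop the trimmed
 * clone if one was created (we only need the verdict here).
 */
static int my_validate_msg(struct sk_buff *skb, unsigned int transport_len,
                           __sum16 (*chkf)(struct sk_buff *skb))
{
        struct sk_buff *skb_chk;

        skb_chk = skb_checksum_trimmed(skb, transport_len, chkf);
        if (!skb_chk)
                return -EINVAL;

        if (skb_chk != skb)
                kfree_skb(skb_chk);

        return 0;
}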
6045 void __skb_warn_lro_forwarding(const struct sk_buff *skb) in __skb_warn_lro_forwarding() argument
6048 skb->dev->name); in __skb_warn_lro_forwarding()
6052 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) in kfree_skb_partial() argument
6055 skb_release_head_state(skb); in kfree_skb_partial()
6056 kmem_cache_free(net_hotdata.skbuff_cache, skb); in kfree_skb_partial()
6058 __kfree_skb(skb); in kfree_skb_partial()
6064 * skb_try_coalesce - try to merge an skb into a prior one
6082 * pages within the same SKB. In theory we could take full in skb_try_coalesce()
6144 /* if the skb is not cloned this does nothing in skb_try_coalesce()
6162 * skb_scrub_packet - scrub an skb
6164 * @skb: buffer to clean
6170 * skb_scrub_packet can also be used to clean a skb before injecting it in
6172 * skb that could impact namespace isolation.
6174 void skb_scrub_packet(struct sk_buff *skb, bool xnet) in skb_scrub_packet() argument
6176 skb->pkt_type = PACKET_HOST; in skb_scrub_packet()
6177 skb->skb_iif = 0; in skb_scrub_packet()
6178 skb->ignore_df = 0; in skb_scrub_packet()
6179 skb_dst_drop(skb); in skb_scrub_packet()
6180 skb_ext_reset(skb); in skb_scrub_packet()
6181 nf_reset_ct(skb); in skb_scrub_packet()
6182 nf_reset_trace(skb); in skb_scrub_packet()
6185 skb->offload_fwd_mark = 0; in skb_scrub_packet()
6186 skb->offload_l3_fwd_mark = 0; in skb_scrub_packet()
6188 ipvs_reset(skb); in skb_scrub_packet()
6193 skb->mark = 0; in skb_scrub_packet()
6194 skb_clear_tstamp(skb); in skb_scrub_packet()
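A sketch of the forwarding-path convention: scrub the skb, treating the move as cross-namespace (xnet) only when the ingress and egress devices live in different netns. The helper name is illustrative.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>

/* Prepare @skb for injection into @out: drop dst, conntrack and friends,
 * and additionally clear mark/timestamp when crossing a namespace border.
 */
static void my_forward_prepare(struct sk_buff *skb, struct net_device *out)
{
        bool xnet = !net_eq(dev_net(skb->dev), dev_net(out));

        skb_scrub_packet(skb, xnet);
}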
6198 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) in skb_reorder_vlan_header() argument
6203 if (skb_cow(skb, skb_headroom(skb)) < 0) { in skb_reorder_vlan_header()
6204 kfree_skb(skb); in skb_reorder_vlan_header()
6208 mac_len = skb->data - skb_mac_header(skb); in skb_reorder_vlan_header()
6210 memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb), in skb_reorder_vlan_header()
6214 meta_len = skb_metadata_len(skb); in skb_reorder_vlan_header()
6216 meta = skb_metadata_end(skb) - meta_len; in skb_reorder_vlan_header()
6220 skb->mac_header += VLAN_HLEN; in skb_reorder_vlan_header()
6221 return skb; in skb_reorder_vlan_header()
6224 struct sk_buff *skb_vlan_untag(struct sk_buff *skb) in skb_vlan_untag() argument
6229 if (unlikely(skb_vlan_tag_present(skb))) { in skb_vlan_untag()
6231 return skb; in skb_vlan_untag()
6234 skb = skb_share_check(skb, GFP_ATOMIC); in skb_vlan_untag()
6235 if (unlikely(!skb)) in skb_vlan_untag()
6238 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short)))) in skb_vlan_untag()
6241 vhdr = (struct vlan_hdr *)skb->data; in skb_vlan_untag()
6243 __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); in skb_vlan_untag()
6245 skb_pull_rcsum(skb, VLAN_HLEN); in skb_vlan_untag()
6246 vlan_set_encap_proto(skb, vhdr); in skb_vlan_untag()
6248 skb = skb_reorder_vlan_header(skb); in skb_vlan_untag()
6249 if (unlikely(!skb)) in skb_vlan_untag()
6252 skb_reset_network_header(skb); in skb_vlan_untag()
6253 if (!skb_transport_header_was_set(skb)) in skb_vlan_untag()
6254 skb_reset_transport_header(skb); in skb_vlan_untag()
6255 skb_reset_mac_len(skb); in skb_vlan_untag()
6257 return skb; in skb_vlan_untag()
6260 kfree_skb(skb); in skb_vlan_untag()
6265 int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len) in skb_ensure_writable() argument
6267 if (!pskb_may_pull(skb, write_len)) in skb_ensure_writable()
6270 if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) in skb_ensure_writable()
6273 return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); in skb_ensure_writable()
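A sketch of a NAT-style header rewrite, assuming skb->data already sits at the IPv4 header; skb_ensure_writable() un-shares and pulls the bytes about to be modified. The helper name is hypothetical and checksum fix-up is intentionally omitted.

#include <linux/ip.h>
#include <linux/skbuff.h>

/* Rewrite the IPv4 source address in place. A real implementation would
 * also update the IP (and L4) checksums, e.g. with csum_replace4().
 */
static int my_rewrite_saddr(struct sk_buff *skb, __be32 new_saddr)
{
        int err = skb_ensure_writable(skb, sizeof(struct iphdr));

        if (err)
                return err;

        ip_hdr(skb)->saddr = new_saddr;
        return 0;
}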
6277 int skb_ensure_writable_head_tail(struct sk_buff *skb, struct net_device *dev) in skb_ensure_writable_head_tail() argument
6287 if (unlikely(needed_tailroom && skb->len < ETH_ZLEN)) in skb_ensure_writable_head_tail()
6288 needed_tailroom += ETH_ZLEN - skb->len; in skb_ensure_writable_head_tail()
6290 needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0); in skb_ensure_writable_head_tail()
6291 needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0); in skb_ensure_writable_head_tail()
6293 if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb))) in skb_ensure_writable_head_tail()
6297 return pskb_expand_head(skb, needed_headroom, needed_tailroom, in skb_ensure_writable_head_tail()
6303 * expects a non-skb_vlan_tag_present skb with a VLAN tag in the payload
6305 int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci) in __skb_vlan_pop() argument
6307 int offset = skb->data - skb_mac_header(skb); in __skb_vlan_pop()
6311 "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n", in __skb_vlan_pop()
6316 err = skb_ensure_writable(skb, VLAN_ETH_HLEN); in __skb_vlan_pop()
6320 skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); in __skb_vlan_pop()
6322 vlan_remove_tag(skb, vlan_tci); in __skb_vlan_pop()
6324 skb->mac_header += VLAN_HLEN; in __skb_vlan_pop()
6326 if (skb_network_offset(skb) < ETH_HLEN) in __skb_vlan_pop()
6327 skb_set_network_header(skb, ETH_HLEN); in __skb_vlan_pop()
6329 skb_reset_mac_len(skb); in __skb_vlan_pop()
6336 * Expects skb->data at mac header.
6338 int skb_vlan_pop(struct sk_buff *skb) in skb_vlan_pop() argument
6344 if (likely(skb_vlan_tag_present(skb))) { in skb_vlan_pop()
6345 __vlan_hwaccel_clear_tag(skb); in skb_vlan_pop()
6347 if (unlikely(!eth_type_vlan(skb->protocol))) in skb_vlan_pop()
6350 err = __skb_vlan_pop(skb, &vlan_tci); in skb_vlan_pop()
6355 if (likely(!eth_type_vlan(skb->protocol))) in skb_vlan_pop()
6358 vlan_proto = skb->protocol; in skb_vlan_pop()
6359 err = __skb_vlan_pop(skb, &vlan_tci); in skb_vlan_pop()
6363 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); in skb_vlan_pop()
6369 * Expects skb->data at mac header.
6371 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) in skb_vlan_push() argument
6373 if (skb_vlan_tag_present(skb)) { in skb_vlan_push()
6374 int offset = skb->data - skb_mac_header(skb); in skb_vlan_push()
6378 "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n", in skb_vlan_push()
6383 err = __vlan_insert_tag(skb, skb->vlan_proto, in skb_vlan_push()
6384 skb_vlan_tag_get(skb)); in skb_vlan_push()
6388 skb->protocol = skb->vlan_proto; in skb_vlan_push()
6389 skb->network_header -= VLAN_HLEN; in skb_vlan_push()
6391 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); in skb_vlan_push()
6393 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); in skb_vlan_push()
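A sketch of a switch-like "rewrite the outer tag" action built from the two helpers above; the 802.1Q protocol and the plain VID-as-TCI are simplifying assumptions, and the helper name is hypothetical.

#include <linux/if_vlan.h>
#include <linux/skbuff.h>

/* Replace the outermost VLAN tag: pop whatever is present (hwaccel or
 * in-payload), then push a fresh 802.1Q tag. Both helpers expect
 * skb->data at the mac header.
 */
static int my_rewrite_vlan(struct sk_buff *skb, u16 new_vid)
{
        int err = skb_vlan_pop(skb);

        if (err)
                return err;

        return skb_vlan_push(skb, htons(ETH_P_8021Q), new_vid);
}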
6401 * @skb: Socket buffer to modify
6403 * Drop the Ethernet header of @skb.
6405 * Expects that skb->data points to the mac header and that no VLAN tags are
6410 int skb_eth_pop(struct sk_buff *skb) in skb_eth_pop() argument
6412 if (!pskb_may_pull(skb, ETH_HLEN) || skb_vlan_tagged(skb) || in skb_eth_pop()
6413 skb_network_offset(skb) < ETH_HLEN) in skb_eth_pop()
6416 skb_pull_rcsum(skb, ETH_HLEN); in skb_eth_pop()
6417 skb_reset_mac_header(skb); in skb_eth_pop()
6418 skb_reset_mac_len(skb); in skb_eth_pop()
6427 * @skb: Socket buffer to modify
6431 * Prepend @skb with a new Ethernet header.
6433 * Expects that skb->data points to the mac header, which must be empty.
6437 int skb_eth_push(struct sk_buff *skb, const unsigned char *dst, in skb_eth_push() argument
6443 if (skb_network_offset(skb) || skb_vlan_tag_present(skb)) in skb_eth_push()
6446 err = skb_cow_head(skb, sizeof(*eth)); in skb_eth_push()
6450 skb_push(skb, sizeof(*eth)); in skb_eth_push()
6451 skb_reset_mac_header(skb); in skb_eth_push()
6452 skb_reset_mac_len(skb); in skb_eth_push()
6454 eth = eth_hdr(skb); in skb_eth_push()
6457 eth->h_proto = skb->protocol; in skb_eth_push()
6459 skb_postpush_rcsum(skb, eth, sizeof(*eth)); in skb_eth_push()
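A sketch of an L3-tunnel decap path handing the inner packet to an Ethernet device: prepend a new header with the local device address as destination and an assumed peer MAC as source. The helper and parameter names are illustrative.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Turn a bare L3 packet back into an Ethernet frame. skb->data must sit
 * at the (empty) mac header, and skb->protocol must already hold the
 * payload ethertype, which skb_eth_push() copies into h_proto.
 */
static int my_add_eth_header(struct sk_buff *skb, struct net_device *dev,
                             const u8 *peer_mac)
{
        return skb_eth_push(skb, dev->dev_addr, peer_mac);
}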
6465 /* Update the ethertype of hdr and the skb csum value if required. */
6466 static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr, in skb_mod_eth_type() argument
6469 if (skb->ip_summed == CHECKSUM_COMPLETE) { in skb_mod_eth_type()
6472 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); in skb_mod_eth_type()
6482 * @skb: buffer
6489 * Expects skb->data at mac header.
6493 int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto, in skb_mpls_push() argument
6503 if (skb->encapsulation) in skb_mpls_push()
6506 err = skb_cow_head(skb, MPLS_HLEN); in skb_mpls_push()
6510 if (!skb->inner_protocol) { in skb_mpls_push()
6511 skb_set_inner_network_header(skb, skb_network_offset(skb)); in skb_mpls_push()
6512 skb_set_inner_protocol(skb, skb->protocol); in skb_mpls_push()
6515 skb_push(skb, MPLS_HLEN); in skb_mpls_push()
6516 memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb), in skb_mpls_push()
6518 skb_reset_mac_header(skb); in skb_mpls_push()
6519 skb_set_network_header(skb, mac_len); in skb_mpls_push()
6520 skb_reset_mac_len(skb); in skb_mpls_push()
6522 lse = mpls_hdr(skb); in skb_mpls_push()
6524 skb_postpush_rcsum(skb, lse, MPLS_HLEN); in skb_mpls_push()
6527 skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto); in skb_mpls_push()
6528 skb->protocol = mpls_proto; in skb_mpls_push()
6537 * @skb: buffer
6542 * Expects skb->data at mac header.
6546 int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len, in skb_mpls_pop() argument
6551 if (unlikely(!eth_p_mpls(skb->protocol))) in skb_mpls_pop()
6554 err = skb_ensure_writable(skb, mac_len + MPLS_HLEN); in skb_mpls_pop()
6558 skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN); in skb_mpls_pop()
6559 memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb), in skb_mpls_pop()
6562 __skb_pull(skb, MPLS_HLEN); in skb_mpls_pop()
6563 skb_reset_mac_header(skb); in skb_mpls_pop()
6564 skb_set_network_header(skb, mac_len); in skb_mpls_pop()
6570 hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN); in skb_mpls_pop()
6571 skb_mod_eth_type(skb, hdr, next_proto); in skb_mpls_pop()
6573 skb->protocol = next_proto; in skb_mpls_pop()
6582 * @skb: buffer
6585 * Expects skb->data at mac header.
6589 int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse) in skb_mpls_update_lse() argument
6593 if (unlikely(!eth_p_mpls(skb->protocol))) in skb_mpls_update_lse()
6596 err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); in skb_mpls_update_lse()
6600 if (skb->ip_summed == CHECKSUM_COMPLETE) { in skb_mpls_update_lse()
6601 __be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse }; in skb_mpls_update_lse()
6603 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); in skb_mpls_update_lse()
6606 mpls_hdr(skb)->label_stack_entry = mpls_lse; in skb_mpls_update_lse()
6615 * @skb: buffer
6617 * Expects skb->data at mac header.
6621 int skb_mpls_dec_ttl(struct sk_buff *skb) in skb_mpls_dec_ttl() argument
6626 if (unlikely(!eth_p_mpls(skb->protocol))) in skb_mpls_dec_ttl()
6629 if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN)) in skb_mpls_dec_ttl()
6632 lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry); in skb_mpls_dec_ttl()
6640 return skb_mpls_update_lse(skb, cpu_to_be32(lse)); in skb_mpls_dec_ttl()
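A sketch of an LSR-style forwarding step; skb_mpls_dec_ttl() fails once the TTL would reach zero, at which point the packet is dropped here. The helper name is hypothetical.

#include <linux/skbuff.h>

/* Decrement the TTL of the outermost MPLS label stack entry, dropping
 * the packet when the helper reports an expired TTL or a non-MPLS frame.
 */
static int my_mpls_forward_step(struct sk_buff *skb)
{
        int err = skb_mpls_dec_ttl(skb);

        if (err)
                kfree_skb(skb);

        return err;
}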
6645 * alloc_skb_with_frags - allocate skb with page frags
6653 * This can be used to allocate a paged skb, given a maximal order for frags.
6662 struct sk_buff *skb; in alloc_skb_with_frags() local
6671 skb = alloc_skb(header_len, gfp_mask); in alloc_skb_with_frags()
6672 if (!skb) in alloc_skb_with_frags()
6697 skb_fill_page_desc(skb, nr_frags, page, 0, chunk); in alloc_skb_with_frags()
6699 skb->truesize += (PAGE_SIZE << order); in alloc_skb_with_frags()
6702 return skb; in alloc_skb_with_frags()
6705 kfree_skb(skb); in alloc_skb_with_frags()
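A sketch assuming the five-argument form (linear header length, frag data length, maximum page order, error pointer, gfp); the sizes chosen and the helper name are arbitrary.

#include <linux/err.h>
#include <linux/skbuff.h>

/* Allocate a datagram-style skb: 128 bytes of linear space for headers
 * plus @payload_len bytes spread over page frags of at most order 3.
 */
static struct sk_buff *my_alloc_datagram(unsigned long payload_len)
{
        struct sk_buff *skb;
        int err;

        skb = alloc_skb_with_frags(128, payload_len, 3, &err, GFP_KERNEL);
        if (!skb)
                return ERR_PTR(err);

        skb_reserve(skb, 64);   /* headroom for protocol headers */
        return skb;
}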
6710 /* carve out the first off bytes from skb when off < headlen */
6711 static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off, in pskb_carve_inside_header() argument
6715 unsigned int size = skb_end_offset(skb); in pskb_carve_inside_header()
6719 if (skb_pfmemalloc(skb)) in pskb_carve_inside_header()
6728 skb_copy_from_linear_data_offset(skb, off, data, new_hlen); in pskb_carve_inside_header()
6729 skb->len -= off; in pskb_carve_inside_header()
6732 skb_shinfo(skb), in pskb_carve_inside_header()
6734 frags[skb_shinfo(skb)->nr_frags])); in pskb_carve_inside_header()
6735 if (skb_cloned(skb)) { in pskb_carve_inside_header()
6737 if (skb_orphan_frags(skb, gfp_mask)) { in pskb_carve_inside_header()
6741 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in pskb_carve_inside_header()
6742 skb_frag_ref(skb, i); in pskb_carve_inside_header()
6743 if (skb_has_frag_list(skb)) in pskb_carve_inside_header()
6744 skb_clone_fraglist(skb); in pskb_carve_inside_header()
6745 skb_release_data(skb, SKB_CONSUMED); in pskb_carve_inside_header()
6750 skb_free_head(skb); in pskb_carve_inside_header()
6753 skb->head = data; in pskb_carve_inside_header()
6754 skb->data = data; in pskb_carve_inside_header()
6755 skb->head_frag = 0; in pskb_carve_inside_header()
6756 skb_set_end_offset(skb, size); in pskb_carve_inside_header()
6757 skb_set_tail_pointer(skb, skb_headlen(skb)); in pskb_carve_inside_header()
6758 skb_headers_offset_update(skb, 0); in pskb_carve_inside_header()
6759 skb->cloned = 0; in pskb_carve_inside_header()
6760 skb->hdr_len = 0; in pskb_carve_inside_header()
6761 skb->nohdr = 0; in pskb_carve_inside_header()
6762 atomic_set(&skb_shinfo(skb)->dataref, 1); in pskb_carve_inside_header()
6767 static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);
6769 /* carve out the first eat bytes from skb's frag_list. May recurse into
6822 /* carve off first len bytes from skb. Split line (off) is in the
6823 * non-linear part of skb
6825 static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off, in pskb_carve_inside_nonlinear() argument
6829 unsigned int size = skb_end_offset(skb); in pskb_carve_inside_nonlinear()
6831 const int nfrags = skb_shinfo(skb)->nr_frags; in pskb_carve_inside_nonlinear()
6834 if (skb_pfmemalloc(skb)) in pskb_carve_inside_nonlinear()
6843 skb_shinfo(skb), offsetof(struct skb_shared_info, frags[0])); in pskb_carve_inside_nonlinear()
6844 if (skb_orphan_frags(skb, gfp_mask)) { in pskb_carve_inside_nonlinear()
6850 int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]); in pskb_carve_inside_nonlinear()
6853 shinfo->frags[k] = skb_shinfo(skb)->frags[i]; in pskb_carve_inside_nonlinear()
6867 skb_frag_ref(skb, i); in pskb_carve_inside_nonlinear()
6873 if (skb_has_frag_list(skb)) in pskb_carve_inside_nonlinear()
6874 skb_clone_fraglist(skb); in pskb_carve_inside_nonlinear()
6879 if (skb_has_frag_list(skb)) in pskb_carve_inside_nonlinear()
6880 kfree_skb_list(skb_shinfo(skb)->frag_list); in pskb_carve_inside_nonlinear()
6884 skb_release_data(skb, SKB_CONSUMED); in pskb_carve_inside_nonlinear()
6886 skb->head = data; in pskb_carve_inside_nonlinear()
6887 skb->head_frag = 0; in pskb_carve_inside_nonlinear()
6888 skb->data = data; in pskb_carve_inside_nonlinear()
6889 skb_set_end_offset(skb, size); in pskb_carve_inside_nonlinear()
6890 skb_reset_tail_pointer(skb); in pskb_carve_inside_nonlinear()
6891 skb_headers_offset_update(skb, 0); in pskb_carve_inside_nonlinear()
6892 skb->cloned = 0; in pskb_carve_inside_nonlinear()
6893 skb->hdr_len = 0; in pskb_carve_inside_nonlinear()
6894 skb->nohdr = 0; in pskb_carve_inside_nonlinear()
6895 skb->len -= off; in pskb_carve_inside_nonlinear()
6896 skb->data_len = skb->len; in pskb_carve_inside_nonlinear()
6897 atomic_set(&skb_shinfo(skb)->dataref, 1); in pskb_carve_inside_nonlinear()
6901 /* remove len bytes from the beginning of the skb */
6902 static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp) in pskb_carve() argument
6904 int headlen = skb_headlen(skb); in pskb_carve()
6907 return pskb_carve_inside_header(skb, len, headlen, gfp); in pskb_carve()
6909 return pskb_carve_inside_nonlinear(skb, len, headlen, gfp); in pskb_carve()
6912 /* Extract to_copy bytes starting at off from skb, and return this in
6913 * a new skb
6915 struct sk_buff *pskb_extract(struct sk_buff *skb, int off, in pskb_extract() argument
6918 struct sk_buff *clone = skb_clone(skb, gfp); in pskb_extract()
6934 * @skb: buffer
6936 * Can be used to save memory before skb is added to a busy queue.
6937 * If packet has bytes in frags and enough tail room in skb->head,
6941 * We do not reallocate skb->head and thus cannot fail.
6942 * Caller must re-evaluate skb->truesize if needed.
6944 void skb_condense(struct sk_buff *skb) in skb_condense() argument
6946 if (skb->data_len) { in skb_condense()
6947 if (skb->data_len > skb->end - skb->tail || in skb_condense()
6948 skb_cloned(skb) || !skb_frags_readable(skb)) in skb_condense()
6952 __pskb_pull_tail(skb, skb->data_len); in skb_condense()
6954 /* At this point, skb->truesize might be overestimated, in skb_condense()
6955 * because skb had a fragment, and fragments do not tell in skb_condense()
6957 * When we pulled its content into skb->head, fragment in skb_condense()
6959 * adjust skb->truesize, not knowing the frag truesize. in skb_condense()
6961 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); in skb_condense()
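A sketch of the intended call site, modelled loosely on TCP's out-of-order queue handling; the queue/owner handling around it and the helper name are illustrative, and the caller is assumed to serialize access to @q.

#include <linux/skbuff.h>
#include <net/sock.h>

/* Shrink @skb before parking it on a possibly long queue, then charge the
 * (possibly reduced) truesize to the receiving socket.
 */
static void my_enqueue(struct sock *sk, struct sk_buff_head *q,
                       struct sk_buff *skb)
{
        skb_condense(skb);

        skb_orphan(skb);                /* drop any previous owner first */
        skb_set_owner_r(skb, sk);       /* charge sk_rmem_alloc with new truesize */
        __skb_queue_tail(q, skb);
}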
6972 * __skb_ext_alloc - allocate a new skb extensions storage
6977 * skb via __skb_ext_set().
7029 * __skb_ext_set - attach the specified extension storage to this skb
7030 * @skb: buffer
7038 void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id, in __skb_ext_set() argument
7043 skb_ext_put(skb); in __skb_ext_set()
7047 skb->extensions = ext; in __skb_ext_set()
7048 skb->active_extensions = 1 << id; in __skb_ext_set()
7055 * @skb: buffer
7062 * If the skb was cloned, COW applies and the returned memory can be
7067 void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id) in skb_ext_add() argument
7072 if (skb->active_extensions) { in skb_ext_add()
7073 old = skb->extensions; in skb_ext_add()
7075 new = skb_ext_maybe_cow(old, skb->active_extensions); in skb_ext_add()
7095 skb->slow_gro = 1; in skb_ext_add()
7096 skb->extensions = new; in skb_ext_add()
7097 skb->active_extensions |= 1 << id; in skb_ext_add()
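A sketch assuming CONFIG_NET_TC_SKB_EXT: attach (or COW) the TC extension and record a chain index, roughly what the flow-offload paths do. Note that skb_ext_add() does not zero the extension memory; the helper name is hypothetical.

#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/string.h>

/* Record the TC chain a packet was classified on in an skb extension. */
static int my_record_chain(struct sk_buff *skb, u32 chain)
{
        struct tc_skb_ext *ext;

        ext = skb_ext_add(skb, TC_SKB_EXT);
        if (!ext)
                return -ENOMEM;

        memset(ext, 0, sizeof(*ext));   /* extension memory is not zeroed */
        ext->chain = chain;
        return 0;
}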
7120 void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id) in __skb_ext_del() argument
7122 struct skb_ext *ext = skb->extensions; in __skb_ext_del()
7124 skb->active_extensions &= ~(1 << id); in __skb_ext_del()
7125 if (skb->active_extensions == 0) { in __skb_ext_del()
7126 skb->extensions = NULL; in __skb_ext_del()
7165 static void kfree_skb_napi_cache(struct sk_buff *skb) in kfree_skb_napi_cache() argument
7167 /* if SKB is a clone, don't handle this case */ in kfree_skb_napi_cache()
7168 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) { in kfree_skb_napi_cache()
7169 __kfree_skb(skb); in kfree_skb_napi_cache()
7174 __napi_kfree_skb(skb, SKB_CONSUMED); in kfree_skb_napi_cache()
7179 * skb_attempt_defer_free - queue skb for remote freeing
7180 * @skb: buffer
7182 * Put @skb in a per-cpu list, using the cpu which
7183 * allocated the skb/pages to reduce false sharing
7186 void skb_attempt_defer_free(struct sk_buff *skb) in skb_attempt_defer_free() argument
7190 int cpu = skb->alloc_cpu; in skb_attempt_defer_free()
7197 nodefer: kfree_skb_napi_cache(skb); in skb_attempt_defer_free()
7201 DEBUG_NET_WARN_ON_ONCE(skb_dst(skb)); in skb_attempt_defer_free()
7202 DEBUG_NET_WARN_ON_ONCE(skb->destructor); in skb_attempt_defer_free()
7203 DEBUG_NET_WARN_ON_ONCE(skb_nfct(skb)); in skb_attempt_defer_free()
7213 llist_add(&skb->ll_node, &sdn->defer_list); in skb_attempt_defer_free()
7225 static void skb_splice_csum_page(struct sk_buff *skb, struct page *page, in skb_splice_csum_page() argument
7234 skb->csum = csum_block_add(skb->csum, csum, skb->len); in skb_splice_csum_page()
7239 * @skb: The buffer to add pages to
7251 ssize_t skb_splice_from_iter(struct sk_buff *skb, struct iov_iter *iter, in skb_splice_from_iter() argument
7264 space = frag_limit - skb_shinfo(skb)->nr_frags; in skb_splice_from_iter()
7286 ret = skb_append_pagefrags(skb, page, off, part, in skb_splice_from_iter()
7293 if (skb->ip_summed == CHECKSUM_NONE) in skb_splice_from_iter()
7294 skb_splice_csum_page(skb, page, off, part); in skb_splice_from_iter()
7307 skb_len_add(skb, spliced); in skb_splice_from_iter()