
Searched refs:skbs (Results 1 – 25 of 49) sorted by relevance

/linux/Documentation/networking/
skbuff.rst
15 Shared skbs and skb clones
19 to keep a struct sk_buff alive. skbs with a ``sk_buff.users != 1`` are referred
20 to as shared skbs (see skb_shared()).
22 skb_clone() allows for fast duplication of skbs. None of the data buffers
24 &skb_shared_info.refcount indicates the number of skbs pointing at the same
27 dataref and headerless skbs
31 :doc: dataref and headerless skbs
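The skbuff.rst hits above cover shared skbs and skb clones. Below is a minimal sketch of those refcounting rules; the wrapper function is hypothetical, while skb_get(), skb_shared(), skb_clone() and consume_skb() are the real helpers the document refers to.

#include <linux/gfp.h>
#include <linux/skbuff.h>

static void example_share_and_clone(struct sk_buff *skb)
{
        struct sk_buff *clone;

        /* Taking an extra reference makes the skb "shared" (users > 1). */
        skb_get(skb);
        WARN_ON(!skb_shared(skb));

        /* A clone gets its own struct sk_buff but points at the same data
         * buffers; the shared-info refcount quoted above tracks how many
         * skbs reference that data. */
        clone = skb_clone(skb, GFP_ATOMIC);
        if (clone)
                consume_skb(clone);     /* drop the clone; data stays live */

        consume_skb(skb);               /* drop the reference from skb_get() */
}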
netdev-features.rst
129 ndo_start_xmit can handle skbs with frags in high memory.
133 Those features say that ndo_start_xmit can handle fragmented skbs:
134 NETIF_F_SG --- paged skbs (skb_shinfo()->frags), NETIF_F_FRAGLIST ---
135 chained skbs (skb->next/prev list).
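The netdev-features.rst hits describe which feature flags promise that ndo_start_xmit copes with fragmented skbs. A hedged sketch of how a driver might advertise and honour them; the setup and xmit functions here are illustrative, not taken from any real driver.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
                                      struct net_device *dev)
{
        unsigned int i;

        /* With NETIF_F_SG the payload may continue in paged frags ... */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                (void)skb_frag_size(frag);      /* map each frag for DMA here */
        }

        /* ... and with NETIF_F_FRAGLIST further skbs may be chained on
         * skb_shinfo(skb)->frag_list. */
        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
}

static void example_setup(struct net_device *dev)
{
        dev->features    |= NETIF_F_SG | NETIF_F_FRAGLIST;
        dev->hw_features |= NETIF_F_SG | NETIF_F_FRAGLIST;
}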
xdp-rx-metadata.rst
87 ``skbs``. However, TC-BPF programs can access the XDP metadata area using
91 can override some of the metadata used for building ``skbs``.
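The xdp-rx-metadata.rst hits say TC-BPF programs can read the XDP metadata area that an XDP program set up. A hedged BPF-C sketch of that handoff, assuming the usual libbpf section conventions; the 4-byte marker value is purely illustrative.

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_store_meta(struct xdp_md *ctx)
{
        __u32 *meta;
        void *data;

        /* Grow the metadata area by 4 bytes in front of the packet data. */
        if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta)))
                return XDP_PASS;

        meta = (void *)(long)ctx->data_meta;
        data = (void *)(long)ctx->data;
        if ((void *)(meta + 1) > data)  /* verifier bounds check */
                return XDP_PASS;

        *meta = 0xdeadbeef;             /* illustrative marker */
        return XDP_PASS;
}

SEC("tc")
int tc_read_meta(struct __sk_buff *skb)
{
        __u32 *meta = (void *)(long)skb->data_meta;
        void *data = (void *)(long)skb->data;

        if ((void *)(meta + 1) <= data)
                skb->mark = *meta;      /* metadata survived the skb build */

        return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";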
segmentation-offloads.rst
60 UFO is deprecated: modern kernels will no longer generate UFO skbs, but can
165 padded and stored as chained skbs, and skb_segment() splits based on those.
181 will check for GSO_BY_FRAGS and WARN if asked to manipulate these skbs.
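The segmentation-offloads.rst hits mention the GSO_BY_FRAGS marker that skb_segment() honours. A small hedged sketch of the check that code typically performs before trusting gso_size; the wrapper name is hypothetical.

#include <linux/skbuff.h>

static bool example_segments_by_frags(const struct sk_buff *skb)
{
        /* With GSO_BY_FRAGS the segment boundaries come from the frag_list
         * (SCTP-style GSO), so gso_size is a sentinel, not a length. */
        return skb_is_gso(skb) &&
               skb_shinfo(skb)->gso_size == GSO_BY_FRAGS;
}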
devmem.rst
241 Unreadable skbs
245 results in a few quirks for payloads of devmem skbs:
248 not possible with devmem skbs.
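The devmem.rst hits point at the payload quirks of unreadable skbs. A hedged sketch of the guard such code needs, assuming the skb_frags_readable() helper introduced by the devmem work; if a given tree spells the check differently, treat this as pseudocode.

#include <linux/errno.h>
#include <linux/skbuff.h>

static int example_copy_payload(struct sk_buff *skb, void *buf, int len)
{
        /* Devmem frags live in device memory the CPU cannot read, so a
         * plain copy of the payload is not possible for such skbs. */
        if (!skb_frags_readable(skb))
                return -EFAULT;

        return skb_copy_bits(skb, 0, buf, len);
}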
/linux/drivers/net/ethernet/sfc/
selftest.c
87 struct sk_buff **skbs; member
431 state->skbs[i] = skb; in efx_begin_loopback()
489 skb = state->skbs[i]; in efx_end_loopback()
545 state->skbs = kcalloc(state->packet_count, in efx_test_loopback()
546 sizeof(state->skbs[0]), GFP_KERNEL); in efx_test_loopback()
547 if (!state->skbs) in efx_test_loopback()
568 kfree(state->skbs); in efx_test_loopback()
/linux/drivers/net/ethernet/sfc/siena/
selftest.c
88 struct sk_buff **skbs; member
432 state->skbs[i] = skb; in efx_begin_loopback()
490 skb = state->skbs[i]; in efx_end_loopback()
546 state->skbs = kcalloc(state->packet_count, in efx_test_loopback()
547 sizeof(state->skbs[0]), GFP_KERNEL); in efx_test_loopback()
548 if (!state->skbs) in efx_test_loopback()
569 kfree(state->skbs); in efx_test_loopback()
/linux/drivers/net/ethernet/sfc/falcon/
selftest.c
85 struct sk_buff **skbs; member
434 state->skbs[i] = skb; in ef4_begin_loopback()
492 skb = state->skbs[i]; in ef4_end_loopback()
548 state->skbs = kcalloc(state->packet_count, in ef4_test_loopback()
549 sizeof(state->skbs[0]), GFP_KERNEL); in ef4_test_loopback()
550 if (!state->skbs) in ef4_test_loopback()
571 kfree(state->skbs); in ef4_test_loopback()
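The three sfc selftest variants above (sfc, siena, falcon) share one pattern: a kcalloc'ed array of sk_buff pointers sized by the packet count, filled as loopback packets are sent and freed when the test ends. A generic sketch of that shape; the struct and function names are illustrative, not the driver's.

#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

struct loopback_state {
        struct sk_buff **skbs;  /* one slot per loopback packet */
        int packet_count;
};

static int loopback_state_init(struct loopback_state *state)
{
        state->skbs = kcalloc(state->packet_count, sizeof(state->skbs[0]),
                              GFP_KERNEL);
        return state->skbs ? 0 : -ENOMEM;
}

static void loopback_state_fini(struct loopback_state *state)
{
        kfree(state->skbs);
        state->skbs = NULL;
}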
/linux/net/mctp/test/
route-test.c
885 struct sk_buff *skbs[2]; in mctp_test_route_input_sk_fail_frag() local
896 for (i = 0; i < ARRAY_SIZE(skbs); i++) { in mctp_test_route_input_sk_fail_frag()
897 skbs[i] = mctp_test_create_skb(&hdrs[i], 10); in mctp_test_route_input_sk_fail_frag()
898 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skbs[i]); in mctp_test_route_input_sk_fail_frag()
899 skb_get(skbs[i]); in mctp_test_route_input_sk_fail_frag()
901 mctp_test_skb_set_dev(skbs[i], dev); in mctp_test_route_input_sk_fail_frag()
907 rc = mctp_route_input(&rt->rt, skbs[0]); in mctp_test_route_input_sk_fail_frag()
911 rc = mctp_route_input(&rt->rt, skbs[1]); in mctp_test_route_input_sk_fail_frag()
915 KUNIT_EXPECT_EQ(test, refcount_read(&skbs[0]->users), 1); in mctp_test_route_input_sk_fail_frag()
916 kfree_skb(skbs[0]); in mctp_test_route_input_sk_fail_frag()
[all …]
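The route-test.c hits show the KUnit trick of taking an extra reference with skb_get() before handing an skb to a consuming call, so the test can still inspect it afterwards. A stripped-down sketch of that idiom; the consumer callback is hypothetical.

#include <linux/skbuff.h>

static void example_submit_and_inspect(struct sk_buff *skb,
                                       int (*consume)(struct sk_buff *skb))
{
        skb_get(skb);           /* users: 1 -> 2, skb survives the call */
        consume(skb);           /* consumer drops its own reference */

        /* Only our reference should remain at this point. */
        WARN_ON(refcount_read(&skb->users) != 1);
        kfree_skb(skb);         /* drop it */
}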
/linux/net/mac80211/
tx.c
919 __skb_queue_tail(&tx->skbs, tmp); in ieee80211_fragment()
961 __skb_queue_tail(&tx->skbs, skb); in ieee80211_tx_h_fragment()
998 skb_queue_walk(&tx->skbs, skb) { in ieee80211_tx_h_fragment()
1004 if (!skb_queue_is_last(&tx->skbs, skb)) { in ieee80211_tx_h_fragment()
1034 skb_queue_walk(&tx->skbs, skb) { in ieee80211_tx_h_stats()
1085 skb_queue_walk(&tx->skbs, skb) { in ieee80211_tx_h_calculate_duration()
1089 if (!skb_queue_is_last(&tx->skbs, skb)) { in ieee80211_tx_h_calculate_duration()
1090 struct sk_buff *next = skb_queue_next(&tx->skbs, skb); in ieee80211_tx_h_calculate_duration()
1217 __skb_queue_head_init(&tx->skbs); in ieee80211_tx_prepare()
1672 struct sk_buff_head *skbs, in ieee80211_tx_frags() argument
[all …]
wep.c
299 skb_queue_walk(&tx->skbs, skb) { in ieee80211_crypto_wep_encrypt()
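The mac80211 hits above revolve around a per-transmission sk_buff_head (tx->skbs): fragments are appended with __skb_queue_tail() and later iterated with skb_queue_walk(), peeking at the next entry for every fragment but the last. A minimal sketch of that queue idiom, outside mac80211.

#include <linux/skbuff.h>

static void example_build_queue(struct sk_buff_head *frags,
                                struct sk_buff *skb)
{
        __skb_queue_head_init(frags);
        __skb_queue_tail(frags, skb);
}

static void example_walk_fragments(struct sk_buff_head *frags)
{
        struct sk_buff *skb;

        skb_queue_walk(frags, skb) {
                if (!skb_queue_is_last(frags, skb)) {
                        struct sk_buff *next = skb_queue_next(frags, skb);

                        /* e.g. fold the next fragment's length into this
                         * fragment's duration/accounting */
                        (void)next->len;
                }
        }
}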
/linux/drivers/net/wireless/mediatek/mt7601u/
dma.c
283 struct sk_buff_head skbs; in mt7601u_tx_tasklet() local
286 __skb_queue_head_init(&skbs); in mt7601u_tx_tasklet()
295 skb_queue_splice_init(&dev->tx_skb_done, &skbs); in mt7601u_tx_tasklet()
299 while (!skb_queue_empty(&skbs)) { in mt7601u_tx_tasklet()
300 struct sk_buff *skb = __skb_dequeue(&skbs); in mt7601u_tx_tasklet()
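The mt7601u dma.c hits show the classic splice-then-drain shape: completed skbs are moved from a shared queue onto a stack-local sk_buff_head under the lock, then freed with the lock released. A hedged sketch; the lock and queue parameters are illustrative.

#include <linux/skbuff.h>
#include <linux/spinlock.h>

static void example_drain_done_queue(struct sk_buff_head *done,
                                     spinlock_t *lock)
{
        struct sk_buff_head local;
        struct sk_buff *skb;

        __skb_queue_head_init(&local);

        spin_lock_bh(lock);
        skb_queue_splice_init(done, &local);    /* steal everything at once */
        spin_unlock_bh(lock);

        while ((skb = __skb_dequeue(&local)))
                dev_kfree_skb(skb);             /* free without holding lock */
}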
/linux/drivers/net/ethernet/qlogic/qede/
qede_fp.c
79 struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb; in qede_free_tx_pkt()
84 bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD; in qede_free_tx_pkt()
124 txq->sw_tx_ring.skbs[idx].skb = NULL; in qede_free_tx_pkt()
125 txq->sw_tx_ring.skbs[idx].flags = 0; in qede_free_tx_pkt()
136 struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb; in qede_free_failed_tx_pkt()
172 txq->sw_tx_ring.skbs[idx].skb = NULL; in qede_free_failed_tx_pkt()
173 txq->sw_tx_ring.skbs[idx].flags = 0; in qede_free_failed_tx_pkt()
1519 txq->sw_tx_ring.skbs[idx].skb = skb; in qede_start_xmit()
1641 txq->sw_tx_ring.skbs[idx].flags |= QEDE_TSO_SPLIT_BD; in qede_start_xmit()
/linux/net/bpf/
test_run.c
117 struct sk_buff **skbs; member
178 xdp->skbs = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL); in xdp_test_run_setup()
179 if (!xdp->skbs) in xdp_test_run_setup()
209 kvfree(xdp->skbs); in xdp_test_run_setup()
220 kfree(xdp->skbs); in xdp_test_run_teardown()
253 struct sk_buff **skbs, in xdp_recv_frames() argument
261 (void **)skbs); in xdp_recv_frames()
270 struct sk_buff *skb = skbs[i]; in xdp_recv_frames()
361 ret = xdp_recv_frames(frames, nframes, xdp->skbs, xdp->dev); in xdp_test_run_batch()
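In contrast to the kcalloc'ed arrays above, test_run.c sizes its batch array with kvmalloc_array(), which can fall back to vmalloc for large batches; such memory is normally released with kvfree(). A small illustrative sketch of that pairing, with hypothetical wrapper names.

#include <linux/skbuff.h>
#include <linux/slab.h>

static struct sk_buff **example_alloc_batch(u32 batch_size)
{
        /* May be backed by vmalloc when batch_size is large. */
        return kvmalloc_array(batch_size, sizeof(struct sk_buff *),
                              GFP_KERNEL);
}

static void example_free_batch(struct sk_buff **skbs)
{
        kvfree(skbs);   /* works for both kmalloc- and vmalloc-backed memory */
}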
/linux/drivers/net/wireless/intel/iwlwifi/dvm/
tx.c
1123 struct sk_buff_head skbs; in iwlagn_rx_reply_tx() local
1144 __skb_queue_head_init(&skbs); in iwlagn_rx_reply_tx()
1173 iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs, false); in iwlagn_rx_reply_tx()
1178 skb_queue_walk(&skbs, skb) { in iwlagn_rx_reply_tx()
1249 while (!skb_queue_empty(&skbs)) { in iwlagn_rx_reply_tx()
1250 skb = __skb_dequeue(&skbs); in iwlagn_rx_reply_tx()
/linux/drivers/net/ethernet/renesas/
rswitch.c
295 kfree(gq->skbs); in rswitch_gwca_queue_free()
296 gq->skbs = NULL; in rswitch_gwca_queue_free()
334 gq->skbs = kcalloc(gq->ring_size, sizeof(*gq->skbs), GFP_KERNEL); in rswitch_gwca_queue_alloc()
335 if (!gq->skbs) in rswitch_gwca_queue_alloc()
878 skb = gq->skbs[gq->dirty]; in rswitch_tx_free()
885 dev_kfree_skb_any(gq->skbs[gq->dirty]); in rswitch_tx_free()
886 gq->skbs[gq->dirty] = NULL; in rswitch_tx_free()
1737 gq->skbs[(gq->cur + nr_desc - 1) % gq->ring_size] = skb; in rswitch_start_xmit()
1758 gq->skbs[(gq->cur + nr_desc - 1) % gq->ring_size] = NULL; in rswitch_start_xmit()
/linux/drivers/net/ethernet/mediatek/
mtk_star_emac.c
241 struct sk_buff *skbs[MTK_STAR_RING_NUM_DESCS]; member
325 desc_data->skb = ring->skbs[ring->tail]; in mtk_star_ring_pop_tail()
328 ring->skbs[ring->tail] = NULL; in mtk_star_ring_pop_tail()
349 ring->skbs[ring->head] = desc_data->skb; in mtk_star_ring_push_head()
718 ring->skbs[i] = skb; in mtk_star_prepare_rx_skbs()
738 desc_data.skb = ring->skbs[i]; in mtk_star_ring_free_skbs()
/linux/tools/testing/selftests/net/packetdrill/
tcp_zerocopy_maxfrags.pkt
93 // verify that it is split in skbs with 17 frags
/linux/drivers/net/ethernet/actions/
owl-emac.h
247 struct sk_buff **skbs; member
/linux/drivers/net/wireless/intel/iwlegacy/
3945.c
283 skb = txq->skbs[txq->q.read_ptr]; in il3945_tx_queue_reclaim()
285 txq->skbs[txq->q.read_ptr] = NULL; in il3945_tx_queue_reclaim()
332 info = IEEE80211_SKB_CB(txq->skbs[txq->q.read_ptr]); in il3945_hdl_tx()
667 if (txq->skbs) { in il3945_hw_txq_free_tfd()
668 struct sk_buff *skb = txq->skbs[txq->q.read_ptr]; in il3945_hw_txq_free_tfd()
673 txq->skbs[txq->q.read_ptr] = NULL; in il3945_hw_txq_free_tfd()
/linux/drivers/net/ethernet/chelsio/inline_crypto/chtls/
chtls_main.c
423 struct sk_buff **skbs, const __be64 *rsp) in chtls_recv() argument
425 struct sk_buff *skb = *skbs; in chtls_recv()
/linux/drivers/net/wireless/intel/iwlwifi/
iwl-trans.c
642 struct sk_buff_head *skbs, bool is_flush) in iwl_trans_reclaim() argument
648 iwl_pcie_reclaim(trans, queue, ssn, skbs, is_flush); in iwl_trans_reclaim()
/linux/net/core/
xdp.c
621 int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp) in xdp_alloc_skb_bulk() argument
623 n_skb = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache, gfp, n_skb, skbs); in xdp_alloc_skb_bulk()
/linux/net/bluetooth/
l2cap_core.c
61 struct sk_buff_head *skbs, u8 event);
1924 struct sk_buff_head *skbs) in l2cap_streaming_send() argument
1929 BT_DBG("chan %p, skbs %p", chan, skbs); in l2cap_streaming_send()
1931 skb_queue_splice_tail_init(skbs, &chan->tx_q); in l2cap_streaming_send()
2731 struct sk_buff_head *skbs, u8 event) in l2cap_tx_state_xmit() argument
2733 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs, in l2cap_tx_state_xmit()
2739 chan->tx_send_head = skb_peek(skbs); in l2cap_tx_state_xmit()
2741 skb_queue_splice_tail_init(skbs, &chan->tx_q); in l2cap_tx_state_xmit()
2803 struct sk_buff_head *skbs, u8 event) in l2cap_tx_state_wait_f() argument
2805 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs, in l2cap_tx_state_wait_f()
[all …]
/linux/drivers/net/ethernet/chelsio/cxgb3/
sge.c
1877 struct sk_buff *skbs[], int n) in deliver_partial_bundle() argument
1881 tdev->recv(tdev, skbs, n); in deliver_partial_bundle()
1904 struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE]; in ofld_poll() local
1926 skbs[ngathered] = skb; in ofld_poll()
1929 adapter->tdev.recv(&adapter->tdev, skbs, in ofld_poll()
1940 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered); in ofld_poll()
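The sge.c hits gather offload skbs into a fixed-size array and hand them to the offload device's recv() callback in bundles, flushing any partial bundle at the end of the poll. A generic sketch of that batching pattern; the bundle size and consumer callback are illustrative.

#include <linux/skbuff.h>

#define EXAMPLE_BUNDLE_SIZE 8

static void example_deliver_bundled(struct sk_buff **queue, int count,
                                    void (*recv)(struct sk_buff **skbs, int n))
{
        struct sk_buff *bundle[EXAMPLE_BUNDLE_SIZE];
        int i, gathered = 0;

        for (i = 0; i < count; i++) {
                bundle[gathered++] = queue[i];
                if (gathered == EXAMPLE_BUNDLE_SIZE) {
                        recv(bundle, gathered);         /* full bundle */
                        gathered = 0;
                }
        }

        if (gathered)
                recv(bundle, gathered);                 /* partial bundle */
}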
