| /linux/Documentation/networking/ |
| H A D | skbuff.rst | 15 Shared skbs and skb clones 19 to keep a struct sk_buff alive. skbs with a ``sk_buff.users != 1`` are referred 20 to as shared skbs (see skb_shared()). 22 skb_clone() allows for fast duplication of skbs. None of the data buffers 24 &skb_shared_info.refcount indicates the number of skbs pointing at the same 27 dataref and headerless skbs 31 :doc: dataref and headerless skbs
|
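The skbuff.rst hits above describe the shared-vs-cloned distinction: an skb with ``sk_buff.users != 1`` is shared and must not be modified in place, while skb_clone() duplicates only the metadata and leaves the data buffers shared. A minimal sketch of that rule, not taken from any of the indexed files (the function name and error handling are hypothetical):

```c
/* Minimal sketch, assuming a driver/protocol context that owns one
 * reference to @skb. Mirrors the rule documented in skbuff.rst.
 */
#include <linux/skbuff.h>

static struct sk_buff *example_get_writable_header(struct sk_buff *skb)
{
	struct sk_buff *nskb;

	/* skb_shared() is true when sk_buff.users != 1. */
	if (!skb_shared(skb))
		return skb;		/* sole owner: safe to modify */

	/* Shared: clone the metadata; the packet data stays shared and is
	 * tracked via skb_shared_info.
	 */
	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return NULL;

	consume_skb(skb);		/* drop our reference to the shared skb */
	return nskb;
}
```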
| H A D | xdp-rx-metadata.rst | 87 ``skbs``. However, TC-BPF programs can access the XDP metadata area using 91 can override some of the metadata used for building ``skbs``.
|
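The xdp-rx-metadata.rst hits note that a TC-BPF program can read (and partly override) the metadata area an XDP program placed in front of the packet. A hedged sketch of the consumer side, assuming a made-up ``struct xdp_meta`` layout agreed with the XDP program:

```c
/* Illustrative TC-BPF sketch, not taken from the indexed files.
 * The metadata layout below is a hypothetical example.
 */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

struct xdp_meta {		/* hypothetical layout shared with the XDP side */
	__u32 rx_hash;
};

SEC("tc")
int read_xdp_meta(struct __sk_buff *skb)
{
	void *data = (void *)(long)skb->data;
	void *data_meta = (void *)(long)skb->data_meta;
	struct xdp_meta *meta = data_meta;

	/* The verifier requires this bounds check before dereferencing. */
	if ((void *)(meta + 1) > data)
		return TC_ACT_OK;

	bpf_printk("rx_hash from XDP metadata: %u", meta->rx_hash);
	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";
```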
| /linux/kernel/bpf/ |
| H A D | cpumap.c | 138 void **skbs, u32 skb_n, in cpu_map_bpf_prog_run_skb() argument 146 struct sk_buff *skb = skbs[i]; in cpu_map_bpf_prog_run_skb() 151 skbs[pass++] = skb; in cpu_map_bpf_prog_run_skb() 246 void **skbs, struct cpu_map_ret *ret, in cpu_map_bpf_prog_run() argument 260 ret->skb_n = cpu_map_bpf_prog_run_skb(rcpu, skbs, ret->skb_n, in cpu_map_bpf_prog_run() 272 memmove(&skbs[ret->xdp_n], skbs, ret->skb_n * sizeof(*skbs)); in cpu_map_bpf_prog_run() 307 void *skbs[CPUMAP_BATCH]; in cpu_map_kthread_run() local 342 skbs[ret.skb_n++] = skb; in cpu_map_kthread_run() 359 cpu_map_bpf_prog_run(rcpu, frames, skbs, &ret, &stats); in cpu_map_kthread_run() 363 m = napi_skb_cache_get_bulk(skbs, ret.xdp_n); in cpu_map_kthread_run() [all …]
|
| /linux/drivers/net/ethernet/actions/ |
| H A D | owl-emac.c | 206 ring->skbs[i] = skb; in owl_emac_ring_prepare_rx() 252 owl_emac_dma_unmap_rx(priv, ring->skbs[i], ring->skbs_dma[i]); in owl_emac_ring_unprepare_rx() 255 dev_kfree_skb(ring->skbs[i]); in owl_emac_ring_unprepare_rx() 256 ring->skbs[i] = NULL; in owl_emac_ring_unprepare_rx() 271 owl_emac_dma_unmap_tx(priv, ring->skbs[i], ring->skbs_dma[i]); in owl_emac_ring_unprepare_tx() 274 dev_kfree_skb(ring->skbs[i]); in owl_emac_ring_unprepare_tx() 275 ring->skbs[i] = NULL; in owl_emac_ring_unprepare_tx() 288 ring->skbs = devm_kcalloc(dev, size, sizeof(struct sk_buff *), in owl_emac_ring_alloc() 290 if (!ring->skbs) in owl_emac_ring_alloc() 530 ring->skbs[tx_head] = skb; in owl_emac_setup_frame_xmit() [all …]
|
| H A D | owl-emac.h | 247 struct sk_buff **skbs; member
|
| /linux/drivers/net/ethernet/sfc/ |
| H A D | selftest.c | 87 struct sk_buff **skbs; member 431 state->skbs[i] = skb; in efx_begin_loopback() 489 skb = state->skbs[i]; in efx_end_loopback() 545 state->skbs = kcalloc(state->packet_count, in efx_test_loopback() 546 sizeof(state->skbs[0]), GFP_KERNEL); in efx_test_loopback() 547 if (!state->skbs) in efx_test_loopback() 568 kfree(state->skbs); in efx_test_loopback()
|
| /linux/drivers/net/ethernet/sfc/siena/ |
| H A D | selftest.c | 88 struct sk_buff **skbs; member 432 state->skbs[i] = skb; in efx_begin_loopback() 490 skb = state->skbs[i]; in efx_end_loopback() 546 state->skbs = kcalloc(state->packet_count, in efx_test_loopback() 547 sizeof(state->skbs[0]), GFP_KERNEL); in efx_test_loopback() 548 if (!state->skbs) in efx_test_loopback() 569 kfree(state->skbs); in efx_test_loopback()
|
| /linux/drivers/net/ethernet/sfc/falcon/ |
| H A D | selftest.c | 85 struct sk_buff **skbs; member 434 state->skbs[i] = skb; in ef4_begin_loopback() 492 skb = state->skbs[i]; in ef4_end_loopback() 548 state->skbs = kcalloc(state->packet_count, in ef4_test_loopback() 549 sizeof(state->skbs[0]), GFP_KERNEL); in ef4_test_loopback() 550 if (!state->skbs) in ef4_test_loopback() 571 kfree(state->skbs); in ef4_test_loopback()
|
| /linux/net/mac80211/ |
| H A D | tx.c | 921 __skb_queue_tail(&tx->skbs, tmp); in ieee80211_fragment() 963 __skb_queue_tail(&tx->skbs, skb); in ieee80211_tx_h_fragment() 1000 skb_queue_walk(&tx->skbs, skb) { in ieee80211_tx_h_fragment() 1006 if (!skb_queue_is_last(&tx->skbs, skb)) { in ieee80211_tx_h_fragment() 1036 skb_queue_walk(&tx->skbs, skb) { in ieee80211_tx_h_stats() 1089 skb_queue_walk(&tx->skbs, skb) { in ieee80211_tx_h_calculate_duration() 1093 if (!skb_queue_is_last(&tx->skbs, skb)) { in ieee80211_tx_h_calculate_duration() 1094 struct sk_buff *next = skb_queue_next(&tx->skbs, skb); in ieee80211_tx_h_calculate_duration() 1223 __skb_queue_head_init(&tx->skbs); in ieee80211_tx_prepare() 1672 struct sk_buff_head *skbs, in ieee80211_tx_frags() argument [all …]
|
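The mac80211 hits above walk a per-transmission fragment queue. An illustrative sketch of those sk_buff_head helpers (not the mac80211 code; the queue and the per-fragment computation are hypothetical):

```c
/* Walks a fragment queue with skb_queue_walk(), peeking at the following
 * fragment via skb_queue_next() unless skb_queue_is_last() says this is the
 * final one -- the same shape as ieee80211_tx_h_calculate_duration() above.
 */
#include <linux/skbuff.h>

static unsigned int example_sum_following_lens(struct sk_buff_head *frags)
{
	unsigned int total = 0;
	struct sk_buff *skb;

	skb_queue_walk(frags, skb) {
		if (skb_queue_is_last(frags, skb))
			break;		/* final fragment: nothing follows */

		/* e.g. a duration field would be derived from the next
		 * fragment's length
		 */
		total += skb_queue_next(frags, skb)->len;
	}

	return total;
}
```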
| H A D | wep.c | 299 skb_queue_walk(&tx->skbs, skb) { in ieee80211_crypto_wep_encrypt()
|
| /linux/drivers/net/wireless/mediatek/mt7601u/ |
| H A D | dma.c | 283 struct sk_buff_head skbs; in mt7601u_tx_tasklet() local 286 __skb_queue_head_init(&skbs); in mt7601u_tx_tasklet() 295 skb_queue_splice_init(&dev->tx_skb_done, &skbs); in mt7601u_tx_tasklet() 299 while (!skb_queue_empty(&skbs)) { in mt7601u_tx_tasklet() 300 struct sk_buff *skb = __skb_dequeue(&skbs); in mt7601u_tx_tasklet()
|
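The mt7601u hits show the common "splice under the lock, process outside it" pattern for draining a shared skb queue. A hedged sketch of that pattern, with names other than the skb-queue helpers being hypothetical:

```c
/* Illustrative sketch (not the mt7601u code): move all pending skbs to a
 * local queue while holding the lock, then process them lock-free.
 */
#include <linux/skbuff.h>
#include <linux/spinlock.h>

static void example_drain_done_queue(struct sk_buff_head *shared_q,
				     spinlock_t *lock)
{
	struct sk_buff_head local_q;
	struct sk_buff *skb;

	__skb_queue_head_init(&local_q);

	/* Take everything off the shared queue in one splice. */
	spin_lock_bh(lock);
	skb_queue_splice_init(shared_q, &local_q);
	spin_unlock_bh(lock);

	/* Process (here: free) the skbs without the lock held. */
	while ((skb = __skb_dequeue(&local_q)) != NULL)
		dev_kfree_skb(skb);
}
```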
| /linux/net/mctp/test/ |
| H A D | route-test.c | 593 /* Test that skbs from different nets (otherwise identical) get routed to their in mctp_test_route_input_multiple_nets_bind_init() 673 /* test that skbs from different nets (otherwise identical) get routed to their in mctp_test_route_input_multiple_nets_key_init() 761 struct sk_buff *skbs[2]; in mctp_test_route_input_sk_fail_single() 773 for (i = 0; i < ARRAY_SIZE(skbs); i++) { 774 skbs[i] = mctp_test_create_skb(&hdrs[i], 10); 775 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skbs[i]); 776 skb_get(skbs[i]); in mctp_test_route_input_sk_fail_frag() 778 mctp_test_skb_set_dev(skbs[i], dev); in mctp_test_route_input_sk_fail_frag() 784 rc = mctp_dst_input(&dst, skbs[0]); in mctp_test_route_input_sk_fail_frag() 788 rc = mctp_dst_input(&dst, skbs[ in mctp_test_route_input_sk_fail_frag() 781 struct sk_buff *skbs[2]; in mctp_test_route_input_sk_fail_frag() local [all …]
|
| /linux/drivers/net/ethernet/qlogic/qede/ |
| H A D | qede_fp.c | 80 struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb; in qede_free_tx_pkt() 85 bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD; in qede_free_tx_pkt() 125 txq->sw_tx_ring.skbs[idx].skb = NULL; in qede_free_tx_pkt() 126 txq->sw_tx_ring.skbs[idx].flags = 0; in qede_free_tx_pkt() 137 struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb; in qede_free_failed_tx_pkt() 173 txq->sw_tx_ring.skbs[idx].skb = NULL; in qede_free_failed_tx_pkt() 174 txq->sw_tx_ring.skbs[idx].flags = 0; in qede_free_failed_tx_pkt() 1520 txq->sw_tx_ring.skbs[idx].skb = skb; in qede_start_xmit() 1642 txq->sw_tx_ring.skbs[idx].flags |= QEDE_TSO_SPLIT_BD; in qede_start_xmit()
|
| /linux/net/bpf/ |
| H A D | test_run.c | 106 struct sk_buff **skbs; member 167 xdp->skbs = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL); in xdp_test_run_setup() 168 if (!xdp->skbs) in xdp_test_run_setup() 198 kvfree(xdp->skbs); in xdp_test_run_setup() 209 kfree(xdp->skbs); in xdp_test_run_teardown() 242 struct sk_buff **skbs, in xdp_recv_frames() argument 250 (void **)skbs); in xdp_recv_frames() 259 struct sk_buff *skb = skbs[i]; in xdp_recv_frames() 350 ret = xdp_recv_frames(frames, nframes, xdp->skbs, xdp->dev); in xdp_test_run_batch()
|
| /linux/drivers/net/wireless/intel/iwlwifi/dvm/ |
| H A D | tx.c | 1125 struct sk_buff_head skbs; in iwlagn_rx_reply_tx() local 1146 __skb_queue_head_init(&skbs); in iwlagn_rx_reply_tx() 1175 iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs, false); in iwlagn_rx_reply_tx() 1180 skb_queue_walk(&skbs, skb) { in iwlagn_rx_reply_tx() 1251 while (!skb_queue_empty(&skbs)) { in iwlagn_rx_reply_tx() 1252 skb = __skb_dequeue(&skbs); in iwlagn_rx_reply_tx()
|
| /linux/net/mptcp/ |
| H A D | protocol.c | 2145 static bool __mptcp_move_skbs(struct sock *sk, struct list_head *skbs, u32 *delta) in __mptcp_move_skbs() argument 2147 struct sk_buff *skb = list_first_entry(skbs, struct sk_buff, list); in __mptcp_move_skbs() 2162 if (list_empty(skbs)) in __mptcp_move_skbs() 2165 skb = list_first_entry(skbs, struct sk_buff, list); in __mptcp_move_skbs() 2174 static bool mptcp_can_spool_backlog(struct sock *sk, struct list_head *skbs) in mptcp_can_spool_backlog() argument 2189 INIT_LIST_HEAD(skbs); in mptcp_can_spool_backlog() 2190 list_splice_init(&msk->backlog_list, skbs); in mptcp_can_spool_backlog() 2195 struct list_head *skbs) in mptcp_backlog_spooled() argument 2200 list_splice(skbs, &msk->backlog_list); in mptcp_backlog_spooled() 2205 struct list_head skbs; in mptcp_move_skbs() local [all …]
|
| /linux/tools/testing/selftests/net/packetdrill/ |
| H A D | tcp_zerocopy_maxfrags.pkt | 95 // verify that it is split in skbs with 17 frags
|
| /linux/drivers/net/ethernet/mediatek/ |
| H A D | mtk_star_emac.c | 241 struct sk_buff *skbs[MTK_STAR_RING_NUM_DESCS]; member 325 desc_data->skb = ring->skbs[ring->tail]; in mtk_star_ring_pop_tail() 328 ring->skbs[ring->tail] = NULL; in mtk_star_ring_pop_tail() 349 ring->skbs[ring->head] = desc_data->skb; in mtk_star_ring_push_head() 718 ring->skbs[i] = skb; in mtk_star_prepare_rx_skbs() 738 desc_data.skb = ring->skbs[i]; in mtk_star_ring_free_skbs()
|
| /linux/drivers/net/ethernet/natsemi/ |
| H A D | ns83820.c | 401 struct sk_buff *skbs[NR_RX_DESC]; member 524 BUG_ON(NULL != dev->rx_info.skbs[next_empty]); in ns83820_add_rx_skb() 525 dev->rx_info.skbs[next_empty] = skb; in ns83820_add_rx_skb() 790 struct sk_buff *skb = dev->rx_info.skbs[i]; in ns83820_cleanup_rx() 791 dev->rx_info.skbs[i] = NULL; in ns83820_cleanup_rx() 856 skb = info->skbs[next_rx]; 857 info->skbs[next_rx] = NULL;
|
| /linux/drivers/net/wireless/intel/iwlegacy/ |
| H A D | 3945.c | 283 skb = txq->skbs[txq->q.read_ptr]; in il3945_tx_queue_reclaim() 285 txq->skbs[txq->q.read_ptr] = NULL; in il3945_tx_queue_reclaim() 332 info = IEEE80211_SKB_CB(txq->skbs[txq->q.read_ptr]); in il3945_hdl_tx() 667 if (txq->skbs) { in il3945_hw_txq_free_tfd() 668 struct sk_buff *skb = txq->skbs[txq->q.read_ptr]; in il3945_hw_txq_free_tfd() 673 txq->skbs[txq->q.read_ptr] = NULL; in il3945_hw_txq_free_tfd()
|
| H A D | 4965-mac.c | 1770 txq->skbs[q->write_ptr] = skb; in il4965_tx_skb() 2473 skb = txq->skbs[txq->q.read_ptr]; in il4965_tx_queue_reclaim() 2484 txq->skbs[txq->q.read_ptr] = NULL; in il4965_tx_queue_reclaim() 2548 info = IEEE80211_SKB_CB(il->txq[scd_flow].skbs[agg->start_idx]); in il4965_tx_status_reply_compressed_ba() 2674 info = IEEE80211_SKB_CB(il->txq[txq_id].skbs[idx]); in il4965_tx_status_reply_tx() 2707 skb = il->txq[txq_id].skbs[idx]; in il4965_tx_status_reply_tx() 2785 skb = txq->skbs[txq->q.read_ptr]; in il4965_hdl_tx() 3945 if (txq->skbs) { in il4965_hw_txq_free_tfd() 3946 struct sk_buff *skb = txq->skbs[txq->q.read_ptr]; in il4965_hw_txq_free_tfd() 3951 txq->skbs[txq->q.read_ptr] = NULL; in il4965_hw_txq_free_tfd()
|
| /linux/drivers/net/ethernet/chelsio/inline_crypto/chtls/ |
| H A D | chtls_main.c | 423 struct sk_buff **skbs, const __be64 *rsp) in chtls_recv() argument 425 struct sk_buff *skb = *skbs; in chtls_recv()
|
| /linux/drivers/net/wireless/intel/iwlwifi/ |
| H A D | iwl-trans.c | 664 struct sk_buff_head *skbs, bool is_flush) in iwl_trans_reclaim() argument 673 iwl_pcie_reclaim(trans, queue, ssn, skbs, is_flush); in iwl_trans_reclaim()
|
| /linux/net/bluetooth/ |
| H A D | l2cap_core.c | 61 struct sk_buff_head *skbs, u8 event); 1926 struct sk_buff_head *skbs) in l2cap_streaming_send() argument 1931 BT_DBG("chan %p, skbs %p", chan, skbs); in l2cap_streaming_send() 1933 skb_queue_splice_tail_init(skbs, &chan->tx_q); in l2cap_streaming_send() 2768 struct sk_buff_head *skbs, u8 event) in l2cap_tx_state_xmit() argument 2770 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs, in l2cap_tx_state_xmit() 2776 chan->tx_send_head = skb_peek(skbs); in l2cap_tx_state_xmit() 2778 skb_queue_splice_tail_init(skbs, &chan->tx_q); in l2cap_tx_state_xmit() 2840 struct sk_buff_head *skbs, u8 event) in l2cap_tx_state_wait_f() argument 2842 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs, in l2cap_tx_state_wait_f() [all …]
|
| /linux/drivers/net/ethernet/chelsio/cxgb3/ |
| H A D | sge.c | 1877 struct sk_buff *skbs[], int n) in deliver_partial_bundle() argument 1881 tdev->recv(tdev, skbs, n); in deliver_partial_bundle() 1904 struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE]; in ofld_poll() local 1926 skbs[ngathered] = skb; in ofld_poll() 1929 adapter->tdev.recv(&adapter->tdev, skbs, in ofld_poll() 1940 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered); in ofld_poll()
|