| /linux/io_uring/ |
| napi.c | 4 #include "napi.h" 37 /* napi approximating usecs, reverse busy_loop_current_time */ in net_to_ktime() 46 /* Non-NAPI IDs can be rejected. */ in __io_napi_add_id() 89 /* Non-NAPI IDs can be rejected. */ in __io_napi_del_id() 212 /* Singular lists use a different napi loop end check function and are in io_napi_blocking_busy_loop() 232 * io_napi_init() - Init napi settings 235 * Init napi settings in the io-uring context. 249 * io_napi_free() - Deallocate napi 252 * Free the napi list and the hash table in the io-uring context. 267 struct io_uring_napi *napi) in io_napi_register_napi() argument [all …]
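The io_uring entry above is the kernel side of per-ring NAPI busy polling: io_napi_register_napi() consumes a struct io_uring_napi. As a rough userspace illustration — assuming a liburing build that exposes io_uring_register_napi() and the busy_poll_to/prefer_busy_poll fields — registration looks something like this sketch:

```c
/* Hedged sketch: enable NAPI busy polling on an io_uring instance.
 * Assumes liburing with NAPI registration support; error handling trimmed.
 */
#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_napi napi = {
		.busy_poll_to = 50,	/* busy-poll timeout, in microseconds */
		.prefer_busy_poll = 1,	/* prefer busy polling over interrupts */
	};

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	/* The kernel-side handler for this call is io_napi_register_napi(). */
	if (io_uring_register_napi(&ring, &napi) < 0)
		fprintf(stderr, "NAPI registration not supported/failed\n");

	io_uring_queue_exit(&ring);
	return 0;
}
```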
|
| /linux/tools/testing/selftests/drivers/net/ |
| napi_threaded.py | 5 Test napi threaded states. 15 napi = nl.napi_get({'id': napi_id}) 16 ksft_eq(napi['threaded'], 'enabled') 17 ksft_ne(napi.get('pid'), None) 21 napi = nl.napi_get({'id': napi_id}) 22 ksft_eq(napi['threaded'], 'disabled') 23 ksft_eq(napi.get('pid'), None) 44 Test that threaded state (in the persistent NAPI config) gets updated 45 even when NAPI with given ID is not allocated at the time. 56 for napi i [all …] |
| /linux/include/trace/events/ |
| napi.h | 3 #define TRACE_SYSTEM napi 16 TP_PROTO(struct napi_struct *napi, int work, int budget), 18 TP_ARGS(napi, work, budget), 21 __field( struct napi_struct *, napi) 22 __string( dev_name, napi->dev ? napi->dev->name : NO_DEV) 28 __entry->napi = napi; 34 TP_printk("napi poll on napi struct [all …]
| /linux/drivers/net/ethernet/intel/igc/ |
| igc_xdp.c | 32 napi_disable(&adapter->rx_ring[i]->q_vector->napi); in igc_xdp_set_prog() 47 napi_enable(&adapter->rx_ring[i]->q_vector->napi); in igc_xdp_set_prog() 62 struct napi_struct *napi; in igc_xdp_enable_pool() local 91 /* Rx and Tx rings share the same napi context. */ in igc_xdp_enable_pool() 92 napi = &rx_ring->q_vector->napi; in igc_xdp_enable_pool() 97 napi_disable(napi); in igc_xdp_enable_pool() 104 napi_enable(napi); in igc_xdp_enable_pool() 122 struct napi_struct *napi; in igc_xdp_disable_pool() local 137 /* Rx and Tx rings share the same napi context. */ in igc_xdp_disable_pool() 138 napi = &rx_ring->q_vector->napi; in igc_xdp_disable_pool() [all …]
|
| /linux/net/core/ |
| gro_cells.c | 10 struct napi_struct napi; member 44 napi_schedule(&cell->napi); in gro_cells_receive() 57 static int gro_cell_poll(struct napi_struct *napi, int budget) in gro_cell_poll() argument 59 struct gro_cell *cell = container_of(napi, struct gro_cell, napi); in gro_cell_poll() 69 napi_gro_receive(napi, skb); in gro_cell_poll() 74 napi_complete_done(napi, work_done); in gro_cell_poll() 92 set_bit(NAPI_STATE_NO_BUSY_POLL, &cell->napi.state); in gro_cells_init() 94 netif_napi_add(dev, &cell->napi, gro_cell_poll); in gro_cells_init() 95 napi_enable(&cell->napi); in gro_cells_init() 125 napi_disable(&cell->napi); in gro_cells_destroy() [all …]
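The gro_cells.c matches show the canonical NAPI poll shape: recover the per-queue context with container_of(), feed received packets to GRO, and call napi_complete_done() only when the budget was not exhausted. A minimal sketch of that pattern — my_rxq, my_rx_one_skb() and my_enable_rx_irq() are hypothetical driver helpers, not kernel APIs:

```c
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_rxq {
	struct napi_struct napi;
	/* ring state would live here */
};

static struct sk_buff *my_rx_one_skb(struct my_rxq *rxq);	/* hypothetical */
static void my_enable_rx_irq(struct my_rxq *rxq);		/* hypothetical */

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_rxq *rxq = container_of(napi, struct my_rxq, napi);
	int work_done = 0;

	while (work_done < budget) {
		struct sk_buff *skb = my_rx_one_skb(rxq);

		if (!skb)
			break;
		napi_gro_receive(napi, skb);
		work_done++;
	}

	/* Re-arm interrupts only if we stayed under budget; otherwise the
	 * core keeps the NAPI scheduled and polls again. */
	if (work_done < budget && napi_complete_done(napi, work_done))
		my_enable_rx_irq(rxq);

	return work_done;
}
```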
|
| dev.c | 136 #include <trace/events/napi.h> 463 * (e.g. NAPI context). 778 struct napi_struct *napi; in napi_by_id() local 780 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node) in napi_by_id() 781 if (napi->napi_id == napi_id) in napi_by_id() 782 return napi; in napi_by_id() 791 struct napi_struct *napi; in netdev_napi_by_id() local 793 napi = napi_by_id(napi_id); in netdev_napi_by_id() 794 if (!napi) in netdev_napi_by_id() 797 if (WARN_ON_ONCE(!napi->dev)) in netdev_napi_by_id() [all …]
|
| gro.c | 455 /* Do not adjust napi->gro_hash[].count, caller is adding a new in gro_flush_oldest() 640 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) in napi_reuse_skb() argument 652 skb->dev = napi->dev; in napi_reuse_skb() 673 napi->skb = skb; in napi_reuse_skb() 676 struct sk_buff *napi_get_frags(struct napi_struct *napi) in napi_get_frags() argument 678 struct sk_buff *skb = napi->skb; in napi_get_frags() 681 skb = napi_alloc_skb(napi, GRO_MAX_HEAD); in napi_get_frags() 683 napi->skb = skb; in napi_get_frags() 684 skb_mark_napi_id(skb, napi); in napi_get_frags() 691 static gro_result_t napi_frags_finish(struct napi_struct *napi, in napi_frags_finish() argument [all …]
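gro.c's napi_get_frags()/napi_reuse_skb() back the frag-based GRO receive path. A hedged sketch of how a driver typically consumes it (gve further down uses the same calls); the page/offset/length arguments stand in for whatever the RX descriptor reports:

```c
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Attach one received fragment to the per-NAPI skb and hand it to GRO. */
static void my_rx_frag(struct napi_struct *napi, struct page *page,
		       unsigned int off, unsigned int len, unsigned int truesize)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (unlikely(!skb))
		return;	/* a real driver would recycle the page here */

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, off, len, truesize);

	/* On error the core hands the skb back via napi_reuse_skb(), so the
	 * next napi_get_frags() call is cheap. */
	napi_gro_frags(napi);
}
```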
|
| netpoll.c | 38 #include <trace/events/napi.h> 142 static void poll_one_napi(struct napi_struct *napi) in poll_one_napi() argument 147 * that indicates that napi has been disabled and we need in poll_one_napi() 150 if (test_and_set_bit(NAPI_STATE_NPSVC, &napi->state)) in poll_one_napi() 156 work = napi->poll(napi, 0); in poll_one_napi() 157 WARN_ONCE(work, "%pS exceeded budget in poll\n", napi->poll); in poll_one_napi() 158 trace_napi_poll(napi, work, 0); in poll_one_napi() 160 clear_bit(NAPI_STATE_NPSVC, &napi->state); in poll_one_napi() 165 struct napi_struct *napi; in poll_napi() local 168 list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) { in poll_napi() [all …]
|
| /linux/drivers/infiniband/hw/hfi1/ |
| ipoib_rx.c | 22 static struct sk_buff *prepare_frag_skb(struct napi_struct *napi, int size) in prepare_frag_skb() argument 33 return napi_alloc_skb(napi, size); in prepare_frag_skb() 49 struct napi_struct *napi = &rxq->napi; in hfi1_ipoib_prepare_skb() local 55 * napi cache. Otherwise we will try to use napi frag cache. in hfi1_ipoib_prepare_skb() 58 skb = napi_alloc_skb(napi, skb_size); in hfi1_ipoib_prepare_skb() 60 skb = prepare_frag_skb(napi, skb_size); in hfi1_ipoib_prepare_skb()
|
| netdev_rx.c | 211 rxq->rcd->napi = &rxq->napi; in hfi1_netdev_rxq_init() 212 dd_dev_info(dd, "Setting rcv queue %d napi to context %d\n", in hfi1_netdev_rxq_init() 215 * Disable BUSY_POLL on this NAPI as this is not supported in hfi1_netdev_rxq_init() 218 set_bit(NAPI_STATE_NO_BUSY_POLL, &rxq->napi.state); in hfi1_netdev_rxq_init() 219 netif_napi_add(dev, &rxq->napi, hfi1_netdev_rx_napi); in hfi1_netdev_rxq_init() 252 netif_napi_del(&rxq->napi); in hfi1_netdev_rxq_deinit() 272 napi_enable(&rxq->napi); in enable_queues() 291 /* wait for napi if it was scheduled */ in disable_queues() 295 napi_synchronize(&rxq->napi); in disable_queues() 296 napi_disable(&rxq->napi); in disable_queues() [all …]
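The hfi1 netdev_rx.c matches walk the full NAPI lifecycle: register, enable, quiesce, disable, delete. A condensed sketch of that sequence, reusing the hypothetical my_rxq/my_poll from the gro_cells sketch above:

```c
#include <linux/netdevice.h>

static void my_rxq_init(struct net_device *dev, struct my_rxq *rxq)
{
	/* Optionally opt this queue out of busy polling, as hfi1 does. */
	set_bit(NAPI_STATE_NO_BUSY_POLL, &rxq->napi.state);
	netif_napi_add(dev, &rxq->napi, my_poll);
	napi_enable(&rxq->napi);
}

static void my_rxq_teardown(struct my_rxq *rxq)
{
	napi_synchronize(&rxq->napi);	/* wait out an in-flight poll */
	napi_disable(&rxq->napi);
	netif_napi_del(&rxq->napi);
}
```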
|
| netdev.h | 18 * @napi: napi object 23 struct napi_struct napi; member 47 * When 0 NAPI will be disabled. 58 /* count of enabled napi polls */ 103 int hfi1_netdev_rx_napi(struct napi_struct *napi, int budget);
|
| /linux/drivers/net/netdevsim/ |
| netdev.c | 48 idx = rq->napi.index; in nsim_start_peer_tx_queue() 412 struct net_device *dev = rq->napi.dev; in nsim_rcv() 443 napi_gro_receive(&rq->napi, skb); in nsim_rcv() 450 static int nsim_poll(struct napi_struct *napi, int budget) in nsim_poll() argument 452 struct nsim_rq *rq = container_of(napi, struct nsim_rq, napi); in nsim_poll() 457 napi_complete_done(napi, done); in nsim_poll() 462 static int nsim_create_page_pool(struct page_pool **p, struct napi_struct *napi) in nsim_create_page_pool() argument 468 .dev = &napi->dev->dev, in nsim_create_page_pool() 469 .napi = napi, in nsim_create_page_pool() 471 .netdev = napi->dev, in nsim_create_page_pool() [all …]
|
| /linux/drivers/net/wireless/ath/wil6210/ |
| netdev.c | 91 static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget) in wil6210_netdev_poll_rx() argument 93 struct wil6210_priv *wil = container_of(napi, struct wil6210_priv, in wil6210_netdev_poll_rx() 102 napi_complete_done(napi, done); in wil6210_netdev_poll_rx() 104 wil_dbg_txrx(wil, "NAPI RX complete\n"); in wil6210_netdev_poll_rx() 107 wil_dbg_txrx(wil, "NAPI RX poll(%d) done %d\n", budget, done); in wil6210_netdev_poll_rx() 112 static int wil6210_netdev_poll_rx_edma(struct napi_struct *napi, int budget) in wil6210_netdev_poll_rx_edma() argument 114 struct wil6210_priv *wil = container_of(napi, struct wil6210_priv, in wil6210_netdev_poll_rx_edma() 123 napi_complete_done(napi, done); in wil6210_netdev_poll_rx_edma() 125 wil_dbg_txrx(wil, "NAPI RX complete\n"); in wil6210_netdev_poll_rx_edma() 128 wil_dbg_txrx(wil, "NAPI RX poll(%d) done %d\n", budget, done); in wil6210_netdev_poll_rx_edma() [all …]
|
| /linux/drivers/net/ethernet/synopsys/ |
| dwc-xlgmac-net.c | 283 * enabled before using the private data napi structure. in xlgmac_isr() 290 if (napi_schedule_prep(&pdata->napi)) { in xlgmac_isr() 296 __napi_schedule_irqoff(&pdata->napi); in xlgmac_isr() 348 * channel napi structure and not the private data napi structure in xlgmac_dma_isr() 350 if (napi_schedule_prep(&channel->napi)) { in xlgmac_dma_isr() 355 __napi_schedule_irqoff(&channel->napi); in xlgmac_dma_isr() 366 struct napi_struct *napi; in xlgmac_tx_timer() local 368 napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi; in xlgmac_tx_timer() 370 if (napi_schedule_prep(napi)) { in xlgmac_tx_timer() 379 __napi_schedule(napi); in xlgmac_tx_timer() [all …]
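The dwc-xlgmac matches illustrate the interrupt-side half of NAPI: napi_schedule_prep() followed by __napi_schedule_irqoff(), with device interrupts masked until the poll routine completes. A minimal sketch under the same hypothetical my_rxq type; my_disable_rx_irq() is an assumed helper:

```c
#include <linux/interrupt.h>
#include <linux/netdevice.h>

static void my_disable_rx_irq(struct my_rxq *rxq);	/* hypothetical */

static irqreturn_t my_isr(int irq, void *data)
{
	struct my_rxq *rxq = data;

	if (napi_schedule_prep(&rxq->napi)) {
		/* Mask further device interrupts; the poll routine owns the
		 * ring until it calls napi_complete_done() and re-arms. */
		my_disable_rx_irq(rxq);
		__napi_schedule_irqoff(&rxq->napi);
	}

	return IRQ_HANDLED;
}
```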
|
| /linux/drivers/net/ethernet/aquantia/atlantic/ |
| aq_vec.c | 21 struct napi_struct napi; member 28 static int aq_vec_poll(struct napi_struct *napi, int budget) in aq_vec_poll() argument 30 struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi); in aq_vec_poll() 68 napi, in aq_vec_poll() 93 napi_complete_done(napi, work_done); in aq_vec_poll() 122 netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi, aq_vec_poll); in aq_vec_alloc() 151 self->napi.napi_id) < 0) { in aq_vec_ring_alloc() 245 napi_enable(&self->napi); in aq_vec_start() 265 napi_disable(&self->napi); in aq_vec_stop() 290 netif_napi_del(&self->napi); in aq_vec_free() [all …]
|
| /linux/tools/testing/selftests/net/ |
| nl_netdev.py | 40 Test that verifies various cases of napi threaded 41 set and unset at napi and device level. 54 # set napi threaded and verify 68 # verify if napi threaded is still set 78 # unset napi threaded and verify 87 # check napi threaded is set for both napis 98 # check napi threaded is unset for both napis 106 # set napi threaded for napi0 115 # check napi threaded is unset for both napis 125 Test that verifies various cases of napi threaded [all …]
|
| /linux/drivers/net/ethernet/google/gve/ |
| gve_utils.c | 67 struct sk_buff *gve_rx_copy_data(struct net_device *dev, struct napi_struct *napi, in gve_rx_copy_data() argument 72 skb = napi_alloc_skb(napi, len); in gve_rx_copy_data() 83 struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi, in gve_rx_copy() argument 89 return gve_rx_copy_data(dev, napi, va, len); in gve_rx_copy() 113 netif_napi_add_locked(priv->dev, &block->napi, gve_poll); in gve_add_napi() 114 netif_napi_set_irq_locked(&block->napi, block->irq); in gve_add_napi() 121 netif_napi_del_locked(&block->napi); in gve_remove_napi()
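gve_rx_copy_data() is a copy-break style small-packet path: allocate an skb from the per-NAPI cache and copy the frame into it. A short sketch of the same idea (not GVE's exact code):

```c
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *my_rx_copy(struct napi_struct *napi,
				  const void *va, unsigned int len)
{
	struct sk_buff *skb = napi_alloc_skb(napi, len);

	if (unlikely(!skb))
		return NULL;

	/* Copy the frame and extend the skb data area to cover it. */
	skb_put_data(skb, va, len);
	return skb;
}
```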
|
| gve_rx.c | 450 static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi, in gve_rx_add_frags() argument 460 skb = napi_get_frags(napi); in gve_rx_add_frags() 469 skb = napi_alloc_skb(napi, 0); in gve_rx_add_frags() 520 struct napi_struct *napi, in gve_rx_raw_addressing() argument 524 struct sk_buff *skb = gve_rx_add_frags(napi, page_info, packet_buffer_size, len, ctx); in gve_rx_raw_addressing() 540 u16 len, struct napi_struct *napi) in gve_rx_copy_to_pool() argument 581 skb = gve_rx_add_frags(napi, &alloc_page_info, in gve_rx_copy_to_pool() 597 skb = gve_rx_add_frags(napi, copy_page_info, in gve_rx_copy_to_pool() 626 u16 len, struct napi_struct *napi, in gve_rx_qpl() argument 638 skb = gve_rx_add_frags(napi, page_info, page_info->buf_size, in gve_rx_qpl() [all …]
|
| /linux/drivers/net/caif/ |
| caif_virtio.c | 30 /* NAPI schedule quota */ 41 /* struct cfv_napi_contxt - NAPI context info 54 * @rx_napi_complete: Number of NAPI completions (RX) 84 * @napi: Napi context used in cfv_rx_poll() 111 struct napi_struct napi; member 250 static int cfv_rx_poll(struct napi_struct *napi, int quota) in cfv_rx_poll() argument 252 struct cfv_info *cfv = container_of(napi, struct cfv_info, napi); in cfv_rx_poll() 319 napi_complete(napi); in cfv_rx_poll() 321 napi_schedule_prep(napi)) { in cfv_rx_poll() 323 __napi_schedule(napi); in cfv_rx_poll() [all …]
|
| /linux/drivers/net/ethernet/meta/fbnic/ |
| fbnic_txrx.c | 606 txq = txring_txq(nv->napi.dev, ring); in fbnic_clean_twq0() 698 netdev_err(nv->napi.dev, in fbnic_clean_tsq() 710 fbn = netdev_priv(nv->napi.dev); in fbnic_clean_tsq() 748 /* sub0 is always fed system pages, from the NAPI-level page_pool */ in fbnic_page_pool_get_head() 1021 netdev_err_once(nv->napi.dev, in fbnic_add_rx_frag() 1150 struct fbnic_net *fbn = netdev_priv(nv->napi.dev); in fbnic_run_xdp() 1170 bpf_warn_invalid_xdp_action(nv->napi.dev, xdp_prog, act); in fbnic_run_xdp() 1173 trace_xdp_exception(nv->napi.dev, xdp_prog, act); in fbnic_run_xdp() 1198 fbn = netdev_priv(nv->napi.dev); in fbnic_rx_tstamp() 1211 struct net_device *netdev = nv->napi.dev; in fbnic_populate_skb_fields() 1369 fbnic_poll(struct napi_struct *napi, int budget) fbnic_poll() argument [all …] |
| /linux/drivers/net/ethernet/intel/libeth/ |
| xsk.c | 126 * current NAPI poll when there are no free buffers left. 201 * @napi: NAPI corresponding to this queue 207 void libeth_xsk_init_wakeup(call_single_data_t *csd, struct napi_struct *napi) in libeth_xsk_init_wakeup() argument 209 INIT_CSD(csd, libeth_xsk_napi_sched, napi); in libeth_xsk_init_wakeup() 218 * Try to mark the NAPI as missed first, so that it could be rescheduled. 224 struct napi_struct *napi = csd->info; in libeth_xsk_wakeup() local 226 if (napi_if_scheduled_mark_missed(napi) || in libeth_xsk_wakeup() 227 unlikely(!napi_schedule_prep(napi))) in libeth_xsk_wakeup() 236 __napi_schedule(napi); in libeth_xsk_wakeup()
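The libeth XSK matches show a wakeup scheme built on napi_if_scheduled_mark_missed() plus a pre-initialised call_single_data_t (set up with INIT_CSD() in libeth_xsk_init_wakeup()). A sketch of the same flow, assuming the csd already carries the NAPI pointer in its info field; my_xsk_wakeup() is a hypothetical name:

```c
#include <linux/smp.h>
#include <linux/netdevice.h>

static void my_xsk_wakeup(call_single_data_t *csd, u32 cpu)
{
	struct napi_struct *napi = csd->info;

	/* If a poll is already running, marking it missed makes it re-run;
	 * if we cannot take scheduling ownership, someone else already did. */
	if (napi_if_scheduled_mark_missed(napi) ||
	    unlikely(!napi_schedule_prep(napi)))
		return;

	if (cpu != raw_smp_processor_id() && cpu_online(cpu))
		smp_call_function_single_async(cpu, csd); /* kick the NAPI's CPU */
	else
		__napi_schedule(napi);
}
```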
|
| /linux/drivers/net/ethernet/qlogic/qlcnic/ |
| qlcnic_io.c | 959 static int qlcnic_poll(struct napi_struct *napi, int budget) in qlcnic_poll() argument 966 sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi); in qlcnic_poll() 979 napi_complete_done(&sds_ring->napi, work_done); in qlcnic_poll() 989 static int qlcnic_tx_poll(struct napi_struct *napi, int budget) in qlcnic_tx_poll() argument 995 tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi); in qlcnic_tx_poll() 1000 napi_complete(&tx_ring->napi); in qlcnic_tx_poll() 1011 static int qlcnic_rx_poll(struct napi_struct *napi, int budget) in qlcnic_rx_poll() argument 1017 sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi); in qlcnic_rx_poll() 1023 napi_complete_done(&sds_ring->napi, work_done); in qlcnic_rx_poll() 1248 napi_gro_receive(&sds_ring->napi, skb); in qlcnic_process_rcv() [all …]
|
| /linux/drivers/net/ethernet/hisilicon/hibmcge/ |
| hbg_txrx.c | 193 static int hbg_napi_tx_recycle(struct napi_struct *napi, int budget) in hbg_napi_tx_recycle() 195 struct hbg_ring *ring = container_of(napi, struct hbg_ring, napi); in hbg_napi_tx_recycle() 206 * Per NAPI documentation budget is for Rx. in hbg_napi_tx_recycle() 207 * So We hardcode the amount of work Tx NAPI does to 128. in hbg_napi_tx_recycle() 233 napi_complete_done(napi, packet_done))) in hbg_napi_tx_recycle() 481 static int hbg_napi_rx_poll(struct napi_struct *napi, int budget) in hbg_napi_rx_poll() 483 struct hbg_ring *ring = container_of(napi, struct hbg_ring, napi); in hbg_napi_rx_poll() 520 napi_gro_receive(napi, buffe… in hbg_napi_rx_poll() 158 hbg_napi_tx_recycle(struct napi_struct *napi, int budget) hbg_napi_tx_recycle() argument 411 hbg_napi_rx_poll(struct napi_struct *napi, int budget) hbg_napi_rx_poll() argument [all …] |
| /linux/drivers/staging/octeon/ |
| ethernet-rx.c | 37 struct napi_struct napi; member 43 * @napi_id: Cookie to identify the NAPI instance. 406 * cvm_oct_napi_poll - the NAPI poll function. 407 * @napi: The NAPI instance. 412 static int cvm_oct_napi_poll(struct napi_struct *napi, int budget) in cvm_oct_napi_poll() argument 414 struct oct_rx_group *rx_group = container_of(napi, struct oct_rx_group, in cvm_oct_napi_poll() 415 napi); in cvm_oct_napi_poll() 422 napi_complete_done(napi, rx_count); in cvm_oct_napi_poll() 472 netif_napi_add_weight(dev_for_napi, &oct_rx_group[i].napi, in cvm_oct_rx_initialize() 474 napi_enable(&oct_rx_group[i].napi); in cvm_oct_rx_initialize() [all …]
|
| /linux/include/net/page_pool/ |
| types.h | 44 * use-case. The NAPI budget is 64 packets. After a NAPI poll the RX 50 * this array, as it shares the same softirq/NAPI protection. If 68 * @napi: NAPI which is the sole consumer of pages, otherwise NULL 84 struct napi_struct *napi; 206 * Softirq/BH scheduling and napi_schedule. NAPI schedule 237 * protected by NAPI, due to above pp_alloc_cache. This 269 struct napi_struct *napi); 307 /* Caller must provide appropriate safe context, e.g. NAPI. */
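The @napi member documented above lets a driver declare one NAPI instance as the pool's sole consumer, enabling lockless direct recycling when pages come back from that softirq context. A sketch of creating such a pool, modelled on the netdevsim fields shown earlier (DMA mapping flags are omitted); my_create_pool() is a hypothetical helper:

```c
#include <linux/netdevice.h>
#include <net/page_pool/helpers.h>

static struct page_pool *my_create_pool(struct net_device *dev,
					struct napi_struct *napi,
					unsigned int ring_size)
{
	struct page_pool_params pp = {
		.order		= 0,
		.pool_size	= ring_size,
		.nid		= NUMA_NO_NODE,
		.dev		= &dev->dev,
		.napi		= napi,		/* sole consumer of this pool */
		.netdev		= dev,
	};

	return page_pool_create(&pp);
}
```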
|