
Searched refs:rxq (Results 1 – 25 of 162) sorted by relevance


/linux/drivers/net/wwan/t7xx/
t7xx_hif_dpmaif_rx.c
83 struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[q_num]; in t7xx_dpmaif_update_bat_wr_idx() local
84 struct dpmaif_bat_request *bat_req = rxq->bat_req; in t7xx_dpmaif_update_bat_wr_idx()
87 if (!rxq->que_started) { in t7xx_dpmaif_update_bat_wr_idx()
88 dev_err(dpmaif_ctrl->dev, "RX queue %d has not been started\n", rxq->index); in t7xx_dpmaif_update_bat_wr_idx()
235 static int t7xx_dpmaifq_release_pit_entry(struct dpmaif_rx_queue *rxq, in t7xx_dpmaifq_release_pit_entry() argument
238 struct dpmaif_hw_info *hw_info = &rxq->dpmaif_ctrl->hw_info; in t7xx_dpmaifq_release_pit_entry()
242 if (!rxq->que_started) in t7xx_dpmaifq_release_pit_entry()
245 if (rel_entry_num >= rxq->pit_size_cnt) { in t7xx_dpmaifq_release_pit_entry()
246 dev_err(rxq->dpmaif_ctrl->dev, "Invalid PIT release index\n"); in t7xx_dpmaifq_release_pit_entry()
250 old_rel_idx = rxq->pit_release_rd_idx; in t7xx_dpmaifq_release_pit_entry()
[all …]
t7xx_hif_dpmaif.c
227 rx_q = &dpmaif_ctrl->rxq[rx_idx]; in t7xx_dpmaif_rxtx_sw_allocs()
267 rx_q = &dpmaif_ctrl->rxq[i]; in t7xx_dpmaif_rxtx_sw_allocs()
294 rx_q = &dpmaif_ctrl->rxq[i]; in t7xx_dpmaif_sw_release()
303 struct dpmaif_rx_queue *rxq; in t7xx_dpmaif_start() local
314 rxq = &dpmaif_ctrl->rxq[i]; in t7xx_dpmaif_start()
315 rxq->que_started = true; in t7xx_dpmaif_start()
316 rxq->index = i; in t7xx_dpmaif_start()
317 rxq->budget = rxq->bat_req->bat_size_cnt - 1; in t7xx_dpmaif_start()
319 hw_init_para.pkt_bat_base_addr[i] = rxq->bat_req->bat_bus_addr; in t7xx_dpmaif_start()
320 hw_init_para.pkt_bat_size_cnt[i] = rxq->bat_req->bat_size_cnt; in t7xx_dpmaif_start()
[all …]
t7xx_hif_cldma.c
603 queue_work(md_ctrl->rxq[i].worker, &md_ctrl->rxq[i].cldma_work); in t7xx_cldma_irq_work_cb()
657 flush_work(&md_ctrl->rxq[i].cldma_work); in t7xx_cldma_stop()
706 cancel_work_sync(&md_ctrl->rxq[i].cldma_work); in t7xx_cldma_reset()
709 md_cd_queue_struct_reset(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i); in t7xx_cldma_reset()
742 if (md_ctrl->rxq[i].tr_done) in t7xx_cldma_start()
744 md_ctrl->rxq[i].tr_done->gpd_addr, in t7xx_cldma_start()
780 struct cldma_queue *rxq = &md_ctrl->rxq[qnum]; in t7xx_cldma_clear_rxq() local
786 spin_lock_irqsave(&rxq->ring_lock, flags); in t7xx_cldma_clear_rxq()
787 t7xx_cldma_q_reset(rxq); in t7xx_cldma_clear_rxq()
788 list_for_each_entry(req, &rxq->tr_ring->gpd_ring, entry) { in t7xx_cldma_clear_rxq()
[all …]
/linux/drivers/net/ethernet/huawei/hinic/
hinic_rx.c
53 static void hinic_rxq_clean_stats(struct hinic_rxq *rxq) in hinic_rxq_clean_stats() argument
55 struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats; in hinic_rxq_clean_stats()
71 void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats) in hinic_rxq_get_stats() argument
73 struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats; in hinic_rxq_get_stats()
91 static void rxq_stats_init(struct hinic_rxq *rxq) in rxq_stats_init() argument
93 struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats; in rxq_stats_init()
96 hinic_rxq_clean_stats(rxq); in rxq_stats_init()
99 static void rx_csum(struct hinic_rxq *rxq, u32 status, in rx_csum() argument
102 struct net_device *netdev = rxq->netdev; in rx_csum()
115 rxq->rxq_stats.csum_errors++; in rx_csum()
[all …]
hinic_rx.h
44 void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats);
46 int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
49 void hinic_clean_rxq(struct hinic_rxq *rxq);
/linux/drivers/net/wireless/intel/iwlwifi/pcie/
rx.c
119 static int iwl_rxq_space(const struct iwl_rxq *rxq) in iwl_rxq_space() argument
122 WARN_ON(rxq->queue_size & (rxq->queue_size - 1)); in iwl_rxq_space()
130 return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1); in iwl_rxq_space()
167 struct iwl_rxq *rxq) in iwl_pcie_rxq_inc_wr_ptr() argument
171 lockdep_assert_held(&rxq->lock); in iwl_pcie_rxq_inc_wr_ptr()
187 rxq->need_update = true; in iwl_pcie_rxq_inc_wr_ptr()
192 rxq->write_actual = round_down(rxq->write, 8); in iwl_pcie_rxq_inc_wr_ptr()
194 iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual); in iwl_pcie_rxq_inc_wr_ptr()
196 iwl_write32(trans, HBUS_TARG_WRPTR, rxq->write_actual | in iwl_pcie_rxq_inc_wr_ptr()
197 HBUS_TARG_WRPTR_RX_Q(rxq->id)); in iwl_pcie_rxq_inc_wr_ptr()
[all …]
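
Note: the iwl_rxq_space() hits above rely on the queue size being a power of two, so free space reduces to (read - write - 1) masked with (size - 1), with one slot kept unused to tell "full" from "empty". A minimal userspace C sketch of the same masking idiom (the function name and the main() harness are illustrative, not taken from the driver):

#include <assert.h>
#include <stdio.h>

/* Free slots in a power-of-two ring; one slot stays unused so that
 * read == write unambiguously means "empty" (same shape as iwl_rxq_space()). */
static unsigned int ring_space(unsigned int read, unsigned int write,
                               unsigned int size)
{
        assert((size & (size - 1)) == 0);      /* size must be a power of two */
        return (read - write - 1) & (size - 1);
}

int main(void)
{
        printf("%u\n", ring_space(0, 0, 256));  /* empty ring: 255 free slots */
        printf("%u\n", ring_space(9, 8, 256));  /* full ring: 0 free slots */
        return 0;
}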
/linux/drivers/infiniband/hw/hfi1/
netdev_rx.c
194 rx->rxq = kcalloc_node(rx->num_rx_q, sizeof(*rx->rxq), in hfi1_netdev_rxq_init()
197 if (!rx->rxq) { in hfi1_netdev_rxq_init()
203 struct hfi1_netdev_rxq *rxq = &rx->rxq[i]; in hfi1_netdev_rxq_init() local
205 rc = hfi1_netdev_allot_ctxt(rx, &rxq->rcd); in hfi1_netdev_rxq_init()
209 hfi1_rcd_get(rxq->rcd); in hfi1_netdev_rxq_init()
210 rxq->rx = rx; in hfi1_netdev_rxq_init()
211 rxq->rcd->napi = &rxq->napi; in hfi1_netdev_rxq_init()
213 i, rxq->rcd->ctxt); in hfi1_netdev_rxq_init()
218 set_bit(NAPI_STATE_NO_BUSY_POLL, &rxq->napi.state); in hfi1_netdev_rxq_init()
219 netif_napi_add(dev, &rxq->napi, hfi1_netdev_rx_napi); in hfi1_netdev_rxq_init()
[all …]
vnic_main.c
292 static inline int hfi1_vnic_decap_skb(struct hfi1_vnic_rx_queue *rxq, in hfi1_vnic_decap_skb() argument
295 struct hfi1_vnic_vport_info *vinfo = rxq->vinfo; in hfi1_vnic_decap_skb()
303 vinfo->stats[rxq->idx].rx_oversize++; in hfi1_vnic_decap_skb()
305 vinfo->stats[rxq->idx].rx_runt++; in hfi1_vnic_decap_skb()
336 struct hfi1_vnic_rx_queue *rxq; in hfi1_vnic_bypass_rcv() local
370 rxq = &vinfo->rxq[q_idx]; in hfi1_vnic_bypass_rcv()
389 rc = hfi1_vnic_decap_skb(rxq, skb); in hfi1_vnic_bypass_rcv()
392 hfi1_vnic_update_rx_counters(vinfo, rxq->idx, skb, rc); in hfi1_vnic_bypass_rcv()
399 skb->protocol = eth_type_trans(skb, rxq->netdev); in hfi1_vnic_bypass_rcv()
401 napi_gro_receive(&rxq->napi, skb); in hfi1_vnic_bypass_rcv()
[all …]
/linux/drivers/net/ethernet/qlogic/qede/
qede_fp.c
27 int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy) in qede_alloc_rx_buffer() argument
38 if (allow_lazy && likely(rxq->filled_buffers > 12)) { in qede_alloc_rx_buffer()
39 rxq->filled_buffers--; in qede_alloc_rx_buffer()
50 mapping = dma_map_page(rxq->dev, data, 0, in qede_alloc_rx_buffer()
51 PAGE_SIZE, rxq->data_direction); in qede_alloc_rx_buffer()
52 if (unlikely(dma_mapping_error(rxq->dev, mapping))) { in qede_alloc_rx_buffer()
57 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; in qede_alloc_rx_buffer()
63 rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring); in qede_alloc_rx_buffer()
67 rxq->rx_headroom); in qede_alloc_rx_buffer()
69 rxq->sw_rx_prod++; in qede_alloc_rx_buffer()
[all …]
qede_main.c
964 if (fp->rxq && xdp_rxq_info_is_reg(&fp->rxq->xdp_rxq)) in qede_free_fp_array()
965 xdp_rxq_info_unreg(&fp->rxq->xdp_rxq); in qede_free_fp_array()
966 kfree(fp->rxq); in qede_free_fp_array()
1035 fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL); in qede_alloc_fp_array()
1036 if (!fp->rxq) in qede_alloc_fp_array()
1528 struct qede_rx_queue *rxq) in qede_free_rx_buffers() argument
1532 for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) { in qede_free_rx_buffers()
1536 rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX]; in qede_free_rx_buffers()
1540 rx_buf->mapping, PAGE_SIZE, rxq->data_direction); in qede_free_rx_buffers()
1547 static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) in qede_free_mem_rxq() argument
[all …]
/linux/drivers/net/ethernet/microsoft/mana/
mana_en.c
564 static void *mana_get_rxbuf_pre(struct mana_rxq *rxq, dma_addr_t *da) in mana_get_rxbuf_pre() argument
566 struct net_device *ndev = rxq->ndev; in mana_get_rxbuf_pre()
578 if (mpc->rxbpre_datasize != rxq->datasize) { in mana_get_rxbuf_pre()
580 mpc->rxbpre_datasize, rxq->datasize); in mana_get_rxbuf_pre()
584 if (mpc->rxbpre_alloc_size != rxq->alloc_size) { in mana_get_rxbuf_pre()
586 mpc->rxbpre_alloc_size, rxq->alloc_size); in mana_get_rxbuf_pre()
590 if (mpc->rxbpre_headroom != rxq->headroom) { in mana_get_rxbuf_pre()
592 mpc->rxbpre_headroom, rxq->headroom); in mana_get_rxbuf_pre()
1318 static int mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq) in mana_fence_rq() argument
1324 init_completion(&rxq->fence_event); in mana_fence_rq()
[all …]
mana_bpf.c
80 u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq, in mana_run_xdp() argument
88 prog = rcu_dereference(rxq->bpf_prog); in mana_run_xdp()
93 xdp_init_buff(xdp, PAGE_SIZE, &rxq->xdp_rxq); in mana_run_xdp()
98 rx_stats = &rxq->stats; in mana_run_xdp()
107 rxq->xdp_rc = xdp_do_redirect(ndev, xdp, prog); in mana_run_xdp()
108 if (!rxq->xdp_rc) { in mana_run_xdp()
109 rxq->xdp_flush = true; in mana_run_xdp()
/linux/drivers/net/ethernet/marvell/
mvneta.c
109 #define MVNETA_VLAN_PRIO_RXQ_MAP(prio, rxq) ((rxq) << ((prio) * 3)) argument
134 #define MVNETA_CPU_RXQ_ACCESS(rxq) BIT(rxq) argument
857 struct mvneta_rx_queue *rxq, in mvneta_rxq_non_occup_desc_add() argument
864 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), in mvneta_rxq_non_occup_desc_add()
870 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), in mvneta_rxq_non_occup_desc_add()
876 struct mvneta_rx_queue *rxq) in mvneta_rxq_busy_desc_num_get() argument
880 val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id)); in mvneta_rxq_busy_desc_num_get()
888 struct mvneta_rx_queue *rxq, in mvneta_rxq_desc_num_update() argument
896 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val); in mvneta_rxq_desc_num_update()
916 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val); in mvneta_rxq_desc_num_update()
[all …]
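
Aside: MVNETA_VLAN_PRIO_RXQ_MAP(prio, rxq) above packs a 3-bit RX-queue number per VLAN priority into one register word, with priority N occupying bits 3N..3N+2. A small standalone C illustration of that packing (the macro name and the values below are illustrative; only the shift mirrors the driver):

#include <stdint.h>
#include <stdio.h>

/* Mirror of the shift in MVNETA_VLAN_PRIO_RXQ_MAP(): the queue number for
 * VLAN priority 'prio' lands in bits [3*prio .. 3*prio + 2]. */
#define VLAN_PRIO_RXQ_MAP(prio, rxq)  ((uint32_t)(rxq) << ((prio) * 3))

int main(void)
{
        uint32_t map = 0;

        map |= VLAN_PRIO_RXQ_MAP(5, 2);   /* priority 5 -> queue 2 */
        map |= VLAN_PRIO_RXQ_MAP(7, 6);   /* priority 7 -> queue 6 */
        printf("0x%08x\n", map);          /* prints 0x00c10000 */
        return 0;
}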
mv643xx_eth.c
399 struct rx_queue rxq[8]; member
441 static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq) in rxq_to_mp() argument
443 return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]); in rxq_to_mp()
451 static void rxq_enable(struct rx_queue *rxq) in rxq_enable() argument
453 struct mv643xx_eth_private *mp = rxq_to_mp(rxq); in rxq_enable()
454 wrlp(mp, RXQ_COMMAND, 1 << rxq->index); in rxq_enable()
457 static void rxq_disable(struct rx_queue *rxq) in rxq_disable() argument
459 struct mv643xx_eth_private *mp = rxq_to_mp(rxq); in rxq_disable()
460 u8 mask = 1 << rxq->index; in rxq_disable()
506 static int rxq_process(struct rx_queue *rxq, int budget) in rxq_process() argument
[all …]
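
Aside: rxq_to_mp() above recovers the parent mv643xx_eth_private from a pointer to one of its embedded rx_queue elements via container_of(), indexing the member by rxq->index so the offset matches the element actually pointed at. A self-contained userspace sketch of the idiom (struct and function names are illustrative; the variable array index inside offsetof relies on GCC/Clang's __builtin_offsetof, as the kernel's helper does):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct rx_queue { int index; };

struct eth_private {
        int id;
        struct rx_queue rxq[8];          /* queues embedded in the parent */
};

/* Recover the parent structure from a pointer to one of its embedded queues. */
static struct eth_private *rxq_to_priv(struct rx_queue *rxq)
{
        return container_of(rxq, struct eth_private, rxq[rxq->index]);
}

int main(void)
{
        struct eth_private priv = { .id = 42 };

        for (int i = 0; i < 8; i++)
                priv.rxq[i].index = i;
        printf("%d\n", rxq_to_priv(&priv.rxq[3])->id);   /* prints 42 */
        return 0;
}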
/linux/net/core/
devmem.c
108 struct netdev_rx_queue *rxq; in net_devmem_unbind_dmabuf() local
115 xa_for_each(&binding->bound_rxqs, xa_idx, rxq) { in net_devmem_unbind_dmabuf()
116 WARN_ON(rxq->mp_params.mp_priv != binding); in net_devmem_unbind_dmabuf()
118 rxq->mp_params.mp_priv = NULL; in net_devmem_unbind_dmabuf()
120 rxq_idx = get_netdev_rx_queue_index(rxq); in net_devmem_unbind_dmabuf()
134 struct netdev_rx_queue *rxq; in net_devmem_bind_dmabuf_to_queue() local
143 rxq = __netif_get_rx_queue(dev, rxq_idx); in net_devmem_bind_dmabuf_to_queue()
144 if (rxq->mp_params.mp_priv) { in net_devmem_bind_dmabuf_to_queue()
150 if (rxq->pool) { in net_devmem_bind_dmabuf_to_queue()
156 err = xa_alloc(&binding->bound_rxqs, &xa_idx, rxq, xa_limit_32b, in net_devmem_bind_dmabuf_to_queue()
[all …]
/linux/drivers/bluetooth/
btintel_pcie.c
171 static void btintel_pcie_prepare_rx(struct rxq *rxq, u16 frbd_index) in btintel_pcie_prepare_rx() argument
177 buf = &rxq->bufs[frbd_index]; in btintel_pcie_prepare_rx()
179 frbd = &rxq->frbds[frbd_index]; in btintel_pcie_prepare_rx()
190 struct rxq *rxq = &data->rxq; in btintel_pcie_submit_rx() local
194 if (frbd_index > rxq->count) in btintel_pcie_submit_rx()
200 btintel_pcie_prepare_rx(rxq, frbd_index); in btintel_pcie_submit_rx()
202 frbd_index = (frbd_index + 1) % rxq->count; in btintel_pcie_submit_rx()
772 struct rxq *rxq; in btintel_pcie_msix_rx_handle() local
789 rxq = &data->rxq; in btintel_pcie_msix_rx_handle()
795 urbd1 = &rxq->urbd1s[cr_tia]; in btintel_pcie_msix_rx_handle()
[all …]
/linux/drivers/net/ethernet/intel/idpf/
idpf_txrx.c
444 static void idpf_rx_buf_rel_all(struct idpf_rx_queue *rxq) in idpf_rx_buf_rel_all() argument
447 .fqes = rxq->rx_buf, in idpf_rx_buf_rel_all()
448 .pp = rxq->pp, in idpf_rx_buf_rel_all()
451 if (!rxq->rx_buf) in idpf_rx_buf_rel_all()
454 for (u32 i = 0; i < rxq->desc_count; i++) in idpf_rx_buf_rel_all()
455 idpf_rx_page_rel(&rxq->rx_buf[i]); in idpf_rx_buf_rel_all()
458 rxq->rx_buf = NULL; in idpf_rx_buf_rel_all()
459 rxq->pp = NULL; in idpf_rx_buf_rel_all()
470 static void idpf_rx_desc_rel(struct idpf_rx_queue *rxq, struct device *dev, in idpf_rx_desc_rel() argument
473 if (!rxq) in idpf_rx_desc_rel()
[all …]
/linux/drivers/net/wireless/intel/iwlegacy/
3945-mac.c
929 struct il_rx_queue *rxq = &il->rxq; in il3945_rx_queue_restock() local
934 spin_lock_irqsave(&rxq->lock, flags); in il3945_rx_queue_restock()
935 while (il_rx_queue_space(rxq) > 0 && rxq->free_count) { in il3945_rx_queue_restock()
937 element = rxq->rx_free.next; in il3945_rx_queue_restock()
942 rxq->bd[rxq->write] = in il3945_rx_queue_restock()
944 rxq->queue[rxq->write] = rxb; in il3945_rx_queue_restock()
945 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; in il3945_rx_queue_restock()
946 rxq->free_count--; in il3945_rx_queue_restock()
948 spin_unlock_irqrestore(&rxq->lock, flags); in il3945_rx_queue_restock()
951 if (rxq->free_count <= RX_LOW_WATERMARK) in il3945_rx_queue_restock()
[all …]
/linux/tools/testing/selftests/bpf/
xdp_hw_metadata.c
63 int rxq; variable
405 static int verify_metadata(struct xsk *rx_xsk, int rxq, int server_fd, clockid_t clock_id) in verify_metadata() argument
408 struct pollfd fds[rxq + 1]; in verify_metadata()
415 for (i = 0; i < rxq; i++) { in verify_metadata()
421 fds[rxq].fd = server_fd; in verify_metadata()
422 fds[rxq].events = POLLIN; in verify_metadata()
423 fds[rxq].revents = 0; in verify_metadata()
428 for (i = 0; i < rxq; i++) { in verify_metadata()
434 ret = poll(fds, rxq + 1, 1000); in verify_metadata()
443 if (fds[rxq].revents) in verify_metadata()
[all …]
/linux/drivers/net/ethernet/mellanox/mlx5/core/
en_arfs.c
90 int rxq; member
437 arfs_rule->rxq, arfs_rule->flow_id, in arfs_may_expire_flow()
449 priv->channel_stats[arfs_rule->rxq]->rq.arfs_expired++; in arfs_may_expire_flow()
528 priv->channel_stats[arfs_rule->rxq]->rq.arfs_err++; in arfs_add_rule()
597 dest.tir_num = mlx5e_rx_res_get_tirn_direct(priv->rx_res, arfs_rule->rxq); in arfs_add_rule()
601 priv->channel_stats[arfs_rule->rxq]->rq.arfs_err++; in arfs_add_rule()
604 __func__, arfs_rule->filter_id, arfs_rule->rxq, in arfs_add_rule()
614 struct mlx5_flow_handle *rule, u16 rxq) in arfs_modify_rule_rq() argument
620 dst.tir_num = mlx5e_rx_res_get_tirn_direct(priv->rx_res, rxq); in arfs_modify_rule_rq()
623 priv->channel_stats[rxq]->rq.arfs_err++; in arfs_modify_rule_rq()
[all …]
/linux/drivers/net/ethernet/chelsio/cxgb4vf/
cxgb4vf_main.c
383 int rxq, msi, err; in request_msix_queue_irqs() local
397 for_each_ethrxq(s, rxq) { in request_msix_queue_irqs()
401 &s->ethrxq[rxq].rspq); in request_msix_queue_irqs()
409 while (--rxq >= 0) in request_msix_queue_irqs()
410 free_irq(adapter->msix_info[--msi].vec, &s->ethrxq[rxq].rspq); in request_msix_queue_irqs()
421 int rxq, msi; in free_msix_queue_irqs() local
425 for_each_ethrxq(s, rxq) in free_msix_queue_irqs()
427 &s->ethrxq[rxq].rspq); in free_msix_queue_irqs()
452 int rxq; in enable_rx() local
455 for_each_ethrxq(s, rxq) in enable_rx()
[all …]
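
Aside: the request_msix_queue_irqs() hits above show the usual partial-failure unwind: if requesting the IRQ for queue N fails, walk back with while (--rxq >= 0) and free only what was actually requested. A compact userspace sketch of that control flow (fake_request_irq()/fake_free_irq() are stand-ins, not real kernel calls):

#include <stdio.h>

#define NQUEUES 4

/* Illustrative stand-ins for request_irq()/free_irq(). */
static int fake_request_irq(int q) { return q == 2 ? -1 : 0; /* fail on q 2 */ }
static void fake_free_irq(int q)   { printf("freed irq for queue %d\n", q); }

/* Same unwind shape as request_msix_queue_irqs(): on failure, release
 * only the IRQs that were successfully requested, in reverse order. */
static int request_queue_irqs(void)
{
        int rxq, err;

        for (rxq = 0; rxq < NQUEUES; rxq++) {
                err = fake_request_irq(rxq);
                if (err)
                        goto err_unwind;
        }
        return 0;

err_unwind:
        while (--rxq >= 0)
                fake_free_irq(rxq);
        return err;
}

int main(void)
{
        return request_queue_irqs() ? 1 : 0;
}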
sge.c
1564 static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, in do_gro() argument
1567 struct adapter *adapter = rxq->rspq.adapter; in do_gro()
1573 skb = napi_get_frags(&rxq->rspq.napi); in do_gro()
1576 rxq->stats.rx_drops++; in do_gro()
1585 skb_record_rx_queue(skb, rxq->rspq.idx); in do_gro()
1591 rxq->stats.vlan_ex++; in do_gro()
1593 ret = napi_gro_frags(&rxq->rspq.napi); in do_gro()
1596 rxq->stats.lro_pkts++; in do_gro()
1598 rxq->stats.lro_merged++; in do_gro()
1599 rxq->stats.pkts++; in do_gro()
[all …]
/linux/drivers/net/ethernet/freescale/
fec_main.c
467 struct fec_enet_priv_rx_q *rxq, int size) in fec_enet_create_page_pool() argument
482 rxq->page_pool = page_pool_create(&pp_params); in fec_enet_create_page_pool()
483 if (IS_ERR(rxq->page_pool)) { in fec_enet_create_page_pool()
484 err = PTR_ERR(rxq->page_pool); in fec_enet_create_page_pool()
485 rxq->page_pool = NULL; in fec_enet_create_page_pool()
489 err = xdp_rxq_info_reg(&rxq->xdp_rxq, fep->netdev, rxq->id, 0); in fec_enet_create_page_pool()
493 err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL, in fec_enet_create_page_pool()
494 rxq->page_pool); in fec_enet_create_page_pool()
501 xdp_rxq_info_unreg(&rxq->xdp_rxq); in fec_enet_create_page_pool()
503 page_pool_destroy(rxq->page_pool); in fec_enet_create_page_pool()
[all …]
/linux/drivers/net/ethernet/alacritech/
slicoss.c
120 static unsigned int slic_get_free_rx_descs(struct slic_rx_queue *rxq) in slic_get_free_rx_descs() argument
122 return slic_get_free_queue_descs(rxq->put_idx, rxq->done_idx, rxq->len); in slic_get_free_rx_descs()
396 struct slic_rx_queue *rxq = &sdev->rxq; in slic_refill_rx_queue() local
405 while (slic_get_free_rx_descs(rxq) > SLIC_MAX_REQ_RX_DESCS) { in slic_refill_rx_queue()
437 buff = &rxq->rxbuffs[rxq->put_idx]; in slic_refill_rx_queue()
446 rxq->put_idx = slic_next_queue_idx(rxq->put_idx, rxq->len); in slic_refill_rx_queue()
549 struct slic_rx_queue *rxq = &sdev->rxq; in slic_handle_receive() local
559 while (todo && (rxq->done_idx != rxq->put_idx)) { in slic_handle_receive()
560 buff = &rxq->rxbuffs[rxq->done_idx]; in slic_handle_receive()
613 rxq->done_idx = slic_next_queue_idx(rxq->done_idx, rxq->len); in slic_handle_receive()
[all …]
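
Aside: slic_get_free_rx_descs() above derives the number of refillable descriptors from put_idx, done_idx and the queue length; for a queue whose length need not be a power of two, the generic form of that calculation is modular rather than masked. A sketch under that assumption (the formula below is the generic circular-queue one, not necessarily the exact slicoss helper):

#include <stdio.h>

/* Free descriptors in a circular queue of 'len' entries, where 'put' is the
 * producer index and 'done' the consumer index; one entry stays unused so
 * put == done means "empty". Generic modular form (assumed, not copied
 * from slicoss). */
static unsigned int queue_free_descs(unsigned int put, unsigned int done,
                                     unsigned int len)
{
        return (done + len - put - 1) % len;
}

int main(void)
{
        printf("%u\n", queue_free_descs(0, 0, 10));   /* empty: 9 free */
        printf("%u\n", queue_free_descs(4, 5, 10));   /* full: 0 free */
        return 0;
}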
/linux/include/net/
netdev_rx_queue.h
43 __netif_get_rx_queue(struct net_device *dev, unsigned int rxq) in __netif_get_rx_queue() argument
45 return dev->_rx + rxq; in __netif_get_rx_queue()
58 int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq);
