/linux/drivers/infiniband/hw/bnxt_re/

qplib_fp.c
   163  struct bnxt_qplib_nq *nq = nq_work->nq;   in bnxt_qpn_cqn_sched_task() [local]
   165  if (cq && nq) {   in bnxt_qpn_cqn_sched_task()
   167  if (atomic_read(&cq->arm_state) && nq->cqn_handler) {   in bnxt_qpn_cqn_sched_task()
   168  dev_dbg(&nq->pdev->dev,   in bnxt_qpn_cqn_sched_task()
   170  __func__, cq, nq);   in bnxt_qpn_cqn_sched_task()
   171  nq->cqn_handler(nq, cq);   in bnxt_qpn_cqn_sched_task()
   239  static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)   in clean_nq() [argument]
   241  struct bnxt_qplib_hwq *hwq = &nq->hwq;   in clean_nq()
   243  int budget = nq->budget;   in clean_nq()
   252  if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))   in clean_nq()
   [all …]
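The clean_nq() fragment stops as soon as NQE_CMP_VALID() fails, which is the usual hardware event-queue convention: each entry carries a valid/phase bit whose expected value flips on every pass through the ring, so the consumer can tell fresh entries from stale ones without a shared producer index. A minimal standalone sketch of that convention, with hypothetical names and layout (not the bnxt_re structures):

```c
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8 /* power of two, so the phase flip is easy to follow */

struct nq_entry {
	uint32_t data;
	uint8_t valid; /* producer writes the current phase bit here */
};

struct nq_ring {
	struct nq_entry ent[RING_SIZE];
	unsigned int cons; /* consumer index */
	uint8_t phase;     /* expected valid bit; flips on wraparound */
};

/* Consume up to 'budget' entries; stop at the first stale one. */
static int nq_poll(struct nq_ring *nq, int budget)
{
	int done = 0;

	while (budget--) {
		struct nq_entry *e = &nq->ent[nq->cons];

		if (e->valid != nq->phase)
			break; /* not yet written by the producer */

		printf("consumed %u\n", (unsigned)e->data);
		done++;

		if (++nq->cons == RING_SIZE) {
			nq->cons = 0;
			nq->phase ^= 1; /* valid bit meaning flips each lap */
		}
	}
	return done;
}

int main(void)
{
	struct nq_ring nq = { .phase = 1 };

	for (unsigned int i = 0; i < 3; i++)
		nq.ent[i] = (struct nq_entry){ .data = i, .valid = 1 };

	nq_poll(&nq, 16); /* prints 0, 1, 2 and stops at the stale slot 3 */
	return 0;
}
```

The phase flip on wraparound is what lets the producer avoid clearing consumed entries; it only ever overwrites them.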
qplib_fp.h
   448  struct bnxt_qplib_nq *nq;   [member]
   512  typedef int (*cqn_handler_t)(struct bnxt_qplib_nq *nq,
   514  typedef int (*srqn_handler_t)(struct bnxt_qplib_nq *nq,
   538  struct bnxt_qplib_nq *nq;   [member]
   542  void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill);
   543  void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq);
   544  int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
   546  int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
   589  void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
   590  int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq);
   [all …]
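These typedefs are the NQ's upcall interface: the consumer registers per-event callbacks and the NQ interrupt path dispatches through them (compare nq->cqn_handler(nq, cq) at qplib_fp.c:171 above, which also fixes the second parameter of cqn_handler_t). A standalone sketch of the same shape, with stand-in types rather than the real bnxt_qplib definitions:

```c
#include <stdio.h>

/* Hypothetical stand-ins for the bnxt_qplib structures in the listing. */
struct qplib_nq;
struct qplib_cq { int id; };

/* Mirrors the shape of cqn_handler_t: a callback invoked per CQ event. */
typedef int (*cqn_handler_t)(struct qplib_nq *nq, struct qplib_cq *cq);

struct qplib_nq {
	cqn_handler_t cqn_handler; /* filled in at enable time */
};

static int my_cqn_handler(struct qplib_nq *nq, struct qplib_cq *cq)
{
	(void)nq;
	printf("completion event on CQ %d\n", cq->id);
	return 0;
}

int main(void)
{
	struct qplib_nq nq = { .cqn_handler = my_cqn_handler };
	struct qplib_cq cq = { .id = 7 };

	/* The interrupt path does exactly this guarded dispatch. */
	if (nq.cqn_handler)
		nq.cqn_handler(&nq, &cq);
	return 0;
}
```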
main.c
   442  struct bnxt_qplib_nq *nq;   in bnxt_re_stop_irq() [local]
   459  nq = &rdev->nqr->nq[indx - 1];   in bnxt_re_stop_irq()
   460  bnxt_qplib_nq_stop_irq(nq, false);   in bnxt_re_stop_irq()
   472  struct bnxt_qplib_nq *nq;   in bnxt_re_start_irq() [local]
   503  nq = &rdev->nqr->nq[indx - 1];   in bnxt_re_start_irq()
   504  rc = bnxt_qplib_nq_start_irq(nq, indx - 1,   in bnxt_re_start_irq()
  1717  static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq,   in bnxt_re_srqn_handler() [argument]
  1736  static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,   in bnxt_re_cqn_handler() [argument]
  1753  bnxt_qplib_disable_nq(&rdev->nqr->nq[i - 1]);   in bnxt_re_cleanup_res()
  1771  rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nqr->nq[i - 1],   in bnxt_re_init_res()
   [all …]
bnxt_re.h
   155  struct bnxt_qplib_nq nq[BNXT_RE_MAX_MSIX];   [member]
ib_verbs.c
  1018  scq_nq = qplib_qp->scq->nq;   in bnxt_re_destroy_qp()
  1019  rcq_nq = qplib_qp->rcq->nq;   in bnxt_re_destroy_qp()
  1918  srq->qplib_srq.eventq_hw_ring_id = rdev->nqr->nq[0].ring_id;   in bnxt_re_create_srq()
  3081  if (rdev->nqr->nq[min].load > rdev->nqr->nq[indx].load)   in bnxt_re_get_nq()
  3084  rdev->nqr->nq[min].load++;   in bnxt_re_get_nq()
  3087  return &rdev->nqr->nq[min];   in bnxt_re_get_nq()
  3090  static void bnxt_re_put_nq(struct bnxt_re_dev *rdev, struct bnxt_qplib_nq *nq)   in bnxt_re_put_nq() [argument]
  3093  nq->load--;   in bnxt_re_put_nq()
  3101  struct bnxt_qplib_nq *nq;   in bnxt_re_destroy_cq() [local]
  3107  nq = cq->qplib_cq.nq;   in bnxt_re_destroy_cq()
   [all …]
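bnxt_re_get_nq() and bnxt_re_put_nq() implement simple least-loaded assignment: scan for the NQ with the smallest load count, bump it for the new CQ, and decrement when the CQ goes away. A standalone sketch of that bookkeeping (hypothetical types; the real driver presumably serializes these updates, which the fragments do not show):

```c
#include <stdio.h>

#define MAX_NQS 8

struct nq { int load; };

struct dev { struct nq nq[MAX_NQS]; int num_nqs; };

/* Pick the NQ with the smallest load and account for the new user. */
static struct nq *get_nq(struct dev *d)
{
	int indx, min = 0;

	for (indx = 0; indx < d->num_nqs; indx++)
		if (d->nq[min].load > d->nq[indx].load)
			min = indx;
	d->nq[min].load++;
	return &d->nq[min];
}

static void put_nq(struct nq *nq)
{
	nq->load--;
}

int main(void)
{
	struct dev d = { .num_nqs = 3 };
	struct nq *a = get_nq(&d), *b = get_nq(&d);

	printf("loads: %d %d %d\n", d.nq[0].load, d.nq[1].load, d.nq[2].load);
	put_nq(a);
	put_nq(b);
	return 0;
}
```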
/linux/drivers/net/ethernet/broadcom/bnge/

bnge_resc.c
   230  hwr->nq = hw_resc->resv_irqs;   in bnge_copy_reserved_rings()
   240  return hwr->tx && hwr->rx && hwr->nq && hwr->grp && hwr->vnic &&   in bnge_rings_ok()
   274  u16 nq = bd->nq_nr_rings;   in bnge_reserve_rings() [local]
   290  hwr.nq = nq + aux_msix;   in bnge_reserve_rings()
   292  hwr.nq = bnge_nqs_demand(bd);   in bnge_reserve_rings()
   325  hwr.nq = min_t(u16, hwr.nq, bd->nq_nr_rings);   in bnge_reserve_rings()
   328  hwr.nq = min_t(u16, hwr.nq, hwr.stat);   in bnge_reserve_rings()
   331  rc = bnge_adjust_rings(bd, &rx_rings, &hwr.tx, hwr.nq, sh);   in bnge_reserve_rings()
   335  hwr.nq = sh ? max_t(u16, tx_cp, rx_rings) : tx_cp + rx_rings;   in bnge_reserve_rings()
   343  bd->nq_nr_rings = hwr.nq;   in bnge_reserve_rings()
   [all …]
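The bnge_reserve_rings() fragments show the NQ sizing rules: with shared completion rings one NQ appears to cover a tx/rx pair (max of the two), otherwise each direction needs its own (sum), and the grant is clamped to both the original request and the available stat contexts. A standalone model of that arithmetic, with made-up numbers rather than real firmware values:

```c
#include <stdint.h>
#include <stdio.h>

static uint16_t min_u16(uint16_t a, uint16_t b) { return a < b ? a : b; }
static uint16_t max_u16(uint16_t a, uint16_t b) { return a > b ? a : b; }

/* Model of the NQ sizing in bnge_reserve_rings(): shared completion
 * rings let one NQ serve a tx/rx pair; otherwise both directions count. */
static uint16_t size_nqs(uint16_t tx_cp, uint16_t rx_rings, int shared,
			 uint16_t requested, uint16_t stat_ctxs)
{
	uint16_t nq = shared ? max_u16(tx_cp, rx_rings) : tx_cp + rx_rings;

	nq = min_u16(nq, requested); /* never exceed what was asked for */
	nq = min_u16(nq, stat_ctxs); /* each NQ seems to need a stat ctx */
	return nq;
}

int main(void)
{
	printf("shared: %u NQs\n", size_nqs(4, 8, 1, 16, 12)); /* max(4,8) = 8 */
	printf("split:  %u NQs\n", size_nqs(4, 8, 0, 16, 12)); /* 4 + 8 = 12 */
	return 0;
}
```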
bnge_resc.h
    50  u16 nq;   [member]
/linux/drivers/block/null_blk/

main.c
   844  ktime_t kt = cmd->nq->dev->completion_nsec;   in null_cmd_end_timer()
  1276  struct nullb *nullb = cmd->nq->dev->nullb;   in null_handle_data_transfer()
  1307  struct nullb_device *dev = cmd->nq->dev;   in null_handle_throttled()
  1339  struct badblocks *bb = &cmd->nq->dev->badblocks;   in null_handle_badblocks()
  1340  struct nullb_device *dev = cmd->nq->dev;   in null_handle_badblocks()
  1348  if (cmd->nq->dev->badblocks_once)   in null_handle_badblocks()
  1351  if (cmd->nq->dev->badblocks_partial_io) {   in null_handle_badblocks()
  1365  struct nullb_device *dev = cmd->nq->dev;   in null_handle_memory_backed()
  1376  struct nullb_device *dev = cmd->nq->dev;   in nullb_zero_read_cmd_buffer()
  1400  switch (cmd->nq->dev->irqmode) {   in nullb_complete_cmd()
   [all …]
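Every null_blk handler reaches device-wide configuration through the same two-hop chain, cmd->nq->dev: the command points at its queue, the queue back at the device. A hypothetical miniature of that back-pointer layout:

```c
#include <stdio.h>

/* Hypothetical miniature of the null_blk back-pointer chain: a command
 * knows its queue, the queue knows its device, so handlers can reach
 * device-wide settings as cmd->nq->dev->... without extra plumbing. */
struct nullb_device {
	unsigned long completion_nsec;
};

struct nullb_queue {
	struct nullb_device *dev;
};

struct nullb_cmd {
	struct nullb_queue *nq;
};

static void handle_cmd(struct nullb_cmd *cmd)
{
	struct nullb_device *dev = cmd->nq->dev;

	printf("complete after %lu ns\n", dev->completion_nsec);
}

int main(void)
{
	struct nullb_device dev = { .completion_nsec = 10000 };
	struct nullb_queue q = { .dev = &dev };
	struct nullb_cmd cmd = { .nq = &q };

	handle_cmd(&cmd);
	return 0;
}
```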
/linux/lib/crypto/

curve25519-hacl64.c
   544  ladder_smallloop_cmult_small_loop_step(u64 *nq, u64 *nqpq, u64 *nq2, u64 *nqpq2,   in ladder_smallloop_cmult_small_loop_step() [argument]
   549  point_swap_conditional(nq, nqpq, bit0);   in ladder_smallloop_cmult_small_loop_step()
   550  addanddouble_fmonty(nq2, nqpq2, nq, nqpq, q);   in ladder_smallloop_cmult_small_loop_step()
   556  ladder_smallloop_cmult_small_loop_double_step(u64 *nq, u64 *nqpq, u64 *nq2,   in ladder_smallloop_cmult_small_loop_double_step() [argument]
   560  ladder_smallloop_cmult_small_loop_step(nq, nqpq, nq2, nqpq2, q, byt);   in ladder_smallloop_cmult_small_loop_double_step()
   562  ladder_smallloop_cmult_small_loop_step(nq2, nqpq2, nq, nqpq, q, byt1);   in ladder_smallloop_cmult_small_loop_double_step()
   566  ladder_smallloop_cmult_small_loop(u64 *nq, u64 *nqpq, u64 *nq2, u64 *nqpq2,   in ladder_smallloop_cmult_small_loop() [argument]
   570  ladder_smallloop_cmult_small_loop_double_step(nq, nqpq, nq2,   in ladder_smallloop_cmult_small_loop()
   576  static __always_inline void ladder_bigloop_cmult_big_loop(u8 *n1, u64 *nq,   in ladder_bigloop_cmult_big_loop() [argument]
   583  ladder_smallloop_cmult_small_loop(nq, nqpq, nq2, nqpq2, q,   in ladder_bigloop_cmult_big_loop()
   [all …]
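point_swap_conditional() followed by addanddouble_fmonty() is the classic constant-time Montgomery ladder step: swap the two running points according to the current scalar bit, then perform the combined add-and-double. The swap must not branch on the secret bit; a standalone sketch of such a constant-time conditional swap (5 limbs is an assumption matching common 51-bit-limb curve25519 code, not necessarily this file's layout):

```c
#include <stdint.h>
#include <stdio.h>

/* Constant-time conditional swap: if bit == 1, swap a and b, with no
 * data-dependent branches. This is the job point_swap_conditional()
 * performs on the (nq, nqpq) pair at each ladder step. */
static void cswap(uint64_t a[5], uint64_t b[5], uint64_t bit)
{
	uint64_t mask = 0 - bit; /* all-zeros or all-ones */

	for (int i = 0; i < 5; i++) {
		uint64_t t = mask & (a[i] ^ b[i]);

		a[i] ^= t;
		b[i] ^= t;
	}
}

int main(void)
{
	uint64_t a[5] = { 1, 2, 3, 4, 5 }, b[5] = { 9, 9, 9, 9, 9 };

	cswap(a, b, 1);
	printf("a[0]=%llu b[0]=%llu\n",
	       (unsigned long long)a[0], (unsigned long long)b[0]);
	return 0;
}
```

The mask trick keeps the memory access pattern and instruction sequence identical whether or not the swap happens, which is what makes the ladder resistant to timing side channels.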
/linux/drivers/net/vmxnet3/

vmxnet3_xdp.c
   210  struct netdev_queue *nq;   in vmxnet3_xdp_xmit_back() [local]
   217  nq = netdev_get_tx_queue(adapter->netdev, tq->qid);   in vmxnet3_xdp_xmit_back()
   219  __netif_tx_lock(nq, smp_processor_id());   in vmxnet3_xdp_xmit_back()
   221  __netif_tx_unlock(nq);   in vmxnet3_xdp_xmit_back()
   233  struct netdev_queue *nq;   in vmxnet3_xdp_xmit() [local]
   245  nq = netdev_get_tx_queue(adapter->netdev, tq->qid);   in vmxnet3_xdp_xmit()
   247  __netif_tx_lock(nq, smp_processor_id());   in vmxnet3_xdp_xmit()
   255  __netif_tx_unlock(nq);   in vmxnet3_xdp_xmit()
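The vmxnet3 fragments show the locking discipline that recurs in every XDP transmit path in this listing (ionic, igc, aquantia, stmmac): look up the struct netdev_queue backing the driver's TX ring, take __netif_tx_lock() on the current CPU so the regular xmit path cannot race with the XDP injection, and unlock after posting. A kernel-style sketch of the shape, with a hypothetical driver context (not compilable outside a driver):

```c
#include <linux/netdevice.h>

/* Sketch of the recurring XDP-xmit shape; the drivers above differ only
 * in how they map their private ring to a queue index. */
static int example_xdp_xmit(struct net_device *netdev, int queue_index)
{
	struct netdev_queue *nq;

	nq = netdev_get_tx_queue(netdev, queue_index);

	__netif_tx_lock(nq, smp_processor_id());

	/* Keep the TX watchdog quiet while bypassing the regular xmit
	 * path (ionic and stmmac do this; see their fragments below). */
	txq_trans_cond_update(nq);

	/* ... post frames to the hardware ring here ... */

	__netif_tx_unlock(nq);
	return 0;
}
```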
/linux/lib/crypto/x86/

curve25519.h
   972  u64 *nq = p01_tmp1;   in point_add_and_double() [local]
   976  u64 *x2 = nq;   in point_add_and_double()
   977  u64 *z2 = nq + (u32)4U;   in point_add_and_double()
  1019  fmul2(nq, dc1, ab1, tmp2);   in point_add_and_double()
  1023  static void point_double(u64 *nq, u64 *tmp1, u64 *tmp2)   in point_double() [argument]
  1025  u64 *x2 = nq;   in point_double()
  1026  u64 *z2 = nq + (u32)4U;   in point_double()
  1043  fmul2(nq, dc, ab, tmp2);   in point_double()
/linux/drivers/net/ethernet/pensando/ionic/

ionic_txrx.c
   402  struct netdev_queue *nq;   in ionic_xdp_xmit() [local]
   423  nq = netdev_get_tx_queue(netdev, txq->index);   in ionic_xdp_xmit()
   424  __netif_tx_lock(nq, cpu);   in ionic_xdp_xmit()
   425  txq_trans_cond_update(nq);   in ionic_xdp_xmit()
   427  if (netif_tx_queue_stopped(nq) ||   in ionic_xdp_xmit()
   431  __netif_tx_unlock(nq);   in ionic_xdp_xmit()
   453  __netif_tx_unlock(nq);   in ionic_xdp_xmit()
   480  struct netdev_queue *nq;   in ionic_run_xdp() [local]
   555  nq = netdev_get_tx_queue(netdev, txq->index);   in ionic_run_xdp()
   556  __netif_tx_lock(nq, smp_processor_id());   in ionic_run_xdp()
   [all …]
/linux/drivers/net/ethernet/chelsio/cxgb4/

cxgb4_uld.c
   111  unsigned int nq = rxq_info->nrxq + rxq_info->nciq;   in alloc_uld_rxqs() [local]
   125  for (i = 0; i < nq; i++, q++) {   in alloc_uld_rxqs()
   403  int nq = txq_info->ntxq;   in free_sge_txq_uld() [local]
   406  for (i = 0; i < nq; i++) {   in free_sge_txq_uld()
   426  int nq = txq_info->ntxq;   in alloc_sge_txq_uld() [local]
   429  j = nq / adap->params.nports;   in alloc_sge_txq_uld()
   430  for (i = 0; i < nq; i++) {   in alloc_sge_txq_uld()
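alloc_sge_txq_uld() spreads nq queues evenly across the adapter's ports: j = nq / nports queues per port, so (inferring the mapping from the fragments) queue i would serve port i / j. A standalone model with made-up counts:

```c
#include <stdio.h>

/* Model of the txq spread in alloc_sge_txq_uld(): nq queues split
 * evenly over nports ports, j = nq / nports queues per port. The
 * queue-to-port mapping i / j is an inference, and the numbers are
 * made up for illustration. */
int main(void)
{
	int nq = 8, nports = 2;
	int j = nq / nports;

	for (int i = 0; i < nq; i++)
		printf("txq %d -> port %d\n", i, i / j);
	return 0;
}
```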
/linux/drivers/net/ethernet/intel/igc/

igc_tsn.c
    95  struct netdev_queue *nq;   in igc_fpe_xmit_smd_frame() [local]
   101  nq = txring_txq(ring);   in igc_fpe_xmit_smd_frame()
   109  __netif_tx_lock(nq, cpu);   in igc_fpe_xmit_smd_frame()
   114  __netif_tx_unlock(nq);   in igc_fpe_xmit_smd_frame()
/linux/drivers/infiniband/hw/bng_re/

bng_re.h
    46  struct bng_re_nq nq[BNG_RE_MAX_MSIX];   [member]
/linux/drivers/net/ethernet/marvell/

mv643xx_eth.c
   496  struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);   in txq_maybe_wake() [local]
   498  if (netif_tx_queue_stopped(nq)) {   in txq_maybe_wake()
   499  __netif_tx_lock(nq, smp_processor_id());   in txq_maybe_wake()
   501  netif_tx_wake_queue(nq);   in txq_maybe_wake()
   502  __netif_tx_unlock(nq);   in txq_maybe_wake()
   998  struct netdev_queue *nq;   in mv643xx_eth_xmit() [local]
  1002  nq = netdev_get_tx_queue(dev, queue);   in mv643xx_eth_xmit()
  1021  netif_tx_stop_queue(nq);   in mv643xx_eth_xmit()
  1035  struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);   in txq_kick() [local]
  1039  __netif_tx_lock(nq, smp_processor_id());   in txq_kick()
   [all …]
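mv643xx_eth shows both halves of TX flow control: the xmit path stops the queue when descriptors run low (netif_tx_stop_queue()), and the completion path re-checks under the tx lock before waking it (txq_maybe_wake()). A kernel-style sketch with hypothetical thresholds and a caller-supplied free-descriptor count:

```c
#include <linux/netdevice.h>

/* Both halves of TX queue flow control as seen in mv643xx_eth; the
 * thresholds and the free_desc parameter are illustrative, not the
 * driver's real bookkeeping. */

#define TX_STOP_THRESH	4	/* stop when fewer free descriptors remain */
#define TX_WAKE_THRESH	8	/* wake once at least this many are free */

/* xmit path: ring is nearly full, tell the stack to stop feeding us. */
static void example_tx_maybe_stop(struct netdev_queue *nq, int free_desc)
{
	if (free_desc < TX_STOP_THRESH)
		netif_tx_stop_queue(nq);
}

/* completion path: runs outside the xmit lock, so take it before
 * re-checking and waking (mirrors txq_maybe_wake() above; a real
 * driver would recount free descriptors under the lock). */
static void example_tx_maybe_wake(struct netdev_queue *nq, int free_desc)
{
	if (netif_tx_queue_stopped(nq)) {
		__netif_tx_lock(nq, smp_processor_id());
		if (free_desc >= TX_WAKE_THRESH)
			netif_tx_wake_queue(nq);
		__netif_tx_unlock(nq);
	}
}
```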
/linux/drivers/net/ethernet/chelsio/cxgb4vf/

t4vf_hw.c
  1272  int nq = min(n, 32);   in t4vf_config_rss_range() [local]
  1279  cmd.niqid = cpu_to_be16(nq);   in t4vf_config_rss_range()
  1285  start += nq;   in t4vf_config_rss_range()
  1286  n -= nq;   in t4vf_config_rss_range()
  1293  while (nq > 0) {   in t4vf_config_rss_range()
  1302  int nqbuf = min(3, nq);   in t4vf_config_rss_range()
  1304  nq -= nqbuf;   in t4vf_config_rss_range()
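t4vf_config_rss_range() writes the RSS table in two levels of chunking: at most 32 queue ids per firmware command, and up to 3 ids packed into each command word. A standalone model of just the loop structure, with the firmware write stubbed out as a printf:

```c
#include <stdio.h>

/* Shape of the t4vf_config_rss_range() loop: n queue ids are sent in
 * commands of at most 32 ids, and each command word packs up to 3 ids.
 * The chunk sizes mirror the fragments above; the send is a stub. */
int main(void)
{
	int n = 70, start = 0;

	while (n > 0) {
		int nq = n < 32 ? n : 32; /* ids in this command */
		int left = nq;

		printf("cmd: %d ids starting at %d\n", nq, start);
		while (left > 0) {
			int nqbuf = left < 3 ? left : 3; /* ids per word */

			printf("  word packs %d ids\n", nqbuf);
			left -= nqbuf;
		}
		start += nq;
		n -= nq;
	}
	return 0;
}
```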
/linux/drivers/net/

tap.c
   127  struct tap_queue *nq;   in tap_disable_queue() [local]
   138  nq = rtnl_dereference(tap->taps[tap->numvtaps - 1]);   in tap_disable_queue()
   139  nq->queue_index = index;   in tap_disable_queue()
   141  rcu_assign_pointer(tap->taps[index], nq);   in tap_disable_queue()
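tap_disable_queue() compacts the queue array by moving the last live queue into the vacated slot: read the tail with rtnl_dereference() (RTNL serializes writers), patch its index, and publish the move with rcu_assign_pointer() so lockless readers always see a consistent pointer. A kernel-style sketch over a hypothetical miniature of the array:

```c
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>

/* Hypothetical miniature of the tap queue array; only the fields the
 * compaction touches. */
struct example_queue {
	u16 queue_index;
};

struct example_dev {
	struct example_queue __rcu *taps[16];
	int numvtaps;
};

/* Fill slot 'index' (just vacated) with the last live queue. The caller
 * holds RTNL, as tap_disable_queue() does, so rtnl_dereference() is the
 * right accessor for reading our own protected updates. */
static void example_compact(struct example_dev *d, u16 index)
{
	struct example_queue *nq;

	nq = rtnl_dereference(d->taps[d->numvtaps - 1]);
	nq->queue_index = index;

	rcu_assign_pointer(d->taps[index], nq);
	RCU_INIT_POINTER(d->taps[d->numvtaps - 1], NULL);
	d->numvtaps--;
}
```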
/linux/arch/s390/mm/

pgtable.c
   760  unsigned char key, bool nq)   in set_guest_storage_key() [argument]
   817  page_set_storage_key(paddr, skey, !nq);   in set_guest_storage_key()
   841  bool nq, bool mr, bool mc)   in cond_set_guest_storage_key() [argument]
   860  rc = set_guest_storage_key(current->mm, addr, key, nq);   in cond_set_guest_storage_key()
/linux/drivers/net/ethernet/aquantia/atlantic/

aq_nic.c
   837  struct netdev_queue *nq;   in aq_nic_xmit_xdpf() [local]
   848  nq = netdev_get_tx_queue(ndev, tx_ring->idx);   in aq_nic_xmit_xdpf()
   849  __netif_tx_lock(nq, cpu);   in aq_nic_xmit_xdpf()
   862  __netif_tx_unlock(nq);   in aq_nic_xmit_xdpf()
/linux/net/sched/

sch_api.c
   322  struct netdev_queue *nq;   in qdisc_lookup_rcu() [local]
   331  nq = dev_ingress_queue_rcu(dev);   in qdisc_lookup_rcu()
   332  if (nq)   in qdisc_lookup_rcu()
   333  q = qdisc_match_from_root(rcu_dereference(nq->qdisc_sleeping),   in qdisc_lookup_rcu()
/linux/drivers/net/ethernet/stmicro/stmmac/

stmmac_main.c
  2602  struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);   in stmmac_xdp_xmit_zc() [local]
  2614  txq_trans_cond_update(nq);   in stmmac_xdp_xmit_zc()
  5131  struct netdev_queue *nq;   in stmmac_xdp_xmit_back() [local]
  5139  nq = netdev_get_tx_queue(priv->dev, queue);   in stmmac_xdp_xmit_back()
  5141  __netif_tx_lock(nq, cpu);   in stmmac_xdp_xmit_back()
  5143  txq_trans_cond_update(nq);   in stmmac_xdp_xmit_back()
  5149  __netif_tx_unlock(nq);   in stmmac_xdp_xmit_back()
  6812  struct netdev_queue *nq;   in stmmac_xdp_xmit() [local]
  6823  nq = netdev_get_tx_queue(priv->dev, queue);   in stmmac_xdp_xmit()
  6825  __netif_tx_lock(nq, cpu);   in stmmac_xdp_xmit()
   [all …]
/linux/drivers/net/ethernet/freescale/dpaa2/

dpaa2-eth.c
  1404  struct netdev_queue *nq;   in __dpaa2_eth_tx() [local]
  1483  nq = netdev_get_tx_queue(net_dev, queue_mapping);   in __dpaa2_eth_tx()
  1484  netdev_tx_sent_queue(nq, fd_len);   in __dpaa2_eth_tx()
  1506  netdev_tx_completed_queue(nq, 1, fd_len);   in __dpaa2_eth_tx()
  1947  struct netdev_queue *nq;   in dpaa2_eth_poll() [local]
  2034  nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);   in dpaa2_eth_poll()
  2035  netdev_tx_completed_queue(nq, txc_fq->dq_frames,   in dpaa2_eth_poll()
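dpaa2 pairs netdev_tx_sent_queue() at submission with netdev_tx_completed_queue() at TX-confirm, which is the Byte Queue Limits contract: every byte reported as sent must eventually be reported as completed, or BQL keeps the queue throttled. A sketch of the pairing, assuming hypothetical driver entry points:

```c
#include <linux/netdevice.h>

/* BQL bookkeeping as used in __dpaa2_eth_tx()/dpaa2_eth_poll(): report
 * bytes when a frame is enqueued and again when hardware confirms it.
 * 'fd_len' is whatever byte count the driver charged at submit time. */

static void example_tx_submit(struct netdev_queue *nq, unsigned int fd_len)
{
	/* May stop the queue internally once the byte limit is reached. */
	netdev_tx_sent_queue(nq, fd_len);
}

static void example_tx_confirm(struct netdev_queue *nq,
			       unsigned int frames, unsigned int bytes)
{
	/* Must mirror every byte charged at submit, or BQL throttles
	 * the queue indefinitely. */
	netdev_tx_completed_queue(nq, frames, bytes);
}
```

Keeping the two counts symmetric per queue is the design constraint; dpaa2 batches the completion side in its NAPI poll loop (dq_frames/dq_bytes) rather than calling it per frame.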
/linux/arch/s390/include/asm/

pgtable.h
  1378  unsigned char key, bool nq);
  1381  bool nq, bool mr, bool mc);
/linux/drivers/net/ethernet/broadcom/bnx2x/

bnx2x_cmn.c
    63  int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();   in bnx2x_calc_num_queues() [local]
    67  nq = 1;   in bnx2x_calc_num_queues()
    69  nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));   in bnx2x_calc_num_queues()
    70  return nq;   in bnx2x_calc_num_queues()