| /linux/drivers/net/ethernet/broadcom/bnge/ |
| bnge_resc.c |
  230  hwr->nq = hw_resc->resv_irqs;  in bnge_copy_reserved_rings()
  240  return hwr->tx && hwr->rx && hwr->nq && hwr->grp && hwr->vnic &&  in bnge_rings_ok()
  274  u16 nq = bd->nq_nr_rings;  in bnge_reserve_rings() local
  290  hwr.nq = nq + aux_msix;  in bnge_reserve_rings()
  292  hwr.nq = bnge_nqs_demand(bd);  in bnge_reserve_rings()
  325  hwr.nq = min_t(u16, hwr.nq, bd->nq_nr_rings);  in bnge_reserve_rings()
  328  hwr.nq = min_t(u16, hwr.nq, hwr.stat);  in bnge_reserve_rings()
  331  rc = bnge_adjust_rings(bd, &rx_rings, &hwr.tx, hwr.nq, sh);  in bnge_reserve_rings()
  335  hwr.nq = sh ? max_t(u16, tx_cp, rx_rings) : tx_cp + rx_rings;  in bnge_reserve_rings()
  343  bd->nq_nr_rings = hwr.nq;  in bnge_reserve_rings()
  [all …]
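The bnge hits trace how the reserved NQ (notification queue) count is derived and clamped inside bnge_reserve_rings(): capped by what was requested and by available stat contexts, then recomputed from the TX/RX counts once the rings are adjusted. A minimal standalone sketch of that arithmetic, with illustrative values and names (not the driver's real types):

```c
#include <stdio.h>

static unsigned short min_u16(unsigned short a, unsigned short b)
{
	return a < b ? a : b;
}

static unsigned short max_u16(unsigned short a, unsigned short b)
{
	return a > b ? a : b;
}

int main(void)
{
	unsigned short requested_nq = 8, stat_ctxs = 6;	/* illustrative */
	unsigned short tx_cp = 4, rx_rings = 5;
	int shared = 1;		/* TX and RX share notification queues */
	unsigned short nq = 16;	/* value granted by firmware */

	nq = min_u16(nq, requested_nq);	/* never exceed what was asked for */
	nq = min_u16(nq, stat_ctxs);	/* each NQ needs a stat context */

	/* after ring adjustment: shared means one NQ covers a TX/RX pair */
	nq = shared ? max_u16(tx_cp, rx_rings) : tx_cp + rx_rings;

	printf("reserved nq = %u\n", nq);
	return 0;
}
```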
|
| bnge_resc.h |
  50  u16 nq;  member
|
| /linux/drivers/block/null_blk/ |
| main.c |
  854   ktime_t kt = cmd->nq->dev->completion_nsec;  in null_cmd_end_timer()
  1286  struct nullb *nullb = cmd->nq->dev->nullb;  in null_handle_data_transfer()
  1317  struct nullb_device *dev = cmd->nq->dev;  in null_handle_throttled()
  1349  struct badblocks *bb = &cmd->nq->dev->badblocks;  in null_handle_badblocks()
  1350  struct nullb_device *dev = cmd->nq->dev;  in null_handle_badblocks()
  1358  if (cmd->nq->dev->badblocks_once)  in null_handle_badblocks()
  1361  if (cmd->nq->dev->badblocks_partial_io) {  in null_handle_badblocks()
  1375  struct nullb_device *dev = cmd->nq->dev;  in null_handle_memory_backed()
  1386  struct nullb_device *dev = cmd->nq->dev;  in nullb_zero_read_cmd_buffer()
  1410  switch (cmd->nq->dev->irqmode) {  in nullb_complete_cmd()
  [all …]
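Nearly every null_blk hit reaches device configuration through the same two-hop chain, cmd->nq->dev: a command points at its queue, and the queue points back at the owning device. An abbreviated sketch of that ownership layout (the real structs in drivers/block/null_blk/null_blk.h carry many more fields; only the path these hits walk is shown):

```c
/* Abbreviated, illustrative versions of the null_blk structs. */
struct nullb_device {
	unsigned long completion_nsec;	/* read by null_cmd_end_timer() */
	unsigned int irqmode;		/* switched on in nullb_complete_cmd() */
};

struct nullb_queue {
	struct nullb_device *dev;	/* back-pointer to the owning device */
};

struct nullb_cmd {
	struct nullb_queue *nq;		/* queue this command was issued on */
};

/* Every per-command handler resolves settings the same way: */
static inline unsigned int cmd_irqmode(struct nullb_cmd *cmd)
{
	return cmd->nq->dev->irqmode;
}
```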
|
| zoned.c |
  353  struct nullb_device *dev = cmd->nq->dev;  in null_zone_write()
  649  struct nullb_device *dev = cmd->nq->dev;  in null_zone_mgmt()
  727  dev = cmd->nq->dev;  in null_process_zoned_cmd()
|
| /linux/lib/crypto/ |
| curve25519-hacl64.c |
  544  ladder_smallloop_cmult_small_loop_step(u64 *nq, u64 *nqpq, u64 *nq2, u64 *nqpq2,  in ladder_smallloop_cmult_small_loop_step() argument
  549  point_swap_conditional(nq, nqpq, bit0);  in ladder_smallloop_cmult_small_loop_step()
  550  addanddouble_fmonty(nq2, nqpq2, nq, nqpq, q);  in ladder_smallloop_cmult_small_loop_step()
  556  ladder_smallloop_cmult_small_loop_double_step(u64 *nq, u64 *nqpq, u64 *nq2,  in ladder_smallloop_cmult_small_loop_double_step() argument
  560  ladder_smallloop_cmult_small_loop_step(nq, nqpq, nq2, nqpq2, q, byt);  in ladder_smallloop_cmult_small_loop_double_step()
  562  ladder_smallloop_cmult_small_loop_step(nq2, nqpq2, nq, nqpq, q, byt1);  in ladder_smallloop_cmult_small_loop_double_step()
  566  ladder_smallloop_cmult_small_loop(u64 *nq, u64 *nqpq, u64 *nq2, u64 *nqpq2,  in ladder_smallloop_cmult_small_loop() argument
  570  ladder_smallloop_cmult_small_loop_double_step(nq, nqpq, nq2,  in ladder_smallloop_cmult_small_loop()
  576  static __always_inline void ladder_bigloop_cmult_big_loop(u8 *n1, u64 *nq,  in ladder_bigloop_cmult_big_loop() argument
  583  ladder_smallloop_cmult_small_loop(nq, nqpq, nq2, nqpq2, q,  in ladder_bigloop_cmult_big_loop()
  [all …]
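These HACL* ladder hits all revolve around one idiom: point_swap_conditional() swaps the two ladder points (nq, nqpq) based on a scalar bit before each Montgomery step, so the sequence of field operations is independent of the secret scalar. A self-contained sketch of such a constant-time conditional swap over 5-limb field elements (illustrative only, not the routine from this file):

```c
#include <stdint.h>
#include <stdio.h>

/* Constant-time conditional swap of two 5-limb field elements.
 * `bit` must be 0 or 1; mask becomes all-zeros or all-ones, so the
 * same loads and stores happen regardless of the secret bit. */
static void swap_conditional(uint64_t a[5], uint64_t b[5], uint64_t bit)
{
	uint64_t mask = 0 - bit;	/* 0x0 or 0xffffffffffffffff */

	for (int i = 0; i < 5; i++) {
		uint64_t x = mask & (a[i] ^ b[i]);
		a[i] ^= x;
		b[i] ^= x;
	}
}

int main(void)
{
	uint64_t p[5] = { 1, 2, 3, 4, 5 };
	uint64_t q[5] = { 9, 8, 7, 6, 5 };

	swap_conditional(p, q, 1);	/* swapped */
	swap_conditional(p, q, 0);	/* left alone */
	printf("p[0]=%llu q[0]=%llu\n",
	       (unsigned long long)p[0], (unsigned long long)q[0]);
	return 0;
}
```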
|
| /linux/drivers/net/vmxnet3/ |
| vmxnet3_xdp.c |
  210  struct netdev_queue *nq;  in vmxnet3_xdp_xmit_back() local
  217  nq = netdev_get_tx_queue(adapter->netdev, tq->qid);  in vmxnet3_xdp_xmit_back()
  219  __netif_tx_lock(nq, smp_processor_id());  in vmxnet3_xdp_xmit_back()
  221  __netif_tx_unlock(nq);  in vmxnet3_xdp_xmit_back()
  233  struct netdev_queue *nq;  in vmxnet3_xdp_xmit() local
  245  nq = netdev_get_tx_queue(adapter->netdev, tq->qid);  in vmxnet3_xdp_xmit()
  247  __netif_tx_lock(nq, smp_processor_id());  in vmxnet3_xdp_xmit()
  255  __netif_tx_unlock(nq);  in vmxnet3_xdp_xmit()
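The vmxnet3 hits, like the ionic, igc, igb, fec, and stmmac entries further down this list, follow one recurring XDP transmit idiom: resolve the netdev_queue for the chosen TX ring, take its xmit lock pinned to the current CPU, post descriptors, unlock. A minimal kernel-style sketch of the pattern; the ring type and post routine are hypothetical placeholders, while the netdev_queue helpers are the real <linux/netdevice.h> API:

```c
#include <linux/netdevice.h>
#include <net/xdp.h>

/* Hypothetical ring type and descriptor-post routine, standing in
 * for a real driver's own. */
struct my_tx_ring { unsigned int index; };
int my_post_frame(struct my_tx_ring *ring, struct xdp_frame *frame);

static int my_xdp_xmit_one(struct net_device *ndev, struct my_tx_ring *ring,
			   struct xdp_frame *frame)
{
	struct netdev_queue *nq = netdev_get_tx_queue(ndev, ring->index);
	int rc;

	__netif_tx_lock(nq, smp_processor_id());
	txq_trans_cond_update(nq);	/* refresh trans_start so the TX watchdog stays quiet */
	rc = my_post_frame(ring, frame);
	__netif_tx_unlock(nq);

	return rc;
}
```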
|
| /linux/lib/crypto/x86/ |
| curve25519.h |
  972   u64 *nq = p01_tmp1;  in point_add_and_double() local
  976   u64 *x2 = nq;  in point_add_and_double()
  977   u64 *z2 = nq + (u32)4U;  in point_add_and_double()
  1019  fmul2(nq, dc1, ab1, tmp2);  in point_add_and_double()
  1023  static void point_double(u64 *nq, u64 *tmp1, u64 *tmp2)  in point_double() argument
  1025  u64 *x2 = nq;  in point_double()
  1026  u64 *z2 = nq + (u32)4U;  in point_double()
  1043  fmul2(nq, dc, ab, tmp2);  in point_double()
|
| /linux/drivers/net/ethernet/engleder/ |
| tsnep_main.c |
  349  struct netdev_queue *nq;  in tsnep_tx_enable() local
  351  nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);  in tsnep_tx_enable()
  353  __netif_tx_lock_bh(nq);  in tsnep_tx_enable()
  354  netif_tx_wake_queue(nq);  in tsnep_tx_enable()
  355  __netif_tx_unlock_bh(nq);  in tsnep_tx_enable()
  360  struct netdev_queue *nq;  in tsnep_tx_disable() local
  363  nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);  in tsnep_tx_disable()
  365  __netif_tx_lock_bh(nq);  in tsnep_tx_disable()
  366  netif_tx_stop_queue(nq);  in tsnep_tx_disable()
  367  __netif_tx_unlock_bh(nq);  in tsnep_tx_disable()
  [all …]
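tsnep takes the same queue lock but with the _bh variants: tsnep_tx_enable()/tsnep_tx_disable() run in process context and must fence against the softirq transmit path. A sketch of the stop side under that assumption (the function name is illustrative; drain handling is omitted):

```c
#include <linux/netdevice.h>

/* Process-context stop of a TX queue; the _bh lock variants disable
 * bottom halves so the softirq xmit path cannot race in. */
static void my_tx_disable(struct net_device *ndev, unsigned int queue_index)
{
	struct netdev_queue *nq = netdev_get_tx_queue(ndev, queue_index);

	__netif_tx_lock_bh(nq);
	netif_tx_stop_queue(nq);
	__netif_tx_unlock_bh(nq);
}
```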
|
| /linux/drivers/net/ethernet/pensando/ionic/ |
| ionic_txrx.c |
  402  struct netdev_queue *nq;  in ionic_xdp_xmit() local
  423  nq = netdev_get_tx_queue(netdev, txq->index);  in ionic_xdp_xmit()
  424  __netif_tx_lock(nq, cpu);  in ionic_xdp_xmit()
  425  txq_trans_cond_update(nq);  in ionic_xdp_xmit()
  427  if (netif_tx_queue_stopped(nq) ||  in ionic_xdp_xmit()
  431  __netif_tx_unlock(nq);  in ionic_xdp_xmit()
  453  __netif_tx_unlock(nq);  in ionic_xdp_xmit()
  480  struct netdev_queue *nq;  in ionic_run_xdp() local
  555  nq = netdev_get_tx_queue(netdev, txq->index);  in ionic_run_xdp()
  556  __netif_tx_lock(nq, smp_processor_id());  in ionic_run_xdp()
  [all …]
|
| /linux/drivers/net/ethernet/chelsio/cxgb4/ |
| cxgb4_uld.c |
  111  unsigned int nq = rxq_info->nrxq + rxq_info->nciq;  in alloc_uld_rxqs() local
  125  for (i = 0; i < nq; i++, q++) {  in alloc_uld_rxqs()
  402  int nq = txq_info->ntxq;  in free_sge_txq_uld() local
  405  for (i = 0; i < nq; i++) {  in free_sge_txq_uld()
  425  int nq = txq_info->ntxq;  in alloc_sge_txq_uld() local
  428  j = nq / adap->params.nports;  in alloc_sge_txq_uld()
  429  for (i = 0; i < nq; i++) {  in alloc_sge_txq_uld()
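The cxgb4 ULD hits show two counts being combined and then spread: alloc_uld_rxqs() walks nq = nrxq + nciq total queues, while alloc_sge_txq_uld() splits ntxq evenly across ports via the j = nq / nports stride. A standalone sketch of one plausible even split with illustrative numbers (the port binding shown is an assumption, not lifted from the driver):

```c
#include <stdio.h>

/* Spread nq TX queues across nports ports using a fixed stride,
 * mirroring the j = nq / nports computation in alloc_sge_txq_uld(). */
int main(void)
{
	int nq = 8, nports = 4;	/* chosen to divide evenly */
	int j = nq / nports;	/* queues per port */

	for (int i = 0; i < nq; i++)
		printf("txq %d -> port %d\n", i, i / j);
	return 0;
}
```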
|
| /linux/drivers/net/ethernet/intel/igc/ |
| igc_tsn.c |
  95   struct netdev_queue *nq;  in igc_fpe_xmit_smd_frame() local
  101  nq = txring_txq(ring);  in igc_fpe_xmit_smd_frame()
  109  __netif_tx_lock(nq, cpu);  in igc_fpe_xmit_smd_frame()
  114  __netif_tx_unlock(nq);  in igc_fpe_xmit_smd_frame()
|
| igc_main.c |
  2495  struct netdev_queue *nq;  in igc_xdp_xmit_back() local
  2503  nq = txring_txq(ring);  in igc_xdp_xmit_back()
  2505  __netif_tx_lock(nq, cpu);  in igc_xdp_xmit_back()
  2507  txq_trans_cond_update(nq);  in igc_xdp_xmit_back()
  2509  __netif_tx_unlock(nq);  in igc_xdp_xmit_back()
  2575  struct netdev_queue *nq;  in igc_finalize_xdp() local
  2580  nq = txring_txq(ring);  in igc_finalize_xdp()
  2582  __netif_tx_lock(nq, cpu);  in igc_finalize_xdp()
  2584  __netif_tx_unlock(nq);  in igc_finalize_xdp()
  3040  struct netdev_queue *nq = txring_txq(ring);  in igc_xdp_xmit_zc() local
  [all …]
|
| /linux/arch/s390/kvm/ |
| dat.c |
  654  union skey skey, bool nq)  in dat_set_storage_key() argument
  667  page_set_storage_key(large_crste_to_phys(*crstep, gfn), skey.skey, !nq);  in dat_set_storage_key()
  689  page_set_storage_key(pte_origin(*ptep), skey.skey, !nq);  in dat_set_storage_key()
  698  bool nq, bool mr, bool mc)  in page_cond_set_storage_key() argument
  704  page_set_storage_key(paddr, skey.skey, !nq);  in page_cond_set_storage_key()
  709  union skey skey, union skey *oldkey, bool nq, bool mr, bool mc)  in dat_cond_set_storage_key() argument
  724  nq, mr, mc);  in dat_cond_set_storage_key()
  736  rc = page_cond_set_storage_key(pte_origin(*ptep), skey, &prev, nq, mr, mc);  in dat_cond_set_storage_key()
|
| dat.h |
  535  union skey skey, bool nq);
  537  union skey skey, union skey *oldkey, bool nq, bool mr, bool mc);
|
| /linux/drivers/infiniband/hw/bng_re/ |
| bng_re.h |
  46  struct bng_re_nq nq[BNG_RE_MAX_MSIX];  member
|
| /linux/drivers/net/ethernet/freescale/ |
| fec_main.c |
  950   struct netdev_queue *nq;  in fec_enet_start_xmit() local
  955   nq = netdev_get_tx_queue(ndev, queue);  in fec_enet_start_xmit()
  966   netif_tx_stop_queue(nq);  in fec_enet_start_xmit()
  1490  struct netdev_queue *nq;  in fec_enet_xsk_xmit() local
  1497  nq = netdev_get_tx_queue(fep->netdev, queue);  in fec_enet_xsk_xmit()
  1498  __netif_tx_lock(nq, cpu);  in fec_enet_xsk_xmit()
  1500  txq_trans_cond_update(nq);  in fec_enet_xsk_xmit()
  1555  __netif_tx_unlock(nq);  in fec_enet_xsk_xmit()
  1560  __netif_tx_unlock(nq);  in fec_enet_xsk_xmit()
  1568  struct netdev_queue *nq = netdev_get_tx_queue(fep->netdev, queue);  in fec_enet_tx_queue() local
  [all …]
|
| /linux/drivers/net/ethernet/marvell/ |
| mv643xx_eth.c |
  496   struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);  in txq_maybe_wake() local
  498   if (netif_tx_queue_stopped(nq)) {  in txq_maybe_wake()
  499   __netif_tx_lock(nq, smp_processor_id());  in txq_maybe_wake()
  501   netif_tx_wake_queue(nq);  in txq_maybe_wake()
  502   __netif_tx_unlock(nq);  in txq_maybe_wake()
  998   struct netdev_queue *nq;  in mv643xx_eth_xmit() local
  1002  nq = netdev_get_tx_queue(dev, queue);  in mv643xx_eth_xmit()
  1021  netif_tx_stop_queue(nq);  in mv643xx_eth_xmit()
  1035  struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);  in txq_kick() local
  1039  __netif_tx_lock(nq, smp_processor_id());  in txq_kick()
  [all …]
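txq_maybe_wake() here, like mvpp2_txq_done() at the end of this list, shows the canonical restart path: only if the stack has stopped the queue do we take the lock, re-check for room, and wake it. A kernel-style sketch with the free-descriptor check left as a hypothetical placeholder:

```c
#include <linux/netdevice.h>

/* Hypothetical ring type and free-descriptor check. */
struct my_tx_ring { unsigned int index; };
bool my_ring_has_room(struct my_tx_ring *ring);

static void my_txq_maybe_wake(struct net_device *ndev, struct my_tx_ring *ring)
{
	struct netdev_queue *nq = netdev_get_tx_queue(ndev, ring->index);

	if (netif_tx_queue_stopped(nq)) {
		__netif_tx_lock(nq, smp_processor_id());
		if (my_ring_has_room(ring))	/* re-check under the lock */
			netif_tx_wake_queue(nq);
		__netif_tx_unlock(nq);
	}
}
```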
|
| /linux/drivers/net/ethernet/chelsio/cxgb4vf/ |
| t4vf_hw.c |
  1272  int nq = min(n, 32);  in t4vf_config_rss_range() local
  1279  cmd.niqid = cpu_to_be16(nq);  in t4vf_config_rss_range()
  1285  start += nq;  in t4vf_config_rss_range()
  1286  n -= nq;  in t4vf_config_rss_range()
  1293  while (nq > 0) {  in t4vf_config_rss_range()
  1302  int nqbuf = min(3, nq);  in t4vf_config_rss_range()
  1304  nq -= nqbuf;  in t4vf_config_rss_range()
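t4vf_config_rss_range() feeds the RSS indirection table to firmware in bounded chunks: at most 32 queue IDs per command (nq = min(n, 32)), packed up to three IDs per command word (nqbuf = min(3, nq)). A standalone sketch of just that chunking arithmetic, with printing standing in for building the firmware command:

```c
#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int n = 70, start = 0;	/* illustrative table size */

	while (n > 0) {
		int nq = min_int(n, 32);	/* IDs in this command */
		int id = start;

		start += nq;
		n -= nq;
		printf("command carrying %d ids:", nq);
		while (nq > 0) {
			int nqbuf = min_int(3, nq);	/* IDs in this word */

			nq -= nqbuf;
			printf(" [");
			while (nqbuf--)
				printf(" %d", id++);
			printf(" ]");
		}
		printf("\n");
	}
	return 0;
}
```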
|
| /linux/drivers/infiniband/hw/bnxt_re/ |
| bnxt_re.h |
  155  struct bnxt_qplib_nq nq[BNXT_RE_MAX_MSIX];  member
|
| /linux/drivers/net/ |
| tap.c |
  127  struct tap_queue *nq;  in tap_disable_queue() local
  138  nq = rtnl_dereference(tap->taps[tap->numvtaps - 1]);  in tap_disable_queue()
  139  nq->queue_index = index;  in tap_disable_queue()
  141  rcu_assign_pointer(tap->taps[index], nq);  in tap_disable_queue()
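tap_disable_queue() removes a queue from the flat taps[] array with a classic swap-remove: the last live entry (nq) moves into the vacated index so the array stays dense, and its recorded queue_index is patched to match. A standalone sketch of the same move on a plain array (minus the RCU pointer handling the driver needs):

```c
#include <stdio.h>
#include <stddef.h>

struct queue { int queue_index; };

/* Swap-remove: fill the freed slot with the last live queue. */
static void disable_queue(struct queue **taps, int *numvtaps, int index)
{
	struct queue *nq = taps[*numvtaps - 1];	/* last live queue */

	nq->queue_index = index;	/* it now answers for this slot */
	taps[index] = nq;
	taps[--*numvtaps] = NULL;
}

int main(void)
{
	struct queue q0 = { 0 }, q1 = { 1 }, q2 = { 2 };
	struct queue *taps[3] = { &q0, &q1, &q2 };
	int numvtaps = 3;

	disable_queue(taps, &numvtaps, 0);	/* drop slot 0; q2 moves in */
	printf("slot 0 now queue_index=%d, live=%d\n",
	       taps[0]->queue_index, numvtaps);
	return 0;
}
```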
|
| /linux/drivers/net/ethernet/intel/igb/ |
| igb_main.c |
  2961  struct netdev_queue *nq;  in igb_xdp_xmit_back() local
  2975  nq = txring_txq(tx_ring);  in igb_xdp_xmit_back()
  2976  __netif_tx_lock(nq, cpu);  in igb_xdp_xmit_back()
  2978  txq_trans_cond_update(nq);  in igb_xdp_xmit_back()
  2980  __netif_tx_unlock(nq);  in igb_xdp_xmit_back()
  2991  struct netdev_queue *nq;  in igb_xdp_xmit() local
  3012  nq = txring_txq(tx_ring);  in igb_xdp_xmit()
  3013  __netif_tx_lock(nq, cpu);  in igb_xdp_xmit()
  3016  txq_trans_cond_update(nq);  in igb_xdp_xmit()
  3031  __netif_tx_unlock(nq);  in igb_xdp_xmit()
  [all …]
|
| /linux/drivers/net/ethernet/intel/idpf/ |
| idpf_txrx.c |
  2284  struct netdev_queue *nq;  in idpf_tx_clean_complq() local
  2294  nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);  in idpf_tx_clean_complq()
  2299  __netif_txq_completed_wake(nq, tx_q->cleaned_pkts, tx_q->cleaned_bytes,  in idpf_tx_clean_complq()
  2477  struct netdev_queue *nq;  in idpf_tx_buf_hw_update() local
  2479  nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);  in idpf_tx_buf_hw_update()
  2490  if (netif_xmit_stopped(nq) || !xmit_more)  in idpf_tx_buf_hw_update()
  2650  struct netdev_queue *nq;  in idpf_tx_splitq_map() local
  2807  nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);  in idpf_tx_splitq_map()
  2808  netdev_tx_sent_queue(nq, first->bytes);  in idpf_tx_splitq_map()
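The idpf hits pair per-queue byte accounting on the send side (netdev_tx_sent_queue() in idpf_tx_splitq_map()) with a completed-and-wake step on the clean side. A minimal kernel-style sketch of the underlying byte-queue-limit pairing, using the plain completed-queue helper rather than idpf's combined __netif_txq_completed_wake() macro (function names are illustrative):

```c
#include <linux/netdevice.h>

/* BQL pairing: every byte reported at descriptor-post time must later
 * be reported completed on the same queue. */
static void my_report_sent(struct netdev_queue *nq, unsigned int bytes)
{
	netdev_tx_sent_queue(nq, bytes);	/* transmit path */
}

static void my_report_cleaned(struct netdev_queue *nq,
			      unsigned int pkts, unsigned int bytes)
{
	/* completion path; clears a BQL-induced stall once enough drains */
	netdev_tx_completed_queue(nq, pkts, bytes);
}
```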
|
| /linux/net/sched/ |
| sch_api.c |
  322  struct netdev_queue *nq;  in qdisc_lookup_rcu() local
  331  nq = dev_ingress_queue_rcu(dev);  in qdisc_lookup_rcu()
  332  if (nq)  in qdisc_lookup_rcu()
  333  q = qdisc_match_from_root(rcu_dereference(nq->qdisc_sleeping),  in qdisc_lookup_rcu()
|
| /linux/drivers/net/ethernet/stmicro/stmmac/ |
| stmmac_main.c |
  2692  struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);  in stmmac_xdp_xmit_zc() local
  2704  txq_trans_cond_update(nq);  in stmmac_xdp_xmit_zc()
  5263  struct netdev_queue *nq;  in stmmac_xdp_xmit_back() local
  5271  nq = netdev_get_tx_queue(priv->dev, queue);  in stmmac_xdp_xmit_back()
  5273  __netif_tx_lock(nq, cpu);  in stmmac_xdp_xmit_back()
  5275  txq_trans_cond_update(nq);  in stmmac_xdp_xmit_back()
  5290  __netif_tx_unlock(nq);  in stmmac_xdp_xmit_back()
  6957  struct netdev_queue *nq;  in stmmac_xdp_xmit() local
  6968  nq = netdev_get_tx_queue(priv->dev, queue);  in stmmac_xdp_xmit()
  6970  __netif_tx_lock(nq, cpu);  in stmmac_xdp_xmit()
  [all …]
|
| /linux/drivers/net/ethernet/marvell/mvpp2/ |
| mvpp2_main.c |
  2861  struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);  in mvpp2_txq_done() local
  2874  if (netif_tx_queue_stopped(nq))  in mvpp2_txq_done()
  2876  netif_tx_wake_queue(nq);  in mvpp2_txq_done()
  3372  static void mvpp2_isr_handle_ptp_queue(struct mvpp2_port *port, int nq)  in mvpp2_isr_handle_ptp_queue() argument
  3382  if (nq)  in mvpp2_isr_handle_ptp_queue()
  3385  queue = &port->tx_hwtstamp_queue[nq];  in mvpp2_isr_handle_ptp_queue()
  3643  struct netdev_queue *nq;  in mvpp2_xdp_finish_tx() local
  3647  nq = netdev_get_tx_queue(port->dev, txq_id);  in mvpp2_xdp_finish_tx()
  3659  netif_tx_stop_queue(nq);  in mvpp2_xdp_finish_tx()
  4434  struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);  in mvpp2_tx() local
  [all …]
|