/linux/drivers/net/ethernet/sun/

sunvnet.c
  125  ethtool_sprintf(&buf, "p%u.%s-%pM", port->q_index,  in vnet_get_strings()
  128  ethtool_sprintf(&buf, "p%u.rx_packets", port->q_index);  in vnet_get_strings()
  129  ethtool_sprintf(&buf, "p%u.tx_packets", port->q_index);  in vnet_get_strings()
  130  ethtool_sprintf(&buf, "p%u.rx_bytes", port->q_index);  in vnet_get_strings()
  131  ethtool_sprintf(&buf, "p%u.tx_bytes", port->q_index);  in vnet_get_strings()
  132  ethtool_sprintf(&buf, "p%u.event_up", port->q_index);  in vnet_get_strings()
  133  ethtool_sprintf(&buf, "p%u.event_reset", port->q_index);  in vnet_get_strings()
  167  data[i++] = port->q_index;  in vnet_get_ethtool_stats()
  231  return port->q_index;  in vnet_select_queue()

sunvnet_common.h
   90  u16 q_index;  member

sunvnet_common.c
   727  txq = netdev_get_tx_queue(dev, port->q_index);  in vnet_ack()
   765  port->q_index);  in maybe_tx_wakeup()
  1260  txq = netdev_get_tx_queue(dev, port->q_index);  in vnet_handle_offloads()
  1804  port->q_index = smallest;  in sunvnet_port_add_txq_common()
  1811  port->vp->q_used[port->q_index]--;  in sunvnet_port_rm_txq_common()
  1812  port->q_index = 0;  in sunvnet_port_rm_txq_common()

ldmvsw.c
  111  return port->q_index;  in vsw_select_queue()
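The sunvnet and ldmvsw hits show the two roles q_index plays there: it is the value the port's ndo_select_queue hook returns, and it prefixes that port's per-queue ethtool statistic names. A minimal sketch of that pattern follows; the demo_port structure, the netdev_priv() lookup, and the stat names are simplifications for illustration, not the real sunvnet layout.

#include <linux/netdevice.h>
#include <linux/ethtool.h>

/* Simplified stand-in for the per-port state kept by the driver. */
struct demo_port {
	u16 q_index;		/* TX queue this port is bound to */
};

/* ndo_select_queue: steer every skb for this port to its dedicated queue.
 * (The real driver resolves the port per-skb; netdev_priv() is a shortcut.)
 */
static u16 demo_select_queue(struct net_device *dev, struct sk_buff *skb,
			     struct net_device *sb_dev)
{
	struct demo_port *port = netdev_priv(dev);

	return port->q_index;
}

/* get_strings: emit per-port stat names prefixed with the queue index. */
static void demo_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct demo_port *port = netdev_priv(dev);

	if (stringset != ETH_SS_STATS)
		return;

	ethtool_sprintf(&buf, "p%u.rx_packets", port->q_index);
	ethtool_sprintf(&buf, "p%u.tx_packets", port->q_index);
}

Keeping the queue index in the port structure is what lets the get_strings and get_ethtool_stats callbacks stay in step with the queue actually chosen at transmit time.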
/linux/drivers/net/ethernet/intel/ice/

ice_arfs.c
  124  e->flow_id, e->fltr_info.q_index);  in ice_arfs_del_flow_rules()
  165  ep->arfs_entry->fltr_info.q_index);  in ice_arfs_add_flow_rules()
  185  if (rps_may_expire_flow(vsi->netdev, arfs_entry->fltr_info.q_index,  in ice_arfs_is_flow_expired()
  315  fltr_info->q_index = rxq_idx;  in ice_arfs_build_entry()
  453  if (fltr_info->q_index == rxq_idx ||  in ice_rx_flow_steer()
  458  fltr_info->q_index = rxq_idx;  in ice_rx_flow_steer()

ice_txrx.h
  320  u16 q_index; /* Queue number of ring */  member
  392  u16 q_index; /* Queue number of ring */  member

ice_trace.h
   76  __entry->q_vector->rx.rx_ring->q_index,
  103  __entry->q_vector->tx.tx_ring->q_index,

ice_ethtool_fdir.c
  1466  static void ice_update_per_q_fltr(struct ice_vsi *vsi, u32 q_index, bool inc)  in ice_update_per_q_fltr()  argument
  1470  if (!vsi->num_rxq || q_index >= vsi->num_rxq)  in ice_update_per_q_fltr()
  1473  rx_ring = vsi->rx_rings[q_index];  in ice_update_per_q_fltr()
  1833  s16 q_index = ICE_FDIR_NO_QUEUE_IDX;  in ice_set_fdir_input_set()  local
  1865  q_index = ring;  in ice_set_fdir_input_set()
  1869  input->q_index = q_index;  in ice_set_fdir_input_set()

ice_fdir.h
  191  s16 q_index;  member

ice_fdir.c
   673  fdir_fltr_ctx.qindex = input->q_index;  in ice_fdir_get_prgm_desc()
  1290  rule->q_index != input->q_index)  in ice_fdir_is_dup_fltr()

ice_txrx.c
   144  return netdev_get_tx_queue(ring->netdev, ring->q_index);  in txring_txq()
  1000  skb_record_rx_queue(skb, rx_ring->q_index);  in ice_build_skb()
  1050  skb_record_rx_queue(skb, rx_ring->q_index);  in ice_construct_skb()
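In the ice hits, fltr_info.q_index records which RX queue an aRFS filter steers to, and the expiry path hands that index to rps_may_expire_flow() so the stack can say whether the flow is still hot. A rough sketch of that check, assuming CONFIG_RFS_ACCEL and using an invented, much-reduced entry structure:

#include <linux/netdevice.h>

/* Invented, minimal aRFS entry; the real ice driver keeps far more state. */
struct demo_arfs_entry {
	struct net_device *netdev;
	u16 q_index;	/* RX queue the hardware filter steers to */
	u32 flow_id;	/* flow ID reported by the stack */
	u16 fltr_id;	/* software filter ID used for expiry tracking */
};

/* True when the stack no longer steers this flow to q_index and the
 * hardware filter can safely be torn down (requires CONFIG_RFS_ACCEL). */
static bool demo_arfs_flow_expired(struct demo_arfs_entry *e)
{
	return rps_may_expire_flow(e->netdev, e->q_index,
				   e->flow_id, e->fltr_id);
}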
/linux/net/core/

netpoll.c
  105  unsigned int q_index;  in queue_process()  local
  114  q_index = skb_get_queue_mapping(skb);  in queue_process()
  115  if (unlikely(q_index >= dev->real_num_tx_queues)) {  in queue_process()
  116  q_index = q_index % dev->real_num_tx_queues;  in queue_process()
  117  skb_set_queue_mapping(skb, q_index);  in queue_process()
  119  txq = netdev_get_tx_queue(dev, q_index);  in queue_process()
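The netpoll hits are the canonical guard for a stale queue mapping: an skb queued earlier may carry a q_index beyond the device's current real_num_tx_queues (for example after a queue-count change), so the index is reduced modulo the queue count before netdev_get_tx_queue() is used. A condensed sketch of that clamp:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Re-validate a possibly stale queue mapping before picking the TX queue. */
static struct netdev_queue *demo_pick_txq(struct net_device *dev,
					  struct sk_buff *skb)
{
	unsigned int q_index = skb_get_queue_mapping(skb);

	if (unlikely(q_index >= dev->real_num_tx_queues)) {
		/* The device shrank its queue count after the skb was queued. */
		q_index = q_index % dev->real_num_tx_queues;
		skb_set_queue_mapping(skb, q_index);
	}

	return netdev_get_tx_queue(dev, q_index);
}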
/linux/drivers/net/ethernet/cavium/liquidio/

octeon_iq.h
  173  int q_index;  member
  391  int q_index, union oct_txpciq iq_no, u32 num_descs,

request_manager.c
  193  int q_index,  in octeon_setup_iq()  argument
  217  oct->instr_queue[iq_no]->q_index = q_index;  in octeon_setup_iq()

lio_core.c
  525  if (__netif_subqueue_stopped(netdev, iq->q_index) &&  in lio_update_txq_status()
  528  netif_wake_subqueue(netdev, iq->q_index);  in lio_update_txq_status()
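liquidio stores the netdev TX queue number in each instruction queue's q_index so completion handling can wake the matching subqueue. A hedged sketch of that wake-up check; the structure layout and the threshold test are placeholders, not liquidio's actual fields:

#include <linux/netdevice.h>

/* Simplified instruction-queue state; the real octeon_instr_queue is larger. */
struct demo_iq {
	int q_index;		/* netdev TX queue backed by this IQ */
	u32 pending;		/* descriptors still in flight */
	u32 wake_threshold;	/* placeholder restart threshold */
};

/* Wake the matching TX subqueue once enough descriptors have completed. */
static void demo_update_txq_status(struct net_device *netdev,
				   struct demo_iq *iq)
{
	if (__netif_subqueue_stopped(netdev, iq->q_index) &&
	    iq->pending < iq->wake_threshold)
		netif_wake_subqueue(netdev, iq->q_index);
}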
/linux/drivers/net/ethernet/intel/iavf/

iavf_fdir.h
  122  u32 q_index;  member

iavf_ethtool.c
  1044  fsp->ring_cookie = rule->q_index;  in iavf_get_ethtool_fdir_entry()
  1104  u32 flow_type, q_index = 0;  in iavf_add_fdir_fltr_info()  local
  1111  q_index = fsp->ring_cookie;  in iavf_add_fdir_fltr_info()
  1112  if (q_index >= adapter->num_active_queues)  in iavf_add_fdir_fltr_info()
  1120  fltr->q_index = q_index;  in iavf_add_fdir_fltr_info()

iavf_fdir.c
  667  vc_msg->rule_cfg.action_set.actions[0].act_conf.queue.index = fltr->q_index;  in iavf_fill_fdir_add_msg()

iavf_main.c
  4151  unsigned int q_index;  in iavf_add_cls_u32()  local
  4212  q_index = tcf_skbedit_rx_queue_mapping(act);  in iavf_add_cls_u32()
  4213  if (q_index >= adapter->num_active_queues) {  in iavf_add_cls_u32()
  4219  vact->act_conf.queue.index = q_index;  in iavf_add_cls_u32()
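For iavf Flow Director rules, q_index comes straight from ethtool's ring_cookie and is bounds-checked against the number of active queues before the rule is stored (and later forwarded to the PF). A simplified sketch of that validation, with an invented rule structure standing in for iavf_fdir_fltr:

#include <linux/ethtool.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Illustrative rule representation; the real iavf_fdir_fltr holds much more. */
struct demo_fdir_rule {
	u32 q_index;	/* RX queue the matched flow is steered to */
};

/* Translate the ethtool ring_cookie into a validated queue index. */
static int demo_fdir_set_action(const struct ethtool_rx_flow_spec *fsp,
				u32 num_active_queues,
				struct demo_fdir_rule *rule)
{
	u64 q_index = fsp->ring_cookie;

	if (q_index >= num_active_queues)
		return -EINVAL;

	rule->q_index = q_index;
	return 0;
}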
/linux/drivers/gpu/drm/amd/amdkfd/

kfd_process_queue_manager.c
  867  unsigned int *q_index,  in criu_checkpoint_queues_device()  argument
  927  *q_index = *q_index + 1;  in criu_checkpoint_queues_device()
  939  int ret = 0, pdd_index, q_index = 0;  in kfd_criu_checkpoint_queues()  local
  948  ret = criu_checkpoint_queues_device(pdd, user_priv_data, &q_index,  in kfd_criu_checkpoint_queues()
/linux/drivers/net/ethernet/freescale/dpaa2/

dpni.h
  954  u8 q_index,
  963  u8 q_index,
/linux/drivers/scsi/ibmvscsi/

ibmvfc.c
  2493  int wait, i, q_index, q_size;  in ibmvfc_wait_for_ops()  local
  2510  for (q_index = 0; q_index < q_size; q_index++) {  in ibmvfc_wait_for_ops()
  2511  spin_lock(&queues[q_index].l_lock);  in ibmvfc_wait_for_ops()
  2512  for (i = 0; i < queues[q_index].evt_pool.size; i++) {  in ibmvfc_wait_for_ops()
  2513  evt = &queues[q_index].evt_pool.events[i];  in ibmvfc_wait_for_ops()
  2521  spin_unlock(&queues[q_index].l_lock);  in ibmvfc_wait_for_ops()
  2531  for (q_index = 0; q_index < q_size; q_index++) {  in ibmvfc_wait_for_ops()
  2532  spin_lock(&queues[q_index].l_lock);  in ibmvfc_wait_for_ops()
  2533  for (i = 0; i < queues[q_index].evt_pool.size; i++) {  in ibmvfc_wait_for_ops()
  2534  evt = &queues[q_index].evt_pool.events[i];  in ibmvfc_wait_for_ops()
  [all …]
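ibmvfc_wait_for_ops() walks the queues by q_index and scans each queue's event pool while holding that queue's own lock. A stripped-down sketch of the same per-queue iteration, with invented minimal types in place of the ibmvfc structures:

#include <linux/spinlock.h>
#include <linux/types.h>

/* Invented, minimal per-queue layout for illustration only. */
struct demo_event {
	bool busy;
};

struct demo_queue {
	spinlock_t l_lock;
	struct demo_event *events;
	int pool_size;
};

/* Count in-flight events across all queues, taking each queue's lock. */
static int demo_count_busy(struct demo_queue *queues, int q_size)
{
	int q_index, i, busy = 0;

	for (q_index = 0; q_index < q_size; q_index++) {
		spin_lock(&queues[q_index].l_lock);
		for (i = 0; i < queues[q_index].pool_size; i++)
			if (queues[q_index].events[i].busy)
				busy++;
		spin_unlock(&queues[q_index].l_lock);
	}

	return busy;
}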
/linux/drivers/scsi/pm8001/

pm80xx_hwi.c
  4300  u32 q_index;  in pm80xx_chip_ssp_io_req()  local
  4318  q_index = pm80xx_chip_get_q_index(task);  in pm80xx_chip_ssp_io_req()
  4389  task->ssp_task.cmd->cmnd[0], q_index);  in pm80xx_chip_ssp_io_req()
  4436  return pm8001_mpi_build_cmd(pm8001_ha, q_index, opc, &ssp_cmd,  in pm80xx_chip_ssp_io_req()
  4437  sizeof(ssp_cmd), q_index);  in pm80xx_chip_ssp_io_req()
  4447  u32 tag = ccb->ccb_tag, q_index;  in pm80xx_chip_sata_req()  local
  4457  q_index = pm80xx_chip_get_q_index(task);  in pm80xx_chip_sata_req()
  4562  sata_cmd.sata_fis.command, q_index);  in pm80xx_chip_sata_req()
  4635  return pm8001_mpi_build_cmd(pm8001_ha, q_index, opc, &sata_cmd,  in pm80xx_chip_sata_req()
  4636  sizeof(sata_cmd), q_index);  in pm80xx_chip_sata_req()

pm8001_sas.h
  657  u32 q_index, u32 opCode, void *payload, size_t nb,
/linux/drivers/net/ethernet/intel/idpf/

idpf_txrx.c
  4067  u32 i, qv_idx, q_index;  in idpf_vport_intr_map_vector_to_qs()  local
  4089  q_index = q->q_vector->num_rxq;  in idpf_vport_intr_map_vector_to_qs()
  4090  q->q_vector->rx[q_index] = q;  in idpf_vport_intr_map_vector_to_qs()
  4103  q_index = bufq->q_vector->num_bufq;  in idpf_vport_intr_map_vector_to_qs()
  4104  bufq->q_vector->bufq[q_index] = bufq;  in idpf_vport_intr_map_vector_to_qs()
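The idpf hits use q_index as a per-vector fill counter: each RX (or buffer) queue is stored in its q_vector's array at the current count. A hedged, simplified sketch of that assignment; the structures and the fixed array size are illustrative only:

#include <linux/types.h>

struct demo_queue;

/* Simplified stand-ins for the idpf queue and vector structures. */
struct demo_q_vector {
	u16 num_rxq;			/* RX queues mapped to this vector so far */
	struct demo_queue *rx[16];	/* arbitrary illustrative capacity */
};

struct demo_queue {
	struct demo_q_vector *q_vector;
};

/* Append a queue to its vector's RX array at the next free slot and
 * advance the counter for the next queue. */
static void demo_map_queue_to_vector(struct demo_queue *q)
{
	u16 q_index = q->q_vector->num_rxq;

	q->q_vector->rx[q_index] = q;
	q->q_vector->num_rxq++;
}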