
Searched refs:vmdq (Results 1 – 11 of 11) sorted by relevance

/linux/drivers/net/ethernet/intel/ixgbe/
ixgbe_common.h
47 int ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
66 int ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
67 int ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
68 int ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
ixgbe_sriov.c
704 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; in ixgbe_vf_reset_event() local
706 u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); in ixgbe_vf_reset_event()
815 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; in ixgbe_write_qde() local
816 u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); in ixgbe_write_qde()
885 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; in ixgbe_vf_reset_msg() local
891 u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); in ixgbe_vf_reset_msg()
1068 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; in ixgbe_get_vf_queues() local
1087 msgbuf[IXGBE_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask); in ixgbe_get_vf_queues()
1088 msgbuf[IXGBE_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask); in ixgbe_get_vf_queues()
1702 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; in ixgbe_set_vf_rate_limit() local
[all …]
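
The ixgbe_sriov.c hits above all lean on the same idiom: __ALIGN_MASK(1, ~vmdq->mask) turns the VMDq ring-feature mask into the number of queues per pool. The standalone sketch below reproduces that arithmetic outside the kernel; __ALIGN_MASK is copied from include/uapi/linux/const.h, and the mask values are illustrative stand-ins for whatever the driver programs into ring_feature[RING_F_VMDQ].mask, not values read from real hardware.

/* Sketch: queues-per-pool arithmetic, assuming illustrative pool masks. */
#include <stdint.h>
#include <stdio.h>

#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))	/* as in the kernel */

static unsigned int q_per_pool(uint32_t vmdq_mask)
{
	/* Same expression as the driver: rounding 1 up past the bits the
	 * feature mask leaves clear yields the pool's queue count. */
	return __ALIGN_MASK(1u, ~vmdq_mask);
}

int main(void)
{
	/* Hypothetical masks with 1, 2 and 3 low queue-index bits clear,
	 * i.e. 2, 4 and 8 queues per VMDq pool. */
	uint32_t masks[] = { 0x7E, 0x7C, 0x78 };

	for (unsigned int i = 0; i < 3; i++)
		printf("mask 0x%02X -> %u queues per pool\n",
		       (unsigned int)masks[i], q_per_pool(masks[i]));
	return 0;
}

Per the hits above, ixgbe_get_vf_queues() reports this same per-pool count back to the VF driver, and handle_redirect_action() in ixgbe_main.c multiplies it by the VF index to locate a pool's first hardware queue.
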
ixgbe_82598.c
785 static int ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) in ixgbe_set_vmdq_82598() argument
798 rar_high |= FIELD_PREP(IXGBE_RAH_VIND_MASK, vmdq); in ixgbe_set_vmdq_82598()
809 static int ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) in ixgbe_clear_vmdq_82598() argument
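
In the 82598 path just above, FIELD_PREP(IXGBE_RAH_VIND_MASK, vmdq) shifts the pool index into the VIND bit-field of the RAR "high" register. A minimal userspace approximation is sketched below; field_prep() mirrors what FIELD_PREP() from <linux/bitfield.h> does for a constant mask, and the 4-bit field position (RAH_VIND_MASK) is an assumed, illustrative value rather than the authoritative register layout.

/* Sketch: placing a VMDq pool index into a RAR-high style bit-field. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define RAH_VIND_MASK 0x003C0000u	/* assumption: 4-bit VIND field at bits 18..21 */

static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	/* Shift the value up to the field's lowest set bit, then clamp it to the field. */
	return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	uint32_t rar_high = 0x80000000u;	/* pretend current register contents */
	uint32_t vmdq = 5;			/* pool index to bind to this receive-address entry */

	rar_high &= ~RAH_VIND_MASK;		/* drop any previous pool binding */
	rar_high |= field_prep(RAH_VIND_MASK, vmdq);

	printf("rar_high = 0x%08" PRIX32 "\n", rar_high);
	return 0;
}
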
ixgbe_type_e610.h
876 u8 vmdq; /* VMDQ supported */ member
ixgbe_e610.c
566 caps->vmdq = (number == 1); in ixgbe_parse_e610_caps()
ixgbe_main.c
8211 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; in ixgbe_vf_tx_pending() local
8212 u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); in ixgbe_vf_tx_pending()
10064 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; in handle_redirect_action() local
10074 *queue = vf * __ALIGN_MASK(1, ~vmdq->mask); in handle_redirect_action()
/linux/drivers/net/ethernet/intel/ixgbevf/
vf.c
437 u32 vmdq) in ixgbevf_set_rar_vf() argument
473 u32 vmdq) in ixgbevf_hv_set_rar_vf() argument
/linux/drivers/net/ethernet/intel/i40e/
i40e_type.h
221 bool vmdq; member
i40e_common.c
2707 p->vmdq = true; in i40e_parse_discover_capabilities()
i40e_main.c
12750 if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) { in i40e_sw_init()
/linux/drivers/net/ethernet/wangxun/libwx/
wx_hw.c
845 static int wx_clear_vmdq(struct wx *wx, u32 rar, u32 __maybe_unused vmdq) in wx_clear_vmdq() argument