
Searched refs:vf (Results 1 – 25 of 269) sorted by relevance

/linux/drivers/net/ethernet/intel/ice/
ice_vf_lib.c
27 struct ice_vf *vf; in ice_get_vf_by_id() local
30 hash_for_each_possible_rcu(pf->vfs.table, vf, entry, vf_id) { in ice_get_vf_by_id()
31 if (vf->vf_id == vf_id) { in ice_get_vf_by_id()
34 if (kref_get_unless_zero(&vf->refcnt)) in ice_get_vf_by_id()
35 found = vf; in ice_get_vf_by_id()
57 struct ice_vf *vf = container_of(ref, struct ice_vf, refcnt); in ice_release_vf() local
59 pci_dev_put(vf->vfdev); in ice_release_vf()
61 vf->vf_ops->free(vf); in ice_release_vf()
75 void ice_put_vf(struct ice_vf *vf) in ice_put_vf() argument
77 kref_put(&vf->refcnt, ice_release_vf); in ice_put_vf()
[all …]
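
The ice_vf_lib.c hits above illustrate the driver's reference-counted VF lookup: VFs sit in an RCU-protected hash table keyed by VF ID, and kref_get_unless_zero() stops a lookup from reviving a VF whose last reference is already gone. Below is a minimal sketch of that pattern, assuming hypothetical my_pf/my_vf structures rather than the real ice ones.

    #include <linux/hashtable.h>
    #include <linux/kref.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct my_vf {
            struct hlist_node entry;
            struct kref refcnt;
            struct rcu_head rcu;
            u16 vf_id;
    };

    struct my_pf {
            DECLARE_HASHTABLE(vf_table, 8);         /* keyed by vf_id */
    };

    static void my_release_vf(struct kref *ref)
    {
            struct my_vf *vf = container_of(ref, struct my_vf, refcnt);

            /* Defer the free so concurrent RCU readers can finish. */
            kfree_rcu(vf, rcu);
    }

    /* Return the VF with its refcount raised, or NULL; pair with my_put_vf(). */
    static struct my_vf *my_get_vf_by_id(struct my_pf *pf, u16 vf_id)
    {
            struct my_vf *vf, *found = NULL;

            rcu_read_lock();
            hash_for_each_possible_rcu(pf->vf_table, vf, entry, vf_id) {
                    if (vf->vf_id != vf_id)
                            continue;
                    /* Skip a VF whose last reference is already being dropped. */
                    if (kref_get_unless_zero(&vf->refcnt))
                            found = vf;
                    break;
            }
            rcu_read_unlock();

            return found;
    }

    static void my_put_vf(struct my_vf *vf)
    {
            kref_put(&vf->refcnt, my_release_vf);
    }

Teardown would mirror ice_free_vf_entries() in ice_sriov.c below: hash_del_rcu() the entry, then drop the table's reference with my_put_vf().
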
ice_sriov.c
28 struct ice_vf *vf; in ice_free_vf_entries() local
37 hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) { in ice_free_vf_entries()
38 hash_del_rcu(&vf->entry); in ice_free_vf_entries()
39 ice_put_vf(vf); in ice_free_vf_entries()
47 static void ice_free_vf_res(struct ice_vf *vf) in ice_free_vf_res() argument
49 struct ice_pf *pf = vf->pf; in ice_free_vf_res()
55 clear_bit(ICE_VF_STATE_INIT, vf->vf_states); in ice_free_vf_res()
56 ice_vf_fdir_exit(vf); in ice_free_vf_res()
58 if (vf->ctrl_vsi_idx != ICE_NO_VSI) in ice_free_vf_res()
59 ice_vf_ctrl_vsi_release(vf); in ice_free_vf_res()
[all …]
ice_virtchnl.h
32 int (*get_ver_msg)(struct ice_vf *vf, u8 *msg);
33 int (*get_vf_res_msg)(struct ice_vf *vf, u8 *msg);
34 void (*reset_vf)(struct ice_vf *vf);
35 int (*add_mac_addr_msg)(struct ice_vf *vf, u8 *msg);
36 int (*del_mac_addr_msg)(struct ice_vf *vf, u8 *msg);
37 int (*cfg_qs_msg)(struct ice_vf *vf, u8 *msg);
38 int (*ena_qs_msg)(struct ice_vf *vf, u8 *msg);
39 int (*dis_qs_msg)(struct ice_vf *vf, u8 *msg);
40 int (*request_qs_msg)(struct ice_vf *vf, u8 *msg);
41 int (*cfg_irq_map_msg)(struct ice_vf *vf, u8 *msg);
[all …]
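
ice_virtchnl.h above declares a table of per-opcode handlers that all share the shape int (*handler)(struct ice_vf *vf, u8 *msg), so the dispatch code can stay the same while different handler sets are plugged in. A minimal sketch of that ops-table idea, using hypothetical names and opcodes rather than the real virtchnl values:

    #include <linux/errno.h>
    #include <linux/types.h>

    struct my_vf;                           /* opaque to the dispatcher */

    enum my_vc_opcode {
            MY_VC_OP_GET_VERSION,
            MY_VC_OP_ADD_MAC,
            MY_VC_OP_CFG_QUEUES,
    };

    struct my_virtchnl_ops {
            int (*get_ver_msg)(struct my_vf *vf, u8 *msg);
            int (*add_mac_addr_msg)(struct my_vf *vf, u8 *msg);
            int (*cfg_qs_msg)(struct my_vf *vf, u8 *msg);
            /* ...one handler per supported opcode... */
    };

    static int my_vc_process_msg(const struct my_virtchnl_ops *ops,
                                 struct my_vf *vf, enum my_vc_opcode op, u8 *msg)
    {
            switch (op) {
            case MY_VC_OP_GET_VERSION:
                    return ops->get_ver_msg(vf, msg);
            case MY_VC_OP_ADD_MAC:
                    return ops->add_mac_addr_msg(vf, msg);
            case MY_VC_OP_CFG_QUEUES:
                    return ops->cfg_qs_msg(vf, msg);
            default:
                    return -EOPNOTSUPP;
            }
    }
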
ice_virtchnl.c
181 struct ice_vf *vf; in ice_vc_vf_broadcast() local
185 ice_for_each_vf(pf, bkt, vf) { in ice_vc_vf_broadcast()
187 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) && in ice_vc_vf_broadcast()
188 !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) in ice_vc_vf_broadcast()
194 ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg, in ice_vc_vf_broadcast()
208 ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe, in ice_set_pfe_link() argument
211 if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) { in ice_set_pfe_link()
231 void ice_vc_notify_vf_link_state(struct ice_vf *vf) in ice_vc_notify_vf_link_state() argument
234 struct ice_hw *hw = &vf->pf->hw; in ice_vc_notify_vf_link_state()
239 if (ice_is_vf_link_up(vf)) in ice_vc_notify_vf_link_state()
[all …]
ice_vf_lib.h
65 void (*free)(struct ice_vf *vf);
66 void (*clear_reset_state)(struct ice_vf *vf);
67 void (*clear_mbx_register)(struct ice_vf *vf);
68 void (*trigger_reset_register)(struct ice_vf *vf, bool is_vflr);
69 bool (*poll_reset_status)(struct ice_vf *vf);
70 void (*clear_reset_trigger)(struct ice_vf *vf);
71 void (*irq_close)(struct ice_vf *vf);
72 void (*post_vsi_rebuild)(struct ice_vf *vf);
152 static inline u16 ice_vf_get_port_vlan_id(struct ice_vf *vf) in ice_vf_get_port_vlan_id() argument
154 return vf->port_vlan_info.vid; in ice_vf_get_port_vlan_id()
[all …]
ice_virtchnl_fdir.c
98 ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id) in ice_vc_fdir_param_check() argument
100 struct ice_pf *pf = vf->pf; in ice_vc_fdir_param_check()
105 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) in ice_vc_fdir_param_check()
108 if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF)) in ice_vc_fdir_param_check()
111 if (!ice_vc_isvalid_vsi_id(vf, vsi_id)) in ice_vc_fdir_param_check()
114 if (!ice_get_vf_vsi(vf)) in ice_vc_fdir_param_check()
128 static int ice_vf_start_ctrl_vsi(struct ice_vf *vf) in ice_vf_start_ctrl_vsi() argument
130 struct ice_pf *pf = vf->pf; in ice_vf_start_ctrl_vsi()
136 if (vf->ctrl_vsi_idx != ICE_NO_VSI) in ice_vf_start_ctrl_vsi()
139 ctrl_vsi = ice_vf_ctrl_vsi_setup(vf); in ice_vf_start_ctrl_vsi()
[all …]
ice_vf_lib_private.h
26 void ice_initialize_vf_entry(struct ice_vf *vf);
27 void ice_dis_vf_qs(struct ice_vf *vf);
28 int ice_check_vf_init(struct ice_vf *vf);
30 struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf);
32 bool ice_is_vf_trusted(struct ice_vf *vf);
33 bool ice_vf_has_no_qs_ena(struct ice_vf *vf);
34 bool ice_is_vf_link_up(struct ice_vf *vf);
35 void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf);
36 void ice_vf_ctrl_vsi_release(struct ice_vf *vf);
37 struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf);
[all …]
/linux/drivers/vdpa/ifcvf/
ifcvf_main.c
22 struct ifcvf_hw *vf = arg; in ifcvf_config_changed() local
24 if (vf->config_cb.callback) in ifcvf_config_changed()
25 return vf->config_cb.callback(vf->config_cb.private); in ifcvf_config_changed()
42 struct ifcvf_hw *vf = arg; in ifcvf_vqs_reused_intr_handler() local
46 for (i = 0; i < vf->nr_vring; i++) { in ifcvf_vqs_reused_intr_handler()
47 vring = &vf->vring[i]; in ifcvf_vqs_reused_intr_handler()
57 struct ifcvf_hw *vf = arg; in ifcvf_dev_intr_handler() local
60 isr = vp_ioread8(vf->isr); in ifcvf_dev_intr_handler()
72 static void ifcvf_free_per_vq_irq(struct ifcvf_hw *vf) in ifcvf_free_per_vq_irq() argument
74 struct pci_dev *pdev = vf->pdev; in ifcvf_free_per_vq_irq()
[all …]
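
The ifcvf hits above include a "vqs reused" interrupt handler: one IRQ services every virtqueue, so the handler simply walks all rings and fires each completion callback. A minimal sketch of that dispatch loop, assuming a hypothetical my_vf_hw layout rather than the real ifcvf_hw:

    #include <linux/interrupt.h>

    #define MY_MAX_VRINGS 16

    struct my_vring {
            void (*cb)(void *priv);         /* completion callback */
            void *cb_priv;
    };

    struct my_vf_hw {
            struct my_vring vring[MY_MAX_VRINGS];
            int nr_vring;
    };

    /* One IRQ shared by all virtqueues: notify every ring that has a callback. */
    static irqreturn_t my_vqs_shared_intr_handler(int irq, void *arg)
    {
            struct my_vf_hw *vf = arg;
            int i;

            for (i = 0; i < vf->nr_vring; i++) {
                    struct my_vring *vring = &vf->vring[i];

                    if (vring->cb)
                            vring->cb(vring->cb_priv);
            }

            return IRQ_HANDLED;
    }

It would be registered once via request_irq(irq, my_vqs_shared_intr_handler, 0, "my-vf-vqs", vf); a per-queue-vector variant (one handler per vring) is the alternative when enough MSI-X vectors are available, which is why the ifcvf code above also carries ifcvf_free_per_vq_irq().
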
/linux/drivers/net/ethernet/sfc/siena/
siena_sriov.c
191 static unsigned abs_index(struct siena_vf *vf, unsigned index) in abs_index() argument
193 return EFX_VI_BASE + vf->index * efx_vf_size(vf->efx) + index; in abs_index()
302 static void efx_siena_sriov_reset_tx_filter(struct siena_vf *vf) in efx_siena_sriov_reset_tx_filter() argument
304 struct efx_nic *efx = vf->efx; in efx_siena_sriov_reset_tx_filter()
309 if (vf->tx_filter_id != -1) { in efx_siena_sriov_reset_tx_filter()
311 vf->tx_filter_id); in efx_siena_sriov_reset_tx_filter()
313 vf->pci_name, vf->tx_filter_id); in efx_siena_sriov_reset_tx_filter()
314 vf->tx_filter_id = -1; in efx_siena_sriov_reset_tx_filter()
317 if (is_zero_ether_addr(vf->addr.mac_addr)) in efx_siena_sriov_reset_tx_filter()
323 if (vf->tx_filter_mode == VF_TX_FILTER_AUTO && vf_max_tx_channels <= 2) in efx_siena_sriov_reset_tx_filter()
[all …]
/linux/drivers/net/ethernet/marvell/octeontx2/nic/
otx2_vf.c
37 static void otx2vf_process_vfaf_mbox_msg(struct otx2_nic *vf, in otx2vf_process_vfaf_mbox_msg() argument
41 dev_err(vf->dev, in otx2vf_process_vfaf_mbox_msg()
47 dev_err(vf->dev, in otx2vf_process_vfaf_mbox_msg()
54 dev_err(vf->dev, in otx2vf_process_vfaf_mbox_msg()
62 vf->pcifunc = msg->pcifunc; in otx2vf_process_vfaf_mbox_msg()
65 mbox_handler_msix_offset(vf, (struct msix_offset_rsp *)msg); in otx2vf_process_vfaf_mbox_msg()
68 mbox_handler_npa_lf_alloc(vf, (struct npa_lf_alloc_rsp *)msg); in otx2vf_process_vfaf_mbox_msg()
71 mbox_handler_nix_lf_alloc(vf, (struct nix_lf_alloc_rsp *)msg); in otx2vf_process_vfaf_mbox_msg()
74 mbox_handler_nix_bp_enable(vf, (struct nix_bp_cfg_rsp *)msg); in otx2vf_process_vfaf_mbox_msg()
78 dev_err(vf->dev, in otx2vf_process_vfaf_mbox_msg()
[all …]
/linux/drivers/net/ethernet/intel/i40e/
i40e_virtchnl_pf.c
26 struct i40e_vf *vf = pf->vf; in i40e_vc_vf_broadcast() local
29 for (i = 0; i < pf->num_alloc_vfs; i++, vf++) { in i40e_vc_vf_broadcast()
30 int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id; in i40e_vc_vf_broadcast()
32 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) && in i40e_vc_vf_broadcast()
33 !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) in i40e_vc_vf_broadcast()
85 static void i40e_set_vf_link_state(struct i40e_vf *vf, in i40e_set_vf_link_state() argument
90 if (vf->link_forced) in i40e_set_vf_link_state()
91 link_status = vf->link_up; in i40e_set_vf_link_state()
93 if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) { in i40e_set_vf_link_state()
110 static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf) in i40e_vc_notify_vf_link_state() argument
[all …]
/linux/drivers/net/ethernet/intel/ixgbe/
ixgbe_sriov.c
43 mv_list[i].vf = -1; in ixgbe_alloc_vf_macvlans()
129 int vf = 0; in ixgbe_get_vfs() local
144 if (vf >= adapter->num_vfs) in ixgbe_get_vfs()
147 adapter->vfinfo[vf].vfdev = vfdev; in ixgbe_get_vfs()
148 ++vf; in ixgbe_get_vfs()
208 unsigned int num_vfs = adapter->num_vfs, vf; in ixgbe_disable_sriov() local
218 for (vf = 0; vf < num_vfs; ++vf) { in ixgbe_disable_sriov()
219 struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev; in ixgbe_disable_sriov()
223 adapter->vfinfo[vf].vfdev = NULL; in ixgbe_disable_sriov()
364 u32 *msgbuf, u32 vf) in ixgbe_set_vf_multicasts() argument
[all …]
/linux/drivers/net/ethernet/broadcom/bnx2x/
bnx2x_sriov.c
30 struct bnx2x_virtf **vf,
78 static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_igu_ack_sb() argument
86 u32 func_encode = vf->abs_vfid; in bnx2x_vf_igu_ack_sb()
112 struct bnx2x_virtf *vf, in bnx2x_validate_vf_sp_objs() argument
115 if (!bnx2x_leading_vfq(vf, sp_initialized)) { in bnx2x_validate_vf_sp_objs()
126 void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vfop_qctor_dump_tx() argument
133 vf->abs_vfid, in bnx2x_vfop_qctor_dump_tx()
142 void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vfop_qctor_dump_rx() argument
151 vf->abs_vfid, in bnx2x_vfop_qctor_dump_rx()
167 struct bnx2x_virtf *vf, in bnx2x_vfop_qctor_prep() argument
[all …]
bnx2x_sriov.h
166 #define vf_rxq_count(vf) ((vf)->alloc_resc.num_rxqs) argument
167 #define vf_txq_count(vf) ((vf)->alloc_resc.num_txqs) argument
168 #define vf_sb_count(vf) ((vf)->alloc_resc.num_sbs) argument
169 #define vf_mac_rules_cnt(vf) ((vf)->alloc_resc.num_mac_filters) argument
170 #define vf_vlan_rules_cnt(vf) ((vf)->alloc_resc.num_vlan_filters) argument
171 #define vf_mc_rules_cnt(vf) ((vf)->alloc_resc.num_mc_filters) argument
179 #define bnx2x_vfq(vf, nr, var) ((vf)->vfqs[(nr)].var) argument
180 #define bnx2x_leading_vfq(vf, var) ((vf)->vfqs[LEADING_IDX].var) argument
224 #define for_each_vfq(vf, var) \ argument
225 for ((var) = 0; (var) < vf_rxq_count(vf); (var)++)
[all …]
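
bnx2x_sriov.h above is a block of accessor macros that hide where the per-VF resource counts live, plus a for_each_vfq() iterator built on top of them. A small sketch of the same idea with a hypothetical resource struct:

    struct my_vf_resc {
            int num_rxqs;
            int num_txqs;
    };

    struct my_virtf {
            struct my_vf_resc alloc_resc;
    };

    /* Accessors keep call sites independent of the resource layout. */
    #define my_vf_rxq_count(vf)     ((vf)->alloc_resc.num_rxqs)
    #define my_vf_txq_count(vf)     ((vf)->alloc_resc.num_txqs)

    /* Iterate over every queue index allocated to the VF. */
    #define for_each_my_vfq(vf, var) \
            for ((var) = 0; (var) < my_vf_rxq_count(vf); (var)++)

    static int my_vf_total_queues(const struct my_virtf *vf)
    {
            return my_vf_rxq_count(vf) + my_vf_txq_count(vf);
    }
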
bnx2x_vfpf.c
544 static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_leading_vfq_init() argument
547 u8 cl_id = vfq_cl_id(vf, q); in bnx2x_leading_vfq_init()
548 u8 func_id = FW_VF_HANDLE(vf->abs_vfid); in bnx2x_leading_vfq_init()
553 bnx2x_vf_sp(bp, vf, mac_rdata), in bnx2x_leading_vfq_init()
554 bnx2x_vf_sp_map(bp, vf, mac_rdata), in bnx2x_leading_vfq_init()
556 &vf->filter_state, in bnx2x_leading_vfq_init()
558 &vf->vf_macs_pool); in bnx2x_leading_vfq_init()
562 bnx2x_vf_sp(bp, vf, vlan_rdata), in bnx2x_leading_vfq_init()
563 bnx2x_vf_sp_map(bp, vf, vlan_rdata), in bnx2x_leading_vfq_init()
565 &vf->filter_state, in bnx2x_leading_vfq_init()
[all …]
/linux/drivers/crypto/cavium/cpt/
cptpf_mbox.c
8 static void cpt_send_msg_to_vf(struct cpt_device *cpt, int vf, in cpt_send_msg_to_vf() argument
12 cpt_write_csr64(cpt->reg_base, CPTX_PF_VFX_MBOXX(0, vf, 1), in cpt_send_msg_to_vf()
14 cpt_write_csr64(cpt->reg_base, CPTX_PF_VFX_MBOXX(0, vf, 0), mbx->msg); in cpt_send_msg_to_vf()
20 static void cpt_mbox_send_ack(struct cpt_device *cpt, int vf, in cpt_mbox_send_ack() argument
25 cpt_send_msg_to_vf(cpt, vf, mbx); in cpt_mbox_send_ack()
28 static void cpt_clear_mbox_intr(struct cpt_device *cpt, u32 vf) in cpt_clear_mbox_intr() argument
31 cpt_write_csr64(cpt->reg_base, CPTX_PF_MBOX_INTX(0, 0), (1 << vf)); in cpt_clear_mbox_intr()
37 static void cpt_cfg_qlen_for_vf(struct cpt_device *cpt, int vf, u32 size) in cpt_cfg_qlen_for_vf() argument
41 pf_qx_ctl.u = cpt_read_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, vf)); in cpt_cfg_qlen_for_vf()
44 cpt_write_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, vf), pf_qx_ctl.u); in cpt_cfg_qlen_for_vf()
[all …]
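
cptpf_mbox.c above sends a PF-to-VF mailbox message as two 64-bit CSR writes, payload register first and message register second, presumably because the message write is what the VF reacts to. A sketch of that ordering with an entirely hypothetical register layout:

    #include <linux/io.h>
    #include <linux/types.h>

    /* Hypothetical layout: two 64-bit mailbox registers per VF. */
    #define MY_PF_VF_MBOX(vf, n)    (0x1000 + (vf) * 0x10 + (n) * 0x8)

    struct my_mbox_msg {
            u64 msg;        /* command/opcode word */
            u64 data;       /* payload word */
    };

    /* Write the payload first, then the command word, mirroring
     * cpt_send_msg_to_vf() above. */
    static void my_send_msg_to_vf(void __iomem *reg_base, int vf,
                                  const struct my_mbox_msg *mbx)
    {
            writeq(mbx->data, reg_base + MY_PF_VF_MBOX(vf, 1));
            writeq(mbx->msg, reg_base + MY_PF_VF_MBOX(vf, 0));
    }
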
/linux/drivers/net/ethernet/sfc/
ef10_sriov.c
118 if (!nic_data->vf) in efx_ef10_sriov_free_vf_vports()
122 struct ef10_vf *vf = nic_data->vf + i; in efx_ef10_sriov_free_vf_vports() local
125 if (vf->pci_dev && pci_is_dev_assigned(vf->pci_dev)) in efx_ef10_sriov_free_vf_vports()
128 if (vf->vport_assigned) { in efx_ef10_sriov_free_vf_vports()
130 vf->vport_assigned = 0; in efx_ef10_sriov_free_vf_vports()
133 if (!is_zero_ether_addr(vf->mac)) { in efx_ef10_sriov_free_vf_vports()
134 efx_ef10_vport_del_mac(efx, vf->vport_id, vf->mac); in efx_ef10_sriov_free_vf_vports()
135 eth_zero_addr(vf->mac); in efx_ef10_sriov_free_vf_vports()
138 if (vf->vport_id) { in efx_ef10_sriov_free_vf_vports()
139 efx_ef10_vport_free(efx, vf->vport_id); in efx_ef10_sriov_free_vf_vports()
[all …]
/linux/drivers/crypto/marvell/octeontx/
otx_cptpf_mbox.c
74 static void otx_cpt_send_msg_to_vf(struct otx_cpt_device *cpt, int vf, in otx_cpt_send_msg_to_vf() argument
78 writeq(mbx->data, cpt->reg_base + OTX_CPT_PF_VFX_MBOXX(vf, 1)); in otx_cpt_send_msg_to_vf()
79 writeq(mbx->msg, cpt->reg_base + OTX_CPT_PF_VFX_MBOXX(vf, 0)); in otx_cpt_send_msg_to_vf()
86 static void otx_cpt_mbox_send_ack(struct otx_cpt_device *cpt, int vf, in otx_cpt_mbox_send_ack() argument
91 otx_cpt_send_msg_to_vf(cpt, vf, mbx); in otx_cpt_mbox_send_ack()
95 static void otx_cptpf_mbox_send_nack(struct otx_cpt_device *cpt, int vf, in otx_cptpf_mbox_send_nack() argument
100 otx_cpt_send_msg_to_vf(cpt, vf, mbx); in otx_cptpf_mbox_send_nack()
103 static void otx_cpt_clear_mbox_intr(struct otx_cpt_device *cpt, u32 vf) in otx_cpt_clear_mbox_intr() argument
106 writeq(1ull << vf, cpt->reg_base + OTX_CPT_PF_MBOX_INTX(0)); in otx_cpt_clear_mbox_intr()
112 static void otx_cpt_cfg_qlen_for_vf(struct otx_cpt_device *cpt, int vf, in otx_cpt_cfg_qlen_for_vf() argument
[all …]
/linux/drivers/net/ethernet/qlogic/qlcnic/
qlcnic_sriov_pf.c
743 struct qlcnic_vf_info *vf = trans->vf; in qlcnic_sriov_pf_channel_cfg_cmd() local
744 struct qlcnic_vport *vp = vf->vp; in qlcnic_sriov_pf_channel_cfg_cmd()
747 u16 func = vf->pci_func; in qlcnic_sriov_pf_channel_cfg_cmd()
751 adapter = vf->adapter; in qlcnic_sriov_pf_channel_cfg_cmd()
763 size = sizeof(*vf->sriov_vlans); in qlcnic_sriov_pf_channel_cfg_cmd()
765 memset(vf->sriov_vlans, 0, size); in qlcnic_sriov_pf_channel_cfg_cmd()
777 set_bit(QLC_BC_VF_STATE, &vf->state); in qlcnic_sriov_pf_channel_cfg_cmd()
779 clear_bit(QLC_BC_VF_STATE, &vf->state); in qlcnic_sriov_pf_channel_cfg_cmd()
789 struct qlcnic_vf_info *vf, in qlcnic_sriov_cfg_vf_def_mac() argument
800 vp = vf->vp; in qlcnic_sriov_cfg_vf_def_mac()
[all …]
/linux/drivers/net/ethernet/cisco/enic/
enic_pp.c
23 int enic_is_valid_pp_vf(struct enic *enic, int vf, int *err) in enic_is_valid_pp_vf() argument
25 if (vf != PORT_SELF_VF) { in enic_is_valid_pp_vf()
28 if (vf < 0 || vf >= enic->num_vfs) { in enic_is_valid_pp_vf()
42 if (vf == PORT_SELF_VF && !enic_is_dynamic(enic)) { in enic_is_valid_pp_vf()
54 static int enic_set_port_profile(struct enic *enic, int vf) in enic_set_port_profile() argument
66 ENIC_PP_BY_INDEX(enic, vf, pp, &err); in enic_set_port_profile()
84 } else if (vf == PORT_SELF_VF) { in enic_set_port_profile()
88 "for VF %d\n", vf); in enic_set_port_profile()
120 ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnic_dev_init_prov2, (u8 *)vp, in enic_set_port_profile()
130 static int enic_unset_port_profile(struct enic *enic, int vf) in enic_unset_port_profile() argument
[all …]
/linux/drivers/net/ethernet/broadcom/bnxt/
bnxt_sriov.c
29 struct bnxt_vf_info *vf, u16 event_id) in bnxt_hwrm_fwd_async_event_cmpl() argument
39 if (vf) in bnxt_hwrm_fwd_async_event_cmpl()
40 req->encap_async_event_target_id = cpu_to_le16(vf->fw_fid); in bnxt_hwrm_fwd_async_event_cmpl()
75 struct bnxt_vf_info *vf; in bnxt_set_vf_spoofchk() local
86 vf = &bp->pf.vf[vf_id]; in bnxt_set_vf_spoofchk()
87 if (vf->flags & BNXT_VF_SPOOFCHK) in bnxt_set_vf_spoofchk()
101 req->fid = cpu_to_le16(vf->fw_fid); in bnxt_set_vf_spoofchk()
106 vf->flags |= BNXT_VF_SPOOFCHK; in bnxt_set_vf_spoofchk()
108 vf->flags &= ~BNXT_VF_SPOOFCHK; in bnxt_set_vf_spoofchk()
114 static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf) in bnxt_hwrm_func_qcfg_flags() argument
[all …]
/linux/drivers/net/ethernet/netronome/nfp/
nfp_net_sriov.c
18 nfp_net_sriov_check(struct nfp_app *app, int vf, u16 cap, const char *msg, bool warn) in nfp_net_sriov_check() argument
32 if (vf < 0 || vf >= app->pf->num_vfs) { in nfp_net_sriov_check()
34 nfp_warn(app->pf->cpp, "invalid VF id %d\n", vf); in nfp_net_sriov_check()
42 nfp_net_sriov_update(struct nfp_app *app, int vf, u16 update, const char *msg) in nfp_net_sriov_update() argument
48 writeb(vf, app->pf->vfcfg_tbl2 + NFP_NET_VF_CFG_MB_VF_NUM); in nfp_net_sriov_update()
64 int nfp_app_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) in nfp_app_set_vf_mac() argument
70 err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_MAC, "mac", true); in nfp_app_set_vf_mac()
77 mac, vf); in nfp_app_set_vf_mac()
82 vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ; in nfp_app_set_vf_mac()
87 err = nfp_net_sriov_update(app, vf, NFP_NET_VF_CFG_MB_UPD_MAC, "MAC"); in nfp_app_set_vf_mac()
[all …]
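
nfp_net_sriov.c above shows the usual shape of an ndo_set_vf_* handler: validate the VF index and the device capability, write the new setting, then kick a mailbox update. A stripped-down sketch of the validation half for a .ndo_set_vf_mac callback, with a hypothetical private struct and VF limit (the capability check and mailbox kick are NFP-specific and omitted):

    #include <linux/etherdevice.h>
    #include <linux/netdevice.h>

    #define MY_MAX_VFS 64                   /* hypothetical limit */

    struct my_pf_priv {
            int num_vfs;
            u8 vf_mac[MY_MAX_VFS][ETH_ALEN];
    };

    /* Hypothetical .ndo_set_vf_mac: reject out-of-range VFs and bad addresses
     * before touching any state, as the drivers in this listing do. */
    static int my_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
    {
            struct my_pf_priv *pf = netdev_priv(netdev);

            if (vf < 0 || vf >= pf->num_vfs || vf >= MY_MAX_VFS)
                    return -EINVAL;

            if (!is_valid_ether_addr(mac))
                    return -EINVAL;

            ether_addr_copy(pf->vf_mac[vf], mac);

            /* A real driver would now push the address to firmware/hardware. */
            return 0;
    }
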
/linux/drivers/crypto/intel/qat/qat_common/
adf_pfvf_pf_msg.c
15 struct adf_accel_vf_info *vf; in adf_pf2vf_notify_restarting() local
20 for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) { in adf_pf2vf_notify_restarting()
21 if (vf->init && vf->vf_compat_ver >= ADF_PFVF_COMPAT_FALLBACK) in adf_pf2vf_notify_restarting()
22 vf->restarting = true; in adf_pf2vf_notify_restarting()
24 vf->restarting = false; in adf_pf2vf_notify_restarting()
26 if (!vf->init) in adf_pf2vf_notify_restarting()
39 struct adf_accel_vf_info *vf; in adf_pf2vf_wait_for_restarting_complete() local
45 for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) in adf_pf2vf_wait_for_restarting_complete()
46 if (vf->restarting) in adf_pf2vf_wait_for_restarting_complete()
61 struct adf_accel_vf_info *vf; in adf_pf2vf_notify_restarted() local
[all …]
/linux/drivers/net/ethernet/mellanox/mlx5/core/
sriov.c
40 static int sriov_restore_guids(struct mlx5_core_dev *dev, int vf, u16 func_id) in sriov_restore_guids() argument
47 if (sriov->vfs_ctx[vf].node_guid || in sriov_restore_guids()
48 sriov->vfs_ctx[vf].port_guid || in sriov_restore_guids()
49 sriov->vfs_ctx[vf].policy != MLX5_POLICY_INVALID) { in sriov_restore_guids()
54 in->node_guid = sriov->vfs_ctx[vf].node_guid; in sriov_restore_guids()
55 in->port_guid = sriov->vfs_ctx[vf].port_guid; in sriov_restore_guids()
56 in->policy = sriov->vfs_ctx[vf].policy; in sriov_restore_guids()
64 mlx5_core_warn(dev, "modify vport context failed, unable to restore VF %d settings\n", vf); in sriov_restore_guids()
75 int err, vf, num_msix_count; in mlx5_device_enable_sriov() local
86 for (vf = 0; vf < num_vfs; vf++) { in mlx5_device_enable_sriov()
[all …]
/linux/drivers/infiniband/hw/mlx5/
ib_virt.c
50 int mlx5_ib_get_vf_config(struct ib_device *device, int vf, u32 port, in mlx5_ib_get_vf_config() argument
62 err = mlx5_query_hca_vport_context(mdev, 1, 1, vf + 1, rep); in mlx5_ib_get_vf_config()
65 vf, err); in mlx5_ib_get_vf_config()
92 int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf, in mlx5_ib_set_vf_link_state() argument
111 err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in); in mlx5_ib_set_vf_link_state()
113 vfs_ctx[vf].policy = in->policy; in mlx5_ib_set_vf_link_state()
120 int mlx5_ib_get_vf_stats(struct ib_device *device, int vf, in mlx5_ib_get_vf_stats() argument
136 err = mlx5_core_query_vport_counter(mdev, true, vf, port, out); in mlx5_ib_get_vf_stats()
151 static int set_vf_node_guid(struct ib_device *device, int vf, u32 port, in set_vf_node_guid() argument
166 err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in); in set_vf_node_guid()
[all …]
