1c0c050c5SMichael Chan /* Broadcom NetXtreme-C/E network driver. 2c0c050c5SMichael Chan * 311f15ed3SMichael Chan * Copyright (c) 2014-2016 Broadcom Corporation 4746df139SVasundhara Volam * Copyright (c) 2016-2018 Broadcom Limited 5c0c050c5SMichael Chan * 6c0c050c5SMichael Chan * This program is free software; you can redistribute it and/or modify 7c0c050c5SMichael Chan * it under the terms of the GNU General Public License as published by 8c0c050c5SMichael Chan * the Free Software Foundation. 9c0c050c5SMichael Chan */ 10c0c050c5SMichael Chan 11c0c050c5SMichael Chan #include <linux/module.h> 12c0c050c5SMichael Chan #include <linux/pci.h> 13c0c050c5SMichael Chan #include <linux/netdevice.h> 14c0c050c5SMichael Chan #include <linux/if_vlan.h> 15c0c050c5SMichael Chan #include <linux/interrupt.h> 16c0c050c5SMichael Chan #include <linux/etherdevice.h> 17c0c050c5SMichael Chan #include "bnxt_hsi.h" 18c0c050c5SMichael Chan #include "bnxt.h" 192f593846SMichael Chan #include "bnxt_ulp.h" 20c0c050c5SMichael Chan #include "bnxt_sriov.h" 214ab0c6a8SSathya Perla #include "bnxt_vfr.h" 22c0c050c5SMichael Chan #include "bnxt_ethtool.h" 23c0c050c5SMichael Chan 24c0c050c5SMichael Chan #ifdef CONFIG_BNXT_SRIOV 25350a7149SEddie Wai static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp, 26350a7149SEddie Wai struct bnxt_vf_info *vf, u16 event_id) 27350a7149SEddie Wai { 28350a7149SEddie Wai struct hwrm_fwd_async_event_cmpl_input req = {0}; 29350a7149SEddie Wai struct hwrm_async_event_cmpl *async_cmpl; 30350a7149SEddie Wai int rc = 0; 31350a7149SEddie Wai 32350a7149SEddie Wai bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1); 33350a7149SEddie Wai if (vf) 34350a7149SEddie Wai req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid); 35350a7149SEddie Wai else 36350a7149SEddie Wai /* broadcast this async event to all VFs */ 37350a7149SEddie Wai req.encap_async_event_target_id = cpu_to_le16(0xffff); 38350a7149SEddie Wai async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl; 3987c374deSMichael Chan async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT); 40350a7149SEddie Wai async_cmpl->event_id = cpu_to_le16(event_id); 41350a7149SEddie Wai 42a798302dSMichael Chan rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 43a798302dSMichael Chan if (rc) 44350a7149SEddie Wai netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n", 45350a7149SEddie Wai rc); 46350a7149SEddie Wai return rc; 47350a7149SEddie Wai } 48350a7149SEddie Wai 49c0c050c5SMichael Chan static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id) 50c0c050c5SMichael Chan { 51caefe526SMichael Chan if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 52c0c050c5SMichael Chan netdev_err(bp->dev, "vf ndo called though PF is down\n"); 53c0c050c5SMichael Chan return -EINVAL; 54c0c050c5SMichael Chan } 55c0c050c5SMichael Chan if (!bp->pf.active_vfs) { 56c0c050c5SMichael Chan netdev_err(bp->dev, "vf ndo called though sriov is disabled\n"); 57c0c050c5SMichael Chan return -EINVAL; 58c0c050c5SMichael Chan } 5978f30004SVenkat Duvvuru if (vf_id >= bp->pf.active_vfs) { 60c0c050c5SMichael Chan netdev_err(bp->dev, "Invalid VF id %d\n", vf_id); 61c0c050c5SMichael Chan return -EINVAL; 62c0c050c5SMichael Chan } 63c0c050c5SMichael Chan return 0; 64c0c050c5SMichael Chan } 65c0c050c5SMichael Chan 66c0c050c5SMichael Chan int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting) 67c0c050c5SMichael Chan { 68c0c050c5SMichael Chan struct hwrm_func_cfg_input req = {0}; 69c0c050c5SMichael Chan struct bnxt *bp = netdev_priv(dev); 70c0c050c5SMichael Chan struct bnxt_vf_info *vf; 71c0c050c5SMichael Chan bool old_setting = false; 72c0c050c5SMichael Chan u32 func_flags; 73c0c050c5SMichael Chan int rc; 74c0c050c5SMichael Chan 758eb992e8SMichael Chan if (bp->hwrm_spec_code < 0x10701) 768eb992e8SMichael Chan return -ENOTSUPP; 778eb992e8SMichael Chan 78c0c050c5SMichael Chan rc = bnxt_vf_ndo_prep(bp, vf_id); 79c0c050c5SMichael Chan if (rc) 80c0c050c5SMichael Chan return rc; 81c0c050c5SMichael Chan 82c0c050c5SMichael Chan vf = &bp->pf.vf[vf_id]; 83c0c050c5SMichael Chan if (vf->flags & BNXT_VF_SPOOFCHK) 84c0c050c5SMichael Chan old_setting = true; 85c0c050c5SMichael Chan if (old_setting == setting) 86c0c050c5SMichael Chan return 0; 87c0c050c5SMichael Chan 88c0c050c5SMichael Chan if (setting) 89c71c4e49SMichael Chan func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE; 90c0c050c5SMichael Chan else 91c71c4e49SMichael Chan func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE; 92c0c050c5SMichael Chan /*TODO: if the driver supports VLAN filter on guest VLAN, 93c0c050c5SMichael Chan * the spoof check should also include vlan anti-spoofing 94c0c050c5SMichael Chan */ 95c0c050c5SMichael Chan bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 96c193554eSMichael Chan req.fid = cpu_to_le16(vf->fw_fid); 97c0c050c5SMichael Chan req.flags = cpu_to_le32(func_flags); 98c0c050c5SMichael Chan rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 99c0c050c5SMichael Chan if (!rc) { 100c0c050c5SMichael Chan if (setting) 101c0c050c5SMichael Chan vf->flags |= BNXT_VF_SPOOFCHK; 102c0c050c5SMichael Chan else 103c0c050c5SMichael Chan vf->flags &= ~BNXT_VF_SPOOFCHK; 104c0c050c5SMichael Chan } 105c0c050c5SMichael Chan return rc; 106c0c050c5SMichael Chan } 107c0c050c5SMichael Chan 1082a516444SMichael Chan static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf) 1092a516444SMichael Chan { 1102a516444SMichael Chan struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 1112a516444SMichael Chan struct hwrm_func_qcfg_input req = {0}; 1122a516444SMichael Chan int rc; 1132a516444SMichael Chan 1142a516444SMichael Chan bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); 1152a516444SMichael Chan req.fid = cpu_to_le16(vf->fw_fid); 1162a516444SMichael Chan mutex_lock(&bp->hwrm_cmd_lock); 1172a516444SMichael Chan rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 1182a516444SMichael Chan if (rc) { 1192a516444SMichael Chan mutex_unlock(&bp->hwrm_cmd_lock); 120d4f1420dSMichael Chan return rc; 1212a516444SMichael Chan } 1222a516444SMichael Chan vf->func_qcfg_flags = le16_to_cpu(resp->flags); 1232a516444SMichael Chan mutex_unlock(&bp->hwrm_cmd_lock); 1242a516444SMichael Chan return 0; 1252a516444SMichael Chan } 1262a516444SMichael Chan 1272a516444SMichael Chan static bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf) 1282a516444SMichael Chan { 1292a516444SMichael Chan if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF)) 1302a516444SMichael Chan return !!(vf->flags & BNXT_VF_TRUST); 1312a516444SMichael Chan 1322a516444SMichael Chan bnxt_hwrm_func_qcfg_flags(bp, vf); 1332a516444SMichael Chan return !!(vf->func_qcfg_flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF); 1342a516444SMichael Chan } 1352a516444SMichael Chan 1362a516444SMichael Chan static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf) 1372a516444SMichael Chan { 1382a516444SMichael Chan struct hwrm_func_cfg_input req = {0}; 1392a516444SMichael Chan 1402a516444SMichael Chan if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF)) 1412a516444SMichael Chan return 0; 1422a516444SMichael Chan 1432a516444SMichael Chan bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 1442a516444SMichael Chan req.fid = cpu_to_le16(vf->fw_fid); 1452a516444SMichael Chan if (vf->flags & BNXT_VF_TRUST) 1462a516444SMichael Chan req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE); 1472a516444SMichael Chan else 1482a516444SMichael Chan req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE); 1499f90445cSVasundhara Volam return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 1502a516444SMichael Chan } 1512a516444SMichael Chan 152746df139SVasundhara Volam int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted) 153746df139SVasundhara Volam { 154746df139SVasundhara Volam struct bnxt *bp = netdev_priv(dev); 155746df139SVasundhara Volam struct bnxt_vf_info *vf; 156746df139SVasundhara Volam 157746df139SVasundhara Volam if (bnxt_vf_ndo_prep(bp, vf_id)) 158746df139SVasundhara Volam return -EINVAL; 159746df139SVasundhara Volam 160746df139SVasundhara Volam vf = &bp->pf.vf[vf_id]; 161746df139SVasundhara Volam if (trusted) 162746df139SVasundhara Volam vf->flags |= BNXT_VF_TRUST; 163746df139SVasundhara Volam else 164746df139SVasundhara Volam vf->flags &= ~BNXT_VF_TRUST; 165746df139SVasundhara Volam 1662a516444SMichael Chan bnxt_hwrm_set_trusted_vf(bp, vf); 167746df139SVasundhara Volam return 0; 168746df139SVasundhara Volam } 169746df139SVasundhara Volam 170c0c050c5SMichael Chan int bnxt_get_vf_config(struct net_device *dev, int vf_id, 171c0c050c5SMichael Chan struct ifla_vf_info *ivi) 172c0c050c5SMichael Chan { 173c0c050c5SMichael Chan struct bnxt *bp = netdev_priv(dev); 174c0c050c5SMichael Chan struct bnxt_vf_info *vf; 175c0c050c5SMichael Chan int rc; 176c0c050c5SMichael Chan 177c0c050c5SMichael Chan rc = bnxt_vf_ndo_prep(bp, vf_id); 178c0c050c5SMichael Chan if (rc) 179c0c050c5SMichael Chan return rc; 180c0c050c5SMichael Chan 181c0c050c5SMichael Chan ivi->vf = vf_id; 182c0c050c5SMichael Chan vf = &bp->pf.vf[vf_id]; 183c0c050c5SMichael Chan 18491cdda40SVasundhara Volam if (is_valid_ether_addr(vf->mac_addr)) 185c0c050c5SMichael Chan memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN); 18691cdda40SVasundhara Volam else 18791cdda40SVasundhara Volam memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN); 188c0c050c5SMichael Chan ivi->max_tx_rate = vf->max_tx_rate; 189c0c050c5SMichael Chan ivi->min_tx_rate = vf->min_tx_rate; 190c0c050c5SMichael Chan ivi->vlan = vf->vlan; 191f0249056SMichael Chan if (vf->flags & BNXT_VF_QOS) 192f0249056SMichael Chan ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT; 193f0249056SMichael Chan else 194f0249056SMichael Chan ivi->qos = 0; 195f0249056SMichael Chan ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK); 1962a516444SMichael Chan ivi->trusted = bnxt_is_trusted_vf(bp, vf); 197c0c050c5SMichael Chan if (!(vf->flags & BNXT_VF_LINK_FORCED)) 198c0c050c5SMichael Chan ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; 199c0c050c5SMichael Chan else if (vf->flags & BNXT_VF_LINK_UP) 200c0c050c5SMichael Chan ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; 201c0c050c5SMichael Chan else 202c0c050c5SMichael Chan ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; 203c0c050c5SMichael Chan 204c0c050c5SMichael Chan return 0; 205c0c050c5SMichael Chan } 206c0c050c5SMichael Chan 207c0c050c5SMichael Chan int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac) 208c0c050c5SMichael Chan { 209c0c050c5SMichael Chan struct hwrm_func_cfg_input req = {0}; 210c0c050c5SMichael Chan struct bnxt *bp = netdev_priv(dev); 211c0c050c5SMichael Chan struct bnxt_vf_info *vf; 212c0c050c5SMichael Chan int rc; 213c0c050c5SMichael Chan 214c0c050c5SMichael Chan rc = bnxt_vf_ndo_prep(bp, vf_id); 215c0c050c5SMichael Chan if (rc) 216c0c050c5SMichael Chan return rc; 217c0c050c5SMichael Chan /* reject bc or mc mac addr, zero mac addr means allow 218c0c050c5SMichael Chan * VF to use its own mac addr 219c0c050c5SMichael Chan */ 220c0c050c5SMichael Chan if (is_multicast_ether_addr(mac)) { 221c0c050c5SMichael Chan netdev_err(dev, "Invalid VF ethernet address\n"); 222c0c050c5SMichael Chan return -EINVAL; 223c0c050c5SMichael Chan } 224c0c050c5SMichael Chan vf = &bp->pf.vf[vf_id]; 225c0c050c5SMichael Chan 226c0c050c5SMichael Chan memcpy(vf->mac_addr, mac, ETH_ALEN); 227c0c050c5SMichael Chan bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 228c193554eSMichael Chan req.fid = cpu_to_le16(vf->fw_fid); 229c0c050c5SMichael Chan req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR); 230c0c050c5SMichael Chan memcpy(req.dflt_mac_addr, mac, ETH_ALEN); 231c0c050c5SMichael Chan return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 232c0c050c5SMichael Chan } 233c0c050c5SMichael Chan 23479aab093SMoshe Shemesh int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos, 23579aab093SMoshe Shemesh __be16 vlan_proto) 236c0c050c5SMichael Chan { 237c0c050c5SMichael Chan struct hwrm_func_cfg_input req = {0}; 238c0c050c5SMichael Chan struct bnxt *bp = netdev_priv(dev); 239c0c050c5SMichael Chan struct bnxt_vf_info *vf; 240c0c050c5SMichael Chan u16 vlan_tag; 241c0c050c5SMichael Chan int rc; 242c0c050c5SMichael Chan 243cf6645f8SMichael Chan if (bp->hwrm_spec_code < 0x10201) 244cf6645f8SMichael Chan return -ENOTSUPP; 245cf6645f8SMichael Chan 24679aab093SMoshe Shemesh if (vlan_proto != htons(ETH_P_8021Q)) 24779aab093SMoshe Shemesh return -EPROTONOSUPPORT; 24879aab093SMoshe Shemesh 249c0c050c5SMichael Chan rc = bnxt_vf_ndo_prep(bp, vf_id); 250c0c050c5SMichael Chan if (rc) 251c0c050c5SMichael Chan return rc; 252c0c050c5SMichael Chan 253c0c050c5SMichael Chan /* TODO: needed to implement proper handling of user priority, 254c0c050c5SMichael Chan * currently fail the command if there is valid priority 255c0c050c5SMichael Chan */ 256c0c050c5SMichael Chan if (vlan_id > 4095 || qos) 257c0c050c5SMichael Chan return -EINVAL; 258c0c050c5SMichael Chan 259c0c050c5SMichael Chan vf = &bp->pf.vf[vf_id]; 260c0c050c5SMichael Chan vlan_tag = vlan_id; 261c0c050c5SMichael Chan if (vlan_tag == vf->vlan) 262c0c050c5SMichael Chan return 0; 263c0c050c5SMichael Chan 264c0c050c5SMichael Chan bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 265c193554eSMichael Chan req.fid = cpu_to_le16(vf->fw_fid); 266c0c050c5SMichael Chan req.dflt_vlan = cpu_to_le16(vlan_tag); 267c0c050c5SMichael Chan req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN); 268c0c050c5SMichael Chan rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 269c0c050c5SMichael Chan if (!rc) 270c0c050c5SMichael Chan vf->vlan = vlan_tag; 271c0c050c5SMichael Chan return rc; 272c0c050c5SMichael Chan } 273c0c050c5SMichael Chan 274c0c050c5SMichael Chan int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate, 275c0c050c5SMichael Chan int max_tx_rate) 276c0c050c5SMichael Chan { 277c0c050c5SMichael Chan struct hwrm_func_cfg_input req = {0}; 278c0c050c5SMichael Chan struct bnxt *bp = netdev_priv(dev); 279c0c050c5SMichael Chan struct bnxt_vf_info *vf; 280c0c050c5SMichael Chan u32 pf_link_speed; 281c0c050c5SMichael Chan int rc; 282c0c050c5SMichael Chan 283c0c050c5SMichael Chan rc = bnxt_vf_ndo_prep(bp, vf_id); 284c0c050c5SMichael Chan if (rc) 285c0c050c5SMichael Chan return rc; 286c0c050c5SMichael Chan 287c0c050c5SMichael Chan vf = &bp->pf.vf[vf_id]; 288c0c050c5SMichael Chan pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); 289c0c050c5SMichael Chan if (max_tx_rate > pf_link_speed) { 290c0c050c5SMichael Chan netdev_info(bp->dev, "max tx rate %d exceed PF link speed for VF %d\n", 291c0c050c5SMichael Chan max_tx_rate, vf_id); 292c0c050c5SMichael Chan return -EINVAL; 293c0c050c5SMichael Chan } 294c0c050c5SMichael Chan 295c0c050c5SMichael Chan if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) { 296c0c050c5SMichael Chan netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n", 297c0c050c5SMichael Chan min_tx_rate, vf_id); 298c0c050c5SMichael Chan return -EINVAL; 299c0c050c5SMichael Chan } 300c0c050c5SMichael Chan if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate) 301c0c050c5SMichael Chan return 0; 302c0c050c5SMichael Chan bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 303c193554eSMichael Chan req.fid = cpu_to_le16(vf->fw_fid); 304c0c050c5SMichael Chan req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW); 305c0c050c5SMichael Chan req.max_bw = cpu_to_le32(max_tx_rate); 306c0c050c5SMichael Chan req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW); 307c0c050c5SMichael Chan req.min_bw = cpu_to_le32(min_tx_rate); 308c0c050c5SMichael Chan rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 309c0c050c5SMichael Chan if (!rc) { 310c0c050c5SMichael Chan vf->min_tx_rate = min_tx_rate; 311c0c050c5SMichael Chan vf->max_tx_rate = max_tx_rate; 312c0c050c5SMichael Chan } 313c0c050c5SMichael Chan return rc; 314c0c050c5SMichael Chan } 315c0c050c5SMichael Chan 316c0c050c5SMichael Chan int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link) 317c0c050c5SMichael Chan { 318c0c050c5SMichael Chan struct bnxt *bp = netdev_priv(dev); 319c0c050c5SMichael Chan struct bnxt_vf_info *vf; 320c0c050c5SMichael Chan int rc; 321c0c050c5SMichael Chan 322c0c050c5SMichael Chan rc = bnxt_vf_ndo_prep(bp, vf_id); 323c0c050c5SMichael Chan if (rc) 324c0c050c5SMichael Chan return rc; 325c0c050c5SMichael Chan 326c0c050c5SMichael Chan vf = &bp->pf.vf[vf_id]; 327c0c050c5SMichael Chan 328c0c050c5SMichael Chan vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED); 329c0c050c5SMichael Chan switch (link) { 330c0c050c5SMichael Chan case IFLA_VF_LINK_STATE_AUTO: 331c0c050c5SMichael Chan vf->flags |= BNXT_VF_LINK_UP; 332c0c050c5SMichael Chan break; 333c0c050c5SMichael Chan case IFLA_VF_LINK_STATE_DISABLE: 334c0c050c5SMichael Chan vf->flags |= BNXT_VF_LINK_FORCED; 335c0c050c5SMichael Chan break; 336c0c050c5SMichael Chan case IFLA_VF_LINK_STATE_ENABLE: 337c0c050c5SMichael Chan vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED; 338c0c050c5SMichael Chan break; 339c0c050c5SMichael Chan default: 340c0c050c5SMichael Chan netdev_err(bp->dev, "Invalid link option\n"); 341c0c050c5SMichael Chan rc = -EINVAL; 342c0c050c5SMichael Chan break; 343c0c050c5SMichael Chan } 344350a7149SEddie Wai if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED)) 345350a7149SEddie Wai rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf, 34687c374deSMichael Chan ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE); 347c0c050c5SMichael Chan return rc; 348c0c050c5SMichael Chan } 349c0c050c5SMichael Chan 350c0c050c5SMichael Chan static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs) 351c0c050c5SMichael Chan { 352c0c050c5SMichael Chan int i; 353c0c050c5SMichael Chan struct bnxt_vf_info *vf; 354c0c050c5SMichael Chan 355c0c050c5SMichael Chan for (i = 0; i < num_vfs; i++) { 356c0c050c5SMichael Chan vf = &bp->pf.vf[i]; 357c0c050c5SMichael Chan memset(vf, 0, sizeof(*vf)); 358c0c050c5SMichael Chan } 359c0c050c5SMichael Chan return 0; 360c0c050c5SMichael Chan } 361c0c050c5SMichael Chan 3624bb6cdceSJeffrey Huang static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs) 363c0c050c5SMichael Chan { 364c0c050c5SMichael Chan int i, rc = 0; 365c0c050c5SMichael Chan struct bnxt_pf_info *pf = &bp->pf; 366c0c050c5SMichael Chan struct hwrm_func_vf_resc_free_input req = {0}; 367c0c050c5SMichael Chan 368c0c050c5SMichael Chan bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1); 369c0c050c5SMichael Chan 370c0c050c5SMichael Chan mutex_lock(&bp->hwrm_cmd_lock); 3714bb6cdceSJeffrey Huang for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) { 372c0c050c5SMichael Chan req.vf_id = cpu_to_le16(i); 373c0c050c5SMichael Chan rc = _hwrm_send_message(bp, &req, sizeof(req), 374c0c050c5SMichael Chan HWRM_CMD_TIMEOUT); 375c0c050c5SMichael Chan if (rc) 376c0c050c5SMichael Chan break; 377c0c050c5SMichael Chan } 378c0c050c5SMichael Chan mutex_unlock(&bp->hwrm_cmd_lock); 379c0c050c5SMichael Chan return rc; 380c0c050c5SMichael Chan } 381c0c050c5SMichael Chan 382c0c050c5SMichael Chan static void bnxt_free_vf_resources(struct bnxt *bp) 383c0c050c5SMichael Chan { 384c0c050c5SMichael Chan struct pci_dev *pdev = bp->pdev; 385c0c050c5SMichael Chan int i; 386c0c050c5SMichael Chan 387c0c050c5SMichael Chan kfree(bp->pf.vf_event_bmap); 388c0c050c5SMichael Chan bp->pf.vf_event_bmap = NULL; 389c0c050c5SMichael Chan 390c0c050c5SMichael Chan for (i = 0; i < 4; i++) { 391c0c050c5SMichael Chan if (bp->pf.hwrm_cmd_req_addr[i]) { 392c0c050c5SMichael Chan dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE, 393c0c050c5SMichael Chan bp->pf.hwrm_cmd_req_addr[i], 394c0c050c5SMichael Chan bp->pf.hwrm_cmd_req_dma_addr[i]); 395c0c050c5SMichael Chan bp->pf.hwrm_cmd_req_addr[i] = NULL; 396c0c050c5SMichael Chan } 397c0c050c5SMichael Chan } 398c0c050c5SMichael Chan 399c8b1d743SDavide Caratti bp->pf.active_vfs = 0; 400c0c050c5SMichael Chan kfree(bp->pf.vf); 401c0c050c5SMichael Chan bp->pf.vf = NULL; 402c0c050c5SMichael Chan } 403c0c050c5SMichael Chan 404c0c050c5SMichael Chan static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs) 405c0c050c5SMichael Chan { 406c0c050c5SMichael Chan struct pci_dev *pdev = bp->pdev; 407c0c050c5SMichael Chan u32 nr_pages, size, i, j, k = 0; 408c0c050c5SMichael Chan 409c0c050c5SMichael Chan bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL); 410c0c050c5SMichael Chan if (!bp->pf.vf) 411c0c050c5SMichael Chan return -ENOMEM; 412c0c050c5SMichael Chan 413c0c050c5SMichael Chan bnxt_set_vf_attr(bp, num_vfs); 414c0c050c5SMichael Chan 415c0c050c5SMichael Chan size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE; 416c0c050c5SMichael Chan nr_pages = size / BNXT_PAGE_SIZE; 417c0c050c5SMichael Chan if (size & (BNXT_PAGE_SIZE - 1)) 418c0c050c5SMichael Chan nr_pages++; 419c0c050c5SMichael Chan 420c0c050c5SMichael Chan for (i = 0; i < nr_pages; i++) { 421c0c050c5SMichael Chan bp->pf.hwrm_cmd_req_addr[i] = 422c0c050c5SMichael Chan dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE, 423c0c050c5SMichael Chan &bp->pf.hwrm_cmd_req_dma_addr[i], 424c0c050c5SMichael Chan GFP_KERNEL); 425c0c050c5SMichael Chan 426c0c050c5SMichael Chan if (!bp->pf.hwrm_cmd_req_addr[i]) 427c0c050c5SMichael Chan return -ENOMEM; 428c0c050c5SMichael Chan 429c0c050c5SMichael Chan for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) { 430c0c050c5SMichael Chan struct bnxt_vf_info *vf = &bp->pf.vf[k]; 431c0c050c5SMichael Chan 432c0c050c5SMichael Chan vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] + 433c0c050c5SMichael Chan j * BNXT_HWRM_REQ_MAX_SIZE; 434c0c050c5SMichael Chan vf->hwrm_cmd_req_dma_addr = 435c0c050c5SMichael Chan bp->pf.hwrm_cmd_req_dma_addr[i] + j * 436c0c050c5SMichael Chan BNXT_HWRM_REQ_MAX_SIZE; 437c0c050c5SMichael Chan k++; 438c0c050c5SMichael Chan } 439c0c050c5SMichael Chan } 440c0c050c5SMichael Chan 441c0c050c5SMichael Chan /* Max 128 VF's */ 442c0c050c5SMichael Chan bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL); 443c0c050c5SMichael Chan if (!bp->pf.vf_event_bmap) 444c0c050c5SMichael Chan return -ENOMEM; 445c0c050c5SMichael Chan 446c0c050c5SMichael Chan bp->pf.hwrm_cmd_req_pages = nr_pages; 447c0c050c5SMichael Chan return 0; 448c0c050c5SMichael Chan } 449c0c050c5SMichael Chan 450c0c050c5SMichael Chan static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp) 451c0c050c5SMichael Chan { 452c0c050c5SMichael Chan struct hwrm_func_buf_rgtr_input req = {0}; 453c0c050c5SMichael Chan 454c0c050c5SMichael Chan bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1); 455c0c050c5SMichael Chan 456c0c050c5SMichael Chan req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages); 457c0c050c5SMichael Chan req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT); 458c0c050c5SMichael Chan req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE); 459c0c050c5SMichael Chan req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]); 460c0c050c5SMichael Chan req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]); 461c0c050c5SMichael Chan req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]); 462c0c050c5SMichael Chan req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]); 463c0c050c5SMichael Chan 464c0c050c5SMichael Chan return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 465c0c050c5SMichael Chan } 466c0c050c5SMichael Chan 4672cd86968SVasundhara Volam /* Caller holds bp->hwrm_cmd_lock mutex lock */ 4682cd86968SVasundhara Volam static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id) 4692cd86968SVasundhara Volam { 4702cd86968SVasundhara Volam struct hwrm_func_cfg_input req = {0}; 4712cd86968SVasundhara Volam struct bnxt_vf_info *vf; 4722cd86968SVasundhara Volam 4732cd86968SVasundhara Volam vf = &bp->pf.vf[vf_id]; 4742cd86968SVasundhara Volam bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 4752cd86968SVasundhara Volam req.fid = cpu_to_le16(vf->fw_fid); 4762cd86968SVasundhara Volam 4772cd86968SVasundhara Volam if (is_valid_ether_addr(vf->mac_addr)) { 4782cd86968SVasundhara Volam req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR); 4792cd86968SVasundhara Volam memcpy(req.dflt_mac_addr, vf->mac_addr, ETH_ALEN); 4802cd86968SVasundhara Volam } 4812cd86968SVasundhara Volam if (vf->vlan) { 4822cd86968SVasundhara Volam req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN); 4832cd86968SVasundhara Volam req.dflt_vlan = cpu_to_le16(vf->vlan); 4842cd86968SVasundhara Volam } 4852cd86968SVasundhara Volam if (vf->max_tx_rate) { 4862cd86968SVasundhara Volam req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW); 4872cd86968SVasundhara Volam req.max_bw = cpu_to_le32(vf->max_tx_rate); 4882cd86968SVasundhara Volam #ifdef HAVE_IFLA_TX_RATE 4892cd86968SVasundhara Volam req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW); 4902cd86968SVasundhara Volam req.min_bw = cpu_to_le32(vf->min_tx_rate); 4912cd86968SVasundhara Volam #endif 4922cd86968SVasundhara Volam } 4932cd86968SVasundhara Volam if (vf->flags & BNXT_VF_TRUST) 4942cd86968SVasundhara Volam req.flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE); 4952cd86968SVasundhara Volam 4962cd86968SVasundhara Volam _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4972cd86968SVasundhara Volam } 4982cd86968SVasundhara Volam 4994673d664SMichael Chan /* Only called by PF to reserve resources for VFs, returns actual number of 5004673d664SMichael Chan * VFs configured, or < 0 on error. 5014673d664SMichael Chan */ 5022cd86968SVasundhara Volam static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset) 5034673d664SMichael Chan { 5044673d664SMichael Chan struct hwrm_func_vf_resource_cfg_input req = {0}; 5054673d664SMichael Chan struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 5064673d664SMichael Chan u16 vf_tx_rings, vf_rx_rings, vf_cp_rings; 5074673d664SMichael Chan u16 vf_stat_ctx, vf_vnics, vf_ring_grps; 5084673d664SMichael Chan struct bnxt_pf_info *pf = &bp->pf; 509bf82736dSMichael Chan int i, rc = 0, min = 1; 510b16b6891SMichael Chan u16 vf_msix = 0; 5111acefc9aSMichael Chan u16 vf_rss; 5124673d664SMichael Chan 5134673d664SMichael Chan bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1); 5144673d664SMichael Chan 515b16b6891SMichael Chan if (bp->flags & BNXT_FLAG_CHIP_P5) { 516b16b6891SMichael Chan vf_msix = hw_resc->max_nqs - bnxt_nq_rings_in_use(bp); 517b16b6891SMichael Chan vf_ring_grps = 0; 518b16b6891SMichael Chan } else { 519b16b6891SMichael Chan vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings; 520b16b6891SMichael Chan } 521e916b081SMichael Chan vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp); 522c027c6b4SVasundhara Volam vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp); 5234673d664SMichael Chan if (bp->flags & BNXT_FLAG_AGG_RINGS) 5244673d664SMichael Chan vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2; 5254673d664SMichael Chan else 5264673d664SMichael Chan vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings; 5274673d664SMichael Chan vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings; 5284673d664SMichael Chan vf_vnics = hw_resc->max_vnics - bp->nr_vnics; 5294673d664SMichael Chan vf_vnics = min_t(u16, vf_vnics, vf_rx_rings); 5301acefc9aSMichael Chan vf_rss = hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs; 5314673d664SMichael Chan 53286c3380dSMichael Chan req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX); 533bf82736dSMichael Chan if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) { 534bf82736dSMichael Chan min = 0; 535bf82736dSMichael Chan req.min_rsscos_ctx = cpu_to_le16(min); 536bf82736dSMichael Chan } 537bf82736dSMichael Chan if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL || 538bf82736dSMichael Chan pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) { 539bf82736dSMichael Chan req.min_cmpl_rings = cpu_to_le16(min); 540bf82736dSMichael Chan req.min_tx_rings = cpu_to_le16(min); 541bf82736dSMichael Chan req.min_rx_rings = cpu_to_le16(min); 542bf82736dSMichael Chan req.min_l2_ctxs = cpu_to_le16(min); 543bf82736dSMichael Chan req.min_vnics = cpu_to_le16(min); 544bf82736dSMichael Chan req.min_stat_ctx = cpu_to_le16(min); 545b16b6891SMichael Chan if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 546bf82736dSMichael Chan req.min_hw_ring_grps = cpu_to_le16(min); 5474673d664SMichael Chan } else { 5484673d664SMichael Chan vf_cp_rings /= num_vfs; 5494673d664SMichael Chan vf_tx_rings /= num_vfs; 5504673d664SMichael Chan vf_rx_rings /= num_vfs; 5514673d664SMichael Chan vf_vnics /= num_vfs; 5524673d664SMichael Chan vf_stat_ctx /= num_vfs; 5534673d664SMichael Chan vf_ring_grps /= num_vfs; 5541acefc9aSMichael Chan vf_rss /= num_vfs; 5554673d664SMichael Chan 5564673d664SMichael Chan req.min_cmpl_rings = cpu_to_le16(vf_cp_rings); 5574673d664SMichael Chan req.min_tx_rings = cpu_to_le16(vf_tx_rings); 5584673d664SMichael Chan req.min_rx_rings = cpu_to_le16(vf_rx_rings); 55986c3380dSMichael Chan req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); 5604673d664SMichael Chan req.min_vnics = cpu_to_le16(vf_vnics); 5614673d664SMichael Chan req.min_stat_ctx = cpu_to_le16(vf_stat_ctx); 5624673d664SMichael Chan req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps); 5631acefc9aSMichael Chan req.min_rsscos_ctx = cpu_to_le16(vf_rss); 5644673d664SMichael Chan } 5654673d664SMichael Chan req.max_cmpl_rings = cpu_to_le16(vf_cp_rings); 5664673d664SMichael Chan req.max_tx_rings = cpu_to_le16(vf_tx_rings); 5674673d664SMichael Chan req.max_rx_rings = cpu_to_le16(vf_rx_rings); 56886c3380dSMichael Chan req.max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); 5694673d664SMichael Chan req.max_vnics = cpu_to_le16(vf_vnics); 5704673d664SMichael Chan req.max_stat_ctx = cpu_to_le16(vf_stat_ctx); 5714673d664SMichael Chan req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps); 5721acefc9aSMichael Chan req.max_rsscos_ctx = cpu_to_le16(vf_rss); 573b16b6891SMichael Chan if (bp->flags & BNXT_FLAG_CHIP_P5) 574b16b6891SMichael Chan req.max_msix = cpu_to_le16(vf_msix / num_vfs); 5754673d664SMichael Chan 5764673d664SMichael Chan mutex_lock(&bp->hwrm_cmd_lock); 5774673d664SMichael Chan for (i = 0; i < num_vfs; i++) { 5782cd86968SVasundhara Volam if (reset) 5792cd86968SVasundhara Volam __bnxt_set_vf_params(bp, i); 5802cd86968SVasundhara Volam 5814673d664SMichael Chan req.vf_id = cpu_to_le16(pf->first_vf_id + i); 5824673d664SMichael Chan rc = _hwrm_send_message(bp, &req, sizeof(req), 5834673d664SMichael Chan HWRM_CMD_TIMEOUT); 584d4f1420dSMichael Chan if (rc) 5854673d664SMichael Chan break; 5864673d664SMichael Chan pf->active_vfs = i + 1; 5874673d664SMichael Chan pf->vf[i].fw_fid = pf->first_vf_id + i; 5884673d664SMichael Chan } 5894673d664SMichael Chan mutex_unlock(&bp->hwrm_cmd_lock); 5904673d664SMichael Chan if (pf->active_vfs) { 591596f9d55SMichael Chan u16 n = pf->active_vfs; 5924673d664SMichael Chan 593596f9d55SMichael Chan hw_resc->max_tx_rings -= le16_to_cpu(req.min_tx_rings) * n; 594596f9d55SMichael Chan hw_resc->max_rx_rings -= le16_to_cpu(req.min_rx_rings) * n; 595596f9d55SMichael Chan hw_resc->max_hw_ring_grps -= le16_to_cpu(req.min_hw_ring_grps) * 596596f9d55SMichael Chan n; 597596f9d55SMichael Chan hw_resc->max_cp_rings -= le16_to_cpu(req.min_cmpl_rings) * n; 5981acefc9aSMichael Chan hw_resc->max_rsscos_ctxs -= le16_to_cpu(req.min_rsscos_ctx) * n; 599596f9d55SMichael Chan hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n; 600596f9d55SMichael Chan hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n; 601b16b6891SMichael Chan if (bp->flags & BNXT_FLAG_CHIP_P5) 602b16b6891SMichael Chan hw_resc->max_irqs -= vf_msix * n; 6034673d664SMichael Chan 6044673d664SMichael Chan rc = pf->active_vfs; 6054673d664SMichael Chan } 6064673d664SMichael Chan return rc; 6074673d664SMichael Chan } 6084673d664SMichael Chan 6094673d664SMichael Chan /* Only called by PF to reserve resources for VFs, returns actual number of 6104673d664SMichael Chan * VFs configured, or < 0 on error. 6114673d664SMichael Chan */ 61292268c32SMichael Chan static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) 613c0c050c5SMichael Chan { 614c0c050c5SMichael Chan u32 rc = 0, mtu, i; 615c0c050c5SMichael Chan u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics; 6166a4f2947SMichael Chan struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 617c0c050c5SMichael Chan struct hwrm_func_cfg_input req = {0}; 618c0c050c5SMichael Chan struct bnxt_pf_info *pf = &bp->pf; 619391be5c2SMichael Chan int total_vf_tx_rings = 0; 620c027c6b4SVasundhara Volam u16 vf_ring_grps; 621c0c050c5SMichael Chan 622c0c050c5SMichael Chan bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 623c0c050c5SMichael Chan 624c0c050c5SMichael Chan /* Remaining rings are distributed equally amongs VF's for now */ 625e916b081SMichael Chan vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp) / num_vfs; 626c027c6b4SVasundhara Volam vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp) / num_vfs; 627c0c050c5SMichael Chan if (bp->flags & BNXT_FLAG_AGG_RINGS) 6286a4f2947SMichael Chan vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) / 62992268c32SMichael Chan num_vfs; 630c0c050c5SMichael Chan else 6316a4f2947SMichael Chan vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings) / 6326a4f2947SMichael Chan num_vfs; 6336a4f2947SMichael Chan vf_ring_grps = (hw_resc->max_hw_ring_grps - bp->rx_nr_rings) / num_vfs; 6346a4f2947SMichael Chan vf_tx_rings = (hw_resc->max_tx_rings - bp->tx_nr_rings) / num_vfs; 6356a4f2947SMichael Chan vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs; 6368427af81SMichael Chan vf_vnics = min_t(u16, vf_vnics, vf_rx_rings); 637c0c050c5SMichael Chan 638c0c050c5SMichael Chan req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU | 639c0c050c5SMichael Chan FUNC_CFG_REQ_ENABLES_MRU | 640c0c050c5SMichael Chan FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS | 641c0c050c5SMichael Chan FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS | 642c0c050c5SMichael Chan FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS | 643c0c050c5SMichael Chan FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS | 644c0c050c5SMichael Chan FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS | 645c0c050c5SMichael Chan FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS | 646b72d4a68SMichael Chan FUNC_CFG_REQ_ENABLES_NUM_VNICS | 647b72d4a68SMichael Chan FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS); 648c0c050c5SMichael Chan 649d0b82c54SVasundhara Volam mtu = bp->dev->mtu + ETH_HLEN + VLAN_HLEN; 650c0c050c5SMichael Chan req.mru = cpu_to_le16(mtu); 651c0c050c5SMichael Chan req.mtu = cpu_to_le16(mtu); 652c0c050c5SMichael Chan 653c0c050c5SMichael Chan req.num_rsscos_ctxs = cpu_to_le16(1); 654c0c050c5SMichael Chan req.num_cmpl_rings = cpu_to_le16(vf_cp_rings); 655c0c050c5SMichael Chan req.num_tx_rings = cpu_to_le16(vf_tx_rings); 656c0c050c5SMichael Chan req.num_rx_rings = cpu_to_le16(vf_rx_rings); 657b72d4a68SMichael Chan req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps); 658c0c050c5SMichael Chan req.num_l2_ctxs = cpu_to_le16(4); 659c0c050c5SMichael Chan 660c0c050c5SMichael Chan req.num_vnics = cpu_to_le16(vf_vnics); 661c0c050c5SMichael Chan /* FIXME spec currently uses 1 bit for stats ctx */ 662c0c050c5SMichael Chan req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx); 663c0c050c5SMichael Chan 664c0c050c5SMichael Chan mutex_lock(&bp->hwrm_cmd_lock); 66592268c32SMichael Chan for (i = 0; i < num_vfs; i++) { 666391be5c2SMichael Chan int vf_tx_rsvd = vf_tx_rings; 667391be5c2SMichael Chan 668c193554eSMichael Chan req.fid = cpu_to_le16(pf->first_vf_id + i); 669c0c050c5SMichael Chan rc = _hwrm_send_message(bp, &req, sizeof(req), 670c0c050c5SMichael Chan HWRM_CMD_TIMEOUT); 671c0c050c5SMichael Chan if (rc) 672c0c050c5SMichael Chan break; 67392268c32SMichael Chan pf->active_vfs = i + 1; 674c193554eSMichael Chan pf->vf[i].fw_fid = le16_to_cpu(req.fid); 675391be5c2SMichael Chan rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid, 676391be5c2SMichael Chan &vf_tx_rsvd); 677391be5c2SMichael Chan if (rc) 678391be5c2SMichael Chan break; 679391be5c2SMichael Chan total_vf_tx_rings += vf_tx_rsvd; 680c0c050c5SMichael Chan } 681c0c050c5SMichael Chan mutex_unlock(&bp->hwrm_cmd_lock); 6824673d664SMichael Chan if (pf->active_vfs) { 6836a4f2947SMichael Chan hw_resc->max_tx_rings -= total_vf_tx_rings; 6846a4f2947SMichael Chan hw_resc->max_rx_rings -= vf_rx_rings * num_vfs; 6856a4f2947SMichael Chan hw_resc->max_hw_ring_grps -= vf_ring_grps * num_vfs; 6866a4f2947SMichael Chan hw_resc->max_cp_rings -= vf_cp_rings * num_vfs; 6876a4f2947SMichael Chan hw_resc->max_rsscos_ctxs -= num_vfs; 6886a4f2947SMichael Chan hw_resc->max_stat_ctxs -= vf_stat_ctx * num_vfs; 6896a4f2947SMichael Chan hw_resc->max_vnics -= vf_vnics * num_vfs; 6904673d664SMichael Chan rc = pf->active_vfs; 691c0c050c5SMichael Chan } 692c0c050c5SMichael Chan return rc; 693c0c050c5SMichael Chan } 694c0c050c5SMichael Chan 6952cd86968SVasundhara Volam static int bnxt_func_cfg(struct bnxt *bp, int num_vfs, bool reset) 6964673d664SMichael Chan { 697f1ca94deSMichael Chan if (BNXT_NEW_RM(bp)) 6982cd86968SVasundhara Volam return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs, reset); 6994673d664SMichael Chan else 7004673d664SMichael Chan return bnxt_hwrm_func_cfg(bp, num_vfs); 7014673d664SMichael Chan } 7024673d664SMichael Chan 7032cd86968SVasundhara Volam int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset) 704702d5011SMichael Chan { 705702d5011SMichael Chan int rc; 706702d5011SMichael Chan 70791b9be48SVasundhara Volam /* Register buffers for VFs */ 70891b9be48SVasundhara Volam rc = bnxt_hwrm_func_buf_rgtr(bp); 70991b9be48SVasundhara Volam if (rc) 71091b9be48SVasundhara Volam return rc; 71191b9be48SVasundhara Volam 712702d5011SMichael Chan /* Reserve resources for VFs */ 7132cd86968SVasundhara Volam rc = bnxt_func_cfg(bp, *num_vfs, reset); 714702d5011SMichael Chan if (rc != *num_vfs) { 715702d5011SMichael Chan if (rc <= 0) { 716702d5011SMichael Chan netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n"); 717702d5011SMichael Chan *num_vfs = 0; 718702d5011SMichael Chan return rc; 719702d5011SMichael Chan } 720702d5011SMichael Chan netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n", 721702d5011SMichael Chan rc); 722702d5011SMichael Chan *num_vfs = rc; 723702d5011SMichael Chan } 724702d5011SMichael Chan 725702d5011SMichael Chan bnxt_ulp_sriov_cfg(bp, *num_vfs); 726702d5011SMichael Chan return 0; 727702d5011SMichael Chan } 728702d5011SMichael Chan 729c0c050c5SMichael Chan static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) 730c0c050c5SMichael Chan { 731c0c050c5SMichael Chan int rc = 0, vfs_supported; 732c0c050c5SMichael Chan int min_rx_rings, min_tx_rings, min_rss_ctxs; 7336a4f2947SMichael Chan struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 734c0c050c5SMichael Chan int tx_ok = 0, rx_ok = 0, rss_ok = 0; 73502157079SMichael Chan int avail_cp, avail_stat; 736c0c050c5SMichael Chan 737c0c050c5SMichael Chan /* Check if we can enable requested num of vf's. At a mininum 738c0c050c5SMichael Chan * we require 1 RX 1 TX rings for each VF. In this minimum conf 739c0c050c5SMichael Chan * features like TPA will not be available. 740c0c050c5SMichael Chan */ 741c0c050c5SMichael Chan vfs_supported = *num_vfs; 742c0c050c5SMichael Chan 743e916b081SMichael Chan avail_cp = bnxt_get_avail_cp_rings_for_en(bp); 744c027c6b4SVasundhara Volam avail_stat = bnxt_get_avail_stat_ctxs_for_en(bp); 74502157079SMichael Chan avail_cp = min_t(int, avail_cp, avail_stat); 74602157079SMichael Chan 747c0c050c5SMichael Chan while (vfs_supported) { 748c0c050c5SMichael Chan min_rx_rings = vfs_supported; 749c0c050c5SMichael Chan min_tx_rings = vfs_supported; 750c0c050c5SMichael Chan min_rss_ctxs = vfs_supported; 751c0c050c5SMichael Chan 752c0c050c5SMichael Chan if (bp->flags & BNXT_FLAG_AGG_RINGS) { 7536a4f2947SMichael Chan if (hw_resc->max_rx_rings - bp->rx_nr_rings * 2 >= 754c0c050c5SMichael Chan min_rx_rings) 755c0c050c5SMichael Chan rx_ok = 1; 756c0c050c5SMichael Chan } else { 7576a4f2947SMichael Chan if (hw_resc->max_rx_rings - bp->rx_nr_rings >= 758c0c050c5SMichael Chan min_rx_rings) 759c0c050c5SMichael Chan rx_ok = 1; 760c0c050c5SMichael Chan } 7616a4f2947SMichael Chan if (hw_resc->max_vnics - bp->nr_vnics < min_rx_rings || 76202157079SMichael Chan avail_cp < min_rx_rings) 7638427af81SMichael Chan rx_ok = 0; 764c0c050c5SMichael Chan 7656a4f2947SMichael Chan if (hw_resc->max_tx_rings - bp->tx_nr_rings >= min_tx_rings && 76602157079SMichael Chan avail_cp >= min_tx_rings) 767c0c050c5SMichael Chan tx_ok = 1; 768c0c050c5SMichael Chan 7696a4f2947SMichael Chan if (hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs >= 7706a4f2947SMichael Chan min_rss_ctxs) 771c0c050c5SMichael Chan rss_ok = 1; 772c0c050c5SMichael Chan 773c0c050c5SMichael Chan if (tx_ok && rx_ok && rss_ok) 774c0c050c5SMichael Chan break; 775c0c050c5SMichael Chan 776c0c050c5SMichael Chan vfs_supported--; 777c0c050c5SMichael Chan } 778c0c050c5SMichael Chan 779c0c050c5SMichael Chan if (!vfs_supported) { 780c0c050c5SMichael Chan netdev_err(bp->dev, "Cannot enable VF's as all resources are used by PF\n"); 781c0c050c5SMichael Chan return -EINVAL; 782c0c050c5SMichael Chan } 783c0c050c5SMichael Chan 784c0c050c5SMichael Chan if (vfs_supported != *num_vfs) { 785c0c050c5SMichael Chan netdev_info(bp->dev, "Requested VFs %d, can enable %d\n", 786c0c050c5SMichael Chan *num_vfs, vfs_supported); 787c0c050c5SMichael Chan *num_vfs = vfs_supported; 788c0c050c5SMichael Chan } 789c0c050c5SMichael Chan 790c0c050c5SMichael Chan rc = bnxt_alloc_vf_resources(bp, *num_vfs); 791c0c050c5SMichael Chan if (rc) 792c0c050c5SMichael Chan goto err_out1; 793c0c050c5SMichael Chan 7942cd86968SVasundhara Volam rc = bnxt_cfg_hw_sriov(bp, num_vfs, false); 795c0c050c5SMichael Chan if (rc) 796c0c050c5SMichael Chan goto err_out2; 797c0c050c5SMichael Chan 798c0c050c5SMichael Chan rc = pci_enable_sriov(bp->pdev, *num_vfs); 799c0c050c5SMichael Chan if (rc) 800c0c050c5SMichael Chan goto err_out2; 801c0c050c5SMichael Chan 802c0c050c5SMichael Chan return 0; 803c0c050c5SMichael Chan 804c0c050c5SMichael Chan err_out2: 805c0c050c5SMichael Chan /* Free the resources reserved for various VF's */ 8064bb6cdceSJeffrey Huang bnxt_hwrm_func_vf_resource_free(bp, *num_vfs); 807c0c050c5SMichael Chan 808c0c050c5SMichael Chan err_out1: 809c0c050c5SMichael Chan bnxt_free_vf_resources(bp); 810c0c050c5SMichael Chan 811c0c050c5SMichael Chan return rc; 812c0c050c5SMichael Chan } 813c0c050c5SMichael Chan 814c0c050c5SMichael Chan void bnxt_sriov_disable(struct bnxt *bp) 815c0c050c5SMichael Chan { 8164bb6cdceSJeffrey Huang u16 num_vfs = pci_num_vf(bp->pdev); 8174bb6cdceSJeffrey Huang 8184bb6cdceSJeffrey Huang if (!num_vfs) 819c0c050c5SMichael Chan return; 820c0c050c5SMichael Chan 8214ab0c6a8SSathya Perla /* synchronize VF and VF-rep create and destroy */ 8224ab0c6a8SSathya Perla mutex_lock(&bp->sriov_lock); 8234ab0c6a8SSathya Perla bnxt_vf_reps_destroy(bp); 8244ab0c6a8SSathya Perla 8254bb6cdceSJeffrey Huang if (pci_vfs_assigned(bp->pdev)) { 82619241368SJeffrey Huang bnxt_hwrm_fwd_async_event_cmpl( 82787c374deSMichael Chan bp, NULL, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD); 8284bb6cdceSJeffrey Huang netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n", 8294bb6cdceSJeffrey Huang num_vfs); 8304bb6cdceSJeffrey Huang } else { 831c0c050c5SMichael Chan pci_disable_sriov(bp->pdev); 8324bb6cdceSJeffrey Huang /* Free the HW resources reserved for various VF's */ 8334bb6cdceSJeffrey Huang bnxt_hwrm_func_vf_resource_free(bp, num_vfs); 8344bb6cdceSJeffrey Huang } 8354ab0c6a8SSathya Perla mutex_unlock(&bp->sriov_lock); 836c0c050c5SMichael Chan 837c0c050c5SMichael Chan bnxt_free_vf_resources(bp); 838c0c050c5SMichael Chan 8394a21b49bSMichael Chan /* Reclaim all resources for the PF. */ 8407b08f661SMichael Chan rtnl_lock(); 8417b08f661SMichael Chan bnxt_restore_pf_fw_resources(bp); 8427b08f661SMichael Chan rtnl_unlock(); 8432f593846SMichael Chan 8442f593846SMichael Chan bnxt_ulp_sriov_cfg(bp, 0); 845c0c050c5SMichael Chan } 846c0c050c5SMichael Chan 847c0c050c5SMichael Chan int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs) 848c0c050c5SMichael Chan { 849c0c050c5SMichael Chan struct net_device *dev = pci_get_drvdata(pdev); 850c0c050c5SMichael Chan struct bnxt *bp = netdev_priv(dev); 851c0c050c5SMichael Chan 852c0c050c5SMichael Chan if (!(bp->flags & BNXT_FLAG_USING_MSIX)) { 853c0c050c5SMichael Chan netdev_warn(dev, "Not allow SRIOV if the irq mode is not MSIX\n"); 854c0c050c5SMichael Chan return 0; 855c0c050c5SMichael Chan } 856c0c050c5SMichael Chan 857c0c050c5SMichael Chan rtnl_lock(); 858c0c050c5SMichael Chan if (!netif_running(dev)) { 859c0c050c5SMichael Chan netdev_warn(dev, "Reject SRIOV config request since if is down!\n"); 860c0c050c5SMichael Chan rtnl_unlock(); 861c0c050c5SMichael Chan return 0; 862c0c050c5SMichael Chan } 8633bc7d4a3SMichael Chan if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 8643bc7d4a3SMichael Chan netdev_warn(dev, "Reject SRIOV config request when FW reset is in progress\n"); 8653bc7d4a3SMichael Chan rtnl_unlock(); 8663bc7d4a3SMichael Chan return 0; 8673bc7d4a3SMichael Chan } 868c0c050c5SMichael Chan bp->sriov_cfg = true; 869c0c050c5SMichael Chan rtnl_unlock(); 8704bb6cdceSJeffrey Huang 8714bb6cdceSJeffrey Huang if (pci_vfs_assigned(bp->pdev)) { 8724bb6cdceSJeffrey Huang netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n"); 8734bb6cdceSJeffrey Huang num_vfs = 0; 8744bb6cdceSJeffrey Huang goto sriov_cfg_exit; 875c0c050c5SMichael Chan } 876c0c050c5SMichael Chan 877c0c050c5SMichael Chan /* Check if enabled VFs is same as requested */ 8784bb6cdceSJeffrey Huang if (num_vfs && num_vfs == bp->pf.active_vfs) 8794bb6cdceSJeffrey Huang goto sriov_cfg_exit; 8804bb6cdceSJeffrey Huang 8814bb6cdceSJeffrey Huang /* if there are previous existing VFs, clean them up */ 8824bb6cdceSJeffrey Huang bnxt_sriov_disable(bp); 8834bb6cdceSJeffrey Huang if (!num_vfs) 8844bb6cdceSJeffrey Huang goto sriov_cfg_exit; 885c0c050c5SMichael Chan 886c0c050c5SMichael Chan bnxt_sriov_enable(bp, &num_vfs); 887c0c050c5SMichael Chan 8884bb6cdceSJeffrey Huang sriov_cfg_exit: 889c0c050c5SMichael Chan bp->sriov_cfg = false; 890c0c050c5SMichael Chan wake_up(&bp->sriov_cfg_wait); 891c0c050c5SMichael Chan 892c0c050c5SMichael Chan return num_vfs; 893c0c050c5SMichael Chan } 894c0c050c5SMichael Chan 895c0c050c5SMichael Chan static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, 896c0c050c5SMichael Chan void *encap_resp, __le64 encap_resp_addr, 897c0c050c5SMichael Chan __le16 encap_resp_cpr, u32 msg_size) 898c0c050c5SMichael Chan { 899c0c050c5SMichael Chan int rc = 0; 900c0c050c5SMichael Chan struct hwrm_fwd_resp_input req = {0}; 901c0c050c5SMichael Chan 90259895f59SMichael Chan if (BNXT_FWD_RESP_SIZE_ERR(msg_size)) 90359895f59SMichael Chan return -EINVAL; 90459895f59SMichael Chan 905c0c050c5SMichael Chan bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1); 906c0c050c5SMichael Chan 907c0c050c5SMichael Chan /* Set the new target id */ 908c0c050c5SMichael Chan req.target_id = cpu_to_le16(vf->fw_fid); 909c193554eSMichael Chan req.encap_resp_target_id = cpu_to_le16(vf->fw_fid); 910c0c050c5SMichael Chan req.encap_resp_len = cpu_to_le16(msg_size); 911c0c050c5SMichael Chan req.encap_resp_addr = encap_resp_addr; 912c0c050c5SMichael Chan req.encap_resp_cmpl_ring = encap_resp_cpr; 913c0c050c5SMichael Chan memcpy(req.encap_resp, encap_resp, msg_size); 914c0c050c5SMichael Chan 915a798302dSMichael Chan rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 916a798302dSMichael Chan if (rc) 917c0c050c5SMichael Chan netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc); 918c0c050c5SMichael Chan return rc; 919c0c050c5SMichael Chan } 920c0c050c5SMichael Chan 921c0c050c5SMichael Chan static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf, 922c0c050c5SMichael Chan u32 msg_size) 923c0c050c5SMichael Chan { 924c0c050c5SMichael Chan int rc = 0; 925c0c050c5SMichael Chan struct hwrm_reject_fwd_resp_input req = {0}; 926c0c050c5SMichael Chan 92759895f59SMichael Chan if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size)) 92859895f59SMichael Chan return -EINVAL; 92959895f59SMichael Chan 930c0c050c5SMichael Chan bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1); 931c0c050c5SMichael Chan /* Set the new target id */ 932c0c050c5SMichael Chan req.target_id = cpu_to_le16(vf->fw_fid); 933c193554eSMichael Chan req.encap_resp_target_id = cpu_to_le16(vf->fw_fid); 934c0c050c5SMichael Chan memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size); 935c0c050c5SMichael Chan 936a798302dSMichael Chan rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 937a798302dSMichael Chan if (rc) 938c0c050c5SMichael Chan netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc); 939c0c050c5SMichael Chan return rc; 940c0c050c5SMichael Chan } 941c0c050c5SMichael Chan 942c0c050c5SMichael Chan static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, 943c0c050c5SMichael Chan u32 msg_size) 944c0c050c5SMichael Chan { 945c0c050c5SMichael Chan int rc = 0; 946c0c050c5SMichael Chan struct hwrm_exec_fwd_resp_input req = {0}; 947c0c050c5SMichael Chan 94859895f59SMichael Chan if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size)) 94959895f59SMichael Chan return -EINVAL; 95059895f59SMichael Chan 951c0c050c5SMichael Chan bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1); 952c0c050c5SMichael Chan /* Set the new target id */ 953c0c050c5SMichael Chan req.target_id = cpu_to_le16(vf->fw_fid); 954c193554eSMichael Chan req.encap_resp_target_id = cpu_to_le16(vf->fw_fid); 955c0c050c5SMichael Chan memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size); 956c0c050c5SMichael Chan 957a798302dSMichael Chan rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 958a798302dSMichael Chan if (rc) 959c0c050c5SMichael Chan netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc); 960c0c050c5SMichael Chan return rc; 961c0c050c5SMichael Chan } 962c0c050c5SMichael Chan 963746df139SVasundhara Volam static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf) 96491cdda40SVasundhara Volam { 96591cdda40SVasundhara Volam u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input); 96691cdda40SVasundhara Volam struct hwrm_func_vf_cfg_input *req = 96791cdda40SVasundhara Volam (struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr; 96891cdda40SVasundhara Volam 969746df139SVasundhara Volam /* Allow VF to set a valid MAC address, if trust is set to on or 970746df139SVasundhara Volam * if the PF assigned MAC address is zero 97191cdda40SVasundhara Volam */ 97291cdda40SVasundhara Volam if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) { 9732a516444SMichael Chan bool trust = bnxt_is_trusted_vf(bp, vf); 9742a516444SMichael Chan 97591cdda40SVasundhara Volam if (is_valid_ether_addr(req->dflt_mac_addr) && 9762a516444SMichael Chan (trust || !is_valid_ether_addr(vf->mac_addr) || 977707e7e96SMichael Chan ether_addr_equal(req->dflt_mac_addr, vf->mac_addr))) { 97891cdda40SVasundhara Volam ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr); 97991cdda40SVasundhara Volam return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size); 98091cdda40SVasundhara Volam } 98191cdda40SVasundhara Volam return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size); 98291cdda40SVasundhara Volam } 98391cdda40SVasundhara Volam return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size); 98491cdda40SVasundhara Volam } 98591cdda40SVasundhara Volam 986c0c050c5SMichael Chan static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf) 987c0c050c5SMichael Chan { 988c0c050c5SMichael Chan u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input); 989c0c050c5SMichael Chan struct hwrm_cfa_l2_filter_alloc_input *req = 990c0c050c5SMichael Chan (struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr; 99191cdda40SVasundhara Volam bool mac_ok = false; 992c0c050c5SMichael Chan 993746df139SVasundhara Volam if (!is_valid_ether_addr((const u8 *)req->l2_addr)) 994746df139SVasundhara Volam return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size); 995746df139SVasundhara Volam 996746df139SVasundhara Volam /* Allow VF to set a valid MAC address, if trust is set to on. 997746df139SVasundhara Volam * Or VF MAC address must first match MAC address in PF's context. 99891cdda40SVasundhara Volam * Otherwise, it must match the VF MAC address if firmware spec >= 99991cdda40SVasundhara Volam * 1.2.2 100091cdda40SVasundhara Volam */ 10012a516444SMichael Chan if (bnxt_is_trusted_vf(bp, vf)) { 1002746df139SVasundhara Volam mac_ok = true; 1003746df139SVasundhara Volam } else if (is_valid_ether_addr(vf->mac_addr)) { 100491cdda40SVasundhara Volam if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr)) 100591cdda40SVasundhara Volam mac_ok = true; 100691cdda40SVasundhara Volam } else if (is_valid_ether_addr(vf->vf_mac_addr)) { 100791cdda40SVasundhara Volam if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr)) 100891cdda40SVasundhara Volam mac_ok = true; 100991cdda40SVasundhara Volam } else { 10106fd544c8SYueHaibing /* There are two cases: 10116fd544c8SYueHaibing * 1.If firmware spec < 0x10202,VF MAC address is not forwarded 10126fd544c8SYueHaibing * to the PF and so it doesn't have to match 10136fd544c8SYueHaibing * 2.Allow VF to modify it's own MAC when PF has not assigned a 10146fd544c8SYueHaibing * valid MAC address and firmware spec >= 0x10202 10156fd544c8SYueHaibing */ 101691cdda40SVasundhara Volam mac_ok = true; 101791cdda40SVasundhara Volam } 101891cdda40SVasundhara Volam if (mac_ok) 1019c0c050c5SMichael Chan return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size); 1020c0c050c5SMichael Chan return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size); 1021c0c050c5SMichael Chan } 1022c0c050c5SMichael Chan 1023c0c050c5SMichael Chan static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf) 1024c0c050c5SMichael Chan { 1025c0c050c5SMichael Chan int rc = 0; 1026c0c050c5SMichael Chan 1027c0c050c5SMichael Chan if (!(vf->flags & BNXT_VF_LINK_FORCED)) { 1028c0c050c5SMichael Chan /* real link */ 1029c0c050c5SMichael Chan rc = bnxt_hwrm_exec_fwd_resp( 1030c0c050c5SMichael Chan bp, vf, sizeof(struct hwrm_port_phy_qcfg_input)); 1031c0c050c5SMichael Chan } else { 1032*9d6b648cSMichael Chan struct hwrm_port_phy_qcfg_output phy_qcfg_resp = {0}; 1033c0c050c5SMichael Chan struct hwrm_port_phy_qcfg_input *phy_qcfg_req; 1034c0c050c5SMichael Chan 1035c0c050c5SMichael Chan phy_qcfg_req = 1036c0c050c5SMichael Chan (struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr; 1037c0c050c5SMichael Chan mutex_lock(&bp->hwrm_cmd_lock); 1038c0c050c5SMichael Chan memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp, 1039c0c050c5SMichael Chan sizeof(phy_qcfg_resp)); 1040c0c050c5SMichael Chan mutex_unlock(&bp->hwrm_cmd_lock); 1041845adfe4SMichael Chan phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp)); 1042c0c050c5SMichael Chan phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id; 1043845adfe4SMichael Chan phy_qcfg_resp.valid = 1; 1044c0c050c5SMichael Chan 1045c0c050c5SMichael Chan if (vf->flags & BNXT_VF_LINK_UP) { 1046c0c050c5SMichael Chan /* if physical link is down, force link up on VF */ 104773b9bad6SMichael Chan if (phy_qcfg_resp.link != 104873b9bad6SMichael Chan PORT_PHY_QCFG_RESP_LINK_LINK) { 1049c0c050c5SMichael Chan phy_qcfg_resp.link = 1050c0c050c5SMichael Chan PORT_PHY_QCFG_RESP_LINK_LINK; 105111f15ed3SMichael Chan phy_qcfg_resp.link_speed = cpu_to_le16( 105211f15ed3SMichael Chan PORT_PHY_QCFG_RESP_LINK_SPEED_10GB); 1053acb20054SMichael Chan phy_qcfg_resp.duplex_cfg = 1054acb20054SMichael Chan PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL; 1055acb20054SMichael Chan phy_qcfg_resp.duplex_state = 1056acb20054SMichael Chan PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL; 1057c0c050c5SMichael Chan phy_qcfg_resp.pause = 1058c0c050c5SMichael Chan (PORT_PHY_QCFG_RESP_PAUSE_TX | 1059c0c050c5SMichael Chan PORT_PHY_QCFG_RESP_PAUSE_RX); 1060c0c050c5SMichael Chan } 1061c0c050c5SMichael Chan } else { 1062c0c050c5SMichael Chan /* force link down */ 1063c0c050c5SMichael Chan phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK; 1064c0c050c5SMichael Chan phy_qcfg_resp.link_speed = 0; 1065acb20054SMichael Chan phy_qcfg_resp.duplex_state = 1066acb20054SMichael Chan PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF; 1067c0c050c5SMichael Chan phy_qcfg_resp.pause = 0; 1068c0c050c5SMichael Chan } 1069c0c050c5SMichael Chan rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp, 1070c0c050c5SMichael Chan phy_qcfg_req->resp_addr, 1071c0c050c5SMichael Chan phy_qcfg_req->cmpl_ring, 1072c0c050c5SMichael Chan sizeof(phy_qcfg_resp)); 1073c0c050c5SMichael Chan } 1074c0c050c5SMichael Chan return rc; 1075c0c050c5SMichael Chan } 1076c0c050c5SMichael Chan 1077c0c050c5SMichael Chan static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf) 1078c0c050c5SMichael Chan { 1079c0c050c5SMichael Chan int rc = 0; 1080a8643e16SMichael Chan struct input *encap_req = vf->hwrm_cmd_req_addr; 1081a8643e16SMichael Chan u32 req_type = le16_to_cpu(encap_req->req_type); 1082c0c050c5SMichael Chan 1083c0c050c5SMichael Chan switch (req_type) { 108491cdda40SVasundhara Volam case HWRM_FUNC_VF_CFG: 1085746df139SVasundhara Volam rc = bnxt_vf_configure_mac(bp, vf); 108691cdda40SVasundhara Volam break; 1087c0c050c5SMichael Chan case HWRM_CFA_L2_FILTER_ALLOC: 1088c0c050c5SMichael Chan rc = bnxt_vf_validate_set_mac(bp, vf); 1089c0c050c5SMichael Chan break; 1090c0c050c5SMichael Chan case HWRM_FUNC_CFG: 1091c0c050c5SMichael Chan /* TODO Validate if VF is allowed to change mac address, 1092c0c050c5SMichael Chan * mtu, num of rings etc 1093c0c050c5SMichael Chan */ 1094c0c050c5SMichael Chan rc = bnxt_hwrm_exec_fwd_resp( 1095c0c050c5SMichael Chan bp, vf, sizeof(struct hwrm_func_cfg_input)); 1096c0c050c5SMichael Chan break; 1097c0c050c5SMichael Chan case HWRM_PORT_PHY_QCFG: 1098c0c050c5SMichael Chan rc = bnxt_vf_set_link(bp, vf); 1099c0c050c5SMichael Chan break; 1100c0c050c5SMichael Chan default: 1101c0c050c5SMichael Chan break; 1102c0c050c5SMichael Chan } 1103c0c050c5SMichael Chan return rc; 1104c0c050c5SMichael Chan } 1105c0c050c5SMichael Chan 1106c0c050c5SMichael Chan void bnxt_hwrm_exec_fwd_req(struct bnxt *bp) 1107c0c050c5SMichael Chan { 1108c0c050c5SMichael Chan u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id; 1109c0c050c5SMichael Chan 1110c0c050c5SMichael Chan /* Scan through VF's and process commands */ 1111c0c050c5SMichael Chan while (1) { 1112c0c050c5SMichael Chan vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i); 1113c0c050c5SMichael Chan if (vf_id >= active_vfs) 1114c0c050c5SMichael Chan break; 1115c0c050c5SMichael Chan 1116c0c050c5SMichael Chan clear_bit(vf_id, bp->pf.vf_event_bmap); 1117c0c050c5SMichael Chan bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]); 1118c0c050c5SMichael Chan i = vf_id + 1; 1119c0c050c5SMichael Chan } 1120c0c050c5SMichael Chan } 1121379a80a1SMichael Chan 1122379a80a1SMichael Chan void bnxt_update_vf_mac(struct bnxt *bp) 1123379a80a1SMichael Chan { 1124379a80a1SMichael Chan struct hwrm_func_qcaps_input req = {0}; 1125379a80a1SMichael Chan struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 1126379a80a1SMichael Chan 1127379a80a1SMichael Chan bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1); 1128379a80a1SMichael Chan req.fid = cpu_to_le16(0xffff); 1129379a80a1SMichael Chan 1130379a80a1SMichael Chan mutex_lock(&bp->hwrm_cmd_lock); 1131379a80a1SMichael Chan if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT)) 1132379a80a1SMichael Chan goto update_vf_mac_exit; 1133379a80a1SMichael Chan 11343874d6a8SJeffrey Huang /* Store MAC address from the firmware. There are 2 cases: 11353874d6a8SJeffrey Huang * 1. MAC address is valid. It is assigned from the PF and we 11363874d6a8SJeffrey Huang * need to override the current VF MAC address with it. 11373874d6a8SJeffrey Huang * 2. MAC address is zero. The VF will use a random MAC address by 11383874d6a8SJeffrey Huang * default but the stored zero MAC will allow the VF user to change 11393874d6a8SJeffrey Huang * the random MAC address using ndo_set_mac_address() if he wants. 11403874d6a8SJeffrey Huang */ 114111f15ed3SMichael Chan if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr)) 114211f15ed3SMichael Chan memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN); 11433874d6a8SJeffrey Huang 11443874d6a8SJeffrey Huang /* overwrite netdev dev_addr with admin VF MAC */ 11453874d6a8SJeffrey Huang if (is_valid_ether_addr(bp->vf.mac_addr)) 1146379a80a1SMichael Chan memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN); 1147379a80a1SMichael Chan update_vf_mac_exit: 1148379a80a1SMichael Chan mutex_unlock(&bp->hwrm_cmd_lock); 1149379a80a1SMichael Chan } 1150379a80a1SMichael Chan 115128ea334bSMichael Chan int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict) 115284c33dd3SMichael Chan { 115384c33dd3SMichael Chan struct hwrm_func_vf_cfg_input req = {0}; 115484c33dd3SMichael Chan int rc = 0; 115584c33dd3SMichael Chan 115684c33dd3SMichael Chan if (!BNXT_VF(bp)) 115784c33dd3SMichael Chan return 0; 115884c33dd3SMichael Chan 115984c33dd3SMichael Chan if (bp->hwrm_spec_code < 0x10202) { 116084c33dd3SMichael Chan if (is_valid_ether_addr(bp->vf.mac_addr)) 116184c33dd3SMichael Chan rc = -EADDRNOTAVAIL; 116284c33dd3SMichael Chan goto mac_done; 116384c33dd3SMichael Chan } 116484c33dd3SMichael Chan bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); 116584c33dd3SMichael Chan req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR); 116684c33dd3SMichael Chan memcpy(req.dflt_mac_addr, mac, ETH_ALEN); 116784c33dd3SMichael Chan rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 116884c33dd3SMichael Chan mac_done: 116928ea334bSMichael Chan if (rc && strict) { 117084c33dd3SMichael Chan rc = -EADDRNOTAVAIL; 117184c33dd3SMichael Chan netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n", 117284c33dd3SMichael Chan mac); 117384c33dd3SMichael Chan return rc; 117484c33dd3SMichael Chan } 117528ea334bSMichael Chan return 0; 117628ea334bSMichael Chan } 1177c0c050c5SMichael Chan #else 1178c0c050c5SMichael Chan 11792cd86968SVasundhara Volam int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset) 1180702d5011SMichael Chan { 1181702d5011SMichael Chan if (*num_vfs) 1182702d5011SMichael Chan return -EOPNOTSUPP; 1183702d5011SMichael Chan return 0; 1184702d5011SMichael Chan } 1185702d5011SMichael Chan 1186c0c050c5SMichael Chan void bnxt_sriov_disable(struct bnxt *bp) 1187c0c050c5SMichael Chan { 1188c0c050c5SMichael Chan } 1189c0c050c5SMichael Chan 1190c0c050c5SMichael Chan void bnxt_hwrm_exec_fwd_req(struct bnxt *bp) 1191c0c050c5SMichael Chan { 1192379a80a1SMichael Chan netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enable\n"); 1193379a80a1SMichael Chan } 1194379a80a1SMichael Chan 1195379a80a1SMichael Chan void bnxt_update_vf_mac(struct bnxt *bp) 1196379a80a1SMichael Chan { 1197c0c050c5SMichael Chan } 119884c33dd3SMichael Chan 119928ea334bSMichael Chan int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict) 120084c33dd3SMichael Chan { 120184c33dd3SMichael Chan return 0; 120284c33dd3SMichael Chan } 1203c0c050c5SMichael Chan #endif 1204