/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2018 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_vfr.h"
#include "bnxt_ethtool.h"

#ifdef CONFIG_BNXT_SRIOV
static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
					  struct bnxt_vf_info *vf, u16 event_id)
{
	struct hwrm_fwd_async_event_cmpl_input req = {0};
	struct hwrm_async_event_cmpl *async_cmpl;
	int rc = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
	if (vf)
		req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
	else
		/* broadcast this async event to all VFs */
		req.encap_async_event_target_id = cpu_to_le16(0xffff);
	async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
	async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
	async_cmpl->event_id = cpu_to_le16(event_id);

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
			   rc);
	return rc;
}

/* Sanity checks common to the VF ndo calls below: the PF must be up,
 * SR-IOV must be enabled, and vf_id must be within range.
 */
static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
{
	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
		netdev_err(bp->dev, "vf ndo called though PF is down\n");
		return -EINVAL;
	}
	if (!bp->pf.active_vfs) {
		netdev_err(bp->dev, "vf ndo called though sriov is disabled\n");
		return -EINVAL;
	}
	if (vf_id >= bp->pf.active_vfs) {
		netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
		return -EINVAL;
	}
	return 0;
}

int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	bool old_setting = false;
	u32 func_flags;
	int rc;

	if (bp->hwrm_spec_code < 0x10701)
		return -ENOTSUPP;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	if (vf->flags & BNXT_VF_SPOOFCHK)
		old_setting = true;
	if (old_setting == setting)
		return 0;

	if (setting)
		func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
	else
		func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
	/* TODO: if the driver supports VLAN filter on guest VLAN,
	 * the spoof check should also include vlan anti-spoofing
	 */
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(func_flags);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		if (setting)
			vf->flags |= BNXT_VF_SPOOFCHK;
		else
			vf->flags &= ~BNXT_VF_SPOOFCHK;
	}
	return rc;
}

static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_qcfg_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
	req.fid = cpu_to_le16(BNXT_PF(bp) ? vf->fw_fid : 0xffff);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		mutex_unlock(&bp->hwrm_cmd_lock);
		return rc;
	}
	vf->func_qcfg_flags = le16_to_cpu(resp->flags);
	mutex_unlock(&bp->hwrm_cmd_lock);
	return 0;
}

bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	if (BNXT_PF(bp) && !(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
		return !!(vf->flags & BNXT_VF_TRUST);

	bnxt_hwrm_func_qcfg_flags(bp, vf);
	return !!(vf->func_qcfg_flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF);
}

static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	struct hwrm_func_cfg_input req = {0};

	if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	if (vf->flags & BNXT_VF_TRUST)
		req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
	else
		req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;

	if (bnxt_vf_ndo_prep(bp, vf_id))
		return -EINVAL;

	vf = &bp->pf.vf[vf_id];
	if (trusted)
		vf->flags |= BNXT_VF_TRUST;
	else
		vf->flags &= ~BNXT_VF_TRUST;

	bnxt_hwrm_set_trusted_vf(bp, vf);
	return 0;
}

int bnxt_get_vf_config(struct net_device *dev, int vf_id,
		       struct ifla_vf_info *ivi)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	ivi->vf = vf_id;
	vf = &bp->pf.vf[vf_id];

	if (is_valid_ether_addr(vf->mac_addr))
		memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
	else
		memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN);
	ivi->max_tx_rate = vf->max_tx_rate;
	ivi->min_tx_rate = vf->min_tx_rate;
	ivi->vlan = vf->vlan;
	if (vf->flags & BNXT_VF_QOS)
		ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
	else
		ivi->qos = 0;
	ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
	ivi->trusted = bnxt_is_trusted_vf(bp, vf);
	if (!(vf->flags & BNXT_VF_LINK_FORCED))
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->flags & BNXT_VF_LINK_UP)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;

	return 0;
}

int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;
	/* reject bc or mc mac addr, zero mac addr means allow
	 * VF to use its own mac addr
	 */
	if (is_multicast_ether_addr(mac)) {
		netdev_err(dev, "Invalid VF ethernet address\n");
		return -EINVAL;
	}
	vf = &bp->pf.vf[vf_id];

	memcpy(vf->mac_addr, mac, ETH_ALEN);
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
		     __be16 vlan_proto)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	u16 vlan_tag;
	int rc;

	if (bp->hwrm_spec_code < 0x10201)
		return -ENOTSUPP;

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	/* TODO: needed to implement proper handling of user priority,
	 * currently fail the command if there is valid priority
	 */
	if (vlan_id > 4095 || qos)
		return -EINVAL;

	vf = &bp->pf.vf[vf_id];
	vlan_tag = vlan_id;
	if (vlan_tag == vf->vlan)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.dflt_vlan = cpu_to_le16(vlan_tag);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		vf->vlan = vlan_tag;
	return rc;
}

int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
		   int max_tx_rate)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	u32 pf_link_speed;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
	if (max_tx_rate > pf_link_speed) {
		netdev_info(bp->dev, "max tx rate %d exceed PF link speed for VF %d\n",
			    max_tx_rate, vf_id);
		return -EINVAL;
	}

	if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
		netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
			    min_tx_rate, vf_id);
		return -EINVAL;
	}
	if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
		return 0;
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
	req.max_bw = cpu_to_le32(max_tx_rate);
	req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
	req.min_bw = cpu_to_le32(min_tx_rate);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		vf->min_tx_rate = min_tx_rate;
		vf->max_tx_rate = max_tx_rate;
	}
	return rc;
}

int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];

	vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED);
	switch (link) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->flags |= BNXT_VF_LINK_UP;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->flags |= BNXT_VF_LINK_FORCED;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED;
		break;
	default:
		netdev_err(bp->dev, "Invalid link option\n");
		rc = -EINVAL;
		break;
	}
	if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
		rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
			ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
	return rc;
}

static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
{
	int i;
	struct bnxt_vf_info *vf;

	for (i = 0; i < num_vfs; i++) {
		vf = &bp->pf.vf[i];
		memset(vf, 0, sizeof(*vf));
	}
	return 0;
}

static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
{
	int i, rc = 0;
	struct bnxt_pf_info *pf = &bp->pf;
	struct hwrm_func_vf_resc_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
		req.vf_id = cpu_to_le16(i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static void bnxt_free_vf_resources(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;
	int i;

	kfree(bp->pf.vf_event_bmap);
	bp->pf.vf_event_bmap = NULL;

	for (i = 0; i < 4; i++) {
		if (bp->pf.hwrm_cmd_req_addr[i]) {
			dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE,
					  bp->pf.hwrm_cmd_req_addr[i],
					  bp->pf.hwrm_cmd_req_dma_addr[i]);
			bp->pf.hwrm_cmd_req_addr[i] = NULL;
		}
	}

	bp->pf.active_vfs = 0;
	kfree(bp->pf.vf);
	bp->pf.vf = NULL;
}

static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
{
	struct pci_dev *pdev = bp->pdev;
	u32 nr_pages, size, i, j, k = 0;

	bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
	if (!bp->pf.vf)
		return -ENOMEM;

	bnxt_set_vf_attr(bp, num_vfs);

	size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
	nr_pages = size / BNXT_PAGE_SIZE;
	if (size & (BNXT_PAGE_SIZE - 1))
		nr_pages++;

	for (i = 0; i < nr_pages; i++) {
		bp->pf.hwrm_cmd_req_addr[i] =
			dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE,
					   &bp->pf.hwrm_cmd_req_dma_addr[i],
					   GFP_KERNEL);

		if (!bp->pf.hwrm_cmd_req_addr[i])
			return -ENOMEM;

		for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) {
			struct bnxt_vf_info *vf = &bp->pf.vf[k];

			vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
						j * BNXT_HWRM_REQ_MAX_SIZE;
			vf->hwrm_cmd_req_dma_addr =
				bp->pf.hwrm_cmd_req_dma_addr[i] + j *
				BNXT_HWRM_REQ_MAX_SIZE;
			k++;
		}
	}

	/* Max 128 VFs */
	bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
	if (!bp->pf.vf_event_bmap)
		return -ENOMEM;

	bp->pf.hwrm_cmd_req_pages = nr_pages;
	return 0;
}

static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
{
	struct hwrm_func_buf_rgtr_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1);

	req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
	req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
	req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
	req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
	req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
	req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
	req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

/* Caller holds bp->hwrm_cmd_lock mutex lock */
static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt_vf_info *vf;

	vf = &bp->pf.vf[vf_id];
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);

	if (is_valid_ether_addr(vf->mac_addr)) {
		req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
		memcpy(req.dflt_mac_addr, vf->mac_addr, ETH_ALEN);
	}
	if (vf->vlan) {
		req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
		req.dflt_vlan = cpu_to_le16(vf->vlan);
	}
	if (vf->max_tx_rate) {
		req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
		req.max_bw = cpu_to_le32(vf->max_tx_rate);
#ifdef HAVE_IFLA_TX_RATE
		req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
		req.min_bw = cpu_to_le32(vf->min_tx_rate);
#endif
	}
	if (vf->flags & BNXT_VF_TRUST)
		req.flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);

	_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

/* Only called by PF to reserve resources for VFs, returns actual number of
 * VFs configured, or < 0 on error.
 */
static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
{
	struct hwrm_func_vf_resource_cfg_input req = {0};
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;
	u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
	struct bnxt_pf_info *pf = &bp->pf;
	int i, rc = 0, min = 1;
	u16 vf_msix = 0;
	u16 vf_rss;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);

	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		vf_msix = hw_resc->max_nqs - bnxt_nq_rings_in_use(bp);
		vf_ring_grps = 0;
	} else {
		vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings;
	}
	vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp);
	vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp);
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
	else
		vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings;
	vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
	vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
	vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
	vf_rss = hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs;

	req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
	if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
		min = 0;
		req.min_rsscos_ctx = cpu_to_le16(min);
	}
	if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL ||
	    pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
		req.min_cmpl_rings = cpu_to_le16(min);
		req.min_tx_rings = cpu_to_le16(min);
		req.min_rx_rings = cpu_to_le16(min);
		req.min_l2_ctxs = cpu_to_le16(min);
		req.min_vnics = cpu_to_le16(min);
		req.min_stat_ctx = cpu_to_le16(min);
		if (!(bp->flags & BNXT_FLAG_CHIP_P5))
			req.min_hw_ring_grps = cpu_to_le16(min);
	} else {
		vf_cp_rings /= num_vfs;
		vf_tx_rings /= num_vfs;
		vf_rx_rings /= num_vfs;
		vf_vnics /= num_vfs;
		vf_stat_ctx /= num_vfs;
		vf_ring_grps /= num_vfs;
		vf_rss /= num_vfs;

		req.min_cmpl_rings = cpu_to_le16(vf_cp_rings);
		req.min_tx_rings = cpu_to_le16(vf_tx_rings);
		req.min_rx_rings = cpu_to_le16(vf_rx_rings);
		req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
		req.min_vnics = cpu_to_le16(vf_vnics);
		req.min_stat_ctx = cpu_to_le16(vf_stat_ctx);
		req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
		req.min_rsscos_ctx = cpu_to_le16(vf_rss);
	}
	req.max_cmpl_rings = cpu_to_le16(vf_cp_rings);
	req.max_tx_rings = cpu_to_le16(vf_tx_rings);
	req.max_rx_rings = cpu_to_le16(vf_rx_rings);
	req.max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
	req.max_vnics = cpu_to_le16(vf_vnics);
	req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
	req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
	req.max_rsscos_ctx = cpu_to_le16(vf_rss);
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		req.max_msix = cpu_to_le16(vf_msix / num_vfs);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < num_vfs; i++) {
		if (reset)
			__bnxt_set_vf_params(bp, i);

		req.vf_id = cpu_to_le16(pf->first_vf_id + i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
		pf->active_vfs = i + 1;
		pf->vf[i].fw_fid = pf->first_vf_id + i;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	if (pf->active_vfs) {
		u16 n = pf->active_vfs;

		hw_resc->max_tx_rings -= le16_to_cpu(req.min_tx_rings) * n;
		hw_resc->max_rx_rings -= le16_to_cpu(req.min_rx_rings) * n;
		hw_resc->max_hw_ring_grps -= le16_to_cpu(req.min_hw_ring_grps) *
					     n;
		hw_resc->max_cp_rings -= le16_to_cpu(req.min_cmpl_rings) * n;
		hw_resc->max_rsscos_ctxs -= le16_to_cpu(req.min_rsscos_ctx) * n;
		hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n;
		hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n;
		if (bp->flags & BNXT_FLAG_CHIP_P5)
			hw_resc->max_irqs -= vf_msix * n;

		rc = pf->active_vfs;
	}
	return rc;
}

/* Only called by PF to reserve resources for VFs, returns actual number of
 * VFs configured, or < 0 on error.
 */
static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
{
	u32 rc = 0, mtu, i;
	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	struct hwrm_func_cfg_input req = {0};
	struct bnxt_pf_info *pf = &bp->pf;
	int total_vf_tx_rings = 0;
	u16 vf_ring_grps;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);

	/* Remaining rings are distributed equally among VFs for now */
	vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp) / num_vfs;
	vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp) / num_vfs;
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
			      num_vfs;
	else
		vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings) /
			      num_vfs;
	vf_ring_grps = (hw_resc->max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
	vf_tx_rings = (hw_resc->max_tx_rings - bp->tx_nr_rings) / num_vfs;
	vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs;
	vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);

	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
				  FUNC_CFG_REQ_ENABLES_MRU |
				  FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_VNICS |
				  FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);

	mtu = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
	req.mru = cpu_to_le16(mtu);
	req.mtu = cpu_to_le16(mtu);

	req.num_rsscos_ctxs = cpu_to_le16(1);
	req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
	req.num_tx_rings = cpu_to_le16(vf_tx_rings);
	req.num_rx_rings = cpu_to_le16(vf_rx_rings);
	req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
	req.num_l2_ctxs = cpu_to_le16(4);

	req.num_vnics = cpu_to_le16(vf_vnics);
	/* FIXME spec currently uses 1 bit for stats ctx */
	req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < num_vfs; i++) {
		int vf_tx_rsvd = vf_tx_rings;

		req.fid = cpu_to_le16(pf->first_vf_id + i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
		pf->active_vfs = i + 1;
		pf->vf[i].fw_fid = le16_to_cpu(req.fid);
		rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid,
					      &vf_tx_rsvd);
		if (rc)
			break;
		total_vf_tx_rings += vf_tx_rsvd;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	if (pf->active_vfs) {
		hw_resc->max_tx_rings -= total_vf_tx_rings;
		hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
		hw_resc->max_hw_ring_grps -= vf_ring_grps * num_vfs;
		hw_resc->max_cp_rings -= vf_cp_rings * num_vfs;
		hw_resc->max_rsscos_ctxs -= num_vfs;
		hw_resc->max_stat_ctxs -= vf_stat_ctx * num_vfs;
		hw_resc->max_vnics -= vf_vnics * num_vfs;
		rc = pf->active_vfs;
	}
	return rc;
}

static int bnxt_func_cfg(struct bnxt *bp, int num_vfs, bool reset)
{
	if (BNXT_NEW_RM(bp))
		return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs, reset);
	else
		return bnxt_hwrm_func_cfg(bp, num_vfs);
}

int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
{
	int rc;

	/* Register buffers for VFs */
	rc = bnxt_hwrm_func_buf_rgtr(bp);
	if (rc)
		return rc;

	/* Reserve resources for VFs */
	rc = bnxt_func_cfg(bp, *num_vfs, reset);
	if (rc != *num_vfs) {
		if (rc <= 0) {
			netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
			*num_vfs = 0;
			return rc;
		}
		netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n",
			    rc);
		*num_vfs = rc;
	}

	bnxt_ulp_sriov_cfg(bp, *num_vfs);
	return 0;
}

static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
{
	int rc = 0, vfs_supported;
	int min_rx_rings, min_tx_rings, min_rss_ctxs;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int tx_ok = 0, rx_ok = 0, rss_ok = 0;
	int avail_cp, avail_stat;

	/* Check if we can enable the requested number of VFs. At a minimum
	 * we require 1 RX and 1 TX ring for each VF. In this minimum conf
	 * features like TPA will not be available.
	 */
	vfs_supported = *num_vfs;

	avail_cp = bnxt_get_avail_cp_rings_for_en(bp);
	avail_stat = bnxt_get_avail_stat_ctxs_for_en(bp);
	avail_cp = min_t(int, avail_cp, avail_stat);

	while (vfs_supported) {
		min_rx_rings = vfs_supported;
		min_tx_rings = vfs_supported;
		min_rss_ctxs = vfs_supported;

		if (bp->flags & BNXT_FLAG_AGG_RINGS) {
			if (hw_resc->max_rx_rings - bp->rx_nr_rings * 2 >=
			    min_rx_rings)
				rx_ok = 1;
		} else {
			if (hw_resc->max_rx_rings - bp->rx_nr_rings >=
			    min_rx_rings)
				rx_ok = 1;
		}
		if (hw_resc->max_vnics - bp->nr_vnics < min_rx_rings ||
		    avail_cp < min_rx_rings)
			rx_ok = 0;

		if (hw_resc->max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
		    avail_cp >= min_tx_rings)
			tx_ok = 1;

		if (hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs >=
		    min_rss_ctxs)
			rss_ok = 1;

		if (tx_ok && rx_ok && rss_ok)
			break;

		vfs_supported--;
	}

	if (!vfs_supported) {
		netdev_err(bp->dev, "Cannot enable VFs as all resources are used by the PF\n");
		return -EINVAL;
	}

	if (vfs_supported != *num_vfs) {
		netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
			    *num_vfs, vfs_supported);
		*num_vfs = vfs_supported;
	}

	rc = bnxt_alloc_vf_resources(bp, *num_vfs);
	if (rc)
		goto err_out1;

	rc = bnxt_cfg_hw_sriov(bp, num_vfs, false);
	if (rc)
		goto err_out2;

	rc = pci_enable_sriov(bp->pdev, *num_vfs);
	if (rc)
		goto err_out2;

	return 0;

err_out2:
	/* Free the resources reserved for various VFs */
	bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);

err_out1:
	bnxt_free_vf_resources(bp);

	return rc;
}

void bnxt_sriov_disable(struct bnxt *bp)
{
	u16 num_vfs = pci_num_vf(bp->pdev);

	if (!num_vfs)
		return;

	/* synchronize VF and VF-rep create and destroy */
	mutex_lock(&bp->sriov_lock);
	bnxt_vf_reps_destroy(bp);

	if (pci_vfs_assigned(bp->pdev)) {
		bnxt_hwrm_fwd_async_event_cmpl(
			bp, NULL, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
		netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
			    num_vfs);
	} else {
		pci_disable_sriov(bp->pdev);
		/* Free the HW resources reserved for various VFs */
		bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
	}
	mutex_unlock(&bp->sriov_lock);

	bnxt_free_vf_resources(bp);

	/* Reclaim all resources for the PF. */
	rtnl_lock();
	bnxt_restore_pf_fw_resources(bp);
	rtnl_unlock();

	bnxt_ulp_sriov_cfg(bp, 0);
}

int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);

	if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
		netdev_warn(dev, "Not allow SRIOV if the irq mode is not MSIX\n");
		return 0;
	}

	rtnl_lock();
	if (!netif_running(dev)) {
		netdev_warn(dev, "Reject SRIOV config request since if is down!\n");
		rtnl_unlock();
		return 0;
	}
	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
		netdev_warn(dev, "Reject SRIOV config request when FW reset is in progress\n");
		rtnl_unlock();
		return 0;
	}
	bp->sriov_cfg = true;
	rtnl_unlock();

	if (pci_vfs_assigned(bp->pdev)) {
		netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n");
		num_vfs = 0;
		goto sriov_cfg_exit;
	}

	/* Check if enabled VFs is same as requested */
	if (num_vfs && num_vfs == bp->pf.active_vfs)
		goto sriov_cfg_exit;

	/* if there are previous existing VFs, clean them up */
	bnxt_sriov_disable(bp);
	if (!num_vfs)
		goto sriov_cfg_exit;

	bnxt_sriov_enable(bp, &num_vfs);

sriov_cfg_exit:
	bp->sriov_cfg = false;
	wake_up(&bp->sriov_cfg_wait);

	return num_vfs;
}

static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
			      void *encap_resp, __le64 encap_resp_addr,
			      __le16 encap_resp_cpr, u32 msg_size)
{
	int rc = 0;
	struct hwrm_fwd_resp_input req = {0};

	if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
		return -EINVAL;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);

	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_len = cpu_to_le16(msg_size);
	req.encap_resp_addr = encap_resp_addr;
	req.encap_resp_cmpl_ring = encap_resp_cpr;
	memcpy(req.encap_resp, encap_resp, msg_size);

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
	return rc;
}

static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				  u32 msg_size)
{
	int rc = 0;
	struct hwrm_reject_fwd_resp_input req = {0};

	if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size))
		return -EINVAL;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
	return rc;
}

static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				   u32 msg_size)
{
	int rc = 0;
	struct hwrm_exec_fwd_resp_input req = {0};

	if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size))
		return -EINVAL;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc);
	return rc;
}

static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input);
	struct hwrm_func_vf_cfg_input *req =
		(struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr;

	/* Allow VF to set a valid MAC address, if trust is set to on or
	 * if the PF assigned MAC address is zero
	 */
	if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) {
		bool trust = bnxt_is_trusted_vf(bp, vf);

		if (is_valid_ether_addr(req->dflt_mac_addr) &&
		    (trust || !is_valid_ether_addr(vf->mac_addr) ||
		     ether_addr_equal(req->dflt_mac_addr, vf->mac_addr))) {
			ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr);
			return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
		}
		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
	}
	return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
}

static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
	struct hwrm_cfa_l2_filter_alloc_input *req =
		(struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
	bool mac_ok = false;

	if (!is_valid_ether_addr((const u8 *)req->l2_addr))
		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);

	/* Allow VF to set a valid MAC address, if trust is set to on.
	 * Or VF MAC address must first match MAC address in PF's context.
	 * Otherwise, it must match the VF MAC address if firmware spec >=
	 * 1.2.2
	 */
	if (bnxt_is_trusted_vf(bp, vf)) {
		mac_ok = true;
	} else if (is_valid_ether_addr(vf->mac_addr)) {
		if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
			mac_ok = true;
	} else if (is_valid_ether_addr(vf->vf_mac_addr)) {
		if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr))
			mac_ok = true;
	} else {
		/* There are two cases:
		 * 1. If firmware spec < 0x10202, the VF MAC address is not
		 *    forwarded to the PF and so it doesn't have to match.
		 * 2. Allow VF to modify its own MAC when PF has not assigned a
		 *    valid MAC address and firmware spec >= 0x10202.
		 */
		mac_ok = true;
	}
	if (mac_ok)
		return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
	return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
}

static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;

	if (!(vf->flags & BNXT_VF_LINK_FORCED)) {
		/* real link */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
	} else {
		struct hwrm_port_phy_qcfg_output phy_qcfg_resp = {0};
		struct hwrm_port_phy_qcfg_input *phy_qcfg_req;

		phy_qcfg_req =
			(struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
		mutex_lock(&bp->hwrm_cmd_lock);
		memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
		       sizeof(phy_qcfg_resp));
		mutex_unlock(&bp->hwrm_cmd_lock);
		phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp));
		phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
		phy_qcfg_resp.valid = 1;

		if (vf->flags & BNXT_VF_LINK_UP) {
			/* if physical link is down, force link up on VF */
			if (phy_qcfg_resp.link !=
			    PORT_PHY_QCFG_RESP_LINK_LINK) {
				phy_qcfg_resp.link =
					PORT_PHY_QCFG_RESP_LINK_LINK;
				phy_qcfg_resp.link_speed = cpu_to_le16(
					PORT_PHY_QCFG_RESP_LINK_SPEED_10GB);
				phy_qcfg_resp.duplex_cfg =
					PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL;
				phy_qcfg_resp.duplex_state =
					PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL;
				phy_qcfg_resp.pause =
					(PORT_PHY_QCFG_RESP_PAUSE_TX |
					 PORT_PHY_QCFG_RESP_PAUSE_RX);
			}
		} else {
			/* force link down */
			phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
			phy_qcfg_resp.link_speed = 0;
			phy_qcfg_resp.duplex_state =
				PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF;
			phy_qcfg_resp.pause = 0;
		}
		rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
					phy_qcfg_req->resp_addr,
					phy_qcfg_req->cmpl_ring,
					sizeof(phy_qcfg_resp));
	}
	return rc;
}

static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;
	struct input *encap_req = vf->hwrm_cmd_req_addr;
	u32 req_type = le16_to_cpu(encap_req->req_type);

	switch (req_type) {
	case HWRM_FUNC_VF_CFG:
		rc = bnxt_vf_configure_mac(bp, vf);
		break;
	case HWRM_CFA_L2_FILTER_ALLOC:
		rc = bnxt_vf_validate_set_mac(bp, vf);
		break;
	case HWRM_FUNC_CFG:
		/* TODO Validate if VF is allowed to change mac address,
		 * mtu, num of rings etc
		 */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_func_cfg_input));
		break;
	case HWRM_PORT_PHY_QCFG:
		rc = bnxt_vf_set_link(bp, vf);
		break;
	default:
		break;
	}
	return rc;
}

void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;

	/* Scan through VFs and process commands */
	while (1) {
		vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
		if (vf_id >= active_vfs)
			break;

		clear_bit(vf_id, bp->pf.vf_event_bmap);
		bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
		i = vf_id + 1;
	}
}

/* Called on the VF to fetch the PF-assigned MAC address from firmware
 * and propagate it to the netdev.
 */
void bnxt_update_vf_mac(struct bnxt *bp)
{
	struct hwrm_func_qcaps_input req = {0};
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
	req.fid = cpu_to_le16(0xffff);

	mutex_lock(&bp->hwrm_cmd_lock);
	if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
		goto update_vf_mac_exit;

	/* Store MAC address from the firmware. There are 2 cases:
	 * 1. MAC address is valid. It is assigned from the PF and we
	 *    need to override the current VF MAC address with it.
	 * 2. MAC address is zero. The VF will use a random MAC address by
	 *    default but the stored zero MAC will allow the VF user to change
	 *    the random MAC address using ndo_set_mac_address() if desired.
	 */
	if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr))
		memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);

	/* overwrite netdev dev_addr with admin VF MAC */
	if (is_valid_ether_addr(bp->vf.mac_addr))
		memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
update_vf_mac_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
}

int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
{
	struct hwrm_func_vf_cfg_input req = {0};
	int rc = 0;

	if (!BNXT_VF(bp))
		return 0;

	if (bp->hwrm_spec_code < 0x10202) {
		if (is_valid_ether_addr(bp->vf.mac_addr))
			rc = -EADDRNOTAVAIL;
		goto mac_done;
	}
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
	req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
mac_done:
	if (rc && strict) {
		rc = -EADDRNOTAVAIL;
		netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
			    mac);
		return rc;
	}
	return 0;
}
#else

int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
{
	if (*num_vfs)
		return -EOPNOTSUPP;
	return 0;
}

void bnxt_sriov_disable(struct bnxt *bp)
{
}

void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enabled\n");
}

void bnxt_update_vf_mac(struct bnxt *bp)
{
}

int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
{
	return 0;
}
#endif