/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2018 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_vfr.h"
#include "bnxt_ethtool.h"

#ifdef CONFIG_BNXT_SRIOV
static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
					  struct bnxt_vf_info *vf, u16 event_id)
{
	struct hwrm_fwd_async_event_cmpl_input req = {0};
	struct hwrm_async_event_cmpl *async_cmpl;
	int rc = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
	if (vf)
		req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
	else
		/* broadcast this async event to all VFs */
		req.encap_async_event_target_id = cpu_to_le16(0xffff);
	async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
	async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
	async_cmpl->event_id = cpu_to_le16(event_id);

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
			   rc);
	return rc;
}
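/* Example usage (both calls appear later in this file): link-state changes
 * are forwarded to a single VF, while PF driver unload is broadcast to all
 * VFs by passing a NULL vf:
 *
 *	bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
 *			ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
 *	bnxt_hwrm_fwd_async_event_cmpl(bp, NULL,
 *			ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
 */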
rc:%d\n", 47350a7149SEddie Wai rc); 48350a7149SEddie Wai return rc; 49350a7149SEddie Wai } 50350a7149SEddie Wai 51c0c050c5SMichael Chan static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id) 52c0c050c5SMichael Chan { 53c0c050c5SMichael Chan if (!bp->pf.active_vfs) { 54c0c050c5SMichael Chan netdev_err(bp->dev, "vf ndo called though sriov is disabled\n"); 55c0c050c5SMichael Chan return -EINVAL; 56c0c050c5SMichael Chan } 5778f30004SVenkat Duvvuru if (vf_id >= bp->pf.active_vfs) { 58c0c050c5SMichael Chan netdev_err(bp->dev, "Invalid VF id %d\n", vf_id); 59c0c050c5SMichael Chan return -EINVAL; 60c0c050c5SMichael Chan } 61c0c050c5SMichael Chan return 0; 62c0c050c5SMichael Chan } 63c0c050c5SMichael Chan 64c0c050c5SMichael Chan int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting) 65c0c050c5SMichael Chan { 66c0c050c5SMichael Chan struct hwrm_func_cfg_input req = {0}; 67c0c050c5SMichael Chan struct bnxt *bp = netdev_priv(dev); 68c0c050c5SMichael Chan struct bnxt_vf_info *vf; 69c0c050c5SMichael Chan bool old_setting = false; 70c0c050c5SMichael Chan u32 func_flags; 71c0c050c5SMichael Chan int rc; 72c0c050c5SMichael Chan 738eb992e8SMichael Chan if (bp->hwrm_spec_code < 0x10701) 748eb992e8SMichael Chan return -ENOTSUPP; 758eb992e8SMichael Chan 76c0c050c5SMichael Chan rc = bnxt_vf_ndo_prep(bp, vf_id); 77c0c050c5SMichael Chan if (rc) 78c0c050c5SMichael Chan return rc; 79c0c050c5SMichael Chan 80c0c050c5SMichael Chan vf = &bp->pf.vf[vf_id]; 81c0c050c5SMichael Chan if (vf->flags & BNXT_VF_SPOOFCHK) 82c0c050c5SMichael Chan old_setting = true; 83c0c050c5SMichael Chan if (old_setting == setting) 84c0c050c5SMichael Chan return 0; 85c0c050c5SMichael Chan 86c0c050c5SMichael Chan if (setting) 87c71c4e49SMichael Chan func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE; 88c0c050c5SMichael Chan else 89c71c4e49SMichael Chan func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE; 90c0c050c5SMichael Chan /*TODO: if the driver supports VLAN filter on guest VLAN, 91c0c050c5SMichael Chan * the spoof check should also include vlan anti-spoofing 92c0c050c5SMichael Chan */ 93c0c050c5SMichael Chan bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 94c193554eSMichael Chan req.fid = cpu_to_le16(vf->fw_fid); 95c0c050c5SMichael Chan req.flags = cpu_to_le32(func_flags); 96c0c050c5SMichael Chan rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 97c0c050c5SMichael Chan if (!rc) { 98c0c050c5SMichael Chan if (setting) 99c0c050c5SMichael Chan vf->flags |= BNXT_VF_SPOOFCHK; 100c0c050c5SMichael Chan else 101c0c050c5SMichael Chan vf->flags &= ~BNXT_VF_SPOOFCHK; 102c0c050c5SMichael Chan } 103c0c050c5SMichael Chan return rc; 104c0c050c5SMichael Chan } 105c0c050c5SMichael Chan 1062a516444SMichael Chan static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf) 1072a516444SMichael Chan { 1082a516444SMichael Chan struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 1092a516444SMichael Chan struct hwrm_func_qcfg_input req = {0}; 1102a516444SMichael Chan int rc; 1112a516444SMichael Chan 1122a516444SMichael Chan bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); 113dd85fc0aSEdwin Peer req.fid = cpu_to_le16(BNXT_PF(bp) ? 
static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_qcfg_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
	req.fid = cpu_to_le16(BNXT_PF(bp) ? vf->fw_fid : 0xffff);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		mutex_unlock(&bp->hwrm_cmd_lock);
		return rc;
	}
	vf->func_qcfg_flags = le16_to_cpu(resp->flags);
	mutex_unlock(&bp->hwrm_cmd_lock);
	return 0;
}

bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	if (BNXT_PF(bp) && !(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
		return !!(vf->flags & BNXT_VF_TRUST);

	bnxt_hwrm_func_qcfg_flags(bp, vf);
	return !!(vf->func_qcfg_flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF);
}

static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	struct hwrm_func_cfg_input req = {0};

	if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	if (vf->flags & BNXT_VF_TRUST)
		req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
	else
		req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;

	if (bnxt_vf_ndo_prep(bp, vf_id))
		return -EINVAL;

	vf = &bp->pf.vf[vf_id];
	if (trusted)
		vf->flags |= BNXT_VF_TRUST;
	else
		vf->flags &= ~BNXT_VF_TRUST;

	bnxt_hwrm_set_trusted_vf(bp, vf);
	return 0;
}
int bnxt_get_vf_config(struct net_device *dev, int vf_id,
		       struct ifla_vf_info *ivi)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	ivi->vf = vf_id;
	vf = &bp->pf.vf[vf_id];

	if (is_valid_ether_addr(vf->mac_addr))
		memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
	else
		memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN);
	ivi->max_tx_rate = vf->max_tx_rate;
	ivi->min_tx_rate = vf->min_tx_rate;
	ivi->vlan = vf->vlan;
	if (vf->flags & BNXT_VF_QOS)
		ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
	else
		ivi->qos = 0;
	ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
	ivi->trusted = bnxt_is_trusted_vf(bp, vf);
	if (!(vf->flags & BNXT_VF_LINK_FORCED))
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->flags & BNXT_VF_LINK_UP)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;

	return 0;
}

int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;
	/* reject bc or mc mac addr, zero mac addr means allow
	 * VF to use its own mac addr
	 */
	if (is_multicast_ether_addr(mac)) {
		netdev_err(dev, "Invalid VF ethernet address\n");
		return -EINVAL;
	}
	vf = &bp->pf.vf[vf_id];

	memcpy(vf->mac_addr, mac, ETH_ALEN);
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
		     __be16 vlan_proto)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	u16 vlan_tag;
	int rc;

	if (bp->hwrm_spec_code < 0x10201)
		return -ENOTSUPP;

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	/* TODO: proper handling of user priority still needs to be
	 * implemented; currently fail the command if a valid priority
	 * is given
	 */
	if (vlan_id > 4095 || qos)
		return -EINVAL;

	vf = &bp->pf.vf[vf_id];
	vlan_tag = vlan_id;
	if (vlan_tag == vf->vlan)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.dflt_vlan = cpu_to_le16(vlan_tag);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		vf->vlan = vlan_tag;
	return rc;
}

int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
		   int max_tx_rate)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	u32 pf_link_speed;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
	if (max_tx_rate > pf_link_speed) {
		netdev_info(bp->dev, "max tx rate %d exceed PF link speed for VF %d\n",
			    max_tx_rate, vf_id);
		return -EINVAL;
	}

	if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
		netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
			    min_tx_rate, vf_id);
		return -EINVAL;
	}
	if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
		return 0;
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
	req.max_bw = cpu_to_le32(max_tx_rate);
	req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
	req.min_bw = cpu_to_le32(min_tx_rate);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		vf->min_tx_rate = min_tx_rate;
		vf->max_tx_rate = max_tx_rate;
	}
	return rc;
}
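/* VF link state is encoded with two flag bits; the mapping implemented by
 * bnxt_set_vf_link_state() below and decoded by bnxt_get_vf_config() is:
 *
 *	IFLA_VF_LINK_STATE_AUTO    -> BNXT_VF_LINK_UP
 *	IFLA_VF_LINK_STATE_DISABLE -> BNXT_VF_LINK_FORCED
 *	IFLA_VF_LINK_STATE_ENABLE  -> BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED
 */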
int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];

	vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED);
	switch (link) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->flags |= BNXT_VF_LINK_UP;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->flags |= BNXT_VF_LINK_FORCED;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED;
		break;
	default:
		netdev_err(bp->dev, "Invalid link option\n");
		rc = -EINVAL;
		break;
	}
	if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
		rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
			ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
	return rc;
}

static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
{
	int i;
	struct bnxt_vf_info *vf;

	for (i = 0; i < num_vfs; i++) {
		vf = &bp->pf.vf[i];
		memset(vf, 0, sizeof(*vf));
	}
	return 0;
}

static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
{
	int i, rc = 0;
	struct bnxt_pf_info *pf = &bp->pf;
	struct hwrm_func_vf_resc_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
		req.vf_id = cpu_to_le16(i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static void bnxt_free_vf_resources(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;
	int i;

	kfree(bp->pf.vf_event_bmap);
	bp->pf.vf_event_bmap = NULL;

	for (i = 0; i < 4; i++) {
		if (bp->pf.hwrm_cmd_req_addr[i]) {
			dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE,
					  bp->pf.hwrm_cmd_req_addr[i],
					  bp->pf.hwrm_cmd_req_dma_addr[i]);
			bp->pf.hwrm_cmd_req_addr[i] = NULL;
		}
	}

	bp->pf.active_vfs = 0;
	kfree(bp->pf.vf);
	bp->pf.vf = NULL;
}
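/* Each VF gets a BNXT_HWRM_REQ_MAX_SIZE slice of a DMA-coherent page for
 * its forwarded HWRM requests.  As an illustration (values taken from
 * bnxt.h at the time of writing; check the header for the current ones):
 * with a 4096-byte BNXT_PAGE_SIZE and 128-byte requests, one page serves
 * 32 VFs, so 64 VFs need two of the four available pages.
 */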
static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
{
	struct pci_dev *pdev = bp->pdev;
	u32 nr_pages, size, i, j, k = 0;

	bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
	if (!bp->pf.vf)
		return -ENOMEM;

	bnxt_set_vf_attr(bp, num_vfs);

	size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
	nr_pages = size / BNXT_PAGE_SIZE;
	if (size & (BNXT_PAGE_SIZE - 1))
		nr_pages++;

	for (i = 0; i < nr_pages; i++) {
		bp->pf.hwrm_cmd_req_addr[i] =
			dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE,
					   &bp->pf.hwrm_cmd_req_dma_addr[i],
					   GFP_KERNEL);

		if (!bp->pf.hwrm_cmd_req_addr[i])
			return -ENOMEM;

		for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) {
			struct bnxt_vf_info *vf = &bp->pf.vf[k];

			vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
						j * BNXT_HWRM_REQ_MAX_SIZE;
			vf->hwrm_cmd_req_dma_addr =
				bp->pf.hwrm_cmd_req_dma_addr[i] + j *
				BNXT_HWRM_REQ_MAX_SIZE;
			k++;
		}
	}

	/* Max 128 VF's */
	bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
	if (!bp->pf.vf_event_bmap)
		return -ENOMEM;

	bp->pf.hwrm_cmd_req_pages = nr_pages;
	return 0;
}
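/* The helper below registers the (up to four) request pages allocated
 * above with the firmware via HWRM_FUNC_BUF_RGTR, so the firmware can
 * copy VF-originated HWRM commands into them.  The PF inspects those
 * commands later, in bnxt_hwrm_exec_fwd_req().
 */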
static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
{
	struct hwrm_func_buf_rgtr_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1);

	req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
	req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
	req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
	req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
	req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
	req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
	req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

/* Caller holds bp->hwrm_cmd_lock mutex lock */
static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt_vf_info *vf;

	vf = &bp->pf.vf[vf_id];
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);

	if (is_valid_ether_addr(vf->mac_addr)) {
		req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
		memcpy(req.dflt_mac_addr, vf->mac_addr, ETH_ALEN);
	}
	if (vf->vlan) {
		req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
		req.dflt_vlan = cpu_to_le16(vf->vlan);
	}
	if (vf->max_tx_rate) {
		req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
		req.max_bw = cpu_to_le32(vf->max_tx_rate);
#ifdef HAVE_IFLA_TX_RATE
		req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
		req.min_bw = cpu_to_le32(vf->min_tx_rate);
#endif
	}
	if (vf->flags & BNXT_VF_TRUST)
		req.flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);

	_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
/* Only called by PF to reserve resources for VFs, returns actual number of
 * VFs configured, or < 0 on error.
 */
static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
{
	struct hwrm_func_vf_resource_cfg_input req = {0};
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;
	u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
	struct bnxt_pf_info *pf = &bp->pf;
	int i, rc = 0, min = 1;
	u16 vf_msix = 0;
	u16 vf_rss;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);

	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		vf_msix = hw_resc->max_nqs - bnxt_nq_rings_in_use(bp);
		vf_ring_grps = 0;
	} else {
		vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings;
	}
	vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp);
	vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp);
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
	else
		vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings;
	vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
	vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
	vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
	vf_rss = hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs;
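	/* A worked example of the split below (illustrative numbers only):
	 * with max_rx_rings = 64, rx_nr_rings = 8 and aggregation rings
	 * enabled, the VF pool is 64 - 8 * 2 = 48 RX rings.  For num_vfs = 8
	 * the default strategy reserves min = max = 48 / 8 = 6 RX rings per
	 * VF; the "minimal" strategies instead set min to 1 (or 0 for
	 * minimal-static) and leave max at the full 48-ring pool.
	 */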
	req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
	if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
		min = 0;
		req.min_rsscos_ctx = cpu_to_le16(min);
	}
	if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL ||
	    pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
		req.min_cmpl_rings = cpu_to_le16(min);
		req.min_tx_rings = cpu_to_le16(min);
		req.min_rx_rings = cpu_to_le16(min);
		req.min_l2_ctxs = cpu_to_le16(min);
		req.min_vnics = cpu_to_le16(min);
		req.min_stat_ctx = cpu_to_le16(min);
		if (!(bp->flags & BNXT_FLAG_CHIP_P5))
			req.min_hw_ring_grps = cpu_to_le16(min);
	} else {
		vf_cp_rings /= num_vfs;
		vf_tx_rings /= num_vfs;
		vf_rx_rings /= num_vfs;
		vf_vnics /= num_vfs;
		vf_stat_ctx /= num_vfs;
		vf_ring_grps /= num_vfs;
		vf_rss /= num_vfs;

		req.min_cmpl_rings = cpu_to_le16(vf_cp_rings);
		req.min_tx_rings = cpu_to_le16(vf_tx_rings);
		req.min_rx_rings = cpu_to_le16(vf_rx_rings);
		req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
		req.min_vnics = cpu_to_le16(vf_vnics);
		req.min_stat_ctx = cpu_to_le16(vf_stat_ctx);
		req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
		req.min_rsscos_ctx = cpu_to_le16(vf_rss);
	}
	req.max_cmpl_rings = cpu_to_le16(vf_cp_rings);
	req.max_tx_rings = cpu_to_le16(vf_tx_rings);
	req.max_rx_rings = cpu_to_le16(vf_rx_rings);
	req.max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
	req.max_vnics = cpu_to_le16(vf_vnics);
	req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
	req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
	req.max_rsscos_ctx = cpu_to_le16(vf_rss);
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		req.max_msix = cpu_to_le16(vf_msix / num_vfs);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < num_vfs; i++) {
		if (reset)
			__bnxt_set_vf_params(bp, i);

		req.vf_id = cpu_to_le16(pf->first_vf_id + i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
		pf->active_vfs = i + 1;
		pf->vf[i].fw_fid = pf->first_vf_id + i;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	if (pf->active_vfs) {
		u16 n = pf->active_vfs;

		hw_resc->max_tx_rings -= le16_to_cpu(req.min_tx_rings) * n;
		hw_resc->max_rx_rings -= le16_to_cpu(req.min_rx_rings) * n;
		hw_resc->max_hw_ring_grps -=
			le16_to_cpu(req.min_hw_ring_grps) * n;
		hw_resc->max_cp_rings -= le16_to_cpu(req.min_cmpl_rings) * n;
		hw_resc->max_rsscos_ctxs -= le16_to_cpu(req.min_rsscos_ctx) * n;
		hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n;
		hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n;
		if (bp->flags & BNXT_FLAG_CHIP_P5)
			hw_resc->max_irqs -= vf_msix * n;

		rc = pf->active_vfs;
	}
	return rc;
}
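/* The legacy reservation path below is used when the firmware does not
 * support the newer resource manager (!BNXT_NEW_RM): instead of min/max
 * ranges per VF, HWRM_FUNC_CFG assigns each VF a fixed, equal share.
 * bnxt_func_cfg() further down selects between the two paths.
 */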
/* Only called by PF to reserve resources for VFs, returns actual number of
 * VFs configured, or < 0 on error.
 */
static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
{
	u32 rc = 0, mtu, i;
	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	struct hwrm_func_cfg_input req = {0};
	struct bnxt_pf_info *pf = &bp->pf;
	int total_vf_tx_rings = 0;
	u16 vf_ring_grps;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);

	/* Remaining rings are distributed equally amongst VFs for now */
	vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp) / num_vfs;
	vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp) / num_vfs;
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
			      num_vfs;
	else
		vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings) /
			      num_vfs;
	vf_ring_grps = (hw_resc->max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
	vf_tx_rings = (hw_resc->max_tx_rings - bp->tx_nr_rings) / num_vfs;
	vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs;
	vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);

	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_MTU |
				  FUNC_CFG_REQ_ENABLES_MRU |
				  FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_VNICS |
				  FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);

	mtu = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
	req.mru = cpu_to_le16(mtu);
	req.admin_mtu = cpu_to_le16(mtu);

	req.num_rsscos_ctxs = cpu_to_le16(1);
	req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
	req.num_tx_rings = cpu_to_le16(vf_tx_rings);
	req.num_rx_rings = cpu_to_le16(vf_rx_rings);
	req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
	req.num_l2_ctxs = cpu_to_le16(4);

	req.num_vnics = cpu_to_le16(vf_vnics);
	/* FIXME spec currently uses 1 bit for stats ctx */
	req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);
	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < num_vfs; i++) {
		int vf_tx_rsvd = vf_tx_rings;

		req.fid = cpu_to_le16(pf->first_vf_id + i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
		pf->active_vfs = i + 1;
		pf->vf[i].fw_fid = le16_to_cpu(req.fid);
		rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid,
					      &vf_tx_rsvd);
		if (rc)
			break;
		total_vf_tx_rings += vf_tx_rsvd;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	if (pf->active_vfs) {
		hw_resc->max_tx_rings -= total_vf_tx_rings;
		hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
		hw_resc->max_hw_ring_grps -= vf_ring_grps * num_vfs;
		hw_resc->max_cp_rings -= vf_cp_rings * num_vfs;
		hw_resc->max_rsscos_ctxs -= num_vfs;
		hw_resc->max_stat_ctxs -= vf_stat_ctx * num_vfs;
		hw_resc->max_vnics -= vf_vnics * num_vfs;
		rc = pf->active_vfs;
	}
	return rc;
}

static int bnxt_func_cfg(struct bnxt *bp, int num_vfs, bool reset)
{
	if (BNXT_NEW_RM(bp))
		return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs, reset);
	else
		return bnxt_hwrm_func_cfg(bp, num_vfs);
}

int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
{
	int rc;

	/* Register buffers for VFs */
	rc = bnxt_hwrm_func_buf_rgtr(bp);
	if (rc)
		return rc;

	/* Reserve resources for VFs */
	rc = bnxt_func_cfg(bp, *num_vfs, reset);
	if (rc != *num_vfs) {
		if (rc <= 0) {
			netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
			*num_vfs = 0;
			return rc;
		}
		netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n",
			    rc);
		*num_vfs = rc;
	}

	bnxt_ulp_sriov_cfg(bp, *num_vfs);
	return 0;
}
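/* bnxt_sriov_enable() below degrades gracefully: starting from the
 * requested VF count, it keeps decrementing until one RX ring, one TX ring
 * and one RSS context per VF (plus the matching vnic, completion and stat
 * contexts) fit in what is left over after the PF's own usage.
 */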
static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
{
	int rc = 0, vfs_supported;
	int min_rx_rings, min_tx_rings, min_rss_ctxs;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int tx_ok = 0, rx_ok = 0, rss_ok = 0;
	int avail_cp, avail_stat;

	/* Check if we can enable the requested number of VFs. At a minimum
	 * we require 1 RX and 1 TX ring for each VF. In this minimum conf
	 * features like TPA will not be available.
	 */
	vfs_supported = *num_vfs;

	avail_cp = bnxt_get_avail_cp_rings_for_en(bp);
	avail_stat = bnxt_get_avail_stat_ctxs_for_en(bp);
	avail_cp = min_t(int, avail_cp, avail_stat);

	while (vfs_supported) {
		min_rx_rings = vfs_supported;
		min_tx_rings = vfs_supported;
		min_rss_ctxs = vfs_supported;

		if (bp->flags & BNXT_FLAG_AGG_RINGS) {
			if (hw_resc->max_rx_rings - bp->rx_nr_rings * 2 >=
			    min_rx_rings)
				rx_ok = 1;
		} else {
			if (hw_resc->max_rx_rings - bp->rx_nr_rings >=
			    min_rx_rings)
				rx_ok = 1;
		}
		if (hw_resc->max_vnics - bp->nr_vnics < min_rx_rings ||
		    avail_cp < min_rx_rings)
			rx_ok = 0;

		if (hw_resc->max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
		    avail_cp >= min_tx_rings)
			tx_ok = 1;

		if (hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs >=
		    min_rss_ctxs)
			rss_ok = 1;

		if (tx_ok && rx_ok && rss_ok)
			break;

		vfs_supported--;
	}

	if (!vfs_supported) {
		netdev_err(bp->dev, "Cannot enable VF's as all resources are used by PF\n");
		return -EINVAL;
	}

	if (vfs_supported != *num_vfs) {
		netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
			    *num_vfs, vfs_supported);
		*num_vfs = vfs_supported;
	}

	rc = bnxt_alloc_vf_resources(bp, *num_vfs);
	if (rc)
		goto err_out1;

	rc = bnxt_cfg_hw_sriov(bp, num_vfs, false);
	if (rc)
		goto err_out2;

	rc = pci_enable_sriov(bp->pdev, *num_vfs);
	if (rc)
		goto err_out2;

	return 0;

err_out2:
	/* Free the resources reserved for various VF's */
	bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);

err_out1:
	bnxt_free_vf_resources(bp);

	return rc;
}
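/* Teardown order matters in bnxt_sriov_disable() below: VF representors
 * are destroyed under sriov_lock first.  If any VF is still assigned to a
 * VM, SR-IOV cannot actually be disabled; the VFs are only told about the
 * PF driver unload via a broadcast async event, and the HW resources are
 * left reserved.
 */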
void bnxt_sriov_disable(struct bnxt *bp)
{
	u16 num_vfs = pci_num_vf(bp->pdev);

	if (!num_vfs)
		return;

	/* synchronize VF and VF-rep create and destroy */
	mutex_lock(&bp->sriov_lock);
	bnxt_vf_reps_destroy(bp);

	if (pci_vfs_assigned(bp->pdev)) {
		bnxt_hwrm_fwd_async_event_cmpl(
			bp, NULL, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
		netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
			    num_vfs);
	} else {
		pci_disable_sriov(bp->pdev);
		/* Free the HW resources reserved for various VF's */
		bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
	}
	mutex_unlock(&bp->sriov_lock);

	bnxt_free_vf_resources(bp);

	/* Reclaim all resources for the PF. */
	rtnl_lock();
	bnxt_restore_pf_fw_resources(bp);
	rtnl_unlock();

	bnxt_ulp_sriov_cfg(bp, 0);
}

int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);

	if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
		netdev_warn(dev, "Not allow SRIOV if the irq mode is not MSIX\n");
		return 0;
	}

	rtnl_lock();
	if (!netif_running(dev)) {
		netdev_warn(dev, "Reject SRIOV config request since if is down!\n");
		rtnl_unlock();
		return 0;
	}
	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
		netdev_warn(dev, "Reject SRIOV config request when FW reset is in progress\n");
		rtnl_unlock();
		return 0;
	}
	bp->sriov_cfg = true;
	rtnl_unlock();

	if (pci_vfs_assigned(bp->pdev)) {
		netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n");
		num_vfs = 0;
		goto sriov_cfg_exit;
	}

	/* Check if enabled VFs is same as requested */
	if (num_vfs && num_vfs == bp->pf.active_vfs)
		goto sriov_cfg_exit;

	/* if there are previous existing VFs, clean them up */
	bnxt_sriov_disable(bp);
	if (!num_vfs)
		goto sriov_cfg_exit;

	bnxt_sriov_enable(bp, &num_vfs);

sriov_cfg_exit:
	bp->sriov_cfg = false;
	wake_up(&bp->sriov_cfg_wait);

	return num_vfs;
}
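/* The next three helpers implement the PF side of the VF mailbox: a
 * validated VF request can be answered with a response fabricated by the
 * PF itself (HWRM_FWD_RESP), rejected (HWRM_REJECT_FWD_RESP), or executed
 * by firmware on the VF's behalf (HWRM_EXEC_FWD_RESP).
 */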
static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
			      void *encap_resp, __le64 encap_resp_addr,
			      __le16 encap_resp_cpr, u32 msg_size)
{
	int rc = 0;
	struct hwrm_fwd_resp_input req = {0};

	if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
		return -EINVAL;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);

	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_len = cpu_to_le16(msg_size);
	req.encap_resp_addr = encap_resp_addr;
	req.encap_resp_cmpl_ring = encap_resp_cpr;
	memcpy(req.encap_resp, encap_resp, msg_size);

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
	return rc;
}

static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				  u32 msg_size)
{
	int rc = 0;
	struct hwrm_reject_fwd_resp_input req = {0};

	if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size))
		return -EINVAL;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
	return rc;
}
rc:%d\n", rc); 937c0c050c5SMichael Chan return rc; 938c0c050c5SMichael Chan } 939c0c050c5SMichael Chan 940c0c050c5SMichael Chan static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, 941c0c050c5SMichael Chan u32 msg_size) 942c0c050c5SMichael Chan { 943c0c050c5SMichael Chan int rc = 0; 944c0c050c5SMichael Chan struct hwrm_exec_fwd_resp_input req = {0}; 945c0c050c5SMichael Chan 94659895f59SMichael Chan if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size)) 94759895f59SMichael Chan return -EINVAL; 94859895f59SMichael Chan 949c0c050c5SMichael Chan bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1); 950c0c050c5SMichael Chan /* Set the new target id */ 951c0c050c5SMichael Chan req.target_id = cpu_to_le16(vf->fw_fid); 952c193554eSMichael Chan req.encap_resp_target_id = cpu_to_le16(vf->fw_fid); 953c0c050c5SMichael Chan memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size); 954c0c050c5SMichael Chan 955a798302dSMichael Chan rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 956a798302dSMichael Chan if (rc) 957c0c050c5SMichael Chan netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc); 958c0c050c5SMichael Chan return rc; 959c0c050c5SMichael Chan } 960c0c050c5SMichael Chan 961746df139SVasundhara Volam static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf) 96291cdda40SVasundhara Volam { 96391cdda40SVasundhara Volam u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input); 96491cdda40SVasundhara Volam struct hwrm_func_vf_cfg_input *req = 96591cdda40SVasundhara Volam (struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr; 96691cdda40SVasundhara Volam 967746df139SVasundhara Volam /* Allow VF to set a valid MAC address, if trust is set to on or 968746df139SVasundhara Volam * if the PF assigned MAC address is zero 96991cdda40SVasundhara Volam */ 97091cdda40SVasundhara Volam if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) { 9712a516444SMichael Chan bool trust = bnxt_is_trusted_vf(bp, vf); 9722a516444SMichael Chan 97391cdda40SVasundhara Volam if (is_valid_ether_addr(req->dflt_mac_addr) && 9742a516444SMichael Chan (trust || !is_valid_ether_addr(vf->mac_addr) || 975707e7e96SMichael Chan ether_addr_equal(req->dflt_mac_addr, vf->mac_addr))) { 97691cdda40SVasundhara Volam ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr); 97791cdda40SVasundhara Volam return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size); 97891cdda40SVasundhara Volam } 97991cdda40SVasundhara Volam return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size); 98091cdda40SVasundhara Volam } 98191cdda40SVasundhara Volam return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size); 98291cdda40SVasundhara Volam } 98391cdda40SVasundhara Volam 984c0c050c5SMichael Chan static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf) 985c0c050c5SMichael Chan { 986c0c050c5SMichael Chan u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input); 987c0c050c5SMichael Chan struct hwrm_cfa_l2_filter_alloc_input *req = 988c0c050c5SMichael Chan (struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr; 98991cdda40SVasundhara Volam bool mac_ok = false; 990c0c050c5SMichael Chan 991746df139SVasundhara Volam if (!is_valid_ether_addr((const u8 *)req->l2_addr)) 992746df139SVasundhara Volam return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size); 993746df139SVasundhara Volam 994746df139SVasundhara Volam /* Allow VF to set a valid MAC address, if trust is set to on. 995746df139SVasundhara Volam * Or VF MAC address must first match MAC address in PF's context. 
static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
	struct hwrm_cfa_l2_filter_alloc_input *req =
		(struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
	bool mac_ok = false;

	if (!is_valid_ether_addr((const u8 *)req->l2_addr))
		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);

	/* Allow VF to set a valid MAC address, if trust is set to on.
	 * Or VF MAC address must first match MAC address in PF's context.
	 * Otherwise, it must match the VF MAC address if firmware spec >=
	 * 1.2.2
	 */
	if (bnxt_is_trusted_vf(bp, vf)) {
		mac_ok = true;
	} else if (is_valid_ether_addr(vf->mac_addr)) {
		if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
			mac_ok = true;
	} else if (is_valid_ether_addr(vf->vf_mac_addr)) {
		if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr))
			mac_ok = true;
	} else {
		/* There are two cases:
		 * 1. If firmware spec < 0x10202, the VF MAC address is not
		 *    forwarded to the PF and so it doesn't have to match.
		 * 2. Allow the VF to modify its own MAC when the PF has not
		 *    assigned a valid MAC address and firmware spec >= 0x10202.
		 */
		mac_ok = true;
	}
	if (mac_ok)
		return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
	return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
}
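/* When the VF link state is forced, bnxt_vf_set_link() below does not
 * forward HWRM_PORT_PHY_QCFG to the firmware.  It instead fabricates a
 * response from the PF's cached phy_qcfg_resp, overrides the link, speed,
 * duplex and pause fields (forced link up is reported as 10Gb full
 * duplex), and hands it back to the VF through bnxt_hwrm_fwd_resp().
 */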
static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;

	if (!(vf->flags & BNXT_VF_LINK_FORCED)) {
		/* real link */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
	} else {
		struct hwrm_port_phy_qcfg_output phy_qcfg_resp = {0};
		struct hwrm_port_phy_qcfg_input *phy_qcfg_req;

		phy_qcfg_req =
			(struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
		mutex_lock(&bp->hwrm_cmd_lock);
		memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
		       sizeof(phy_qcfg_resp));
		mutex_unlock(&bp->hwrm_cmd_lock);
		phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp));
		phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
		phy_qcfg_resp.valid = 1;

		if (vf->flags & BNXT_VF_LINK_UP) {
			/* if physical link is down, force link up on VF */
			if (phy_qcfg_resp.link !=
			    PORT_PHY_QCFG_RESP_LINK_LINK) {
				phy_qcfg_resp.link =
					PORT_PHY_QCFG_RESP_LINK_LINK;
				phy_qcfg_resp.link_speed = cpu_to_le16(
					PORT_PHY_QCFG_RESP_LINK_SPEED_10GB);
				phy_qcfg_resp.duplex_cfg =
					PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL;
				phy_qcfg_resp.duplex_state =
					PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL;
				phy_qcfg_resp.pause =
					(PORT_PHY_QCFG_RESP_PAUSE_TX |
					 PORT_PHY_QCFG_RESP_PAUSE_RX);
			}
		} else {
			/* force link down */
			phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
			phy_qcfg_resp.link_speed = 0;
			phy_qcfg_resp.duplex_state =
				PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF;
			phy_qcfg_resp.pause = 0;
		}
		rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
					phy_qcfg_req->resp_addr,
					phy_qcfg_req->cmpl_ring,
					sizeof(phy_qcfg_resp));
	}
	return rc;
}

static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;
	struct input *encap_req = vf->hwrm_cmd_req_addr;
	u32 req_type = le16_to_cpu(encap_req->req_type);

	switch (req_type) {
	case HWRM_FUNC_VF_CFG:
		rc = bnxt_vf_configure_mac(bp, vf);
		break;
	case HWRM_CFA_L2_FILTER_ALLOC:
		rc = bnxt_vf_validate_set_mac(bp, vf);
		break;
	case HWRM_FUNC_CFG:
		/* TODO: Validate if VF is allowed to change mac address,
		 * mtu, num of rings etc
		 */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_func_cfg_input));
		break;
	case HWRM_PORT_PHY_QCFG:
		rc = bnxt_vf_set_link(bp, vf);
		break;
	default:
		break;
	}
	return rc;
}

void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;

	/* Scan through VF's and process commands */
	while (1) {
		vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
		if (vf_id >= active_vfs)
			break;

		clear_bit(vf_id, bp->pf.vf_event_bmap);
		bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
		i = vf_id + 1;
	}
}
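/* bnxt_approve_mac() below runs on the VF side: it asks the PF, via a
 * forwarded HWRM_FUNC_VF_CFG request, to approve a MAC address change.
 * Firmware older than spec 1.2.2 (0x10202) cannot forward the request, so
 * the change is refused up front whenever the PF has already assigned a
 * MAC address.
 */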
int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
{
	struct hwrm_func_vf_cfg_input req = {0};
	int rc = 0;

	if (!BNXT_VF(bp))
		return 0;

	if (bp->hwrm_spec_code < 0x10202) {
		if (is_valid_ether_addr(bp->vf.mac_addr))
			rc = -EADDRNOTAVAIL;
		goto mac_done;
	}
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
	req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
mac_done:
	if (rc && strict) {
		rc = -EADDRNOTAVAIL;
		netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
			    mac);
		return rc;
	}
	return 0;
}
void bnxt_update_vf_mac(struct bnxt *bp)
{
	struct hwrm_func_qcaps_input req = {0};
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	bool inform_pf = false;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
	req.fid = cpu_to_le16(0xffff);

	mutex_lock(&bp->hwrm_cmd_lock);
	if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
		goto update_vf_mac_exit;

	/* Store MAC address from the firmware. There are 2 cases:
	 * 1. MAC address is valid. It is assigned from the PF and we
	 *    need to override the current VF MAC address with it.
	 * 2. MAC address is zero. The VF will use a random MAC address by
	 *    default but the stored zero MAC will allow the VF user to change
	 *    the random MAC address using ndo_set_mac_address() if he wants.
	 */
	if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr)) {
		memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);
		/* This means we are now using our own MAC address, let
		 * the PF know about this MAC address.
		 */
		if (!is_valid_ether_addr(bp->vf.mac_addr))
			inform_pf = true;
	}

	/* overwrite netdev dev_addr with admin VF MAC */
	if (is_valid_ether_addr(bp->vf.mac_addr))
		memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
update_vf_mac_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	if (inform_pf)
		bnxt_approve_mac(bp, bp->dev->dev_addr, false);
}

#else

int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
{
	if (*num_vfs)
		return -EOPNOTSUPP;
	return 0;
}

void bnxt_sriov_disable(struct bnxt *bp)
{
}

void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enable\n");
}

void bnxt_update_vf_mac(struct bnxt *bp)
{
}

int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
{
	return 0;
}
#endif