1c0c050c5SMichael Chan /* Broadcom NetXtreme-C/E network driver. 2c0c050c5SMichael Chan * 311f15ed3SMichael Chan * Copyright (c) 2014-2016 Broadcom Corporation 4746df139SVasundhara Volam * Copyright (c) 2016-2018 Broadcom Limited 5c0c050c5SMichael Chan * 6c0c050c5SMichael Chan * This program is free software; you can redistribute it and/or modify 7c0c050c5SMichael Chan * it under the terms of the GNU General Public License as published by 8c0c050c5SMichael Chan * the Free Software Foundation. 9c0c050c5SMichael Chan */ 10c0c050c5SMichael Chan 11c0c050c5SMichael Chan #include <linux/module.h> 12c0c050c5SMichael Chan #include <linux/pci.h> 13c0c050c5SMichael Chan #include <linux/netdevice.h> 14c0c050c5SMichael Chan #include <linux/if_vlan.h> 15c0c050c5SMichael Chan #include <linux/interrupt.h> 16c0c050c5SMichael Chan #include <linux/etherdevice.h> 17c0c050c5SMichael Chan #include "bnxt_hsi.h" 18c0c050c5SMichael Chan #include "bnxt.h" 192f593846SMichael Chan #include "bnxt_ulp.h" 20c0c050c5SMichael Chan #include "bnxt_sriov.h" 214ab0c6a8SSathya Perla #include "bnxt_vfr.h" 22c0c050c5SMichael Chan #include "bnxt_ethtool.h" 23c0c050c5SMichael Chan 24c0c050c5SMichael Chan #ifdef CONFIG_BNXT_SRIOV 25350a7149SEddie Wai static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp, 26350a7149SEddie Wai struct bnxt_vf_info *vf, u16 event_id) 27350a7149SEddie Wai { 28350a7149SEddie Wai struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr; 29350a7149SEddie Wai struct hwrm_fwd_async_event_cmpl_input req = {0}; 30350a7149SEddie Wai struct hwrm_async_event_cmpl *async_cmpl; 31350a7149SEddie Wai int rc = 0; 32350a7149SEddie Wai 33350a7149SEddie Wai bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1); 34350a7149SEddie Wai if (vf) 35350a7149SEddie Wai req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid); 36350a7149SEddie Wai else 37350a7149SEddie Wai /* broadcast this async event to all VFs */ 38350a7149SEddie Wai req.encap_async_event_target_id = 
cpu_to_le16(0xffff); 39350a7149SEddie Wai async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl; 4087c374deSMichael Chan async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT); 41350a7149SEddie Wai async_cmpl->event_id = cpu_to_le16(event_id); 42350a7149SEddie Wai 43350a7149SEddie Wai mutex_lock(&bp->hwrm_cmd_lock); 44350a7149SEddie Wai rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 45350a7149SEddie Wai 46350a7149SEddie Wai if (rc) { 47350a7149SEddie Wai netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n", 48350a7149SEddie Wai rc); 49350a7149SEddie Wai goto fwd_async_event_cmpl_exit; 50350a7149SEddie Wai } 51350a7149SEddie Wai 52350a7149SEddie Wai if (resp->error_code) { 53350a7149SEddie Wai netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n", 54350a7149SEddie Wai resp->error_code); 55350a7149SEddie Wai rc = -1; 56350a7149SEddie Wai } 57350a7149SEddie Wai 58350a7149SEddie Wai fwd_async_event_cmpl_exit: 59350a7149SEddie Wai mutex_unlock(&bp->hwrm_cmd_lock); 60350a7149SEddie Wai return rc; 61350a7149SEddie Wai } 62350a7149SEddie Wai 63c0c050c5SMichael Chan static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id) 64c0c050c5SMichael Chan { 65caefe526SMichael Chan if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 66c0c050c5SMichael Chan netdev_err(bp->dev, "vf ndo called though PF is down\n"); 67c0c050c5SMichael Chan return -EINVAL; 68c0c050c5SMichael Chan } 69c0c050c5SMichael Chan if (!bp->pf.active_vfs) { 70c0c050c5SMichael Chan netdev_err(bp->dev, "vf ndo called though sriov is disabled\n"); 71c0c050c5SMichael Chan return -EINVAL; 72c0c050c5SMichael Chan } 7378f30004SVenkat Duvvuru if (vf_id >= bp->pf.active_vfs) { 74c0c050c5SMichael Chan netdev_err(bp->dev, "Invalid VF id %d\n", vf_id); 75c0c050c5SMichael Chan return -EINVAL; 76c0c050c5SMichael Chan } 77c0c050c5SMichael Chan return 0; 78c0c050c5SMichael Chan } 79c0c050c5SMichael Chan 80c0c050c5SMichael Chan int bnxt_set_vf_spoofchk(struct net_device 
*dev, int vf_id, bool setting) 81c0c050c5SMichael Chan { 82c0c050c5SMichael Chan struct hwrm_func_cfg_input req = {0}; 83c0c050c5SMichael Chan struct bnxt *bp = netdev_priv(dev); 84c0c050c5SMichael Chan struct bnxt_vf_info *vf; 85c0c050c5SMichael Chan bool old_setting = false; 86c0c050c5SMichael Chan u32 func_flags; 87c0c050c5SMichael Chan int rc; 88c0c050c5SMichael Chan 898eb992e8SMichael Chan if (bp->hwrm_spec_code < 0x10701) 908eb992e8SMichael Chan return -ENOTSUPP; 918eb992e8SMichael Chan 92c0c050c5SMichael Chan rc = bnxt_vf_ndo_prep(bp, vf_id); 93c0c050c5SMichael Chan if (rc) 94c0c050c5SMichael Chan return rc; 95c0c050c5SMichael Chan 96c0c050c5SMichael Chan vf = &bp->pf.vf[vf_id]; 97c0c050c5SMichael Chan if (vf->flags & BNXT_VF_SPOOFCHK) 98c0c050c5SMichael Chan old_setting = true; 99c0c050c5SMichael Chan if (old_setting == setting) 100c0c050c5SMichael Chan return 0; 101c0c050c5SMichael Chan 102c0c050c5SMichael Chan func_flags = vf->func_flags; 103c0c050c5SMichael Chan if (setting) 1048eb992e8SMichael Chan func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE; 105c0c050c5SMichael Chan else 1068eb992e8SMichael Chan func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE; 107c0c050c5SMichael Chan /*TODO: if the driver supports VLAN filter on guest VLAN, 108c0c050c5SMichael Chan * the spoof check should also include vlan anti-spoofing 109c0c050c5SMichael Chan */ 110c0c050c5SMichael Chan bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 111c193554eSMichael Chan req.fid = cpu_to_le16(vf->fw_fid); 112c0c050c5SMichael Chan req.flags = cpu_to_le32(func_flags); 113c0c050c5SMichael Chan rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 114c0c050c5SMichael Chan if (!rc) { 115c0c050c5SMichael Chan vf->func_flags = func_flags; 116c0c050c5SMichael Chan if (setting) 117c0c050c5SMichael Chan vf->flags |= BNXT_VF_SPOOFCHK; 118c0c050c5SMichael Chan else 119c0c050c5SMichael Chan vf->flags &= ~BNXT_VF_SPOOFCHK; 120c0c050c5SMichael Chan } 
121c0c050c5SMichael Chan return rc; 122c0c050c5SMichael Chan } 123c0c050c5SMichael Chan 124746df139SVasundhara Volam int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted) 125746df139SVasundhara Volam { 126746df139SVasundhara Volam struct bnxt *bp = netdev_priv(dev); 127746df139SVasundhara Volam struct bnxt_vf_info *vf; 128746df139SVasundhara Volam 129746df139SVasundhara Volam if (bnxt_vf_ndo_prep(bp, vf_id)) 130746df139SVasundhara Volam return -EINVAL; 131746df139SVasundhara Volam 132746df139SVasundhara Volam vf = &bp->pf.vf[vf_id]; 133746df139SVasundhara Volam if (trusted) 134746df139SVasundhara Volam vf->flags |= BNXT_VF_TRUST; 135746df139SVasundhara Volam else 136746df139SVasundhara Volam vf->flags &= ~BNXT_VF_TRUST; 137746df139SVasundhara Volam 138746df139SVasundhara Volam return 0; 139746df139SVasundhara Volam } 140746df139SVasundhara Volam 141c0c050c5SMichael Chan int bnxt_get_vf_config(struct net_device *dev, int vf_id, 142c0c050c5SMichael Chan struct ifla_vf_info *ivi) 143c0c050c5SMichael Chan { 144c0c050c5SMichael Chan struct bnxt *bp = netdev_priv(dev); 145c0c050c5SMichael Chan struct bnxt_vf_info *vf; 146c0c050c5SMichael Chan int rc; 147c0c050c5SMichael Chan 148c0c050c5SMichael Chan rc = bnxt_vf_ndo_prep(bp, vf_id); 149c0c050c5SMichael Chan if (rc) 150c0c050c5SMichael Chan return rc; 151c0c050c5SMichael Chan 152c0c050c5SMichael Chan ivi->vf = vf_id; 153c0c050c5SMichael Chan vf = &bp->pf.vf[vf_id]; 154c0c050c5SMichael Chan 15591cdda40SVasundhara Volam if (is_valid_ether_addr(vf->mac_addr)) 156c0c050c5SMichael Chan memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN); 15791cdda40SVasundhara Volam else 15891cdda40SVasundhara Volam memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN); 159c0c050c5SMichael Chan ivi->max_tx_rate = vf->max_tx_rate; 160c0c050c5SMichael Chan ivi->min_tx_rate = vf->min_tx_rate; 161c0c050c5SMichael Chan ivi->vlan = vf->vlan; 162f0249056SMichael Chan if (vf->flags & BNXT_VF_QOS) 163f0249056SMichael Chan ivi->qos = vf->vlan >> 
VLAN_PRIO_SHIFT; 164f0249056SMichael Chan else 165f0249056SMichael Chan ivi->qos = 0; 166f0249056SMichael Chan ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK); 167746df139SVasundhara Volam ivi->trusted = !!(vf->flags & BNXT_VF_TRUST); 168c0c050c5SMichael Chan if (!(vf->flags & BNXT_VF_LINK_FORCED)) 169c0c050c5SMichael Chan ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; 170c0c050c5SMichael Chan else if (vf->flags & BNXT_VF_LINK_UP) 171c0c050c5SMichael Chan ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; 172c0c050c5SMichael Chan else 173c0c050c5SMichael Chan ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; 174c0c050c5SMichael Chan 175c0c050c5SMichael Chan return 0; 176c0c050c5SMichael Chan } 177c0c050c5SMichael Chan 178c0c050c5SMichael Chan int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac) 179c0c050c5SMichael Chan { 180c0c050c5SMichael Chan struct hwrm_func_cfg_input req = {0}; 181c0c050c5SMichael Chan struct bnxt *bp = netdev_priv(dev); 182c0c050c5SMichael Chan struct bnxt_vf_info *vf; 183c0c050c5SMichael Chan int rc; 184c0c050c5SMichael Chan 185c0c050c5SMichael Chan rc = bnxt_vf_ndo_prep(bp, vf_id); 186c0c050c5SMichael Chan if (rc) 187c0c050c5SMichael Chan return rc; 188c0c050c5SMichael Chan /* reject bc or mc mac addr, zero mac addr means allow 189c0c050c5SMichael Chan * VF to use its own mac addr 190c0c050c5SMichael Chan */ 191c0c050c5SMichael Chan if (is_multicast_ether_addr(mac)) { 192c0c050c5SMichael Chan netdev_err(dev, "Invalid VF ethernet address\n"); 193c0c050c5SMichael Chan return -EINVAL; 194c0c050c5SMichael Chan } 195c0c050c5SMichael Chan vf = &bp->pf.vf[vf_id]; 196c0c050c5SMichael Chan 197c0c050c5SMichael Chan memcpy(vf->mac_addr, mac, ETH_ALEN); 198c0c050c5SMichael Chan bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 199c193554eSMichael Chan req.fid = cpu_to_le16(vf->fw_fid); 200c0c050c5SMichael Chan req.flags = cpu_to_le32(vf->func_flags); 201c0c050c5SMichael Chan req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR); 
202c0c050c5SMichael Chan memcpy(req.dflt_mac_addr, mac, ETH_ALEN); 203c0c050c5SMichael Chan return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 204c0c050c5SMichael Chan } 205c0c050c5SMichael Chan 20679aab093SMoshe Shemesh int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos, 20779aab093SMoshe Shemesh __be16 vlan_proto) 208c0c050c5SMichael Chan { 209c0c050c5SMichael Chan struct hwrm_func_cfg_input req = {0}; 210c0c050c5SMichael Chan struct bnxt *bp = netdev_priv(dev); 211c0c050c5SMichael Chan struct bnxt_vf_info *vf; 212c0c050c5SMichael Chan u16 vlan_tag; 213c0c050c5SMichael Chan int rc; 214c0c050c5SMichael Chan 215cf6645f8SMichael Chan if (bp->hwrm_spec_code < 0x10201) 216cf6645f8SMichael Chan return -ENOTSUPP; 217cf6645f8SMichael Chan 21879aab093SMoshe Shemesh if (vlan_proto != htons(ETH_P_8021Q)) 21979aab093SMoshe Shemesh return -EPROTONOSUPPORT; 22079aab093SMoshe Shemesh 221c0c050c5SMichael Chan rc = bnxt_vf_ndo_prep(bp, vf_id); 222c0c050c5SMichael Chan if (rc) 223c0c050c5SMichael Chan return rc; 224c0c050c5SMichael Chan 225c0c050c5SMichael Chan /* TODO: needed to implement proper handling of user priority, 226c0c050c5SMichael Chan * currently fail the command if there is valid priority 227c0c050c5SMichael Chan */ 228c0c050c5SMichael Chan if (vlan_id > 4095 || qos) 229c0c050c5SMichael Chan return -EINVAL; 230c0c050c5SMichael Chan 231c0c050c5SMichael Chan vf = &bp->pf.vf[vf_id]; 232c0c050c5SMichael Chan vlan_tag = vlan_id; 233c0c050c5SMichael Chan if (vlan_tag == vf->vlan) 234c0c050c5SMichael Chan return 0; 235c0c050c5SMichael Chan 236c0c050c5SMichael Chan bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 237c193554eSMichael Chan req.fid = cpu_to_le16(vf->fw_fid); 238c0c050c5SMichael Chan req.flags = cpu_to_le32(vf->func_flags); 239c0c050c5SMichael Chan req.dflt_vlan = cpu_to_le16(vlan_tag); 240c0c050c5SMichael Chan req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN); 241c0c050c5SMichael Chan rc = 
hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 242c0c050c5SMichael Chan if (!rc) 243c0c050c5SMichael Chan vf->vlan = vlan_tag; 244c0c050c5SMichael Chan return rc; 245c0c050c5SMichael Chan } 246c0c050c5SMichael Chan 247c0c050c5SMichael Chan int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate, 248c0c050c5SMichael Chan int max_tx_rate) 249c0c050c5SMichael Chan { 250c0c050c5SMichael Chan struct hwrm_func_cfg_input req = {0}; 251c0c050c5SMichael Chan struct bnxt *bp = netdev_priv(dev); 252c0c050c5SMichael Chan struct bnxt_vf_info *vf; 253c0c050c5SMichael Chan u32 pf_link_speed; 254c0c050c5SMichael Chan int rc; 255c0c050c5SMichael Chan 256c0c050c5SMichael Chan rc = bnxt_vf_ndo_prep(bp, vf_id); 257c0c050c5SMichael Chan if (rc) 258c0c050c5SMichael Chan return rc; 259c0c050c5SMichael Chan 260c0c050c5SMichael Chan vf = &bp->pf.vf[vf_id]; 261c0c050c5SMichael Chan pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); 262c0c050c5SMichael Chan if (max_tx_rate > pf_link_speed) { 263c0c050c5SMichael Chan netdev_info(bp->dev, "max tx rate %d exceed PF link speed for VF %d\n", 264c0c050c5SMichael Chan max_tx_rate, vf_id); 265c0c050c5SMichael Chan return -EINVAL; 266c0c050c5SMichael Chan } 267c0c050c5SMichael Chan 268c0c050c5SMichael Chan if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) { 269c0c050c5SMichael Chan netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n", 270c0c050c5SMichael Chan min_tx_rate, vf_id); 271c0c050c5SMichael Chan return -EINVAL; 272c0c050c5SMichael Chan } 273c0c050c5SMichael Chan if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate) 274c0c050c5SMichael Chan return 0; 275c0c050c5SMichael Chan bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 276c193554eSMichael Chan req.fid = cpu_to_le16(vf->fw_fid); 277c0c050c5SMichael Chan req.flags = cpu_to_le32(vf->func_flags); 278c0c050c5SMichael Chan req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW); 279c0c050c5SMichael Chan 
req.max_bw = cpu_to_le32(max_tx_rate); 280c0c050c5SMichael Chan req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW); 281c0c050c5SMichael Chan req.min_bw = cpu_to_le32(min_tx_rate); 282c0c050c5SMichael Chan rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 283c0c050c5SMichael Chan if (!rc) { 284c0c050c5SMichael Chan vf->min_tx_rate = min_tx_rate; 285c0c050c5SMichael Chan vf->max_tx_rate = max_tx_rate; 286c0c050c5SMichael Chan } 287c0c050c5SMichael Chan return rc; 288c0c050c5SMichael Chan } 289c0c050c5SMichael Chan 290c0c050c5SMichael Chan int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link) 291c0c050c5SMichael Chan { 292c0c050c5SMichael Chan struct bnxt *bp = netdev_priv(dev); 293c0c050c5SMichael Chan struct bnxt_vf_info *vf; 294c0c050c5SMichael Chan int rc; 295c0c050c5SMichael Chan 296c0c050c5SMichael Chan rc = bnxt_vf_ndo_prep(bp, vf_id); 297c0c050c5SMichael Chan if (rc) 298c0c050c5SMichael Chan return rc; 299c0c050c5SMichael Chan 300c0c050c5SMichael Chan vf = &bp->pf.vf[vf_id]; 301c0c050c5SMichael Chan 302c0c050c5SMichael Chan vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED); 303c0c050c5SMichael Chan switch (link) { 304c0c050c5SMichael Chan case IFLA_VF_LINK_STATE_AUTO: 305c0c050c5SMichael Chan vf->flags |= BNXT_VF_LINK_UP; 306c0c050c5SMichael Chan break; 307c0c050c5SMichael Chan case IFLA_VF_LINK_STATE_DISABLE: 308c0c050c5SMichael Chan vf->flags |= BNXT_VF_LINK_FORCED; 309c0c050c5SMichael Chan break; 310c0c050c5SMichael Chan case IFLA_VF_LINK_STATE_ENABLE: 311c0c050c5SMichael Chan vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED; 312c0c050c5SMichael Chan break; 313c0c050c5SMichael Chan default: 314c0c050c5SMichael Chan netdev_err(bp->dev, "Invalid link option\n"); 315c0c050c5SMichael Chan rc = -EINVAL; 316c0c050c5SMichael Chan break; 317c0c050c5SMichael Chan } 318350a7149SEddie Wai if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED)) 319350a7149SEddie Wai rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf, 
32087c374deSMichael Chan ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE); 321c0c050c5SMichael Chan return rc; 322c0c050c5SMichael Chan } 323c0c050c5SMichael Chan 324c0c050c5SMichael Chan static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs) 325c0c050c5SMichael Chan { 326c0c050c5SMichael Chan int i; 327c0c050c5SMichael Chan struct bnxt_vf_info *vf; 328c0c050c5SMichael Chan 329c0c050c5SMichael Chan for (i = 0; i < num_vfs; i++) { 330c0c050c5SMichael Chan vf = &bp->pf.vf[i]; 331c0c050c5SMichael Chan memset(vf, 0, sizeof(*vf)); 332c0c050c5SMichael Chan } 333c0c050c5SMichael Chan return 0; 334c0c050c5SMichael Chan } 335c0c050c5SMichael Chan 3364bb6cdceSJeffrey Huang static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs) 337c0c050c5SMichael Chan { 338c0c050c5SMichael Chan int i, rc = 0; 339c0c050c5SMichael Chan struct bnxt_pf_info *pf = &bp->pf; 340c0c050c5SMichael Chan struct hwrm_func_vf_resc_free_input req = {0}; 341c0c050c5SMichael Chan 342c0c050c5SMichael Chan bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1); 343c0c050c5SMichael Chan 344c0c050c5SMichael Chan mutex_lock(&bp->hwrm_cmd_lock); 3454bb6cdceSJeffrey Huang for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) { 346c0c050c5SMichael Chan req.vf_id = cpu_to_le16(i); 347c0c050c5SMichael Chan rc = _hwrm_send_message(bp, &req, sizeof(req), 348c0c050c5SMichael Chan HWRM_CMD_TIMEOUT); 349c0c050c5SMichael Chan if (rc) 350c0c050c5SMichael Chan break; 351c0c050c5SMichael Chan } 352c0c050c5SMichael Chan mutex_unlock(&bp->hwrm_cmd_lock); 353c0c050c5SMichael Chan return rc; 354c0c050c5SMichael Chan } 355c0c050c5SMichael Chan 356c0c050c5SMichael Chan static void bnxt_free_vf_resources(struct bnxt *bp) 357c0c050c5SMichael Chan { 358c0c050c5SMichael Chan struct pci_dev *pdev = bp->pdev; 359c0c050c5SMichael Chan int i; 360c0c050c5SMichael Chan 361c0c050c5SMichael Chan kfree(bp->pf.vf_event_bmap); 362c0c050c5SMichael Chan bp->pf.vf_event_bmap = NULL; 363c0c050c5SMichael Chan 
364c0c050c5SMichael Chan for (i = 0; i < 4; i++) { 365c0c050c5SMichael Chan if (bp->pf.hwrm_cmd_req_addr[i]) { 366c0c050c5SMichael Chan dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE, 367c0c050c5SMichael Chan bp->pf.hwrm_cmd_req_addr[i], 368c0c050c5SMichael Chan bp->pf.hwrm_cmd_req_dma_addr[i]); 369c0c050c5SMichael Chan bp->pf.hwrm_cmd_req_addr[i] = NULL; 370c0c050c5SMichael Chan } 371c0c050c5SMichael Chan } 372c0c050c5SMichael Chan 373c0c050c5SMichael Chan kfree(bp->pf.vf); 374c0c050c5SMichael Chan bp->pf.vf = NULL; 375c0c050c5SMichael Chan } 376c0c050c5SMichael Chan 377c0c050c5SMichael Chan static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs) 378c0c050c5SMichael Chan { 379c0c050c5SMichael Chan struct pci_dev *pdev = bp->pdev; 380c0c050c5SMichael Chan u32 nr_pages, size, i, j, k = 0; 381c0c050c5SMichael Chan 382c0c050c5SMichael Chan bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL); 383c0c050c5SMichael Chan if (!bp->pf.vf) 384c0c050c5SMichael Chan return -ENOMEM; 385c0c050c5SMichael Chan 386c0c050c5SMichael Chan bnxt_set_vf_attr(bp, num_vfs); 387c0c050c5SMichael Chan 388c0c050c5SMichael Chan size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE; 389c0c050c5SMichael Chan nr_pages = size / BNXT_PAGE_SIZE; 390c0c050c5SMichael Chan if (size & (BNXT_PAGE_SIZE - 1)) 391c0c050c5SMichael Chan nr_pages++; 392c0c050c5SMichael Chan 393c0c050c5SMichael Chan for (i = 0; i < nr_pages; i++) { 394c0c050c5SMichael Chan bp->pf.hwrm_cmd_req_addr[i] = 395c0c050c5SMichael Chan dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE, 396c0c050c5SMichael Chan &bp->pf.hwrm_cmd_req_dma_addr[i], 397c0c050c5SMichael Chan GFP_KERNEL); 398c0c050c5SMichael Chan 399c0c050c5SMichael Chan if (!bp->pf.hwrm_cmd_req_addr[i]) 400c0c050c5SMichael Chan return -ENOMEM; 401c0c050c5SMichael Chan 402c0c050c5SMichael Chan for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) { 403c0c050c5SMichael Chan struct bnxt_vf_info *vf = &bp->pf.vf[k]; 404c0c050c5SMichael Chan 405c0c050c5SMichael Chan 
vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] + 406c0c050c5SMichael Chan j * BNXT_HWRM_REQ_MAX_SIZE; 407c0c050c5SMichael Chan vf->hwrm_cmd_req_dma_addr = 408c0c050c5SMichael Chan bp->pf.hwrm_cmd_req_dma_addr[i] + j * 409c0c050c5SMichael Chan BNXT_HWRM_REQ_MAX_SIZE; 410c0c050c5SMichael Chan k++; 411c0c050c5SMichael Chan } 412c0c050c5SMichael Chan } 413c0c050c5SMichael Chan 414c0c050c5SMichael Chan /* Max 128 VF's */ 415c0c050c5SMichael Chan bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL); 416c0c050c5SMichael Chan if (!bp->pf.vf_event_bmap) 417c0c050c5SMichael Chan return -ENOMEM; 418c0c050c5SMichael Chan 419c0c050c5SMichael Chan bp->pf.hwrm_cmd_req_pages = nr_pages; 420c0c050c5SMichael Chan return 0; 421c0c050c5SMichael Chan } 422c0c050c5SMichael Chan 423c0c050c5SMichael Chan static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp) 424c0c050c5SMichael Chan { 425c0c050c5SMichael Chan struct hwrm_func_buf_rgtr_input req = {0}; 426c0c050c5SMichael Chan 427c0c050c5SMichael Chan bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1); 428c0c050c5SMichael Chan 429c0c050c5SMichael Chan req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages); 430c0c050c5SMichael Chan req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT); 431c0c050c5SMichael Chan req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE); 432c0c050c5SMichael Chan req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]); 433c0c050c5SMichael Chan req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]); 434c0c050c5SMichael Chan req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]); 435c0c050c5SMichael Chan req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]); 436c0c050c5SMichael Chan 437c0c050c5SMichael Chan return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 438c0c050c5SMichael Chan } 439c0c050c5SMichael Chan 4404673d664SMichael Chan /* Only called by PF to reserve resources for VFs, returns actual number of 4414673d664SMichael 
Chan * VFs configured, or < 0 on error. 4424673d664SMichael Chan */ 4434673d664SMichael Chan static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs) 4444673d664SMichael Chan { 4454673d664SMichael Chan struct hwrm_func_vf_resource_cfg_input req = {0}; 4464673d664SMichael Chan struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 4474673d664SMichael Chan u16 vf_tx_rings, vf_rx_rings, vf_cp_rings; 4484673d664SMichael Chan u16 vf_stat_ctx, vf_vnics, vf_ring_grps; 4494673d664SMichael Chan struct bnxt_pf_info *pf = &bp->pf; 450*bf82736dSMichael Chan int i, rc = 0, min = 1; 4514673d664SMichael Chan 4524673d664SMichael Chan bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1); 4534673d664SMichael Chan 4544673d664SMichael Chan vf_cp_rings = hw_resc->max_cp_rings - bp->cp_nr_rings; 4554673d664SMichael Chan vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs; 4564673d664SMichael Chan if (bp->flags & BNXT_FLAG_AGG_RINGS) 4574673d664SMichael Chan vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2; 4584673d664SMichael Chan else 4594673d664SMichael Chan vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings; 4604673d664SMichael Chan vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings; 4614673d664SMichael Chan vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings; 4624673d664SMichael Chan vf_vnics = hw_resc->max_vnics - bp->nr_vnics; 4634673d664SMichael Chan vf_vnics = min_t(u16, vf_vnics, vf_rx_rings); 4644673d664SMichael Chan 46586c3380dSMichael Chan req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX); 46686c3380dSMichael Chan req.max_rsscos_ctx = cpu_to_le16(BNXT_VF_MAX_RSS_CTX); 467*bf82736dSMichael Chan if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) { 468*bf82736dSMichael Chan min = 0; 469*bf82736dSMichael Chan req.min_rsscos_ctx = cpu_to_le16(min); 470*bf82736dSMichael Chan } 471*bf82736dSMichael Chan if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL || 472*bf82736dSMichael Chan pf->vf_resv_strategy == 
BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) { 473*bf82736dSMichael Chan req.min_cmpl_rings = cpu_to_le16(min); 474*bf82736dSMichael Chan req.min_tx_rings = cpu_to_le16(min); 475*bf82736dSMichael Chan req.min_rx_rings = cpu_to_le16(min); 476*bf82736dSMichael Chan req.min_l2_ctxs = cpu_to_le16(min); 477*bf82736dSMichael Chan req.min_vnics = cpu_to_le16(min); 478*bf82736dSMichael Chan req.min_stat_ctx = cpu_to_le16(min); 479*bf82736dSMichael Chan req.min_hw_ring_grps = cpu_to_le16(min); 4804673d664SMichael Chan } else { 4814673d664SMichael Chan vf_cp_rings /= num_vfs; 4824673d664SMichael Chan vf_tx_rings /= num_vfs; 4834673d664SMichael Chan vf_rx_rings /= num_vfs; 4844673d664SMichael Chan vf_vnics /= num_vfs; 4854673d664SMichael Chan vf_stat_ctx /= num_vfs; 4864673d664SMichael Chan vf_ring_grps /= num_vfs; 4874673d664SMichael Chan 4884673d664SMichael Chan req.min_cmpl_rings = cpu_to_le16(vf_cp_rings); 4894673d664SMichael Chan req.min_tx_rings = cpu_to_le16(vf_tx_rings); 4904673d664SMichael Chan req.min_rx_rings = cpu_to_le16(vf_rx_rings); 49186c3380dSMichael Chan req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); 4924673d664SMichael Chan req.min_vnics = cpu_to_le16(vf_vnics); 4934673d664SMichael Chan req.min_stat_ctx = cpu_to_le16(vf_stat_ctx); 4944673d664SMichael Chan req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps); 4954673d664SMichael Chan } 4964673d664SMichael Chan req.max_cmpl_rings = cpu_to_le16(vf_cp_rings); 4974673d664SMichael Chan req.max_tx_rings = cpu_to_le16(vf_tx_rings); 4984673d664SMichael Chan req.max_rx_rings = cpu_to_le16(vf_rx_rings); 49986c3380dSMichael Chan req.max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); 5004673d664SMichael Chan req.max_vnics = cpu_to_le16(vf_vnics); 5014673d664SMichael Chan req.max_stat_ctx = cpu_to_le16(vf_stat_ctx); 5024673d664SMichael Chan req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps); 5034673d664SMichael Chan 5044673d664SMichael Chan mutex_lock(&bp->hwrm_cmd_lock); 5054673d664SMichael Chan for (i = 0; i < num_vfs; i++) { 
5064673d664SMichael Chan req.vf_id = cpu_to_le16(pf->first_vf_id + i); 5074673d664SMichael Chan rc = _hwrm_send_message(bp, &req, sizeof(req), 5084673d664SMichael Chan HWRM_CMD_TIMEOUT); 5094673d664SMichael Chan if (rc) { 5104673d664SMichael Chan rc = -ENOMEM; 5114673d664SMichael Chan break; 5124673d664SMichael Chan } 5134673d664SMichael Chan pf->active_vfs = i + 1; 5144673d664SMichael Chan pf->vf[i].fw_fid = pf->first_vf_id + i; 5154673d664SMichael Chan } 5164673d664SMichael Chan mutex_unlock(&bp->hwrm_cmd_lock); 5174673d664SMichael Chan if (pf->active_vfs) { 518596f9d55SMichael Chan u16 n = pf->active_vfs; 5194673d664SMichael Chan 520596f9d55SMichael Chan hw_resc->max_tx_rings -= le16_to_cpu(req.min_tx_rings) * n; 521596f9d55SMichael Chan hw_resc->max_rx_rings -= le16_to_cpu(req.min_rx_rings) * n; 522596f9d55SMichael Chan hw_resc->max_hw_ring_grps -= le16_to_cpu(req.min_hw_ring_grps) * 523596f9d55SMichael Chan n; 524596f9d55SMichael Chan hw_resc->max_cp_rings -= le16_to_cpu(req.min_cmpl_rings) * n; 5254673d664SMichael Chan hw_resc->max_rsscos_ctxs -= pf->active_vfs; 526596f9d55SMichael Chan hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n; 527596f9d55SMichael Chan hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n; 5284673d664SMichael Chan 5294673d664SMichael Chan rc = pf->active_vfs; 5304673d664SMichael Chan } 5314673d664SMichael Chan return rc; 5324673d664SMichael Chan } 5334673d664SMichael Chan 5344673d664SMichael Chan /* Only called by PF to reserve resources for VFs, returns actual number of 5354673d664SMichael Chan * VFs configured, or < 0 on error. 
5364673d664SMichael Chan */ 53792268c32SMichael Chan static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) 538c0c050c5SMichael Chan { 539c0c050c5SMichael Chan u32 rc = 0, mtu, i; 540c0c050c5SMichael Chan u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics; 5416a4f2947SMichael Chan struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 5426a4f2947SMichael Chan u16 vf_ring_grps, max_stat_ctxs; 543c0c050c5SMichael Chan struct hwrm_func_cfg_input req = {0}; 544c0c050c5SMichael Chan struct bnxt_pf_info *pf = &bp->pf; 545391be5c2SMichael Chan int total_vf_tx_rings = 0; 546c0c050c5SMichael Chan 547c0c050c5SMichael Chan bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 548c0c050c5SMichael Chan 5496a4f2947SMichael Chan max_stat_ctxs = hw_resc->max_stat_ctxs; 5506a4f2947SMichael Chan 551c0c050c5SMichael Chan /* Remaining rings are distributed equally amongs VF's for now */ 5526a4f2947SMichael Chan vf_cp_rings = (hw_resc->max_cp_rings - bp->cp_nr_rings) / num_vfs; 5536a4f2947SMichael Chan vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs; 554c0c050c5SMichael Chan if (bp->flags & BNXT_FLAG_AGG_RINGS) 5556a4f2947SMichael Chan vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) / 55692268c32SMichael Chan num_vfs; 557c0c050c5SMichael Chan else 5586a4f2947SMichael Chan vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings) / 5596a4f2947SMichael Chan num_vfs; 5606a4f2947SMichael Chan vf_ring_grps = (hw_resc->max_hw_ring_grps - bp->rx_nr_rings) / num_vfs; 5616a4f2947SMichael Chan vf_tx_rings = (hw_resc->max_tx_rings - bp->tx_nr_rings) / num_vfs; 5626a4f2947SMichael Chan vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs; 5638427af81SMichael Chan vf_vnics = min_t(u16, vf_vnics, vf_rx_rings); 564c0c050c5SMichael Chan 565c0c050c5SMichael Chan req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU | 566c0c050c5SMichael Chan FUNC_CFG_REQ_ENABLES_MRU | 567c0c050c5SMichael Chan FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS | 568c0c050c5SMichael Chan 
				  /* enable bits: which per-VF resource counts
				   * in this FUNC_CFG request are valid
				   */
				  FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_VNICS |
				  FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);

	/* VF MRU/MTU mirrors the PF MTU plus L2 framing overhead */
	mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	req.mru = cpu_to_le16(mtu);
	req.mtu = cpu_to_le16(mtu);

	req.num_rsscos_ctxs = cpu_to_le16(1);
	req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
	req.num_tx_rings = cpu_to_le16(vf_tx_rings);
	req.num_rx_rings = cpu_to_le16(vf_rx_rings);
	req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
	req.num_l2_ctxs = cpu_to_le16(4);

	req.num_vnics = cpu_to_le16(vf_vnics);
	/* FIXME spec currently uses 1 bit for stats ctx */
	req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);

	/* Issue one FUNC_CFG per VF under the single HWRM mutex.  Stop at
	 * the first failure but keep the VFs configured so far active.
	 */
	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < num_vfs; i++) {
		int vf_tx_rsvd = vf_tx_rings;

		req.fid = cpu_to_le16(pf->first_vf_id + i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
		pf->active_vfs = i + 1;
		pf->vf[i].fw_fid = le16_to_cpu(req.fid);
		/* Firmware may reserve a different number of TX rings than
		 * requested; read back the actual reservation.
		 */
		rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid,
					      &vf_tx_rsvd);
		if (rc)
			break;
		total_vf_tx_rings += vf_tx_rsvd;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	if (rc)
		rc = -ENOMEM;
	if (pf->active_vfs) {
		/* Deduct what the VFs now own from the PF's resource pool */
		hw_resc->max_tx_rings -= total_vf_tx_rings;
		hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
		hw_resc->max_hw_ring_grps -= vf_ring_grps * num_vfs;
		hw_resc->max_cp_rings -= vf_cp_rings * num_vfs;
		hw_resc->max_rsscos_ctxs -= num_vfs;
		hw_resc->max_stat_ctxs -= vf_stat_ctx * num_vfs;
		hw_resc->max_vnics -= vf_vnics * num_vfs;
		/* success: report the number of VFs actually configured */
		rc = pf->active_vfs;
	}
	return rc;
}

/* Reserve HW resources for num_vfs VFs: use the new resource-manager
 * interface when the firmware supports it (BNXT_FLAG_NEW_RM), else the
 * legacy FUNC_CFG path.  Returns the number of VFs configured on success.
 */
static int bnxt_func_cfg(struct bnxt *bp, int num_vfs)
{
	if (bp->flags & BNXT_FLAG_NEW_RM)
		return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs);
	else
		return bnxt_hwrm_func_cfg(bp, num_vfs);
}

/* Enable SR-IOV for up to *num_vfs VFs.  May lower *num_vfs to what the
 * remaining PF resources can support; sets *num_vfs to 0 on failure.
 */
static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
{
	int rc = 0, vfs_supported;
	int min_rx_rings, min_tx_rings, min_rss_ctxs;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int tx_ok = 0, rx_ok = 0, rss_ok = 0;
	int avail_cp, avail_stat;

	/* Check if we can enable requested num of vf's. At a minimum
	 * we require 1 RX 1 TX rings for each VF. In this minimum conf
	 * features like TPA will not be available.
	 */
	vfs_supported = *num_vfs;

	avail_cp = hw_resc->max_cp_rings - bp->cp_nr_rings;
	avail_stat = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
	/* the usable completion-ring budget is also capped by the number
	 * of stat contexts left
	 */
	avail_cp = min_t(int, avail_cp, avail_stat);

	/* Step the VF count down until the leftover rings/contexts fit */
	while (vfs_supported) {
		min_rx_rings = vfs_supported;
		min_tx_rings = vfs_supported;
		min_rss_ctxs = vfs_supported;

		if (bp->flags & BNXT_FLAG_AGG_RINGS) {
			/* aggregation rings double the PF's RX ring usage */
			if (hw_resc->max_rx_rings - bp->rx_nr_rings * 2 >=
			    min_rx_rings)
				rx_ok = 1;
		} else {
			if (hw_resc->max_rx_rings - bp->rx_nr_rings >=
			    min_rx_rings)
				rx_ok = 1;
		}
		/* RX also needs one VNIC and one CP ring per VF */
		if (hw_resc->max_vnics - bp->nr_vnics < min_rx_rings ||
		    avail_cp < min_rx_rings)
			rx_ok = 0;

		if (hw_resc->max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
		    avail_cp >= min_tx_rings)
			tx_ok = 1;

		if (hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs >=
		    min_rss_ctxs)
			rss_ok = 1;

		if (tx_ok && rx_ok && rss_ok)
			break;

		vfs_supported--;
	}

	if (!vfs_supported) {
		netdev_err(bp->dev, "Cannot enable VF's as all resources are used by PF\n");
		return -EINVAL;
	}

	if (vfs_supported != *num_vfs) {
		netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
			    *num_vfs, vfs_supported);
		*num_vfs = vfs_supported;
	}

	rc = bnxt_alloc_vf_resources(bp, *num_vfs);
	if (rc)
		goto err_out1;

	/* Reserve resources for VFs */
	rc = bnxt_func_cfg(bp, *num_vfs);
	if (rc != *num_vfs) {
		if (rc <= 0) {
			netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
			*num_vfs = 0;
			goto err_out2;
		}
		/* partial reservation: continue with the reduced VF count */
		netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n", rc);
		*num_vfs = rc;
	}

	/* Register buffers for VFs */
	rc = bnxt_hwrm_func_buf_rgtr(bp);
	if (rc)
		goto err_out2;

	/* notify the ULP (e.g. RDMA) driver of the new VF count */
	bnxt_ulp_sriov_cfg(bp, *num_vfs);

	rc = pci_enable_sriov(bp->pdev, *num_vfs);
	if (rc)
		goto err_out2;

	return 0;

err_out2:
	/* Free the resources reserved for various VF's */
	bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);

err_out1:
	bnxt_free_vf_resources(bp);

	return rc;
}

/* Tear down SR-IOV.  If VFs are still assigned to VMs, only notify them
 * of the PF driver unload and leave their HW resources in place.
 */
void bnxt_sriov_disable(struct bnxt *bp)
{
	u16 num_vfs = pci_num_vf(bp->pdev);

	if (!num_vfs)
		return;

	/* synchronize VF and VF-rep create and destroy */
	mutex_lock(&bp->sriov_lock);
	bnxt_vf_reps_destroy(bp);

	if (pci_vfs_assigned(bp->pdev)) {
		/* broadcast the driver-unload event to all VFs */
		bnxt_hwrm_fwd_async_event_cmpl(
			bp, NULL, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
		netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
			    num_vfs);
	} else {
		pci_disable_sriov(bp->pdev);
		/* Free the HW resources reserved for various VF's */
		bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
	}
	mutex_unlock(&bp->sriov_lock);

	bnxt_free_vf_resources(bp);

	bp->pf.active_vfs = 0;
	/* Reclaim all resources for the PF. */
	rtnl_lock();
	bnxt_restore_pf_fw_resources(bp);
	rtnl_unlock();

	bnxt_ulp_sriov_cfg(bp, 0);
}

/* PCI sriov_configure entry point (sysfs sriov_numvfs).  Returns the
 * number of VFs actually enabled (0 if the request was rejected).
 */
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);

	if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
		netdev_warn(dev, "Not allow SRIOV if the irq mode is not MSIX\n");
		return 0;
	}

	rtnl_lock();
	if (!netif_running(dev)) {
		netdev_warn(dev, "Reject SRIOV config request since if is down!\n");
		rtnl_unlock();
		return 0;
	}
	/* sriov_cfg flags an SRIOV reconfig in progress; waiters on
	 * sriov_cfg_wait are woken when it is cleared below.
	 */
	bp->sriov_cfg = true;
	rtnl_unlock();

	if (pci_vfs_assigned(bp->pdev)) {
		netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n");
		num_vfs = 0;
		goto sriov_cfg_exit;
	}

	/* Check if enabled VFs is same as requested */
	if (num_vfs && num_vfs == bp->pf.active_vfs)
		goto sriov_cfg_exit;

	/* if there are previous existing VFs, clean them up */
	bnxt_sriov_disable(bp);
	if (!num_vfs)
		goto sriov_cfg_exit;

	/* may lower num_vfs if the full request cannot be satisfied */
	bnxt_sriov_enable(bp, &num_vfs);

sriov_cfg_exit:
	bp->sriov_cfg = false;
	wake_up(&bp->sriov_cfg_wait);

	return num_vfs;
}

/* Forward a PF-prepared response (encap_resp) to a VF's completion ring,
 * completing the firmware command the VF is waiting on.
 */
static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
			      void *encap_resp, __le64 encap_resp_addr,
			      __le16 encap_resp_cpr, u32 msg_size)
{
	int rc = 0;
	struct hwrm_fwd_resp_input req = {0};
	struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	/* bound-check before the memcpy into req.encap_resp below */
	if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
		return -EINVAL;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);

	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_len = cpu_to_le16(msg_size);
	req.encap_resp_addr = encap_resp_addr;
	req.encap_resp_cmpl_ring = encap_resp_cpr;
	memcpy(req.encap_resp, encap_resp, msg_size);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

	if (rc) {
		netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
		goto fwd_resp_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_fwd_resp error %d\n",
			   resp->error_code);
		rc = -1;
	}

fwd_resp_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

/* Reject a forwarded VF command: firmware completes the VF's request
 * with an error instead of executing it.
 */
static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				  u32 msg_size)
{
	int rc = 0;
	struct hwrm_reject_fwd_resp_input req = {0};
	struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size))
		return -EINVAL;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

	if (rc) {
		netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
		goto fwd_err_resp_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_fwd_err_resp error %d\n",
			   resp->error_code);
		rc = -1;
	}

fwd_err_resp_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

/* Approve a forwarded VF command: firmware executes the VF's original
 * request on its behalf.
 */
static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				   u32 msg_size)
{
	int rc = 0;
	struct hwrm_exec_fwd_resp_input req = {0};
	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size))
		return -EINVAL;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

	if (rc) {
		netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc);
		goto exec_fwd_resp_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_exec_fw_resp error %d\n",
			   resp->error_code);
		rc = -1;
	}

exec_fwd_resp_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

/* Validate a VF's HWRM_FUNC_VF_CFG request, specifically any default MAC
 * address it carries, then forward (exec) or reject it.
 */
static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input);
	struct hwrm_func_vf_cfg_input *req =
		(struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr;

	/* Allow VF to set a valid MAC address, if trust is set to on or
	 * if the PF assigned MAC address is zero
	 */
	if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) {
		if (is_valid_ether_addr(req->dflt_mac_addr) &&
		    ((vf->flags & BNXT_VF_TRUST) ||
		     !is_valid_ether_addr(vf->mac_addr) ||
		     ether_addr_equal(req->dflt_mac_addr, vf->mac_addr))) {
			/* remember the VF-chosen MAC for later validation */
			ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr);
			return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
		}
		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
	}
	return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
}

/* Validate a VF's HWRM_CFA_L2_FILTER_ALLOC (MAC filter) request and
 * forward it for execution or reject it, per the trust/assigned-MAC policy.
 */
static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
	struct hwrm_cfa_l2_filter_alloc_input *req =
		(struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
	bool mac_ok = false;

	if (!is_valid_ether_addr((const u8 *)req->l2_addr))
		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);

	/* Allow VF to set a valid MAC address, if trust is set to on.
	 * Or VF MAC address must first match MAC address in PF's context.
	 * Otherwise, it must match the VF MAC address if firmware spec >=
	 * 1.2.2
	 */
	if (vf->flags & BNXT_VF_TRUST) {
		mac_ok = true;
	} else if (is_valid_ether_addr(vf->mac_addr)) {
		/* PF-assigned MAC takes precedence over the VF's own choice */
		if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
			mac_ok = true;
	} else if (is_valid_ether_addr(vf->vf_mac_addr)) {
		if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr))
			mac_ok = true;
	} else {
		/* There are two cases:
		 * 1.If firmware spec < 0x10202,VF MAC address is not forwarded
		 *   to the PF and so it doesn't have to match
		 * 2.Allow VF to modify it's own MAC when PF has not assigned a
		 *   valid MAC address and firmware spec >= 0x10202
		 */
		mac_ok = true;
	}
	if (mac_ok)
		return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
	return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
}

/* Answer a VF's PORT_PHY_QCFG query.  When the admin has forced the VF
 * link state (BNXT_VF_LINK_FORCED), synthesize a response reflecting the
 * forced state instead of forwarding the query to firmware.
 */
static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;

	if (!(vf->flags & BNXT_VF_LINK_FORCED)) {
		/* real link */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
	} else {
		struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
		struct hwrm_port_phy_qcfg_input *phy_qcfg_req;

		phy_qcfg_req =
		(struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
		/* snapshot the PF's cached PHY state under the HWRM lock */
		mutex_lock(&bp->hwrm_cmd_lock);
		memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
		       sizeof(phy_qcfg_resp));
		mutex_unlock(&bp->hwrm_cmd_lock);
		phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp));
		/* echo the VF's sequence id so it matches its request */
		phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
		phy_qcfg_resp.valid = 1;

		if (vf->flags & BNXT_VF_LINK_UP) {
			/* if physical link is down, force link up on VF */
			if (phy_qcfg_resp.link !=
			    PORT_PHY_QCFG_RESP_LINK_LINK) {
				phy_qcfg_resp.link =
					PORT_PHY_QCFG_RESP_LINK_LINK;
				phy_qcfg_resp.link_speed = cpu_to_le16(
					PORT_PHY_QCFG_RESP_LINK_SPEED_10GB);
				phy_qcfg_resp.duplex_cfg =
					PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL;
				phy_qcfg_resp.duplex_state =
					PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL;
				phy_qcfg_resp.pause =
					(PORT_PHY_QCFG_RESP_PAUSE_TX |
					 PORT_PHY_QCFG_RESP_PAUSE_RX);
			}
		} else {
			/* force link down */
			phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
			phy_qcfg_resp.link_speed = 0;
			phy_qcfg_resp.duplex_state =
				PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF;
			phy_qcfg_resp.pause = 0;
		}
		/* deliver the synthesized response to the VF's buffers */
		rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
					phy_qcfg_req->resp_addr,
					phy_qcfg_req->cmpl_ring,
					sizeof(phy_qcfg_resp));
	}
	return rc;
}

/* Dispatch one forwarded VF command by request type: validate where
 * needed, then execute, reject, or synthesize a response.
 */
static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;
	struct input *encap_req = vf->hwrm_cmd_req_addr;
	u32 req_type = le16_to_cpu(encap_req->req_type);

	switch (req_type) {
	case HWRM_FUNC_VF_CFG:
		rc = bnxt_vf_configure_mac(bp, vf);
		break;
	case HWRM_CFA_L2_FILTER_ALLOC:
		rc = bnxt_vf_validate_set_mac(bp, vf);
		break;
	case HWRM_FUNC_CFG:
		/* TODO Validate if VF is allowed to change mac address,
		 * mtu, num of rings etc
		 */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_func_cfg_input));
		break;
	case HWRM_PORT_PHY_QCFG:
		rc = bnxt_vf_set_link(bp, vf);
		break;
	default:
		/* unrecognized commands are dropped without a response */
		break;
	}
	return rc;
}

/* Process all VF commands flagged in vf_event_bmap (set by the async
 * event handler), clearing each bit before handling it.
 */
void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;

	/* Scan through VF's and process commands */
	while (1) {
		vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
		if (vf_id >= active_vfs)
			break;

		clear_bit(vf_id, bp->pf.vf_event_bmap);
		bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
		i = vf_id + 1;
	}
}

/* On a VF, refresh the PF-administered MAC address from firmware and
 * apply it to the netdev if valid.
 */
void bnxt_update_vf_mac(struct bnxt *bp)
{
	struct hwrm_func_qcaps_input req = {0};
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
	/* fid 0xffff queries this function itself -- presumably the HWRM
	 * self-FID convention; confirm against the HSI spec
	 */
	req.fid = cpu_to_le16(0xffff);

	mutex_lock(&bp->hwrm_cmd_lock);
	if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
		goto update_vf_mac_exit;

	/* Store MAC address from the firmware. There are 2 cases:
	 * 1. MAC address is valid. It is assigned from the PF and we
	 *    need to override the current VF MAC address with it.
	 * 2. MAC address is zero. The VF will use a random MAC address by
	 *    default but the stored zero MAC will allow the VF user to change
	 *    the random MAC address using ndo_set_mac_address() if he wants.
	 */
	if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr))
		memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);

	/* overwrite netdev dev_addr with admin VF MAC */
	if (is_valid_ether_addr(bp->vf.mac_addr))
		memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
update_vf_mac_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
}

/* On a VF, ask the PF (via firmware) to approve using @mac.  Returns 0
 * if approved or not applicable, -EADDRNOTAVAIL otherwise.
 */
int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
{
	struct hwrm_func_vf_cfg_input req = {0};
	int rc = 0;

	if (!BNXT_VF(bp))
		return 0;

	if (bp->hwrm_spec_code < 0x10202) {
		/* old firmware cannot relay the request; refuse only when
		 * it would override a PF-assigned MAC
		 */
		if (is_valid_ether_addr(bp->vf.mac_addr))
			rc = -EADDRNOTAVAIL;
		goto mac_done;
	}
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
	req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
mac_done:
	if (rc) {
		rc = -EADDRNOTAVAIL;
		netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
			    mac);
	}
	return rc;
}
#else

/* Stubs used when the driver is built without CONFIG_BNXT_SRIOV. */

void bnxt_sriov_disable(struct bnxt *bp)
{
}

void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enable\n");
}

void bnxt_update_vf_mac(struct bnxt *bp)
{
}

int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
{
	return 0;
}
#endif