1c0c050c5SMichael Chan /* Broadcom NetXtreme-C/E network driver. 2c0c050c5SMichael Chan * 311f15ed3SMichael Chan * Copyright (c) 2014-2016 Broadcom Corporation 4c0c050c5SMichael Chan * 5c0c050c5SMichael Chan * This program is free software; you can redistribute it and/or modify 6c0c050c5SMichael Chan * it under the terms of the GNU General Public License as published by 7c0c050c5SMichael Chan * the Free Software Foundation. 8c0c050c5SMichael Chan */ 9c0c050c5SMichael Chan 10c0c050c5SMichael Chan #include <linux/module.h> 11c0c050c5SMichael Chan #include <linux/pci.h> 12c0c050c5SMichael Chan #include <linux/netdevice.h> 13c0c050c5SMichael Chan #include <linux/if_vlan.h> 14c0c050c5SMichael Chan #include <linux/interrupt.h> 15c0c050c5SMichael Chan #include <linux/etherdevice.h> 16c0c050c5SMichael Chan #include "bnxt_hsi.h" 17c0c050c5SMichael Chan #include "bnxt.h" 18c0c050c5SMichael Chan #include "bnxt_sriov.h" 19c0c050c5SMichael Chan #include "bnxt_ethtool.h" 20c0c050c5SMichael Chan 21c0c050c5SMichael Chan #ifdef CONFIG_BNXT_SRIOV 22c0c050c5SMichael Chan static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id) 23c0c050c5SMichael Chan { 24caefe526SMichael Chan if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 25c0c050c5SMichael Chan netdev_err(bp->dev, "vf ndo called though PF is down\n"); 26c0c050c5SMichael Chan return -EINVAL; 27c0c050c5SMichael Chan } 28c0c050c5SMichael Chan if (!bp->pf.active_vfs) { 29c0c050c5SMichael Chan netdev_err(bp->dev, "vf ndo called though sriov is disabled\n"); 30c0c050c5SMichael Chan return -EINVAL; 31c0c050c5SMichael Chan } 32c0c050c5SMichael Chan if (vf_id >= bp->pf.max_vfs) { 33c0c050c5SMichael Chan netdev_err(bp->dev, "Invalid VF id %d\n", vf_id); 34c0c050c5SMichael Chan return -EINVAL; 35c0c050c5SMichael Chan } 36c0c050c5SMichael Chan return 0; 37c0c050c5SMichael Chan } 38c0c050c5SMichael Chan 39c0c050c5SMichael Chan int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting) 40c0c050c5SMichael Chan { 41c0c050c5SMichael Chan struct hwrm_func_cfg_input req = {0}; 42c0c050c5SMichael Chan struct bnxt *bp = netdev_priv(dev); 43c0c050c5SMichael Chan struct bnxt_vf_info *vf; 44c0c050c5SMichael Chan bool old_setting = false; 45c0c050c5SMichael Chan u32 func_flags; 46c0c050c5SMichael Chan int rc; 47c0c050c5SMichael Chan 48c0c050c5SMichael Chan rc = bnxt_vf_ndo_prep(bp, vf_id); 49c0c050c5SMichael Chan if (rc) 50c0c050c5SMichael Chan return rc; 51c0c050c5SMichael Chan 52c0c050c5SMichael Chan vf = &bp->pf.vf[vf_id]; 53c0c050c5SMichael Chan if (vf->flags & BNXT_VF_SPOOFCHK) 54c0c050c5SMichael Chan old_setting = true; 55c0c050c5SMichael Chan if (old_setting == setting) 56c0c050c5SMichael Chan return 0; 57c0c050c5SMichael Chan 58c0c050c5SMichael Chan func_flags = vf->func_flags; 59c0c050c5SMichael Chan if (setting) 60c0c050c5SMichael Chan func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK; 61c0c050c5SMichael Chan else 62c0c050c5SMichael Chan func_flags &= ~FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK; 63c0c050c5SMichael Chan /*TODO: if the driver supports VLAN filter on guest VLAN, 64c0c050c5SMichael Chan * the spoof check should also include vlan anti-spoofing 65c0c050c5SMichael Chan */ 66c0c050c5SMichael Chan bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 67c193554eSMichael Chan req.fid = cpu_to_le16(vf->fw_fid); 68c0c050c5SMichael Chan req.flags = cpu_to_le32(func_flags); 69c0c050c5SMichael Chan rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 70c0c050c5SMichael Chan if (!rc) { 71c0c050c5SMichael Chan vf->func_flags = func_flags; 72c0c050c5SMichael Chan if (setting) 73c0c050c5SMichael Chan vf->flags |= BNXT_VF_SPOOFCHK; 74c0c050c5SMichael Chan else 75c0c050c5SMichael Chan vf->flags &= ~BNXT_VF_SPOOFCHK; 76c0c050c5SMichael Chan } 77c0c050c5SMichael Chan return rc; 78c0c050c5SMichael Chan } 79c0c050c5SMichael Chan 80c0c050c5SMichael Chan int bnxt_get_vf_config(struct net_device *dev, int vf_id, 81c0c050c5SMichael Chan struct ifla_vf_info *ivi) 82c0c050c5SMichael Chan { 83c0c050c5SMichael Chan struct bnxt *bp = netdev_priv(dev); 84c0c050c5SMichael Chan struct bnxt_vf_info *vf; 85c0c050c5SMichael Chan int rc; 86c0c050c5SMichael Chan 87c0c050c5SMichael Chan rc = bnxt_vf_ndo_prep(bp, vf_id); 88c0c050c5SMichael Chan if (rc) 89c0c050c5SMichael Chan return rc; 90c0c050c5SMichael Chan 91c0c050c5SMichael Chan ivi->vf = vf_id; 92c0c050c5SMichael Chan vf = &bp->pf.vf[vf_id]; 93c0c050c5SMichael Chan 94c0c050c5SMichael Chan memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN); 95c0c050c5SMichael Chan ivi->max_tx_rate = vf->max_tx_rate; 96c0c050c5SMichael Chan ivi->min_tx_rate = vf->min_tx_rate; 97c0c050c5SMichael Chan ivi->vlan = vf->vlan; 98c0c050c5SMichael Chan ivi->qos = vf->flags & BNXT_VF_QOS; 99c0c050c5SMichael Chan ivi->spoofchk = vf->flags & BNXT_VF_SPOOFCHK; 100c0c050c5SMichael Chan if (!(vf->flags & BNXT_VF_LINK_FORCED)) 101c0c050c5SMichael Chan ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; 102c0c050c5SMichael Chan else if (vf->flags & BNXT_VF_LINK_UP) 103c0c050c5SMichael Chan ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; 104c0c050c5SMichael Chan else 105c0c050c5SMichael Chan ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; 106c0c050c5SMichael Chan 107c0c050c5SMichael Chan return 0; 108c0c050c5SMichael Chan } 109c0c050c5SMichael Chan 110c0c050c5SMichael Chan int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac) 111c0c050c5SMichael Chan { 112c0c050c5SMichael Chan struct hwrm_func_cfg_input req = {0}; 113c0c050c5SMichael Chan struct bnxt *bp = netdev_priv(dev); 114c0c050c5SMichael Chan struct bnxt_vf_info *vf; 115c0c050c5SMichael Chan int rc; 116c0c050c5SMichael Chan 117c0c050c5SMichael Chan rc = bnxt_vf_ndo_prep(bp, vf_id); 118c0c050c5SMichael Chan if (rc) 119c0c050c5SMichael Chan return rc; 120c0c050c5SMichael Chan /* reject bc or mc mac addr, zero mac addr means allow 121c0c050c5SMichael Chan * VF to use its own mac addr 122c0c050c5SMichael Chan */ 123c0c050c5SMichael Chan if (is_multicast_ether_addr(mac)) { 124c0c050c5SMichael Chan netdev_err(dev, "Invalid VF ethernet address\n"); 125c0c050c5SMichael Chan return -EINVAL; 126c0c050c5SMichael Chan } 127c0c050c5SMichael Chan vf = &bp->pf.vf[vf_id]; 128c0c050c5SMichael Chan 129c0c050c5SMichael Chan memcpy(vf->mac_addr, mac, ETH_ALEN); 130c0c050c5SMichael Chan bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 131c193554eSMichael Chan req.fid = cpu_to_le16(vf->fw_fid); 132c0c050c5SMichael Chan req.flags = cpu_to_le32(vf->func_flags); 133c0c050c5SMichael Chan req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR); 134c0c050c5SMichael Chan memcpy(req.dflt_mac_addr, mac, ETH_ALEN); 135c0c050c5SMichael Chan return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 136c0c050c5SMichael Chan } 137c0c050c5SMichael Chan 138c0c050c5SMichael Chan int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos) 139c0c050c5SMichael Chan { 140c0c050c5SMichael Chan struct hwrm_func_cfg_input req = {0}; 141c0c050c5SMichael Chan struct bnxt *bp = netdev_priv(dev); 142c0c050c5SMichael Chan struct bnxt_vf_info *vf; 143c0c050c5SMichael Chan u16 vlan_tag; 144c0c050c5SMichael Chan int rc; 145c0c050c5SMichael Chan 146c0c050c5SMichael Chan rc = bnxt_vf_ndo_prep(bp, vf_id); 147c0c050c5SMichael Chan if (rc) 148c0c050c5SMichael Chan return rc; 149c0c050c5SMichael Chan 150c0c050c5SMichael Chan /* TODO: needed to implement proper handling of user priority, 151c0c050c5SMichael Chan * currently fail the command if there is valid priority 152c0c050c5SMichael Chan */ 153c0c050c5SMichael Chan if (vlan_id > 4095 || qos) 154c0c050c5SMichael Chan return -EINVAL; 155c0c050c5SMichael Chan 156c0c050c5SMichael Chan vf = &bp->pf.vf[vf_id]; 157c0c050c5SMichael Chan vlan_tag = vlan_id; 158c0c050c5SMichael Chan if (vlan_tag == vf->vlan) 159c0c050c5SMichael Chan return 0; 160c0c050c5SMichael Chan 161c0c050c5SMichael Chan bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 162c193554eSMichael Chan req.fid = cpu_to_le16(vf->fw_fid); 163c0c050c5SMichael Chan req.flags = cpu_to_le32(vf->func_flags); 164c0c050c5SMichael Chan req.dflt_vlan = cpu_to_le16(vlan_tag); 165c0c050c5SMichael Chan req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN); 166c0c050c5SMichael Chan rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 167c0c050c5SMichael Chan if (!rc) 168c0c050c5SMichael Chan vf->vlan = vlan_tag; 169c0c050c5SMichael Chan return rc; 170c0c050c5SMichael Chan } 171c0c050c5SMichael Chan 172c0c050c5SMichael Chan int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate, 173c0c050c5SMichael Chan int max_tx_rate) 174c0c050c5SMichael Chan { 175c0c050c5SMichael Chan struct hwrm_func_cfg_input req = {0}; 176c0c050c5SMichael Chan struct bnxt *bp = netdev_priv(dev); 177c0c050c5SMichael Chan struct bnxt_vf_info *vf; 178c0c050c5SMichael Chan u32 pf_link_speed; 179c0c050c5SMichael Chan int rc; 180c0c050c5SMichael Chan 181c0c050c5SMichael Chan rc = bnxt_vf_ndo_prep(bp, vf_id); 182c0c050c5SMichael Chan if (rc) 183c0c050c5SMichael Chan return rc; 184c0c050c5SMichael Chan 185c0c050c5SMichael Chan vf = &bp->pf.vf[vf_id]; 186c0c050c5SMichael Chan pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); 187c0c050c5SMichael Chan if (max_tx_rate > pf_link_speed) { 188c0c050c5SMichael Chan netdev_info(bp->dev, "max tx rate %d exceed PF link speed for VF %d\n", 189c0c050c5SMichael Chan max_tx_rate, vf_id); 190c0c050c5SMichael Chan return -EINVAL; 191c0c050c5SMichael Chan } 192c0c050c5SMichael Chan 193c0c050c5SMichael Chan if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) { 194c0c050c5SMichael Chan netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n", 195c0c050c5SMichael Chan min_tx_rate, vf_id); 196c0c050c5SMichael Chan return -EINVAL; 197c0c050c5SMichael Chan } 198c0c050c5SMichael Chan if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate) 199c0c050c5SMichael Chan return 0; 200c0c050c5SMichael Chan bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 201c193554eSMichael Chan req.fid = cpu_to_le16(vf->fw_fid); 202c0c050c5SMichael Chan req.flags = cpu_to_le32(vf->func_flags); 203c0c050c5SMichael Chan req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW); 204c0c050c5SMichael Chan req.max_bw = cpu_to_le32(max_tx_rate); 205c0c050c5SMichael Chan req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW); 206c0c050c5SMichael Chan req.min_bw = cpu_to_le32(min_tx_rate); 207c0c050c5SMichael Chan rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 208c0c050c5SMichael Chan if (!rc) { 209c0c050c5SMichael Chan vf->min_tx_rate = min_tx_rate; 210c0c050c5SMichael Chan vf->max_tx_rate = max_tx_rate; 211c0c050c5SMichael Chan } 212c0c050c5SMichael Chan return rc; 213c0c050c5SMichael Chan } 214c0c050c5SMichael Chan 215c0c050c5SMichael Chan int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link) 216c0c050c5SMichael Chan { 217c0c050c5SMichael Chan struct bnxt *bp = netdev_priv(dev); 218c0c050c5SMichael Chan struct bnxt_vf_info *vf; 219c0c050c5SMichael Chan int rc; 220c0c050c5SMichael Chan 221c0c050c5SMichael Chan rc = bnxt_vf_ndo_prep(bp, vf_id); 222c0c050c5SMichael Chan if (rc) 223c0c050c5SMichael Chan return rc; 224c0c050c5SMichael Chan 225c0c050c5SMichael Chan vf = &bp->pf.vf[vf_id]; 226c0c050c5SMichael Chan 227c0c050c5SMichael Chan vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED); 228c0c050c5SMichael Chan switch (link) { 229c0c050c5SMichael Chan case IFLA_VF_LINK_STATE_AUTO: 230c0c050c5SMichael Chan vf->flags |= BNXT_VF_LINK_UP; 231c0c050c5SMichael Chan break; 232c0c050c5SMichael Chan case IFLA_VF_LINK_STATE_DISABLE: 233c0c050c5SMichael Chan vf->flags |= BNXT_VF_LINK_FORCED; 234c0c050c5SMichael Chan break; 235c0c050c5SMichael Chan case IFLA_VF_LINK_STATE_ENABLE: 236c0c050c5SMichael Chan vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED; 237c0c050c5SMichael Chan break; 238c0c050c5SMichael Chan default: 239c0c050c5SMichael Chan netdev_err(bp->dev, "Invalid link option\n"); 240c0c050c5SMichael Chan rc = -EINVAL; 241c0c050c5SMichael Chan break; 242c0c050c5SMichael Chan } 243c0c050c5SMichael Chan /* CHIMP TODO: send msg to VF to update new link state */ 244c0c050c5SMichael Chan 245c0c050c5SMichael Chan return rc; 246c0c050c5SMichael Chan } 247c0c050c5SMichael Chan 248c0c050c5SMichael Chan static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs) 249c0c050c5SMichael Chan { 250c0c050c5SMichael Chan int i; 251c0c050c5SMichael Chan struct bnxt_vf_info *vf; 252c0c050c5SMichael Chan 253c0c050c5SMichael Chan for (i = 0; i < num_vfs; i++) { 254c0c050c5SMichael Chan vf = &bp->pf.vf[i]; 255c0c050c5SMichael Chan memset(vf, 0, sizeof(*vf)); 256c0c050c5SMichael Chan vf->flags = BNXT_VF_QOS | BNXT_VF_LINK_UP; 257c0c050c5SMichael Chan } 258c0c050c5SMichael Chan return 0; 259c0c050c5SMichael Chan } 260c0c050c5SMichael Chan 2614bb6cdceSJeffrey Huang static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs) 262c0c050c5SMichael Chan { 263c0c050c5SMichael Chan int i, rc = 0; 264c0c050c5SMichael Chan struct bnxt_pf_info *pf = &bp->pf; 265c0c050c5SMichael Chan struct hwrm_func_vf_resc_free_input req = {0}; 266c0c050c5SMichael Chan 267c0c050c5SMichael Chan bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1); 268c0c050c5SMichael Chan 269c0c050c5SMichael Chan mutex_lock(&bp->hwrm_cmd_lock); 2704bb6cdceSJeffrey Huang for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) { 271c0c050c5SMichael Chan req.vf_id = cpu_to_le16(i); 272c0c050c5SMichael Chan rc = _hwrm_send_message(bp, &req, sizeof(req), 273c0c050c5SMichael Chan HWRM_CMD_TIMEOUT); 274c0c050c5SMichael Chan if (rc) 275c0c050c5SMichael Chan break; 276c0c050c5SMichael Chan } 277c0c050c5SMichael Chan mutex_unlock(&bp->hwrm_cmd_lock); 278c0c050c5SMichael Chan return rc; 279c0c050c5SMichael Chan } 280c0c050c5SMichael Chan 281c0c050c5SMichael Chan static void bnxt_free_vf_resources(struct bnxt *bp) 282c0c050c5SMichael Chan { 283c0c050c5SMichael Chan struct pci_dev *pdev = bp->pdev; 284c0c050c5SMichael Chan int i; 285c0c050c5SMichael Chan 286c0c050c5SMichael Chan kfree(bp->pf.vf_event_bmap); 287c0c050c5SMichael Chan bp->pf.vf_event_bmap = NULL; 288c0c050c5SMichael Chan 289c0c050c5SMichael Chan for (i = 0; i < 4; i++) { 290c0c050c5SMichael Chan if (bp->pf.hwrm_cmd_req_addr[i]) { 291c0c050c5SMichael Chan dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE, 292c0c050c5SMichael Chan bp->pf.hwrm_cmd_req_addr[i], 293c0c050c5SMichael Chan bp->pf.hwrm_cmd_req_dma_addr[i]); 294c0c050c5SMichael Chan bp->pf.hwrm_cmd_req_addr[i] = NULL; 295c0c050c5SMichael Chan } 296c0c050c5SMichael Chan } 297c0c050c5SMichael Chan 298c0c050c5SMichael Chan kfree(bp->pf.vf); 299c0c050c5SMichael Chan bp->pf.vf = NULL; 300c0c050c5SMichael Chan } 301c0c050c5SMichael Chan 302c0c050c5SMichael Chan static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs) 303c0c050c5SMichael Chan { 304c0c050c5SMichael Chan struct pci_dev *pdev = bp->pdev; 305c0c050c5SMichael Chan u32 nr_pages, size, i, j, k = 0; 306c0c050c5SMichael Chan 307c0c050c5SMichael Chan bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL); 308c0c050c5SMichael Chan if (!bp->pf.vf) 309c0c050c5SMichael Chan return -ENOMEM; 310c0c050c5SMichael Chan 311c0c050c5SMichael Chan bnxt_set_vf_attr(bp, num_vfs); 312c0c050c5SMichael Chan 313c0c050c5SMichael Chan size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE; 314c0c050c5SMichael Chan nr_pages = size / BNXT_PAGE_SIZE; 315c0c050c5SMichael Chan if (size & (BNXT_PAGE_SIZE - 1)) 316c0c050c5SMichael Chan nr_pages++; 317c0c050c5SMichael Chan 318c0c050c5SMichael Chan for (i = 0; i < nr_pages; i++) { 319c0c050c5SMichael Chan bp->pf.hwrm_cmd_req_addr[i] = 320c0c050c5SMichael Chan dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE, 321c0c050c5SMichael Chan &bp->pf.hwrm_cmd_req_dma_addr[i], 322c0c050c5SMichael Chan GFP_KERNEL); 323c0c050c5SMichael Chan 324c0c050c5SMichael Chan if (!bp->pf.hwrm_cmd_req_addr[i]) 325c0c050c5SMichael Chan return -ENOMEM; 326c0c050c5SMichael Chan 327c0c050c5SMichael Chan for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) { 328c0c050c5SMichael Chan struct bnxt_vf_info *vf = &bp->pf.vf[k]; 329c0c050c5SMichael Chan 330c0c050c5SMichael Chan vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] + 331c0c050c5SMichael Chan j * BNXT_HWRM_REQ_MAX_SIZE; 332c0c050c5SMichael Chan vf->hwrm_cmd_req_dma_addr = 333c0c050c5SMichael Chan bp->pf.hwrm_cmd_req_dma_addr[i] + j * 334c0c050c5SMichael Chan BNXT_HWRM_REQ_MAX_SIZE; 335c0c050c5SMichael Chan k++; 336c0c050c5SMichael Chan } 337c0c050c5SMichael Chan } 338c0c050c5SMichael Chan 339c0c050c5SMichael Chan /* Max 128 VF's */ 340c0c050c5SMichael Chan bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL); 341c0c050c5SMichael Chan if (!bp->pf.vf_event_bmap) 342c0c050c5SMichael Chan return -ENOMEM; 343c0c050c5SMichael Chan 344c0c050c5SMichael Chan bp->pf.hwrm_cmd_req_pages = nr_pages; 345c0c050c5SMichael Chan return 0; 346c0c050c5SMichael Chan } 347c0c050c5SMichael Chan 348c0c050c5SMichael Chan static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp) 349c0c050c5SMichael Chan { 350c0c050c5SMichael Chan struct hwrm_func_buf_rgtr_input req = {0}; 351c0c050c5SMichael Chan 352c0c050c5SMichael Chan bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1); 353c0c050c5SMichael Chan 354c0c050c5SMichael Chan req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages); 355c0c050c5SMichael Chan req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT); 356c0c050c5SMichael Chan req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE); 357c0c050c5SMichael Chan req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]); 358c0c050c5SMichael Chan req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]); 359c0c050c5SMichael Chan req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]); 360c0c050c5SMichael Chan req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]); 361c0c050c5SMichael Chan 362c0c050c5SMichael Chan return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 363c0c050c5SMichael Chan } 364c0c050c5SMichael Chan 365c0c050c5SMichael Chan /* only call by PF to reserve resources for VF */ 36692268c32SMichael Chan static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) 367c0c050c5SMichael Chan { 368c0c050c5SMichael Chan u32 rc = 0, mtu, i; 369c0c050c5SMichael Chan u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics; 370b72d4a68SMichael Chan u16 vf_ring_grps; 371c0c050c5SMichael Chan struct hwrm_func_cfg_input req = {0}; 372c0c050c5SMichael Chan struct bnxt_pf_info *pf = &bp->pf; 373c0c050c5SMichael Chan 374c0c050c5SMichael Chan bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 375c0c050c5SMichael Chan 376c0c050c5SMichael Chan /* Remaining rings are distributed equally amongs VF's for now */ 377c0c050c5SMichael Chan /* TODO: the following workaroud is needed to restrict total number 378c0c050c5SMichael Chan * of vf_cp_rings not exceed number of HW ring groups. This WA should 379c0c050c5SMichael Chan * be removed once new HWRM provides HW ring groups capability in 380c0c050c5SMichael Chan * hwrm_func_qcap. 381c0c050c5SMichael Chan */ 38292268c32SMichael Chan vf_cp_rings = min_t(u16, pf->max_cp_rings, pf->max_stat_ctxs); 38392268c32SMichael Chan vf_cp_rings = (vf_cp_rings - bp->cp_nr_rings) / num_vfs; 384c0c050c5SMichael Chan /* TODO: restore this logic below once the WA above is removed */ 38592268c32SMichael Chan /* vf_cp_rings = (pf->max_cp_rings - bp->cp_nr_rings) / num_vfs; */ 38692268c32SMichael Chan vf_stat_ctx = (pf->max_stat_ctxs - bp->num_stat_ctxs) / num_vfs; 387c0c050c5SMichael Chan if (bp->flags & BNXT_FLAG_AGG_RINGS) 38892268c32SMichael Chan vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings * 2) / 38992268c32SMichael Chan num_vfs; 390c0c050c5SMichael Chan else 39192268c32SMichael Chan vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings) / num_vfs; 392b72d4a68SMichael Chan vf_ring_grps = (bp->pf.max_hw_ring_grps - bp->rx_nr_rings) / num_vfs; 39392268c32SMichael Chan vf_tx_rings = (pf->max_tx_rings - bp->tx_nr_rings) / num_vfs; 394c0c050c5SMichael Chan 395c0c050c5SMichael Chan req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU | 396c0c050c5SMichael Chan FUNC_CFG_REQ_ENABLES_MRU | 397c0c050c5SMichael Chan FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS | 398c0c050c5SMichael Chan FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS | 399c0c050c5SMichael Chan FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS | 400c0c050c5SMichael Chan FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS | 401c0c050c5SMichael Chan FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS | 402c0c050c5SMichael Chan FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS | 403b72d4a68SMichael Chan FUNC_CFG_REQ_ENABLES_NUM_VNICS | 404b72d4a68SMichael Chan FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS); 405c0c050c5SMichael Chan 406c0c050c5SMichael Chan mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; 407c0c050c5SMichael Chan req.mru = cpu_to_le16(mtu); 408c0c050c5SMichael Chan req.mtu = cpu_to_le16(mtu); 409c0c050c5SMichael Chan 410c0c050c5SMichael Chan req.num_rsscos_ctxs = cpu_to_le16(1); 411c0c050c5SMichael Chan req.num_cmpl_rings = cpu_to_le16(vf_cp_rings); 412c0c050c5SMichael Chan req.num_tx_rings = cpu_to_le16(vf_tx_rings); 413c0c050c5SMichael Chan req.num_rx_rings = cpu_to_le16(vf_rx_rings); 414b72d4a68SMichael Chan req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps); 415c0c050c5SMichael Chan req.num_l2_ctxs = cpu_to_le16(4); 416c0c050c5SMichael Chan vf_vnics = 1; 417c0c050c5SMichael Chan 418c0c050c5SMichael Chan req.num_vnics = cpu_to_le16(vf_vnics); 419c0c050c5SMichael Chan /* FIXME spec currently uses 1 bit for stats ctx */ 420c0c050c5SMichael Chan req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx); 421c0c050c5SMichael Chan 422c0c050c5SMichael Chan mutex_lock(&bp->hwrm_cmd_lock); 42392268c32SMichael Chan for (i = 0; i < num_vfs; i++) { 424c193554eSMichael Chan req.fid = cpu_to_le16(pf->first_vf_id + i); 425c0c050c5SMichael Chan rc = _hwrm_send_message(bp, &req, sizeof(req), 426c0c050c5SMichael Chan HWRM_CMD_TIMEOUT); 427c0c050c5SMichael Chan if (rc) 428c0c050c5SMichael Chan break; 42992268c32SMichael Chan pf->active_vfs = i + 1; 430c193554eSMichael Chan pf->vf[i].fw_fid = le16_to_cpu(req.fid); 431c0c050c5SMichael Chan } 432c0c050c5SMichael Chan mutex_unlock(&bp->hwrm_cmd_lock); 433c0c050c5SMichael Chan if (!rc) { 4344a21b49bSMichael Chan pf->max_tx_rings -= vf_tx_rings * num_vfs; 4354a21b49bSMichael Chan pf->max_rx_rings -= vf_rx_rings * num_vfs; 436b72d4a68SMichael Chan pf->max_hw_ring_grps -= vf_ring_grps * num_vfs; 4374a21b49bSMichael Chan pf->max_cp_rings -= vf_cp_rings * num_vfs; 4384a21b49bSMichael Chan pf->max_rsscos_ctxs -= num_vfs; 4394a21b49bSMichael Chan pf->max_stat_ctxs -= vf_stat_ctx * num_vfs; 4404a21b49bSMichael Chan pf->max_vnics -= vf_vnics * num_vfs; 441c0c050c5SMichael Chan } 442c0c050c5SMichael Chan return rc; 443c0c050c5SMichael Chan } 444c0c050c5SMichael Chan 445c0c050c5SMichael Chan static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) 446c0c050c5SMichael Chan { 447c0c050c5SMichael Chan int rc = 0, vfs_supported; 448c0c050c5SMichael Chan int min_rx_rings, min_tx_rings, min_rss_ctxs; 449c0c050c5SMichael Chan int tx_ok = 0, rx_ok = 0, rss_ok = 0; 450c0c050c5SMichael Chan 451c0c050c5SMichael Chan /* Check if we can enable requested num of vf's. At a mininum 452c0c050c5SMichael Chan * we require 1 RX 1 TX rings for each VF. In this minimum conf 453c0c050c5SMichael Chan * features like TPA will not be available. 454c0c050c5SMichael Chan */ 455c0c050c5SMichael Chan vfs_supported = *num_vfs; 456c0c050c5SMichael Chan 457c0c050c5SMichael Chan while (vfs_supported) { 458c0c050c5SMichael Chan min_rx_rings = vfs_supported; 459c0c050c5SMichael Chan min_tx_rings = vfs_supported; 460c0c050c5SMichael Chan min_rss_ctxs = vfs_supported; 461c0c050c5SMichael Chan 462c0c050c5SMichael Chan if (bp->flags & BNXT_FLAG_AGG_RINGS) { 463c0c050c5SMichael Chan if (bp->pf.max_rx_rings - bp->rx_nr_rings * 2 >= 464c0c050c5SMichael Chan min_rx_rings) 465c0c050c5SMichael Chan rx_ok = 1; 466c0c050c5SMichael Chan } else { 467c0c050c5SMichael Chan if (bp->pf.max_rx_rings - bp->rx_nr_rings >= 468c0c050c5SMichael Chan min_rx_rings) 469c0c050c5SMichael Chan rx_ok = 1; 470c0c050c5SMichael Chan } 471c0c050c5SMichael Chan 472c0c050c5SMichael Chan if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings) 473c0c050c5SMichael Chan tx_ok = 1; 474c0c050c5SMichael Chan 475c0c050c5SMichael Chan if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs) 476c0c050c5SMichael Chan rss_ok = 1; 477c0c050c5SMichael Chan 478c0c050c5SMichael Chan if (tx_ok && rx_ok && rss_ok) 479c0c050c5SMichael Chan break; 480c0c050c5SMichael Chan 481c0c050c5SMichael Chan vfs_supported--; 482c0c050c5SMichael Chan } 483c0c050c5SMichael Chan 484c0c050c5SMichael Chan if (!vfs_supported) { 485c0c050c5SMichael Chan netdev_err(bp->dev, "Cannot enable VF's as all resources are used by PF\n"); 486c0c050c5SMichael Chan return -EINVAL; 487c0c050c5SMichael Chan } 488c0c050c5SMichael Chan 489c0c050c5SMichael Chan if (vfs_supported != *num_vfs) { 490c0c050c5SMichael Chan netdev_info(bp->dev, "Requested VFs %d, can enable %d\n", 491c0c050c5SMichael Chan *num_vfs, vfs_supported); 492c0c050c5SMichael Chan *num_vfs = vfs_supported; 493c0c050c5SMichael Chan } 494c0c050c5SMichael Chan 495c0c050c5SMichael Chan rc = bnxt_alloc_vf_resources(bp, *num_vfs); 496c0c050c5SMichael Chan if (rc) 497c0c050c5SMichael Chan goto err_out1; 498c0c050c5SMichael Chan 499c0c050c5SMichael Chan /* Reserve resources for VFs */ 50092268c32SMichael Chan rc = bnxt_hwrm_func_cfg(bp, *num_vfs); 501c0c050c5SMichael Chan if (rc) 502c0c050c5SMichael Chan goto err_out2; 503c0c050c5SMichael Chan 504c0c050c5SMichael Chan /* Register buffers for VFs */ 505c0c050c5SMichael Chan rc = bnxt_hwrm_func_buf_rgtr(bp); 506c0c050c5SMichael Chan if (rc) 507c0c050c5SMichael Chan goto err_out2; 508c0c050c5SMichael Chan 509c0c050c5SMichael Chan rc = pci_enable_sriov(bp->pdev, *num_vfs); 510c0c050c5SMichael Chan if (rc) 511c0c050c5SMichael Chan goto err_out2; 512c0c050c5SMichael Chan 513c0c050c5SMichael Chan return 0; 514c0c050c5SMichael Chan 515c0c050c5SMichael Chan err_out2: 516c0c050c5SMichael Chan /* Free the resources reserved for various VF's */ 5174bb6cdceSJeffrey Huang bnxt_hwrm_func_vf_resource_free(bp, *num_vfs); 518c0c050c5SMichael Chan 519c0c050c5SMichael Chan err_out1: 520c0c050c5SMichael Chan bnxt_free_vf_resources(bp); 521c0c050c5SMichael Chan 522c0c050c5SMichael Chan return rc; 523c0c050c5SMichael Chan } 524c0c050c5SMichael Chan 52519241368SJeffrey Huang static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp, 52619241368SJeffrey Huang struct bnxt_vf_info *vf, 52719241368SJeffrey Huang u16 event_id) 52819241368SJeffrey Huang { 52919241368SJeffrey Huang int rc = 0; 53019241368SJeffrey Huang struct hwrm_fwd_async_event_cmpl_input req = {0}; 53119241368SJeffrey Huang struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr; 53219241368SJeffrey Huang struct hwrm_async_event_cmpl *async_cmpl; 53319241368SJeffrey Huang 53419241368SJeffrey Huang bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1); 53519241368SJeffrey Huang if (vf) 53619241368SJeffrey Huang req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid); 53719241368SJeffrey Huang else 53819241368SJeffrey Huang /* broadcast this async event to all VFs */ 53919241368SJeffrey Huang req.encap_async_event_target_id = cpu_to_le16(0xffff); 54019241368SJeffrey Huang async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl; 54119241368SJeffrey Huang async_cmpl->type = 54219241368SJeffrey Huang cpu_to_le16(HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT); 54319241368SJeffrey Huang async_cmpl->event_id = cpu_to_le16(event_id); 54419241368SJeffrey Huang 54519241368SJeffrey Huang mutex_lock(&bp->hwrm_cmd_lock); 54619241368SJeffrey Huang rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 54719241368SJeffrey Huang 54819241368SJeffrey Huang if (rc) { 54919241368SJeffrey Huang netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n", 55019241368SJeffrey Huang rc); 55119241368SJeffrey Huang goto fwd_async_event_cmpl_exit; 55219241368SJeffrey Huang } 55319241368SJeffrey Huang 55419241368SJeffrey Huang if (resp->error_code) { 55519241368SJeffrey Huang netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n", 55619241368SJeffrey Huang resp->error_code); 55719241368SJeffrey Huang rc = -1; 55819241368SJeffrey Huang } 55919241368SJeffrey Huang 56019241368SJeffrey Huang fwd_async_event_cmpl_exit: 56119241368SJeffrey Huang mutex_unlock(&bp->hwrm_cmd_lock); 56219241368SJeffrey Huang return rc; 56319241368SJeffrey Huang } 56419241368SJeffrey Huang 565c0c050c5SMichael Chan void bnxt_sriov_disable(struct bnxt *bp) 566c0c050c5SMichael Chan { 5674bb6cdceSJeffrey Huang u16 num_vfs = pci_num_vf(bp->pdev); 5684bb6cdceSJeffrey Huang 5694bb6cdceSJeffrey Huang if (!num_vfs) 570c0c050c5SMichael Chan return; 571c0c050c5SMichael Chan 5724bb6cdceSJeffrey Huang if (pci_vfs_assigned(bp->pdev)) { 57319241368SJeffrey Huang bnxt_hwrm_fwd_async_event_cmpl( 57419241368SJeffrey Huang bp, NULL, 57519241368SJeffrey Huang HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD); 5764bb6cdceSJeffrey Huang netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n", 5774bb6cdceSJeffrey Huang num_vfs); 5784bb6cdceSJeffrey Huang } else { 579c0c050c5SMichael Chan pci_disable_sriov(bp->pdev); 5804bb6cdceSJeffrey Huang /* Free the HW resources reserved for various VF's */ 5814bb6cdceSJeffrey Huang bnxt_hwrm_func_vf_resource_free(bp, num_vfs); 5824bb6cdceSJeffrey Huang } 583c0c050c5SMichael Chan 584c0c050c5SMichael Chan bnxt_free_vf_resources(bp); 585c0c050c5SMichael Chan 586c0c050c5SMichael Chan bp->pf.active_vfs = 0; 5874a21b49bSMichael Chan /* Reclaim all resources for the PF. */ 5884a21b49bSMichael Chan bnxt_hwrm_func_qcaps(bp); 589c0c050c5SMichael Chan } 590c0c050c5SMichael Chan 591c0c050c5SMichael Chan int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs) 592c0c050c5SMichael Chan { 593c0c050c5SMichael Chan struct net_device *dev = pci_get_drvdata(pdev); 594c0c050c5SMichael Chan struct bnxt *bp = netdev_priv(dev); 595c0c050c5SMichael Chan 596c0c050c5SMichael Chan if (!(bp->flags & BNXT_FLAG_USING_MSIX)) { 597c0c050c5SMichael Chan netdev_warn(dev, "Not allow SRIOV if the irq mode is not MSIX\n"); 598c0c050c5SMichael Chan return 0; 599c0c050c5SMichael Chan } 600c0c050c5SMichael Chan 601c0c050c5SMichael Chan rtnl_lock(); 602c0c050c5SMichael Chan if (!netif_running(dev)) { 603c0c050c5SMichael Chan netdev_warn(dev, "Reject SRIOV config request since if is down!\n"); 604c0c050c5SMichael Chan rtnl_unlock(); 605c0c050c5SMichael Chan return 0; 606c0c050c5SMichael Chan } 607c0c050c5SMichael Chan bp->sriov_cfg = true; 608c0c050c5SMichael Chan rtnl_unlock(); 6094bb6cdceSJeffrey Huang 6104bb6cdceSJeffrey Huang if (pci_vfs_assigned(bp->pdev)) { 6114bb6cdceSJeffrey Huang netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n"); 6124bb6cdceSJeffrey Huang num_vfs = 0; 6134bb6cdceSJeffrey Huang goto sriov_cfg_exit; 614c0c050c5SMichael Chan } 615c0c050c5SMichael Chan 616c0c050c5SMichael Chan /* Check if enabled VFs is same as requested */ 6174bb6cdceSJeffrey Huang if (num_vfs && num_vfs == bp->pf.active_vfs) 6184bb6cdceSJeffrey Huang goto sriov_cfg_exit; 6194bb6cdceSJeffrey Huang 6204bb6cdceSJeffrey Huang /* if there are previous existing VFs, clean them up */ 6214bb6cdceSJeffrey Huang bnxt_sriov_disable(bp); 6224bb6cdceSJeffrey Huang if (!num_vfs) 6234bb6cdceSJeffrey Huang goto sriov_cfg_exit; 624c0c050c5SMichael Chan 625c0c050c5SMichael Chan bnxt_sriov_enable(bp, &num_vfs); 626c0c050c5SMichael Chan 6274bb6cdceSJeffrey Huang sriov_cfg_exit: 628c0c050c5SMichael Chan bp->sriov_cfg = false; 629c0c050c5SMichael Chan wake_up(&bp->sriov_cfg_wait); 630c0c050c5SMichael Chan 631c0c050c5SMichael Chan return num_vfs; 632c0c050c5SMichael Chan } 633c0c050c5SMichael Chan 634c0c050c5SMichael Chan static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, 635c0c050c5SMichael Chan void *encap_resp, __le64 encap_resp_addr, 636c0c050c5SMichael Chan __le16 encap_resp_cpr, u32 msg_size) 637c0c050c5SMichael Chan { 638c0c050c5SMichael Chan int rc = 0; 639c0c050c5SMichael Chan struct hwrm_fwd_resp_input req = {0}; 640c0c050c5SMichael Chan struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr; 641c0c050c5SMichael Chan 642c0c050c5SMichael Chan bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1); 643c0c050c5SMichael Chan 644c0c050c5SMichael Chan /* Set the new target id */ 645c0c050c5SMichael Chan req.target_id = cpu_to_le16(vf->fw_fid); 646c193554eSMichael Chan req.encap_resp_target_id = cpu_to_le16(vf->fw_fid); 647c0c050c5SMichael Chan req.encap_resp_len = cpu_to_le16(msg_size); 648c0c050c5SMichael Chan req.encap_resp_addr = encap_resp_addr; 649c0c050c5SMichael Chan req.encap_resp_cmpl_ring = encap_resp_cpr; 650c0c050c5SMichael Chan memcpy(req.encap_resp, encap_resp, msg_size); 651c0c050c5SMichael Chan 652c0c050c5SMichael Chan mutex_lock(&bp->hwrm_cmd_lock); 653c0c050c5SMichael Chan rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 654c0c050c5SMichael Chan 655c0c050c5SMichael Chan if (rc) { 656c0c050c5SMichael Chan netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc); 657c0c050c5SMichael Chan goto fwd_resp_exit; 658c0c050c5SMichael Chan } 659c0c050c5SMichael Chan 660c0c050c5SMichael Chan if (resp->error_code) { 661c0c050c5SMichael Chan netdev_err(bp->dev, "hwrm_fwd_resp error %d\n", 662c0c050c5SMichael Chan resp->error_code); 663c0c050c5SMichael Chan rc = -1; 664c0c050c5SMichael Chan } 665c0c050c5SMichael Chan 666c0c050c5SMichael Chan fwd_resp_exit: 667c0c050c5SMichael Chan mutex_unlock(&bp->hwrm_cmd_lock); 668c0c050c5SMichael Chan return rc; 669c0c050c5SMichael Chan } 670c0c050c5SMichael Chan 671c0c050c5SMichael Chan static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf, 672c0c050c5SMichael Chan u32 msg_size) 673c0c050c5SMichael Chan { 674c0c050c5SMichael Chan int rc = 0; 675c0c050c5SMichael Chan struct hwrm_reject_fwd_resp_input req = {0}; 676c0c050c5SMichael Chan struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr; 677c0c050c5SMichael Chan 678c0c050c5SMichael Chan bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1); 679c0c050c5SMichael Chan /* Set the new target id */ 680c0c050c5SMichael Chan req.target_id = cpu_to_le16(vf->fw_fid); 681c193554eSMichael Chan req.encap_resp_target_id = cpu_to_le16(vf->fw_fid); 682c0c050c5SMichael Chan memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size); 683c0c050c5SMichael Chan 684c0c050c5SMichael Chan mutex_lock(&bp->hwrm_cmd_lock); 685c0c050c5SMichael Chan rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 686c0c050c5SMichael Chan 687c0c050c5SMichael Chan if (rc) { 688c0c050c5SMichael Chan netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc); 689c0c050c5SMichael Chan goto fwd_err_resp_exit; 690c0c050c5SMichael Chan } 691c0c050c5SMichael Chan 692c0c050c5SMichael Chan if (resp->error_code) { 693c0c050c5SMichael Chan netdev_err(bp->dev, "hwrm_fwd_err_resp error %d\n", 694c0c050c5SMichael Chan resp->error_code); 695c0c050c5SMichael Chan rc = -1; 696c0c050c5SMichael Chan } 697c0c050c5SMichael Chan 698c0c050c5SMichael Chan fwd_err_resp_exit: 699c0c050c5SMichael Chan mutex_unlock(&bp->hwrm_cmd_lock); 700c0c050c5SMichael Chan return rc; 701c0c050c5SMichael Chan } 702c0c050c5SMichael Chan 703c0c050c5SMichael Chan static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, 704c0c050c5SMichael Chan u32 msg_size) 705c0c050c5SMichael Chan { 706c0c050c5SMichael Chan int rc = 0; 707c0c050c5SMichael Chan struct hwrm_exec_fwd_resp_input req = {0}; 708c0c050c5SMichael Chan struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr; 709c0c050c5SMichael Chan 710c0c050c5SMichael Chan bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1); 711c0c050c5SMichael Chan /* Set the new target id */ 712c0c050c5SMichael Chan req.target_id = cpu_to_le16(vf->fw_fid); 713c193554eSMichael Chan req.encap_resp_target_id = cpu_to_le16(vf->fw_fid); 714c0c050c5SMichael Chan memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size); 715c0c050c5SMichael Chan 716c0c050c5SMichael Chan mutex_lock(&bp->hwrm_cmd_lock); 717c0c050c5SMichael Chan rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 718c0c050c5SMichael Chan 719c0c050c5SMichael Chan if (rc) { 720c0c050c5SMichael Chan netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc); 721c0c050c5SMichael Chan goto exec_fwd_resp_exit; 722c0c050c5SMichael Chan } 723c0c050c5SMichael Chan 724c0c050c5SMichael Chan if (resp->error_code) { 725c0c050c5SMichael Chan netdev_err(bp->dev, "hwrm_exec_fw_resp error %d\n", 726c0c050c5SMichael Chan resp->error_code); 727c0c050c5SMichael Chan rc = -1; 728c0c050c5SMichael Chan } 729c0c050c5SMichael Chan 730c0c050c5SMichael Chan exec_fwd_resp_exit: 731c0c050c5SMichael Chan mutex_unlock(&bp->hwrm_cmd_lock); 732c0c050c5SMichael Chan return rc; 733c0c050c5SMichael Chan } 734c0c050c5SMichael Chan 735c0c050c5SMichael Chan static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf) 736c0c050c5SMichael Chan { 737c0c050c5SMichael Chan u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input); 738c0c050c5SMichael Chan struct hwrm_cfa_l2_filter_alloc_input *req = 739c0c050c5SMichael Chan (struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr; 740c0c050c5SMichael Chan 741c0c050c5SMichael Chan if (!is_valid_ether_addr(vf->mac_addr) || 742c0c050c5SMichael Chan ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr)) 743c0c050c5SMichael Chan return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size); 744c0c050c5SMichael Chan else 745c0c050c5SMichael Chan return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size); 746c0c050c5SMichael Chan } 747c0c050c5SMichael Chan 748c0c050c5SMichael Chan static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf) 749c0c050c5SMichael Chan { 750c0c050c5SMichael Chan int rc = 0; 751c0c050c5SMichael Chan 752c0c050c5SMichael Chan if (!(vf->flags & BNXT_VF_LINK_FORCED)) { 753c0c050c5SMichael Chan /* real link */ 754c0c050c5SMichael Chan rc = bnxt_hwrm_exec_fwd_resp( 755c0c050c5SMichael Chan bp, vf, sizeof(struct hwrm_port_phy_qcfg_input)); 756c0c050c5SMichael Chan } else { 757c0c050c5SMichael Chan struct hwrm_port_phy_qcfg_output phy_qcfg_resp; 758c0c050c5SMichael Chan struct hwrm_port_phy_qcfg_input *phy_qcfg_req; 759c0c050c5SMichael Chan 760c0c050c5SMichael Chan phy_qcfg_req = 761c0c050c5SMichael Chan (struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr; 762c0c050c5SMichael Chan mutex_lock(&bp->hwrm_cmd_lock); 763c0c050c5SMichael Chan memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp, 764c0c050c5SMichael Chan sizeof(phy_qcfg_resp)); 765c0c050c5SMichael Chan mutex_unlock(&bp->hwrm_cmd_lock); 766c0c050c5SMichael Chan phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id; 767c0c050c5SMichael Chan 768c0c050c5SMichael Chan if (vf->flags & BNXT_VF_LINK_UP) { 769c0c050c5SMichael Chan /* if physical link is down, force link up on VF */ 770c0c050c5SMichael Chan if (phy_qcfg_resp.link == 771c0c050c5SMichael Chan PORT_PHY_QCFG_RESP_LINK_NO_LINK) { 772c0c050c5SMichael Chan phy_qcfg_resp.link = 773c0c050c5SMichael Chan PORT_PHY_QCFG_RESP_LINK_LINK; 77411f15ed3SMichael Chan phy_qcfg_resp.link_speed = cpu_to_le16( 77511f15ed3SMichael Chan PORT_PHY_QCFG_RESP_LINK_SPEED_10GB); 776c0c050c5SMichael Chan phy_qcfg_resp.duplex = 777c0c050c5SMichael Chan PORT_PHY_QCFG_RESP_DUPLEX_FULL; 778c0c050c5SMichael Chan phy_qcfg_resp.pause = 779c0c050c5SMichael Chan (PORT_PHY_QCFG_RESP_PAUSE_TX | 780c0c050c5SMichael Chan PORT_PHY_QCFG_RESP_PAUSE_RX); 781c0c050c5SMichael Chan } 782c0c050c5SMichael Chan } else { 783c0c050c5SMichael Chan /* force link down */ 784c0c050c5SMichael Chan phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK; 785c0c050c5SMichael Chan phy_qcfg_resp.link_speed = 0; 786c0c050c5SMichael Chan phy_qcfg_resp.duplex = PORT_PHY_QCFG_RESP_DUPLEX_HALF; 787c0c050c5SMichael Chan phy_qcfg_resp.pause = 0; 788c0c050c5SMichael Chan } 789c0c050c5SMichael Chan rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp, 790c0c050c5SMichael Chan phy_qcfg_req->resp_addr, 791c0c050c5SMichael Chan phy_qcfg_req->cmpl_ring, 792c0c050c5SMichael Chan sizeof(phy_qcfg_resp)); 793c0c050c5SMichael Chan } 794c0c050c5SMichael Chan return rc; 795c0c050c5SMichael Chan } 796c0c050c5SMichael Chan 797c0c050c5SMichael Chan static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf) 798c0c050c5SMichael Chan { 799c0c050c5SMichael Chan int rc = 0; 800a8643e16SMichael Chan struct input *encap_req = vf->hwrm_cmd_req_addr; 801a8643e16SMichael Chan u32 req_type = le16_to_cpu(encap_req->req_type); 802c0c050c5SMichael Chan 803c0c050c5SMichael Chan switch (req_type) { 804c0c050c5SMichael Chan case HWRM_CFA_L2_FILTER_ALLOC: 805c0c050c5SMichael Chan rc = bnxt_vf_validate_set_mac(bp, vf); 806c0c050c5SMichael Chan break; 807c0c050c5SMichael Chan case HWRM_FUNC_CFG: 808c0c050c5SMichael Chan /* TODO Validate if VF is allowed to change mac address, 809c0c050c5SMichael Chan * mtu, num of rings etc 810c0c050c5SMichael Chan */ 811c0c050c5SMichael Chan rc = bnxt_hwrm_exec_fwd_resp( 812c0c050c5SMichael Chan bp, vf, sizeof(struct hwrm_func_cfg_input)); 813c0c050c5SMichael Chan break; 814c0c050c5SMichael Chan case HWRM_PORT_PHY_QCFG: 815c0c050c5SMichael Chan rc = bnxt_vf_set_link(bp, vf); 816c0c050c5SMichael Chan break; 817c0c050c5SMichael Chan default: 818c0c050c5SMichael Chan break; 819c0c050c5SMichael Chan } 820c0c050c5SMichael Chan return rc; 821c0c050c5SMichael Chan } 822c0c050c5SMichael Chan 823c0c050c5SMichael Chan void bnxt_hwrm_exec_fwd_req(struct bnxt *bp) 824c0c050c5SMichael Chan { 825c0c050c5SMichael Chan u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id; 826c0c050c5SMichael Chan 827c0c050c5SMichael Chan /* Scan through VF's and process commands */ 828c0c050c5SMichael Chan while (1) { 829c0c050c5SMichael Chan vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i); 830c0c050c5SMichael Chan if (vf_id >= active_vfs) 831c0c050c5SMichael Chan break; 832c0c050c5SMichael Chan 833c0c050c5SMichael Chan clear_bit(vf_id, bp->pf.vf_event_bmap); 834c0c050c5SMichael Chan bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]); 835c0c050c5SMichael Chan i = vf_id + 1; 836c0c050c5SMichael Chan } 837c0c050c5SMichael Chan } 838379a80a1SMichael Chan 839379a80a1SMichael Chan void bnxt_update_vf_mac(struct bnxt *bp) 840379a80a1SMichael Chan { 841379a80a1SMichael Chan struct hwrm_func_qcaps_input req = {0}; 842379a80a1SMichael Chan struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 843379a80a1SMichael Chan 844379a80a1SMichael Chan bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1); 845379a80a1SMichael Chan req.fid = cpu_to_le16(0xffff); 846379a80a1SMichael Chan 847379a80a1SMichael Chan mutex_lock(&bp->hwrm_cmd_lock); 848379a80a1SMichael Chan if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT)) 849379a80a1SMichael Chan goto update_vf_mac_exit; 850379a80a1SMichael Chan 8513874d6a8SJeffrey Huang /* Store MAC address from the firmware. There are 2 cases: 8523874d6a8SJeffrey Huang * 1. MAC address is valid. It is assigned from the PF and we 8533874d6a8SJeffrey Huang * need to override the current VF MAC address with it. 8543874d6a8SJeffrey Huang * 2. MAC address is zero. The VF will use a random MAC address by 8553874d6a8SJeffrey Huang * default but the stored zero MAC will allow the VF user to change 8563874d6a8SJeffrey Huang * the random MAC address using ndo_set_mac_address() if he wants. 8573874d6a8SJeffrey Huang */ 85811f15ed3SMichael Chan if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr)) 85911f15ed3SMichael Chan memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN); 8603874d6a8SJeffrey Huang 8613874d6a8SJeffrey Huang /* overwrite netdev dev_addr with admin VF MAC */ 8623874d6a8SJeffrey Huang if (is_valid_ether_addr(bp->vf.mac_addr)) 863379a80a1SMichael Chan memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN); 864379a80a1SMichael Chan update_vf_mac_exit: 865379a80a1SMichael Chan mutex_unlock(&bp->hwrm_cmd_lock); 866379a80a1SMichael Chan } 867379a80a1SMichael Chan 868*84c33dd3SMichael Chan int bnxt_approve_mac(struct bnxt *bp, u8 *mac) 869*84c33dd3SMichael Chan { 870*84c33dd3SMichael Chan struct hwrm_func_vf_cfg_input req = {0}; 871*84c33dd3SMichael Chan int rc = 0; 872*84c33dd3SMichael Chan 873*84c33dd3SMichael Chan if (!BNXT_VF(bp)) 874*84c33dd3SMichael Chan return 0; 875*84c33dd3SMichael Chan 876*84c33dd3SMichael Chan if (bp->hwrm_spec_code < 0x10202) { 877*84c33dd3SMichael Chan if (is_valid_ether_addr(bp->vf.mac_addr)) 878*84c33dd3SMichael Chan rc = -EADDRNOTAVAIL; 879*84c33dd3SMichael Chan goto mac_done; 880*84c33dd3SMichael Chan } 881*84c33dd3SMichael Chan bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); 882*84c33dd3SMichael Chan req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR); 883*84c33dd3SMichael Chan memcpy(req.dflt_mac_addr, mac, ETH_ALEN); 884*84c33dd3SMichael Chan rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 885*84c33dd3SMichael Chan mac_done: 886*84c33dd3SMichael Chan if (rc) { 887*84c33dd3SMichael Chan rc = -EADDRNOTAVAIL; 888*84c33dd3SMichael Chan netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n", 889*84c33dd3SMichael Chan mac); 890*84c33dd3SMichael Chan } 891*84c33dd3SMichael Chan return rc; 892*84c33dd3SMichael Chan } 893c0c050c5SMichael Chan #else 894c0c050c5SMichael Chan 895c0c050c5SMichael Chan void bnxt_sriov_disable(struct bnxt *bp) 896c0c050c5SMichael Chan { 897c0c050c5SMichael Chan } 898c0c050c5SMichael Chan 899c0c050c5SMichael Chan void bnxt_hwrm_exec_fwd_req(struct bnxt *bp) 900c0c050c5SMichael Chan { 901379a80a1SMichael Chan netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enable\n"); 902379a80a1SMichael Chan } 903379a80a1SMichael Chan 904379a80a1SMichael Chan void bnxt_update_vf_mac(struct bnxt *bp) 905379a80a1SMichael Chan { 906c0c050c5SMichael Chan } 907*84c33dd3SMichael Chan 908*84c33dd3SMichael Chan int bnxt_approve_mac(struct bnxt *bp, u8 *mac) 909*84c33dd3SMichael Chan { 910*84c33dd3SMichael Chan return 0; 911*84c33dd3SMichael Chan } 912c0c050c5SMichael Chan #endif 913