/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2018 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_vfr.h"
#include "bnxt_ethtool.h"

#ifdef CONFIG_BNXT_SRIOV
static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
					  struct bnxt_vf_info *vf, u16 event_id)
{
	struct hwrm_fwd_async_event_cmpl_input req = {0};
	struct hwrm_async_event_cmpl *async_cmpl;
	int rc = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
	if (vf)
		req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
	else
		/* broadcast this async event to all VFs */
		req.encap_async_event_target_id = cpu_to_le16(0xffff);
	async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
	async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
	async_cmpl->event_id = cpu_to_le16(event_id);

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
			   rc);
	return rc;
}

static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
{
	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
		netdev_err(bp->dev, "vf ndo called though PF is down\n");
		return -EINVAL;
	}
	if (!bp->pf.active_vfs) {
		netdev_err(bp->dev, "vf ndo called though sriov is disabled\n");
		return -EINVAL;
	}
	if (vf_id >= bp->pf.active_vfs) {
		netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
		return -EINVAL;
	}
	return 0;
}

int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	bool old_setting = false;
	u32 func_flags;
	int rc;

	if (bp->hwrm_spec_code < 0x10701)
		return -ENOTSUPP;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	if (vf->flags & BNXT_VF_SPOOFCHK)
		old_setting = true;
	if (old_setting == setting)
		return 0;

	if (setting)
		func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
	else
		func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
	/*TODO: if the driver supports VLAN filter on guest VLAN,
	 * the spoof check should also include vlan anti-spoofing
	 */
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(func_flags);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		if (setting)
			vf->flags |= BNXT_VF_SPOOFCHK;
		else
			vf->flags &= ~BNXT_VF_SPOOFCHK;
	}
	return rc;
}
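
/* Query the firmware for this VF's function configuration and cache the
 * flags in vf->func_qcfg_flags so callers (e.g. the trust check) can
 * inspect them.
 */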
static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_qcfg_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		mutex_unlock(&bp->hwrm_cmd_lock);
		return rc;
	}
	vf->func_qcfg_flags = le16_to_cpu(resp->flags);
	mutex_unlock(&bp->hwrm_cmd_lock);
	return 0;
}

static bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
		return !!(vf->flags & BNXT_VF_TRUST);

	bnxt_hwrm_func_qcfg_flags(bp, vf);
	return !!(vf->func_qcfg_flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF);
}

static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	struct hwrm_func_cfg_input req = {0};

	if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	if (vf->flags & BNXT_VF_TRUST)
		req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
	else
		req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;

	if (bnxt_vf_ndo_prep(bp, vf_id))
		return -EINVAL;

	vf = &bp->pf.vf[vf_id];
	if (trusted)
		vf->flags |= BNXT_VF_TRUST;
	else
		vf->flags &= ~BNXT_VF_TRUST;

	bnxt_hwrm_set_trusted_vf(bp, vf);
	return 0;
}

int bnxt_get_vf_config(struct net_device *dev, int vf_id,
		       struct ifla_vf_info *ivi)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	ivi->vf = vf_id;
	vf = &bp->pf.vf[vf_id];

	if (is_valid_ether_addr(vf->mac_addr))
		memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
	else
		memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN);
	ivi->max_tx_rate = vf->max_tx_rate;
	ivi->min_tx_rate = vf->min_tx_rate;
	ivi->vlan = vf->vlan;
	if (vf->flags & BNXT_VF_QOS)
		ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
	else
		ivi->qos = 0;
	ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
	ivi->trusted = bnxt_is_trusted_vf(bp, vf);
	if (!(vf->flags & BNXT_VF_LINK_FORCED))
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->flags & BNXT_VF_LINK_UP)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;

	return 0;
}

int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;
	/* reject bc or mc mac addr, zero mac addr means allow
	 * VF to use its own mac addr
	 */
	if (is_multicast_ether_addr(mac)) {
		netdev_err(dev, "Invalid VF ethernet address\n");
		return -EINVAL;
	}
	vf = &bp->pf.vf[vf_id];

	memcpy(vf->mac_addr, mac, ETH_ALEN);
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
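
/* Set the VF's default VLAN via HWRM_FUNC_CFG; a non-zero QoS/priority
 * value is rejected since priority handling is not implemented yet.
 */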
int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
		     __be16 vlan_proto)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	u16 vlan_tag;
	int rc;

	if (bp->hwrm_spec_code < 0x10201)
		return -ENOTSUPP;

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	/* TODO: needed to implement proper handling of user priority,
	 * currently fail the command if there is valid priority
	 */
	if (vlan_id > 4095 || qos)
		return -EINVAL;

	vf = &bp->pf.vf[vf_id];
	vlan_tag = vlan_id;
	if (vlan_tag == vf->vlan)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.dflt_vlan = cpu_to_le16(vlan_tag);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		vf->vlan = vlan_tag;
	return rc;
}

int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
		   int max_tx_rate)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	u32 pf_link_speed;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
	if (max_tx_rate > pf_link_speed) {
		netdev_info(bp->dev, "max tx rate %d exceed PF link speed for VF %d\n",
			    max_tx_rate, vf_id);
		return -EINVAL;
	}

	if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
		netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
			    min_tx_rate, vf_id);
		return -EINVAL;
	}
	if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
		return 0;
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
	req.max_bw = cpu_to_le32(max_tx_rate);
	req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
	req.min_bw = cpu_to_le32(min_tx_rate);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		vf->min_tx_rate = min_tx_rate;
		vf->max_tx_rate = max_tx_rate;
	}
	return rc;
}

int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];

	vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED);
	switch (link) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->flags |= BNXT_VF_LINK_UP;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->flags |= BNXT_VF_LINK_FORCED;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED;
		break;
	default:
		netdev_err(bp->dev, "Invalid link option\n");
		rc = -EINVAL;
		break;
	}
	if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
		rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
			ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
	return rc;
}

static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
{
	int i;
	struct bnxt_vf_info *vf;

	for (i = 0; i < num_vfs; i++) {
		vf = &bp->pf.vf[i];
		memset(vf, 0, sizeof(*vf));
	}
	return 0;
}
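
/* Ask the firmware to release the resources reserved for each VF in the
 * given range of VF ids.
 */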
static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
{
	int i, rc = 0;
	struct bnxt_pf_info *pf = &bp->pf;
	struct hwrm_func_vf_resc_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
		req.vf_id = cpu_to_le16(i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static void bnxt_free_vf_resources(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;
	int i;

	kfree(bp->pf.vf_event_bmap);
	bp->pf.vf_event_bmap = NULL;

	for (i = 0; i < 4; i++) {
		if (bp->pf.hwrm_cmd_req_addr[i]) {
			dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE,
					  bp->pf.hwrm_cmd_req_addr[i],
					  bp->pf.hwrm_cmd_req_dma_addr[i]);
			bp->pf.hwrm_cmd_req_addr[i] = NULL;
		}
	}

	bp->pf.active_vfs = 0;
	kfree(bp->pf.vf);
	bp->pf.vf = NULL;
}

static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
{
	struct pci_dev *pdev = bp->pdev;
	u32 nr_pages, size, i, j, k = 0;

	bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
	if (!bp->pf.vf)
		return -ENOMEM;

	bnxt_set_vf_attr(bp, num_vfs);

	size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
	nr_pages = size / BNXT_PAGE_SIZE;
	if (size & (BNXT_PAGE_SIZE - 1))
		nr_pages++;

	for (i = 0; i < nr_pages; i++) {
		bp->pf.hwrm_cmd_req_addr[i] =
			dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE,
					   &bp->pf.hwrm_cmd_req_dma_addr[i],
					   GFP_KERNEL);

		if (!bp->pf.hwrm_cmd_req_addr[i])
			return -ENOMEM;

		for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) {
			struct bnxt_vf_info *vf = &bp->pf.vf[k];

			vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
						j * BNXT_HWRM_REQ_MAX_SIZE;
			vf->hwrm_cmd_req_dma_addr =
				bp->pf.hwrm_cmd_req_dma_addr[i] + j *
				BNXT_HWRM_REQ_MAX_SIZE;
			k++;
		}
	}

	/* Max 128 VF's */
	bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
	if (!bp->pf.vf_event_bmap)
		return -ENOMEM;

	bp->pf.hwrm_cmd_req_pages = nr_pages;
	return 0;
}

static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
{
	struct hwrm_func_buf_rgtr_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1);

	req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
	req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
	req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
	req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
	req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
	req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
	req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

/* Caller holds bp->hwrm_cmd_lock mutex lock */
static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt_vf_info *vf;

	vf = &bp->pf.vf[vf_id];
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);

	if (is_valid_ether_addr(vf->mac_addr)) {
		req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
		memcpy(req.dflt_mac_addr, vf->mac_addr, ETH_ALEN);
	}
	if (vf->vlan) {
		req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
		req.dflt_vlan = cpu_to_le16(vf->vlan);
	}
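	/* Reapply any TX rate limits previously configured for this VF */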
	if (vf->max_tx_rate) {
		req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
		req.max_bw = cpu_to_le32(vf->max_tx_rate);
#ifdef HAVE_IFLA_TX_RATE
		req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
		req.min_bw = cpu_to_le32(vf->min_tx_rate);
#endif
	}
	if (vf->flags & BNXT_VF_TRUST)
		req.flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);

	_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

/* Only called by PF to reserve resources for VFs, returns actual number of
 * VFs configured, or < 0 on error.
 */
static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
{
	struct hwrm_func_vf_resource_cfg_input req = {0};
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;
	u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
	struct bnxt_pf_info *pf = &bp->pf;
	int i, rc = 0, min = 1;
	u16 vf_msix = 0;
	u16 vf_rss;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);

	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		vf_msix = hw_resc->max_nqs - bnxt_nq_rings_in_use(bp);
		vf_ring_grps = 0;
	} else {
		vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings;
	}
	vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp);
	vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp);
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
	else
		vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings;
	vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
	vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
	vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
	vf_rss = hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs;

	req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
	if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
		min = 0;
		req.min_rsscos_ctx = cpu_to_le16(min);
	}
	if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL ||
	    pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
		req.min_cmpl_rings = cpu_to_le16(min);
		req.min_tx_rings = cpu_to_le16(min);
		req.min_rx_rings = cpu_to_le16(min);
		req.min_l2_ctxs = cpu_to_le16(min);
		req.min_vnics = cpu_to_le16(min);
		req.min_stat_ctx = cpu_to_le16(min);
		if (!(bp->flags & BNXT_FLAG_CHIP_P5))
			req.min_hw_ring_grps = cpu_to_le16(min);
	} else {
		vf_cp_rings /= num_vfs;
		vf_tx_rings /= num_vfs;
		vf_rx_rings /= num_vfs;
		vf_vnics /= num_vfs;
		vf_stat_ctx /= num_vfs;
		vf_ring_grps /= num_vfs;
		vf_rss /= num_vfs;

		req.min_cmpl_rings = cpu_to_le16(vf_cp_rings);
		req.min_tx_rings = cpu_to_le16(vf_tx_rings);
		req.min_rx_rings = cpu_to_le16(vf_rx_rings);
		req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
		req.min_vnics = cpu_to_le16(vf_vnics);
		req.min_stat_ctx = cpu_to_le16(vf_stat_ctx);
		req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
		req.min_rsscos_ctx = cpu_to_le16(vf_rss);
	}
	req.max_cmpl_rings = cpu_to_le16(vf_cp_rings);
	req.max_tx_rings = cpu_to_le16(vf_tx_rings);
	req.max_rx_rings = cpu_to_le16(vf_rx_rings);
	req.max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
	req.max_vnics = cpu_to_le16(vf_vnics);
	req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
	req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
	req.max_rsscos_ctx = cpu_to_le16(vf_rss);
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		req.max_msix = cpu_to_le16(vf_msix / num_vfs);

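	/* Send the resource reservation to each VF; when resetting, restore
	 * the saved per-VF settings first.
	 */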
	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < num_vfs; i++) {
		if (reset)
			__bnxt_set_vf_params(bp, i);

		req.vf_id = cpu_to_le16(pf->first_vf_id + i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
		pf->active_vfs = i + 1;
		pf->vf[i].fw_fid = pf->first_vf_id + i;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	if (pf->active_vfs) {
		u16 n = pf->active_vfs;

		hw_resc->max_tx_rings -= le16_to_cpu(req.min_tx_rings) * n;
		hw_resc->max_rx_rings -= le16_to_cpu(req.min_rx_rings) * n;
		hw_resc->max_hw_ring_grps -= le16_to_cpu(req.min_hw_ring_grps) *
					     n;
		hw_resc->max_cp_rings -= le16_to_cpu(req.min_cmpl_rings) * n;
		hw_resc->max_rsscos_ctxs -= le16_to_cpu(req.min_rsscos_ctx) * n;
		hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n;
		hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n;
		if (bp->flags & BNXT_FLAG_CHIP_P5)
			hw_resc->max_irqs -= vf_msix * n;

		rc = pf->active_vfs;
	}
	return rc;
}

/* Only called by PF to reserve resources for VFs, returns actual number of
 * VFs configured, or < 0 on error.
 */
static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
{
	u32 rc = 0, mtu, i;
	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	struct hwrm_func_cfg_input req = {0};
	struct bnxt_pf_info *pf = &bp->pf;
	int total_vf_tx_rings = 0;
	u16 vf_ring_grps;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);

	/* Remaining rings are distributed equally among VFs for now */
	vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp) / num_vfs;
	vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp) / num_vfs;
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
			      num_vfs;
	else
		vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings) /
			      num_vfs;
	vf_ring_grps = (hw_resc->max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
	vf_tx_rings = (hw_resc->max_tx_rings - bp->tx_nr_rings) / num_vfs;
	vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs;
	vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);

	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
				  FUNC_CFG_REQ_ENABLES_MRU |
				  FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_VNICS |
				  FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);

	mtu = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
	req.mru = cpu_to_le16(mtu);
	req.mtu = cpu_to_le16(mtu);

	req.num_rsscos_ctxs = cpu_to_le16(1);
	req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
	req.num_tx_rings = cpu_to_le16(vf_tx_rings);
	req.num_rx_rings = cpu_to_le16(vf_rx_rings);
	req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
	req.num_l2_ctxs = cpu_to_le16(4);

	req.num_vnics = cpu_to_le16(vf_vnics);
	/* FIXME spec currently uses 1 bit for stats ctx */
	req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < num_vfs; i++) {
		int vf_tx_rsvd = vf_tx_rings;

		req.fid = cpu_to_le16(pf->first_vf_id + i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
		pf->active_vfs = i + 1;
		pf->vf[i].fw_fid = le16_to_cpu(req.fid);
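		/* Read back how many TX rings the firmware actually
		 * reserved for this VF.
		 */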
		rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid,
					      &vf_tx_rsvd);
		if (rc)
			break;
		total_vf_tx_rings += vf_tx_rsvd;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	if (pf->active_vfs) {
		hw_resc->max_tx_rings -= total_vf_tx_rings;
		hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
		hw_resc->max_hw_ring_grps -= vf_ring_grps * num_vfs;
		hw_resc->max_cp_rings -= vf_cp_rings * num_vfs;
		hw_resc->max_rsscos_ctxs -= num_vfs;
		hw_resc->max_stat_ctxs -= vf_stat_ctx * num_vfs;
		hw_resc->max_vnics -= vf_vnics * num_vfs;
		rc = pf->active_vfs;
	}
	return rc;
}

static int bnxt_func_cfg(struct bnxt *bp, int num_vfs, bool reset)
{
	if (BNXT_NEW_RM(bp))
		return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs, reset);
	else
		return bnxt_hwrm_func_cfg(bp, num_vfs);
}

int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
{
	int rc;

	/* Register buffers for VFs */
	rc = bnxt_hwrm_func_buf_rgtr(bp);
	if (rc)
		return rc;

	/* Reserve resources for VFs */
	rc = bnxt_func_cfg(bp, *num_vfs, reset);
	if (rc != *num_vfs) {
		if (rc <= 0) {
			netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
			*num_vfs = 0;
			return rc;
		}
		netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n",
			    rc);
		*num_vfs = rc;
	}

	bnxt_ulp_sriov_cfg(bp, *num_vfs);
	return 0;
}

static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
{
	int rc = 0, vfs_supported;
	int min_rx_rings, min_tx_rings, min_rss_ctxs;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int tx_ok = 0, rx_ok = 0, rss_ok = 0;
	int avail_cp, avail_stat;

	/* Check if we can enable requested num of vf's. At a minimum
	 * we require 1 RX 1 TX rings for each VF. In this minimum conf
	 * features like TPA will not be available.
	 */
	vfs_supported = *num_vfs;

	avail_cp = bnxt_get_avail_cp_rings_for_en(bp);
	avail_stat = bnxt_get_avail_stat_ctxs_for_en(bp);
	avail_cp = min_t(int, avail_cp, avail_stat);

	while (vfs_supported) {
		min_rx_rings = vfs_supported;
		min_tx_rings = vfs_supported;
		min_rss_ctxs = vfs_supported;

		if (bp->flags & BNXT_FLAG_AGG_RINGS) {
			if (hw_resc->max_rx_rings - bp->rx_nr_rings * 2 >=
			    min_rx_rings)
				rx_ok = 1;
		} else {
			if (hw_resc->max_rx_rings - bp->rx_nr_rings >=
			    min_rx_rings)
				rx_ok = 1;
		}
		if (hw_resc->max_vnics - bp->nr_vnics < min_rx_rings ||
		    avail_cp < min_rx_rings)
			rx_ok = 0;

		if (hw_resc->max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
		    avail_cp >= min_tx_rings)
			tx_ok = 1;

		if (hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs >=
		    min_rss_ctxs)
			rss_ok = 1;

		if (tx_ok && rx_ok && rss_ok)
			break;

		vfs_supported--;
	}

	if (!vfs_supported) {
		netdev_err(bp->dev, "Cannot enable VF's as all resources are used by PF\n");
		return -EINVAL;
	}

	if (vfs_supported != *num_vfs) {
		netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
			    *num_vfs, vfs_supported);
		*num_vfs = vfs_supported;
	}

	rc = bnxt_alloc_vf_resources(bp, *num_vfs);
	if (rc)
		goto err_out1;

	rc = bnxt_cfg_hw_sriov(bp, num_vfs, false);
	if (rc)
		goto err_out2;

	rc = pci_enable_sriov(bp->pdev, *num_vfs);
	if (rc)
		goto err_out2;

	return 0;

err_out2:
	/* Free the resources reserved for various VF's */
	bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);

err_out1:
	bnxt_free_vf_resources(bp);

	return rc;
}

void bnxt_sriov_disable(struct bnxt *bp)
{
	u16 num_vfs = pci_num_vf(bp->pdev);

	if (!num_vfs)
		return;

	/* synchronize VF and VF-rep create and destroy */
	mutex_lock(&bp->sriov_lock);
	bnxt_vf_reps_destroy(bp);

	if (pci_vfs_assigned(bp->pdev)) {
		bnxt_hwrm_fwd_async_event_cmpl(
			bp, NULL, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
		netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
			    num_vfs);
	} else {
		pci_disable_sriov(bp->pdev);
		/* Free the HW resources reserved for various VF's */
		bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
	}
	mutex_unlock(&bp->sriov_lock);

	bnxt_free_vf_resources(bp);

	/* Reclaim all resources for the PF. */
	rtnl_lock();
	bnxt_restore_pf_fw_resources(bp);
	rtnl_unlock();

	bnxt_ulp_sriov_cfg(bp, 0);
}

int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);

	if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
		netdev_warn(dev, "Not allow SRIOV if the irq mode is not MSIX\n");
		return 0;
	}

	rtnl_lock();
	if (!netif_running(dev)) {
		netdev_warn(dev, "Reject SRIOV config request since if is down!\n");
		rtnl_unlock();
		return 0;
	}
	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
		netdev_warn(dev, "Reject SRIOV config request when FW reset is in progress\n");
		rtnl_unlock();
		return 0;
	}
	bp->sriov_cfg = true;
	rtnl_unlock();

	if (pci_vfs_assigned(bp->pdev)) {
		netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n");
		num_vfs = 0;
		goto sriov_cfg_exit;
	}

	/* Check if enabled VFs is same as requested */
	if (num_vfs && num_vfs == bp->pf.active_vfs)
		goto sriov_cfg_exit;

	/* if there are previous existing VFs, clean them up */
	bnxt_sriov_disable(bp);
	if (!num_vfs)
		goto sriov_cfg_exit;

	bnxt_sriov_enable(bp, &num_vfs);

sriov_cfg_exit:
	bp->sriov_cfg = false;
	wake_up(&bp->sriov_cfg_wait);

	return num_vfs;
}

static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
			      void *encap_resp, __le64 encap_resp_addr,
			      __le16 encap_resp_cpr, u32 msg_size)
{
	int rc = 0;
	struct hwrm_fwd_resp_input req = {0};

	if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
		return -EINVAL;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);

	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_len = cpu_to_le16(msg_size);
	req.encap_resp_addr = encap_resp_addr;
	req.encap_resp_cmpl_ring = encap_resp_cpr;
	memcpy(req.encap_resp, encap_resp, msg_size);

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
	return rc;
}

static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				  u32 msg_size)
{
	int rc = 0;
	struct hwrm_reject_fwd_resp_input req = {0};

	if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size))
		return -EINVAL;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
	return rc;
}

static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				   u32 msg_size)
{
	int rc = 0;
	struct hwrm_exec_fwd_resp_input req = {0};

	if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size))
		return -EINVAL;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc);
	return rc;
}

static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input);
	struct hwrm_func_vf_cfg_input *req =
		(struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr;

	/* Allow VF to set a valid MAC address, if trust is set to on or
	 * if the PF assigned MAC address is zero
	 */
	if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) {
		bool trust = bnxt_is_trusted_vf(bp, vf);

		if (is_valid_ether_addr(req->dflt_mac_addr) &&
		    (trust || !is_valid_ether_addr(vf->mac_addr) ||
		     ether_addr_equal(req->dflt_mac_addr, vf->mac_addr))) {
			ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr);
			return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
		}
		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
	}
	return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
}

static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
	struct hwrm_cfa_l2_filter_alloc_input *req =
		(struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
	bool mac_ok = false;

	if (!is_valid_ether_addr((const u8 *)req->l2_addr))
		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);

	/* Allow VF to set a valid MAC address, if trust is set to on.
	 * Or VF MAC address must first match MAC address in PF's context.
	 * Otherwise, it must match the VF MAC address if firmware spec >=
	 * 1.2.2
	 */
	if (bnxt_is_trusted_vf(bp, vf)) {
		mac_ok = true;
	} else if (is_valid_ether_addr(vf->mac_addr)) {
		if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
			mac_ok = true;
	} else if (is_valid_ether_addr(vf->vf_mac_addr)) {
		if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr))
			mac_ok = true;
	} else {
		/* There are two cases:
		 * 1.If firmware spec < 0x10202,VF MAC address is not forwarded
		 *   to the PF and so it doesn't have to match
		 * 2.Allow VF to modify its own MAC when PF has not assigned a
		 *   valid MAC address and firmware spec >= 0x10202
		 */
		mac_ok = true;
	}
	if (mac_ok)
		return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
	return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
}

static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;

	if (!(vf->flags & BNXT_VF_LINK_FORCED)) {
		/* real link */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
	} else {
		struct hwrm_port_phy_qcfg_output phy_qcfg_resp = {0};
		struct hwrm_port_phy_qcfg_input *phy_qcfg_req;

		phy_qcfg_req =
			(struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
		mutex_lock(&bp->hwrm_cmd_lock);
		memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
		       sizeof(phy_qcfg_resp));
		mutex_unlock(&bp->hwrm_cmd_lock);
		phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp));
		phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
		phy_qcfg_resp.valid = 1;

		if (vf->flags & BNXT_VF_LINK_UP) {
			/* if physical link is down, force link up on VF */
			if (phy_qcfg_resp.link !=
			    PORT_PHY_QCFG_RESP_LINK_LINK) {
				phy_qcfg_resp.link =
					PORT_PHY_QCFG_RESP_LINK_LINK;
				phy_qcfg_resp.link_speed = cpu_to_le16(
					PORT_PHY_QCFG_RESP_LINK_SPEED_10GB);
				phy_qcfg_resp.duplex_cfg =
					PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL;
				phy_qcfg_resp.duplex_state =
					PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL;
				phy_qcfg_resp.pause =
					(PORT_PHY_QCFG_RESP_PAUSE_TX |
					 PORT_PHY_QCFG_RESP_PAUSE_RX);
			}
		} else {
			/* force link down */
			phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
			phy_qcfg_resp.link_speed = 0;
			phy_qcfg_resp.duplex_state =
				PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF;
			phy_qcfg_resp.pause = 0;
		}
		rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
					phy_qcfg_req->resp_addr,
					phy_qcfg_req->cmpl_ring,
					sizeof(phy_qcfg_resp));
	}
	return rc;
}

static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;
	struct input *encap_req = vf->hwrm_cmd_req_addr;
	u32 req_type = le16_to_cpu(encap_req->req_type);

	switch (req_type) {
	case HWRM_FUNC_VF_CFG:
		rc = bnxt_vf_configure_mac(bp, vf);
		break;
	case HWRM_CFA_L2_FILTER_ALLOC:
		rc = bnxt_vf_validate_set_mac(bp, vf);
		break;
	case HWRM_FUNC_CFG:
		/* TODO Validate if VF is allowed to change mac address,
		 * mtu, num of rings etc
		 */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_func_cfg_input));
		break;
	case HWRM_PORT_PHY_QCFG:
		rc = bnxt_vf_set_link(bp, vf);
		break;
	default:
		break;
	}
	return rc;
}
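
/* Process all pending HWRM requests forwarded from VFs, as flagged in the
 * vf_event_bmap, and send an execute-forward or reject response for each.
 */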
void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;

	/* Scan through VF's and process commands */
	while (1) {
		vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
		if (vf_id >= active_vfs)
			break;

		clear_bit(vf_id, bp->pf.vf_event_bmap);
		bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
		i = vf_id + 1;
	}
}

void bnxt_update_vf_mac(struct bnxt *bp)
{
	struct hwrm_func_qcaps_input req = {0};
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
	req.fid = cpu_to_le16(0xffff);

	mutex_lock(&bp->hwrm_cmd_lock);
	if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
		goto update_vf_mac_exit;

	/* Store MAC address from the firmware. There are 2 cases:
	 * 1. MAC address is valid. It is assigned from the PF and we
	 *    need to override the current VF MAC address with it.
	 * 2. MAC address is zero. The VF will use a random MAC address by
	 *    default but the stored zero MAC will allow the VF user to change
	 *    the random MAC address using ndo_set_mac_address() if he wants.
	 */
	if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr))
		memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);

	/* overwrite netdev dev_addr with admin VF MAC */
	if (is_valid_ether_addr(bp->vf.mac_addr))
		memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
update_vf_mac_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
}

int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
{
	struct hwrm_func_vf_cfg_input req = {0};
	int rc = 0;

	if (!BNXT_VF(bp))
		return 0;

	if (bp->hwrm_spec_code < 0x10202) {
		if (is_valid_ether_addr(bp->vf.mac_addr))
			rc = -EADDRNOTAVAIL;
		goto mac_done;
	}
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
	req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
mac_done:
	if (rc && strict) {
		rc = -EADDRNOTAVAIL;
		netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
			    mac);
		return rc;
	}
	return 0;
}
#else

int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
{
	if (*num_vfs)
		return -EOPNOTSUPP;
	return 0;
}

void bnxt_sriov_disable(struct bnxt *bp)
{
}

void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enabled\n");
}

void bnxt_update_vf_mac(struct bnxt *bp)
{
}

int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
{
	return 0;
}
#endif