// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/if_bridge.h>
#ifdef NETIF_F_HW_VLAN_CTAG_TX
#include <linux/if_vlan.h>
#endif

#include "ixgbe.h"
#include "ixgbe_type.h"
#include "ixgbe_mbx.h"
#include "ixgbe_sriov.h"

#ifdef CONFIG_PCI_IOV
static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter,
					   unsigned int num_vfs)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct vf_macvlans *mv_list;
	int num_vf_macvlans, i;

	/* Initialize list of VF macvlans */
	INIT_LIST_HEAD(&adapter->vf_mvs.l);

	num_vf_macvlans = hw->mac.num_rar_entries -
			  (IXGBE_MAX_PF_MACVLANS + 1 + num_vfs);
	if (!num_vf_macvlans)
		return;

	mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans),
			  GFP_KERNEL);
	if (mv_list) {
		for (i = 0; i < num_vf_macvlans; i++) {
			mv_list[i].vf = -1;
			mv_list[i].free = true;
			list_add(&mv_list[i].l, &adapter->vf_mvs.l);
		}
		adapter->mv_list = mv_list;
	}
}
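/* Illustrative note, not part of the original file: the RAR budget above
 * works out as num_rar_entries minus (PF macvlans + the PF MAC + one MAC
 * per VF). Assuming an 82599-style part with 128 RAR entries and
 * IXGBE_MAX_PF_MACVLANS of 15, enabling 63 VFs would leave
 * 128 - (15 + 1 + 63) = 49 entries available for VF macvlan filters.
 */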
static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
				unsigned int num_vfs)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (adapter->xdp_prog) {
		e_warn(probe, "SRIOV is not supported with XDP\n");
		return -EINVAL;
	}

	/* Enable VMDq flag so device will be set in VM mode */
	adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED |
			  IXGBE_FLAG_VMDQ_ENABLED;

	/* Allocate memory for per VF control structures */
	adapter->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage),
				  GFP_KERNEL);
	if (!adapter->vfinfo)
		return -ENOMEM;

	adapter->num_vfs = num_vfs;

	ixgbe_alloc_vf_macvlans(adapter, num_vfs);
	adapter->ring_feature[RING_F_VMDQ].offset = num_vfs;

	/* Initialize default switching mode VEB */
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
	adapter->bridge_mode = BRIDGE_MODE_VEB;

	/* limit traffic classes based on VFs enabled */
	if ((adapter->hw.mac.type == ixgbe_mac_82599EB) && (num_vfs < 16)) {
		adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
		adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
	} else if (num_vfs < 32) {
		adapter->dcb_cfg.num_tcs.pg_tcs = 4;
		adapter->dcb_cfg.num_tcs.pfc_tcs = 4;
	} else {
		adapter->dcb_cfg.num_tcs.pg_tcs = 1;
		adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
	}

	/* Disable RSC when in SR-IOV mode */
	adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
			     IXGBE_FLAG2_RSC_ENABLED);

	for (i = 0; i < num_vfs; i++) {
		/* enable spoof checking for all VFs */
		adapter->vfinfo[i].spoofchk_enabled = true;
		adapter->vfinfo[i].link_enable = true;

		/* We support VF RSS querying only for 82599 and x540
		 * devices at the moment. These devices share RSS
		 * indirection table and RSS hash key with PF therefore
		 * we want to disable the querying by default.
		 */
		adapter->vfinfo[i].rss_query_enabled = false;

		/* Untrust all VFs */
		adapter->vfinfo[i].trusted = false;

		/* set the default xcast mode */
		adapter->vfinfo[i].xcast_mode = IXGBEVF_XCAST_MODE_NONE;
	}

	e_info(probe, "SR-IOV enabled with %d VFs\n", num_vfs);
	return 0;
}

/**
 * ixgbe_get_vfs - Find and take references to all vf devices
 * @adapter: Pointer to adapter struct
 */
static void ixgbe_get_vfs(struct ixgbe_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u16 vendor = pdev->vendor;
	struct pci_dev *vfdev;
	int vf = 0;
	u16 vf_id;
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return;
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id);

	vfdev = pci_get_device(vendor, vf_id, NULL);
	for (; vfdev; vfdev = pci_get_device(vendor, vf_id, vfdev)) {
		if (!vfdev->is_virtfn)
			continue;
		if (vfdev->physfn != pdev)
			continue;
		if (vf >= adapter->num_vfs)
			continue;
		pci_dev_get(vfdev);
		adapter->vfinfo[vf].vfdev = vfdev;
		++vf;
	}
}
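/* Editor's note (illustrative): pci_get_device() drops the reference held
 * on its "from" argument and takes one on the device it returns, so the
 * loop above never leaks a reference while walking the matching devices.
 * The extra pci_dev_get() pins each VF for the pointer stashed in
 * vfinfo[]; ixgbe_disable_sriov() releases it with pci_dev_put().
 */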
/* Note this function is called when the user wants to enable SR-IOV
 * VFs using the now deprecated module parameter
 */
void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs)
{
	int pre_existing_vfs = 0;
	unsigned int num_vfs;

	pre_existing_vfs = pci_num_vf(adapter->pdev);
	if (!pre_existing_vfs && !max_vfs)
		return;

	/* If there are pre-existing VFs then we have to force
	 * use of that many - override any module parameter value.
	 * This may result from the user unloading the PF driver
	 * while VFs were assigned to guest VMs or because the VFs
	 * have been created via the new PCI SR-IOV sysfs interface.
	 */
	if (pre_existing_vfs) {
		num_vfs = pre_existing_vfs;
		dev_warn(&adapter->pdev->dev,
			 "Virtual Functions already enabled for this device - Please reload all VF drivers to avoid spoofed packet errors\n");
	} else {
		int err;
		/*
		 * The 82599 supports up to 64 VFs per physical function
		 * but this implementation limits allocation to 63 so that
		 * basic networking resources are still available to the
		 * physical function. If the user requests greater than
		 * 63 VFs then it is an error - reset to default of zero.
		 */
		num_vfs = min_t(unsigned int, max_vfs, IXGBE_MAX_VFS_DRV_LIMIT);

		err = pci_enable_sriov(adapter->pdev, num_vfs);
		if (err) {
			e_err(probe, "Failed to enable PCI sriov: %d\n", err);
			return;
		}
	}

	if (!__ixgbe_enable_sriov(adapter, num_vfs)) {
		ixgbe_get_vfs(adapter);
		return;
	}

	/* If we have gotten to this point then there is no memory available
	 * to manage the VF devices - print message and bail.
	 */
	e_err(probe, "Unable to allocate memory for VF Data Storage - SRIOV disabled\n");
	ixgbe_disable_sriov(adapter);
}

#endif /* #ifdef CONFIG_PCI_IOV */
int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
	unsigned int num_vfs = adapter->num_vfs, vf;
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned long flags;
	int rss;

	spin_lock_irqsave(&adapter->vfs_lock, flags);
	/* set num VFs to 0 to prevent access to vfinfo */
	adapter->num_vfs = 0;
	spin_unlock_irqrestore(&adapter->vfs_lock, flags);

	/* put the reference to all of the vf devices */
	for (vf = 0; vf < num_vfs; ++vf) {
		struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev;

		if (!vfdev)
			continue;
		adapter->vfinfo[vf].vfdev = NULL;
		pci_dev_put(vfdev);
	}

	/* free VF control structures */
	kfree(adapter->vfinfo);
	adapter->vfinfo = NULL;

	/* free macvlan list */
	kfree(adapter->mv_list);
	adapter->mv_list = NULL;

	/* if SR-IOV is already disabled then there is nothing to do */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return 0;

	if (hw->mac.ops.disable_mdd)
		hw->mac.ops.disable_mdd(hw);

#ifdef CONFIG_PCI_IOV
	/*
	 * If our VFs are assigned we cannot shut down SR-IOV
	 * without causing issues, so just leave the hardware
	 * available but disabled
	 */
	if (pci_vfs_assigned(adapter->pdev)) {
		e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n");
		return -EPERM;
	}
	/* disable iov and allow time for transactions to clear */
	pci_disable_sriov(adapter->pdev);
#endif

	/* Disable VMDq flag so device will be set in VM mode */
	if (bitmap_weight(adapter->fwd_bitmask, adapter->num_rx_pools) == 1) {
		adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
		adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
		rss = min_t(int, ixgbe_max_rss_indices(adapter),
			    num_online_cpus());
	} else {
		rss = min_t(int, IXGBE_MAX_L2A_QUEUES, num_online_cpus());
	}

	adapter->ring_feature[RING_F_VMDQ].offset = 0;
	adapter->ring_feature[RING_F_RSS].limit = rss;

	/* take a breather then clean up driver data */
	msleep(100);
	return 0;
}

static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
	int pre_existing_vfs = pci_num_vf(dev);
	int err = 0, num_rx_pools, i, limit;
	u8 num_tc;

	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		err = ixgbe_disable_sriov(adapter);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		return num_vfs;

	if (err)
		return err;

	/* While the SR-IOV capability structure reports total VFs to be 64,
	 * we limit the actual number allocated as below based on two factors.
	 *	Num_TCs	MAX_VFs
	 *	1	63
	 *	<=4	31
	 *	>4	15
	 * First, we reserve some transmit/receive resources for the PF.
	 * Second, VMDQ also uses the same pools that SR-IOV does. We need to
	 * account for this, so that we don't accidentally allocate more VFs
	 * than we have available pools. The PCI bus driver already checks for
	 * other values out of range.
	 */
	num_tc = adapter->hw_tcs;
	num_rx_pools = bitmap_weight(adapter->fwd_bitmask,
				     adapter->num_rx_pools);
	limit = (num_tc > 4) ? IXGBE_MAX_VFS_8TC :
		(num_tc > 1) ? IXGBE_MAX_VFS_4TC : IXGBE_MAX_VFS_1TC;

	if (num_vfs > (limit - num_rx_pools)) {
		e_dev_err("Currently configured with %d TCs, and %d offloaded macvlans. Creating more than %d VFs is not allowed\n",
			  num_tc, num_rx_pools - 1, limit - num_rx_pools);
		return -EPERM;
	}

	err = __ixgbe_enable_sriov(adapter, num_vfs);
	if (err)
		return err;

	for (i = 0; i < num_vfs; i++)
		ixgbe_vf_configuration(dev, (i | 0x10000000));

	/* reset before enabling SRIOV to avoid mailbox issues */
	ixgbe_sriov_reinit(adapter);

	err = pci_enable_sriov(dev, num_vfs);
	if (err) {
		e_dev_warn("Failed to enable PCI sriov: %d\n", err);
		return err;
	}
	ixgbe_get_vfs(adapter);

	return num_vfs;
#else
	return 0;
#endif
}
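/* Illustrative note, not part of the original file: with the limit table
 * above, a PF running 4 traffic classes (limit 31) and three offloaded
 * macvlans (num_rx_pools == 4, one pool belonging to the PF itself) could
 * create at most 31 - 4 = 27 VFs before this function returns -EPERM.
 */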
static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
	int err;
#ifdef CONFIG_PCI_IOV
	u32 current_flags = adapter->flags;
	int prev_num_vf = pci_num_vf(dev);
#endif

	err = ixgbe_disable_sriov(adapter);

	/* Only reinit if no error and state changed */
#ifdef CONFIG_PCI_IOV
	if (!err && (current_flags != adapter->flags ||
		     prev_num_vf != pci_num_vf(dev)))
		ixgbe_sriov_reinit(adapter);
#endif

	return err;
}

int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
{
	if (num_vfs == 0)
		return ixgbe_pci_sriov_disable(dev);
	else
		return ixgbe_pci_sriov_enable(dev, num_vfs);
}

static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
				   u32 *msgbuf, u32 vf)
{
	int entries = FIELD_GET(IXGBE_VT_MSGINFO_MASK, msgbuf[0]);
	u16 *hash_list = (u16 *)&msgbuf[1];
	struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
	struct ixgbe_hw *hw = &adapter->hw;
	int i;
	u32 vector_bit;
	u32 vector_reg;
	u32 mta_reg;
	u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

	/* only so many hash values supported */
	entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);

	/*
	 * salt away the number of multicast addresses assigned
	 * to this VF for later use to restore when the PF multicast
	 * list changes
	 */
	vfinfo->num_vf_mc_hashes = entries;

	/*
	 * VFs are limited to using the MTA hash table for their multicast
	 * addresses
	 */
	for (i = 0; i < entries; i++)
		vfinfo->vf_mc_hashes[i] = hash_list[i];

	for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
		vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F;
		vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F;
		mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
		mta_reg |= BIT(vector_bit);
		IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
	}
	vmolr |= IXGBE_VMOLR_ROMPE;
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

	return 0;
}
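/* Worked example (illustrative, not part of the original file): the MTA is
 * a 4096-bit hash table spread over 128 32-bit registers. A 12-bit hash
 * value such as 0x0D5E selects register (0x0D5E >> 5) & 0x7F = 0x6A and
 * bit 0x0D5E & 0x1F = 0x1E within it, which is exactly what the loop
 * above computes for each entry the VF handed over.
 */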
#ifdef CONFIG_PCI_IOV
void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct vf_data_storage *vfinfo;
	int i, j;
	u32 vector_bit;
	u32 vector_reg;
	u32 mta_reg;

	for (i = 0; i < adapter->num_vfs; i++) {
		u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(i));

		vfinfo = &adapter->vfinfo[i];
		for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
			hw->addr_ctrl.mta_in_use++;
			vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F;
			vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F;
			mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
			mta_reg |= BIT(vector_bit);
			IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
		}

		if (vfinfo->num_vf_mc_hashes)
			vmolr |= IXGBE_VMOLR_ROMPE;
		else
			vmolr &= ~IXGBE_VMOLR_ROMPE;
		IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
	}

	/* Restore any VF macvlans */
	ixgbe_full_sync_mac_table(adapter);
}
#endif

static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
			     u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	/* If the VLAN overlaps with one the PF is currently monitoring, make
	 * sure that we are able to allocate a VLVF entry. This may be
	 * redundant but it guarantees PF will maintain visibility to
	 * the VLAN.
	 */
	if (add && test_bit(vid, adapter->active_vlans)) {
		err = hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), true, false);
		if (err)
			return err;
	}

	err = hw->mac.ops.set_vfta(hw, vid, vf, !!add, false);

	if (add && !err)
		return err;

	/* If we failed to add the VF VLAN or we are removing the VF VLAN
	 * we may need to drop the PF pool bit in order to allow us to free
	 * up the VLVF resources.
	 */
	if (test_bit(vid, adapter->active_vlans) ||
	    (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
		ixgbe_update_pf_promisc_vlvf(adapter, vid);

	return err;
}

static int ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 max_frame, u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 max_frs;

	if (max_frame < ETH_MIN_MTU || max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
		e_err(drv, "VF max_frame %d out of range\n", max_frame);
		return -EINVAL;
	}

	/*
	 * For 82599EB we have to keep all PFs and VFs operating with
	 * the same max_frame value in order to avoid sending an oversize
	 * frame to a VF. In order to guarantee this is handled correctly
	 * for all cases we have several special exceptions to take into
	 * account before we can enable the VF for receive
	 */
	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
		struct net_device *dev = adapter->netdev;
		int pf_max_frame = dev->mtu + ETH_HLEN;
		u32 reg_offset, vf_shift, vfre;
		int err = 0;

#ifdef CONFIG_FCOE
		if (dev->fcoe_mtu)
			pf_max_frame = max_t(int, pf_max_frame,
					     IXGBE_FCOE_JUMBO_FRAME_SIZE);

#endif /* CONFIG_FCOE */
		switch (adapter->vfinfo[vf].vf_api) {
		case ixgbe_mbox_api_11:
		case ixgbe_mbox_api_12:
		case ixgbe_mbox_api_13:
		case ixgbe_mbox_api_14:
			/* Version 1.1 supports jumbo frames on VFs if PF has
			 * jumbo frames enabled which means legacy VFs are
			 * disabled
			 */
			if (pf_max_frame > ETH_FRAME_LEN)
				break;
			fallthrough;
		default:
			/* If the PF or VF are running w/ jumbo frames enabled
			 * we need to shut down the VF Rx path as we cannot
			 * support jumbo frames on legacy VFs
			 */
			if ((pf_max_frame > ETH_FRAME_LEN) ||
			    (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
				err = -EINVAL;
			break;
		}

		/* determine VF receive enable location */
		vf_shift = vf % 32;
		reg_offset = vf / 32;

		/* enable or disable receive depending on error */
		vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
		if (err)
			vfre &= ~BIT(vf_shift);
		else
			vfre |= BIT(vf_shift);
		IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre);

		if (err) {
			e_err(drv, "VF max_frame %d out of range\n", max_frame);
			return err;
		}
	}

	/* pull current max frame size from hardware */
	max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
	max_frs &= IXGBE_MHADD_MFS_MASK;
	max_frs >>= IXGBE_MHADD_MFS_SHIFT;

	if (max_frs < max_frame) {
		max_frs = max_frame << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
	}

	e_info(hw, "VF requests change max MTU to %d\n", max_frame);

	return 0;
}
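/* Illustrative note, not part of the original file: MAXFRS keeps the
 * maximum frame size in its upper 16 bits (IXGBE_MHADD_MFS_SHIFT is 16),
 * so a VF asking for 9018-byte frames on a PF still at the standard
 * 1518-byte size (an assumed default) would raise the field from
 * 1518 << 16 to 9018 << 16. Note the size is only ever grown here; it is
 * never shrunk on behalf of a single VF.
 */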
static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
{
	u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

	vmolr |= IXGBE_VMOLR_BAM;
	if (aupe)
		vmolr |= IXGBE_VMOLR_AUPE;
	else
		vmolr &= ~IXGBE_VMOLR_AUPE;
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
}

static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
}

static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vlvfb_mask, pool_mask, i;

	/* create mask for VF and other pools */
	pool_mask = ~BIT(VMDQ_P(0) % 32);
	vlvfb_mask = BIT(vf % 32);

	/* post increment loop, covers VLVF_ENTRIES - 1 to 0 */
	for (i = IXGBE_VLVF_ENTRIES; i--;) {
		u32 bits[2], vlvfb, vid, vfta, vlvf;
		u32 word = i * 2 + vf / 32;
		u32 mask;

		vlvfb = IXGBE_READ_REG(hw, IXGBE_VLVFB(word));

		/* if our bit isn't set we can skip it */
		if (!(vlvfb & vlvfb_mask))
			continue;

		/* clear our bit from vlvfb */
		vlvfb ^= vlvfb_mask;

		/* create 64b mask to check to see if we should clear VLVF */
		bits[word % 2] = vlvfb;
		bits[~word % 2] = IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1));

		/* if other pools are present, just remove ourselves */
		if (bits[(VMDQ_P(0) / 32) ^ 1] ||
		    (bits[VMDQ_P(0) / 32] & pool_mask))
			goto update_vlvfb;

		/* if PF is present, leave VFTA */
		if (bits[0] || bits[1])
			goto update_vlvf;

		/* if we cannot determine VLAN just remove ourselves */
		vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i));
		if (!vlvf)
			goto update_vlvfb;

		vid = vlvf & VLAN_VID_MASK;
		mask = BIT(vid % 32);

		/* clear bit from VFTA */
		vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid / 32));
		if (vfta & mask)
			IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid / 32), vfta ^ mask);
update_vlvf:
		/* clear POOL selection enable */
		IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), 0);

		if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
			vlvfb = 0;
update_vlvfb:
		/* clear pool bits */
		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), vlvfb);
	}
}
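/* Illustrative note, not part of the original file: each VLVF filter entry
 * owns a pair of VLVFB dwords that together form a 64-bit pool-enable
 * bitmap (pools 0-31 in the even word, pools 32-63 in the odd word). For
 * entry i and vf == 40, "word" above is i * 2 + 1 and the VF's bit is
 * BIT(40 % 32) = BIT(8). The 64-bit bits[] view lets the function decide
 * whether this VF was the last user of the entry before tearing it down.
 */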
static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
				int vf, int index, unsigned char *mac_addr)
{
	struct vf_macvlans *entry;
	bool found = false;
	int retval = 0;

	if (index <= 1) {
		list_for_each_entry(entry, &adapter->vf_mvs.l, l) {
			if (entry->vf == vf) {
				entry->vf = -1;
				entry->free = true;
				entry->is_macvlan = false;
				ixgbe_del_mac_filter(adapter,
						     entry->vf_macvlan, vf);
			}
		}
	}

	/*
	 * If index was zero then we were asked to clear the uc list
	 * for the VF. We're done.
	 */
	if (!index)
		return 0;

	list_for_each_entry(entry, &adapter->vf_mvs.l, l) {
		if (entry->free) {
			found = true;
			break;
		}
	}

	/*
	 * If we traversed the entire list and didn't find a free entry
	 * then we're out of space on the RAR table. It's also possible
	 * for the &adapter->vf_mvs.l list to be empty because the original
	 * memory allocation for the list failed, which is not fatal but does
	 * mean we can't support VF requests for MACVLAN because we couldn't
	 * allocate memory for the list management required.
	 */
	if (!found)
		return -ENOSPC;

	retval = ixgbe_add_mac_filter(adapter, mac_addr, vf);
	if (retval < 0)
		return retval;

	entry->free = false;
	entry->is_macvlan = true;
	entry->vf = vf;
	memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);

	return 0;
}
static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
	u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
	u8 num_tcs = adapter->hw_tcs;
	u32 reg_val;
	u32 queue;

	/* remove VLAN filters belonging to this VF */
	ixgbe_clear_vf_vlans(adapter, vf);

	/* add back PF assigned VLAN or VLAN 0 */
	ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf);

	/* reset offloads to defaults */
	ixgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan);

	/* set outgoing tags for VFs */
	if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) {
		ixgbe_clear_vmvir(adapter, vf);
	} else {
		if (vfinfo->pf_qos || !num_tcs)
			ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
					vfinfo->pf_qos, vf);
		else
			ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
					adapter->default_up, vf);

		if (vfinfo->spoofchk_enabled) {
			hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
			hw->mac.ops.set_mac_anti_spoofing(hw, true, vf);
		}
	}

	/* reset multicast table array for vf */
	adapter->vfinfo[vf].num_vf_mc_hashes = 0;

	/* clear any ipsec table info */
	ixgbe_ipsec_vf_clear(adapter, vf);

	/* Flush and reset the mta with the new values */
	ixgbe_set_rx_mode(adapter->netdev);

	ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
	ixgbe_set_vf_macvlan(adapter, vf, 0, NULL);

	/* reset VF api back to unknown */
	adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;

	/* Restart each queue for given VF */
	for (queue = 0; queue < q_per_pool; queue++) {
		unsigned int reg_idx = (vf * q_per_pool) + queue;

		reg_val = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(reg_idx));

		/* Re-enabling only configured queues */
		if (reg_val) {
			reg_val |= IXGBE_TXDCTL_ENABLE;
			IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val);
			reg_val &= ~IXGBE_TXDCTL_ENABLE;
			IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val);
		}
	}

	IXGBE_WRITE_FLUSH(hw);
}

static void ixgbe_vf_clear_mbx(struct ixgbe_adapter *adapter, u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 word;

	/* Clear VF's mailbox memory */
	for (word = 0; word < IXGBE_VFMAILBOX_SIZE; word++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf), word, 0);

	IXGBE_WRITE_FLUSH(hw);
}

static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
			    int vf, unsigned char *mac_addr)
{
	int retval;

	ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
	retval = ixgbe_add_mac_filter(adapter, mac_addr, vf);
	if (retval >= 0)
		memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr,
		       ETH_ALEN);
	else
		eth_zero_addr(adapter->vfinfo[vf].vf_mac_addresses);

	return retval;
}

int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	unsigned int vfn = (event_mask & 0x3f);
	bool enable = ((event_mask & 0x10000000U) != 0);

	if (enable)
		eth_zero_addr(adapter->vfinfo[vfn].vf_mac_addresses);

	return 0;
}
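/* Illustrative note, not part of the original file: event_mask packs the
 * VF number into its low 6 bits and an "enable" flag into bit 28, which is
 * why ixgbe_pci_sriov_enable() calls this as
 * ixgbe_vf_configuration(dev, (i | 0x10000000)); for VF 5 the argument is
 * 0x10000005, decoded above as vfn == 5 with enable == true.
 */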
static inline void ixgbe_write_qde(struct ixgbe_adapter *adapter, u32 vf,
				   u32 qde)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
	int i;

	for (i = vf * q_per_pool; i < ((vf + 1) * q_per_pool); i++) {
		u32 reg;

		/* flush previous write */
		IXGBE_WRITE_FLUSH(hw);

		/* indicate to hardware that we want to set drop enable */
		reg = IXGBE_QDE_WRITE | qde;
		reg |= i << IXGBE_QDE_IDX_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
	}
}

/**
 * ixgbe_set_vf_rx_tx - Set VF Rx and Tx enables
 * @adapter: Pointer to adapter struct
 * @vf: VF identifier
 *
 * Enable or disable the transmit and receive queues for the given VF
 * based on its link_enable setting.
 **/
static void ixgbe_set_vf_rx_tx(struct ixgbe_adapter *adapter, int vf)
{
	u32 reg_cur_tx, reg_cur_rx, reg_req_tx, reg_req_rx;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg_offset, vf_shift;

	vf_shift = vf % 32;
	reg_offset = vf / 32;

	reg_cur_tx = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
	reg_cur_rx = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));

	if (adapter->vfinfo[vf].link_enable) {
		reg_req_tx = reg_cur_tx | 1 << vf_shift;
		reg_req_rx = reg_cur_rx | 1 << vf_shift;
	} else {
		reg_req_tx = reg_cur_tx & ~(1 << vf_shift);
		reg_req_rx = reg_cur_rx & ~(1 << vf_shift);
	}

	/* The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs.
	 * For more info take a look at ixgbe_set_vf_lpe
	 */
	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
		struct net_device *dev = adapter->netdev;
		int pf_max_frame = dev->mtu + ETH_HLEN;

#if IS_ENABLED(CONFIG_FCOE)
		if (dev->fcoe_mtu)
			pf_max_frame = max_t(int, pf_max_frame,
					     IXGBE_FCOE_JUMBO_FRAME_SIZE);
#endif /* CONFIG_FCOE */

		if (pf_max_frame > ETH_FRAME_LEN)
			reg_req_rx = reg_cur_rx & ~(1 << vf_shift);
	}

	/* Enable/Disable particular VF */
	if (reg_cur_tx != reg_req_tx)
		IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg_req_tx);
	if (reg_cur_rx != reg_req_rx)
		IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg_req_rx);
}
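/* Illustrative note, not part of the original file: VFTE/VFRE are arrays
 * of 32-bit enable registers with one bit per pool, so a VF index is split
 * into a register offset and a bit position. For vf == 40 the code above
 * touches VFTE(1)/VFRE(1), bit 40 % 32 == 8. The same vf / 32 and vf % 32
 * decomposition appears in ixgbe_set_vf_lpe() and ixgbe_vf_reset_msg().
 */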
static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
{
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
	u32 reg, reg_offset, vf_shift;
	u32 msgbuf[4] = {0, 0, 0, 0};
	u8 *addr = (u8 *)(&msgbuf[1]);
	u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
	int i;

	e_info(probe, "VF Reset msg received from vf %d\n", vf);

	/* reset the filters for the device */
	ixgbe_vf_reset_event(adapter, vf);

	ixgbe_vf_clear_mbx(adapter, vf);

	/* set vf mac address */
	if (!is_zero_ether_addr(vf_mac))
		ixgbe_set_vf_mac(adapter, vf, vf_mac);

	vf_shift = vf % 32;
	reg_offset = vf / 32;

	/* force drop enable for all VF Rx queues */
	reg = IXGBE_QDE_ENABLE;
	if (adapter->vfinfo[vf].pf_vlan)
		reg |= IXGBE_QDE_HIDE_VLAN;

	ixgbe_write_qde(adapter, vf, reg);

	ixgbe_set_vf_rx_tx(adapter, vf);

	/* enable VF mailbox for further messages */
	adapter->vfinfo[vf].clear_to_send = true;

	/* Enable counting of spoofed packets in the SSVPC register */
	reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
	reg |= BIT(vf_shift);
	IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);

	/*
	 * Reset the VFs TDWBAL and TDWBAH registers
	 * which are not cleared by an FLR
	 */
	for (i = 0; i < q_per_pool; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBAHn(q_per_pool, vf, i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBALn(q_per_pool, vf, i), 0);
	}

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = IXGBE_VF_RESET;
	if (!is_zero_ether_addr(vf_mac) && adapter->vfinfo[vf].pf_set_mac) {
		msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
		memcpy(addr, vf_mac, ETH_ALEN);
	} else {
		msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
	}

	/*
	 * Piggyback the multicast filter type so VF can compute the
	 * correct vectors
	 */
	msgbuf[3] = hw->mac.mc_filter_type;
	ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);

	return 0;
}

static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
				 u32 *msgbuf, u32 vf)
{
	u8 *new_mac = ((u8 *)(&msgbuf[1]));

	if (!is_valid_ether_addr(new_mac)) {
		e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
		return -1;
	}

	if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted &&
	    !ether_addr_equal(adapter->vfinfo[vf].vf_mac_addresses, new_mac)) {
		e_warn(drv,
		       "VF %d attempted to override administratively set MAC address\n"
		       "Reload the VF driver to resume operations\n",
		       vf);
		return -1;
	}

	return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0;
}

static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
				 u32 *msgbuf, u32 vf)
{
	u32 add = FIELD_GET(IXGBE_VT_MSGINFO_MASK, msgbuf[0]);
	u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
	u8 tcs = adapter->hw_tcs;

	if (adapter->vfinfo[vf].pf_vlan || tcs) {
		e_warn(drv,
		       "VF %d attempted to override administratively set VLAN configuration\n"
		       "Reload the VF driver to resume operations\n",
		       vf);
		return -1;
	}

	/* VLAN 0 is a special case, don't allow it to be removed */
	if (!vid && !add)
		return 0;

	return ixgbe_set_vf_vlan(adapter, add, vid, vf);
}

static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
				    u32 *msgbuf, u32 vf)
{
	u8 *new_mac = ((u8 *)(&msgbuf[1]));
	int index = FIELD_GET(IXGBE_VT_MSGINFO_MASK, msgbuf[0]);
	int err;

	if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted &&
	    index > 0) {
		e_warn(drv,
		       "VF %d requested MACVLAN filter but is administratively denied\n",
		       vf);
		return -1;
	}

	/* A non-zero index indicates the VF is setting a filter */
	if (index) {
		if (!is_valid_ether_addr(new_mac)) {
			e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
			return -1;
		}

		/*
		 * If the VF is allowed to set MAC filters then turn off
		 * anti-spoofing to avoid false positives.
		 */
		if (adapter->vfinfo[vf].spoofchk_enabled) {
			struct ixgbe_hw *hw = &adapter->hw;

			hw->mac.ops.set_mac_anti_spoofing(hw, false, vf);
			hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
		}
	}

	err = ixgbe_set_vf_macvlan(adapter, vf, index, new_mac);
	if (err == -ENOSPC)
		e_warn(drv,
		       "VF %d has requested a MACVLAN filter but there is no space for it\n",
		       vf);

	return err < 0;
}
static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter,
				  u32 *msgbuf, u32 vf)
{
	int api = msgbuf[1];

	switch (api) {
	case ixgbe_mbox_api_10:
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_14:
		adapter->vfinfo[vf].vf_api = api;
		return 0;
	default:
		break;
	}

	e_dbg(drv, "VF %d requested unsupported api version %u\n", vf, api);

	return -1;
}

static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter,
			       u32 *msgbuf, u32 vf)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	unsigned int default_tc = 0;
	u8 num_tcs = adapter->hw_tcs;

	/* verify the PF is supporting the correct APIs */
	switch (adapter->vfinfo[vf].vf_api) {
	case ixgbe_mbox_api_20:
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_14:
		break;
	default:
		return -1;
	}

	/* only allow 1 Tx queue for bandwidth limiting */
	msgbuf[IXGBE_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
	msgbuf[IXGBE_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);

	/* if TCs > 1 determine which TC belongs to default user priority */
	if (num_tcs > 1)
		default_tc = netdev_get_prio_tc_map(dev, adapter->default_up);

	/* notify VF of need for VLAN tag stripping, and correct queue */
	if (num_tcs)
		msgbuf[IXGBE_VF_TRANS_VLAN] = num_tcs;
	else if (adapter->vfinfo[vf].pf_vlan || adapter->vfinfo[vf].pf_qos)
		msgbuf[IXGBE_VF_TRANS_VLAN] = 1;
	else
		msgbuf[IXGBE_VF_TRANS_VLAN] = 0;

	/* notify VF of default queue */
	msgbuf[IXGBE_VF_DEF_QUEUE] = default_tc;

	return 0;
}
static int ixgbe_get_vf_reta(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
{
	u32 i, j;
	u32 *out_buf = &msgbuf[1];
	const u8 *reta = adapter->rss_indir_tbl;
	u32 reta_size = ixgbe_rss_indir_tbl_entries(adapter);

	/* Check if operation is permitted */
	if (!adapter->vfinfo[vf].rss_query_enabled)
		return -EPERM;

	/* verify the PF is supporting the correct API */
	switch (adapter->vfinfo[vf].vf_api) {
	case ixgbe_mbox_api_14:
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_12:
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* This mailbox command is supported (required) only for 82599 and x540
	 * VFs which support up to 4 RSS queues. Therefore we will compress the
	 * RETA by saving only 2 bits from each entry. This way we will be able
	 * to transfer the whole RETA in a single mailbox operation.
	 */
	for (i = 0; i < reta_size / 16; i++) {
		out_buf[i] = 0;
		for (j = 0; j < 16; j++)
			out_buf[i] |= (u32)(reta[16 * i + j] & 0x3) << (2 * j);
	}

	return 0;
}
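/* Worked example (illustrative, not part of the original file): with 2
 * bits per RETA entry, 16 entries pack into each u32, so a 128-entry
 * table compresses to 8 dwords - comfortably inside a single mailbox
 * message. Entry 0 lands in bits 1:0 of out_buf[0], entry 17 in bits 3:2
 * of out_buf[1], and so on.
 */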
static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter,
				u32 *msgbuf, u32 vf)
{
	u32 *rss_key = &msgbuf[1];

	/* Check if the operation is permitted */
	if (!adapter->vfinfo[vf].rss_query_enabled)
		return -EPERM;

	/* verify the PF is supporting the correct API */
	switch (adapter->vfinfo[vf].vf_api) {
	case ixgbe_mbox_api_14:
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_12:
		break;
	default:
		return -EOPNOTSUPP;
	}

	memcpy(rss_key, adapter->rss_key, IXGBE_RSS_KEY_SIZE);

	return 0;
}

static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
				      u32 *msgbuf, u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int xcast_mode = msgbuf[1];
	u32 vmolr, fctrl, disable, enable;

	/* verify the PF is supporting the correct APIs */
	switch (adapter->vfinfo[vf].vf_api) {
	case ixgbe_mbox_api_12:
		/* promisc introduced in 1.3 version */
		if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
			return -EOPNOTSUPP;
		fallthrough;
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_14:
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (xcast_mode > IXGBEVF_XCAST_MODE_MULTI &&
	    !adapter->vfinfo[vf].trusted) {
		xcast_mode = IXGBEVF_XCAST_MODE_MULTI;
	}

	if (adapter->vfinfo[vf].xcast_mode == xcast_mode)
		goto out;

	switch (xcast_mode) {
	case IXGBEVF_XCAST_MODE_NONE:
		disable = IXGBE_VMOLR_ROMPE |
			  IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
		enable = IXGBE_VMOLR_BAM;
		break;
	case IXGBEVF_XCAST_MODE_MULTI:
		disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
		enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE;
		break;
	case IXGBEVF_XCAST_MODE_ALLMULTI:
		disable = IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
		enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE;
		break;
	case IXGBEVF_XCAST_MODE_PROMISC:
		if (hw->mac.type <= ixgbe_mac_82599EB)
			return -EOPNOTSUPP;

		fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
		if (!(fctrl & IXGBE_FCTRL_UPE)) {
			/* VF promisc requires PF in promisc */
			e_warn(drv,
			       "Enabling VF promisc requires PF in promisc\n");
			return -EPERM;
		}

		disable = IXGBE_VMOLR_VPE;
		enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
			 IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE;
		break;
	default:
		return -EOPNOTSUPP;
	}

	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
	vmolr &= ~disable;
	vmolr |= enable;
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

	adapter->vfinfo[vf].xcast_mode = xcast_mode;

out:
	msgbuf[1] = xcast_mode;

	return 0;
}

static int ixgbe_get_vf_link_state(struct ixgbe_adapter *adapter,
				   u32 *msgbuf, u32 vf)
{
	u32 *link_state = &msgbuf[1];

	/* verify the PF is supporting the correct API */
	switch (adapter->vfinfo[vf].vf_api) {
	case ixgbe_mbox_api_12:
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_14:
		break;
	default:
		return -EOPNOTSUPP;
	}

	*link_state = adapter->vfinfo[vf].link_enable;

	return 0;
}

static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
{
	u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
	struct ixgbe_hw *hw = &adapter->hw;
	int retval;

	retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);

	if (retval) {
		pr_err("Error receiving message from VF\n");
		return retval;
	}

	/* this is a message we already processed, do nothing */
	if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
		return 0;

	/* flush the ack before we write any messages back */
	IXGBE_WRITE_FLUSH(hw);

	if (msgbuf[0] == IXGBE_VF_RESET)
		return ixgbe_vf_reset_msg(adapter, vf);

	/*
	 * until the vf completes a virtual function reset it should not be
	 * allowed to start any configuration.
	 */
	if (!adapter->vfinfo[vf].clear_to_send) {
		msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
		ixgbe_write_mbx(hw, msgbuf, 1, vf);
		return 0;
	}

	switch ((msgbuf[0] & 0xFFFF)) {
	case IXGBE_VF_SET_MAC_ADDR:
		retval = ixgbe_set_vf_mac_addr(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_SET_MULTICAST:
		retval = ixgbe_set_vf_multicasts(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_SET_VLAN:
		retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_SET_LPE:
		retval = ixgbe_set_vf_lpe(adapter, msgbuf[1], vf);
		break;
	case IXGBE_VF_SET_MACVLAN:
		retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_API_NEGOTIATE:
		retval = ixgbe_negotiate_vf_api(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_GET_QUEUES:
		retval = ixgbe_get_vf_queues(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_GET_RETA:
		retval = ixgbe_get_vf_reta(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_GET_RSS_KEY:
		retval = ixgbe_get_vf_rss_key(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_UPDATE_XCAST_MODE:
		retval = ixgbe_update_vf_xcast_mode(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_GET_LINK_STATE:
		retval = ixgbe_get_vf_link_state(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_IPSEC_ADD:
		retval = ixgbe_ipsec_vf_add_sa(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_IPSEC_DEL:
		retval = ixgbe_ipsec_vf_del_sa(adapter, msgbuf, vf);
		break;
	default:
		e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
		retval = -EIO;
		break;
	}

	/* notify the VF of the results of what it sent us */
	if (retval)
		msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;

	msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;

	ixgbe_write_mbx(hw, msgbuf, mbx_size, vf);

	return retval;
}
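/* Illustrative note, not part of the original file: word 0 of every
 * mailbox message carries the command in its low 16 bits (hence the
 * "msgbuf[0] & 0xFFFF" switch above) with the ACK/NACK/CTS status flags
 * in the upper bits, so the same word can be echoed back to the VF with
 * only the status bits OR'd in.
 */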
static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 msg = IXGBE_VT_MSGTYPE_NACK;

	/* if device isn't clear to send it shouldn't be reading either */
	if (!adapter->vfinfo[vf].clear_to_send)
		ixgbe_write_mbx(hw, &msg, 1, vf);
}

/**
 * ixgbe_check_mdd_event - check for MDD event on all VFs
 * @adapter: pointer to ixgbe adapter
 *
 * Return: true if there is a VF on which an MDD event occurred, false
 * otherwise.
 */
bool ixgbe_check_mdd_event(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	DECLARE_BITMAP(vf_bitmap, 64);
	bool ret = false;
	int i;

	if (!hw->mac.ops.handle_mdd)
		return false;

	/* Did we have a malicious event */
	bitmap_zero(vf_bitmap, 64);
	hw->mac.ops.handle_mdd(hw, vf_bitmap);

	/* Log any blocked queues and release lock */
	for_each_set_bit(i, vf_bitmap, 64) {
		dev_warn(&adapter->pdev->dev,
			 "Malicious event on VF %d tx:%x rx:%x\n", i,
			 IXGBE_READ_REG(hw, IXGBE_LVMMC_TX),
			 IXGBE_READ_REG(hw, IXGBE_LVMMC_RX));

		if (hw->mac.ops.restore_mdd_vf) {
			u32 ping;

			hw->mac.ops.restore_mdd_vf(hw, i);

			/* get the VF to rebuild its queues */
			adapter->vfinfo[i].clear_to_send = 0;
			ping = IXGBE_PF_CONTROL_MSG |
			       IXGBE_VT_MSGTYPE_CTS;
			ixgbe_write_mbx(hw, &ping, 1, i);
		}

		ret = true;
	}

	return ret;
}

void ixgbe_msg_task(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned long flags;
	u32 vf;

	ixgbe_check_mdd_event(adapter);

	spin_lock_irqsave(&adapter->vfs_lock, flags);
	for (vf = 0; vf < adapter->num_vfs; vf++) {
		/* process any reset requests */
		if (!ixgbe_check_for_rst(hw, vf))
			ixgbe_vf_reset_event(adapter, vf);

		/* process any messages pending */
		if (!ixgbe_check_for_msg(hw, vf))
			ixgbe_rcv_msg_from_vf(adapter, vf);

		/* process any acks */
		if (!ixgbe_check_for_ack(hw, vf))
			ixgbe_rcv_ack_from_vf(adapter, vf);
	}
	spin_unlock_irqrestore(&adapter->vfs_lock, flags);
}

static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ping;

	ping = IXGBE_PF_CONTROL_MSG;
	if (adapter->vfinfo[vf].clear_to_send)
		ping |= IXGBE_VT_MSGTYPE_CTS;
	ixgbe_write_mbx(hw, &ping, 1, vf);
}

void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ping;
	int i;

	for (i = 0; i < adapter->num_vfs; i++) {
		ping = IXGBE_PF_CONTROL_MSG;
		if (adapter->vfinfo[i].clear_to_send)
			ping |= IXGBE_VT_MSGTYPE_CTS;
		ixgbe_write_mbx(hw, &ping, 1, i);
	}
}

/**
 * ixgbe_set_all_vfs - update vfs queues
 * @adapter: Pointer to adapter struct
 *
 * Update the transmit and receive queue settings for all vfs from their
 * stored link state
 **/
void ixgbe_set_all_vfs(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_vfs; i++)
		ixgbe_set_vf_link_state(adapter, i,
					adapter->vfinfo[i].link_state);
}
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
	int retval;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (is_valid_ether_addr(mac)) {
		dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n",
			 mac, vf);
		dev_info(&adapter->pdev->dev, "Reload the VF driver to make this change effective.");

		retval = ixgbe_set_vf_mac(adapter, vf, mac);
		if (retval >= 0) {
			adapter->vfinfo[vf].pf_set_mac = true;

			if (test_bit(__IXGBE_DOWN, &adapter->state)) {
				dev_warn(&adapter->pdev->dev, "The VF MAC address has been set, but the PF device is not up.\n");
				dev_warn(&adapter->pdev->dev, "Bring the PF device up before attempting to use the VF device.\n");
			}
		} else {
			dev_warn(&adapter->pdev->dev, "The VF MAC address was NOT set due to invalid or duplicate MAC address.\n");
		}
	} else if (is_zero_ether_addr(mac)) {
		unsigned char *vf_mac_addr =
					   adapter->vfinfo[vf].vf_mac_addresses;

		/* nothing to do */
		if (is_zero_ether_addr(vf_mac_addr))
			return 0;

		dev_info(&adapter->pdev->dev, "removing MAC on VF %d\n", vf);

		retval = ixgbe_del_mac_filter(adapter, vf_mac_addr, vf);
		if (retval >= 0) {
			adapter->vfinfo[vf].pf_set_mac = false;
			memcpy(vf_mac_addr, mac, ETH_ALEN);
		} else {
			dev_warn(&adapter->pdev->dev, "Could NOT remove the VF MAC address.\n");
		}
	} else {
		retval = -EINVAL;
	}

	return retval;
}

static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf,
				  u16 vlan, u8 qos)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
	if (err)
		goto out;

	/* Revoke tagless access via VLAN 0 */
	ixgbe_set_vf_vlan(adapter, false, 0, vf);

	ixgbe_set_vmvir(adapter, vlan, qos, vf);
	ixgbe_set_vmolr(hw, vf, false);

	/* enable hide vlan on X550 */
	if (hw->mac.type >= ixgbe_mac_X550)
		ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE |
				IXGBE_QDE_HIDE_VLAN);

	adapter->vfinfo[vf].pf_vlan = vlan;
	adapter->vfinfo[vf].pf_qos = qos;
	dev_info(&adapter->pdev->dev,
		 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
	if (test_bit(__IXGBE_DOWN, &adapter->state)) {
		dev_warn(&adapter->pdev->dev,
			 "The VF VLAN has been set, but the PF device is not up.\n");
		dev_warn(&adapter->pdev->dev,
			 "Bring the PF device up before attempting to use the VF device.\n");
	}

out:
	return err;
}

static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	err = ixgbe_set_vf_vlan(adapter, false,
				adapter->vfinfo[vf].pf_vlan, vf);
	/* Restore tagless access via VLAN 0 */
	ixgbe_set_vf_vlan(adapter, true, 0, vf);
	ixgbe_clear_vmvir(adapter, vf);
	ixgbe_set_vmolr(hw, vf, true);

	/* disable hide VLAN on X550 */
	if (hw->mac.type >= ixgbe_mac_X550)
		ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE);

	adapter->vfinfo[vf].pf_vlan = 0;
	adapter->vfinfo[vf].pf_qos = 0;

	return err;
}
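/* Usage note (illustrative, not part of the original file): the ndo below
 * is what an administrator reaches through iproute2, e.g.
 *	ip link set <pf-ifname> vf 0 vlan 100 qos 3
 * to set a port VLAN, or "vlan 0" to clear it again (the vlan/qos == 0
 * branch at the bottom of the function).
 */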
int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
			  u8 qos, __be16 vlan_proto)
{
	int err = 0;
	struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);

	if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
		return -EINVAL;
	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;
	if (vlan || qos) {
		/* Check if there is already a port VLAN set, if so
		 * we have to delete the old one first before we
		 * can set the new one. The usage model had
		 * previously assumed the user would delete the
		 * old port VLAN before setting a new one but this
		 * is not necessarily the case.
		 */
		if (adapter->vfinfo[vf].pf_vlan)
			err = ixgbe_disable_port_vlan(adapter, vf);
		if (err)
			goto out;
		err = ixgbe_enable_port_vlan(adapter, vf, vlan, qos);
	} else {
		err = ixgbe_disable_port_vlan(adapter, vf);
	}

out:
	return err;
}

int ixgbe_link_mbps(struct ixgbe_adapter *adapter)
{
	switch (adapter->link_speed) {
	case IXGBE_LINK_SPEED_100_FULL:
		return 100;
	case IXGBE_LINK_SPEED_1GB_FULL:
		return 1000;
	case IXGBE_LINK_SPEED_10GB_FULL:
		return 10000;
	default:
		return 0;
	}
}

static void ixgbe_set_vf_rate_limit(struct ixgbe_adapter *adapter, int vf)
{
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct ixgbe_hw *hw = &adapter->hw;
	u32 bcnrc_val = 0;
	u16 queue, queues_per_pool;
	u16 tx_rate = adapter->vfinfo[vf].tx_rate;

	if (tx_rate) {
		/* start with base link speed value */
		bcnrc_val = adapter->vf_rate_link_speed;

		/* Calculate the rate factor values to set */
		bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
		bcnrc_val /= tx_rate;

		/* clear everything but the rate factor */
		bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
			     IXGBE_RTTBCNRC_RF_DEC_MASK;

		/* enable the rate scheduler */
		bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;
	}

	/*
	 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
	 * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported
	 * and 0x004 otherwise.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x4);
		break;
	case ixgbe_mac_X540:
		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x14);
		break;
	default:
		break;
	}

	/* determine how many queues per pool based on VMDq mask */
	queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);

	/* write value for all Tx queues belonging to VF */
	for (queue = 0; queue < queues_per_pool; queue++) {
		unsigned int reg_idx = (vf * queues_per_pool) + queue;

		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, reg_idx);
		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
	}
}
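/* Worked example (illustrative, not part of the original file): the rate
 * factor written to RTTBCNRC is link_speed / tx_rate in fixed point, with
 * IXGBE_RTTBCNRC_RF_INT_SHIFT fractional bits. Assuming a 14-bit shift,
 * capping a VF at 1000 Mbps on a 10000 Mbps link gives
 * (10000 << 14) / 1000 = 0x28000, i.e. a rate factor of exactly 10.0.
 */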
void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
{
	int i;

	/* VF Tx rate limit was not set */
	if (!adapter->vf_rate_link_speed)
		return;

	if (ixgbe_link_mbps(adapter) != adapter->vf_rate_link_speed) {
		adapter->vf_rate_link_speed = 0;
		dev_info(&adapter->pdev->dev,
			 "Link speed has been changed. VF Transmit rate is disabled\n");
	}

	for (i = 0; i < adapter->num_vfs; i++) {
		if (!adapter->vf_rate_link_speed)
			adapter->vfinfo[i].tx_rate = 0;

		ixgbe_set_vf_rate_limit(adapter, i);
	}
}

int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
			int max_tx_rate)
{
	struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
	int link_speed;

	/* verify VF is active */
	if (vf >= adapter->num_vfs)
		return -EINVAL;

	/* verify link is up */
	if (!adapter->link_up)
		return -EINVAL;

	/* verify we are linked at 10Gbps */
	link_speed = ixgbe_link_mbps(adapter);
	if (link_speed != 10000)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	/* rate limit cannot be less than 10Mbs or greater than link speed */
	if (max_tx_rate && ((max_tx_rate <= 10) || (max_tx_rate > link_speed)))
		return -EINVAL;

	/* store values */
	adapter->vf_rate_link_speed = link_speed;
	adapter->vfinfo[vf].tx_rate = max_tx_rate;

	/* update hardware configuration */
	ixgbe_set_vf_rate_limit(adapter, vf);

	return 0;
}

int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
{
	struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	adapter->vfinfo[vf].spoofchk_enabled = setting;

	/* configure MAC spoofing */
	hw->mac.ops.set_mac_anti_spoofing(hw, setting, vf);

	/* configure VLAN spoofing */
	hw->mac.ops.set_vlan_anti_spoofing(hw, setting, vf);

	/* Ensure LLDP and FC is set for Ethertype Antispoofing if we will be
	 * calling set_ethertype_anti_spoofing for each VF in loop below
	 */
	if (hw->mac.ops.set_ethertype_anti_spoofing) {
		IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
				(IXGBE_ETQF_FILTER_EN |
				 IXGBE_ETQF_TX_ANTISPOOF |
				 ETH_P_LLDP));

		IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC),
				(IXGBE_ETQF_FILTER_EN |
				 IXGBE_ETQF_TX_ANTISPOOF |
				 ETH_P_PAUSE));

		hw->mac.ops.set_ethertype_anti_spoofing(hw, setting, vf);
	}

	return 0;
}

/**
 * ixgbe_set_vf_link_state - Set link state
 * @adapter: Pointer to adapter struct
 * @vf: VF identifier
 * @state: required link state
 *
 * Set a link force state on/off a single vf
 **/
void ixgbe_set_vf_link_state(struct ixgbe_adapter *adapter, int vf, int state)
{
	adapter->vfinfo[vf].link_state = state;

	switch (state) {
	case IFLA_VF_LINK_STATE_AUTO:
		if (test_bit(__IXGBE_DOWN, &adapter->state))
			adapter->vfinfo[vf].link_enable = false;
		else
			adapter->vfinfo[vf].link_enable = true;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		adapter->vfinfo[vf].link_enable = true;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		adapter->vfinfo[vf].link_enable = false;
		break;
	}

	ixgbe_set_vf_rx_tx(adapter, vf);

	/* restart the VF */
	adapter->vfinfo[vf].clear_to_send = false;
	ixgbe_ping_vf(adapter, vf);
}
/**
 * ixgbe_ndo_set_vf_link_state - Set link state
 * @netdev: network interface device structure
 * @vf: VF identifier
 * @state: required link state
 *
 * Set the link state of a specified VF, regardless of physical link state
 **/
int ixgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state)
{
	struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
	int ret = 0;

	if (vf < 0 || vf >= adapter->num_vfs) {
		dev_err(&adapter->pdev->dev,
			"NDO set VF link - invalid VF identifier %d\n", vf);
		return -EINVAL;
	}

	switch (state) {
	case IFLA_VF_LINK_STATE_ENABLE:
		dev_info(&adapter->pdev->dev,
			 "NDO set VF %d link state %d - not supported\n",
			 vf, state);
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		dev_info(&adapter->pdev->dev,
			 "NDO set VF %d link state disable\n", vf);
		ixgbe_set_vf_link_state(adapter, vf, state);
		break;
	case IFLA_VF_LINK_STATE_AUTO:
		dev_info(&adapter->pdev->dev,
			 "NDO set VF %d link state auto\n", vf);
		ixgbe_set_vf_link_state(adapter, vf, state);
		break;
	default:
		dev_err(&adapter->pdev->dev,
			"NDO set VF %d - invalid link state %d\n", vf, state);
		ret = -EINVAL;
	}

	return ret;
}

int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
				  bool setting)
{
	struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);

	/* This operation is currently supported only for 82599 and x540
	 * devices.
	 */
	if (adapter->hw.mac.type < ixgbe_mac_82599EB ||
	    adapter->hw.mac.type >= ixgbe_mac_X550)
		return -EOPNOTSUPP;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	adapter->vfinfo[vf].rss_query_enabled = setting;

	return 0;
}

int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
{
	struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	/* nothing to do */
	if (adapter->vfinfo[vf].trusted == setting)
		return 0;

	adapter->vfinfo[vf].trusted = setting;

	/* reset VF to reconfigure features */
	adapter->vfinfo[vf].clear_to_send = false;
	ixgbe_ping_vf(adapter, vf);

	e_info(drv, "VF %u is %strusted\n", vf, setting ? "" : "not ");

	return 0;
}

int ixgbe_ndo_get_vf_config(struct net_device *netdev,
			    int vf, struct ifla_vf_info *ivi)
{
	struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	ivi->vf = vf;
	memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
	ivi->max_tx_rate = adapter->vfinfo[vf].tx_rate;
	ivi->min_tx_rate = 0;
	ivi->vlan = adapter->vfinfo[vf].pf_vlan;
	ivi->qos = adapter->vfinfo[vf].pf_qos;
	ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled;
	ivi->rss_query_en = adapter->vfinfo[vf].rss_query_enabled;
	ivi->trusted = adapter->vfinfo[vf].trusted;
	ivi->linkstate = adapter->vfinfo[vf].link_state;
	return 0;
}