/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/pci.h>

/* Local includes */
#include "i40e.h"
#include "i40e_diag.h"
#if IS_ENABLED(CONFIG_VXLAN)
#include <net/vxlan.h>
#endif
#if IS_ENABLED(CONFIG_GENEVE)
#include <net/geneve.h>
#endif

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
			"Intel(R) Ethernet Connection XL710 Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 4
#define DRV_VERSION_BUILD 25
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	     __stringify(DRV_VERSION_MINOR) "." \
	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";

/* forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
			      u16 rss_table_size, u16 rss_size);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);

/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40e_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static struct workqueue_struct *i40e_wq;

/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			    u64 size, u32 alignment)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
				      &mem->pa, GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
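	/* editor's note: clear the tracking struct after the free so the
	 * shared code can't double-free or reuse a stale va/pa pair
	 */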
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}

/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			     u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;
	mem->size = 0;

	return 0;
}

/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%p needed=%d id=0x%04x\n",
			 pile, needed, id);
		return -EINVAL;
	}

	/* start the linear search with an imperfect hint */
	i = pile->search_hint;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			pile->search_hint = i + j;
			break;
		}

		/* not enough, so skip over it and continue looking */
		i += j;
	}

	return ret;
}

/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	int i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	if (count && index < pile->search_hint)
		pile->search_hint = index;

	return count;
}

/**
 * i40e_find_vsi_from_id - searches for the vsi with the given id
 * @pf: the pf structure to search for the vsi
 * @id: id of the vsi it is searching for
 **/
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
	int i;

	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->id == id))
			return pf->vsi[i];

	return NULL;
}

/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/
void i40e_service_event_schedule(struct i40e_pf *pf)
{
	if (!test_bit(__I40E_DOWN, &pf->state) &&
	    !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
	    !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
		queue_work(i40e_wq, &pf->service_task);
}

/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
#ifdef I40E_FCOE
void i40e_tx_timeout(struct net_device *netdev)
#else
static void i40e_tx_timeout(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *tx_ring = NULL;
	unsigned int i, hung_queue = 0;
	u32 head, val;

	pf->tx_timeout_count++;

	/* find the stopped queue the same way the stack does */
	for (i = 0; i < netdev->num_tx_queues; i++) {
		struct netdev_queue *q;
		unsigned long trans_start;

		q = netdev_get_tx_queue(netdev, i);
		trans_start = q->trans_start ? : netdev->trans_start;
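		/* editor's note: "? :" is the GCC conditional with omitted
		 * middle operand, i.e. use q->trans_start unless it is zero,
		 * in which case fall back to the device-wide trans_start
		 */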
		if (netif_xmit_stopped(q) &&
		    time_after(jiffies,
			       (trans_start + netdev->watchdog_timeo))) {
			hung_queue = i;
			break;
		}
	}

	if (i == netdev->num_tx_queues) {
		netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
	} else {
		/* now that we have an index, find the tx_ring struct */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
				if (hung_queue ==
				    vsi->tx_rings[i]->queue_index) {
					tx_ring = vsi->tx_rings[i];
					break;
				}
			}
		}
	}

	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;  /* reset after some time */
	else if (time_before(jiffies,
		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
		return;   /* don't do any new action before the next timeout */

	if (tx_ring) {
		head = i40e_get_head(tx_ring);
		/* Read interrupt register */
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			val = rd32(&pf->hw,
			     I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
						 tx_ring->vsi->base_vector - 1));
		else
			val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

		netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
			    vsi->seid, hung_queue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use,
			    readl(tx_ring->tail), val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
		    pf->tx_timeout_recovery_level, hung_queue);

	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		break;
	}

	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}

/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}

/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 * @stats: stats structure to fill out
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
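 *
 * Editor's note: the packet/byte counters are summed from each ring's
 * u64_stats under rcu_read_lock() below; the error and drop fields are
 * copied from the VSI struct, which the watchdog refreshes.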
 **/
#ifdef I40E_FCOE
struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
#else
static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_ring *tx_ring, *rx_ring;
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	int i;

	if (test_bit(__I40E_DOWN, &vsi->state))
		return stats;

	if (!vsi->tx_rings)
		return stats;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
		if (!tx_ring)
			continue;

		do {
			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
			packets = tx_ring->stats.packets;
			bytes = tx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes += bytes;
		rx_ring = &tx_ring[1];

		do {
			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
			packets = rx_ring->stats.packets;
			bytes = rx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast = vsi_stats->multicast;
	stats->tx_errors = vsi_stats->tx_errors;
	stats->tx_dropped = vsi_stats->tx_dropped;
	stats->rx_errors = vsi_stats->rx_errors;
	stats->rx_dropped = vsi_stats->rx_dropped;
	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
	stats->rx_length_errors = vsi_stats->rx_length_errors;

	return stats;
}

/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
	struct rtnl_link_stats64 *ns;
	int i;

	if (!vsi)
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	memset(ns, 0, sizeof(*ns));
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings && vsi->rx_rings[0]) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
			memset(&vsi->tx_rings[i]->tx_stats, 0,
			       sizeof(vsi->tx_rings[i]->tx_stats));
		}
	}
	vsi->stat_offsets_loaded = false;
}

/**
 * i40e_pf_reset_stats - Reset all of the stats for the given PF
 * @pf: the PF to be reset
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	int i;

	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;

	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i]) {
			memset(&pf->veb[i]->stats, 0,
			       sizeof(pf->veb[i]->stats));
			memset(&pf->veb[i]->stats_offsets, 0,
			       sizeof(pf->veb[i]->stats_offsets));
			pf->veb[i]->stat_offsets_loaded = false;
		}
	}
}
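
/* Editor's illustration (not driver code): the 48-bit rollover handling
 * in i40e_stat_update48() below.  If the saved *offset is 0xFFFFFFFFF000
 * and a later raw read returns 0x100, the counter wrapped, so the stat
 * becomes (0x100 + BIT_ULL(48)) - 0xFFFFFFFFF000 = 0x1100, and the
 * result is then masked back down to 48 bits.
 */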
/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.  In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	if (hw->device_id == I40E_DEV_ID_QEMU) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + BIT_ULL(48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}

/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + BIT_ULL(32)) - *offset);
}

/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);
	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);

	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	vsi->stat_offsets_loaded = true;
}

/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	struct i40e_veb_tc_stats *veb_oes;
	struct i40e_veb_tc_stats *veb_es;
	int i, idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;
	veb_es = &veb->tc_stats;
	veb_oes = &veb->tc_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
				   I40E_GLVEBTC_RPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_packets[i],
				   &veb_es->tc_rx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
				   I40E_GLVEBTC_RBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_bytes[i],
				   &veb_es->tc_rx_bytes[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
				   I40E_GLVEBTC_TPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_packets[i],
				   &veb_es->tc_tx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
				   I40E_GLVEBTC_TBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_bytes[i],
				   &veb_es->tc_tx_bytes[i]);
	}
	veb->stat_offsets_loaded = true;
}

#ifdef I40E_FCOE
/**
 * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
 * @vsi: the VSI that is capable of doing FCoE
 **/
static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_fcoe_stats *ofs;
	struct i40e_fcoe_stats *fs;     /* device's FCoE stats */
	int idx;

	if (vsi->type != I40E_VSI_FCOE)
		return;

	idx = hw->pf_id + I40E_FCOE_PF_STAT_OFFSET;
	fs = &vsi->fcoe_stats;
	ofs = &vsi->fcoe_stats_offsets;

	i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
	i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
	i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_last_error, &fs->fcoe_last_error);
	i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);

	vsi->fcoe_stat_offsets_loaded = true;
}

#endif
/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it out here.
 **/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;   /* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	u32 tx_restart, tx_busy;
	u64 tx_lost_interrupt;
	struct i40e_ring *p;
	u32 rx_page, rx_buf;
	u64 bytes, packets;
	unsigned int start;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u16 q;

	if (test_bit(__I40E_DOWN, &vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
	tx_lost_interrupt = 0;
	rx_page = 0;
	rx_buf = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		/* locate Tx ring */
		p = ACCESS_ONCE(vsi->tx_rings[q]);

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;
		tx_linearize += p->tx_stats.tx_linearize;
		tx_force_wb += p->tx_stats.tx_force_wb;
		tx_lost_interrupt += p->tx_stats.tx_lost_interrupt;

		/* Rx queue is part of the same block as Tx queue */
		p = &p[1];
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->tx_linearize = tx_linearize;
	vsi->tx_force_wb = tx_force_wb;
	vsi->tx_lost_interrupt = tx_lost_interrupt;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	/* update netdev stats from eth stats */
	i40e_update_eth_stats(vsi);
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards;
	ns->rx_dropped = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* pull in a couple PF stats if this is the main vsi */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
}

/**
 * i40e_update_pf_stats - Update the PF statistics counters.
 * @pf: the PF to be updated
 **/
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u32 val;
	int i;

	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);

	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);

	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);

	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);

	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

	for (i = 0; i < 8; i++) {
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_rx[i],
				   &nsd->priority_xon_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_tx[i],
				   &nsd->priority_xon_tx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_tx[i],
				   &nsd->priority_xoff_tx[i]);
		i40e_stat_update32(hw,
				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_2_xoff[i],
				   &nsd->priority_xon_2_xoff[i]);
	}

	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);

	/* FDIR stats */
	i40e_stat_update32(hw,
		   I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
		   pf->stat_offsets_loaded,
		   &osd->fd_atr_match, &nsd->fd_atr_match);
	i40e_stat_update32(hw,
		   I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
		   pf->stat_offsets_loaded,
		   &osd->fd_sb_match, &nsd->fd_sb_match);
	i40e_stat_update32(hw,
		   I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
		   pf->stat_offsets_loaded,
		   &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);

	val = rd32(hw, I40E_PRTPM_EEE_STAT);
	nsd->tx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
	nsd->rx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
			   pf->stat_offsets_loaded,
			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
			   pf->stat_offsets_loaded,
			   &osd->rx_lpi_count, &nsd->rx_lpi_count);

	if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
	    !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
		nsd->fd_sb_status = true;
	else
		nsd->fd_sb_status = false;

	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
	    !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
		nsd->fd_atr_status = true;
	else
		nsd->fd_atr_status = false;

	pf->stat_offsets_loaded = true;
}

/**
 * i40e_update_stats - Update the various statistics counters.
 * @vsi: the VSI to be updated
 *
 * Update the various stats for this VSI and its related entities.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_update_pf_stats(pf);

	i40e_update_vsi_stats(vsi);
#ifdef I40E_FCOE
	i40e_update_fcoe_stats(vsi);
#endif
}

/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a VF filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL
 **/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
						u8 *macaddr, s16 vlan,
						bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (vlan == f->vlan) &&
		    (!is_vf || f->is_vf) &&
		    (!is_netdev || f->is_netdev))
			return f;
	}
	return NULL;
}

/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 * @is_vf: make sure it's a VF filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
				      bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (!is_vf || f->is_vf) &&
		    (!is_netdev || f->is_netdev))
			return f;
	}
	return NULL;
}
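
/* Editor's sketch (not driver code): the filter helpers below expect
 * mac_filter_list_lock to be held, so a typical caller looks like
 * i40e_set_mac() further down:
 *
 *	spin_lock_bh(&vsi->mac_filter_list_lock);
 *	f = i40e_add_filter(vsi, mac, I40E_VLAN_ANY, false, true);
 *	spin_unlock_bh(&vsi->mac_filter_list_lock);
 *	if (f)
 *		i40e_service_event_schedule(vsi->back);
 */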
/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;

	/* Only -1 for all the filters denotes not in vlan mode
	 * so we have to go through all the list in order to make sure
	 */
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (f->vlan >= 0 || vsi->info.pvid)
			return true;
	}

	return false;
}

/**
 * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 * @is_vf: true if it is a VF
 * @is_netdev: true if it is a netdev
 *
 * Goes through all the macvlan filters and adds a
 * macvlan filter for each unique vlan that already exists
 *
 * Returns first filter found on success, else NULL
 **/
struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
					     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (vsi->info.pvid)
			f->vlan = le16_to_cpu(vsi->info.pvid);
		if (!i40e_find_filter(vsi, macaddr, f->vlan,
				      is_vf, is_netdev)) {
			if (!i40e_add_filter(vsi, macaddr, f->vlan,
					     is_vf, is_netdev))
				return NULL;
		}
	}

	return list_first_entry_or_null(&vsi->mac_filter_list,
					struct i40e_mac_filter, list);
}

/**
 * i40e_del_mac_all_vlan - Remove a MAC filter from all VLANS
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be removed
 * @is_vf: true if it is a VF
 * @is_netdev: true if it is a netdev
 *
 * Removes a given MAC address from a VSI, regardless of VLAN
 *
 * Returns 0 for success, or error
 **/
int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,
			  bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f = NULL;
	int changed = 0;

	WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
	     "Missing mac_filter_list_lock\n");
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (is_vf == f->is_vf) &&
		    (is_netdev == f->is_netdev)) {
			f->counter--;
			f->changed = true;
			changed = 1;
		}
	}
	if (changed) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
		return 0;
	}
	return -ENOENT;
}

/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 * @macaddr: the MAC address
 *
 * Some older firmware configurations set up a default promiscuous VLAN
 * filter that needs to be removed.
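 *
 * Returns 0 on success, -EINVAL if called on a non-main VSI, or -ENOENT
 * if the admin queue remove command fails.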
 **/
static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
	struct i40e_aqc_remove_macvlan_element_data element;
	struct i40e_pf *pf = vsi->back;
	i40e_status ret;

	/* Only appropriate for the PF main VSI */
	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
	if (ret)
		return -ENOENT;

	return 0;
}

/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a VF filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL when no memory available.
 *
 * NOTE: This function is expected to be called with mac_filter_list_lock
 * being held.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					u8 *macaddr, s16 vlan,
					bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			goto add_filter_out;

		ether_addr_copy(f->macaddr, macaddr);
		f->vlan = vlan;
		f->changed = true;

		INIT_LIST_HEAD(&f->list);
		list_add_tail(&f->list, &vsi->mac_filter_list);
	}

	/* increment counter and add a new flag if needed */
	if (is_vf) {
		if (!f->is_vf) {
			f->is_vf = true;
			f->counter++;
		}
	} else if (is_netdev) {
		if (!f->is_netdev) {
			f->is_netdev = true;
			f->counter++;
		}
	} else {
		f->counter++;
	}

	/* changed tells sync_filters_subtask to
	 * push the filter down to the firmware
	 */
	if (f->changed) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}

add_filter_out:
	return f;
}

/**
 * i40e_del_filter - Remove a mac/vlan filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a VF filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * NOTE: This function is expected to be called with mac_filter_list_lock
 * being held.
 **/
void i40e_del_filter(struct i40e_vsi *vsi,
		     u8 *macaddr, s16 vlan,
		     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f || f->counter == 0)
		return;

	if (is_vf) {
		if (f->is_vf) {
			f->is_vf = false;
			f->counter--;
		}
	} else if (is_netdev) {
		if (f->is_netdev) {
			f->is_netdev = false;
			f->counter--;
		}
	} else {
		/* make sure we don't remove a filter in use by VF or netdev */
		int min_f = 0;

		min_f += (f->is_vf ? 1 : 0);
		min_f += (f->is_netdev ? 1 : 0);
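		/* editor's note: min_f is the reference floor; the counter
		 * must not drop below the references still held by the VF
		 * and netdev users of this filter
		 */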

		if (f->counter > min_f)
			f->counter--;
	}

	/* counter == 0 tells sync_filters_subtask to
	 * remove the filter from the firmware's list
	 */
	if (f->counter == 0) {
		f->changed = true;
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}
}

/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
#ifdef I40E_FCOE
int i40e_set_mac(struct net_device *netdev, void *p)
#else
static int i40e_set_mac(struct net_device *netdev, void *p)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct sockaddr *addr = p;
	struct i40e_mac_filter *f;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    addr->sa_data);
		return 0;
	}

	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
		netdev_info(netdev, "returning to hw mac address %pM\n",
			    hw->mac.addr);
	else
		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;

		ret = i40e_aq_mac_address_write(&vsi->back->hw,
						I40E_AQC_WRITE_TYPE_LAA_WOL,
						addr->sa_data, NULL);
		if (ret) {
			netdev_info(netdev,
				    "Addr change for Main VSI failed: %d\n",
				    ret);
			return -EADDRNOTAVAIL;
		}
	}

	if (ether_addr_equal(netdev->dev_addr, hw->mac.addr)) {
		struct i40e_aqc_remove_macvlan_element_data element;

		memset(&element, 0, sizeof(element));
		ether_addr_copy(element.mac_addr, netdev->dev_addr);
		element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
		i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
	} else {
		spin_lock_bh(&vsi->mac_filter_list_lock);
		i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
				false, false);
		spin_unlock_bh(&vsi->mac_filter_list_lock);
	}

	if (ether_addr_equal(addr->sa_data, hw->mac.addr)) {
		struct i40e_aqc_add_macvlan_element_data element;

		memset(&element, 0, sizeof(element));
		ether_addr_copy(element.mac_addr, hw->mac.addr);
		element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
		i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
	} else {
		spin_lock_bh(&vsi->mac_filter_list_lock);
		f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY,
				    false, false);
		if (f)
			f->is_laa = true;
		spin_unlock_bh(&vsi->mac_filter_list_lock);
	}

	ether_addr_copy(netdev->dev_addr, addr->sa_data);

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
	return 0;
}
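
/* Editor's illustration (not driver code): the qmap word built below packs
 * a TC's queue offset together with a power-of-2 queue count.  For example,
 * a TC at offset 8 with qcount 4 gives pow = 2, so
 * qmap = (8 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *	  (2 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT).
 */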
/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/
#ifdef I40E_FCOE
void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
			      struct i40e_vsi_context *ctxt,
			      u8 enabled_tc,
			      bool is_add)
#else
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
#endif
{
	struct i40e_pf *pf = vsi->back;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 0;
	u16 qcount;
	u8 offset;
	u16 qmap;
	int i;
	u16 num_tc_qps = 0;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;

	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & BIT(i)) /* TC is enabled */
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
	} else {
		/* At least TC0 is enabled in case of non-DCB case */
		numtc = 1;
	}

	vsi->tc_config.numtc = numtc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	/* Number of queues per enabled TC */
	/* In MFP case we can have a much lower count of MSIx
	 * vectors available and so we need to lower the used
	 * q count.
	 */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
	else
		qcount = vsi->alloc_queue_pairs;
	num_tc_qps = qcount / numtc;
	num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			/* TC is enabled */
			int pow, num_qps;

			switch (vsi->type) {
			case I40E_VSI_MAIN:
				qcount = min_t(int, pf->alloc_rss_size,
					       num_tc_qps);
				break;
#ifdef I40E_FCOE
			case I40E_VSI_FCOE:
				qcount = num_tc_qps;
				break;
#endif
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				qcount = num_tc_qps;
				WARN_ON(i != 0);
				break;
			}
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;

			/* find the next higher power-of-2 of num queue pairs */
			num_qps = qcount;
			pow = 0;
			while (num_qps && (BIT_ULL(pow) < qcount)) {
				pow++;
				num_qps >>= 1;
			}

			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += qcount;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset;
	if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
		if (vsi->req_queue_pairs > 0)
			vsi->num_queue_pairs = vsi->req_queue_pairs;
		else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_queue_pairs = pf->num_lan_msix;
	}

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		ctxt->info.mapping_flags |=
				cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
					cpu_to_le16(vsi->base_queue + i);
	} else {
		ctxt->info.mapping_flags |=
				cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}

/**
 * i40e_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
#ifdef I40E_FCOE
void i40e_set_rx_mode(struct net_device *netdev)
#else
static void i40e_set_rx_mode(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_vsi *vsi = np->vsi;
	struct netdev_hw_addr *uca;
	struct netdev_hw_addr *mca;
	struct netdev_hw_addr *ha;

	spin_lock_bh(&vsi->mac_filter_list_lock);

	/* add addr if not already in the filter list */
	netdev_for_each_uc_addr(uca, netdev) {
		if (!i40e_find_mac(vsi, uca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, uca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	netdev_for_each_mc_addr(mca, netdev) {
		if (!i40e_find_mac(vsi, mca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, mca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	/* remove filter if not in netdev list */
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {

		if (!f->is_netdev)
			continue;

		netdev_for_each_mc_addr(mca, netdev)
			if (ether_addr_equal(mca->addr, f->macaddr))
				goto bottom_of_search_loop;

		netdev_for_each_uc_addr(uca, netdev)
			if (ether_addr_equal(uca->addr, f->macaddr))
				goto bottom_of_search_loop;

		for_each_dev_addr(netdev, ha)
			if (ether_addr_equal(ha->addr, f->macaddr))
				goto bottom_of_search_loop;

		/* f->macaddr wasn't found in uc, mc, or ha list so delete it */
		i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, false, true);

bottom_of_search_loop:
		continue;
	}
	spin_unlock_bh(&vsi->mac_filter_list_lock);

	/* check for other flag changes */
	if (vsi->current_netdev_flags != vsi->netdev->flags) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
}

/**
 * i40e_mac_filter_entry_clone - Clones a MAC filter entry
 * @src: source MAC filter entry to be cloned
 *
 * Returns the pointer to newly cloned MAC filter entry or NULL
 * in case of error
 **/
static struct i40e_mac_filter *i40e_mac_filter_entry_clone(
					struct i40e_mac_filter *src)
{
	struct i40e_mac_filter *f;

	f = kzalloc(sizeof(*f), GFP_ATOMIC);
	if (!f)
		return NULL;
	*f = *src;

	INIT_LIST_HEAD(&f->list);

	return f;
}

/**
 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
 * @vsi: pointer to vsi struct
 * @from: Pointer to list which contains MAC filter entries - changes to
 *        those entries needs to be undone.
 *
 * MAC filter entries from list were slated to be removed from device.
 **/
static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
					 struct list_head *from)
{
	struct i40e_mac_filter *f, *ftmp;

	list_for_each_entry_safe(f, ftmp, from, list) {
		f->changed = true;
		/* Move the element back into MAC filter list */
		list_move_tail(&f->list, &vsi->mac_filter_list);
	}
}

/**
 * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
 * @vsi: pointer to vsi struct
 *
 * MAC filter entries from list were slated to be added to the device.
 **/
static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f, *ftmp;

	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
		if (!f->changed && f->counter)
			f->changed = true;
	}
}

/**
 * i40e_cleanup_add_list - Deletes the element from add list and release
 *			   memory
 * @add_list: Pointer to list which contains MAC filter entries
 **/
static void i40e_cleanup_add_list(struct list_head *add_list)
{
	struct i40e_mac_filter *f, *ftmp;

	list_for_each_entry_safe(f, ftmp, add_list, list) {
		list_del(&f->list);
		kfree(f);
	}
}

/**
 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
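 * The function takes mac_filter_list_lock itself while splicing pending
 * entries onto temporary lists, so callers must not hold that lock
 * (editor's note).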
1857 * 1858 * Returns 0 or error value 1859 **/ 1860 int i40e_sync_vsi_filters(struct i40e_vsi *vsi) 1861 { 1862 struct list_head tmp_del_list, tmp_add_list; 1863 struct i40e_mac_filter *f, *ftmp, *fclone; 1864 bool promisc_forced_on = false; 1865 bool add_happened = false; 1866 int filter_list_len = 0; 1867 u32 changed_flags = 0; 1868 i40e_status aq_ret = 0; 1869 bool err_cond = false; 1870 int retval = 0; 1871 struct i40e_pf *pf; 1872 int num_add = 0; 1873 int num_del = 0; 1874 int aq_err = 0; 1875 u16 cmd_flags; 1876 1877 /* empty array typed pointers, kcalloc later */ 1878 struct i40e_aqc_add_macvlan_element_data *add_list; 1879 struct i40e_aqc_remove_macvlan_element_data *del_list; 1880 1881 while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state)) 1882 usleep_range(1000, 2000); 1883 pf = vsi->back; 1884 1885 if (vsi->netdev) { 1886 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags; 1887 vsi->current_netdev_flags = vsi->netdev->flags; 1888 } 1889 1890 INIT_LIST_HEAD(&tmp_del_list); 1891 INIT_LIST_HEAD(&tmp_add_list); 1892 1893 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) { 1894 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED; 1895 1896 spin_lock_bh(&vsi->mac_filter_list_lock); 1897 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { 1898 if (!f->changed) 1899 continue; 1900 1901 if (f->counter != 0) 1902 continue; 1903 f->changed = false; 1904 1905 /* Move the element into temporary del_list */ 1906 list_move_tail(&f->list, &tmp_del_list); 1907 } 1908 1909 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { 1910 if (!f->changed) 1911 continue; 1912 1913 if (f->counter == 0) 1914 continue; 1915 f->changed = false; 1916 1917 /* Clone MAC filter entry and add into temporary list */ 1918 fclone = i40e_mac_filter_entry_clone(f); 1919 if (!fclone) { 1920 err_cond = true; 1921 break; 1922 } 1923 list_add_tail(&fclone->list, &tmp_add_list); 1924 } 1925 1926 /* if failed to clone MAC filter entry - undo */ 1927 if (err_cond) { 1928 i40e_undo_del_filter_entries(vsi, &tmp_del_list); 1929 i40e_undo_add_filter_entries(vsi); 1930 } 1931 spin_unlock_bh(&vsi->mac_filter_list_lock); 1932 1933 if (err_cond) { 1934 i40e_cleanup_add_list(&tmp_add_list); 1935 retval = -ENOMEM; 1936 goto out; 1937 } 1938 } 1939 1940 /* Now process 'del_list' outside the lock */ 1941 if (!list_empty(&tmp_del_list)) { 1942 int del_list_size; 1943 1944 filter_list_len = pf->hw.aq.asq_buf_size / 1945 sizeof(struct i40e_aqc_remove_macvlan_element_data); 1946 del_list_size = filter_list_len * 1947 sizeof(struct i40e_aqc_remove_macvlan_element_data); 1948 del_list = kzalloc(del_list_size, GFP_ATOMIC); 1949 if (!del_list) { 1950 i40e_cleanup_add_list(&tmp_add_list); 1951 1952 /* Undo VSI's MAC filter entry element updates */ 1953 spin_lock_bh(&vsi->mac_filter_list_lock); 1954 i40e_undo_del_filter_entries(vsi, &tmp_del_list); 1955 i40e_undo_add_filter_entries(vsi); 1956 spin_unlock_bh(&vsi->mac_filter_list_lock); 1957 retval = -ENOMEM; 1958 goto out; 1959 } 1960 1961 list_for_each_entry_safe(f, ftmp, &tmp_del_list, list) { 1962 cmd_flags = 0; 1963 1964 /* add to delete list */ 1965 ether_addr_copy(del_list[num_del].mac_addr, f->macaddr); 1966 del_list[num_del].vlan_tag = 1967 cpu_to_le16((u16)(f->vlan == 1968 I40E_VLAN_ANY ? 
				     0 : f->vlan));

			cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
			del_list[num_del].flags = cmd_flags;
			num_del++;

			/* flush a full buffer */
			if (num_del == filter_list_len) {
				aq_ret = i40e_aq_remove_macvlan(&pf->hw,
								vsi->seid,
								del_list,
								num_del,
								NULL);
				aq_err = pf->hw.aq.asq_last_status;
				num_del = 0;
				memset(del_list, 0, del_list_size);

				if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) {
					retval = -EIO;
					dev_err(&pf->pdev->dev,
						"ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n",
						i40e_stat_str(&pf->hw, aq_ret),
						i40e_aq_str(&pf->hw, aq_err));
				}
			}
			/* Release memory for MAC filter entries which were
			 * synced up with HW.
			 */
			list_del(&f->list);
			kfree(f);
		}

		if (num_del) {
			aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
							del_list, num_del,
							NULL);
			aq_err = pf->hw.aq.asq_last_status;
			num_del = 0;

			if (aq_ret && aq_err != I40E_AQ_RC_ENOENT)
				dev_info(&pf->pdev->dev,
					 "ignoring delete macvlan error, err %s aq_err %s\n",
					 i40e_stat_str(&pf->hw, aq_ret),
					 i40e_aq_str(&pf->hw, aq_err));
		}

		kfree(del_list);
		del_list = NULL;
	}

	if (!list_empty(&tmp_add_list)) {
		int add_list_size;

		/* do all the adds now */
		filter_list_len = pf->hw.aq.asq_buf_size /
			      sizeof(struct i40e_aqc_add_macvlan_element_data);
		add_list_size = filter_list_len *
			      sizeof(struct i40e_aqc_add_macvlan_element_data);
		add_list = kzalloc(add_list_size, GFP_ATOMIC);
		if (!add_list) {
			/* Purge element from temporary lists */
			i40e_cleanup_add_list(&tmp_add_list);

			/* Undo add filter entries from VSI MAC filter list */
			spin_lock_bh(&vsi->mac_filter_list_lock);
			i40e_undo_add_filter_entries(vsi);
			spin_unlock_bh(&vsi->mac_filter_list_lock);
			retval = -ENOMEM;
			goto out;
		}

		list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) {

			add_happened = true;
			cmd_flags = 0;

			/* add to add array */
			ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
			add_list[num_add].vlan_tag =
				cpu_to_le16(
				    (u16)(f->vlan == I40E_VLAN_ANY ?
0 : f->vlan)); 2049 add_list[num_add].queue_number = 0; 2050 2051 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH; 2052 add_list[num_add].flags = cpu_to_le16(cmd_flags); 2053 num_add++; 2054 2055 /* flush a full buffer */ 2056 if (num_add == filter_list_len) { 2057 aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, 2058 add_list, num_add, 2059 NULL); 2060 aq_err = pf->hw.aq.asq_last_status; 2061 num_add = 0; 2062 2063 if (aq_ret) 2064 break; 2065 memset(add_list, 0, add_list_size); 2066 } 2067 /* Entries from tmp_add_list were cloned from MAC 2068 * filter list, hence clean those cloned entries 2069 */ 2070 list_del(&f->list); 2071 kfree(f); 2072 } 2073 2074 if (num_add) { 2075 aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, 2076 add_list, num_add, NULL); 2077 aq_err = pf->hw.aq.asq_last_status; 2078 num_add = 0; 2079 } 2080 kfree(add_list); 2081 add_list = NULL; 2082 2083 if (add_happened && aq_ret && aq_err != I40E_AQ_RC_EINVAL) { 2084 retval = i40e_aq_rc_to_posix(aq_ret, aq_err); 2085 dev_info(&pf->pdev->dev, 2086 "add filter failed, err %s aq_err %s\n", 2087 i40e_stat_str(&pf->hw, aq_ret), 2088 i40e_aq_str(&pf->hw, aq_err)); 2089 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) && 2090 !test_bit(__I40E_FILTER_OVERFLOW_PROMISC, 2091 &vsi->state)) { 2092 promisc_forced_on = true; 2093 set_bit(__I40E_FILTER_OVERFLOW_PROMISC, 2094 &vsi->state); 2095 dev_info(&pf->pdev->dev, "promiscuous mode forced on\n"); 2096 } 2097 } 2098 } 2099 2100 /* check for changes in promiscuous modes */ 2101 if (changed_flags & IFF_ALLMULTI) { 2102 bool cur_multipromisc; 2103 2104 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI); 2105 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw, 2106 vsi->seid, 2107 cur_multipromisc, 2108 NULL); 2109 if (aq_ret) { 2110 retval = i40e_aq_rc_to_posix(aq_ret, 2111 pf->hw.aq.asq_last_status); 2112 dev_info(&pf->pdev->dev, 2113 "set multi promisc failed, err %s aq_err %s\n", 2114 i40e_stat_str(&pf->hw, aq_ret), 2115 i40e_aq_str(&pf->hw, 2116 pf->hw.aq.asq_last_status)); 2117 } 2118 } 2119 if ((changed_flags & IFF_PROMISC) || promisc_forced_on) { 2120 bool cur_promisc; 2121 2122 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) || 2123 test_bit(__I40E_FILTER_OVERFLOW_PROMISC, 2124 &vsi->state)); 2125 if ((vsi->type == I40E_VSI_MAIN) && 2126 (pf->lan_veb != I40E_NO_VEB) && 2127 !(pf->flags & I40E_FLAG_MFP_ENABLED)) { 2128 /* set defport ON for Main VSI instead of true promisc 2129 * this way we will get all unicast/multicast and VLAN 2130 * promisc behavior but will not get VF or VMDq traffic 2131 * replicated on the Main VSI. 
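			 * Toggling defport requires a PF reset, which is
			 * why a change in cur_promisc below only sets
			 * __I40E_PF_RESET_REQUESTED rather than programming
			 * the VSI directly.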
2132 */ 2133 if (pf->cur_promisc != cur_promisc) { 2134 pf->cur_promisc = cur_promisc; 2135 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); 2136 } 2137 } else { 2138 aq_ret = i40e_aq_set_vsi_unicast_promiscuous( 2139 &vsi->back->hw, 2140 vsi->seid, 2141 cur_promisc, NULL); 2142 if (aq_ret) { 2143 retval = 2144 i40e_aq_rc_to_posix(aq_ret, 2145 pf->hw.aq.asq_last_status); 2146 dev_info(&pf->pdev->dev, 2147 "set unicast promisc failed, err %d, aq_err %d\n", 2148 aq_ret, pf->hw.aq.asq_last_status); 2149 } 2150 aq_ret = i40e_aq_set_vsi_multicast_promiscuous( 2151 &vsi->back->hw, 2152 vsi->seid, 2153 cur_promisc, NULL); 2154 if (aq_ret) { 2155 retval = 2156 i40e_aq_rc_to_posix(aq_ret, 2157 pf->hw.aq.asq_last_status); 2158 dev_info(&pf->pdev->dev, 2159 "set multicast promisc failed, err %d, aq_err %d\n", 2160 aq_ret, pf->hw.aq.asq_last_status); 2161 } 2162 } 2163 aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw, 2164 vsi->seid, 2165 cur_promisc, NULL); 2166 if (aq_ret) { 2167 retval = i40e_aq_rc_to_posix(aq_ret, 2168 pf->hw.aq.asq_last_status); 2169 dev_info(&pf->pdev->dev, 2170 "set brdcast promisc failed, err %s, aq_err %s\n", 2171 i40e_stat_str(&pf->hw, aq_ret), 2172 i40e_aq_str(&pf->hw, 2173 pf->hw.aq.asq_last_status)); 2174 } 2175 } 2176 out: 2177 /* if something went wrong then set the changed flag so we try again */ 2178 if (retval) 2179 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; 2180 2181 clear_bit(__I40E_CONFIG_BUSY, &vsi->state); 2182 return retval; 2183 } 2184 2185 /** 2186 * i40e_sync_filters_subtask - Sync the VSI filter list with HW 2187 * @pf: board private structure 2188 **/ 2189 static void i40e_sync_filters_subtask(struct i40e_pf *pf) 2190 { 2191 int v; 2192 2193 if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC)) 2194 return; 2195 pf->flags &= ~I40E_FLAG_FILTER_SYNC; 2196 2197 for (v = 0; v < pf->num_alloc_vsi; v++) { 2198 if (pf->vsi[v] && 2199 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) { 2200 int ret = i40e_sync_vsi_filters(pf->vsi[v]); 2201 2202 if (ret) { 2203 /* come back and try again later */ 2204 pf->flags |= I40E_FLAG_FILTER_SYNC; 2205 break; 2206 } 2207 } 2208 } 2209 } 2210 2211 /** 2212 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit 2213 * @netdev: network interface device structure 2214 * @new_mtu: new value for maximum frame size 2215 * 2216 * Returns 0 on success, negative on failure 2217 **/ 2218 static int i40e_change_mtu(struct net_device *netdev, int new_mtu) 2219 { 2220 struct i40e_netdev_priv *np = netdev_priv(netdev); 2221 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; 2222 struct i40e_vsi *vsi = np->vsi; 2223 2224 /* MTU < 68 is an error and causes problems on some kernels */ 2225 if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER)) 2226 return -EINVAL; 2227 2228 netdev_info(netdev, "changing MTU from %d to %d\n", 2229 netdev->mtu, new_mtu); 2230 netdev->mtu = new_mtu; 2231 if (netif_running(netdev)) 2232 i40e_vsi_reinit_locked(vsi); 2233 i40e_notify_client_of_l2_param_changes(vsi); 2234 return 0; 2235 } 2236 2237 /** 2238 * i40e_ioctl - Access the hwtstamp interface 2239 * @netdev: network interface device structure 2240 * @ifr: interface request data 2241 * @cmd: ioctl command 2242 **/ 2243 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 2244 { 2245 struct i40e_netdev_priv *np = netdev_priv(netdev); 2246 struct i40e_pf *pf = np->vsi->back; 2247 2248 switch (cmd) { 2249 case SIOCGHWTSTAMP: 2250 return i40e_ptp_get_ts_config(pf, ifr); 2251 case SIOCSHWTSTAMP: 2252 return 
i40e_ptp_set_ts_config(pf, ifr); 2253 default: 2254 return -EOPNOTSUPP; 2255 } 2256 } 2257 2258 /** 2259 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI 2260 * @vsi: the vsi being adjusted 2261 **/ 2262 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi) 2263 { 2264 struct i40e_vsi_context ctxt; 2265 i40e_status ret; 2266 2267 if ((vsi->info.valid_sections & 2268 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) && 2269 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0)) 2270 return; /* already enabled */ 2271 2272 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 2273 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | 2274 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH; 2275 2276 ctxt.seid = vsi->seid; 2277 ctxt.info = vsi->info; 2278 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 2279 if (ret) { 2280 dev_info(&vsi->back->pdev->dev, 2281 "update vlan stripping failed, err %s aq_err %s\n", 2282 i40e_stat_str(&vsi->back->hw, ret), 2283 i40e_aq_str(&vsi->back->hw, 2284 vsi->back->hw.aq.asq_last_status)); 2285 } 2286 } 2287 2288 /** 2289 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI 2290 * @vsi: the vsi being adjusted 2291 **/ 2292 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi) 2293 { 2294 struct i40e_vsi_context ctxt; 2295 i40e_status ret; 2296 2297 if ((vsi->info.valid_sections & 2298 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) && 2299 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) == 2300 I40E_AQ_VSI_PVLAN_EMOD_MASK)) 2301 return; /* already disabled */ 2302 2303 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 2304 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | 2305 I40E_AQ_VSI_PVLAN_EMOD_NOTHING; 2306 2307 ctxt.seid = vsi->seid; 2308 ctxt.info = vsi->info; 2309 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 2310 if (ret) { 2311 dev_info(&vsi->back->pdev->dev, 2312 "update vlan stripping failed, err %s aq_err %s\n", 2313 i40e_stat_str(&vsi->back->hw, ret), 2314 i40e_aq_str(&vsi->back->hw, 2315 vsi->back->hw.aq.asq_last_status)); 2316 } 2317 } 2318 2319 /** 2320 * i40e_vlan_rx_register - Setup or shutdown vlan offload 2321 * @netdev: network interface to be adjusted 2322 * @features: netdev features to test if VLAN offload is enabled or not 2323 **/ 2324 static void i40e_vlan_rx_register(struct net_device *netdev, u32 features) 2325 { 2326 struct i40e_netdev_priv *np = netdev_priv(netdev); 2327 struct i40e_vsi *vsi = np->vsi; 2328 2329 if (features & NETIF_F_HW_VLAN_CTAG_RX) 2330 i40e_vlan_stripping_enable(vsi); 2331 else 2332 i40e_vlan_stripping_disable(vsi); 2333 } 2334 2335 /** 2336 * i40e_vsi_add_vlan - Add vsi membership for given vlan 2337 * @vsi: the vsi being configured 2338 * @vid: vlan id to be added (0 = untagged only , -1 = any) 2339 **/ 2340 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) 2341 { 2342 struct i40e_mac_filter *f, *add_f; 2343 bool is_netdev, is_vf; 2344 2345 is_vf = (vsi->type == I40E_VSI_SRIOV); 2346 is_netdev = !!(vsi->netdev); 2347 2348 /* Locked once because all functions invoked below iterates list*/ 2349 spin_lock_bh(&vsi->mac_filter_list_lock); 2350 2351 if (is_netdev) { 2352 add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid, 2353 is_vf, is_netdev); 2354 if (!add_f) { 2355 dev_info(&vsi->back->pdev->dev, 2356 "Could not add vlan filter %d for %pM\n", 2357 vid, vsi->netdev->dev_addr); 2358 spin_unlock_bh(&vsi->mac_filter_list_lock); 2359 return -ENOMEM; 2360 } 2361 } 2362 2363 list_for_each_entry(f, 
&vsi->mac_filter_list, list) {
		add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
		if (!add_f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add vlan filter %d for %pM\n",
				 vid, f->macaddr);
			spin_unlock_bh(&vsi->mac_filter_list_lock);
			return -ENOMEM;
		}
	}

	/* Now if we add a vlan tag, make sure to check if it is the first
	 * tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag"
	 * with 0, so we now accept untagged and specified tagged traffic
	 * (and not any tagged and untagged)
	 */
	if (vid > 0) {
		if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
						  I40E_VLAN_ANY,
						  is_vf, is_netdev)) {
			i40e_del_filter(vsi, vsi->netdev->dev_addr,
					I40E_VLAN_ANY, is_vf, is_netdev);
			add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
						is_vf, is_netdev);
			if (!add_f) {
				dev_info(&vsi->back->pdev->dev,
					 "Could not add filter 0 for %pM\n",
					 vsi->netdev->dev_addr);
				spin_unlock_bh(&vsi->mac_filter_list_lock);
				return -ENOMEM;
			}
		}
	}

	/* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
	if (vid > 0 && !vsi->info.pvid) {
		list_for_each_entry(f, &vsi->mac_filter_list, list) {
			if (!i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
					      is_vf, is_netdev))
				continue;
			i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
					is_vf, is_netdev);
			add_f = i40e_add_filter(vsi, f->macaddr,
						0, is_vf, is_netdev);
			if (!add_f) {
				dev_info(&vsi->back->pdev->dev,
					 "Could not add filter 0 for %pM\n",
					 f->macaddr);
				spin_unlock_bh(&vsi->mac_filter_list_lock);
				return -ENOMEM;
			}
		}
	}

	spin_unlock_bh(&vsi->mac_filter_list_lock);

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
	return 0;
}

/**
 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
 * @vsi: the vsi being configured
 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
 *
 * Return: 0 on success or negative otherwise
 **/
int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
{
	struct net_device *netdev = vsi->netdev;
	struct i40e_mac_filter *f, *add_f;
	bool is_vf, is_netdev;
	int filter_count = 0;

	is_vf = (vsi->type == I40E_VSI_SRIOV);
	is_netdev = !!(netdev);

	/* Locked once because all functions invoked below iterate the list */
	spin_lock_bh(&vsi->mac_filter_list_lock);

	if (is_netdev)
		i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);

	list_for_each_entry(f, &vsi->mac_filter_list, list)
		i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);

	/* go through all the filters for this VSI and if there is only
	 * vid == 0 it means there are no other filters, so vid 0 must
	 * be replaced with -1.
This signifies that we should from now 2455 * on accept any traffic (with any tag present, or untagged) 2456 */ 2457 list_for_each_entry(f, &vsi->mac_filter_list, list) { 2458 if (is_netdev) { 2459 if (f->vlan && 2460 ether_addr_equal(netdev->dev_addr, f->macaddr)) 2461 filter_count++; 2462 } 2463 2464 if (f->vlan) 2465 filter_count++; 2466 } 2467 2468 if (!filter_count && is_netdev) { 2469 i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev); 2470 f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, 2471 is_vf, is_netdev); 2472 if (!f) { 2473 dev_info(&vsi->back->pdev->dev, 2474 "Could not add filter %d for %pM\n", 2475 I40E_VLAN_ANY, netdev->dev_addr); 2476 spin_unlock_bh(&vsi->mac_filter_list_lock); 2477 return -ENOMEM; 2478 } 2479 } 2480 2481 if (!filter_count) { 2482 list_for_each_entry(f, &vsi->mac_filter_list, list) { 2483 i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev); 2484 add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY, 2485 is_vf, is_netdev); 2486 if (!add_f) { 2487 dev_info(&vsi->back->pdev->dev, 2488 "Could not add filter %d for %pM\n", 2489 I40E_VLAN_ANY, f->macaddr); 2490 spin_unlock_bh(&vsi->mac_filter_list_lock); 2491 return -ENOMEM; 2492 } 2493 } 2494 } 2495 2496 spin_unlock_bh(&vsi->mac_filter_list_lock); 2497 2498 /* schedule our worker thread which will take care of 2499 * applying the new filter changes 2500 */ 2501 i40e_service_event_schedule(vsi->back); 2502 return 0; 2503 } 2504 2505 /** 2506 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload 2507 * @netdev: network interface to be adjusted 2508 * @vid: vlan id to be added 2509 * 2510 * net_device_ops implementation for adding vlan ids 2511 **/ 2512 #ifdef I40E_FCOE 2513 int i40e_vlan_rx_add_vid(struct net_device *netdev, 2514 __always_unused __be16 proto, u16 vid) 2515 #else 2516 static int i40e_vlan_rx_add_vid(struct net_device *netdev, 2517 __always_unused __be16 proto, u16 vid) 2518 #endif 2519 { 2520 struct i40e_netdev_priv *np = netdev_priv(netdev); 2521 struct i40e_vsi *vsi = np->vsi; 2522 int ret = 0; 2523 2524 if (vid > 4095) 2525 return -EINVAL; 2526 2527 netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid); 2528 2529 /* If the network stack called us with vid = 0 then 2530 * it is asking to receive priority tagged packets with 2531 * vlan id 0. Our HW receives them by default when configured 2532 * to receive untagged packets so there is no need to add an 2533 * extra filter for vlan 0 tagged packets. 
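	 * (The vid < VLAN_N_VID check below only guards the active_vlans
	 * bitmap update; vid itself was already range-checked against
	 * 4095 above.)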
2534 */ 2535 if (vid) 2536 ret = i40e_vsi_add_vlan(vsi, vid); 2537 2538 if (!ret && (vid < VLAN_N_VID)) 2539 set_bit(vid, vsi->active_vlans); 2540 2541 return ret; 2542 } 2543 2544 /** 2545 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload 2546 * @netdev: network interface to be adjusted 2547 * @vid: vlan id to be removed 2548 * 2549 * net_device_ops implementation for removing vlan ids 2550 **/ 2551 #ifdef I40E_FCOE 2552 int i40e_vlan_rx_kill_vid(struct net_device *netdev, 2553 __always_unused __be16 proto, u16 vid) 2554 #else 2555 static int i40e_vlan_rx_kill_vid(struct net_device *netdev, 2556 __always_unused __be16 proto, u16 vid) 2557 #endif 2558 { 2559 struct i40e_netdev_priv *np = netdev_priv(netdev); 2560 struct i40e_vsi *vsi = np->vsi; 2561 2562 netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid); 2563 2564 /* return code is ignored as there is nothing a user 2565 * can do about failure to remove and a log message was 2566 * already printed from the other function 2567 */ 2568 i40e_vsi_kill_vlan(vsi, vid); 2569 2570 clear_bit(vid, vsi->active_vlans); 2571 2572 return 0; 2573 } 2574 2575 /** 2576 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up 2577 * @vsi: the vsi being brought back up 2578 **/ 2579 static void i40e_restore_vlan(struct i40e_vsi *vsi) 2580 { 2581 u16 vid; 2582 2583 if (!vsi->netdev) 2584 return; 2585 2586 i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features); 2587 2588 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) 2589 i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q), 2590 vid); 2591 } 2592 2593 /** 2594 * i40e_vsi_add_pvid - Add pvid for the VSI 2595 * @vsi: the vsi being adjusted 2596 * @vid: the vlan id to set as a PVID 2597 **/ 2598 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid) 2599 { 2600 struct i40e_vsi_context ctxt; 2601 i40e_status ret; 2602 2603 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 2604 vsi->info.pvid = cpu_to_le16(vid); 2605 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED | 2606 I40E_AQ_VSI_PVLAN_INSERT_PVID | 2607 I40E_AQ_VSI_PVLAN_EMOD_STR; 2608 2609 ctxt.seid = vsi->seid; 2610 ctxt.info = vsi->info; 2611 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 2612 if (ret) { 2613 dev_info(&vsi->back->pdev->dev, 2614 "add pvid failed, err %s aq_err %s\n", 2615 i40e_stat_str(&vsi->back->hw, ret), 2616 i40e_aq_str(&vsi->back->hw, 2617 vsi->back->hw.aq.asq_last_status)); 2618 return -ENOENT; 2619 } 2620 2621 return 0; 2622 } 2623 2624 /** 2625 * i40e_vsi_remove_pvid - Remove the pvid from the VSI 2626 * @vsi: the vsi being adjusted 2627 * 2628 * Just use the vlan_rx_register() service to put it back to normal 2629 **/ 2630 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi) 2631 { 2632 i40e_vlan_stripping_disable(vsi); 2633 2634 vsi->info.pvid = 0; 2635 } 2636 2637 /** 2638 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources 2639 * @vsi: ptr to the VSI 2640 * 2641 * If this function returns with an error, then it's possible one or 2642 * more of the rings is populated (while the rest are not). It is the 2643 * callers duty to clean those orphaned rings. 
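 *
 * A minimal cleanup sketch for such a partial failure (assuming the
 * caller owns the VSI and none of the rings are in use yet) would be:
 *
 *	if (i40e_vsi_setup_tx_resources(vsi))
 *		i40e_vsi_free_tx_resources(vsi);
 *
 * i40e_vsi_free_tx_resources() is safe here because it skips rings
 * whose descriptor memory was never allocated.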
 *
 * Return 0 on success, negative on failure
 **/
static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
{
	int i, err = 0;

	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);

	return err;
}

/**
 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
 * @vsi: ptr to the VSI
 *
 * Free VSI's transmit software resources
 **/
static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
{
	int i;

	if (!vsi->tx_rings)
		return;

	for (i = 0; i < vsi->num_queue_pairs; i++)
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
			i40e_free_tx_resources(vsi->tx_rings[i]);
}

/**
 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
 * @vsi: ptr to the VSI
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * callers duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
{
	int i, err = 0;

	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
#ifdef I40E_FCOE
	i40e_fcoe_setup_ddp_resources(vsi);
#endif
	return err;
}

/**
 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
 * @vsi: ptr to the VSI
 *
 * Free all receive software resources
 **/
static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
{
	int i;

	if (!vsi->rx_rings)
		return;

	for (i = 0; i < vsi->num_queue_pairs; i++)
		if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
			i40e_free_rx_resources(vsi->rx_rings[i]);
#ifdef I40E_FCOE
	i40e_fcoe_free_ddp_resources(vsi);
#endif
}

/**
 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
 * @ring: The Tx ring to configure
 *
 * This enables/disables XPS for a given Tx descriptor ring
 * based on the TCs enabled for the VSI that ring belongs to.
 **/
static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	cpumask_var_t mask;

	if (!ring->q_vector || !ring->netdev)
		return;

	/* Single TC mode enable XPS */
	if (vsi->tc_config.numtc <= 1) {
		if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
			netif_set_xps_queue(ring->netdev,
					    &ring->q_vector->affinity_mask,
					    ring->queue_index);
	} else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
		/* Disable XPS to allow selection based on TC */
		bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
		netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
		free_cpumask_var(mask);
	}
}

/**
 * i40e_configure_tx_ring - Configure a transmit ring context
 * @ring: The Tx ring to configure
 *
 * Configure the Tx descriptor ring in the HMC context.
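 *
 * Note the hardware units involved: tx_ctx.base is expressed in
 * 128-byte increments, and the head writeback area is carved out
 * directly after the descriptor ring, i.e.:
 *
 *	tx_ctx.base = ring->dma / 128;
 *	tx_ctx.head_wb_addr = ring->dma +
 *			      ring->count * sizeof(struct i40e_tx_desc);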
 **/
static int i40e_configure_tx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	u16 pf_q = vsi->base_queue + ring->queue_index;
	struct i40e_hw *hw = &vsi->back->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	i40e_status err = 0;
	u32 qtx_ctl = 0;

	/* some ATR related tx ring init */
	if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
		ring->atr_sample_rate = vsi->back->atr_sample_rate;
		ring->atr_count = 0;
	} else {
		ring->atr_sample_rate = 0;
	}

	/* configure XPS */
	i40e_config_xps_tx_ring(ring);

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(tx_ctx));

	tx_ctx.new_context = 1;
	tx_ctx.base = (ring->dma / 128);
	tx_ctx.qlen = ring->count;
	tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
					       I40E_FLAG_FD_ATR_ENABLED));
#ifdef I40E_FCOE
	tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
#endif
	tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
	/* FDIR VSI tx ring can still use RS bit and writebacks */
	if (vsi->type != I40E_VSI_FDIR)
		tx_ctx.head_wb_ena = 1;
	tx_ctx.head_wb_addr = ring->dma +
			      (ring->count * sizeof(struct i40e_tx_desc));

	/* As part of VSI creation/update, FW allocates certain
	 * Tx arbitration queue sets for each TC enabled for
	 * the VSI. The FW returns the handles to these queue
	 * sets as part of the response buffer to Add VSI,
	 * Update VSI, etc. AQ commands. It is expected that
	 * these queue set handles be associated with the Tx
	 * queues by the driver as part of the TX queue context
	 * initialization. This has to be done regardless of
	 * DCB as by default everything is mapped to TC0.
	 */
	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
	tx_ctx.rdylist_act = 0;

	/* clear the context in the HMC */
	err = i40e_clear_lan_tx_queue_context(hw, pf_q);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* set the context in the HMC */
	err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* Now associate this queue with this PCI function */
	if (vsi->type == I40E_VSI_VMDQ2) {
		qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
		qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
			   I40E_QTX_CTL_VFVM_INDX_MASK;
	} else {
		qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
	}

	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
		    I40E_QTX_CTL_PF_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
	i40e_flush(hw);

	/* cache tail offset for easier writes later */
	ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);

	return 0;
}

/**
 * i40e_configure_rx_ring - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in the HMC context.
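 *
 * The buffer sizes handed to hardware are in shifted units: the byte
 * lengths from the VSI are divided down by I40E_RXQ_CTX_DBUFF_SHIFT
 * (data buffers) and I40E_RXQ_CTX_HBUFF_SHIFT (header buffers) before
 * they are written into the queue context.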
2852 **/ 2853 static int i40e_configure_rx_ring(struct i40e_ring *ring) 2854 { 2855 struct i40e_vsi *vsi = ring->vsi; 2856 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len; 2857 u16 pf_q = vsi->base_queue + ring->queue_index; 2858 struct i40e_hw *hw = &vsi->back->hw; 2859 struct i40e_hmc_obj_rxq rx_ctx; 2860 i40e_status err = 0; 2861 2862 ring->state = 0; 2863 2864 /* clear the context structure first */ 2865 memset(&rx_ctx, 0, sizeof(rx_ctx)); 2866 2867 ring->rx_buf_len = vsi->rx_buf_len; 2868 ring->rx_hdr_len = vsi->rx_hdr_len; 2869 2870 rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT; 2871 rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT; 2872 2873 rx_ctx.base = (ring->dma / 128); 2874 rx_ctx.qlen = ring->count; 2875 2876 if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) { 2877 set_ring_16byte_desc_enabled(ring); 2878 rx_ctx.dsize = 0; 2879 } else { 2880 rx_ctx.dsize = 1; 2881 } 2882 2883 rx_ctx.dtype = vsi->dtype; 2884 if (vsi->dtype) { 2885 set_ring_ps_enabled(ring); 2886 rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 | 2887 I40E_RX_SPLIT_IP | 2888 I40E_RX_SPLIT_TCP_UDP | 2889 I40E_RX_SPLIT_SCTP; 2890 } else { 2891 rx_ctx.hsplit_0 = 0; 2892 } 2893 2894 rx_ctx.rxmax = min_t(u16, vsi->max_frame, 2895 (chain_len * ring->rx_buf_len)); 2896 if (hw->revision_id == 0) 2897 rx_ctx.lrxqthresh = 0; 2898 else 2899 rx_ctx.lrxqthresh = 2; 2900 rx_ctx.crcstrip = 1; 2901 rx_ctx.l2tsel = 1; 2902 /* this controls whether VLAN is stripped from inner headers */ 2903 rx_ctx.showiv = 0; 2904 #ifdef I40E_FCOE 2905 rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE); 2906 #endif 2907 /* set the prefena field to 1 because the manual says to */ 2908 rx_ctx.prefena = 1; 2909 2910 /* clear the context in the HMC */ 2911 err = i40e_clear_lan_rx_queue_context(hw, pf_q); 2912 if (err) { 2913 dev_info(&vsi->back->pdev->dev, 2914 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n", 2915 ring->queue_index, pf_q, err); 2916 return -ENOMEM; 2917 } 2918 2919 /* set the context in the HMC */ 2920 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx); 2921 if (err) { 2922 dev_info(&vsi->back->pdev->dev, 2923 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n", 2924 ring->queue_index, pf_q, err); 2925 return -ENOMEM; 2926 } 2927 2928 /* cache tail for quicker writes, and clear the reg before use */ 2929 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q); 2930 writel(0, ring->tail); 2931 2932 if (ring_is_ps_enabled(ring)) { 2933 i40e_alloc_rx_headers(ring); 2934 i40e_alloc_rx_buffers_ps(ring, I40E_DESC_UNUSED(ring)); 2935 } else { 2936 i40e_alloc_rx_buffers_1buf(ring, I40E_DESC_UNUSED(ring)); 2937 } 2938 2939 return 0; 2940 } 2941 2942 /** 2943 * i40e_vsi_configure_tx - Configure the VSI for Tx 2944 * @vsi: VSI structure describing this set of rings and resources 2945 * 2946 * Configure the Tx VSI for operation. 2947 **/ 2948 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi) 2949 { 2950 int err = 0; 2951 u16 i; 2952 2953 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) 2954 err = i40e_configure_tx_ring(vsi->tx_rings[i]); 2955 2956 return err; 2957 } 2958 2959 /** 2960 * i40e_vsi_configure_rx - Configure the VSI for Rx 2961 * @vsi: the VSI being configured 2962 * 2963 * Configure the Rx VSI for operation. 
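 *
 * Picks max_frame from the netdev MTU (or I40E_RXBUFFER_2048 for the
 * default MTU), then derives the buffer lengths and the descriptor
 * split type from the PF's RX_1BUF/RX_PS flags before programming
 * each ring.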
2964 **/ 2965 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi) 2966 { 2967 int err = 0; 2968 u16 i; 2969 2970 if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN)) 2971 vsi->max_frame = vsi->netdev->mtu + ETH_HLEN 2972 + ETH_FCS_LEN + VLAN_HLEN; 2973 else 2974 vsi->max_frame = I40E_RXBUFFER_2048; 2975 2976 /* figure out correct receive buffer length */ 2977 switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED | 2978 I40E_FLAG_RX_PS_ENABLED)) { 2979 case I40E_FLAG_RX_1BUF_ENABLED: 2980 vsi->rx_hdr_len = 0; 2981 vsi->rx_buf_len = vsi->max_frame; 2982 vsi->dtype = I40E_RX_DTYPE_NO_SPLIT; 2983 break; 2984 case I40E_FLAG_RX_PS_ENABLED: 2985 vsi->rx_hdr_len = I40E_RX_HDR_SIZE; 2986 vsi->rx_buf_len = I40E_RXBUFFER_2048; 2987 vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT; 2988 break; 2989 default: 2990 vsi->rx_hdr_len = I40E_RX_HDR_SIZE; 2991 vsi->rx_buf_len = I40E_RXBUFFER_2048; 2992 vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS; 2993 break; 2994 } 2995 2996 #ifdef I40E_FCOE 2997 /* setup rx buffer for FCoE */ 2998 if ((vsi->type == I40E_VSI_FCOE) && 2999 (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) { 3000 vsi->rx_hdr_len = 0; 3001 vsi->rx_buf_len = I40E_RXBUFFER_3072; 3002 vsi->max_frame = I40E_RXBUFFER_3072; 3003 vsi->dtype = I40E_RX_DTYPE_NO_SPLIT; 3004 } 3005 3006 #endif /* I40E_FCOE */ 3007 /* round up for the chip's needs */ 3008 vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len, 3009 BIT_ULL(I40E_RXQ_CTX_HBUFF_SHIFT)); 3010 vsi->rx_buf_len = ALIGN(vsi->rx_buf_len, 3011 BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT)); 3012 3013 /* set up individual rings */ 3014 for (i = 0; i < vsi->num_queue_pairs && !err; i++) 3015 err = i40e_configure_rx_ring(vsi->rx_rings[i]); 3016 3017 return err; 3018 } 3019 3020 /** 3021 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC 3022 * @vsi: ptr to the VSI 3023 **/ 3024 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) 3025 { 3026 struct i40e_ring *tx_ring, *rx_ring; 3027 u16 qoffset, qcount; 3028 int i, n; 3029 3030 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { 3031 /* Reset the TC information */ 3032 for (i = 0; i < vsi->num_queue_pairs; i++) { 3033 rx_ring = vsi->rx_rings[i]; 3034 tx_ring = vsi->tx_rings[i]; 3035 rx_ring->dcb_tc = 0; 3036 tx_ring->dcb_tc = 0; 3037 } 3038 } 3039 3040 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) { 3041 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n))) 3042 continue; 3043 3044 qoffset = vsi->tc_config.tc_info[n].qoffset; 3045 qcount = vsi->tc_config.tc_info[n].qcount; 3046 for (i = qoffset; i < (qoffset + qcount); i++) { 3047 rx_ring = vsi->rx_rings[i]; 3048 tx_ring = vsi->tx_rings[i]; 3049 rx_ring->dcb_tc = n; 3050 tx_ring->dcb_tc = n; 3051 } 3052 } 3053 } 3054 3055 /** 3056 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI 3057 * @vsi: ptr to the VSI 3058 **/ 3059 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi) 3060 { 3061 if (vsi->netdev) 3062 i40e_set_rx_mode(vsi->netdev); 3063 } 3064 3065 /** 3066 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters 3067 * @vsi: Pointer to the targeted VSI 3068 * 3069 * This function replays the hlist on the hw where all the SB Flow Director 3070 * filters were saved. 
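 *
 * The list itself is left intact; each saved entry is simply pushed
 * back to the hardware via i40e_add_del_fdir(vsi, filter, true),
 * e.g. after a reset has cleared the on-chip filter table.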
 **/
static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
{
	struct i40e_fdir_filter *filter;
	struct i40e_pf *pf = vsi->back;
	struct hlist_node *node;

	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
		return;

	hlist_for_each_entry_safe(filter, node,
				  &pf->fdir_filter_list, fdir_node) {
		i40e_add_del_fdir(vsi, filter, true);
	}
}

/**
 * i40e_vsi_configure - Set up the VSI for action
 * @vsi: the VSI being configured
 **/
static int i40e_vsi_configure(struct i40e_vsi *vsi)
{
	int err;

	i40e_set_vsi_rx_mode(vsi);
	i40e_restore_vlan(vsi);
	i40e_vsi_config_dcb_rings(vsi);
	err = i40e_vsi_configure_tx(vsi);
	if (!err)
		err = i40e_vsi_configure_rx(vsi);

	return err;
}

/**
 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
 * @vsi: the VSI being configured
 **/
static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 vector;
	int i, q;
	u32 qp;

	/* The interrupt indexing is offset by 1 in the PFINT_ITRn
	 * and PFINT_LNKLSTn registers, e.g.:
	 *   PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
	 */
	qp = vsi->base_queue;
	vector = vsi->base_vector;
	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[i];

		q_vector->itr_countdown = ITR_COUNTDOWN_START;
		q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[i]->rx_itr_setting);
		q_vector->rx.latency_range = I40E_LOW_LATENCY;
		wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
		     q_vector->rx.itr);
		q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[i]->tx_itr_setting);
		q_vector->tx.latency_range = I40E_LOW_LATENCY;
		wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
		     q_vector->tx.itr);
		wr32(hw, I40E_PFINT_RATEN(vector - 1),
		     INTRL_USEC_TO_REG(vsi->int_rate_limit));

		/* Linked list for the queuepairs assigned to this vector */
		wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
		for (q = 0; q < q_vector->num_ringpairs; q++) {
			u32 val;

			val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
			      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
			      (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
			      (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
			      (I40E_QUEUE_TYPE_TX
			       << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);

			wr32(hw, I40E_QINT_RQCTL(qp), val);

			val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
			      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
			      (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
			      ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
			      (I40E_QUEUE_TYPE_RX
			       << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

			/* Terminate the linked list */
			if (q == (q_vector->num_ringpairs - 1))
				val |= (I40E_QUEUE_END_OF_LIST
					<< I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);

			wr32(hw, I40E_QINT_TQCTL(qp), val);
			qp++;
		}
	}

	i40e_flush(hw);
}

/**
 * i40e_enable_misc_int_causes - enable the non-queue interrupts
 * @pf: board private structure
 **/
static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	/* clear things first */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */

	val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | 3187 I40E_PFINT_ICR0_ENA_GRST_MASK | 3188 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | 3189 I40E_PFINT_ICR0_ENA_GPIO_MASK | 3190 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | 3191 I40E_PFINT_ICR0_ENA_VFLR_MASK | 3192 I40E_PFINT_ICR0_ENA_ADMINQ_MASK; 3193 3194 if (pf->flags & I40E_FLAG_IWARP_ENABLED) 3195 val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK; 3196 3197 if (pf->flags & I40E_FLAG_PTP) 3198 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; 3199 3200 wr32(hw, I40E_PFINT_ICR0_ENA, val); 3201 3202 /* SW_ITR_IDX = 0, but don't change INTENA */ 3203 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK | 3204 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK); 3205 3206 /* OTHER_ITR_IDX = 0 */ 3207 wr32(hw, I40E_PFINT_STAT_CTL0, 0); 3208 } 3209 3210 /** 3211 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW 3212 * @vsi: the VSI being configured 3213 **/ 3214 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi) 3215 { 3216 struct i40e_q_vector *q_vector = vsi->q_vectors[0]; 3217 struct i40e_pf *pf = vsi->back; 3218 struct i40e_hw *hw = &pf->hw; 3219 u32 val; 3220 3221 /* set the ITR configuration */ 3222 q_vector->itr_countdown = ITR_COUNTDOWN_START; 3223 q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[0]->rx_itr_setting); 3224 q_vector->rx.latency_range = I40E_LOW_LATENCY; 3225 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr); 3226 q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[0]->tx_itr_setting); 3227 q_vector->tx.latency_range = I40E_LOW_LATENCY; 3228 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr); 3229 3230 i40e_enable_misc_int_causes(pf); 3231 3232 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */ 3233 wr32(hw, I40E_PFINT_LNKLST0, 0); 3234 3235 /* Associate the queue pair to the vector and enable the queue int */ 3236 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | 3237 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | 3238 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); 3239 3240 wr32(hw, I40E_QINT_RQCTL(0), val); 3241 3242 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | 3243 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | 3244 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); 3245 3246 wr32(hw, I40E_QINT_TQCTL(0), val); 3247 i40e_flush(hw); 3248 } 3249 3250 /** 3251 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0 3252 * @pf: board private structure 3253 **/ 3254 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf) 3255 { 3256 struct i40e_hw *hw = &pf->hw; 3257 3258 wr32(hw, I40E_PFINT_DYN_CTL0, 3259 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); 3260 i40e_flush(hw); 3261 } 3262 3263 /** 3264 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0 3265 * @pf: board private structure 3266 * @clearpba: true when all pending interrupt events should be cleared 3267 **/ 3268 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba) 3269 { 3270 struct i40e_hw *hw = &pf->hw; 3271 u32 val; 3272 3273 val = I40E_PFINT_DYN_CTL0_INTENA_MASK | 3274 (clearpba ? 
	      I40E_PFINT_DYN_CTL0_CLEARPBA_MASK : 0) |
	      (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);

	wr32(hw, I40E_PFINT_DYN_CTL0, val);
	i40e_flush(hw);
}

/**
 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
{
	struct i40e_q_vector *q_vector = data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
 * @vsi: the VSI being configured
 * @basename: name for the vector
 *
 * Allocates MSI-X vectors and requests interrupts from the kernel.
 **/
static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
{
	int q_vectors = vsi->num_q_vectors;
	struct i40e_pf *pf = vsi->back;
	int base = vsi->base_vector;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	int vector, err;

	for (vector = 0; vector < q_vectors; vector++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[vector];

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "rx", rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "tx", tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(pf->msix_entries[base + vector].vector,
				  vsi->irq_handler,
				  0,
				  q_vector->name,
				  q_vector);
		if (err) {
			dev_info(&pf->pdev->dev,
				 "MSIX request_irq failed, error: %d\n", err);
			goto free_queue_irqs;
		}
		/* assign the mask for this irq */
		irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
				      &q_vector->affinity_mask);
	}

	vsi->irqs_ready = true;
	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
				      NULL);
		free_irq(pf->msix_entries[base + vector].vector,
			 vsi->q_vectors[vector]);
	}
	return err;
}

/**
 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
 * @vsi: the VSI being un-configured
 **/
static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	int i;

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
		wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
	}

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		for (i = vsi->base_vector;
		     i < (vsi->num_q_vectors + vsi->base_vector); i++)
			wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);

		i40e_flush(hw);
		for (i = 0; i < vsi->num_q_vectors; i++)
			synchronize_irq(pf->msix_entries[i + base].vector);
	} else {
		/* Legacy and MSI mode - this stops all interrupt handling */
		wr32(hw, I40E_PFINT_ICR0_ENA, 0);
		wr32(hw, I40E_PFINT_DYN_CTL0, 0);
		i40e_flush(hw);
		synchronize_irq(pf->pdev->irq);
	}
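
	/* At this point no queue interrupt can fire for this VSI and any
	 * in-flight handlers have completed (synchronize_irq() above).
	 */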
}

/**
 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
 * @vsi: the VSI being configured
 **/
static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int i;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		for (i = 0; i < vsi->num_q_vectors; i++)
			i40e_irq_dynamic_enable(vsi, i);
	} else {
		i40e_irq_dynamic_enable_icr0(pf, true);
	}

	i40e_flush(&pf->hw);
	return 0;
}

/**
 * i40e_stop_misc_vector - Stop the vector that handles non-queue events
 * @pf: board private structure
 **/
static void i40e_stop_misc_vector(struct i40e_pf *pf)
{
	/* Disable ICR 0 */
	wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
	i40e_flush(&pf->hw);
}

/**
 * i40e_intr - MSI/Legacy and non-queue interrupt handler
 * @irq: interrupt number
 * @data: pointer to the PF structure
 *
 * This is the handler used for all MSI/Legacy interrupts, and deals
 * with both queue and non-queue interrupts. This is also used in
 * MSIX mode to handle the non-queue interrupts.
 **/
static irqreturn_t i40e_intr(int irq, void *data)
{
	struct i40e_pf *pf = (struct i40e_pf *)data;
	struct i40e_hw *hw = &pf->hw;
	irqreturn_t ret = IRQ_NONE;
	u32 icr0, icr0_remaining;
	u32 val, ena_mask;

	icr0 = rd32(hw, I40E_PFINT_ICR0);
	ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/* if sharing a legacy IRQ, we might get called w/o an intr pending */
	if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
		goto enable_intr;

	/* if interrupt but no bits showing, must be SWINT */
	if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
	    (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
		pf->sw_int_count++;

	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
	    (ena_mask & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
		icr0 &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
		dev_info(&pf->pdev->dev, "cleared PE_CRITERR\n");
	}

	/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
		struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
		struct i40e_q_vector *q_vector = vsi->q_vectors[0];

		/* We do not have a way to disarm Queue causes while leaving
		 * interrupt enabled for all other causes, ideally
		 * interrupt should be disabled while we are in NAPI but
		 * this is not a performance path and napi_schedule()
		 * can deal with rescheduling.
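		 * (NAPI will re-enable the queue interrupt from its poll
		 * routine once the ring work is done.)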
3470 */ 3471 if (!test_bit(__I40E_DOWN, &pf->state)) 3472 napi_schedule_irqoff(&q_vector->napi); 3473 } 3474 3475 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) { 3476 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK; 3477 set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state); 3478 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n"); 3479 } 3480 3481 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) { 3482 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; 3483 set_bit(__I40E_MDD_EVENT_PENDING, &pf->state); 3484 } 3485 3486 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) { 3487 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK; 3488 set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state); 3489 } 3490 3491 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) { 3492 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) 3493 set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state); 3494 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK; 3495 val = rd32(hw, I40E_GLGEN_RSTAT); 3496 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK) 3497 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT; 3498 if (val == I40E_RESET_CORER) { 3499 pf->corer_count++; 3500 } else if (val == I40E_RESET_GLOBR) { 3501 pf->globr_count++; 3502 } else if (val == I40E_RESET_EMPR) { 3503 pf->empr_count++; 3504 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state); 3505 } 3506 } 3507 3508 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) { 3509 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK; 3510 dev_info(&pf->pdev->dev, "HMC error interrupt\n"); 3511 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n", 3512 rd32(hw, I40E_PFHMC_ERRORINFO), 3513 rd32(hw, I40E_PFHMC_ERRORDATA)); 3514 } 3515 3516 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) { 3517 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0); 3518 3519 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) { 3520 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; 3521 i40e_ptp_tx_hwtstamp(pf); 3522 } 3523 } 3524 3525 /* If a critical error is pending we have no choice but to reset the 3526 * device. 3527 * Report and mask out any remaining unexpected interrupts. 3528 */ 3529 icr0_remaining = icr0 & ena_mask; 3530 if (icr0_remaining) { 3531 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n", 3532 icr0_remaining); 3533 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) || 3534 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) || 3535 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) { 3536 dev_info(&pf->pdev->dev, "device will be reset\n"); 3537 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); 3538 i40e_service_event_schedule(pf); 3539 } 3540 ena_mask &= ~icr0_remaining; 3541 } 3542 ret = IRQ_HANDLED; 3543 3544 enable_intr: 3545 /* re-enable interrupt causes */ 3546 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask); 3547 if (!test_bit(__I40E_DOWN, &pf->state)) { 3548 i40e_service_event_schedule(pf); 3549 i40e_irq_dynamic_enable_icr0(pf, false); 3550 } 3551 3552 return ret; 3553 } 3554 3555 /** 3556 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes 3557 * @tx_ring: tx ring to clean 3558 * @budget: how many cleans we're allowed 3559 * 3560 * Returns true if there's any budget left (e.g. 
the clean is finished) 3561 **/ 3562 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget) 3563 { 3564 struct i40e_vsi *vsi = tx_ring->vsi; 3565 u16 i = tx_ring->next_to_clean; 3566 struct i40e_tx_buffer *tx_buf; 3567 struct i40e_tx_desc *tx_desc; 3568 3569 tx_buf = &tx_ring->tx_bi[i]; 3570 tx_desc = I40E_TX_DESC(tx_ring, i); 3571 i -= tx_ring->count; 3572 3573 do { 3574 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; 3575 3576 /* if next_to_watch is not set then there is no work pending */ 3577 if (!eop_desc) 3578 break; 3579 3580 /* prevent any other reads prior to eop_desc */ 3581 read_barrier_depends(); 3582 3583 /* if the descriptor isn't done, no work yet to do */ 3584 if (!(eop_desc->cmd_type_offset_bsz & 3585 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE))) 3586 break; 3587 3588 /* clear next_to_watch to prevent false hangs */ 3589 tx_buf->next_to_watch = NULL; 3590 3591 tx_desc->buffer_addr = 0; 3592 tx_desc->cmd_type_offset_bsz = 0; 3593 /* move past filter desc */ 3594 tx_buf++; 3595 tx_desc++; 3596 i++; 3597 if (unlikely(!i)) { 3598 i -= tx_ring->count; 3599 tx_buf = tx_ring->tx_bi; 3600 tx_desc = I40E_TX_DESC(tx_ring, 0); 3601 } 3602 /* unmap skb header data */ 3603 dma_unmap_single(tx_ring->dev, 3604 dma_unmap_addr(tx_buf, dma), 3605 dma_unmap_len(tx_buf, len), 3606 DMA_TO_DEVICE); 3607 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB) 3608 kfree(tx_buf->raw_buf); 3609 3610 tx_buf->raw_buf = NULL; 3611 tx_buf->tx_flags = 0; 3612 tx_buf->next_to_watch = NULL; 3613 dma_unmap_len_set(tx_buf, len, 0); 3614 tx_desc->buffer_addr = 0; 3615 tx_desc->cmd_type_offset_bsz = 0; 3616 3617 /* move us past the eop_desc for start of next FD desc */ 3618 tx_buf++; 3619 tx_desc++; 3620 i++; 3621 if (unlikely(!i)) { 3622 i -= tx_ring->count; 3623 tx_buf = tx_ring->tx_bi; 3624 tx_desc = I40E_TX_DESC(tx_ring, 0); 3625 } 3626 3627 /* update budget accounting */ 3628 budget--; 3629 } while (likely(budget)); 3630 3631 i += tx_ring->count; 3632 tx_ring->next_to_clean = i; 3633 3634 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) 3635 i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx); 3636 3637 return budget > 0; 3638 } 3639 3640 /** 3641 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring 3642 * @irq: interrupt number 3643 * @data: pointer to a q_vector 3644 **/ 3645 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data) 3646 { 3647 struct i40e_q_vector *q_vector = data; 3648 struct i40e_vsi *vsi; 3649 3650 if (!q_vector->tx.ring) 3651 return IRQ_HANDLED; 3652 3653 vsi = q_vector->tx.ring->vsi; 3654 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit); 3655 3656 return IRQ_HANDLED; 3657 } 3658 3659 /** 3660 * i40e_map_vector_to_qp - Assigns the queue pair to the vector 3661 * @vsi: the VSI being configured 3662 * @v_idx: vector index 3663 * @qp_idx: queue pair index 3664 **/ 3665 static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx) 3666 { 3667 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; 3668 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx]; 3669 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx]; 3670 3671 tx_ring->q_vector = q_vector; 3672 tx_ring->next = q_vector->tx.ring; 3673 q_vector->tx.ring = tx_ring; 3674 q_vector->tx.count++; 3675 3676 rx_ring->q_vector = q_vector; 3677 rx_ring->next = q_vector->rx.ring; 3678 q_vector->rx.ring = rx_ring; 3679 q_vector->rx.count++; 3680 } 3681 3682 /** 3683 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors 3684 * @vsi: the VSI being configured 3685 * 3686 * This 
function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code. Ideally, we'd have
 * one vector per queue pair, but on a constrained vector budget, we
 * group the queue pairs as "efficiently" as possible.
 **/
static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
{
	int qp_remaining = vsi->num_queue_pairs;
	int q_vectors = vsi->num_q_vectors;
	int num_ringpairs;
	int v_start = 0;
	int qp_idx = 0;

	/* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
	 * group them so there are multiple queues per vector.
	 * It is also important to go through all the vectors available to be
	 * sure that if we don't use all the vectors, the remaining vectors
	 * are cleared. This is especially important when decreasing the
	 * number of queues in use.
	 */
	for (; v_start < q_vectors; v_start++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];

		num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);

		q_vector->num_ringpairs = num_ringpairs;

		q_vector->rx.count = 0;
		q_vector->tx.count = 0;
		q_vector->rx.ring = NULL;
		q_vector->tx.ring = NULL;

		while (num_ringpairs--) {
			i40e_map_vector_to_qp(vsi, v_start, qp_idx);
			qp_idx++;
			qp_remaining--;
		}
	}
}

/**
 * i40e_vsi_request_irq - Request IRQ from the OS
 * @vsi: the VSI being configured
 * @basename: name for the vector
 **/
static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
{
	struct i40e_pf *pf = vsi->back;
	int err;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		err = i40e_vsi_request_irq_msix(vsi, basename);
	else if (pf->flags & I40E_FLAG_MSI_ENABLED)
		err = request_irq(pf->pdev->irq, i40e_intr, 0,
				  pf->int_name, pf);
	else
		err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
				  pf->int_name, pf);

	if (err)
		dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);

	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * i40e_netpoll - A Polling 'interrupt' handler
 * @netdev: network interface device structure
 *
 * This is used by netconsole to send skbs without having to re-enable
 * interrupts. It's not called while the normal interrupt routine is executing.
 **/
#ifdef I40E_FCOE
void i40e_netpoll(struct net_device *netdev)
#else
static void i40e_netpoll(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int i;

	/* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, &vsi->state))
		return;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		for (i = 0; i < vsi->num_q_vectors; i++)
			i40e_msix_clean_rings(0, vsi->q_vectors[i]);
	} else {
		i40e_intr(pf->pdev->irq, pf);
	}
}
#endif

/**
 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
 * @pf: the PF being configured
 * @pf_q: the PF queue
 * @enable: enable or disable state of the queue
 *
 * This routine will wait for the given Tx queue of the PF to reach the
 * enabled or disabled state.
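 * The wait is a simple poll of the I40E_QTX_ENA register: up to
 * I40E_QUEUE_WAIT_RETRY_LIMIT reads with a 10-20 usec sleep between
 * them, checking the QENA_STAT bit against the requested state.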
3791 * Returns -ETIMEDOUT in case of failing to reach the requested state after 3792 * multiple retries; else will return 0 in case of success. 3793 **/ 3794 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable) 3795 { 3796 int i; 3797 u32 tx_reg; 3798 3799 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) { 3800 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q)); 3801 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) 3802 break; 3803 3804 usleep_range(10, 20); 3805 } 3806 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT) 3807 return -ETIMEDOUT; 3808 3809 return 0; 3810 } 3811 3812 /** 3813 * i40e_vsi_control_tx - Start or stop a VSI's rings 3814 * @vsi: the VSI being configured 3815 * @enable: start or stop the rings 3816 **/ 3817 static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) 3818 { 3819 struct i40e_pf *pf = vsi->back; 3820 struct i40e_hw *hw = &pf->hw; 3821 int i, j, pf_q, ret = 0; 3822 u32 tx_reg; 3823 3824 pf_q = vsi->base_queue; 3825 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { 3826 3827 /* warn the TX unit of coming changes */ 3828 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable); 3829 if (!enable) 3830 usleep_range(10, 20); 3831 3832 for (j = 0; j < 50; j++) { 3833 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q)); 3834 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) == 3835 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1)) 3836 break; 3837 usleep_range(1000, 2000); 3838 } 3839 /* Skip if the queue is already in the requested state */ 3840 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) 3841 continue; 3842 3843 /* turn on/off the queue */ 3844 if (enable) { 3845 wr32(hw, I40E_QTX_HEAD(pf_q), 0); 3846 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK; 3847 } else { 3848 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK; 3849 } 3850 3851 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg); 3852 /* No waiting for the Tx queue to disable */ 3853 if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state)) 3854 continue; 3855 3856 /* wait for the change to finish */ 3857 ret = i40e_pf_txq_wait(pf, pf_q, enable); 3858 if (ret) { 3859 dev_info(&pf->pdev->dev, 3860 "VSI seid %d Tx ring %d %sable timeout\n", 3861 vsi->seid, pf_q, (enable ? "en" : "dis")); 3862 break; 3863 } 3864 } 3865 3866 if (hw->revision_id == 0) 3867 mdelay(50); 3868 return ret; 3869 } 3870 3871 /** 3872 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled 3873 * @pf: the PF being configured 3874 * @pf_q: the PF queue 3875 * @enable: enable or disable state of the queue 3876 * 3877 * This routine will wait for the given Rx queue of the PF to reach the 3878 * enabled or disabled state. 3879 * Returns -ETIMEDOUT in case of failing to reach the requested state after 3880 * multiple retries; else will return 0 in case of success. 
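 *
 * A typical caller first flips the request bit and then waits; as an
 * illustrative sketch (the real sequence lives in i40e_vsi_control_rx()
 * below):
 *
 *	rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
 *	wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
 *	ret = i40e_pf_rxq_wait(pf, pf_q, true);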
3881 **/
3882 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3883 {
3884 int i;
3885 u32 rx_reg;
3886
3887 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3888 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
3889 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3890 break;
3891
3892 usleep_range(10, 20);
3893 }
3894 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3895 return -ETIMEDOUT;
3896
3897 return 0;
3898 }
3899
3900 /**
3901 * i40e_vsi_control_rx - Start or stop a VSI's rings
3902 * @vsi: the VSI being configured
3903 * @enable: start or stop the rings
3904 **/
3905 static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
3906 {
3907 struct i40e_pf *pf = vsi->back;
3908 struct i40e_hw *hw = &pf->hw;
3909 int i, j, pf_q, ret = 0;
3910 u32 rx_reg;
3911
3912 pf_q = vsi->base_queue;
3913 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3914 for (j = 0; j < 50; j++) {
3915 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
3916 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
3917 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
3918 break;
3919 usleep_range(1000, 2000);
3920 }
3921
3922 /* Skip if the queue is already in the requested state */
3923 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3924 continue;
3925
3926 /* turn on/off the queue */
3927 if (enable)
3928 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
3929 else
3930 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3931 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
3932 /* No waiting for the Rx queue to disable when port Tx is suspended */
3933 if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
3934 continue;
3935
3936 /* wait for the change to finish */
3937 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
3938 if (ret) {
3939 dev_info(&pf->pdev->dev,
3940 "VSI seid %d Rx ring %d %sable timeout\n",
3941 vsi->seid, pf_q, (enable ?
"en" : "dis")); 3942 break; 3943 } 3944 } 3945 3946 return ret; 3947 } 3948 3949 /** 3950 * i40e_vsi_control_rings - Start or stop a VSI's rings 3951 * @vsi: the VSI being configured 3952 * @enable: start or stop the rings 3953 **/ 3954 int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request) 3955 { 3956 int ret = 0; 3957 3958 /* do rx first for enable and last for disable */ 3959 if (request) { 3960 ret = i40e_vsi_control_rx(vsi, request); 3961 if (ret) 3962 return ret; 3963 ret = i40e_vsi_control_tx(vsi, request); 3964 } else { 3965 /* Ignore return value, we need to shutdown whatever we can */ 3966 i40e_vsi_control_tx(vsi, request); 3967 i40e_vsi_control_rx(vsi, request); 3968 } 3969 3970 return ret; 3971 } 3972 3973 /** 3974 * i40e_vsi_free_irq - Free the irq association with the OS 3975 * @vsi: the VSI being configured 3976 **/ 3977 static void i40e_vsi_free_irq(struct i40e_vsi *vsi) 3978 { 3979 struct i40e_pf *pf = vsi->back; 3980 struct i40e_hw *hw = &pf->hw; 3981 int base = vsi->base_vector; 3982 u32 val, qp; 3983 int i; 3984 3985 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 3986 if (!vsi->q_vectors) 3987 return; 3988 3989 if (!vsi->irqs_ready) 3990 return; 3991 3992 vsi->irqs_ready = false; 3993 for (i = 0; i < vsi->num_q_vectors; i++) { 3994 u16 vector = i + base; 3995 3996 /* free only the irqs that were actually requested */ 3997 if (!vsi->q_vectors[i] || 3998 !vsi->q_vectors[i]->num_ringpairs) 3999 continue; 4000 4001 /* clear the affinity_mask in the IRQ descriptor */ 4002 irq_set_affinity_hint(pf->msix_entries[vector].vector, 4003 NULL); 4004 free_irq(pf->msix_entries[vector].vector, 4005 vsi->q_vectors[i]); 4006 4007 /* Tear down the interrupt queue link list 4008 * 4009 * We know that they come in pairs and always 4010 * the Rx first, then the Tx. To clear the 4011 * link list, stick the EOL value into the 4012 * next_q field of the registers. 
4013 */ 4014 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1)); 4015 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) 4016 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; 4017 val |= I40E_QUEUE_END_OF_LIST 4018 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; 4019 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val); 4020 4021 while (qp != I40E_QUEUE_END_OF_LIST) { 4022 u32 next; 4023 4024 val = rd32(hw, I40E_QINT_RQCTL(qp)); 4025 4026 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK | 4027 I40E_QINT_RQCTL_MSIX0_INDX_MASK | 4028 I40E_QINT_RQCTL_CAUSE_ENA_MASK | 4029 I40E_QINT_RQCTL_INTEVENT_MASK); 4030 4031 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK | 4032 I40E_QINT_RQCTL_NEXTQ_INDX_MASK); 4033 4034 wr32(hw, I40E_QINT_RQCTL(qp), val); 4035 4036 val = rd32(hw, I40E_QINT_TQCTL(qp)); 4037 4038 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK) 4039 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT; 4040 4041 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK | 4042 I40E_QINT_TQCTL_MSIX0_INDX_MASK | 4043 I40E_QINT_TQCTL_CAUSE_ENA_MASK | 4044 I40E_QINT_TQCTL_INTEVENT_MASK); 4045 4046 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK | 4047 I40E_QINT_TQCTL_NEXTQ_INDX_MASK); 4048 4049 wr32(hw, I40E_QINT_TQCTL(qp), val); 4050 qp = next; 4051 } 4052 } 4053 } else { 4054 free_irq(pf->pdev->irq, pf); 4055 4056 val = rd32(hw, I40E_PFINT_LNKLST0); 4057 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) 4058 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; 4059 val |= I40E_QUEUE_END_OF_LIST 4060 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT; 4061 wr32(hw, I40E_PFINT_LNKLST0, val); 4062 4063 val = rd32(hw, I40E_QINT_RQCTL(qp)); 4064 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK | 4065 I40E_QINT_RQCTL_MSIX0_INDX_MASK | 4066 I40E_QINT_RQCTL_CAUSE_ENA_MASK | 4067 I40E_QINT_RQCTL_INTEVENT_MASK); 4068 4069 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK | 4070 I40E_QINT_RQCTL_NEXTQ_INDX_MASK); 4071 4072 wr32(hw, I40E_QINT_RQCTL(qp), val); 4073 4074 val = rd32(hw, I40E_QINT_TQCTL(qp)); 4075 4076 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK | 4077 I40E_QINT_TQCTL_MSIX0_INDX_MASK | 4078 I40E_QINT_TQCTL_CAUSE_ENA_MASK | 4079 I40E_QINT_TQCTL_INTEVENT_MASK); 4080 4081 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK | 4082 I40E_QINT_TQCTL_NEXTQ_INDX_MASK); 4083 4084 wr32(hw, I40E_QINT_TQCTL(qp), val); 4085 } 4086 } 4087 4088 /** 4089 * i40e_free_q_vector - Free memory allocated for specific interrupt vector 4090 * @vsi: the VSI being configured 4091 * @v_idx: Index of vector to be freed 4092 * 4093 * This function frees the memory allocated to the q_vector. In addition if 4094 * NAPI is enabled it will delete any references to the NAPI struct prior 4095 * to freeing the q_vector. 4096 **/ 4097 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx) 4098 { 4099 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; 4100 struct i40e_ring *ring; 4101 4102 if (!q_vector) 4103 return; 4104 4105 /* disassociate q_vector from rings */ 4106 i40e_for_each_ring(ring, q_vector->tx) 4107 ring->q_vector = NULL; 4108 4109 i40e_for_each_ring(ring, q_vector->rx) 4110 ring->q_vector = NULL; 4111 4112 /* only VSI w/ an associated netdev is set up w/ NAPI */ 4113 if (vsi->netdev) 4114 netif_napi_del(&q_vector->napi); 4115 4116 vsi->q_vectors[v_idx] = NULL; 4117 4118 kfree_rcu(q_vector, rcu); 4119 } 4120 4121 /** 4122 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors 4123 * @vsi: the VSI being un-configured 4124 * 4125 * This frees the memory allocated to the q_vectors and 4126 * deletes references to the NAPI struct. 
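 * The q_vector memory itself is released with kfree_rcu() rather than
 * kfree(), so readers still traversing it under RCU remain safe.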
4127 **/ 4128 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi) 4129 { 4130 int v_idx; 4131 4132 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) 4133 i40e_free_q_vector(vsi, v_idx); 4134 } 4135 4136 /** 4137 * i40e_reset_interrupt_capability - Disable interrupt setup in OS 4138 * @pf: board private structure 4139 **/ 4140 static void i40e_reset_interrupt_capability(struct i40e_pf *pf) 4141 { 4142 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */ 4143 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 4144 pci_disable_msix(pf->pdev); 4145 kfree(pf->msix_entries); 4146 pf->msix_entries = NULL; 4147 kfree(pf->irq_pile); 4148 pf->irq_pile = NULL; 4149 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) { 4150 pci_disable_msi(pf->pdev); 4151 } 4152 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED); 4153 } 4154 4155 /** 4156 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings 4157 * @pf: board private structure 4158 * 4159 * We go through and clear interrupt specific resources and reset the structure 4160 * to pre-load conditions 4161 **/ 4162 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf) 4163 { 4164 int i; 4165 4166 i40e_stop_misc_vector(pf); 4167 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 4168 synchronize_irq(pf->msix_entries[0].vector); 4169 free_irq(pf->msix_entries[0].vector, pf); 4170 } 4171 4172 i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector, 4173 I40E_IWARP_IRQ_PILE_ID); 4174 4175 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); 4176 for (i = 0; i < pf->num_alloc_vsi; i++) 4177 if (pf->vsi[i]) 4178 i40e_vsi_free_q_vectors(pf->vsi[i]); 4179 i40e_reset_interrupt_capability(pf); 4180 } 4181 4182 /** 4183 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI 4184 * @vsi: the VSI being configured 4185 **/ 4186 static void i40e_napi_enable_all(struct i40e_vsi *vsi) 4187 { 4188 int q_idx; 4189 4190 if (!vsi->netdev) 4191 return; 4192 4193 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) 4194 napi_enable(&vsi->q_vectors[q_idx]->napi); 4195 } 4196 4197 /** 4198 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI 4199 * @vsi: the VSI being configured 4200 **/ 4201 static void i40e_napi_disable_all(struct i40e_vsi *vsi) 4202 { 4203 int q_idx; 4204 4205 if (!vsi->netdev) 4206 return; 4207 4208 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) 4209 napi_disable(&vsi->q_vectors[q_idx]->napi); 4210 } 4211 4212 /** 4213 * i40e_vsi_close - Shut down a VSI 4214 * @vsi: the vsi to be quelled 4215 **/ 4216 static void i40e_vsi_close(struct i40e_vsi *vsi) 4217 { 4218 bool reset = false; 4219 4220 if (!test_and_set_bit(__I40E_DOWN, &vsi->state)) 4221 i40e_down(vsi); 4222 i40e_vsi_free_irq(vsi); 4223 i40e_vsi_free_tx_resources(vsi); 4224 i40e_vsi_free_rx_resources(vsi); 4225 vsi->current_netdev_flags = 0; 4226 if (test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) 4227 reset = true; 4228 i40e_notify_client_of_netdev_close(vsi, reset); 4229 } 4230 4231 /** 4232 * i40e_quiesce_vsi - Pause a given VSI 4233 * @vsi: the VSI being paused 4234 **/ 4235 static void i40e_quiesce_vsi(struct i40e_vsi *vsi) 4236 { 4237 if (test_bit(__I40E_DOWN, &vsi->state)) 4238 return; 4239 4240 /* No need to disable FCoE VSI when Tx suspended */ 4241 if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) && 4242 vsi->type == I40E_VSI_FCOE) { 4243 dev_dbg(&vsi->back->pdev->dev, 4244 "VSI seid %d skipping FCoE VSI disable\n", vsi->seid); 4245 return; 4246 } 4247 4248 set_bit(__I40E_NEEDS_RESTART, &vsi->state); 
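/* Prefer the stack's own stop path when a netdev is attached so the
 * kernel observes a normal ifdown; otherwise close the VSI directly.
 */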
4249 if (vsi->netdev && netif_running(vsi->netdev))
4250 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
4251 else
4252 i40e_vsi_close(vsi);
4253 }
4254
4255 /**
4256 * i40e_unquiesce_vsi - Resume a given VSI
4257 * @vsi: the VSI being resumed
4258 **/
4259 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
4260 {
4261 if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
4262 return;
4263
4264 clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
4265 if (vsi->netdev && netif_running(vsi->netdev))
4266 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
4267 else
4268 i40e_vsi_open(vsi); /* this clears the DOWN bit */
4269 }
4270
4271 /**
4272 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
4273 * @pf: the PF
4274 **/
4275 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
4276 {
4277 int v;
4278
4279 for (v = 0; v < pf->num_alloc_vsi; v++) {
4280 if (pf->vsi[v])
4281 i40e_quiesce_vsi(pf->vsi[v]);
4282 }
4283 }
4284
4285 /**
4286 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
4287 * @pf: the PF
4288 **/
4289 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
4290 {
4291 int v;
4292
4293 for (v = 0; v < pf->num_alloc_vsi; v++) {
4294 if (pf->vsi[v])
4295 i40e_unquiesce_vsi(pf->vsi[v]);
4296 }
4297 }
4298
4299 #ifdef CONFIG_I40E_DCB
4300 /**
4301 * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
4302 * @vsi: the VSI being configured
4303 *
4304 * This function waits for the given VSI's queues to be disabled.
4305 **/
4306 static int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
4307 {
4308 struct i40e_pf *pf = vsi->back;
4309 int i, pf_q, ret;
4310
4311 pf_q = vsi->base_queue;
4312 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4313 /* Check and wait for the disable status of the queue */
4314 ret = i40e_pf_txq_wait(pf, pf_q, false);
4315 if (ret) {
4316 dev_info(&pf->pdev->dev,
4317 "VSI seid %d Tx ring %d disable timeout\n",
4318 vsi->seid, pf_q);
4319 return ret;
4320 }
4321 }
4322
4323 pf_q = vsi->base_queue;
4324 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4325 /* Check and wait for the disable status of the queue */
4326 ret = i40e_pf_rxq_wait(pf, pf_q, false);
4327 if (ret) {
4328 dev_info(&pf->pdev->dev,
4329 "VSI seid %d Rx ring %d disable timeout\n",
4330 vsi->seid, pf_q);
4331 return ret;
4332 }
4333 }
4334
4335 return 0;
4336 }
4337
4338 /**
4339 * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
4340 * @pf: the PF
4341 *
4342 * This function waits for the queues to be in disabled state for all the
4343 * VSIs that are managed by this PF.
4344 **/
4345 static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
4346 {
4347 int v, ret = 0;
4348
4349 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4350 /* No need to wait for FCoE VSI queues */
4351 if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) {
4352 ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
4353 if (ret)
4354 break;
4355 }
4356 }
4357
4358 return ret;
4359 }
4360
4361 #endif
4362
4363 /**
4364 * i40e_detect_recover_hung_queue - Function to detect and recover a hung queue
4365 * @q_idx: TX queue number
4366 * @vsi: Pointer to VSI struct
4367 *
4368 * This function checks the specified queue of the given VSI for a hung
4369 * condition. Detection is a two-step process, so at first it only sets a
4370 * 'hung' bit. If napi_poll runs before the next run of the service task,
4371 * it clears the 'hung' bit for the respective q_vector; if not, the hung
4372 * condition remains and this function issues a SW interrupt to recover.
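 *
 * In effect, pass N of the service task flags the queue, and pass N+1
 * forces a descriptor write-back via SW interrupt if NAPI has not
 * cleared the flag in the meantime.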
4373 **/
4374 static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
4375 {
4376 struct i40e_ring *tx_ring = NULL;
4377 struct i40e_pf *pf;
4378 u32 head, val, tx_pending_hw;
4379 int i;
4380
4381 pf = vsi->back;
4382
4383 /* now that we have an index, find the tx_ring struct */
4384 for (i = 0; i < vsi->num_queue_pairs; i++) {
4385 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
4386 if (q_idx == vsi->tx_rings[i]->queue_index) {
4387 tx_ring = vsi->tx_rings[i];
4388 break;
4389 }
4390 }
4391 }
4392
4393 if (!tx_ring)
4394 return;
4395
4396 /* Read interrupt register */
4397 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4398 val = rd32(&pf->hw,
4399 I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
4400 tx_ring->vsi->base_vector - 1));
4401 else
4402 val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
4403
4404 head = i40e_get_head(tx_ring);
4405
4406 tx_pending_hw = i40e_get_tx_pending(tx_ring, false);
4407
4408 /* HW is done executing descriptors and has updated the HEAD write-back,
4409 * but SW hasn't processed those descriptors yet. If no interrupt is
4410 * generated from this point on, dev_watchdog could detect a timeout
4411 * on that netdev_queue, hence we proactively trigger a SW
4412 * interrupt.
4413 */
4414 if (tx_pending_hw && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
4415 /* NAPI poll hasn't run to clear the bit since it was set */
4416 if (test_and_clear_bit(I40E_Q_VECTOR_HUNG_DETECT,
4417 &tx_ring->q_vector->hung_detected)) {
4418 netdev_info(vsi->netdev, "VSI_seid %d, Hung TX queue %d, tx_pending_hw: %d, NTC:0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x\n",
4419 vsi->seid, q_idx, tx_pending_hw,
4420 tx_ring->next_to_clean, head,
4421 tx_ring->next_to_use,
4422 readl(tx_ring->tail));
4423 netdev_info(vsi->netdev, "VSI_seid %d, Issuing force_wb for TX queue %d, Interrupt Reg: 0x%x\n",
4424 vsi->seid, q_idx, val);
4425 i40e_force_wb(vsi, tx_ring->q_vector);
4426 } else {
4427 /* First chance - flag a possible hang */
4428 set_bit(I40E_Q_VECTOR_HUNG_DETECT,
4429 &tx_ring->q_vector->hung_detected);
4430 }
4431 }
4432
4433 /* This is the case where we have interrupts missing,
4434 * so the tx_pending in HW will most likely be 0, but we
4435 * will have tx_pending in SW since the WB happened but the
4436 * interrupt got lost.
4437 */
4438 if ((!tx_pending_hw) && i40e_get_tx_pending(tx_ring, true) &&
4439 (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
4440 if (napi_reschedule(&tx_ring->q_vector->napi))
4441 tx_ring->tx_stats.tx_lost_interrupt++;
4442 }
4443 }
4444
4445 /**
4446 * i40e_detect_recover_hung - Function to detect and recover hung queues
4447 * @pf: pointer to PF struct
4448 *
4449 * The LAN VSI has a netdev, and the netdev has TX queues. This function
4450 * checks each of those TX queues for a hang and triggers recovery by
4451 * issuing a SW interrupt.
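 * It runs from the driver's periodic service task.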
4452 **/
4453 static void i40e_detect_recover_hung(struct i40e_pf *pf)
4454 {
4455 struct net_device *netdev;
4456 struct i40e_vsi *vsi;
4457 int i;
4458
4459 /* Only for LAN VSI */
4460 vsi = pf->vsi[pf->lan_vsi];
4461
4462 if (!vsi)
4463 return;
4464
4465 /* Make sure VSI state is not DOWN/RECOVERY_PENDING */
4466 if (test_bit(__I40E_DOWN, &vsi->back->state) ||
4467 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
4468 return;
4469
4470 /* Make sure type is MAIN VSI */
4471 if (vsi->type != I40E_VSI_MAIN)
4472 return;
4473
4474 netdev = vsi->netdev;
4475 if (!netdev)
4476 return;
4477
4478 /* Bail out if netif_carrier is not OK */
4479 if (!netif_carrier_ok(netdev))
4480 return;
4481
4482 /* Go through the netdev's TX queues */
4483 for (i = 0; i < netdev->num_tx_queues; i++) {
4484 struct netdev_queue *q;
4485
4486 q = netdev_get_tx_queue(netdev, i);
4487 if (q)
4488 i40e_detect_recover_hung_queue(i, vsi);
4489 }
4490 }
4491
4492 /**
4493 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
4494 * @pf: pointer to PF
4495 *
4496 * Get the TC map for an iSCSI-enabled PF; it includes the iSCSI TC
4497 * as well as the LAN TC.
4498 **/
4499 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
4500 {
4501 struct i40e_dcb_app_priority_table app;
4502 struct i40e_hw *hw = &pf->hw;
4503 u8 enabled_tc = 1; /* TC0 is always enabled */
4504 u8 tc, i;
4505 /* Get the iSCSI APP TLV */
4506 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4507
4508 for (i = 0; i < dcbcfg->numapps; i++) {
4509 app = dcbcfg->app[i];
4510 if (app.selector == I40E_APP_SEL_TCPIP &&
4511 app.protocolid == I40E_APP_PROTOID_ISCSI) {
4512 tc = dcbcfg->etscfg.prioritytable[app.priority];
4513 enabled_tc |= BIT(tc);
4514 break;
4515 }
4516 }
4517
4518 return enabled_tc;
4519 }
4520
4521 /**
4522 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
4523 * @dcbcfg: the corresponding DCBx configuration structure
4524 *
4525 * Return the number of TCs from given DCBx configuration
4526 **/
4527 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
4528 {
4529 u8 num_tc = 0;
4530 int i;
4531
4532 /* Scan the ETS Config Priority Table to find the highest
4533 * traffic class mapped to any priority, and use that
4534 * traffic class index to derive the
4535 * number of traffic classes enabled
4536 */
4537 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
4538 if (dcbcfg->etscfg.prioritytable[i] > num_tc)
4539 num_tc = dcbcfg->etscfg.prioritytable[i];
4540 }
4541
4542 /* Traffic class index starts from zero so
4543 * increment to return the actual count
4544 */
4545 return num_tc + 1;
4546 }
4547
4548 /**
4549 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
4550 * @dcbcfg: the corresponding DCBx configuration structure
4551 *
4552 * Query the given DCBx configuration and return a bitmap of the
4553 * traffic classes it enables
4554 **/
4555 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
4556 {
4557 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
4558 u8 enabled_tc = 1;
4559 u8 i;
4560
4561 for (i = 0; i < num_tc; i++)
4562 enabled_tc |= BIT(i);
4563
4564 return enabled_tc;
4565 }
4566
4567 /**
4568 * i40e_pf_get_num_tc - Get the number of enabled traffic classes for PF
4569 * @pf: PF being queried
4570 *
4571 * Return number of traffic classes enabled for the given PF
4572 **/
4573 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
4574 {
4575 struct i40e_hw *hw = &pf->hw;
4576 u8 i, enabled_tc;
4577 u8 num_tc = 0;
4578 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4579
4580 /* If DCB is not
enabled then always in single TC */
4581 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4582 return 1;
4583
4584 /* SFP mode will be enabled for all TCs on port */
4585 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4586 return i40e_dcb_get_num_tc(dcbcfg);
4587
4588 /* In MFP mode, return the count of TCs enabled for this PF */
4589 if (pf->hw.func_caps.iscsi)
4590 enabled_tc = i40e_get_iscsi_tc_map(pf);
4591 else
4592 return 1; /* Only TC0 */
4593
4594 /* At least have TC0 */
4595 enabled_tc = (enabled_tc ? enabled_tc : 0x1);
4596 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4597 if (enabled_tc & BIT(i))
4598 num_tc++;
4599 }
4600 return num_tc;
4601 }
4602
4603 /**
4604 * i40e_pf_get_default_tc - Get bitmap for first enabled TC
4605 * @pf: PF being queried
4606 *
4607 * Return a bitmap for the first enabled traffic class for this PF.
4608 **/
4609 static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
4610 {
4611 u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
4612 u8 i = 0;
4613
4614 if (!enabled_tc)
4615 return 0x1; /* TC0 */
4616
4617 /* Find the first enabled TC */
4618 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4619 if (enabled_tc & BIT(i))
4620 break;
4621 }
4622
4623 return BIT(i);
4624 }
4625
4626 /**
4627 * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
4628 * @pf: PF being queried
4629 *
4630 * Return a bitmap for enabled traffic classes for this PF.
4631 **/
4632 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
4633 {
4634 /* If DCB is not enabled for this PF then just return default TC */
4635 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4636 return i40e_pf_get_default_tc(pf);
4637
4638 /* SFP mode we want PF to be enabled for all TCs */
4639 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4640 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
4641
4642 /* MFP enabled and iSCSI PF type */
4643 if (pf->hw.func_caps.iscsi)
4644 return i40e_get_iscsi_tc_map(pf);
4645 else
4646 return i40e_pf_get_default_tc(pf);
4647 }
4648
4649 /**
4650 * i40e_vsi_get_bw_info - Query VSI BW Information
4651 * @vsi: the VSI being queried
4652 *
4653 * Returns 0 on success, negative value on failure
4654 **/
4655 static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
4656 {
4657 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
4658 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
4659 struct i40e_pf *pf = vsi->back;
4660 struct i40e_hw *hw = &pf->hw;
4661 i40e_status ret;
4662 u32 tc_bw_max;
4663 int i;
4664
4665 /* Get the VSI level BW configuration */
4666 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
4667 if (ret) {
4668 dev_info(&pf->pdev->dev,
4669 "couldn't get PF vsi bw config, err %s aq_err %s\n",
4670 i40e_stat_str(&pf->hw, ret),
4671 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4672 return -EINVAL;
4673 }
4674
4675 /* Get the VSI level BW configuration per TC */
4676 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
4677 NULL);
4678 if (ret) {
4679 dev_info(&pf->pdev->dev,
4680 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
4681 i40e_stat_str(&pf->hw, ret),
4682 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4683 return -EINVAL;
4684 }
4685
4686 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
4687 dev_info(&pf->pdev->dev,
4688 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
4689 bw_config.tc_valid_bits,
4690 bw_ets_config.tc_valid_bits);
4691 /* Still continuing */
4692 }
4693
4694 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
4695 vsi->bw_max_quanta =
bw_config.max_bw;
4696 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
4697 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
4698 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4699 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
4700 vsi->bw_ets_limit_credits[i] =
4701 le16_to_cpu(bw_ets_config.credits[i]);
4702 /* 3 bits out of 4 for each TC */
4703 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
4704 }
4705
4706 return 0;
4707 }
4708
4709 /**
4710 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
4711 * @vsi: the VSI being configured
4712 * @enabled_tc: TC bitmap
4713 * @bw_share: BW shared credits per TC
4714 *
4715 * Returns 0 on success, negative value on failure
4716 **/
4717 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
4718 u8 *bw_share)
4719 {
4720 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
4721 i40e_status ret;
4722 int i;
4723
4724 bw_data.tc_valid_bits = enabled_tc;
4725 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4726 bw_data.tc_bw_credits[i] = bw_share[i];
4727
4728 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
4729 NULL);
4730 if (ret) {
4731 dev_info(&vsi->back->pdev->dev,
4732 "AQ command Config VSI BW allocation per TC failed = %d\n",
4733 vsi->back->hw.aq.asq_last_status);
4734 return -EINVAL;
4735 }
4736
4737 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4738 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
4739
4740 return 0;
4741 }
4742
4743 /**
4744 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
4745 * @vsi: the VSI being configured
4746 * @enabled_tc: TC map to be enabled
4747 *
4748 **/
4749 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4750 {
4751 struct net_device *netdev = vsi->netdev;
4752 struct i40e_pf *pf = vsi->back;
4753 struct i40e_hw *hw = &pf->hw;
4754 u8 netdev_tc = 0;
4755 int i;
4756 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4757
4758 if (!netdev)
4759 return;
4760
4761 if (!enabled_tc) {
4762 netdev_reset_tc(netdev);
4763 return;
4764 }
4765
4766 /* Set up actual enabled TCs on the VSI */
4767 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
4768 return;
4769
4770 /* set per TC queues for the VSI */
4771 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4772 /* Only set TC queues for enabled tcs
4773 *
4774 * e.g. For a VSI that has TC0 and TC3 enabled the
4775 * enabled_tc bitmap would be 0x9 (binary 1001); the driver
4776 * will set the numtc for netdev as 2 that will be
4777 * referenced by the netdev layer as TC 0 and 1.
4778 */
4779 if (vsi->tc_config.enabled_tc & BIT(i))
4780 netdev_set_tc_queue(netdev,
4781 vsi->tc_config.tc_info[i].netdev_tc,
4782 vsi->tc_config.tc_info[i].qcount,
4783 vsi->tc_config.tc_info[i].qoffset);
4784 }
4785
4786 /* Assign UP2TC map for the VSI */
4787 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
4788 /* Get the actual TC# for the UP */
4789 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
4790 /* Get the mapped netdev TC# for the UP */
4791 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
4792 netdev_set_prio_tc_map(netdev, i, netdev_tc);
4793 }
4794 }
4795
4796 /**
4797 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
4798 * @vsi: the VSI being configured
4799 * @ctxt: the ctxt buffer returned from AQ VSI update param command
4800 **/
4801 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
4802 struct i40e_vsi_context *ctxt)
4803 {
4804 /* copy just the sections touched, not the entire info,
4805 * since not all sections are valid as returned by
4806 * update vsi params
4807 */
4808 vsi->info.mapping_flags = ctxt->info.mapping_flags;
4809 memcpy(&vsi->info.queue_mapping,
4810 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
4811 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
4812 sizeof(vsi->info.tc_mapping));
4813 }
4814
4815 /**
4816 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
4817 * @vsi: VSI to be configured
4818 * @enabled_tc: TC bitmap
4819 *
4820 * This configures a particular VSI for TCs that are mapped to the
4821 * given TC bitmap. It uses default bandwidth share for TCs across
4822 * VSIs to configure TC for a particular VSI.
4823 *
4824 * NOTE:
4825 * It is expected that the VSI queues have been quiesced before calling
4826 * this function.
4827 **/
4828 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4829 {
4830 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
4831 struct i40e_vsi_context ctxt;
4832 int ret = 0;
4833 int i;
4834
4835 /* Check if enabled_tc is same as existing or new TCs */
4836 if (vsi->tc_config.enabled_tc == enabled_tc)
4837 return ret;
4838
4839 /* Enable ETS TCs with equal BW Share for now across all VSIs */
4840 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4841 if (enabled_tc & BIT(i))
4842 bw_share[i] = 1;
4843 }
4844
4845 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
4846 if (ret) {
4847 dev_info(&vsi->back->pdev->dev,
4848 "Failed configuring TC map %d for VSI %d\n",
4849 enabled_tc, vsi->seid);
4850 goto out;
4851 }
4852
4853 /* Update Queue Pairs Mapping for currently enabled UPs */
4854 ctxt.seid = vsi->seid;
4855 ctxt.pf_num = vsi->back->hw.pf_id;
4856 ctxt.vf_num = 0;
4857 ctxt.uplink_seid = vsi->uplink_seid;
4858 ctxt.info = vsi->info;
4859 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
4860
4861 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
4862 ctxt.info.valid_sections |=
4863 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
4864 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
4865 }
4866
4867 /* Update the VSI after updating the VSI queue-mapping information */
4868 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
4869 if (ret) {
4870 dev_info(&vsi->back->pdev->dev,
4871 "Update vsi tc config failed, err %s aq_err %s\n",
4872 i40e_stat_str(&vsi->back->hw, ret),
4873 i40e_aq_str(&vsi->back->hw,
4874 vsi->back->hw.aq.asq_last_status));
4875 goto out;
4876 }
4877 /* update the local VSI info with updated queue map */
4878 i40e_vsi_update_queue_map(vsi, &ctxt);
4879
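/* Clear valid_sections so stale section flags are not replayed by a
 * later update-VSI AQ command.
 */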
vsi->info.valid_sections = 0;
4880
4881 /* Update current VSI BW information */
4882 ret = i40e_vsi_get_bw_info(vsi);
4883 if (ret) {
4884 dev_info(&vsi->back->pdev->dev,
4885 "Failed updating vsi bw info, err %s aq_err %s\n",
4886 i40e_stat_str(&vsi->back->hw, ret),
4887 i40e_aq_str(&vsi->back->hw,
4888 vsi->back->hw.aq.asq_last_status));
4889 goto out;
4890 }
4891
4892 /* Update the netdev TC setup */
4893 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
4894 out:
4895 return ret;
4896 }
4897
4898 /**
4899 * i40e_veb_config_tc - Configure TCs for given VEB
4900 * @veb: given VEB
4901 * @enabled_tc: TC bitmap
4902 *
4903 * Configures given TC bitmap for VEB (switching) element
4904 **/
4905 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
4906 {
4907 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
4908 struct i40e_pf *pf = veb->pf;
4909 int ret = 0;
4910 int i;
4911
4912 /* No TCs or already enabled TCs just return */
4913 if (!enabled_tc || veb->enabled_tc == enabled_tc)
4914 return ret;
4915
4916 bw_data.tc_valid_bits = enabled_tc;
4917 /* bw_data.absolute_credits is not set (relative) */
4918
4919 /* Enable ETS TCs with equal BW Share for now */
4920 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4921 if (enabled_tc & BIT(i))
4922 bw_data.tc_bw_share_credits[i] = 1;
4923 }
4924
4925 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
4926 &bw_data, NULL);
4927 if (ret) {
4928 dev_info(&pf->pdev->dev,
4929 "VEB bw config failed, err %s aq_err %s\n",
4930 i40e_stat_str(&pf->hw, ret),
4931 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4932 goto out;
4933 }
4934
4935 /* Update the BW information */
4936 ret = i40e_veb_get_bw_info(veb);
4937 if (ret) {
4938 dev_info(&pf->pdev->dev,
4939 "Failed getting veb bw config, err %s aq_err %s\n",
4940 i40e_stat_str(&pf->hw, ret),
4941 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4942 }
4943
4944 out:
4945 return ret;
4946 }
4947
4948 #ifdef CONFIG_I40E_DCB
4949 /**
4950 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
4951 * @pf: PF struct
4952 *
4953 * Reconfigure VEB/VSIs on a given PF; it is assumed that
4954 * the caller has quiesced all the VSIs before calling
4955 * this function
4956 **/
4957 static void i40e_dcb_reconfigure(struct i40e_pf *pf)
4958 {
4959 u8 tc_map = 0;
4960 int ret;
4961 u8 v;
4962
4963 /* Enable the TCs available on PF to all VEBs */
4964 tc_map = i40e_pf_get_tc_map(pf);
4965 for (v = 0; v < I40E_MAX_VEB; v++) {
4966 if (!pf->veb[v])
4967 continue;
4968 ret = i40e_veb_config_tc(pf->veb[v], tc_map);
4969 if (ret) {
4970 dev_info(&pf->pdev->dev,
4971 "Failed configuring TC for VEB seid=%d\n",
4972 pf->veb[v]->seid);
4973 /* Will still try to configure the remaining components */
4974 }
4975 }
4976
4977 /* Update each VSI */
4978 for (v = 0; v < pf->num_alloc_vsi; v++) {
4979 if (!pf->vsi[v])
4980 continue;
4981
4982 /* - Enable all TCs for the LAN VSI
4983 #ifdef I40E_FCOE
4984 * - For FCoE VSI only enable the TC configured
4985 * as per the APP TLV
4986 #endif
4987 * - For all others keep them at TC0 for now
4988 */
4989 if (v == pf->lan_vsi)
4990 tc_map = i40e_pf_get_tc_map(pf);
4991 else
4992 tc_map = i40e_pf_get_default_tc(pf);
4993 #ifdef I40E_FCOE
4994 if (pf->vsi[v]->type == I40E_VSI_FCOE)
4995 tc_map = i40e_get_fcoe_tc_map(pf);
4996 #endif /* #ifdef I40E_FCOE */
4997
4998 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
4999 if (ret) {
5000 dev_info(&pf->pdev->dev,
5001 "Failed configuring TC for VSI seid=%d\n",
5002 pf->vsi[v]->seid);
5003 /* Will still try to configure the remaining
components */
5004 } else {
5005 /* Re-configure VSI vectors based on updated TC map */
5006 i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
5007 if (pf->vsi[v]->netdev)
5008 i40e_dcbnl_set_all(pf->vsi[v]);
5009 }
5010 i40e_notify_client_of_l2_param_changes(pf->vsi[v]);
5011 }
5012 }
5013
5014 /**
5015 * i40e_resume_port_tx - Resume port Tx
5016 * @pf: PF struct
5017 *
5018 * Resume a port's Tx and issue a PF reset in case of failure to
5019 * resume.
5020 **/
5021 static int i40e_resume_port_tx(struct i40e_pf *pf)
5022 {
5023 struct i40e_hw *hw = &pf->hw;
5024 int ret;
5025
5026 ret = i40e_aq_resume_port_tx(hw, NULL);
5027 if (ret) {
5028 dev_info(&pf->pdev->dev,
5029 "Resume Port Tx failed, err %s aq_err %s\n",
5030 i40e_stat_str(&pf->hw, ret),
5031 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5032 /* Schedule PF reset to recover */
5033 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
5034 i40e_service_event_schedule(pf);
5035 }
5036
5037 return ret;
5038 }
5039
5040 /**
5041 * i40e_init_pf_dcb - Initialize DCB configuration
5042 * @pf: PF being configured
5043 *
5044 * Query the current DCB configuration and cache it
5045 * in the hardware structure
5046 **/
5047 static int i40e_init_pf_dcb(struct i40e_pf *pf)
5048 {
5049 struct i40e_hw *hw = &pf->hw;
5050 int err = 0;
5051
5052 /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
5053 if (pf->flags & I40E_FLAG_NO_DCB_SUPPORT)
5054 goto out;
5055
5056 /* Get the initial DCB configuration */
5057 err = i40e_init_dcb(hw);
5058 if (!err) {
5059 /* Device/Function is not DCBX capable */
5060 if ((!hw->func_caps.dcb) ||
5061 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
5062 dev_info(&pf->pdev->dev,
5063 "DCBX offload is not supported or is disabled for this PF.\n");
5064
5065 if (pf->flags & I40E_FLAG_MFP_ENABLED)
5066 goto out;
5067
5068 } else {
5069 /* When status is not DISABLED then DCBX in FW */
5070 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
5071 DCB_CAP_DCBX_VER_IEEE;
5072
5073 pf->flags |= I40E_FLAG_DCB_CAPABLE;
5074 /* Enable DCB tagging only when more than one TC */
5075 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
5076 pf->flags |= I40E_FLAG_DCB_ENABLED;
5077 dev_dbg(&pf->pdev->dev,
5078 "DCBX offload is supported for this PF.\n");
5079 }
5080 } else {
5081 dev_info(&pf->pdev->dev,
5082 "Query for DCB configuration failed, err %s aq_err %s\n",
5083 i40e_stat_str(&pf->hw, err),
5084 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5085 }
5086
5087 out:
5088 return err;
5089 }
5090 #endif /* CONFIG_I40E_DCB */
5091 #define SPEED_SIZE 14
5092 #define FC_SIZE 8
5093 /**
5094 * i40e_print_link_message - print link up or down
5095 * @vsi: the VSI for which link needs a message
 * @isup: true if the link is up, false otherwise
5096 **/
5097 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
5098 {
5099 char *speed = "Unknown";
5100 char *fc = "Unknown";
5101
5102 if (vsi->current_isup == isup)
5103 return;
5104 vsi->current_isup = isup;
5105 if (!isup) {
5106 netdev_info(vsi->netdev, "NIC Link is Down\n");
5107 return;
5108 }
5109
5110 /* Warn user if link speed on NPAR enabled partition is not at
5111 * least 10GB
5112 */
5113 if (vsi->back->hw.func_caps.npar_enable &&
5114 (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
5115 vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
5116 netdev_warn(vsi->netdev,
5117 "The partition detected a link speed that is less than 10Gbps\n");
5118
5119 switch (vsi->back->hw.phy.link_info.link_speed) {
5120 case I40E_LINK_SPEED_40GB:
5121 speed = "40 G";
5122 break;
5123 case
I40E_LINK_SPEED_20GB:
5124 speed = "20 G";
5125 break;
5126 case I40E_LINK_SPEED_10GB:
5127 speed = "10 G";
5128 break;
5129 case I40E_LINK_SPEED_1GB:
5130 speed = "1000 M";
5131 break;
5132 case I40E_LINK_SPEED_100MB:
5133 speed = "100 M";
5134 break;
5135 default:
5136 break;
5137 }
5138
5139 switch (vsi->back->hw.fc.current_mode) {
5140 case I40E_FC_FULL:
5141 fc = "RX/TX";
5142 break;
5143 case I40E_FC_TX_PAUSE:
5144 fc = "TX";
5145 break;
5146 case I40E_FC_RX_PAUSE:
5147 fc = "RX";
5148 break;
5149 default:
5150 fc = "None";
5151 break;
5152 }
5153
5154 netdev_info(vsi->netdev, "NIC Link is Up %sbps Full Duplex, Flow Control: %s\n",
5155 speed, fc);
5156 }
5157
5158 /**
5159 * i40e_up_complete - Finish the last steps of bringing up a connection
5160 * @vsi: the VSI being configured
5161 **/
5162 static int i40e_up_complete(struct i40e_vsi *vsi)
5163 {
5164 struct i40e_pf *pf = vsi->back;
5165 int err;
5166
5167 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
5168 i40e_vsi_configure_msix(vsi);
5169 else
5170 i40e_configure_msi_and_legacy(vsi);
5171
5172 /* start rings */
5173 err = i40e_vsi_control_rings(vsi, true);
5174 if (err)
5175 return err;
5176
5177 clear_bit(__I40E_DOWN, &vsi->state);
5178 i40e_napi_enable_all(vsi);
5179 i40e_vsi_enable_irq(vsi);
5180
5181 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
5182 (vsi->netdev)) {
5183 i40e_print_link_message(vsi, true);
5184 netif_tx_start_all_queues(vsi->netdev);
5185 netif_carrier_on(vsi->netdev);
5186 } else if (vsi->netdev) {
5187 i40e_print_link_message(vsi, false);
5188 /* need to check for a qualified module here */
5189 if ((pf->hw.phy.link_info.link_info &
5190 I40E_AQ_MEDIA_AVAILABLE) &&
5191 (!(pf->hw.phy.link_info.an_info &
5192 I40E_AQ_QUALIFIED_MODULE)))
5193 netdev_err(vsi->netdev,
5194 "the driver failed to link because an unqualified module was detected.");
5195 }
5196
5197 /* replay FDIR SB filters */
5198 if (vsi->type == I40E_VSI_FDIR) {
5199 /* reset fd counters */
5200 pf->fd_add_err = pf->fd_atr_cnt = 0;
5201 if (pf->fd_tcp_rule > 0) {
5202 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
5203 if (I40E_DEBUG_FD & pf->hw.debug_mask)
5204 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
5205 pf->fd_tcp_rule = 0;
5206 }
5207 i40e_fdir_filter_restore(vsi);
5208 }
5209
5210 /* On the next run of the service_task, notify any clients of the
5211 * newly opened netdev
5212 */
5213 pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
5214 i40e_service_event_schedule(pf);
5215
5216 return 0;
5217 }
5218
5219 /**
5220 * i40e_vsi_reinit_locked - Reset the VSI
5221 * @vsi: the VSI being configured
5222 *
5223 * Rebuild the ring structs after some configuration
5224 * has changed, e.g. MTU size.
5225 **/
5226 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
5227 {
5228 struct i40e_pf *pf = vsi->back;
5229
5230 WARN_ON(in_interrupt());
5231 while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
5232 usleep_range(1000, 2000);
5233 i40e_down(vsi);
5234
5235 /* Give a VF some time to respond to the reset. The
5236 * two second wait is based upon the watchdog cycle in
5237 * the VF driver.
5238 */ 5239 if (vsi->type == I40E_VSI_SRIOV) 5240 msleep(2000); 5241 i40e_up(vsi); 5242 clear_bit(__I40E_CONFIG_BUSY, &pf->state); 5243 } 5244 5245 /** 5246 * i40e_up - Bring the connection back up after being down 5247 * @vsi: the VSI being configured 5248 **/ 5249 int i40e_up(struct i40e_vsi *vsi) 5250 { 5251 int err; 5252 5253 err = i40e_vsi_configure(vsi); 5254 if (!err) 5255 err = i40e_up_complete(vsi); 5256 5257 return err; 5258 } 5259 5260 /** 5261 * i40e_down - Shutdown the connection processing 5262 * @vsi: the VSI being stopped 5263 **/ 5264 void i40e_down(struct i40e_vsi *vsi) 5265 { 5266 int i; 5267 5268 /* It is assumed that the caller of this function 5269 * sets the vsi->state __I40E_DOWN bit. 5270 */ 5271 if (vsi->netdev) { 5272 netif_carrier_off(vsi->netdev); 5273 netif_tx_disable(vsi->netdev); 5274 } 5275 i40e_vsi_disable_irq(vsi); 5276 i40e_vsi_control_rings(vsi, false); 5277 i40e_napi_disable_all(vsi); 5278 5279 for (i = 0; i < vsi->num_queue_pairs; i++) { 5280 i40e_clean_tx_ring(vsi->tx_rings[i]); 5281 i40e_clean_rx_ring(vsi->rx_rings[i]); 5282 } 5283 } 5284 5285 /** 5286 * i40e_setup_tc - configure multiple traffic classes 5287 * @netdev: net device to configure 5288 * @tc: number of traffic classes to enable 5289 **/ 5290 static int i40e_setup_tc(struct net_device *netdev, u8 tc) 5291 { 5292 struct i40e_netdev_priv *np = netdev_priv(netdev); 5293 struct i40e_vsi *vsi = np->vsi; 5294 struct i40e_pf *pf = vsi->back; 5295 u8 enabled_tc = 0; 5296 int ret = -EINVAL; 5297 int i; 5298 5299 /* Check if DCB enabled to continue */ 5300 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) { 5301 netdev_info(netdev, "DCB is not enabled for adapter\n"); 5302 goto exit; 5303 } 5304 5305 /* Check if MFP enabled */ 5306 if (pf->flags & I40E_FLAG_MFP_ENABLED) { 5307 netdev_info(netdev, "Configuring TC not supported in MFP mode\n"); 5308 goto exit; 5309 } 5310 5311 /* Check whether tc count is within enabled limit */ 5312 if (tc > i40e_pf_get_num_tc(pf)) { 5313 netdev_info(netdev, "TC count greater than enabled on link for adapter\n"); 5314 goto exit; 5315 } 5316 5317 /* Generate TC map for number of tc requested */ 5318 for (i = 0; i < tc; i++) 5319 enabled_tc |= BIT(i); 5320 5321 /* Requesting same TC configuration as already enabled */ 5322 if (enabled_tc == vsi->tc_config.enabled_tc) 5323 return 0; 5324 5325 /* Quiesce VSI queues */ 5326 i40e_quiesce_vsi(vsi); 5327 5328 /* Configure VSI for enabled TCs */ 5329 ret = i40e_vsi_config_tc(vsi, enabled_tc); 5330 if (ret) { 5331 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n", 5332 vsi->seid); 5333 goto exit; 5334 } 5335 5336 /* Unquiesce VSI */ 5337 i40e_unquiesce_vsi(vsi); 5338 5339 exit: 5340 return ret; 5341 } 5342 5343 #ifdef I40E_FCOE 5344 int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto, 5345 struct tc_to_netdev *tc) 5346 #else 5347 static int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto, 5348 struct tc_to_netdev *tc) 5349 #endif 5350 { 5351 if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO) 5352 return -EINVAL; 5353 return i40e_setup_tc(netdev, tc->tc); 5354 } 5355 5356 /** 5357 * i40e_open - Called when a network interface is made active 5358 * @netdev: network interface device structure 5359 * 5360 * The open entry point is called when a network interface is made 5361 * active by the system (IFF_UP). 
At this point all resources needed
5362 * for transmit and receive operations are allocated, the interrupt
5363 * handler is registered with the OS, the netdev watchdog subtask is
5364 * enabled, and the stack is notified that the interface is ready.
5365 *
5366 * Returns 0 on success, negative value on failure
5367 **/
5368 int i40e_open(struct net_device *netdev)
5369 {
5370 struct i40e_netdev_priv *np = netdev_priv(netdev);
5371 struct i40e_vsi *vsi = np->vsi;
5372 struct i40e_pf *pf = vsi->back;
5373 int err;
5374
5375 /* disallow open during test or if eeprom is broken */
5376 if (test_bit(__I40E_TESTING, &pf->state) ||
5377 test_bit(__I40E_BAD_EEPROM, &pf->state))
5378 return -EBUSY;
5379
5380 netif_carrier_off(netdev);
5381
5382 err = i40e_vsi_open(vsi);
5383 if (err)
5384 return err;
5385
5386 /* configure global TSO hardware offload settings */
5387 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
5388 TCP_FLAG_FIN) >> 16);
5389 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
5390 TCP_FLAG_FIN |
5391 TCP_FLAG_CWR) >> 16);
5392 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
5393
5394 #ifdef CONFIG_I40E_VXLAN
5395 vxlan_get_rx_port(netdev);
5396 #endif
5397 #ifdef CONFIG_I40E_GENEVE
5398 if (pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE)
5399 geneve_get_rx_port(netdev);
5400 #endif
5401
5402 i40e_notify_client_of_netdev_open(vsi);
5403
5404 return 0;
5405 }
5406
5407 /**
5408 * i40e_vsi_open - Bring up a VSI
5409 * @vsi: the VSI to open
5410 *
5411 * Finish initialization of the VSI.
5412 *
5413 * Returns 0 on success, negative value on failure
5414 **/
5415 int i40e_vsi_open(struct i40e_vsi *vsi)
5416 {
5417 struct i40e_pf *pf = vsi->back;
5418 char int_name[I40E_INT_NAME_STR_LEN];
5419 int err;
5420
5421 /* allocate descriptors */
5422 err = i40e_vsi_setup_tx_resources(vsi);
5423 if (err)
5424 goto err_setup_tx;
5425 err = i40e_vsi_setup_rx_resources(vsi);
5426 if (err)
5427 goto err_setup_rx;
5428
5429 err = i40e_vsi_configure(vsi);
5430 if (err)
5431 goto err_setup_rx;
5432
5433 if (vsi->netdev) {
5434 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
5435 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
5436 err = i40e_vsi_request_irq(vsi, int_name);
5437 if (err)
5438 goto err_setup_rx;
5439
5440 /* Notify the stack of the actual queue counts. */
5441 err = netif_set_real_num_tx_queues(vsi->netdev,
5442 vsi->num_queue_pairs);
5443 if (err)
5444 goto err_set_queues;
5445
5446 err = netif_set_real_num_rx_queues(vsi->netdev,
5447 vsi->num_queue_pairs);
5448 if (err)
5449 goto err_set_queues;
5450
5451 } else if (vsi->type == I40E_VSI_FDIR) {
5452 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
5453 dev_driver_string(&pf->pdev->dev),
5454 dev_name(&pf->pdev->dev));
5455 err = i40e_vsi_request_irq(vsi, int_name);
5456
5457 } else {
5458 err = -EINVAL;
5459 goto err_setup_rx;
5460 }
5461
5462 err = i40e_up_complete(vsi);
5463 if (err)
5464 goto err_up_complete;
5465
5466 return 0;
5467
5468 err_up_complete:
5469 i40e_down(vsi);
5470 err_set_queues:
5471 i40e_vsi_free_irq(vsi);
5472 err_setup_rx:
5473 i40e_vsi_free_rx_resources(vsi);
5474 err_setup_tx:
5475 i40e_vsi_free_tx_resources(vsi);
5476 if (vsi == pf->vsi[pf->lan_vsi])
5477 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
5478
5479 return err;
5480 }
5481
5482 /**
5483 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
5484 * @pf: Pointer to PF
5485 *
5486 * This function destroys the hlist where all the Flow Director
5487 * filters were saved.
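 * Only the driver's software bookkeeping is dropped here; no admin
 * queue command is issued to remove the filters from the hardware.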
5488 **/ 5489 static void i40e_fdir_filter_exit(struct i40e_pf *pf) 5490 { 5491 struct i40e_fdir_filter *filter; 5492 struct hlist_node *node2; 5493 5494 hlist_for_each_entry_safe(filter, node2, 5495 &pf->fdir_filter_list, fdir_node) { 5496 hlist_del(&filter->fdir_node); 5497 kfree(filter); 5498 } 5499 pf->fdir_pf_active_filters = 0; 5500 } 5501 5502 /** 5503 * i40e_close - Disables a network interface 5504 * @netdev: network interface device structure 5505 * 5506 * The close entry point is called when an interface is de-activated 5507 * by the OS. The hardware is still under the driver's control, but 5508 * this netdev interface is disabled. 5509 * 5510 * Returns 0, this is not allowed to fail 5511 **/ 5512 #ifdef I40E_FCOE 5513 int i40e_close(struct net_device *netdev) 5514 #else 5515 static int i40e_close(struct net_device *netdev) 5516 #endif 5517 { 5518 struct i40e_netdev_priv *np = netdev_priv(netdev); 5519 struct i40e_vsi *vsi = np->vsi; 5520 5521 i40e_vsi_close(vsi); 5522 5523 return 0; 5524 } 5525 5526 /** 5527 * i40e_do_reset - Start a PF or Core Reset sequence 5528 * @pf: board private structure 5529 * @reset_flags: which reset is requested 5530 * 5531 * The essential difference in resets is that the PF Reset 5532 * doesn't clear the packet buffers, doesn't reset the PE 5533 * firmware, and doesn't bother the other PFs on the chip. 5534 **/ 5535 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) 5536 { 5537 u32 val; 5538 5539 WARN_ON(in_interrupt()); 5540 5541 if (i40e_check_asq_alive(&pf->hw)) 5542 i40e_vc_notify_reset(pf); 5543 5544 /* do the biggest reset indicated */ 5545 if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) { 5546 5547 /* Request a Global Reset 5548 * 5549 * This will start the chip's countdown to the actual full 5550 * chip reset event, and a warning interrupt to be sent 5551 * to all PFs, including the requestor. Our handler 5552 * for the warning interrupt will deal with the shutdown 5553 * and recovery of the switch setup. 5554 */ 5555 dev_dbg(&pf->pdev->dev, "GlobalR requested\n"); 5556 val = rd32(&pf->hw, I40E_GLGEN_RTRIG); 5557 val |= I40E_GLGEN_RTRIG_GLOBR_MASK; 5558 wr32(&pf->hw, I40E_GLGEN_RTRIG, val); 5559 5560 } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) { 5561 5562 /* Request a Core Reset 5563 * 5564 * Same as Global Reset, except does *not* include the MAC/PHY 5565 */ 5566 dev_dbg(&pf->pdev->dev, "CoreR requested\n"); 5567 val = rd32(&pf->hw, I40E_GLGEN_RTRIG); 5568 val |= I40E_GLGEN_RTRIG_CORER_MASK; 5569 wr32(&pf->hw, I40E_GLGEN_RTRIG, val); 5570 i40e_flush(&pf->hw); 5571 5572 } else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) { 5573 5574 /* Request a PF Reset 5575 * 5576 * Resets only the PF-specific registers 5577 * 5578 * This goes directly to the tear-down and rebuild of 5579 * the switch, since we need to do all the recovery as 5580 * for the Core Reset. 
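 *
 * Unlike GlobalR/CoreR, no trigger bit is written to I40E_GLGEN_RTRIG
 * here; the teardown and rebuild are driven entirely by
 * i40e_handle_reset_warning().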
5581 */ 5582 dev_dbg(&pf->pdev->dev, "PFR requested\n"); 5583 i40e_handle_reset_warning(pf); 5584 5585 } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) { 5586 int v; 5587 5588 /* Find the VSI(s) that requested a re-init */ 5589 dev_info(&pf->pdev->dev, 5590 "VSI reinit requested\n"); 5591 for (v = 0; v < pf->num_alloc_vsi; v++) { 5592 struct i40e_vsi *vsi = pf->vsi[v]; 5593 5594 if (vsi != NULL && 5595 test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) { 5596 i40e_vsi_reinit_locked(pf->vsi[v]); 5597 clear_bit(__I40E_REINIT_REQUESTED, &vsi->state); 5598 } 5599 } 5600 } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) { 5601 int v; 5602 5603 /* Find the VSI(s) that needs to be brought down */ 5604 dev_info(&pf->pdev->dev, "VSI down requested\n"); 5605 for (v = 0; v < pf->num_alloc_vsi; v++) { 5606 struct i40e_vsi *vsi = pf->vsi[v]; 5607 5608 if (vsi != NULL && 5609 test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) { 5610 set_bit(__I40E_DOWN, &vsi->state); 5611 i40e_down(vsi); 5612 clear_bit(__I40E_DOWN_REQUESTED, &vsi->state); 5613 } 5614 } 5615 } else { 5616 dev_info(&pf->pdev->dev, 5617 "bad reset request 0x%08x\n", reset_flags); 5618 } 5619 } 5620 5621 #ifdef CONFIG_I40E_DCB 5622 /** 5623 * i40e_dcb_need_reconfig - Check if DCB needs reconfig 5624 * @pf: board private structure 5625 * @old_cfg: current DCB config 5626 * @new_cfg: new DCB config 5627 **/ 5628 bool i40e_dcb_need_reconfig(struct i40e_pf *pf, 5629 struct i40e_dcbx_config *old_cfg, 5630 struct i40e_dcbx_config *new_cfg) 5631 { 5632 bool need_reconfig = false; 5633 5634 /* Check if ETS configuration has changed */ 5635 if (memcmp(&new_cfg->etscfg, 5636 &old_cfg->etscfg, 5637 sizeof(new_cfg->etscfg))) { 5638 /* If Priority Table has changed reconfig is needed */ 5639 if (memcmp(&new_cfg->etscfg.prioritytable, 5640 &old_cfg->etscfg.prioritytable, 5641 sizeof(new_cfg->etscfg.prioritytable))) { 5642 need_reconfig = true; 5643 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n"); 5644 } 5645 5646 if (memcmp(&new_cfg->etscfg.tcbwtable, 5647 &old_cfg->etscfg.tcbwtable, 5648 sizeof(new_cfg->etscfg.tcbwtable))) 5649 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n"); 5650 5651 if (memcmp(&new_cfg->etscfg.tsatable, 5652 &old_cfg->etscfg.tsatable, 5653 sizeof(new_cfg->etscfg.tsatable))) 5654 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n"); 5655 } 5656 5657 /* Check if PFC configuration has changed */ 5658 if (memcmp(&new_cfg->pfc, 5659 &old_cfg->pfc, 5660 sizeof(new_cfg->pfc))) { 5661 need_reconfig = true; 5662 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n"); 5663 } 5664 5665 /* Check if APP Table has changed */ 5666 if (memcmp(&new_cfg->app, 5667 &old_cfg->app, 5668 sizeof(new_cfg->app))) { 5669 need_reconfig = true; 5670 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n"); 5671 } 5672 5673 dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig); 5674 return need_reconfig; 5675 } 5676 5677 /** 5678 * i40e_handle_lldp_event - Handle LLDP Change MIB event 5679 * @pf: board private structure 5680 * @e: event info posted on ARQ 5681 **/ 5682 static int i40e_handle_lldp_event(struct i40e_pf *pf, 5683 struct i40e_arq_event_info *e) 5684 { 5685 struct i40e_aqc_lldp_get_mib *mib = 5686 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw; 5687 struct i40e_hw *hw = &pf->hw; 5688 struct i40e_dcbx_config tmp_dcbx_cfg; 5689 bool need_reconfig = false; 5690 int ret = 0; 5691 u8 type; 5692 5693 /* Not DCB capable or capability disabled */ 5694 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) 5695 return ret; 5696 5697 /* 
Ignore if event is not for Nearest Bridge */
5698 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
5699 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
5700 dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
5701 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
5702 return ret;
5703
5704 /* Check MIB Type and return if event for Remote MIB update */
5705 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
5706 dev_dbg(&pf->pdev->dev,
5707 "LLDP event mib type %s\n", type ? "remote" : "local");
5708 if (type == I40E_AQ_LLDP_MIB_REMOTE) {
5709 /* Update the remote cached instance and return */
5710 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
5711 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
5712 &hw->remote_dcbx_config);
5713 goto exit;
5714 }
5715
5716 /* Store the old configuration */
5717 tmp_dcbx_cfg = hw->local_dcbx_config;
5718
5719 /* Reset the old DCBx configuration data */
5720 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
5721 /* Get updated DCBX data from firmware */
5722 ret = i40e_get_dcb_config(&pf->hw);
5723 if (ret) {
5724 dev_info(&pf->pdev->dev,
5725 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
5726 i40e_stat_str(&pf->hw, ret),
5727 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5728 goto exit;
5729 }
5730
5731 /* No change detected in DCBX configs */
5732 if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
5733 sizeof(tmp_dcbx_cfg))) {
5734 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
5735 goto exit;
5736 }
5737
5738 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
5739 &hw->local_dcbx_config);
5740
5741 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
5742
5743 if (!need_reconfig)
5744 goto exit;
5745
5746 /* Enable DCB tagging only when more than one TC */
5747 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
5748 pf->flags |= I40E_FLAG_DCB_ENABLED;
5749 else
5750 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
5751
5752 set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
5753 /* Reconfiguration needed; quiesce all VSIs */
5754 i40e_pf_quiesce_all_vsi(pf);
5755
5756 /* Configuration changed; update the VEBs/VSIs */
5757 i40e_dcb_reconfigure(pf);
5758
5759 ret = i40e_resume_port_tx(pf);
5760
5761 clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
5762 /* In case of error, there is no point in resuming VSIs */
5763 if (ret)
5764 goto exit;
5765
5766 /* Wait for the PF's queues to be disabled */
5767 ret = i40e_pf_wait_queues_disabled(pf);
5768 if (ret) {
5769 /* Schedule PF reset to recover */
5770 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
5771 i40e_service_event_schedule(pf);
5772 } else {
5773 i40e_pf_unquiesce_all_vsi(pf);
5774 }
5775
5776 exit:
5777 return ret;
5778 }
5779 #endif /* CONFIG_I40E_DCB */
5780
5781 /**
5782 * i40e_do_reset_safe - Protected reset path for userland calls.
 * @pf: board private structure
 * @reset_flags: which reset is requested
 *
 **/
void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
{
	rtnl_lock();
	i40e_do_reset(pf, reset_flags);
	rtnl_unlock();
}

/**
 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
 * @pf: board private structure
 * @e: event info posted on ARQ
 *
 * Handler for LAN Queue Overflow Event generated by the firmware for PF
 * and VF queues
 **/
static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
					   struct i40e_arq_event_info *e)
{
	struct i40e_aqc_lan_overflow *data =
		(struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
	u32 queue = le32_to_cpu(data->prtdcb_rupto);
	u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	u16 vf_id;

	dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
		queue, qtx_ctl);

	/* Queue belongs to VF, find the VF and issue VF reset */
	if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
	    >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
		vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
			 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
		vf_id -= hw->func_caps.vf_base_id;
		vf = &pf->vf[vf_id];
		i40e_vc_notify_vf_reset(vf);
		/* Allow VF to process pending reset notification */
		msleep(20);
		i40e_reset_vf(vf, false);
	}
}

/**
 * i40e_service_event_complete - Finish up the service event
 * @pf: board private structure
 **/
static void i40e_service_event_complete(struct i40e_pf *pf)
{
	WARN_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__I40E_SERVICE_SCHED, &pf->state);
}

/**
 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
 * @pf: board private structure
 **/
u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
{
	u32 val, fcnt_prog;

	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
	return fcnt_prog;
}

/**
 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
 * @pf: board private structure
 **/
u32 i40e_get_current_fd_count(struct i40e_pf *pf)
{
	u32 val, fcnt_prog;

	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
		    ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
		      I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
	return fcnt_prog;
}

/**
 * i40e_get_global_fd_count - Get total FD filters programmed on device
 * @pf: board private structure
 **/
u32 i40e_get_global_fd_count(struct i40e_pf *pf)
{
	u32 val, fcnt_prog;

	val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
	fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
		    ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
		     I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
	return fcnt_prog;
}
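
/* Worked example for the counter helpers above and the re-enable logic
 * below; all concrete numbers here are illustrative assumptions, not the
 * real register layout or headroom value:
 *
 *	If I40E_PFQF_FDSTAT hypothetically read back 0x00230010, with the
 *	guaranteed count in the low field and the best-effort count in the
 *	high field, i40e_get_current_fd_count() would return
 *	0x10 + 0x23 = 16 + 35 = 51 programmed filters.
 *
 *	With fdir_pf_filter_count = 8192 and a headroom constant of 32,
 *	sideband would be re-enabled once fewer than 8192 - 32 = 8160
 *	filters are programmed, and ATR once fewer than 8192 - 2 * 32 =
 *	8128; ATR waits for twice the headroom because it keeps adding
 *	filters on its own as new flows appear.
 */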
/**
 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
 * @pf: board private structure
 **/
void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
{
	struct i40e_fdir_filter *filter;
	u32 fcnt_prog, fcnt_avail;
	struct hlist_node *node;

	if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
		return;

	/* Check if FD SB or ATR was auto disabled and if there is enough room
	 * to re-enable
	 */
	fcnt_prog = i40e_get_global_fd_count(pf);
	fcnt_avail = pf->fdir_pf_filter_count;
	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
	    (pf->fd_add_err == 0) ||
	    (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
		if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
			pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
			if (I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
		}
	}
	/* Wait for some more space to be available to turn on ATR */
	if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
			if (I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
		}
	}

	/* if hw had a problem adding a filter, delete it */
	if (pf->fd_inv > 0) {
		hlist_for_each_entry_safe(filter, node,
					  &pf->fdir_filter_list, fdir_node) {
			if (filter->fd_id == pf->fd_inv) {
				hlist_del(&filter->fdir_node);
				kfree(filter);
				pf->fdir_pf_active_filters--;
			}
		}
	}
}

#define I40E_MIN_FD_FLUSH_INTERVAL 10
#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
/**
 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
 * @pf: board private structure
 **/
static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
{
	unsigned long min_flush_time;
	int flush_wait_retry = 50;
	bool disable_atr = false;
	int fd_room;
	int reg;

	if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
		return;

	if (!time_after(jiffies, pf->fd_flush_timestamp +
			(I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
		return;

	/* If the flush is happening too quickly and we have mostly SB rules
	 * we should not re-enable ATR for some time.
	 */
	min_flush_time = pf->fd_flush_timestamp +
			 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
	fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;

	if (!(time_after(jiffies, min_flush_time)) &&
	    (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
		disable_atr = true;
	}

	pf->fd_flush_timestamp = jiffies;
	pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
	/* flush all filters */
	wr32(&pf->hw, I40E_PFQF_CTL_1,
	     I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
	i40e_flush(&pf->hw);
	pf->fd_flush_cnt++;
	pf->fd_add_err = 0;
	do {
		/* Check FD flush status every 5-6msec */
		usleep_range(5000, 6000);
		reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
		if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
			break;
	} while (flush_wait_retry--);
	if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
		dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
	} else {
		/* replay sideband filters */
		i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
		if (!disable_atr)
			pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
		clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
	}
}

/**
 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
 * @pf: board private structure
 **/
u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
{
	return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
}

/* We can see up to 256 filter programming descriptors in transit if the
 * filters are being applied really fast, before we see the first
 * filter miss error on Rx queue 0.  Accumulating enough error messages
 * before reacting will make sure we don't cause a flush too often.
6014 */ 6015 #define I40E_MAX_FD_PROGRAM_ERROR 256 6016 6017 /** 6018 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table 6019 * @pf: board private structure 6020 **/ 6021 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf) 6022 { 6023 6024 /* if interface is down do nothing */ 6025 if (test_bit(__I40E_DOWN, &pf->state)) 6026 return; 6027 6028 if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))) 6029 return; 6030 6031 if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) 6032 i40e_fdir_flush_and_replay(pf); 6033 6034 i40e_fdir_check_and_reenable(pf); 6035 6036 } 6037 6038 /** 6039 * i40e_vsi_link_event - notify VSI of a link event 6040 * @vsi: vsi to be notified 6041 * @link_up: link up or down 6042 **/ 6043 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up) 6044 { 6045 if (!vsi || test_bit(__I40E_DOWN, &vsi->state)) 6046 return; 6047 6048 switch (vsi->type) { 6049 case I40E_VSI_MAIN: 6050 #ifdef I40E_FCOE 6051 case I40E_VSI_FCOE: 6052 #endif 6053 if (!vsi->netdev || !vsi->netdev_registered) 6054 break; 6055 6056 if (link_up) { 6057 netif_carrier_on(vsi->netdev); 6058 netif_tx_wake_all_queues(vsi->netdev); 6059 } else { 6060 netif_carrier_off(vsi->netdev); 6061 netif_tx_stop_all_queues(vsi->netdev); 6062 } 6063 break; 6064 6065 case I40E_VSI_SRIOV: 6066 case I40E_VSI_VMDQ2: 6067 case I40E_VSI_CTRL: 6068 case I40E_VSI_IWARP: 6069 case I40E_VSI_MIRROR: 6070 default: 6071 /* there is no notification for other VSIs */ 6072 break; 6073 } 6074 } 6075 6076 /** 6077 * i40e_veb_link_event - notify elements on the veb of a link event 6078 * @veb: veb to be notified 6079 * @link_up: link up or down 6080 **/ 6081 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up) 6082 { 6083 struct i40e_pf *pf; 6084 int i; 6085 6086 if (!veb || !veb->pf) 6087 return; 6088 pf = veb->pf; 6089 6090 /* depth first... */ 6091 for (i = 0; i < I40E_MAX_VEB; i++) 6092 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid)) 6093 i40e_veb_link_event(pf->veb[i], link_up); 6094 6095 /* ... now the local VSIs */ 6096 for (i = 0; i < pf->num_alloc_vsi; i++) 6097 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid)) 6098 i40e_vsi_link_event(pf->vsi[i], link_up); 6099 } 6100 6101 /** 6102 * i40e_link_event - Update netif_carrier status 6103 * @pf: board private structure 6104 **/ 6105 static void i40e_link_event(struct i40e_pf *pf) 6106 { 6107 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 6108 u8 new_link_speed, old_link_speed; 6109 i40e_status status; 6110 bool new_link, old_link; 6111 6112 /* save off old link status information */ 6113 pf->hw.phy.link_info_old = pf->hw.phy.link_info; 6114 6115 /* set this to force the get_link_status call to refresh state */ 6116 pf->hw.phy.get_link_info = true; 6117 6118 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP); 6119 6120 status = i40e_get_link_status(&pf->hw, &new_link); 6121 if (status) { 6122 dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n", 6123 status); 6124 return; 6125 } 6126 6127 old_link_speed = pf->hw.phy.link_info_old.link_speed; 6128 new_link_speed = pf->hw.phy.link_info.link_speed; 6129 6130 if (new_link == old_link && 6131 new_link_speed == old_link_speed && 6132 (test_bit(__I40E_DOWN, &vsi->state) || 6133 new_link == netif_carrier_ok(vsi->netdev))) 6134 return; 6135 6136 if (!test_bit(__I40E_DOWN, &vsi->state)) 6137 i40e_print_link_message(vsi, new_link); 6138 6139 /* Notify the base of the switch tree connected to 6140 * the link. 
Floating VEBs are not notified. 6141 */ 6142 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb]) 6143 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link); 6144 else 6145 i40e_vsi_link_event(vsi, new_link); 6146 6147 if (pf->vf) 6148 i40e_vc_notify_link_state(pf); 6149 6150 if (pf->flags & I40E_FLAG_PTP) 6151 i40e_ptp_set_increment(pf); 6152 } 6153 6154 /** 6155 * i40e_watchdog_subtask - periodic checks not using event driven response 6156 * @pf: board private structure 6157 **/ 6158 static void i40e_watchdog_subtask(struct i40e_pf *pf) 6159 { 6160 int i; 6161 6162 /* if interface is down do nothing */ 6163 if (test_bit(__I40E_DOWN, &pf->state) || 6164 test_bit(__I40E_CONFIG_BUSY, &pf->state)) 6165 return; 6166 6167 /* make sure we don't do these things too often */ 6168 if (time_before(jiffies, (pf->service_timer_previous + 6169 pf->service_timer_period))) 6170 return; 6171 pf->service_timer_previous = jiffies; 6172 6173 if (pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) 6174 i40e_link_event(pf); 6175 6176 /* Update the stats for active netdevs so the network stack 6177 * can look at updated numbers whenever it cares to 6178 */ 6179 for (i = 0; i < pf->num_alloc_vsi; i++) 6180 if (pf->vsi[i] && pf->vsi[i]->netdev) 6181 i40e_update_stats(pf->vsi[i]); 6182 6183 if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) { 6184 /* Update the stats for the active switching components */ 6185 for (i = 0; i < I40E_MAX_VEB; i++) 6186 if (pf->veb[i]) 6187 i40e_update_veb_stats(pf->veb[i]); 6188 } 6189 6190 i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]); 6191 } 6192 6193 /** 6194 * i40e_reset_subtask - Set up for resetting the device and driver 6195 * @pf: board private structure 6196 **/ 6197 static void i40e_reset_subtask(struct i40e_pf *pf) 6198 { 6199 u32 reset_flags = 0; 6200 6201 rtnl_lock(); 6202 if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) { 6203 reset_flags |= BIT(__I40E_REINIT_REQUESTED); 6204 clear_bit(__I40E_REINIT_REQUESTED, &pf->state); 6205 } 6206 if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) { 6207 reset_flags |= BIT(__I40E_PF_RESET_REQUESTED); 6208 clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state); 6209 } 6210 if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) { 6211 reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED); 6212 clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state); 6213 } 6214 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) { 6215 reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED); 6216 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state); 6217 } 6218 if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) { 6219 reset_flags |= BIT(__I40E_DOWN_REQUESTED); 6220 clear_bit(__I40E_DOWN_REQUESTED, &pf->state); 6221 } 6222 6223 /* If there's a recovery already waiting, it takes 6224 * precedence before starting a new reset sequence. 
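	 *
	 * (__I40E_RESET_INTR_RECEIVED is expected to be set from the
	 * interrupt path when the hardware itself reports a reset;
	 * checking it first means a firmware-initiated recovery is never
	 * preempted by a software-requested reset.)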
6225 */ 6226 if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) { 6227 i40e_handle_reset_warning(pf); 6228 goto unlock; 6229 } 6230 6231 /* If we're already down or resetting, just bail */ 6232 if (reset_flags && 6233 !test_bit(__I40E_DOWN, &pf->state) && 6234 !test_bit(__I40E_CONFIG_BUSY, &pf->state)) 6235 i40e_do_reset(pf, reset_flags); 6236 6237 unlock: 6238 rtnl_unlock(); 6239 } 6240 6241 /** 6242 * i40e_handle_link_event - Handle link event 6243 * @pf: board private structure 6244 * @e: event info posted on ARQ 6245 **/ 6246 static void i40e_handle_link_event(struct i40e_pf *pf, 6247 struct i40e_arq_event_info *e) 6248 { 6249 struct i40e_aqc_get_link_status *status = 6250 (struct i40e_aqc_get_link_status *)&e->desc.params.raw; 6251 6252 /* Do a new status request to re-enable LSE reporting 6253 * and load new status information into the hw struct 6254 * This completely ignores any state information 6255 * in the ARQ event info, instead choosing to always 6256 * issue the AQ update link status command. 6257 */ 6258 i40e_link_event(pf); 6259 6260 /* check for unqualified module, if link is down */ 6261 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) && 6262 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) && 6263 (!(status->link_info & I40E_AQ_LINK_UP))) 6264 dev_err(&pf->pdev->dev, 6265 "The driver failed to link because an unqualified module was detected.\n"); 6266 } 6267 6268 /** 6269 * i40e_clean_adminq_subtask - Clean the AdminQ rings 6270 * @pf: board private structure 6271 **/ 6272 static void i40e_clean_adminq_subtask(struct i40e_pf *pf) 6273 { 6274 struct i40e_arq_event_info event; 6275 struct i40e_hw *hw = &pf->hw; 6276 u16 pending, i = 0; 6277 i40e_status ret; 6278 u16 opcode; 6279 u32 oldval; 6280 u32 val; 6281 6282 /* Do not run clean AQ when PF reset fails */ 6283 if (test_bit(__I40E_RESET_FAILED, &pf->state)) 6284 return; 6285 6286 /* check for error indications */ 6287 val = rd32(&pf->hw, pf->hw.aq.arq.len); 6288 oldval = val; 6289 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) { 6290 if (hw->debug_mask & I40E_DEBUG_AQ) 6291 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n"); 6292 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK; 6293 } 6294 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) { 6295 if (hw->debug_mask & I40E_DEBUG_AQ) 6296 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n"); 6297 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK; 6298 pf->arq_overflows++; 6299 } 6300 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) { 6301 if (hw->debug_mask & I40E_DEBUG_AQ) 6302 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n"); 6303 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK; 6304 } 6305 if (oldval != val) 6306 wr32(&pf->hw, pf->hw.aq.arq.len, val); 6307 6308 val = rd32(&pf->hw, pf->hw.aq.asq.len); 6309 oldval = val; 6310 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) { 6311 if (pf->hw.debug_mask & I40E_DEBUG_AQ) 6312 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n"); 6313 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK; 6314 } 6315 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) { 6316 if (pf->hw.debug_mask & I40E_DEBUG_AQ) 6317 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n"); 6318 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK; 6319 } 6320 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) { 6321 if (pf->hw.debug_mask & I40E_DEBUG_AQ) 6322 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n"); 6323 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK; 6324 } 6325 if (oldval != val) 6326 wr32(&pf->hw, pf->hw.aq.asq.len, val); 6327 6328 event.buf_len = I40E_MAX_AQ_BUF_SIZE; 6329 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); 6330 if (!event.msg_buf) 6331 return; 
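
	/* Drain the ARQ: each pass pulls one event off the admin receive
	 * queue and dispatches it by opcode, stopping when the queue
	 * reports no more work or the per-invocation work limit is hit.
	 */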
6332 6333 do { 6334 ret = i40e_clean_arq_element(hw, &event, &pending); 6335 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) 6336 break; 6337 else if (ret) { 6338 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret); 6339 break; 6340 } 6341 6342 opcode = le16_to_cpu(event.desc.opcode); 6343 switch (opcode) { 6344 6345 case i40e_aqc_opc_get_link_status: 6346 i40e_handle_link_event(pf, &event); 6347 break; 6348 case i40e_aqc_opc_send_msg_to_pf: 6349 ret = i40e_vc_process_vf_msg(pf, 6350 le16_to_cpu(event.desc.retval), 6351 le32_to_cpu(event.desc.cookie_high), 6352 le32_to_cpu(event.desc.cookie_low), 6353 event.msg_buf, 6354 event.msg_len); 6355 break; 6356 case i40e_aqc_opc_lldp_update_mib: 6357 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n"); 6358 #ifdef CONFIG_I40E_DCB 6359 rtnl_lock(); 6360 ret = i40e_handle_lldp_event(pf, &event); 6361 rtnl_unlock(); 6362 #endif /* CONFIG_I40E_DCB */ 6363 break; 6364 case i40e_aqc_opc_event_lan_overflow: 6365 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n"); 6366 i40e_handle_lan_overflow_event(pf, &event); 6367 break; 6368 case i40e_aqc_opc_send_msg_to_peer: 6369 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n"); 6370 break; 6371 case i40e_aqc_opc_nvm_erase: 6372 case i40e_aqc_opc_nvm_update: 6373 case i40e_aqc_opc_oem_post_update: 6374 i40e_debug(&pf->hw, I40E_DEBUG_NVM, 6375 "ARQ NVM operation 0x%04x completed\n", 6376 opcode); 6377 break; 6378 default: 6379 dev_info(&pf->pdev->dev, 6380 "ARQ Error: Unknown event 0x%04x received\n", 6381 opcode); 6382 break; 6383 } 6384 } while (pending && (i++ < pf->adminq_work_limit)); 6385 6386 clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state); 6387 /* re-enable Admin queue interrupt cause */ 6388 val = rd32(hw, I40E_PFINT_ICR0_ENA); 6389 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK; 6390 wr32(hw, I40E_PFINT_ICR0_ENA, val); 6391 i40e_flush(hw); 6392 6393 kfree(event.msg_buf); 6394 } 6395 6396 /** 6397 * i40e_verify_eeprom - make sure eeprom is good to use 6398 * @pf: board private structure 6399 **/ 6400 static void i40e_verify_eeprom(struct i40e_pf *pf) 6401 { 6402 int err; 6403 6404 err = i40e_diag_eeprom_test(&pf->hw); 6405 if (err) { 6406 /* retry in case of garbage read */ 6407 err = i40e_diag_eeprom_test(&pf->hw); 6408 if (err) { 6409 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n", 6410 err); 6411 set_bit(__I40E_BAD_EEPROM, &pf->state); 6412 } 6413 } 6414 6415 if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) { 6416 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n"); 6417 clear_bit(__I40E_BAD_EEPROM, &pf->state); 6418 } 6419 } 6420 6421 /** 6422 * i40e_enable_pf_switch_lb 6423 * @pf: pointer to the PF structure 6424 * 6425 * enable switch loop back or die - no point in a return value 6426 **/ 6427 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf) 6428 { 6429 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 6430 struct i40e_vsi_context ctxt; 6431 int ret; 6432 6433 ctxt.seid = pf->main_vsi_seid; 6434 ctxt.pf_num = pf->hw.pf_id; 6435 ctxt.vf_num = 0; 6436 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); 6437 if (ret) { 6438 dev_info(&pf->pdev->dev, 6439 "couldn't get PF vsi config, err %s aq_err %s\n", 6440 i40e_stat_str(&pf->hw, ret), 6441 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 6442 return; 6443 } 6444 ctxt.flags = I40E_AQ_VSI_TYPE_PF; 6445 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 6446 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 6447 6448 ret = 
i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 6449 if (ret) { 6450 dev_info(&pf->pdev->dev, 6451 "update vsi switch failed, err %s aq_err %s\n", 6452 i40e_stat_str(&pf->hw, ret), 6453 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 6454 } 6455 } 6456 6457 /** 6458 * i40e_disable_pf_switch_lb 6459 * @pf: pointer to the PF structure 6460 * 6461 * disable switch loop back or die - no point in a return value 6462 **/ 6463 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf) 6464 { 6465 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 6466 struct i40e_vsi_context ctxt; 6467 int ret; 6468 6469 ctxt.seid = pf->main_vsi_seid; 6470 ctxt.pf_num = pf->hw.pf_id; 6471 ctxt.vf_num = 0; 6472 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); 6473 if (ret) { 6474 dev_info(&pf->pdev->dev, 6475 "couldn't get PF vsi config, err %s aq_err %s\n", 6476 i40e_stat_str(&pf->hw, ret), 6477 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 6478 return; 6479 } 6480 ctxt.flags = I40E_AQ_VSI_TYPE_PF; 6481 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 6482 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 6483 6484 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 6485 if (ret) { 6486 dev_info(&pf->pdev->dev, 6487 "update vsi switch failed, err %s aq_err %s\n", 6488 i40e_stat_str(&pf->hw, ret), 6489 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 6490 } 6491 } 6492 6493 /** 6494 * i40e_config_bridge_mode - Configure the HW bridge mode 6495 * @veb: pointer to the bridge instance 6496 * 6497 * Configure the loop back mode for the LAN VSI that is downlink to the 6498 * specified HW bridge instance. It is expected this function is called 6499 * when a new HW bridge is instantiated. 6500 **/ 6501 static void i40e_config_bridge_mode(struct i40e_veb *veb) 6502 { 6503 struct i40e_pf *pf = veb->pf; 6504 6505 if (pf->hw.debug_mask & I40E_DEBUG_LAN) 6506 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n", 6507 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); 6508 if (veb->bridge_mode & BRIDGE_MODE_VEPA) 6509 i40e_disable_pf_switch_lb(pf); 6510 else 6511 i40e_enable_pf_switch_lb(pf); 6512 } 6513 6514 /** 6515 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it 6516 * @veb: pointer to the VEB instance 6517 * 6518 * This is a recursive function that first builds the attached VSIs then 6519 * recurses in to build the next layer of VEB. We track the connections 6520 * through our own index numbers because the seid's from the HW could 6521 * change across the reset. 
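 * (The recursion below is bounded by I40E_MAX_VEB: the owner VSI is
 * re-added first, then the VEB itself, then the remaining VSIs that
 * uplink to it, and finally any child VEBs.)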
6522 **/ 6523 static int i40e_reconstitute_veb(struct i40e_veb *veb) 6524 { 6525 struct i40e_vsi *ctl_vsi = NULL; 6526 struct i40e_pf *pf = veb->pf; 6527 int v, veb_idx; 6528 int ret; 6529 6530 /* build VSI that owns this VEB, temporarily attached to base VEB */ 6531 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) { 6532 if (pf->vsi[v] && 6533 pf->vsi[v]->veb_idx == veb->idx && 6534 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) { 6535 ctl_vsi = pf->vsi[v]; 6536 break; 6537 } 6538 } 6539 if (!ctl_vsi) { 6540 dev_info(&pf->pdev->dev, 6541 "missing owner VSI for veb_idx %d\n", veb->idx); 6542 ret = -ENOENT; 6543 goto end_reconstitute; 6544 } 6545 if (ctl_vsi != pf->vsi[pf->lan_vsi]) 6546 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; 6547 ret = i40e_add_vsi(ctl_vsi); 6548 if (ret) { 6549 dev_info(&pf->pdev->dev, 6550 "rebuild of veb_idx %d owner VSI failed: %d\n", 6551 veb->idx, ret); 6552 goto end_reconstitute; 6553 } 6554 i40e_vsi_reset_stats(ctl_vsi); 6555 6556 /* create the VEB in the switch and move the VSI onto the VEB */ 6557 ret = i40e_add_veb(veb, ctl_vsi); 6558 if (ret) 6559 goto end_reconstitute; 6560 6561 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) 6562 veb->bridge_mode = BRIDGE_MODE_VEB; 6563 else 6564 veb->bridge_mode = BRIDGE_MODE_VEPA; 6565 i40e_config_bridge_mode(veb); 6566 6567 /* create the remaining VSIs attached to this VEB */ 6568 for (v = 0; v < pf->num_alloc_vsi; v++) { 6569 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi) 6570 continue; 6571 6572 if (pf->vsi[v]->veb_idx == veb->idx) { 6573 struct i40e_vsi *vsi = pf->vsi[v]; 6574 6575 vsi->uplink_seid = veb->seid; 6576 ret = i40e_add_vsi(vsi); 6577 if (ret) { 6578 dev_info(&pf->pdev->dev, 6579 "rebuild of vsi_idx %d failed: %d\n", 6580 v, ret); 6581 goto end_reconstitute; 6582 } 6583 i40e_vsi_reset_stats(vsi); 6584 } 6585 } 6586 6587 /* create any VEBs attached to this VEB - RECURSION */ 6588 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) { 6589 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) { 6590 pf->veb[veb_idx]->uplink_seid = veb->seid; 6591 ret = i40e_reconstitute_veb(pf->veb[veb_idx]); 6592 if (ret) 6593 break; 6594 } 6595 } 6596 6597 end_reconstitute: 6598 return ret; 6599 } 6600 6601 /** 6602 * i40e_get_capabilities - get info about the HW 6603 * @pf: the PF struct 6604 **/ 6605 static int i40e_get_capabilities(struct i40e_pf *pf) 6606 { 6607 struct i40e_aqc_list_capabilities_element_resp *cap_buf; 6608 u16 data_size; 6609 int buf_len; 6610 int err; 6611 6612 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp); 6613 do { 6614 cap_buf = kzalloc(buf_len, GFP_KERNEL); 6615 if (!cap_buf) 6616 return -ENOMEM; 6617 6618 /* this loads the data into the hw struct for us */ 6619 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len, 6620 &data_size, 6621 i40e_aqc_opc_list_func_capabilities, 6622 NULL); 6623 /* data loaded, buffer no longer needed */ 6624 kfree(cap_buf); 6625 6626 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) { 6627 /* retry with a larger buffer */ 6628 buf_len = data_size; 6629 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) { 6630 dev_info(&pf->pdev->dev, 6631 "capability discovery failed, err %s aq_err %s\n", 6632 i40e_stat_str(&pf->hw, err), 6633 i40e_aq_str(&pf->hw, 6634 pf->hw.aq.asq_last_status)); 6635 return -ENODEV; 6636 } 6637 } while (err); 6638 6639 if (pf->hw.debug_mask & I40E_DEBUG_USER) 6640 dev_info(&pf->pdev->dev, 6641 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n", 6642 
pf->hw.pf_id, pf->hw.func_caps.num_vfs, 6643 pf->hw.func_caps.num_msix_vectors, 6644 pf->hw.func_caps.num_msix_vectors_vf, 6645 pf->hw.func_caps.fd_filters_guaranteed, 6646 pf->hw.func_caps.fd_filters_best_effort, 6647 pf->hw.func_caps.num_tx_qp, 6648 pf->hw.func_caps.num_vsis); 6649 6650 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \ 6651 + pf->hw.func_caps.num_vfs) 6652 if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) { 6653 dev_info(&pf->pdev->dev, 6654 "got num_vsis %d, setting num_vsis to %d\n", 6655 pf->hw.func_caps.num_vsis, DEF_NUM_VSI); 6656 pf->hw.func_caps.num_vsis = DEF_NUM_VSI; 6657 } 6658 6659 return 0; 6660 } 6661 6662 static int i40e_vsi_clear(struct i40e_vsi *vsi); 6663 6664 /** 6665 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband 6666 * @pf: board private structure 6667 **/ 6668 static void i40e_fdir_sb_setup(struct i40e_pf *pf) 6669 { 6670 struct i40e_vsi *vsi; 6671 int i; 6672 6673 /* quick workaround for an NVM issue that leaves a critical register 6674 * uninitialized 6675 */ 6676 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) { 6677 static const u32 hkey[] = { 6678 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36, 6679 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb, 6680 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21, 6681 0x95b3a76d}; 6682 6683 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++) 6684 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]); 6685 } 6686 6687 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) 6688 return; 6689 6690 /* find existing VSI and see if it needs configuring */ 6691 vsi = NULL; 6692 for (i = 0; i < pf->num_alloc_vsi; i++) { 6693 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { 6694 vsi = pf->vsi[i]; 6695 break; 6696 } 6697 } 6698 6699 /* create a new VSI if none exists */ 6700 if (!vsi) { 6701 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, 6702 pf->vsi[pf->lan_vsi]->seid, 0); 6703 if (!vsi) { 6704 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n"); 6705 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 6706 return; 6707 } 6708 } 6709 6710 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring); 6711 } 6712 6713 /** 6714 * i40e_fdir_teardown - release the Flow Director resources 6715 * @pf: board private structure 6716 **/ 6717 static void i40e_fdir_teardown(struct i40e_pf *pf) 6718 { 6719 int i; 6720 6721 i40e_fdir_filter_exit(pf); 6722 for (i = 0; i < pf->num_alloc_vsi; i++) { 6723 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { 6724 i40e_vsi_release(pf->vsi[i]); 6725 break; 6726 } 6727 } 6728 } 6729 6730 /** 6731 * i40e_prep_for_reset - prep for the core to reset 6732 * @pf: board private structure 6733 * 6734 * Close up the VFs and other things in prep for PF Reset. 
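 * (Concretely: quiesce all VSIs, invalidate the cached VSI SEIDs, and
 * shut down the AdminQ and the LAN HMC so the PF reset starts from a
 * clean slate.)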
6735 **/ 6736 static void i40e_prep_for_reset(struct i40e_pf *pf) 6737 { 6738 struct i40e_hw *hw = &pf->hw; 6739 i40e_status ret = 0; 6740 u32 v; 6741 6742 clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state); 6743 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) 6744 return; 6745 6746 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n"); 6747 6748 /* quiesce the VSIs and their queues that are not already DOWN */ 6749 i40e_pf_quiesce_all_vsi(pf); 6750 6751 for (v = 0; v < pf->num_alloc_vsi; v++) { 6752 if (pf->vsi[v]) 6753 pf->vsi[v]->seid = 0; 6754 } 6755 6756 i40e_shutdown_adminq(&pf->hw); 6757 6758 /* call shutdown HMC */ 6759 if (hw->hmc.hmc_obj) { 6760 ret = i40e_shutdown_lan_hmc(hw); 6761 if (ret) 6762 dev_warn(&pf->pdev->dev, 6763 "shutdown_lan_hmc failed: %d\n", ret); 6764 } 6765 } 6766 6767 /** 6768 * i40e_send_version - update firmware with driver version 6769 * @pf: PF struct 6770 */ 6771 static void i40e_send_version(struct i40e_pf *pf) 6772 { 6773 struct i40e_driver_version dv; 6774 6775 dv.major_version = DRV_VERSION_MAJOR; 6776 dv.minor_version = DRV_VERSION_MINOR; 6777 dv.build_version = DRV_VERSION_BUILD; 6778 dv.subbuild_version = 0; 6779 strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string)); 6780 i40e_aq_send_driver_version(&pf->hw, &dv, NULL); 6781 } 6782 6783 /** 6784 * i40e_reset_and_rebuild - reset and rebuild using a saved config 6785 * @pf: board private structure 6786 * @reinit: if the Main VSI needs to re-initialized. 6787 **/ 6788 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) 6789 { 6790 struct i40e_hw *hw = &pf->hw; 6791 u8 set_fc_aq_fail = 0; 6792 i40e_status ret; 6793 u32 val; 6794 u32 v; 6795 6796 /* Now we wait for GRST to settle out. 6797 * We don't have to delete the VEBs or VSIs from the hw switch 6798 * because the reset will make them disappear. 
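	 *
	 * (Overview of the rebuild sequence below: PF reset -> AdminQ
	 * init -> optional EEPROM re-check -> capability discovery ->
	 * LAN HMC -> DCB -> switch setup -> VEB/VSI reconstitution ->
	 * unquiesce; each step bails out to the recovery labels at the
	 * bottom on failure.)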
	 */
	ret = i40e_pf_reset(hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
		set_bit(__I40E_RESET_FAILED, &pf->state);
		goto clear_recovery;
	}
	pf->pfr_count++;

	if (test_bit(__I40E_DOWN, &pf->state))
		goto clear_recovery;
	dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");

	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
	ret = i40e_init_adminq(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto clear_recovery;
	}

	/* re-verify the eeprom if we just had an EMP reset */
	if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state))
		i40e_verify_eeprom(pf);

	i40e_clear_pxe_mode(hw);
	ret = i40e_get_capabilities(pf);
	if (ret)
		goto end_core_reset;

	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp,
				pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
	if (ret) {
		dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}
	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (ret) {
		dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}

#ifdef CONFIG_I40E_DCB
	ret = i40e_init_pf_dcb(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
		pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
		/* Continue without DCB enabled */
	}
#endif /* CONFIG_I40E_DCB */
#ifdef I40E_FCOE
	i40e_init_pf_fcoe(pf);

#endif
	/* do basic switch setup */
	ret = i40e_setup_pf_switch(pf, reinit);
	if (ret)
		goto end_core_reset;

	/* The driver only wants link up/down and module qualification
	 * reports from firmware.  Note the negative logic.
	 */
	ret = i40e_aq_set_phy_int_mask(&pf->hw,
				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
	if (ret)
		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* make sure our flow control settings are restored */
	ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
	if (ret)
		dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, ret),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* Rebuild the VSIs and VEBs that existed before reset.
	 * They are still in our local switch element arrays, so only
	 * need to rebuild the switch model in the HW.
	 *
	 * If there were VEBs but the reconstitution failed, we'll try
	 * to recover minimal use by getting the basic PF VSI working.
	 */
	if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
		dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
		/* find the one VEB connected to the MAC, and find orphans */
		for (v = 0; v < I40E_MAX_VEB; v++) {
			if (!pf->veb[v])
				continue;

			if (pf->veb[v]->uplink_seid == pf->mac_seid ||
			    pf->veb[v]->uplink_seid == 0) {
				ret = i40e_reconstitute_veb(pf->veb[v]);

				if (!ret)
					continue;

				/* If Main VEB failed, we're in deep doodoo,
				 * so give up rebuilding the switch and set up
				 * for minimal rebuild of PF VSI.
6902 * If orphan failed, we'll report the error 6903 * but try to keep going. 6904 */ 6905 if (pf->veb[v]->uplink_seid == pf->mac_seid) { 6906 dev_info(&pf->pdev->dev, 6907 "rebuild of switch failed: %d, will try to set up simple PF connection\n", 6908 ret); 6909 pf->vsi[pf->lan_vsi]->uplink_seid 6910 = pf->mac_seid; 6911 break; 6912 } else if (pf->veb[v]->uplink_seid == 0) { 6913 dev_info(&pf->pdev->dev, 6914 "rebuild of orphan VEB failed: %d\n", 6915 ret); 6916 } 6917 } 6918 } 6919 } 6920 6921 if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) { 6922 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n"); 6923 /* no VEB, so rebuild only the Main VSI */ 6924 ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]); 6925 if (ret) { 6926 dev_info(&pf->pdev->dev, 6927 "rebuild of Main VSI failed: %d\n", ret); 6928 goto end_core_reset; 6929 } 6930 } 6931 6932 /* Reconfigure hardware for allowing smaller MSS in the case 6933 * of TSO, so that we avoid the MDD being fired and causing 6934 * a reset in the case of small MSS+TSO. 6935 */ 6936 #define I40E_REG_MSS 0x000E64DC 6937 #define I40E_REG_MSS_MIN_MASK 0x3FF0000 6938 #define I40E_64BYTE_MSS 0x400000 6939 val = rd32(hw, I40E_REG_MSS); 6940 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) { 6941 val &= ~I40E_REG_MSS_MIN_MASK; 6942 val |= I40E_64BYTE_MSS; 6943 wr32(hw, I40E_REG_MSS, val); 6944 } 6945 6946 if (pf->flags & I40E_FLAG_RESTART_AUTONEG) { 6947 msleep(75); 6948 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); 6949 if (ret) 6950 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n", 6951 i40e_stat_str(&pf->hw, ret), 6952 i40e_aq_str(&pf->hw, 6953 pf->hw.aq.asq_last_status)); 6954 } 6955 /* reinit the misc interrupt */ 6956 if (pf->flags & I40E_FLAG_MSIX_ENABLED) 6957 ret = i40e_setup_misc_vector(pf); 6958 6959 /* Add a filter to drop all Flow control frames from any VSI from being 6960 * transmitted. By doing so we stop a malicious VF from sending out 6961 * PAUSE or PFC frames and potentially controlling traffic for other 6962 * PF/VF VSIs. 6963 * The FW can still send Flow control frames if enabled. 6964 */ 6965 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw, 6966 pf->main_vsi_seid); 6967 6968 /* restart the VSIs that were rebuilt and running before the reset */ 6969 i40e_pf_unquiesce_all_vsi(pf); 6970 6971 if (pf->num_alloc_vfs) { 6972 for (v = 0; v < pf->num_alloc_vfs; v++) 6973 i40e_reset_vf(&pf->vf[v], true); 6974 } 6975 6976 /* tell the firmware that we're starting */ 6977 i40e_send_version(pf); 6978 6979 end_core_reset: 6980 clear_bit(__I40E_RESET_FAILED, &pf->state); 6981 clear_recovery: 6982 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state); 6983 } 6984 6985 /** 6986 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild 6987 * @pf: board private structure 6988 * 6989 * Close up the VFs and other things in prep for a Core Reset, 6990 * then get ready to rebuild the world. 
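 * (A thin wrapper: i40e_prep_for_reset() followed immediately by
 * i40e_reset_and_rebuild(pf, false).)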
6991 **/ 6992 static void i40e_handle_reset_warning(struct i40e_pf *pf) 6993 { 6994 i40e_prep_for_reset(pf); 6995 i40e_reset_and_rebuild(pf, false); 6996 } 6997 6998 /** 6999 * i40e_handle_mdd_event 7000 * @pf: pointer to the PF structure 7001 * 7002 * Called from the MDD irq handler to identify possibly malicious vfs 7003 **/ 7004 static void i40e_handle_mdd_event(struct i40e_pf *pf) 7005 { 7006 struct i40e_hw *hw = &pf->hw; 7007 bool mdd_detected = false; 7008 bool pf_mdd_detected = false; 7009 struct i40e_vf *vf; 7010 u32 reg; 7011 int i; 7012 7013 if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state)) 7014 return; 7015 7016 /* find what triggered the MDD event */ 7017 reg = rd32(hw, I40E_GL_MDET_TX); 7018 if (reg & I40E_GL_MDET_TX_VALID_MASK) { 7019 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >> 7020 I40E_GL_MDET_TX_PF_NUM_SHIFT; 7021 u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >> 7022 I40E_GL_MDET_TX_VF_NUM_SHIFT; 7023 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >> 7024 I40E_GL_MDET_TX_EVENT_SHIFT; 7025 u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >> 7026 I40E_GL_MDET_TX_QUEUE_SHIFT) - 7027 pf->hw.func_caps.base_queue; 7028 if (netif_msg_tx_err(pf)) 7029 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n", 7030 event, queue, pf_num, vf_num); 7031 wr32(hw, I40E_GL_MDET_TX, 0xffffffff); 7032 mdd_detected = true; 7033 } 7034 reg = rd32(hw, I40E_GL_MDET_RX); 7035 if (reg & I40E_GL_MDET_RX_VALID_MASK) { 7036 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >> 7037 I40E_GL_MDET_RX_FUNCTION_SHIFT; 7038 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >> 7039 I40E_GL_MDET_RX_EVENT_SHIFT; 7040 u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >> 7041 I40E_GL_MDET_RX_QUEUE_SHIFT) - 7042 pf->hw.func_caps.base_queue; 7043 if (netif_msg_rx_err(pf)) 7044 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n", 7045 event, queue, func); 7046 wr32(hw, I40E_GL_MDET_RX, 0xffffffff); 7047 mdd_detected = true; 7048 } 7049 7050 if (mdd_detected) { 7051 reg = rd32(hw, I40E_PF_MDET_TX); 7052 if (reg & I40E_PF_MDET_TX_VALID_MASK) { 7053 wr32(hw, I40E_PF_MDET_TX, 0xFFFF); 7054 dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n"); 7055 pf_mdd_detected = true; 7056 } 7057 reg = rd32(hw, I40E_PF_MDET_RX); 7058 if (reg & I40E_PF_MDET_RX_VALID_MASK) { 7059 wr32(hw, I40E_PF_MDET_RX, 0xFFFF); 7060 dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n"); 7061 pf_mdd_detected = true; 7062 } 7063 /* Queue belongs to the PF, initiate a reset */ 7064 if (pf_mdd_detected) { 7065 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); 7066 i40e_service_event_schedule(pf); 7067 } 7068 } 7069 7070 /* see if one of the VFs needs its hand slapped */ 7071 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) { 7072 vf = &(pf->vf[i]); 7073 reg = rd32(hw, I40E_VP_MDET_TX(i)); 7074 if (reg & I40E_VP_MDET_TX_VALID_MASK) { 7075 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF); 7076 vf->num_mdd_events++; 7077 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", 7078 i); 7079 } 7080 7081 reg = rd32(hw, I40E_VP_MDET_RX(i)); 7082 if (reg & I40E_VP_MDET_RX_VALID_MASK) { 7083 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF); 7084 vf->num_mdd_events++; 7085 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n", 7086 i); 7087 } 7088 7089 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) { 7090 dev_info(&pf->pdev->dev, 7091 "Too many MDD events on VF %d, disabled\n", i); 7092 
			dev_info(&pf->pdev->dev,
				 "Use PF Control I/F to re-enable the VF\n");
			set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
		}
	}

	/* re-enable mdd interrupt cause */
	clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);
}

/**
 * i40e_sync_udp_filters_subtask - Sync the pending UDP tunnel ports with HW
 * @pf: board private structure
 **/
static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
{
#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE)
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	__be16 port;
	int i;

	if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC))
		return;

	pf->flags &= ~I40E_FLAG_UDP_FILTER_SYNC;

	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
		if (pf->pending_udp_bitmap & BIT_ULL(i)) {
			pf->pending_udp_bitmap &= ~BIT_ULL(i);
			port = pf->udp_ports[i].index;
			if (port)
				ret = i40e_aq_add_udp_tunnel(hw, ntohs(port),
						     pf->udp_ports[i].type,
						     NULL, NULL);
			else
				ret = i40e_aq_del_udp_tunnel(hw, i, NULL);

			if (ret) {
				dev_dbg(&pf->pdev->dev,
					"%s %s port %d, index %d failed, err %s aq_err %s\n",
					pf->udp_ports[i].type ? "vxlan" : "geneve",
					port ? "add" : "delete",
					ntohs(port), i,
					i40e_stat_str(&pf->hw, ret),
					i40e_aq_str(&pf->hw,
						    pf->hw.aq.asq_last_status));
				pf->udp_ports[i].index = 0;
			}
		}
	}
#endif
}

/**
 * i40e_service_task - Run the driver's async subtasks
 * @work: pointer to work_struct containing our data
 **/
static void i40e_service_task(struct work_struct *work)
{
	struct i40e_pf *pf = container_of(work,
					  struct i40e_pf,
					  service_task);
	unsigned long start_time = jiffies;

	/* don't bother with service tasks if a reset is in progress */
	if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
		i40e_service_event_complete(pf);
		return;
	}

	i40e_detect_recover_hung(pf);
	i40e_sync_filters_subtask(pf);
	i40e_reset_subtask(pf);
	i40e_handle_mdd_event(pf);
	i40e_vc_process_vflr_event(pf);
	i40e_watchdog_subtask(pf);
	i40e_fdir_reinit_subtask(pf);
	i40e_client_subtask(pf);
	i40e_sync_filters_subtask(pf);
	i40e_sync_udp_filters_subtask(pf);
	i40e_clean_adminq_subtask(pf);

	i40e_service_event_complete(pf);

	/* If the tasks have taken longer than one timer cycle or there
	 * is more work to be done, reschedule the service task now
	 * rather than wait for the timer to tick again.
	 */
	if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
	    test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) ||
	    test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) ||
	    test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
		i40e_service_event_schedule(pf);
}

/**
 * i40e_service_timer - timer callback
 * @data: pointer to PF struct
 **/
static void i40e_service_timer(unsigned long data)
{
	struct i40e_pf *pf = (struct i40e_pf *)data;

	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));
	i40e_service_event_schedule(pf);
}

/**
 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
 * @vsi: the VSI being configured
 **/
static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	switch (vsi->type) {
	case I40E_VSI_MAIN:
		vsi->alloc_queue_pairs = pf->num_lan_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_q_vectors = pf->num_lan_msix;
		else
			vsi->num_q_vectors = 1;

		break;

	case I40E_VSI_FDIR:
		vsi->alloc_queue_pairs = 1;
		vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = 1;
		break;

	case I40E_VSI_VMDQ2:
		vsi->alloc_queue_pairs = pf->num_vmdq_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_vmdq_msix;
		break;

	case I40E_VSI_SRIOV:
		vsi->alloc_queue_pairs = pf->num_vf_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		break;

#ifdef I40E_FCOE
	case I40E_VSI_FCOE:
		vsi->alloc_queue_pairs = pf->num_fcoe_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_fcoe_msix;
		break;

#endif /* I40E_FCOE */
	default:
		WARN_ON(1);
		return -ENODATA;
	}

	return 0;
}

/**
 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
 * @vsi: pointer to the VSI being configured
 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
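 *
 * (The two ring pointer arrays are packed into a single allocation;
 * with N = alloc_queue_pairs the layout is
 *
 *	tx_rings[0 .. N-1] | rx_rings[0 .. N-1]
 *
 * and vsi->rx_rings simply points at the second half, so freeing
 * tx_rings releases both.  See i40e_vsi_free_arrays().)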
7266 * 7267 * On error: returns error code (negative) 7268 * On success: returns 0 7269 **/ 7270 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors) 7271 { 7272 int size; 7273 int ret = 0; 7274 7275 /* allocate memory for both Tx and Rx ring pointers */ 7276 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2; 7277 vsi->tx_rings = kzalloc(size, GFP_KERNEL); 7278 if (!vsi->tx_rings) 7279 return -ENOMEM; 7280 vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs]; 7281 7282 if (alloc_qvectors) { 7283 /* allocate memory for q_vector pointers */ 7284 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors; 7285 vsi->q_vectors = kzalloc(size, GFP_KERNEL); 7286 if (!vsi->q_vectors) { 7287 ret = -ENOMEM; 7288 goto err_vectors; 7289 } 7290 } 7291 return ret; 7292 7293 err_vectors: 7294 kfree(vsi->tx_rings); 7295 return ret; 7296 } 7297 7298 /** 7299 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF 7300 * @pf: board private structure 7301 * @type: type of VSI 7302 * 7303 * On error: returns error code (negative) 7304 * On success: returns vsi index in PF (positive) 7305 **/ 7306 static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) 7307 { 7308 int ret = -ENODEV; 7309 struct i40e_vsi *vsi; 7310 int vsi_idx; 7311 int i; 7312 7313 /* Need to protect the allocation of the VSIs at the PF level */ 7314 mutex_lock(&pf->switch_mutex); 7315 7316 /* VSI list may be fragmented if VSI creation/destruction has 7317 * been happening. We can afford to do a quick scan to look 7318 * for any free VSIs in the list. 7319 * 7320 * find next empty vsi slot, looping back around if necessary 7321 */ 7322 i = pf->next_vsi; 7323 while (i < pf->num_alloc_vsi && pf->vsi[i]) 7324 i++; 7325 if (i >= pf->num_alloc_vsi) { 7326 i = 0; 7327 while (i < pf->next_vsi && pf->vsi[i]) 7328 i++; 7329 } 7330 7331 if (i < pf->num_alloc_vsi && !pf->vsi[i]) { 7332 vsi_idx = i; /* Found one! */ 7333 } else { 7334 ret = -ENODEV; 7335 goto unlock_pf; /* out of VSI slots! */ 7336 } 7337 pf->next_vsi = ++i; 7338 7339 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL); 7340 if (!vsi) { 7341 ret = -ENOMEM; 7342 goto unlock_pf; 7343 } 7344 vsi->type = type; 7345 vsi->back = pf; 7346 set_bit(__I40E_DOWN, &vsi->state); 7347 vsi->flags = 0; 7348 vsi->idx = vsi_idx; 7349 vsi->int_rate_limit = 0; 7350 vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ? 7351 pf->rss_table_size : 64; 7352 vsi->netdev_registered = false; 7353 vsi->work_limit = I40E_DEFAULT_IRQ_WORK; 7354 INIT_LIST_HEAD(&vsi->mac_filter_list); 7355 vsi->irqs_ready = false; 7356 7357 ret = i40e_set_num_rings_in_vsi(vsi); 7358 if (ret) 7359 goto err_rings; 7360 7361 ret = i40e_vsi_alloc_arrays(vsi, true); 7362 if (ret) 7363 goto err_rings; 7364 7365 /* Setup default MSIX irq handler for VSI */ 7366 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings); 7367 7368 /* Initialize VSI lock */ 7369 spin_lock_init(&vsi->mac_filter_list_lock); 7370 pf->vsi[vsi_idx] = vsi; 7371 ret = vsi_idx; 7372 goto unlock_pf; 7373 7374 err_rings: 7375 pf->next_vsi = i - 1; 7376 kfree(vsi); 7377 unlock_pf: 7378 mutex_unlock(&pf->switch_mutex); 7379 return ret; 7380 } 7381 7382 /** 7383 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI 7384 * @type: VSI pointer 7385 * @free_qvectors: a bool to specify if q_vectors need to be freed. 
7386 * 7387 * On error: returns error code (negative) 7388 * On success: returns 0 7389 **/ 7390 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors) 7391 { 7392 /* free the ring and vector containers */ 7393 if (free_qvectors) { 7394 kfree(vsi->q_vectors); 7395 vsi->q_vectors = NULL; 7396 } 7397 kfree(vsi->tx_rings); 7398 vsi->tx_rings = NULL; 7399 vsi->rx_rings = NULL; 7400 } 7401 7402 /** 7403 * i40e_clear_rss_config_user - clear the user configured RSS hash keys 7404 * and lookup table 7405 * @vsi: Pointer to VSI structure 7406 */ 7407 static void i40e_clear_rss_config_user(struct i40e_vsi *vsi) 7408 { 7409 if (!vsi) 7410 return; 7411 7412 kfree(vsi->rss_hkey_user); 7413 vsi->rss_hkey_user = NULL; 7414 7415 kfree(vsi->rss_lut_user); 7416 vsi->rss_lut_user = NULL; 7417 } 7418 7419 /** 7420 * i40e_vsi_clear - Deallocate the VSI provided 7421 * @vsi: the VSI being un-configured 7422 **/ 7423 static int i40e_vsi_clear(struct i40e_vsi *vsi) 7424 { 7425 struct i40e_pf *pf; 7426 7427 if (!vsi) 7428 return 0; 7429 7430 if (!vsi->back) 7431 goto free_vsi; 7432 pf = vsi->back; 7433 7434 mutex_lock(&pf->switch_mutex); 7435 if (!pf->vsi[vsi->idx]) { 7436 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n", 7437 vsi->idx, vsi->idx, vsi, vsi->type); 7438 goto unlock_vsi; 7439 } 7440 7441 if (pf->vsi[vsi->idx] != vsi) { 7442 dev_err(&pf->pdev->dev, 7443 "pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n", 7444 pf->vsi[vsi->idx]->idx, 7445 pf->vsi[vsi->idx], 7446 pf->vsi[vsi->idx]->type, 7447 vsi->idx, vsi, vsi->type); 7448 goto unlock_vsi; 7449 } 7450 7451 /* updates the PF for this cleared vsi */ 7452 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); 7453 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); 7454 7455 i40e_vsi_free_arrays(vsi, true); 7456 i40e_clear_rss_config_user(vsi); 7457 7458 pf->vsi[vsi->idx] = NULL; 7459 if (vsi->idx < pf->next_vsi) 7460 pf->next_vsi = vsi->idx; 7461 7462 unlock_vsi: 7463 mutex_unlock(&pf->switch_mutex); 7464 free_vsi: 7465 kfree(vsi); 7466 7467 return 0; 7468 } 7469 7470 /** 7471 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI 7472 * @vsi: the VSI being cleaned 7473 **/ 7474 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi) 7475 { 7476 int i; 7477 7478 if (vsi->tx_rings && vsi->tx_rings[0]) { 7479 for (i = 0; i < vsi->alloc_queue_pairs; i++) { 7480 kfree_rcu(vsi->tx_rings[i], rcu); 7481 vsi->tx_rings[i] = NULL; 7482 vsi->rx_rings[i] = NULL; 7483 } 7484 } 7485 } 7486 7487 /** 7488 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI 7489 * @vsi: the VSI being configured 7490 **/ 7491 static int i40e_alloc_rings(struct i40e_vsi *vsi) 7492 { 7493 struct i40e_ring *tx_ring, *rx_ring; 7494 struct i40e_pf *pf = vsi->back; 7495 int i; 7496 7497 /* Set basic values in the rings to be used later during open() */ 7498 for (i = 0; i < vsi->alloc_queue_pairs; i++) { 7499 /* allocate space for both Tx and Rx in one shot */ 7500 tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL); 7501 if (!tx_ring) 7502 goto err_out; 7503 7504 tx_ring->queue_index = i; 7505 tx_ring->reg_idx = vsi->base_queue + i; 7506 tx_ring->ring_active = false; 7507 tx_ring->vsi = vsi; 7508 tx_ring->netdev = vsi->netdev; 7509 tx_ring->dev = &pf->pdev->dev; 7510 tx_ring->count = vsi->num_desc; 7511 tx_ring->size = 0; 7512 tx_ring->dcb_tc = 0; 7513 if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) 7514 tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; 7515 tx_ring->tx_itr_setting = 
pf->tx_itr_default; 7516 vsi->tx_rings[i] = tx_ring; 7517 7518 rx_ring = &tx_ring[1]; 7519 rx_ring->queue_index = i; 7520 rx_ring->reg_idx = vsi->base_queue + i; 7521 rx_ring->ring_active = false; 7522 rx_ring->vsi = vsi; 7523 rx_ring->netdev = vsi->netdev; 7524 rx_ring->dev = &pf->pdev->dev; 7525 rx_ring->count = vsi->num_desc; 7526 rx_ring->size = 0; 7527 rx_ring->dcb_tc = 0; 7528 if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) 7529 set_ring_16byte_desc_enabled(rx_ring); 7530 else 7531 clear_ring_16byte_desc_enabled(rx_ring); 7532 rx_ring->rx_itr_setting = pf->rx_itr_default; 7533 vsi->rx_rings[i] = rx_ring; 7534 } 7535 7536 return 0; 7537 7538 err_out: 7539 i40e_vsi_clear_rings(vsi); 7540 return -ENOMEM; 7541 } 7542 7543 /** 7544 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel 7545 * @pf: board private structure 7546 * @vectors: the number of MSI-X vectors to request 7547 * 7548 * Returns the number of vectors reserved, or error 7549 **/ 7550 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors) 7551 { 7552 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries, 7553 I40E_MIN_MSIX, vectors); 7554 if (vectors < 0) { 7555 dev_info(&pf->pdev->dev, 7556 "MSI-X vector reservation failed: %d\n", vectors); 7557 vectors = 0; 7558 } 7559 7560 return vectors; 7561 } 7562 7563 /** 7564 * i40e_init_msix - Setup the MSIX capability 7565 * @pf: board private structure 7566 * 7567 * Work with the OS to set up the MSIX vectors needed. 7568 * 7569 * Returns the number of vectors reserved or negative on failure 7570 **/ 7571 static int i40e_init_msix(struct i40e_pf *pf) 7572 { 7573 struct i40e_hw *hw = &pf->hw; 7574 int vectors_left; 7575 int v_budget, i; 7576 int v_actual; 7577 int iwarp_requested = 0; 7578 7579 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) 7580 return -ENODEV; 7581 7582 /* The number of vectors we'll request will be comprised of: 7583 * - Add 1 for "other" cause for Admin Queue events, etc. 7584 * - The number of LAN queue pairs 7585 * - Queues being used for RSS. 7586 * We don't need as many as max_rss_size vectors. 7587 * use rss_size instead in the calculation since that 7588 * is governed by number of cpus in the system. 7589 * - assumes symmetric Tx/Rx pairing 7590 * - The number of VMDq pairs 7591 * - The CPU count within the NUMA node if iWARP is enabled 7592 #ifdef I40E_FCOE 7593 * - The number of FCOE qps. 7594 #endif 7595 * Once we count this up, try the request. 7596 * 7597 * If we can't get what we want, we'll simplify to nearly nothing 7598 * and try again. If that still fails, we punt. 7599 */ 7600 vectors_left = hw->func_caps.num_msix_vectors; 7601 v_budget = 0; 7602 7603 /* reserve one vector for miscellaneous handler */ 7604 if (vectors_left) { 7605 v_budget++; 7606 vectors_left--; 7607 } 7608 7609 /* reserve vectors for the main PF traffic queues */ 7610 pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left); 7611 vectors_left -= pf->num_lan_msix; 7612 v_budget += pf->num_lan_msix; 7613 7614 /* reserve one vector for sideband flow director */ 7615 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { 7616 if (vectors_left) { 7617 v_budget++; 7618 vectors_left--; 7619 } else { 7620 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 7621 } 7622 } 7623 7624 #ifdef I40E_FCOE 7625 /* can we reserve enough for FCoE? 
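	 * The policy below mirrors the iWARP block that follows: no
	 * vectors left disables FCoE (0), enough for all FCoE qps gets
	 * one vector per qp, and anything in between falls back to a
	 * single shared vector.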
*/ 7626 if (pf->flags & I40E_FLAG_FCOE_ENABLED) { 7627 if (!vectors_left) 7628 pf->num_fcoe_msix = 0; 7629 else if (vectors_left >= pf->num_fcoe_qps) 7630 pf->num_fcoe_msix = pf->num_fcoe_qps; 7631 else 7632 pf->num_fcoe_msix = 1; 7633 v_budget += pf->num_fcoe_msix; 7634 vectors_left -= pf->num_fcoe_msix; 7635 } 7636 7637 #endif 7638 /* can we reserve enough for iWARP? */ 7639 if (pf->flags & I40E_FLAG_IWARP_ENABLED) { 7640 if (!vectors_left) 7641 pf->num_iwarp_msix = 0; 7642 else if (vectors_left < pf->num_iwarp_msix) 7643 pf->num_iwarp_msix = 1; 7644 v_budget += pf->num_iwarp_msix; 7645 vectors_left -= pf->num_iwarp_msix; 7646 } 7647 7648 /* any vectors left over go for VMDq support */ 7649 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) { 7650 int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps; 7651 int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted); 7652 7653 /* if we're short on vectors for what's desired, we limit 7654 * the queues per vmdq. If this is still more than are 7655 * available, the user will need to change the number of 7656 * queues/vectors used by the PF later with the ethtool 7657 * channels command 7658 */ 7659 if (vmdq_vecs < vmdq_vecs_wanted) 7660 pf->num_vmdq_qps = 1; 7661 pf->num_vmdq_msix = pf->num_vmdq_qps; 7662 7663 v_budget += vmdq_vecs; 7664 vectors_left -= vmdq_vecs; 7665 } 7666 7667 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), 7668 GFP_KERNEL); 7669 if (!pf->msix_entries) 7670 return -ENOMEM; 7671 7672 for (i = 0; i < v_budget; i++) 7673 pf->msix_entries[i].entry = i; 7674 v_actual = i40e_reserve_msix_vectors(pf, v_budget); 7675 7676 if (v_actual != v_budget) { 7677 /* If we have limited resources, we will start with no vectors 7678 * for the special features and then allocate vectors to some 7679 * of these features based on the policy and at the end disable 7680 * the features that did not get any vectors. 
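 *
 * As an illustrative example (numbers assumed, not from any specific
 * part): if 16 vectors were budgeted but only 7 were granted, then
 * vec = 6 once the misc vector is set aside, and with iWARP enabled
 * the default case below hands iWARP and VMDq up to vec / 3 = 2
 * vectors each, leaving the remainder to the LAN queues.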
7681 */ 7682 iwarp_requested = pf->num_iwarp_msix; 7683 pf->num_iwarp_msix = 0; 7684 #ifdef I40E_FCOE 7685 pf->num_fcoe_qps = 0; 7686 pf->num_fcoe_msix = 0; 7687 #endif 7688 pf->num_vmdq_msix = 0; 7689 } 7690 7691 if (v_actual < I40E_MIN_MSIX) { 7692 pf->flags &= ~I40E_FLAG_MSIX_ENABLED; 7693 kfree(pf->msix_entries); 7694 pf->msix_entries = NULL; 7695 return -ENODEV; 7696 7697 } else if (v_actual == I40E_MIN_MSIX) { 7698 /* Adjust for minimal MSIX use */ 7699 pf->num_vmdq_vsis = 0; 7700 pf->num_vmdq_qps = 0; 7701 pf->num_lan_qps = 1; 7702 pf->num_lan_msix = 1; 7703 7704 } else if (v_actual != v_budget) { 7705 int vec; 7706 7707 /* reserve the misc vector */ 7708 vec = v_actual - 1; 7709 7710 /* Scale vector usage down */ 7711 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */ 7712 pf->num_vmdq_vsis = 1; 7713 pf->num_vmdq_qps = 1; 7714 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 7715 7716 /* partition out the remaining vectors */ 7717 switch (vec) { 7718 case 2: 7719 pf->num_lan_msix = 1; 7720 break; 7721 case 3: 7722 if (pf->flags & I40E_FLAG_IWARP_ENABLED) { 7723 pf->num_lan_msix = 1; 7724 pf->num_iwarp_msix = 1; 7725 } else { 7726 pf->num_lan_msix = 2; 7727 } 7728 #ifdef I40E_FCOE 7729 /* give one vector to FCoE */ 7730 if (pf->flags & I40E_FLAG_FCOE_ENABLED) { 7731 pf->num_lan_msix = 1; 7732 pf->num_fcoe_msix = 1; 7733 } 7734 #endif 7735 break; 7736 default: 7737 if (pf->flags & I40E_FLAG_IWARP_ENABLED) { 7738 pf->num_iwarp_msix = min_t(int, (vec / 3), 7739 iwarp_requested); 7740 pf->num_vmdq_vsis = min_t(int, (vec / 3), 7741 I40E_DEFAULT_NUM_VMDQ_VSI); 7742 } else { 7743 pf->num_vmdq_vsis = min_t(int, (vec / 2), 7744 I40E_DEFAULT_NUM_VMDQ_VSI); 7745 } 7746 pf->num_lan_msix = min_t(int, 7747 (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)), 7748 pf->num_lan_msix); 7749 #ifdef I40E_FCOE 7750 /* give one vector to FCoE */ 7751 if (pf->flags & I40E_FLAG_FCOE_ENABLED) { 7752 pf->num_fcoe_msix = 1; 7753 vec--; 7754 } 7755 #endif 7756 break; 7757 } 7758 } 7759 7760 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && 7761 (pf->num_vmdq_msix == 0)) { 7762 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n"); 7763 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED; 7764 } 7765 7766 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) && 7767 (pf->num_iwarp_msix == 0)) { 7768 dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n"); 7769 pf->flags &= ~I40E_FLAG_IWARP_ENABLED; 7770 } 7771 #ifdef I40E_FCOE 7772 7773 if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) { 7774 dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n"); 7775 pf->flags &= ~I40E_FLAG_FCOE_ENABLED; 7776 } 7777 #endif 7778 return v_actual; 7779 } 7780 7781 /** 7782 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector 7783 * @vsi: the VSI being configured 7784 * @v_idx: index of the vector in the vsi struct 7785 * 7786 * We allocate one q_vector. If allocation fails we return -ENOMEM. 
7787 **/ 7788 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx) 7789 { 7790 struct i40e_q_vector *q_vector; 7791 7792 /* allocate q_vector */ 7793 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL); 7794 if (!q_vector) 7795 return -ENOMEM; 7796 7797 q_vector->vsi = vsi; 7798 q_vector->v_idx = v_idx; 7799 cpumask_set_cpu(v_idx, &q_vector->affinity_mask); 7800 if (vsi->netdev) 7801 netif_napi_add(vsi->netdev, &q_vector->napi, 7802 i40e_napi_poll, NAPI_POLL_WEIGHT); 7803 7804 q_vector->rx.latency_range = I40E_LOW_LATENCY; 7805 q_vector->tx.latency_range = I40E_LOW_LATENCY; 7806 7807 /* tie q_vector and vsi together */ 7808 vsi->q_vectors[v_idx] = q_vector; 7809 7810 return 0; 7811 } 7812 7813 /** 7814 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors 7815 * @vsi: the VSI being configured 7816 * 7817 * We allocate one q_vector per queue interrupt. If allocation fails we 7818 * return -ENOMEM. 7819 **/ 7820 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi) 7821 { 7822 struct i40e_pf *pf = vsi->back; 7823 int v_idx, num_q_vectors; 7824 int err; 7825 7826 /* if not MSIX, give the one vector only to the LAN VSI */ 7827 if (pf->flags & I40E_FLAG_MSIX_ENABLED) 7828 num_q_vectors = vsi->num_q_vectors; 7829 else if (vsi == pf->vsi[pf->lan_vsi]) 7830 num_q_vectors = 1; 7831 else 7832 return -EINVAL; 7833 7834 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { 7835 err = i40e_vsi_alloc_q_vector(vsi, v_idx); 7836 if (err) 7837 goto err_out; 7838 } 7839 7840 return 0; 7841 7842 err_out: 7843 while (v_idx--) 7844 i40e_free_q_vector(vsi, v_idx); 7845 7846 return err; 7847 } 7848 7849 /** 7850 * i40e_init_interrupt_scheme - Determine proper interrupt scheme 7851 * @pf: board private structure to initialize 7852 **/ 7853 static int i40e_init_interrupt_scheme(struct i40e_pf *pf) 7854 { 7855 int vectors = 0; 7856 ssize_t size; 7857 7858 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 7859 vectors = i40e_init_msix(pf); 7860 if (vectors < 0) { 7861 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | 7862 I40E_FLAG_IWARP_ENABLED | 7863 #ifdef I40E_FCOE 7864 I40E_FLAG_FCOE_ENABLED | 7865 #endif 7866 I40E_FLAG_RSS_ENABLED | 7867 I40E_FLAG_DCB_CAPABLE | 7868 I40E_FLAG_SRIOV_ENABLED | 7869 I40E_FLAG_FD_SB_ENABLED | 7870 I40E_FLAG_FD_ATR_ENABLED | 7871 I40E_FLAG_VMDQ_ENABLED); 7872 7873 /* rework the queue expectations without MSIX */ 7874 i40e_determine_queue_usage(pf); 7875 } 7876 } 7877 7878 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) && 7879 (pf->flags & I40E_FLAG_MSI_ENABLED)) { 7880 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n"); 7881 vectors = pci_enable_msi(pf->pdev); 7882 if (vectors < 0) { 7883 dev_info(&pf->pdev->dev, "MSI init failed - %d\n", 7884 vectors); 7885 pf->flags &= ~I40E_FLAG_MSI_ENABLED; 7886 } 7887 vectors = 1; /* one MSI or Legacy vector */ 7888 } 7889 7890 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED))) 7891 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n"); 7892 7893 /* set up vector assignment tracking */ 7894 size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors); 7895 pf->irq_pile = kzalloc(size, GFP_KERNEL); 7896 if (!pf->irq_pile) { 7897 dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n"); 7898 return -ENOMEM; 7899 } 7900 pf->irq_pile->num_entries = vectors; 7901 pf->irq_pile->search_hint = 0; 7902 7903 /* track first vector for misc interrupts, ignore return */ 7904 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1); 7905 7906 return 0; 7907 } 7908 
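/* Summary of the interrupt schemes chosen above, from best to worst:
 *
 *   MSI-X:  one vector for misc/AdminQ causes plus one vector per
 *	     queue pair, with extras for VMDq/iWARP/FCoE as budgeted
 *	     in i40e_init_msix()
 *   MSI:    a single message-signalled vector shared by the queues
 *	     and the misc causes
 *   legacy: a single line-based IRQ, likewise shared
 *
 * In every case the first (or only) vector is reserved in irq_pile
 * above, so it is never handed out to a VSI.
 */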
/**
 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
 * @pf: board private structure
 *
 * This sets up the handler for MSIX 0, which is used to manage the
 * non-queue interrupts, e.g. AdminQ and errors. This is not used
 * when in MSI or Legacy interrupt mode.
 **/
static int i40e_setup_misc_vector(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	/* Only request the irq if this is the first time through, and
	 * not when we're rebuilding after a Reset
	 */
	if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
		err = request_irq(pf->msix_entries[0].vector,
				  i40e_intr, 0, pf->int_name, pf);
		if (err) {
			dev_info(&pf->pdev->dev,
				 "request_irq for %s failed: %d\n",
				 pf->int_name, err);
			return -EFAULT;
		}
	}

	i40e_enable_misc_int_causes(pf);

	/* associate no queues to the misc vector */
	wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);

	i40e_flush(hw);

	i40e_irq_dynamic_enable_icr0(pf, true);

	return err;
}

/**
 * i40e_config_rss_aq - Prepare for RSS using AQ commands
 * @vsi: vsi structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 **/
static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
			      u8 *lut, u16 lut_size)
{
	struct i40e_aqc_get_set_rss_key_data rss_key;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	bool pf_lut = false;
	u8 *rss_lut;
	int ret, i;

	memset(&rss_key, 0, sizeof(rss_key));
	memcpy(&rss_key, seed, sizeof(rss_key));

	/* size the scratch table to match the VSI table that is populated
	 * and written out below
	 */
	rss_lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!rss_lut)
		return -ENOMEM;

	/* Populate the LUT with max no. of queues in round robin fashion */
	for (i = 0; i < vsi->rss_table_size; i++)
		rss_lut[i] = i % vsi->rss_size;

	ret = i40e_aq_set_rss_key(hw, vsi->id, &rss_key);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot set RSS key, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto config_rss_aq_out;
	}

	if (vsi->type == I40E_VSI_MAIN)
		pf_lut = true;

	ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, rss_lut,
				  vsi->rss_table_size);
	if (ret)
		dev_info(&pf->pdev->dev,
			 "Cannot set RSS lut, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

config_rss_aq_out:
	kfree(rss_lut);
	return ret;
}

/**
 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
 * @vsi: VSI structure
 **/
static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
{
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	struct i40e_pf *pf = vsi->back;
	u8 *lut;
	int ret;

	if (!(pf->flags & I40E_FLAG_RSS_AQ_CAPABLE))
		return 0;

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* rss_size must be current before it is used to build the LUT */
	vsi->rss_size = min_t(int, pf->alloc_rss_size, vsi->num_queue_pairs);
	i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
	netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
	ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
	kfree(lut);

	return ret;
}

/**
 * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
 * @vsi: Pointer to vsi structure
 * @seed: Buffer to store the hash keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Return 0 on success, negative on failure
 */
static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
			   u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int ret = 0;

	if (seed) {
		ret = i40e_aq_get_rss_key(hw, vsi->id,
			(struct i40e_aqc_get_set_rss_key_data *)seed);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot get RSS key, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return ret;
		}
	}

	if (lut) {
		bool pf_lut = (vsi->type == I40E_VSI_MAIN);

		ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot get RSS lut, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return ret;
		}
	}

	return ret;
}

/**
 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
 * @vsi: Pointer to vsi structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
			       const u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u8 i;

	/* Fill out hash function seed */
	if (seed) {
		u32 *seed_dw = (u32 *)seed;

		for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
			i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
	}

	if (lut) {
		u32 *lut_dw = (u32 *)lut;

		if (lut_size != I40E_HLUT_ARRAY_SIZE)
			return -EINVAL;

		for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
			wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
	}
	i40e_flush(hw);

	return 0;
}

/**
 * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
 * @vsi: Pointer to VSI structure
 * @seed: Buffer to store the keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Returns 0 on success, negative on failure
 */
static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
			    u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 i;

	if (seed) {
		u32 *seed_dw = (u32 *)seed;

		for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
			seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
	}
	if (lut) {
		u32 *lut_dw = (u32 *)lut;

		if (lut_size != I40E_HLUT_ARRAY_SIZE)
			return -EINVAL;
		for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
			lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
	}

	return 0;
}

/**
 * i40e_config_rss - Configure RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Returns 0 on success, negative on failure
 */
int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;

	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
		return i40e_config_rss_aq(vsi, seed, lut, lut_size);
	else
		return i40e_config_rss_reg(vsi, seed, lut, lut_size);
}

/**
 * i40e_get_rss - Get RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: Buffer to store the keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Returns 0 on success, negative on failure
 */
int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;

	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
		return i40e_get_rss_aq(vsi, seed, lut, lut_size);
	else
		return i40e_get_rss_reg(vsi, seed, lut, lut_size);
}

/**
 * i40e_fill_rss_lut - Fill the RSS lookup table with default values
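 * (e.g. with rss_table_size = 8 and rss_size = 3 the fill below
 *  yields 0 1 2 0 1 2 0 1, an illustrative round-robin spread of
 *  hash buckets across the active queues)
 *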
@pf: Pointer to board private structure 8186 * @lut: Lookup table 8187 * @rss_table_size: Lookup table size 8188 * @rss_size: Range of queue number for hashing 8189 */ 8190 static void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut, 8191 u16 rss_table_size, u16 rss_size) 8192 { 8193 u16 i; 8194 8195 for (i = 0; i < rss_table_size; i++) 8196 lut[i] = i % rss_size; 8197 } 8198 8199 /** 8200 * i40e_pf_config_rss - Prepare for RSS if used 8201 * @pf: board private structure 8202 **/ 8203 static int i40e_pf_config_rss(struct i40e_pf *pf) 8204 { 8205 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 8206 u8 seed[I40E_HKEY_ARRAY_SIZE]; 8207 u8 *lut; 8208 struct i40e_hw *hw = &pf->hw; 8209 u32 reg_val; 8210 u64 hena; 8211 int ret; 8212 8213 /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */ 8214 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) | 8215 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32); 8216 hena |= i40e_pf_get_default_rss_hena(pf); 8217 8218 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena); 8219 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); 8220 8221 /* Determine the RSS table size based on the hardware capabilities */ 8222 reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0); 8223 reg_val = (pf->rss_table_size == 512) ? 8224 (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) : 8225 (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512); 8226 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val); 8227 8228 /* Determine the RSS size of the VSI */ 8229 if (!vsi->rss_size) 8230 vsi->rss_size = min_t(int, pf->alloc_rss_size, 8231 vsi->num_queue_pairs); 8232 8233 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); 8234 if (!lut) 8235 return -ENOMEM; 8236 8237 /* Use user configured lut if there is one, otherwise use default */ 8238 if (vsi->rss_lut_user) 8239 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); 8240 else 8241 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size); 8242 8243 /* Use user configured hash key if there is one, otherwise 8244 * use default. 8245 */ 8246 if (vsi->rss_hkey_user) 8247 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE); 8248 else 8249 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE); 8250 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size); 8251 kfree(lut); 8252 8253 return ret; 8254 } 8255 8256 /** 8257 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild 8258 * @pf: board private structure 8259 * @queue_count: the requested queue count for rss. 8260 * 8261 * returns 0 if rss is not enabled, if enabled returns the final rss queue 8262 * count which may be different from the requested queue count. 8263 **/ 8264 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) 8265 { 8266 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 8267 int new_rss_size; 8268 8269 if (!(pf->flags & I40E_FLAG_RSS_ENABLED)) 8270 return 0; 8271 8272 new_rss_size = min_t(int, queue_count, pf->rss_size_max); 8273 8274 if (queue_count != vsi->num_queue_pairs) { 8275 vsi->req_queue_pairs = queue_count; 8276 i40e_prep_for_reset(pf); 8277 8278 pf->alloc_rss_size = new_rss_size; 8279 8280 i40e_reset_and_rebuild(pf, true); 8281 8282 /* Discard the user configured hash keys and lut, if less 8283 * queues are enabled. 
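 * A stale user LUT could otherwise keep pointing at queues that no
 * longer exist, so fall back to the default round-robin table.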
		 */
		if (queue_count < vsi->rss_size) {
			i40e_clear_rss_config_user(vsi);
			dev_dbg(&pf->pdev->dev,
				"discard user configured hash keys and lut\n");
		}

		/* Reset vsi->rss_size, as number of enabled queues changed */
		vsi->rss_size = min_t(int, pf->alloc_rss_size,
				      vsi->num_queue_pairs);

		i40e_pf_config_rss(pf);
	}
	dev_info(&pf->pdev->dev, "RSS count/HW max RSS count: %d/%d\n",
		 pf->alloc_rss_size, pf->rss_size_max);
	return pf->alloc_rss_size;
}

/**
 * i40e_get_npar_bw_setting - Retrieve BW settings for this PF partition
 * @pf: board private structure
 **/
i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf)
{
	i40e_status status;
	bool min_valid, max_valid;
	u32 max_bw, min_bw;

	status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
					   &min_valid, &max_valid);

	if (!status) {
		if (min_valid)
			pf->npar_min_bw = min_bw;
		if (max_valid)
			pf->npar_max_bw = max_bw;
	}

	return status;
}

/**
 * i40e_set_npar_bw_setting - Set BW settings for this PF partition
 * @pf: board private structure
 **/
i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf)
{
	struct i40e_aqc_configure_partition_bw_data bw_data;
	i40e_status status;

	/* start from a clean slate so no stack garbage is sent to the
	 * firmware for the other partitions' entries
	 */
	memset(&bw_data, 0, sizeof(bw_data));

	/* Set the valid bit for this PF */
	bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
	bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK;
	bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK;

	/* Set the new bandwidths */
	status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);

	return status;
}

/**
 * i40e_commit_npar_bw_setting - Commit BW settings for this PF partition
 * @pf: board private structure
 **/
i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
{
	/* Commit temporary BW setting to permanent NVM image */
	enum i40e_admin_queue_err last_aq_status;
	i40e_status ret;
	u16 nvm_word;

	if (pf->hw.partition_id != 1) {
		dev_info(&pf->pdev->dev,
			 "Commit BW only works on partition 1!
This is partition %d", 8359 pf->hw.partition_id); 8360 ret = I40E_NOT_SUPPORTED; 8361 goto bw_commit_out; 8362 } 8363 8364 /* Acquire NVM for read access */ 8365 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ); 8366 last_aq_status = pf->hw.aq.asq_last_status; 8367 if (ret) { 8368 dev_info(&pf->pdev->dev, 8369 "Cannot acquire NVM for read access, err %s aq_err %s\n", 8370 i40e_stat_str(&pf->hw, ret), 8371 i40e_aq_str(&pf->hw, last_aq_status)); 8372 goto bw_commit_out; 8373 } 8374 8375 /* Read word 0x10 of NVM - SW compatibility word 1 */ 8376 ret = i40e_aq_read_nvm(&pf->hw, 8377 I40E_SR_NVM_CONTROL_WORD, 8378 0x10, sizeof(nvm_word), &nvm_word, 8379 false, NULL); 8380 /* Save off last admin queue command status before releasing 8381 * the NVM 8382 */ 8383 last_aq_status = pf->hw.aq.asq_last_status; 8384 i40e_release_nvm(&pf->hw); 8385 if (ret) { 8386 dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n", 8387 i40e_stat_str(&pf->hw, ret), 8388 i40e_aq_str(&pf->hw, last_aq_status)); 8389 goto bw_commit_out; 8390 } 8391 8392 /* Wait a bit for NVM release to complete */ 8393 msleep(50); 8394 8395 /* Acquire NVM for write access */ 8396 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE); 8397 last_aq_status = pf->hw.aq.asq_last_status; 8398 if (ret) { 8399 dev_info(&pf->pdev->dev, 8400 "Cannot acquire NVM for write access, err %s aq_err %s\n", 8401 i40e_stat_str(&pf->hw, ret), 8402 i40e_aq_str(&pf->hw, last_aq_status)); 8403 goto bw_commit_out; 8404 } 8405 /* Write it back out unchanged to initiate update NVM, 8406 * which will force a write of the shadow (alt) RAM to 8407 * the NVM - thus storing the bandwidth values permanently. 8408 */ 8409 ret = i40e_aq_update_nvm(&pf->hw, 8410 I40E_SR_NVM_CONTROL_WORD, 8411 0x10, sizeof(nvm_word), 8412 &nvm_word, true, NULL); 8413 /* Save off last admin queue command status before releasing 8414 * the NVM 8415 */ 8416 last_aq_status = pf->hw.aq.asq_last_status; 8417 i40e_release_nvm(&pf->hw); 8418 if (ret) 8419 dev_info(&pf->pdev->dev, 8420 "BW settings NOT SAVED, err %s aq_err %s\n", 8421 i40e_stat_str(&pf->hw, ret), 8422 i40e_aq_str(&pf->hw, last_aq_status)); 8423 bw_commit_out: 8424 8425 return ret; 8426 } 8427 8428 /** 8429 * i40e_sw_init - Initialize general software structures (struct i40e_pf) 8430 * @pf: board private structure to initialize 8431 * 8432 * i40e_sw_init initializes the Adapter private data structure. 8433 * Fields are initialized based on PCI device information and 8434 * OS network device settings (MTU size). 
8435 **/ 8436 static int i40e_sw_init(struct i40e_pf *pf) 8437 { 8438 int err = 0; 8439 int size; 8440 8441 pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE, 8442 (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)); 8443 pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG; 8444 if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) { 8445 if (I40E_DEBUG_USER & debug) 8446 pf->hw.debug_mask = debug; 8447 pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER), 8448 I40E_DEFAULT_MSG_ENABLE); 8449 } 8450 8451 /* Set default capability flags */ 8452 pf->flags = I40E_FLAG_RX_CSUM_ENABLED | 8453 I40E_FLAG_MSI_ENABLED | 8454 I40E_FLAG_LINK_POLLING_ENABLED | 8455 I40E_FLAG_MSIX_ENABLED; 8456 8457 if (iommu_present(&pci_bus_type)) 8458 pf->flags |= I40E_FLAG_RX_PS_ENABLED; 8459 else 8460 pf->flags |= I40E_FLAG_RX_1BUF_ENABLED; 8461 8462 /* Set default ITR */ 8463 pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF; 8464 pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF; 8465 8466 /* Depending on PF configurations, it is possible that the RSS 8467 * maximum might end up larger than the available queues 8468 */ 8469 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width); 8470 pf->alloc_rss_size = 1; 8471 pf->rss_table_size = pf->hw.func_caps.rss_table_size; 8472 pf->rss_size_max = min_t(int, pf->rss_size_max, 8473 pf->hw.func_caps.num_tx_qp); 8474 if (pf->hw.func_caps.rss) { 8475 pf->flags |= I40E_FLAG_RSS_ENABLED; 8476 pf->alloc_rss_size = min_t(int, pf->rss_size_max, 8477 num_online_cpus()); 8478 } 8479 8480 /* MFP mode enabled */ 8481 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) { 8482 pf->flags |= I40E_FLAG_MFP_ENABLED; 8483 dev_info(&pf->pdev->dev, "MFP mode Enabled\n"); 8484 if (i40e_get_npar_bw_setting(pf)) 8485 dev_warn(&pf->pdev->dev, 8486 "Could not get NPAR bw settings\n"); 8487 else 8488 dev_info(&pf->pdev->dev, 8489 "Min BW = %8.8x, Max BW = %8.8x\n", 8490 pf->npar_min_bw, pf->npar_max_bw); 8491 } 8492 8493 /* FW/NVM is not yet fixed in this regard */ 8494 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) || 8495 (pf->hw.func_caps.fd_filters_best_effort > 0)) { 8496 pf->flags |= I40E_FLAG_FD_ATR_ENABLED; 8497 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; 8498 if (pf->flags & I40E_FLAG_MFP_ENABLED && 8499 pf->hw.num_partitions > 1) 8500 dev_info(&pf->pdev->dev, 8501 "Flow Director Sideband mode Disabled in MFP mode\n"); 8502 else 8503 pf->flags |= I40E_FLAG_FD_SB_ENABLED; 8504 pf->fdir_pf_filter_count = 8505 pf->hw.func_caps.fd_filters_guaranteed; 8506 pf->hw.fdir_shared_filter_count = 8507 pf->hw.func_caps.fd_filters_best_effort; 8508 } 8509 8510 if (i40e_is_mac_710(&pf->hw) && 8511 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || 8512 (pf->hw.aq.fw_maj_ver < 4))) { 8513 pf->flags |= I40E_FLAG_RESTART_AUTONEG; 8514 /* No DCB support for FW < v4.33 */ 8515 pf->flags |= I40E_FLAG_NO_DCB_SUPPORT; 8516 } 8517 8518 /* Disable FW LLDP if FW < v4.3 */ 8519 if (i40e_is_mac_710(&pf->hw) && 8520 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) || 8521 (pf->hw.aq.fw_maj_ver < 4))) 8522 pf->flags |= I40E_FLAG_STOP_FW_LLDP; 8523 8524 /* Use the FW Set LLDP MIB API if FW > v4.40 */ 8525 if (i40e_is_mac_710(&pf->hw) && 8526 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) || 8527 (pf->hw.aq.fw_maj_ver >= 5))) 8528 pf->flags |= I40E_FLAG_USE_SET_LLDP_MIB; 8529 8530 if (pf->hw.func_caps.vmdq) { 8531 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI; 8532 pf->flags |= I40E_FLAG_VMDQ_ENABLED; 8533 pf->num_vmdq_qps = 
i40e_default_queues_per_vmdq(pf); 8534 } 8535 8536 if (pf->hw.func_caps.iwarp) { 8537 pf->flags |= I40E_FLAG_IWARP_ENABLED; 8538 /* IWARP needs one extra vector for CQP just like MISC.*/ 8539 pf->num_iwarp_msix = (int)num_online_cpus() + 1; 8540 } 8541 8542 #ifdef I40E_FCOE 8543 i40e_init_pf_fcoe(pf); 8544 8545 #endif /* I40E_FCOE */ 8546 #ifdef CONFIG_PCI_IOV 8547 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) { 8548 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; 8549 pf->flags |= I40E_FLAG_SRIOV_ENABLED; 8550 pf->num_req_vfs = min_t(int, 8551 pf->hw.func_caps.num_vfs, 8552 I40E_MAX_VF_COUNT); 8553 } 8554 #endif /* CONFIG_PCI_IOV */ 8555 if (pf->hw.mac.type == I40E_MAC_X722) { 8556 pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE | 8557 I40E_FLAG_128_QP_RSS_CAPABLE | 8558 I40E_FLAG_HW_ATR_EVICT_CAPABLE | 8559 I40E_FLAG_OUTER_UDP_CSUM_CAPABLE | 8560 I40E_FLAG_WB_ON_ITR_CAPABLE | 8561 I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE | 8562 I40E_FLAG_NO_PCI_LINK_CHECK | 8563 I40E_FLAG_100M_SGMII_CAPABLE | 8564 I40E_FLAG_USE_SET_LLDP_MIB | 8565 I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; 8566 } else if ((pf->hw.aq.api_maj_ver > 1) || 8567 ((pf->hw.aq.api_maj_ver == 1) && 8568 (pf->hw.aq.api_min_ver > 4))) { 8569 /* Supported in FW API version higher than 1.4 */ 8570 pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; 8571 pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE; 8572 } else { 8573 pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE; 8574 } 8575 8576 pf->eeprom_version = 0xDEAD; 8577 pf->lan_veb = I40E_NO_VEB; 8578 pf->lan_vsi = I40E_NO_VSI; 8579 8580 /* By default FW has this off for performance reasons */ 8581 pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED; 8582 8583 /* set up queue assignment tracking */ 8584 size = sizeof(struct i40e_lump_tracking) 8585 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp); 8586 pf->qp_pile = kzalloc(size, GFP_KERNEL); 8587 if (!pf->qp_pile) { 8588 err = -ENOMEM; 8589 goto sw_init_done; 8590 } 8591 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp; 8592 pf->qp_pile->search_hint = 0; 8593 8594 pf->tx_timeout_recovery_level = 1; 8595 8596 mutex_init(&pf->switch_mutex); 8597 8598 /* If NPAR is enabled nudge the Tx scheduler */ 8599 if (pf->hw.func_caps.npar_enable && (!i40e_get_npar_bw_setting(pf))) 8600 i40e_set_npar_bw_setting(pf); 8601 8602 sw_init_done: 8603 return err; 8604 } 8605 8606 /** 8607 * i40e_set_ntuple - set the ntuple feature flag and take action 8608 * @pf: board private structure to initialize 8609 * @features: the feature set that the stack is suggesting 8610 * 8611 * returns a bool to indicate if reset needs to happen 8612 **/ 8613 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) 8614 { 8615 bool need_reset = false; 8616 8617 /* Check if Flow Director n-tuple support was enabled or disabled. If 8618 * the state changed, we need to reset. 
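 * (From user space this is typically toggled with something like
 * "ethtool -K <iface> ntuple on|off".)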
	 */
	if (features & NETIF_F_NTUPLE) {
		/* Enable filters and mark for reset */
		if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
			need_reset = true;
		pf->flags |= I40E_FLAG_FD_SB_ENABLED;
	} else {
		/* turn off filters, mark for reset and clear SW filter list */
		if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
			need_reset = true;
			i40e_fdir_filter_exit(pf);
		}
		pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
		pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
		/* reset fd counters */
		pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
		pf->fdir_pf_active_filters = 0;
		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
		/* if ATR was auto disabled it can be re-enabled. */
		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
	}
	return need_reset;
}

/**
 * i40e_set_features - set the netdev feature flags
 * @netdev: ptr to the netdev being adjusted
 * @features: the feature set that the stack is suggesting
 **/
static int i40e_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	bool need_reset;

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		i40e_vlan_stripping_enable(vsi);
	else
		i40e_vlan_stripping_disable(vsi);

	need_reset = i40e_set_ntuple(pf, features);

	if (need_reset)
		i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));

	return 0;
}

#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE)
/**
 * i40e_get_udp_port_idx - Look up a UDP port that may be offloaded for Rx
 * @pf: board private structure
 * @port: The UDP port to look up
 *
 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
 **/
static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port)
{
	u8 i;

	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
		if (pf->udp_ports[i].index == port)
			return i;
	}

	return i;
}

#endif

#if IS_ENABLED(CONFIG_VXLAN)
/**
 * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
 * @netdev: This physical port's netdev
 * @sa_family: Socket Family that VXLAN is notifying us about
 * @port: New UDP port number that VXLAN started listening to
 **/
static void i40e_add_vxlan_port(struct net_device *netdev,
				sa_family_t sa_family, __be16 port)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	u8 next_idx;
	u8 idx;

	idx = i40e_get_udp_port_idx(pf, port);

	/* Check if port already exists */
	if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
		netdev_info(netdev, "vxlan port %d already offloaded\n",
			    ntohs(port));
		return;
	}

	/* Now check if there is space to add the new port */
	next_idx = i40e_get_udp_port_idx(pf, 0);

	if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
		netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n",
			    ntohs(port));
		return;
	}

	/* New port: add it and mark its index in the bitmap
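 * - nothing is sent to the firmware from here; the service task is
 * expected to notice I40E_FLAG_UDP_FILTER_SYNC and push the pending
 * entries to the device over the admin queue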
*/ 8730 pf->udp_ports[next_idx].index = port; 8731 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN; 8732 pf->pending_udp_bitmap |= BIT_ULL(next_idx); 8733 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; 8734 } 8735 8736 /** 8737 * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away 8738 * @netdev: This physical port's netdev 8739 * @sa_family: Socket Family that VXLAN is notifying us about 8740 * @port: UDP port number that VXLAN stopped listening to 8741 **/ 8742 static void i40e_del_vxlan_port(struct net_device *netdev, 8743 sa_family_t sa_family, __be16 port) 8744 { 8745 struct i40e_netdev_priv *np = netdev_priv(netdev); 8746 struct i40e_vsi *vsi = np->vsi; 8747 struct i40e_pf *pf = vsi->back; 8748 u8 idx; 8749 8750 idx = i40e_get_udp_port_idx(pf, port); 8751 8752 /* Check if port already exists */ 8753 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { 8754 /* if port exists, set it to 0 (mark for deletion) 8755 * and make it pending 8756 */ 8757 pf->udp_ports[idx].index = 0; 8758 pf->pending_udp_bitmap |= BIT_ULL(idx); 8759 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; 8760 } else { 8761 netdev_warn(netdev, "vxlan port %d was not found, not deleting\n", 8762 ntohs(port)); 8763 } 8764 } 8765 #endif 8766 8767 #if IS_ENABLED(CONFIG_GENEVE) 8768 /** 8769 * i40e_add_geneve_port - Get notifications about GENEVE ports that come up 8770 * @netdev: This physical port's netdev 8771 * @sa_family: Socket Family that GENEVE is notifying us about 8772 * @port: New UDP port number that GENEVE started listening to 8773 **/ 8774 static void i40e_add_geneve_port(struct net_device *netdev, 8775 sa_family_t sa_family, __be16 port) 8776 { 8777 struct i40e_netdev_priv *np = netdev_priv(netdev); 8778 struct i40e_vsi *vsi = np->vsi; 8779 struct i40e_pf *pf = vsi->back; 8780 u8 next_idx; 8781 u8 idx; 8782 8783 if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE)) 8784 return; 8785 8786 idx = i40e_get_udp_port_idx(pf, port); 8787 8788 /* Check if port already exists */ 8789 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { 8790 netdev_info(netdev, "udp port %d already offloaded\n", 8791 ntohs(port)); 8792 return; 8793 } 8794 8795 /* Now check if there is space to add the new port */ 8796 next_idx = i40e_get_udp_port_idx(pf, 0); 8797 8798 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { 8799 netdev_info(netdev, "maximum number of UDP ports reached, not adding port %d\n", 8800 ntohs(port)); 8801 return; 8802 } 8803 8804 /* New port: add it and mark its index in the bitmap */ 8805 pf->udp_ports[next_idx].index = port; 8806 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE; 8807 pf->pending_udp_bitmap |= BIT_ULL(next_idx); 8808 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; 8809 8810 dev_info(&pf->pdev->dev, "adding geneve port %d\n", ntohs(port)); 8811 } 8812 8813 /** 8814 * i40e_del_geneve_port - Get notifications about GENEVE ports that go away 8815 * @netdev: This physical port's netdev 8816 * @sa_family: Socket Family that GENEVE is notifying us about 8817 * @port: UDP port number that GENEVE stopped listening to 8818 **/ 8819 static void i40e_del_geneve_port(struct net_device *netdev, 8820 sa_family_t sa_family, __be16 port) 8821 { 8822 struct i40e_netdev_priv *np = netdev_priv(netdev); 8823 struct i40e_vsi *vsi = np->vsi; 8824 struct i40e_pf *pf = vsi->back; 8825 u8 idx; 8826 8827 if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE)) 8828 return; 8829 8830 idx = i40e_get_udp_port_idx(pf, port); 8831 8832 /* Check if port already exists */ 8833 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { 8834 /* if port exists, 
set it to 0 (mark for deletion) 8835 * and make it pending 8836 */ 8837 pf->udp_ports[idx].index = 0; 8838 pf->pending_udp_bitmap |= BIT_ULL(idx); 8839 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; 8840 8841 dev_info(&pf->pdev->dev, "deleting geneve port %d\n", 8842 ntohs(port)); 8843 } else { 8844 netdev_warn(netdev, "geneve port %d was not found, not deleting\n", 8845 ntohs(port)); 8846 } 8847 } 8848 #endif 8849 8850 static int i40e_get_phys_port_id(struct net_device *netdev, 8851 struct netdev_phys_item_id *ppid) 8852 { 8853 struct i40e_netdev_priv *np = netdev_priv(netdev); 8854 struct i40e_pf *pf = np->vsi->back; 8855 struct i40e_hw *hw = &pf->hw; 8856 8857 if (!(pf->flags & I40E_FLAG_PORT_ID_VALID)) 8858 return -EOPNOTSUPP; 8859 8860 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id)); 8861 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len); 8862 8863 return 0; 8864 } 8865 8866 /** 8867 * i40e_ndo_fdb_add - add an entry to the hardware database 8868 * @ndm: the input from the stack 8869 * @tb: pointer to array of nladdr (unused) 8870 * @dev: the net device pointer 8871 * @addr: the MAC address entry being added 8872 * @flags: instructions from stack about fdb operation 8873 */ 8874 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], 8875 struct net_device *dev, 8876 const unsigned char *addr, u16 vid, 8877 u16 flags) 8878 { 8879 struct i40e_netdev_priv *np = netdev_priv(dev); 8880 struct i40e_pf *pf = np->vsi->back; 8881 int err = 0; 8882 8883 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED)) 8884 return -EOPNOTSUPP; 8885 8886 if (vid) { 8887 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name); 8888 return -EINVAL; 8889 } 8890 8891 /* Hardware does not support aging addresses so if a 8892 * ndm_state is given only allow permanent addresses 8893 */ 8894 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { 8895 netdev_info(dev, "FDB only supports static addresses\n"); 8896 return -EINVAL; 8897 } 8898 8899 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) 8900 err = dev_uc_add_excl(dev, addr); 8901 else if (is_multicast_ether_addr(addr)) 8902 err = dev_mc_add_excl(dev, addr); 8903 else 8904 err = -EINVAL; 8905 8906 /* Only return duplicate errors if NLM_F_EXCL is set */ 8907 if (err == -EEXIST && !(flags & NLM_F_EXCL)) 8908 err = 0; 8909 8910 return err; 8911 } 8912 8913 /** 8914 * i40e_ndo_bridge_setlink - Set the hardware bridge mode 8915 * @dev: the netdev being configured 8916 * @nlh: RTNL message 8917 * 8918 * Inserts a new hardware bridge if not already created and 8919 * enables the bridging mode requested (VEB or VEPA). If the 8920 * hardware bridge has already been inserted and the request 8921 * is to change the mode then that requires a PF reset to 8922 * allow rebuild of the components with required hardware 8923 * bridge mode enabled. 
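 *
 * (From user space this corresponds to iproute2's
 * "bridge link set dev <iface> hwmode {veb|vepa}" command.)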
 **/
static int i40e_ndo_bridge_setlink(struct net_device *dev,
				   struct nlmsghdr *nlh,
				   u16 flags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_veb *veb = NULL;
	struct nlattr *attr, *br_spec;
	int i, rem;

	/* Only for PF VSI for now */
	if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
		return -EOPNOTSUPP;

	/* Find the HW bridge for PF VSI */
	for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
			veb = pf->veb[i];
	}

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	/* a request without an IFLA_AF_SPEC attribute is malformed */
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		__u16 mode;

		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		mode = nla_get_u16(attr);
		if ((mode != BRIDGE_MODE_VEPA) &&
		    (mode != BRIDGE_MODE_VEB))
			return -EINVAL;

		/* Insert a new HW bridge */
		if (!veb) {
			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
			if (veb) {
				veb->bridge_mode = mode;
				i40e_config_bridge_mode(veb);
			} else {
				/* No Bridge HW offload available */
				return -ENOENT;
			}
			break;
		} else if (mode != veb->bridge_mode) {
			/* Existing HW bridge but different mode needs reset */
			veb->bridge_mode = mode;
			/* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
			if (mode == BRIDGE_MODE_VEB)
				pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
			else
				pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
			break;
		}
	}

	return 0;
}

/**
 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
 * @skb: skb buff
 * @pid: process id
 * @seq: RTNL message seq #
 * @dev: the netdev being configured
 * @filter_mask: unused
 * @nlflags: netlink flags passed in
 *
 * Return the mode in which the hardware bridge is operating,
 * i.e. VEB or VEPA.
 **/
static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				   struct net_device *dev,
				   u32 __always_unused filter_mask,
				   int nlflags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_veb *veb = NULL;
	int i;

	/* Only for PF VSI for now */
	if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
		return -EOPNOTSUPP;

	/* Find the HW bridge for the PF VSI */
	for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
			veb = pf->veb[i];
	}

	if (!veb)
		return 0;

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
				       nlflags, 0, 0, filter_mask, NULL);
}

/* Hardware supports L4 tunnel length of 128B (=2^7) which includes
 * inner mac plus all inner ethertypes.
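 * i40e_features_check() below measures that distance as
 * skb_inner_network_header(skb) - skb_transport_header(skb) and, for
 * longer tunnel headers, clears the checksum and GSO feature bits so
 * the stack falls back to software for that skb.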
9029 */ 9030 #define I40E_MAX_TUNNEL_HDR_LEN 128 9031 /** 9032 * i40e_features_check - Validate encapsulated packet conforms to limits 9033 * @skb: skb buff 9034 * @dev: This physical port's netdev 9035 * @features: Offload features that the stack believes apply 9036 **/ 9037 static netdev_features_t i40e_features_check(struct sk_buff *skb, 9038 struct net_device *dev, 9039 netdev_features_t features) 9040 { 9041 if (skb->encapsulation && 9042 ((skb_inner_network_header(skb) - skb_transport_header(skb)) > 9043 I40E_MAX_TUNNEL_HDR_LEN)) 9044 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 9045 9046 return features; 9047 } 9048 9049 static const struct net_device_ops i40e_netdev_ops = { 9050 .ndo_open = i40e_open, 9051 .ndo_stop = i40e_close, 9052 .ndo_start_xmit = i40e_lan_xmit_frame, 9053 .ndo_get_stats64 = i40e_get_netdev_stats_struct, 9054 .ndo_set_rx_mode = i40e_set_rx_mode, 9055 .ndo_validate_addr = eth_validate_addr, 9056 .ndo_set_mac_address = i40e_set_mac, 9057 .ndo_change_mtu = i40e_change_mtu, 9058 .ndo_do_ioctl = i40e_ioctl, 9059 .ndo_tx_timeout = i40e_tx_timeout, 9060 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid, 9061 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid, 9062 #ifdef CONFIG_NET_POLL_CONTROLLER 9063 .ndo_poll_controller = i40e_netpoll, 9064 #endif 9065 .ndo_setup_tc = __i40e_setup_tc, 9066 #ifdef I40E_FCOE 9067 .ndo_fcoe_enable = i40e_fcoe_enable, 9068 .ndo_fcoe_disable = i40e_fcoe_disable, 9069 #endif 9070 .ndo_set_features = i40e_set_features, 9071 .ndo_set_vf_mac = i40e_ndo_set_vf_mac, 9072 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan, 9073 .ndo_set_vf_rate = i40e_ndo_set_vf_bw, 9074 .ndo_get_vf_config = i40e_ndo_get_vf_config, 9075 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state, 9076 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk, 9077 #if IS_ENABLED(CONFIG_VXLAN) 9078 .ndo_add_vxlan_port = i40e_add_vxlan_port, 9079 .ndo_del_vxlan_port = i40e_del_vxlan_port, 9080 #endif 9081 #if IS_ENABLED(CONFIG_GENEVE) 9082 .ndo_add_geneve_port = i40e_add_geneve_port, 9083 .ndo_del_geneve_port = i40e_del_geneve_port, 9084 #endif 9085 .ndo_get_phys_port_id = i40e_get_phys_port_id, 9086 .ndo_fdb_add = i40e_ndo_fdb_add, 9087 .ndo_features_check = i40e_features_check, 9088 .ndo_bridge_getlink = i40e_ndo_bridge_getlink, 9089 .ndo_bridge_setlink = i40e_ndo_bridge_setlink, 9090 }; 9091 9092 /** 9093 * i40e_config_netdev - Setup the netdev flags 9094 * @vsi: the VSI being configured 9095 * 9096 * Returns 0 on success, negative value on failure 9097 **/ 9098 static int i40e_config_netdev(struct i40e_vsi *vsi) 9099 { 9100 u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 9101 struct i40e_pf *pf = vsi->back; 9102 struct i40e_hw *hw = &pf->hw; 9103 struct i40e_netdev_priv *np; 9104 struct net_device *netdev; 9105 u8 mac_addr[ETH_ALEN]; 9106 int etherdev_size; 9107 9108 etherdev_size = sizeof(struct i40e_netdev_priv); 9109 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs); 9110 if (!netdev) 9111 return -ENOMEM; 9112 9113 vsi->netdev = netdev; 9114 np = netdev_priv(netdev); 9115 np->vsi = vsi; 9116 9117 netdev->hw_enc_features |= NETIF_F_IP_CSUM | 9118 NETIF_F_IPV6_CSUM | 9119 NETIF_F_TSO | 9120 NETIF_F_TSO6 | 9121 NETIF_F_TSO_ECN | 9122 NETIF_F_GSO_GRE | 9123 NETIF_F_GSO_UDP_TUNNEL | 9124 NETIF_F_GSO_UDP_TUNNEL_CSUM | 9125 0; 9126 9127 netdev->features = NETIF_F_SG | 9128 NETIF_F_IP_CSUM | 9129 NETIF_F_SCTP_CRC | 9130 NETIF_F_HIGHDMA | 9131 NETIF_F_GSO_UDP_TUNNEL | 9132 NETIF_F_GSO_GRE | 9133 NETIF_F_HW_VLAN_CTAG_TX | 9134 NETIF_F_HW_VLAN_CTAG_RX | 
			   NETIF_F_HW_VLAN_CTAG_FILTER |
			   NETIF_F_IPV6_CSUM |
			   NETIF_F_TSO |
			   NETIF_F_TSO_ECN |
			   NETIF_F_TSO6 |
			   NETIF_F_RXCSUM |
			   NETIF_F_RXHASH |
			   0;

	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		netdev->features |= NETIF_F_NTUPLE;
	if (pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE)
		netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;

	/* copy netdev features into list of user selectable features */
	netdev->hw_features |= netdev->features;

	if (vsi->type == I40E_VSI_MAIN) {
		SET_NETDEV_DEV(netdev, &pf->pdev->dev);
		ether_addr_copy(mac_addr, hw->mac.perm_addr);
		/* The following steps are necessary to prevent reception
		 * of tagged packets - some older NVM configurations load a
		 * default MAC-VLAN filter that accepts any tagged packet
		 * which must be replaced by a normal filter.
		 */
		if (!i40e_rm_default_mac_filter(vsi, mac_addr)) {
			spin_lock_bh(&vsi->mac_filter_list_lock);
			i40e_add_filter(vsi, mac_addr,
					I40E_VLAN_ANY, false, true);
			spin_unlock_bh(&vsi->mac_filter_list_lock);
		}
	} else {
		/* relate the VSI_VMDQ name to the VSI_MAIN name */
		snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
			 pf->vsi[pf->lan_vsi]->netdev->name);
		random_ether_addr(mac_addr);

		spin_lock_bh(&vsi->mac_filter_list_lock);
		i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
		spin_unlock_bh(&vsi->mac_filter_list_lock);
	}

	spin_lock_bh(&vsi->mac_filter_list_lock);
	i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);
	spin_unlock_bh(&vsi->mac_filter_list_lock);

	ether_addr_copy(netdev->dev_addr, mac_addr);
	ether_addr_copy(netdev->perm_addr, mac_addr);
	/* vlan gets same features (except vlan offload)
	 * after any tweaks for specific VSI types
	 */
	netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX |
						     NETIF_F_HW_VLAN_CTAG_RX |
						   NETIF_F_HW_VLAN_CTAG_FILTER);
	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	/* Setup netdev TC information */
	i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);

	netdev->netdev_ops = &i40e_netdev_ops;
	netdev->watchdog_timeo = 5 * HZ;
	i40e_set_ethtool_ops(netdev);
#ifdef I40E_FCOE
	i40e_fcoe_config_netdev(netdev, vsi);
#endif

	return 0;
}

/**
 * i40e_vsi_delete - Delete a VSI from the switch
 * @vsi: the VSI being removed
 **/
static void i40e_vsi_delete(struct i40e_vsi *vsi)
{
	/* removing the default VSI is not allowed */
	if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
		return;

	i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
}

/**
 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
 * @vsi: the VSI being queried
 *
 * Returns 1 if the HW bridge mode is VEB, 0 if it is VEPA, and -ENOENT
 * if the uplink VEB cannot be found
 **/
int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
{
	struct i40e_veb *veb;
	struct i40e_pf *pf = vsi->back;

	/* Uplink is not a bridge so default to VEB */
	if (vsi->veb_idx == I40E_NO_VEB)
		return 1;

	veb = pf->veb[vsi->veb_idx];
	if (!veb) {
		dev_info(&pf->pdev->dev,
			 "There is no veb associated with the bridge\n");
		return -ENOENT;
	}

	/* Uplink is a bridge in VEPA mode */
	if (veb->bridge_mode & BRIDGE_MODE_VEPA)
		return 0;

	/* Uplink is a bridge in VEB mode */
	return 1;
}

/**
 * i40e_add_vsi - Add a VSI to the switch
 * @vsi: the VSI being configured
 *
 * This initializes a VSI context depending on the VSI type to be added and
 * passes it down to the add_vsi aq command.
 **/
static int i40e_add_vsi(struct i40e_vsi *vsi)
{
	int ret = -ENODEV;
	u8 laa_macaddr[ETH_ALEN];
	bool found_laa_mac_filter = false;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vsi_context ctxt;
	struct i40e_mac_filter *f, *ftmp;

	u8 enabled_tc = 0x1; /* TC0 enabled */
	int f_count = 0;

	memset(&ctxt, 0, sizeof(ctxt));
	switch (vsi->type) {
	case I40E_VSI_MAIN:
		/* The PF's main VSI is already setup as part of the
		 * device initialization, so we'll not bother with
		 * the add_vsi call, but we will retrieve the current
		 * VSI context.
		 */
		ctxt.seid = pf->main_vsi_seid;
		ctxt.pf_num = pf->hw.pf_id;
		ctxt.vf_num = 0;
		ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "couldn't get PF vsi config, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return -ENOENT;
		}
		vsi->info = ctxt.info;
		vsi->info.valid_sections = 0;

		vsi->seid = ctxt.seid;
		vsi->id = ctxt.vsi_number;

		enabled_tc = i40e_pf_get_tc_map(pf);

		/* MFP mode setup queue map and update VSI */
		if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
		    !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
			memset(&ctxt, 0, sizeof(ctxt));
			ctxt.seid = pf->main_vsi_seid;
			ctxt.pf_num = pf->hw.pf_id;
			ctxt.vf_num = 0;
			i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "update vsi failed, err %s aq_err %s\n",
					 i40e_stat_str(&pf->hw, ret),
					 i40e_aq_str(&pf->hw,
						     pf->hw.aq.asq_last_status));
				ret = -ENOENT;
				goto err;
			}
			/* update the local VSI info queue map */
			i40e_vsi_update_queue_map(vsi, &ctxt);
			vsi->info.valid_sections = 0;
		} else {
			/* Default/Main VSI is only enabled for TC0
			 * reconfigure it to enable all TCs that are
			 * available on the port in SFP mode.
			 * For MFP case the iSCSI PF would use this
			 * flow to enable LAN+iSCSI TC.
9329 */ 9330 ret = i40e_vsi_config_tc(vsi, enabled_tc); 9331 if (ret) { 9332 dev_info(&pf->pdev->dev, 9333 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n", 9334 enabled_tc, 9335 i40e_stat_str(&pf->hw, ret), 9336 i40e_aq_str(&pf->hw, 9337 pf->hw.aq.asq_last_status)); 9338 ret = -ENOENT; 9339 } 9340 } 9341 break; 9342 9343 case I40E_VSI_FDIR: 9344 ctxt.pf_num = hw->pf_id; 9345 ctxt.vf_num = 0; 9346 ctxt.uplink_seid = vsi->uplink_seid; 9347 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; 9348 ctxt.flags = I40E_AQ_VSI_TYPE_PF; 9349 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) && 9350 (i40e_is_vsi_uplink_mode_veb(vsi))) { 9351 ctxt.info.valid_sections |= 9352 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 9353 ctxt.info.switch_id = 9354 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 9355 } 9356 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); 9357 break; 9358 9359 case I40E_VSI_VMDQ2: 9360 ctxt.pf_num = hw->pf_id; 9361 ctxt.vf_num = 0; 9362 ctxt.uplink_seid = vsi->uplink_seid; 9363 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; 9364 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2; 9365 9366 /* This VSI is connected to VEB so the switch_id 9367 * should be set to zero by default. 9368 */ 9369 if (i40e_is_vsi_uplink_mode_veb(vsi)) { 9370 ctxt.info.valid_sections |= 9371 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 9372 ctxt.info.switch_id = 9373 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 9374 } 9375 9376 /* Setup the VSI tx/rx queue map for TC0 only for now */ 9377 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); 9378 break; 9379 9380 case I40E_VSI_SRIOV: 9381 ctxt.pf_num = hw->pf_id; 9382 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id; 9383 ctxt.uplink_seid = vsi->uplink_seid; 9384 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; 9385 ctxt.flags = I40E_AQ_VSI_TYPE_VF; 9386 9387 /* This VSI is connected to VEB so the switch_id 9388 * should be set to zero by default. 
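 * When the uplink is in VEB mode, however, the switch section is
 * marked valid and ALLOW_LB is set so locally switched traffic can
 * loop back to this VSI.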
9389 */ 9390 if (i40e_is_vsi_uplink_mode_veb(vsi)) { 9391 ctxt.info.valid_sections |= 9392 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 9393 ctxt.info.switch_id = 9394 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 9395 } 9396 9397 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) { 9398 ctxt.info.valid_sections |= 9399 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); 9400 ctxt.info.queueing_opt_flags |= 9401 I40E_AQ_VSI_QUE_OPT_TCP_ENA; 9402 } 9403 9404 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 9405 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL; 9406 if (pf->vf[vsi->vf_id].spoofchk) { 9407 ctxt.info.valid_sections |= 9408 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); 9409 ctxt.info.sec_flags |= 9410 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK | 9411 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK); 9412 } 9413 /* Setup the VSI tx/rx queue map for TC0 only for now */ 9414 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); 9415 break; 9416 9417 #ifdef I40E_FCOE 9418 case I40E_VSI_FCOE: 9419 ret = i40e_fcoe_vsi_init(vsi, &ctxt); 9420 if (ret) { 9421 dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n"); 9422 return ret; 9423 } 9424 break; 9425 9426 #endif /* I40E_FCOE */ 9427 case I40E_VSI_IWARP: 9428 /* send down message to iWARP */ 9429 break; 9430 9431 default: 9432 return -ENODEV; 9433 } 9434 9435 if (vsi->type != I40E_VSI_MAIN) { 9436 ret = i40e_aq_add_vsi(hw, &ctxt, NULL); 9437 if (ret) { 9438 dev_info(&vsi->back->pdev->dev, 9439 "add vsi failed, err %s aq_err %s\n", 9440 i40e_stat_str(&pf->hw, ret), 9441 i40e_aq_str(&pf->hw, 9442 pf->hw.aq.asq_last_status)); 9443 ret = -ENOENT; 9444 goto err; 9445 } 9446 vsi->info = ctxt.info; 9447 vsi->info.valid_sections = 0; 9448 vsi->seid = ctxt.seid; 9449 vsi->id = ctxt.vsi_number; 9450 } 9451 9452 spin_lock_bh(&vsi->mac_filter_list_lock); 9453 /* If macvlan filters already exist, force them to get loaded */ 9454 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { 9455 f->changed = true; 9456 f_count++; 9457 9458 /* Expected to have only one MAC filter entry for LAA in list */ 9459 if (f->is_laa && vsi->type == I40E_VSI_MAIN) { 9460 ether_addr_copy(laa_macaddr, f->macaddr); 9461 found_laa_mac_filter = true; 9462 } 9463 } 9464 spin_unlock_bh(&vsi->mac_filter_list_lock); 9465 9466 if (found_laa_mac_filter) { 9467 struct i40e_aqc_remove_macvlan_element_data element; 9468 9469 memset(&element, 0, sizeof(element)); 9470 ether_addr_copy(element.mac_addr, laa_macaddr); 9471 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; 9472 ret = i40e_aq_remove_macvlan(hw, vsi->seid, 9473 &element, 1, NULL); 9474 if (ret) { 9475 /* some older FW has a different default */ 9476 element.flags |= 9477 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; 9478 i40e_aq_remove_macvlan(hw, vsi->seid, 9479 &element, 1, NULL); 9480 } 9481 9482 i40e_aq_mac_address_write(hw, 9483 I40E_AQC_WRITE_TYPE_LAA_WOL, 9484 laa_macaddr, NULL); 9485 } 9486 9487 if (f_count) { 9488 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; 9489 pf->flags |= I40E_FLAG_FILTER_SYNC; 9490 } 9491 9492 /* Update VSI BW information */ 9493 ret = i40e_vsi_get_bw_info(vsi); 9494 if (ret) { 9495 dev_info(&pf->pdev->dev, 9496 "couldn't get vsi bw info, err %s aq_err %s\n", 9497 i40e_stat_str(&pf->hw, ret), 9498 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 9499 /* VSI is already added so not tearing that up */ 9500 ret = 0; 9501 } 9502 9503 err: 9504 return ret; 9505 } 9506 9507 /** 9508 * i40e_vsi_release - Delete a VSI and free its resources 9509 * @vsi: the VSI being removed 9510 * 9511 * 
Returns 0 on success or < 0 on error 9512 **/ 9513 int i40e_vsi_release(struct i40e_vsi *vsi) 9514 { 9515 struct i40e_mac_filter *f, *ftmp; 9516 struct i40e_veb *veb = NULL; 9517 struct i40e_pf *pf; 9518 u16 uplink_seid; 9519 int i, n; 9520 9521 pf = vsi->back; 9522 9523 /* release of a VEB-owner or last VSI is not allowed */ 9524 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) { 9525 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n", 9526 vsi->seid, vsi->uplink_seid); 9527 return -ENODEV; 9528 } 9529 if (vsi == pf->vsi[pf->lan_vsi] && 9530 !test_bit(__I40E_DOWN, &pf->state)) { 9531 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); 9532 return -ENODEV; 9533 } 9534 9535 uplink_seid = vsi->uplink_seid; 9536 if (vsi->type != I40E_VSI_SRIOV) { 9537 if (vsi->netdev_registered) { 9538 vsi->netdev_registered = false; 9539 if (vsi->netdev) { 9540 /* results in a call to i40e_close() */ 9541 unregister_netdev(vsi->netdev); 9542 } 9543 } else { 9544 i40e_vsi_close(vsi); 9545 } 9546 i40e_vsi_disable_irq(vsi); 9547 } 9548 9549 spin_lock_bh(&vsi->mac_filter_list_lock); 9550 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) 9551 i40e_del_filter(vsi, f->macaddr, f->vlan, 9552 f->is_vf, f->is_netdev); 9553 spin_unlock_bh(&vsi->mac_filter_list_lock); 9554 9555 i40e_sync_vsi_filters(vsi); 9556 9557 i40e_vsi_delete(vsi); 9558 i40e_vsi_free_q_vectors(vsi); 9559 if (vsi->netdev) { 9560 free_netdev(vsi->netdev); 9561 vsi->netdev = NULL; 9562 } 9563 i40e_vsi_clear_rings(vsi); 9564 i40e_vsi_clear(vsi); 9565 9566 /* If this was the last thing on the VEB, except for the 9567 * controlling VSI, remove the VEB, which puts the controlling 9568 * VSI onto the next level down in the switch. 9569 * 9570 * Well, okay, there's one more exception here: don't remove 9571 * the orphan VEBs yet. We'll wait for an explicit remove request 9572 * from up the network stack. 9573 */ 9574 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) { 9575 if (pf->vsi[i] && 9576 pf->vsi[i]->uplink_seid == uplink_seid && 9577 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { 9578 n++; /* count the VSIs */ 9579 } 9580 } 9581 for (i = 0; i < I40E_MAX_VEB; i++) { 9582 if (!pf->veb[i]) 9583 continue; 9584 if (pf->veb[i]->uplink_seid == uplink_seid) 9585 n++; /* count the VEBs */ 9586 if (pf->veb[i]->seid == uplink_seid) 9587 veb = pf->veb[i]; 9588 } 9589 if (n == 0 && veb && veb->uplink_seid != 0) 9590 i40e_veb_release(veb); 9591 9592 return 0; 9593 } 9594 9595 /** 9596 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI 9597 * @vsi: ptr to the VSI 9598 * 9599 * This should only be called after i40e_vsi_mem_alloc() which allocates the 9600 * corresponding SW VSI structure and initializes num_queue_pairs for the 9601 * newly allocated VSI. 
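 * In MSI-X mode it also reserves num_q_vectors interrupt vectors from
 * pf->irq_pile and records the first one in vsi->base_vector; with
 * legacy/MSI interrupts the queues piggyback on the misc vector instead.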
9602 * 9603 * Returns 0 on success or negative on failure 9604 **/ 9605 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi) 9606 { 9607 int ret = -ENOENT; 9608 struct i40e_pf *pf = vsi->back; 9609 9610 if (vsi->q_vectors[0]) { 9611 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n", 9612 vsi->seid); 9613 return -EEXIST; 9614 } 9615 9616 if (vsi->base_vector) { 9617 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n", 9618 vsi->seid, vsi->base_vector); 9619 return -EEXIST; 9620 } 9621 9622 ret = i40e_vsi_alloc_q_vectors(vsi); 9623 if (ret) { 9624 dev_info(&pf->pdev->dev, 9625 "failed to allocate %d q_vector for VSI %d, ret=%d\n", 9626 vsi->num_q_vectors, vsi->seid, ret); 9627 vsi->num_q_vectors = 0; 9628 goto vector_setup_out; 9629 } 9630 9631 /* In Legacy mode, we do not have to get any other vector since we 9632 * piggyback on the misc/ICR0 for queue interrupts. 9633 */ 9634 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) 9635 return ret; 9636 if (vsi->num_q_vectors) 9637 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile, 9638 vsi->num_q_vectors, vsi->idx); 9639 if (vsi->base_vector < 0) { 9640 dev_info(&pf->pdev->dev, 9641 "failed to get tracking for %d vectors for VSI %d, err=%d\n", 9642 vsi->num_q_vectors, vsi->seid, vsi->base_vector); 9643 i40e_vsi_free_q_vectors(vsi); 9644 ret = -ENOENT; 9645 goto vector_setup_out; 9646 } 9647 9648 vector_setup_out: 9649 return ret; 9650 } 9651 9652 /** 9653 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI 9654 * @vsi: pointer to the vsi. 9655 * 9656 * This re-allocates a vsi's queue resources. 9657 * 9658 * Returns pointer to the successfully allocated and configured VSI sw struct 9659 * on success, otherwise returns NULL on failure. 9660 **/ 9661 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi) 9662 { 9663 struct i40e_pf *pf; 9664 u8 enabled_tc; 9665 int ret; 9666 9667 if (!vsi) 9668 return NULL; 9669 9670 pf = vsi->back; 9671 9672 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); 9673 i40e_vsi_clear_rings(vsi); 9674 9675 i40e_vsi_free_arrays(vsi, false); 9676 i40e_set_num_rings_in_vsi(vsi); 9677 ret = i40e_vsi_alloc_arrays(vsi, false); 9678 if (ret) 9679 goto err_vsi; 9680 9681 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx); 9682 if (ret < 0) { 9683 dev_info(&pf->pdev->dev, 9684 "failed to get tracking for %d queues for VSI %d err %d\n", 9685 vsi->alloc_queue_pairs, vsi->seid, ret); 9686 goto err_vsi; 9687 } 9688 vsi->base_queue = ret; 9689 9690 /* Update the FW view of the VSI. Force a reset of TC and queue 9691 * layout configurations. 9692 */ 9693 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; 9694 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; 9695 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; 9696 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); 9697 9698 /* assign it some queues */ 9699 ret = i40e_alloc_rings(vsi); 9700 if (ret) 9701 goto err_rings; 9702 9703 /* map all of the rings to the q_vectors */ 9704 i40e_vsi_map_rings_to_vectors(vsi); 9705 return vsi; 9706 9707 err_rings: 9708 i40e_vsi_free_q_vectors(vsi); 9709 if (vsi->netdev_registered) { 9710 vsi->netdev_registered = false; 9711 unregister_netdev(vsi->netdev); 9712 free_netdev(vsi->netdev); 9713 vsi->netdev = NULL; 9714 } 9715 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); 9716 err_vsi: 9717 i40e_vsi_clear(vsi); 9718 return NULL; 9719 } 9720 9721 /** 9722 * i40e_macaddr_init - explicitly write the mac address filters. 9723 * 9724 * @vsi: pointer to the vsi. 
9725 * @macaddr: the MAC address
9726 *
9727 * This is needed when the macaddr has been obtained by other
9728 * means than the default, e.g., from Open Firmware or IDPROM.
9729 * Returns 0 on success, negative on failure
9730 **/
9731 static int i40e_macaddr_init(struct i40e_vsi *vsi, u8 *macaddr)
9732 {
9733 int ret;
9734 struct i40e_aqc_add_macvlan_element_data element;
9735
9736 ret = i40e_aq_mac_address_write(&vsi->back->hw,
9737 I40E_AQC_WRITE_TYPE_LAA_WOL,
9738 macaddr, NULL);
9739 if (ret) {
9740 dev_info(&vsi->back->pdev->dev,
9741 "Addr change for VSI failed: %d\n", ret);
9742 return -EADDRNOTAVAIL;
9743 }
9744
9745 memset(&element, 0, sizeof(element));
9746 ether_addr_copy(element.mac_addr, macaddr);
9747 element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
9748 ret = i40e_aq_add_macvlan(&vsi->back->hw, vsi->seid, &element, 1, NULL);
9749 if (ret) {
9750 dev_info(&vsi->back->pdev->dev,
9751 "add filter failed err %s aq_err %s\n",
9752 i40e_stat_str(&vsi->back->hw, ret),
9753 i40e_aq_str(&vsi->back->hw,
9754 vsi->back->hw.aq.asq_last_status));
9755 }
9756 return ret;
9757 }
9758
9759 /**
9760 * i40e_vsi_setup - Set up a VSI by a given type
9761 * @pf: board private structure
9762 * @type: VSI type
9763 * @uplink_seid: the switch element to link to
9764 * @param1: usage depends upon VSI type. For VF types, indicates VF id
9765 *
9766 * This allocates the sw VSI structure and its queue resources, then adds a
9767 * VSI to the identified VEB.
9768 *
9769 * Returns pointer to the successfully allocated and configured VSI sw struct
9770 * on success, otherwise returns NULL on failure.
9771 **/
9772 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
9773 u16 uplink_seid, u32 param1)
9774 {
9775 struct i40e_vsi *vsi = NULL;
9776 struct i40e_veb *veb = NULL;
9777 int ret, i;
9778 int v_idx;
9779
9780 /* The requested uplink_seid must be either
9781 * - the PF's port seid
9782 * no VEB is needed because this is the PF
9783 * or this is a Flow Director special case VSI
9784 * - seid of an existing VEB
9785 * - seid of a VSI that owns an existing VEB
9786 * - seid of a VSI that doesn't own a VEB
9787 * a new VEB is created and the VSI becomes the owner
9788 * - seid of the PF VSI, which is what creates the first VEB
9789 * this is a special case of the previous
9790 *
9791 * Find which uplink_seid we were given and create a new VEB if needed
9792 */
9793 for (i = 0; i < I40E_MAX_VEB; i++) {
9794 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
9795 veb = pf->veb[i];
9796 break;
9797 }
9798 }
9799
9800 if (!veb && uplink_seid != pf->mac_seid) {
9801
9802 for (i = 0; i < pf->num_alloc_vsi; i++) {
9803 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
9804 vsi = pf->vsi[i];
9805 break;
9806 }
9807 }
9808 if (!vsi) {
9809 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
9810 uplink_seid);
9811 return NULL;
9812 }
9813
9814 if (vsi->uplink_seid == pf->mac_seid)
9815 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
9816 vsi->tc_config.enabled_tc);
9817 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
9818 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
9819 vsi->tc_config.enabled_tc);
9820 if (veb) {
9821 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
9822 dev_info(&vsi->back->pdev->dev,
9823 "New VSI creation error, uplink seid of LAN VSI expected.\n");
9824 return NULL;
9825 }
9826 /* We come up by default in VEPA mode if SRIOV is not
9827 * already enabled, in which case we can't force VEPA
9828 * mode.
9829 */
9830 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
9831 veb->bridge_mode = BRIDGE_MODE_VEPA;
9832 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
9833 }
9834 i40e_config_bridge_mode(veb);
9835 }
9836 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
9837 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
9838 veb = pf->veb[i];
9839 }
9840 if (!veb) {
9841 dev_info(&pf->pdev->dev, "couldn't add VEB\n");
9842 return NULL;
9843 }
9844
9845 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
9846 uplink_seid = veb->seid;
9847 }
9848
9849 /* get vsi sw struct */
9850 v_idx = i40e_vsi_mem_alloc(pf, type);
9851 if (v_idx < 0)
9852 goto err_alloc;
9853 vsi = pf->vsi[v_idx];
9854 if (!vsi)
9855 goto err_alloc;
9856 vsi->type = type;
9857 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
9858
9859 if (type == I40E_VSI_MAIN)
9860 pf->lan_vsi = v_idx;
9861 else if (type == I40E_VSI_SRIOV)
9862 vsi->vf_id = param1;
9863 /* assign it some queues */
9864 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
9865 vsi->idx);
9866 if (ret < 0) {
9867 dev_info(&pf->pdev->dev,
9868 "failed to get tracking for %d queues for VSI %d err=%d\n",
9869 vsi->alloc_queue_pairs, vsi->seid, ret);
9870 goto err_vsi;
9871 }
9872 vsi->base_queue = ret;
9873
9874 /* get a VSI from the hardware */
9875 vsi->uplink_seid = uplink_seid;
9876 ret = i40e_add_vsi(vsi);
9877 if (ret)
9878 goto err_vsi;
9879
9880 switch (vsi->type) {
9881 /* setup the netdev if needed */
9882 case I40E_VSI_MAIN:
9883 /* Apply relevant filters if a platform-specific mac
9884 * address was selected.
9885 */
9886 if (pf->flags & I40E_FLAG_PF_MAC) {
9887 ret = i40e_macaddr_init(vsi, pf->hw.mac.addr);
9888 if (ret) {
9889 dev_warn(&pf->pdev->dev,
9890 "could not set up macaddr; err %d\n",
9891 ret);
9892 }
9893 }
/* fall through */
9894 case I40E_VSI_VMDQ2:
9895 case I40E_VSI_FCOE:
9896 ret = i40e_config_netdev(vsi);
9897 if (ret)
9898 goto err_netdev;
9899 ret = register_netdev(vsi->netdev);
9900 if (ret)
9901 goto err_netdev;
9902 vsi->netdev_registered = true;
9903 netif_carrier_off(vsi->netdev);
9904 #ifdef CONFIG_I40E_DCB
9905 /* Setup DCB netlink interface */
9906 i40e_dcbnl_setup(vsi);
9907 #endif /* CONFIG_I40E_DCB */
9908 /* fall through */
9909
9910 case I40E_VSI_FDIR:
9911 /* set up vectors and rings if needed */
9912 ret = i40e_vsi_setup_vectors(vsi);
9913 if (ret)
9914 goto err_msix;
9915
9916 ret = i40e_alloc_rings(vsi);
9917 if (ret)
9918 goto err_rings;
9919
9920 /* map all of the rings to the q_vectors */
9921 i40e_vsi_map_rings_to_vectors(vsi);
9922
9923 i40e_vsi_reset_stats(vsi);
9924 break;
9925
9926 default:
9927 /* no netdev or rings for the other VSI types */
9928 break;
9929 }
9930
9931 if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) &&
9932 (vsi->type == I40E_VSI_VMDQ2)) {
9933 ret = i40e_vsi_config_rss(vsi);
9934 }
9935 return vsi;
9936
9937 err_rings:
9938 i40e_vsi_free_q_vectors(vsi);
9939 err_msix:
9940 if (vsi->netdev_registered) {
9941 vsi->netdev_registered = false;
9942 unregister_netdev(vsi->netdev);
9943 free_netdev(vsi->netdev);
9944 vsi->netdev = NULL;
9945 }
9946 err_netdev:
9947 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
9948 err_vsi:
9949 i40e_vsi_clear(vsi);
9950 err_alloc:
9951 return NULL;
9952 }
9953
9954 /**
9955 * i40e_veb_get_bw_info - Query VEB BW information
9956 * @veb: the veb to query
9957 *
9958 * Query the Tx scheduler BW configuration data for given VEB
9959 **/
9960 static int i40e_veb_get_bw_info(struct i40e_veb *veb)
9961 {
9962 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
9963 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
9964 struct i40e_pf *pf = veb->pf;
9965 struct i40e_hw *hw = &pf->hw;
9966 u32 tc_bw_max;
9967 int ret = 0;
9968 int i;
9969
9970 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
9971 &bw_data, NULL);
9972 if (ret) {
9973 dev_info(&pf->pdev->dev,
9974 "query veb bw config failed, err %s aq_err %s\n",
9975 i40e_stat_str(&pf->hw, ret),
9976 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
9977 goto out;
9978 }
9979
9980 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
9981 &ets_data, NULL);
9982 if (ret) {
9983 dev_info(&pf->pdev->dev,
9984 "query veb bw ets config failed, err %s aq_err %s\n",
9985 i40e_stat_str(&pf->hw, ret),
9986 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
9987 goto out;
9988 }
9989
9990 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
9991 veb->bw_max_quanta = ets_data.tc_bw_max;
9992 veb->is_abs_credits = bw_data.absolute_credits_enable;
9993 veb->enabled_tc = ets_data.tc_valid_bits;
9994 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
9995 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
9996 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
9997 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
9998 veb->bw_tc_limit_credits[i] =
9999 le16_to_cpu(bw_data.tc_bw_limits[i]);
10000 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i * 4)) & 0x7);
10001 }
10002
10003 out:
10004 return ret;
10005 }
10006
10007 /**
10008 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
10009 * @pf: board private structure
10010 *
10011 * On error: returns error code (negative)
10012 * On success: returns veb index in PF (positive)
10013 **/
10014 static int i40e_veb_mem_alloc(struct i40e_pf *pf)
10015 {
10016 int ret = -ENOENT;
10017 struct i40e_veb *veb;
10018 int i;
10019
10020 /* Need to protect the allocation of switch elements at the PF level */
10021 mutex_lock(&pf->switch_mutex);
10022
10023 /* VEB list may be fragmented if VEB creation/destruction has
10024 * been happening. We can afford to do a quick scan to look
10025 * for any free slots in the list.
10026 *
10027 * find the next empty veb slot
10028 */
10029 i = 0;
10030 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
10031 i++;
10032 if (i >= I40E_MAX_VEB) {
10033 ret = -ENOMEM;
10034 goto err_alloc_veb; /* out of VEB slots! */
10035 }
10036
10037 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
10038 if (!veb) {
10039 ret = -ENOMEM;
10040 goto err_alloc_veb;
10041 }
10042 veb->pf = pf;
10043 veb->idx = i;
10044 veb->enabled_tc = 1;
10045
10046 pf->veb[i] = veb;
10047 ret = i;
10048 err_alloc_veb:
10049 mutex_unlock(&pf->switch_mutex);
10050 return ret;
10051 }
10052
10053 /**
10054 * i40e_switch_branch_release - Delete a branch of the switch tree
10055 * @branch: where to start deleting
10056 *
10057 * This uses recursion to find the tips of the branch to be
10058 * removed, deleting until we get back to and can delete this VEB.
10059 **/
10060 static void i40e_switch_branch_release(struct i40e_veb *branch)
10061 {
10062 struct i40e_pf *pf = branch->pf;
10063 u16 branch_seid = branch->seid;
10064 u16 veb_idx = branch->idx;
10065 int i;
10066
10067 /* release any VEBs on this VEB - RECURSION */
10068 for (i = 0; i < I40E_MAX_VEB; i++) {
10069 if (!pf->veb[i])
10070 continue;
10071 if (pf->veb[i]->uplink_seid == branch->seid)
10072 i40e_switch_branch_release(pf->veb[i]);
10073 }
10074
10075 /* Release the VSIs on this VEB, but not the owner VSI.
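 * (VSIs that own the VEB are skipped here; once the others are gone,
 * i40e_veb_release() moves the owner back up to the VEB's uplink.)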
10076 * 10077 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing 10078 * the VEB itself, so don't use (*branch) after this loop. 10079 */ 10080 for (i = 0; i < pf->num_alloc_vsi; i++) { 10081 if (!pf->vsi[i]) 10082 continue; 10083 if (pf->vsi[i]->uplink_seid == branch_seid && 10084 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { 10085 i40e_vsi_release(pf->vsi[i]); 10086 } 10087 } 10088 10089 /* There's one corner case where the VEB might not have been 10090 * removed, so double check it here and remove it if needed. 10091 * This case happens if the veb was created from the debugfs 10092 * commands and no VSIs were added to it. 10093 */ 10094 if (pf->veb[veb_idx]) 10095 i40e_veb_release(pf->veb[veb_idx]); 10096 } 10097 10098 /** 10099 * i40e_veb_clear - remove veb struct 10100 * @veb: the veb to remove 10101 **/ 10102 static void i40e_veb_clear(struct i40e_veb *veb) 10103 { 10104 if (!veb) 10105 return; 10106 10107 if (veb->pf) { 10108 struct i40e_pf *pf = veb->pf; 10109 10110 mutex_lock(&pf->switch_mutex); 10111 if (pf->veb[veb->idx] == veb) 10112 pf->veb[veb->idx] = NULL; 10113 mutex_unlock(&pf->switch_mutex); 10114 } 10115 10116 kfree(veb); 10117 } 10118 10119 /** 10120 * i40e_veb_release - Delete a VEB and free its resources 10121 * @veb: the VEB being removed 10122 **/ 10123 void i40e_veb_release(struct i40e_veb *veb) 10124 { 10125 struct i40e_vsi *vsi = NULL; 10126 struct i40e_pf *pf; 10127 int i, n = 0; 10128 10129 pf = veb->pf; 10130 10131 /* find the remaining VSI and check for extras */ 10132 for (i = 0; i < pf->num_alloc_vsi; i++) { 10133 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) { 10134 n++; 10135 vsi = pf->vsi[i]; 10136 } 10137 } 10138 if (n != 1) { 10139 dev_info(&pf->pdev->dev, 10140 "can't remove VEB %d with %d VSIs left\n", 10141 veb->seid, n); 10142 return; 10143 } 10144 10145 /* move the remaining VSI to uplink veb */ 10146 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER; 10147 if (veb->uplink_seid) { 10148 vsi->uplink_seid = veb->uplink_seid; 10149 if (veb->uplink_seid == pf->mac_seid) 10150 vsi->veb_idx = I40E_NO_VEB; 10151 else 10152 vsi->veb_idx = veb->veb_idx; 10153 } else { 10154 /* floating VEB */ 10155 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; 10156 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx; 10157 } 10158 10159 i40e_aq_delete_element(&pf->hw, veb->seid, NULL); 10160 i40e_veb_clear(veb); 10161 } 10162 10163 /** 10164 * i40e_add_veb - create the VEB in the switch 10165 * @veb: the VEB to be instantiated 10166 * @vsi: the controlling VSI 10167 **/ 10168 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) 10169 { 10170 struct i40e_pf *pf = veb->pf; 10171 bool is_default = veb->pf->cur_promisc; 10172 bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED); 10173 int ret; 10174 10175 /* get a VEB from the hardware */ 10176 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid, 10177 veb->enabled_tc, is_default, 10178 &veb->seid, enable_stats, NULL); 10179 if (ret) { 10180 dev_info(&pf->pdev->dev, 10181 "couldn't add VEB, err %s aq_err %s\n", 10182 i40e_stat_str(&pf->hw, ret), 10183 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 10184 return -EPERM; 10185 } 10186 10187 /* get statistics counter */ 10188 ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL, 10189 &veb->stats_idx, NULL, NULL, NULL); 10190 if (ret) { 10191 dev_info(&pf->pdev->dev, 10192 "couldn't get VEB statistics idx, err %s aq_err %s\n", 10193 i40e_stat_str(&pf->hw, ret), 10194 i40e_aq_str(&pf->hw, 
pf->hw.aq.asq_last_status)); 10195 return -EPERM; 10196 } 10197 ret = i40e_veb_get_bw_info(veb); 10198 if (ret) { 10199 dev_info(&pf->pdev->dev, 10200 "couldn't get VEB bw info, err %s aq_err %s\n", 10201 i40e_stat_str(&pf->hw, ret), 10202 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 10203 i40e_aq_delete_element(&pf->hw, veb->seid, NULL); 10204 return -ENOENT; 10205 } 10206 10207 vsi->uplink_seid = veb->seid; 10208 vsi->veb_idx = veb->idx; 10209 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER; 10210 10211 return 0; 10212 } 10213 10214 /** 10215 * i40e_veb_setup - Set up a VEB 10216 * @pf: board private structure 10217 * @flags: VEB setup flags 10218 * @uplink_seid: the switch element to link to 10219 * @vsi_seid: the initial VSI seid 10220 * @enabled_tc: Enabled TC bit-map 10221 * 10222 * This allocates the sw VEB structure and links it into the switch 10223 * It is possible and legal for this to be a duplicate of an already 10224 * existing VEB. It is also possible for both uplink and vsi seids 10225 * to be zero, in order to create a floating VEB. 10226 * 10227 * Returns pointer to the successfully allocated VEB sw struct on 10228 * success, otherwise returns NULL on failure. 10229 **/ 10230 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, 10231 u16 uplink_seid, u16 vsi_seid, 10232 u8 enabled_tc) 10233 { 10234 struct i40e_veb *veb, *uplink_veb = NULL; 10235 int vsi_idx, veb_idx; 10236 int ret; 10237 10238 /* if one seid is 0, the other must be 0 to create a floating relay */ 10239 if ((uplink_seid == 0 || vsi_seid == 0) && 10240 (uplink_seid + vsi_seid != 0)) { 10241 dev_info(&pf->pdev->dev, 10242 "one, not both seid's are 0: uplink=%d vsi=%d\n", 10243 uplink_seid, vsi_seid); 10244 return NULL; 10245 } 10246 10247 /* make sure there is such a vsi and uplink */ 10248 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++) 10249 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid) 10250 break; 10251 if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) { 10252 dev_info(&pf->pdev->dev, "vsi seid %d not found\n", 10253 vsi_seid); 10254 return NULL; 10255 } 10256 10257 if (uplink_seid && uplink_seid != pf->mac_seid) { 10258 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) { 10259 if (pf->veb[veb_idx] && 10260 pf->veb[veb_idx]->seid == uplink_seid) { 10261 uplink_veb = pf->veb[veb_idx]; 10262 break; 10263 } 10264 } 10265 if (!uplink_veb) { 10266 dev_info(&pf->pdev->dev, 10267 "uplink seid %d not found\n", uplink_seid); 10268 return NULL; 10269 } 10270 } 10271 10272 /* get veb sw struct */ 10273 veb_idx = i40e_veb_mem_alloc(pf); 10274 if (veb_idx < 0) 10275 goto err_alloc; 10276 veb = pf->veb[veb_idx]; 10277 veb->flags = flags; 10278 veb->uplink_seid = uplink_seid; 10279 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB); 10280 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1); 10281 10282 /* create the VEB in the switch */ 10283 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]); 10284 if (ret) 10285 goto err_veb; 10286 if (vsi_idx == pf->lan_vsi) 10287 pf->lan_veb = veb->idx; 10288 10289 return veb; 10290 10291 err_veb: 10292 i40e_veb_clear(veb); 10293 err_alloc: 10294 return NULL; 10295 } 10296 10297 /** 10298 * i40e_setup_pf_switch_element - set PF vars based on switch type 10299 * @pf: board private structure 10300 * @ele: element we are building info from 10301 * @num_reported: total number of elements 10302 * @printconfig: should we print the contents 10303 * 10304 * helper function to assist in extracting a few useful SEID values. 
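 * Depending on the element type this records pf->mac_seid, finds or
 * allocates the pf->lan_veb slot, or captures the PF and main VSI SEIDs.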
10305 **/ 10306 static void i40e_setup_pf_switch_element(struct i40e_pf *pf, 10307 struct i40e_aqc_switch_config_element_resp *ele, 10308 u16 num_reported, bool printconfig) 10309 { 10310 u16 downlink_seid = le16_to_cpu(ele->downlink_seid); 10311 u16 uplink_seid = le16_to_cpu(ele->uplink_seid); 10312 u8 element_type = ele->element_type; 10313 u16 seid = le16_to_cpu(ele->seid); 10314 10315 if (printconfig) 10316 dev_info(&pf->pdev->dev, 10317 "type=%d seid=%d uplink=%d downlink=%d\n", 10318 element_type, seid, uplink_seid, downlink_seid); 10319 10320 switch (element_type) { 10321 case I40E_SWITCH_ELEMENT_TYPE_MAC: 10322 pf->mac_seid = seid; 10323 break; 10324 case I40E_SWITCH_ELEMENT_TYPE_VEB: 10325 /* Main VEB? */ 10326 if (uplink_seid != pf->mac_seid) 10327 break; 10328 if (pf->lan_veb == I40E_NO_VEB) { 10329 int v; 10330 10331 /* find existing or else empty VEB */ 10332 for (v = 0; v < I40E_MAX_VEB; v++) { 10333 if (pf->veb[v] && (pf->veb[v]->seid == seid)) { 10334 pf->lan_veb = v; 10335 break; 10336 } 10337 } 10338 if (pf->lan_veb == I40E_NO_VEB) { 10339 v = i40e_veb_mem_alloc(pf); 10340 if (v < 0) 10341 break; 10342 pf->lan_veb = v; 10343 } 10344 } 10345 10346 pf->veb[pf->lan_veb]->seid = seid; 10347 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid; 10348 pf->veb[pf->lan_veb]->pf = pf; 10349 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB; 10350 break; 10351 case I40E_SWITCH_ELEMENT_TYPE_VSI: 10352 if (num_reported != 1) 10353 break; 10354 /* This is immediately after a reset so we can assume this is 10355 * the PF's VSI 10356 */ 10357 pf->mac_seid = uplink_seid; 10358 pf->pf_seid = downlink_seid; 10359 pf->main_vsi_seid = seid; 10360 if (printconfig) 10361 dev_info(&pf->pdev->dev, 10362 "pf_seid=%d main_vsi_seid=%d\n", 10363 pf->pf_seid, pf->main_vsi_seid); 10364 break; 10365 case I40E_SWITCH_ELEMENT_TYPE_PF: 10366 case I40E_SWITCH_ELEMENT_TYPE_VF: 10367 case I40E_SWITCH_ELEMENT_TYPE_EMP: 10368 case I40E_SWITCH_ELEMENT_TYPE_BMC: 10369 case I40E_SWITCH_ELEMENT_TYPE_PE: 10370 case I40E_SWITCH_ELEMENT_TYPE_PA: 10371 /* ignore these for now */ 10372 break; 10373 default: 10374 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n", 10375 element_type, seid); 10376 break; 10377 } 10378 } 10379 10380 /** 10381 * i40e_fetch_switch_configuration - Get switch config from firmware 10382 * @pf: board private structure 10383 * @printconfig: should we print the contents 10384 * 10385 * Get the current switch configuration from the device and 10386 * extract a few useful SEID values. 
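 * The AQ reports the configuration in chunks; next_seid carries the
 * cursor between calls and the loop ends once it comes back as 0.
 * A typical call, as in i40e_setup_pf_switch() below, is simply:
 *
 *	ret = i40e_fetch_switch_configuration(pf, false);
 *	if (ret)
 *		dev_info(&pf->pdev->dev, "couldn't fetch switch config\n");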
10387 **/
10388 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
10389 {
10390 struct i40e_aqc_get_switch_config_resp *sw_config;
10391 u16 next_seid = 0;
10392 int ret = 0;
10393 u8 *aq_buf;
10394 int i;
10395
10396 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
10397 if (!aq_buf)
10398 return -ENOMEM;
10399
10400 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
10401 do {
10402 u16 num_reported, num_total;
10403
10404 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
10405 I40E_AQ_LARGE_BUF,
10406 &next_seid, NULL);
10407 if (ret) {
10408 dev_info(&pf->pdev->dev,
10409 "get switch config failed err %s aq_err %s\n",
10410 i40e_stat_str(&pf->hw, ret),
10411 i40e_aq_str(&pf->hw,
10412 pf->hw.aq.asq_last_status));
10413 kfree(aq_buf);
10414 return -ENOENT;
10415 }
10416
10417 num_reported = le16_to_cpu(sw_config->header.num_reported);
10418 num_total = le16_to_cpu(sw_config->header.num_total);
10419
10420 if (printconfig)
10421 dev_info(&pf->pdev->dev,
10422 "header: %d reported %d total\n",
10423 num_reported, num_total);
10424
10425 for (i = 0; i < num_reported; i++) {
10426 struct i40e_aqc_switch_config_element_resp *ele =
10427 &sw_config->element[i];
10428
10429 i40e_setup_pf_switch_element(pf, ele, num_reported,
10430 printconfig);
10431 }
10432 } while (next_seid != 0);
10433
10434 kfree(aq_buf);
10435 return ret;
10436 }
10437
10438 /**
10439 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
10440 * @pf: board private structure
10441 * @reinit: if the Main VSI needs to be re-initialized.
10442 *
10443 * Returns 0 on success, negative value on failure
10444 **/
10445 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
10446 {
10447 int ret;
10448
10449 /* find out what's out there already */
10450 ret = i40e_fetch_switch_configuration(pf, false);
10451 if (ret) {
10452 dev_info(&pf->pdev->dev,
10453 "couldn't fetch switch config, err %s aq_err %s\n",
10454 i40e_stat_str(&pf->hw, ret),
10455 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10456 return ret;
10457 }
10458 i40e_pf_reset_stats(pf);
10459
10460 /* first time setup */
10461 if (pf->lan_vsi == I40E_NO_VSI || reinit) {
10462 struct i40e_vsi *vsi = NULL;
10463 u16 uplink_seid;
10464
10465 /* Set up the PF VSI associated with the PF's main VSI
10466 * that is already in the HW switch
10467 */
10468 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
10469 uplink_seid = pf->veb[pf->lan_veb]->seid;
10470 else
10471 uplink_seid = pf->mac_seid;
10472 if (pf->lan_vsi == I40E_NO_VSI)
10473 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
10474 else if (reinit)
10475 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
10476 if (!vsi) {
10477 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
10478 i40e_fdir_teardown(pf);
10479 return -EAGAIN;
10480 }
10481 } else {
10482 /* force a reset of TC and queue layout configurations */
10483 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
10484
10485 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
10486 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
10487 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
10488 }
10489 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
10490
10491 i40e_fdir_sb_setup(pf);
10492
10493 /* Setup static PF queue filter control settings */
10494 ret = i40e_setup_pf_filter_control(pf);
10495 if (ret) {
10496 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
10497 ret);
10498 /* Failure here should not stop the remaining setup steps */
10499 }
10500
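	/* Sketch of the switch tree the code above leaves behind (the VEB
	 * level is optional and depends on the uplink chosen above):
	 *
	 *	MAC (pf->mac_seid)
	 *	  `-- VEB (pf->lan_veb, if any)
	 *	        `-- MAIN VSI (pf->lan_vsi)
	 *
	 * i40e_fdir_sb_setup() adds the Flow Director VSI to this tree when
	 * I40E_FLAG_FD_SB_ENABLED is set.
	 */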
10501 /* enable RSS in the HW, even for only one queue, as the stack can use 10502 * the hash 10503 */ 10504 if ((pf->flags & I40E_FLAG_RSS_ENABLED)) 10505 i40e_pf_config_rss(pf); 10506 10507 /* fill in link information and enable LSE reporting */ 10508 i40e_update_link_info(&pf->hw); 10509 i40e_link_event(pf); 10510 10511 /* Initialize user-specific link properties */ 10512 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info & 10513 I40E_AQ_AN_COMPLETED) ? true : false); 10514 10515 i40e_ptp_init(pf); 10516 10517 return ret; 10518 } 10519 10520 /** 10521 * i40e_determine_queue_usage - Work out queue distribution 10522 * @pf: board private structure 10523 **/ 10524 static void i40e_determine_queue_usage(struct i40e_pf *pf) 10525 { 10526 int queues_left; 10527 10528 pf->num_lan_qps = 0; 10529 #ifdef I40E_FCOE 10530 pf->num_fcoe_qps = 0; 10531 #endif 10532 10533 /* Find the max queues to be put into basic use. We'll always be 10534 * using TC0, whether or not DCB is running, and TC0 will get the 10535 * big RSS set. 10536 */ 10537 queues_left = pf->hw.func_caps.num_tx_qp; 10538 10539 if ((queues_left == 1) || 10540 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) { 10541 /* one qp for PF, no queues for anything else */ 10542 queues_left = 0; 10543 pf->alloc_rss_size = pf->num_lan_qps = 1; 10544 10545 /* make sure all the fancies are disabled */ 10546 pf->flags &= ~(I40E_FLAG_RSS_ENABLED | 10547 I40E_FLAG_IWARP_ENABLED | 10548 #ifdef I40E_FCOE 10549 I40E_FLAG_FCOE_ENABLED | 10550 #endif 10551 I40E_FLAG_FD_SB_ENABLED | 10552 I40E_FLAG_FD_ATR_ENABLED | 10553 I40E_FLAG_DCB_CAPABLE | 10554 I40E_FLAG_SRIOV_ENABLED | 10555 I40E_FLAG_VMDQ_ENABLED); 10556 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED | 10557 I40E_FLAG_FD_SB_ENABLED | 10558 I40E_FLAG_FD_ATR_ENABLED | 10559 I40E_FLAG_DCB_CAPABLE))) { 10560 /* one qp for PF */ 10561 pf->alloc_rss_size = pf->num_lan_qps = 1; 10562 queues_left -= pf->num_lan_qps; 10563 10564 pf->flags &= ~(I40E_FLAG_RSS_ENABLED | 10565 I40E_FLAG_IWARP_ENABLED | 10566 #ifdef I40E_FCOE 10567 I40E_FLAG_FCOE_ENABLED | 10568 #endif 10569 I40E_FLAG_FD_SB_ENABLED | 10570 I40E_FLAG_FD_ATR_ENABLED | 10571 I40E_FLAG_DCB_ENABLED | 10572 I40E_FLAG_VMDQ_ENABLED); 10573 } else { 10574 /* Not enough queues for all TCs */ 10575 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) && 10576 (queues_left < I40E_MAX_TRAFFIC_CLASS)) { 10577 pf->flags &= ~I40E_FLAG_DCB_CAPABLE; 10578 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n"); 10579 } 10580 pf->num_lan_qps = max_t(int, pf->rss_size_max, 10581 num_online_cpus()); 10582 pf->num_lan_qps = min_t(int, pf->num_lan_qps, 10583 pf->hw.func_caps.num_tx_qp); 10584 10585 queues_left -= pf->num_lan_qps; 10586 } 10587 10588 #ifdef I40E_FCOE 10589 if (pf->flags & I40E_FLAG_FCOE_ENABLED) { 10590 if (I40E_DEFAULT_FCOE <= queues_left) { 10591 pf->num_fcoe_qps = I40E_DEFAULT_FCOE; 10592 } else if (I40E_MINIMUM_FCOE <= queues_left) { 10593 pf->num_fcoe_qps = I40E_MINIMUM_FCOE; 10594 } else { 10595 pf->num_fcoe_qps = 0; 10596 pf->flags &= ~I40E_FLAG_FCOE_ENABLED; 10597 dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n"); 10598 } 10599 10600 queues_left -= pf->num_fcoe_qps; 10601 } 10602 10603 #endif 10604 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { 10605 if (queues_left > 1) { 10606 queues_left -= 1; /* save 1 queue for FD */ 10607 } else { 10608 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 10609 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. 
Flow Director feature is disabled\n");
10610 }
10611 }
10612
10613 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
10614 pf->num_vf_qps && pf->num_req_vfs && queues_left) {
10615 pf->num_req_vfs = min_t(int, pf->num_req_vfs,
10616 (queues_left / pf->num_vf_qps));
10617 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
10618 }
10619
10620 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
10621 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
10622 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
10623 (queues_left / pf->num_vmdq_qps));
10624 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
10625 }
10626
10627 pf->queues_left = queues_left;
10628 dev_dbg(&pf->pdev->dev,
10629 "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
10630 pf->hw.func_caps.num_tx_qp,
10631 !!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
10632 pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
10633 pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
10634 queues_left);
10635 #ifdef I40E_FCOE
10636 dev_dbg(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
10637 #endif
10638 }
10639
10640 /**
10641 * i40e_setup_pf_filter_control - Setup PF static filter control
10642 * @pf: PF to be setup
10643 *
10644 * i40e_setup_pf_filter_control sets up a PF's initial filter control
10645 * settings. If PE/FCoE are enabled then it will also set the per PF
10646 * based filter sizes required for them. It also enables Flow Director,
10647 * ethertype and macvlan type filter settings for the PF.
10648 *
10649 * Returns 0 on success, negative on failure
10650 **/
10651 static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
10652 {
10653 struct i40e_filter_control_settings *settings = &pf->filter_settings;
10654
10655 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
10656
10657 /* Flow Director is enabled */
10658 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
10659 settings->enable_fdir = true;
10660
10661 /* Ethtype and MACVLAN filters enabled for PF */
10662 settings->enable_ethtype = true;
10663 settings->enable_macvlan = true;
10664
10665 if (i40e_set_filter_control(&pf->hw, settings))
10666 return -ENOENT;
10667
10668 return 0;
10669 }
10670
10671 #define INFO_STRING_LEN 255
10672 #define REMAIN(__x) (INFO_STRING_LEN - (__x))
10673 static void i40e_print_features(struct i40e_pf *pf)
10674 {
10675 struct i40e_hw *hw = &pf->hw;
10676 char *buf;
10677 int i;
10678
10679 buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
10680 if (!buf)
10681 return;
10682
10683 i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
10684 #ifdef CONFIG_PCI_IOV
10685 i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
10686 #endif
10687 i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d RX: %s",
10688 pf->hw.func_caps.num_vsis,
10689 pf->vsi[pf->lan_vsi]->num_queue_pairs,
10690 pf->flags & I40E_FLAG_RX_PS_ENABLED ? "PS" : "1BUF");
10691
10692 if (pf->flags & I40E_FLAG_RSS_ENABLED)
10693 i += snprintf(&buf[i], REMAIN(i), " RSS");
10694 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
10695 i += snprintf(&buf[i], REMAIN(i), " FD_ATR");
10696 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
10697 i += snprintf(&buf[i], REMAIN(i), " FD_SB");
10698 i += snprintf(&buf[i], REMAIN(i), " NTUPLE");
10699 }
10700 if (pf->flags & I40E_FLAG_DCB_CAPABLE)
10701 i += snprintf(&buf[i], REMAIN(i), " DCB");
10702 #if IS_ENABLED(CONFIG_VXLAN)
10703 i += snprintf(&buf[i], REMAIN(i), " VxLAN");
10704 #endif
10705 #if IS_ENABLED(CONFIG_GENEVE)
10706 i += snprintf(&buf[i], REMAIN(i), " Geneve");
10707 #endif
10708 if (pf->flags & I40E_FLAG_PTP)
10709 i += snprintf(&buf[i], REMAIN(i), " PTP");
10710 #ifdef I40E_FCOE
10711 if (pf->flags & I40E_FLAG_FCOE_ENABLED)
10712 i += snprintf(&buf[i], REMAIN(i), " FCOE");
10713 #endif
10714 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
10715 i += snprintf(&buf[i], REMAIN(i), " VEB");
10716 else
10717 i += snprintf(&buf[i], REMAIN(i), " VEPA");
10718
10719 dev_info(&pf->pdev->dev, "%s\n", buf);
10720 kfree(buf);
10721 WARN_ON(i > INFO_STRING_LEN);
10722 }
10723
10724 /**
10725 * i40e_get_platform_mac_addr - get platform-specific MAC address
10726 *
10727 * @pdev: PCI device information struct
10728 * @pf: board private structure
10729 *
10730 * Look up the MAC address in Open Firmware on systems that support it,
10731 * and use IDPROM on SPARC if no OF address is found. On return, the
10732 * I40E_FLAG_PF_MAC will be set in pf->flags if a platform-specific value
10733 * has been selected.
10734 **/
10735 static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
10736 {
10737 pf->flags &= ~I40E_FLAG_PF_MAC;
10738 if (!eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
10739 pf->flags |= I40E_FLAG_PF_MAC;
10740 }
10741
10742 /**
10743 * i40e_probe - Device initialization routine
10744 * @pdev: PCI device information struct
10745 * @ent: entry in i40e_pci_tbl
10746 *
10747 * i40e_probe initializes a PF identified by a pci_dev structure.
10748 * The OS initialization, configuring of the PF private structure,
10749 * and a hardware reset occur.
10750 *
10751 * Returns 0 on success, negative on failure
10752 **/
10753 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
10754 {
10755 struct i40e_aq_get_phy_abilities_resp abilities;
10756 struct i40e_pf *pf;
10757 struct i40e_hw *hw;
10758 static u16 pfs_found;
10759 u16 wol_nvm_bits;
10760 u16 link_status;
10761 int err;
10762 u32 val;
10763 u32 i;
10764 u8 set_fc_aq_fail;
10765
10766 err = pci_enable_device_mem(pdev);
10767 if (err)
10768 return err;
10769
10770 /* set up for high or low dma */
10771 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10772 if (err) {
10773 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10774 if (err) {
10775 dev_err(&pdev->dev,
10776 "DMA configuration failed: 0x%x\n", err);
10777 goto err_dma;
10778 }
10779 }
10780
10781 /* set up pci connections */
10782 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
10783 IORESOURCE_MEM), i40e_driver_name);
10784 if (err) {
10785 dev_info(&pdev->dev,
10786 "pci_request_selected_regions failed %d\n", err);
10787 goto err_pci_reg;
10788 }
10789
10790 pci_enable_pcie_error_reporting(pdev);
10791 pci_set_master(pdev);
10792
10793 /* Now that we have a PCI connection, we need to do the
10794 * low level device setup. This is primarily setting up
10795 * the Admin Queue structures and then querying for the
10796 * device's current profile information.
10797 */
10798 pf = kzalloc(sizeof(*pf), GFP_KERNEL);
10799 if (!pf) {
10800 err = -ENOMEM;
10801 goto err_pf_alloc;
10802 }
10803 pf->next_vsi = 0;
10804 pf->pdev = pdev;
10805 set_bit(__I40E_DOWN, &pf->state);
10806
10807 hw = &pf->hw;
10808 hw->back = pf;
10809
10810 pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
10811 I40E_MAX_CSR_SPACE);
10812
10813 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
10814 if (!hw->hw_addr) {
10815 err = -EIO;
10816 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
10817 (unsigned int)pci_resource_start(pdev, 0),
10818 pf->ioremap_len, err);
10819 goto err_ioremap;
10820 }
10821 hw->vendor_id = pdev->vendor;
10822 hw->device_id = pdev->device;
10823 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
10824 hw->subsystem_vendor_id = pdev->subsystem_vendor;
10825 hw->subsystem_device_id = pdev->subsystem_device;
10826 hw->bus.device = PCI_SLOT(pdev->devfn);
10827 hw->bus.func = PCI_FUNC(pdev->devfn);
10828 pf->instance = pfs_found;
10829
10830 if (debug != -1)
10831 pf->msg_enable = debug;
10834
10835 /* do a special CORER for clearing PXE mode once at init */
10836 if (hw->revision_id == 0 &&
10837 (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
10838 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
10839 i40e_flush(hw);
10840 msleep(200);
10841 pf->corer_count++;
10842
10843 i40e_clear_pxe_mode(hw);
10844 }
10845
10846 /* Reset here to make sure all is clean and to define PF 'n' */
10847 i40e_clear_hw(hw);
10848 err = i40e_pf_reset(hw);
10849 if (err) {
10850 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
10851 goto err_pf_reset;
10852 }
10853 pf->pfr_count++;
10854
10855 hw->aq.num_arq_entries = I40E_AQ_LEN;
10856 hw->aq.num_asq_entries = I40E_AQ_LEN;
10857 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
10858 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
10859 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
10860
10861 snprintf(pf->int_name, sizeof(pf->int_name) - 1,
10862 "%s-%s:misc",
10863 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
10864
10865 err = i40e_init_shared_code(hw);
10866 if (err) {
10867 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
10868 err);
10869 goto err_pf_reset;
10870 }
10871
10872 /* set up a default setting for link flow control */
10873 pf->hw.fc.requested_mode = I40E_FC_NONE;
10874
10875 /* set up the locks for the AQ, do this only once in probe
10876 * and destroy them only once in remove
10877 */
10878 mutex_init(&hw->aq.asq_mutex);
10879 mutex_init(&hw->aq.arq_mutex);
10880
10881 err = i40e_init_adminq(hw);
10882 if (err) {
10883 if (err == I40E_ERR_FIRMWARE_API_VERSION)
10884 dev_info(&pdev->dev,
10885 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
10886 else
10887 dev_info(&pdev->dev,
10888 "The driver for the device stopped because the device firmware failed to init.
Try updating your NVM image.\n"); 10889 10890 goto err_pf_reset; 10891 } 10892 10893 /* provide nvm, fw, api versions */ 10894 dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n", 10895 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build, 10896 hw->aq.api_maj_ver, hw->aq.api_min_ver, 10897 i40e_nvm_version_str(hw)); 10898 10899 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && 10900 hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR) 10901 dev_info(&pdev->dev, 10902 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n"); 10903 else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR || 10904 hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1)) 10905 dev_info(&pdev->dev, 10906 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n"); 10907 10908 i40e_verify_eeprom(pf); 10909 10910 /* Rev 0 hardware was never productized */ 10911 if (hw->revision_id < 1) 10912 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n"); 10913 10914 i40e_clear_pxe_mode(hw); 10915 err = i40e_get_capabilities(pf); 10916 if (err) 10917 goto err_adminq_setup; 10918 10919 err = i40e_sw_init(pf); 10920 if (err) { 10921 dev_info(&pdev->dev, "sw_init failed: %d\n", err); 10922 goto err_sw_init; 10923 } 10924 10925 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, 10926 hw->func_caps.num_rx_qp, 10927 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num); 10928 if (err) { 10929 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err); 10930 goto err_init_lan_hmc; 10931 } 10932 10933 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); 10934 if (err) { 10935 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err); 10936 err = -ENOENT; 10937 goto err_configure_lan_hmc; 10938 } 10939 10940 /* Disable LLDP for NICs that have firmware versions lower than v4.3. 
10941 * Ignore error return codes because if it was already disabled via 10942 * hardware settings this will fail 10943 */ 10944 if (pf->flags & I40E_FLAG_STOP_FW_LLDP) { 10945 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n"); 10946 i40e_aq_stop_lldp(hw, true, NULL); 10947 } 10948 10949 i40e_get_mac_addr(hw, hw->mac.addr); 10950 /* allow a platform config to override the HW addr */ 10951 i40e_get_platform_mac_addr(pdev, pf); 10952 if (!is_valid_ether_addr(hw->mac.addr)) { 10953 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr); 10954 err = -EIO; 10955 goto err_mac_addr; 10956 } 10957 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr); 10958 ether_addr_copy(hw->mac.perm_addr, hw->mac.addr); 10959 i40e_get_port_mac_addr(hw, hw->mac.port_addr); 10960 if (is_valid_ether_addr(hw->mac.port_addr)) 10961 pf->flags |= I40E_FLAG_PORT_ID_VALID; 10962 #ifdef I40E_FCOE 10963 err = i40e_get_san_mac_addr(hw, hw->mac.san_addr); 10964 if (err) 10965 dev_info(&pdev->dev, 10966 "(non-fatal) SAN MAC retrieval failed: %d\n", err); 10967 if (!is_valid_ether_addr(hw->mac.san_addr)) { 10968 dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n", 10969 hw->mac.san_addr); 10970 ether_addr_copy(hw->mac.san_addr, hw->mac.addr); 10971 } 10972 dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr); 10973 #endif /* I40E_FCOE */ 10974 10975 pci_set_drvdata(pdev, pf); 10976 pci_save_state(pdev); 10977 #ifdef CONFIG_I40E_DCB 10978 err = i40e_init_pf_dcb(pf); 10979 if (err) { 10980 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err); 10981 pf->flags &= ~I40E_FLAG_DCB_CAPABLE; 10982 /* Continue without DCB enabled */ 10983 } 10984 #endif /* CONFIG_I40E_DCB */ 10985 10986 /* set up periodic task facility */ 10987 setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf); 10988 pf->service_timer_period = HZ; 10989 10990 INIT_WORK(&pf->service_task, i40e_service_task); 10991 clear_bit(__I40E_SERVICE_SCHED, &pf->state); 10992 pf->flags |= I40E_FLAG_NEED_LINK_UPDATE; 10993 10994 /* NVM bit on means WoL disabled for the port */ 10995 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits); 10996 if (BIT (hw->port) & wol_nvm_bits || hw->partition_id != 1) 10997 pf->wol_en = false; 10998 else 10999 pf->wol_en = true; 11000 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en); 11001 11002 /* set up the main switch operations */ 11003 i40e_determine_queue_usage(pf); 11004 err = i40e_init_interrupt_scheme(pf); 11005 if (err) 11006 goto err_switch_setup; 11007 11008 /* The number of VSIs reported by the FW is the minimum guaranteed 11009 * to us; HW supports far more and we share the remaining pool with 11010 * the other PFs. We allocate space for more than the guarantee with 11011 * the understanding that we might not get them all later. 11012 */ 11013 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC) 11014 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC; 11015 else 11016 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis; 11017 11018 /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. 
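 * (pf->vsi is an array of num_alloc_vsi pointers; entries are filled in
 * later by i40e_vsi_mem_alloc() as VSIs are created.)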
*/ 11019 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *), 11020 GFP_KERNEL); 11021 if (!pf->vsi) { 11022 err = -ENOMEM; 11023 goto err_switch_setup; 11024 } 11025 11026 #ifdef CONFIG_PCI_IOV 11027 /* prep for VF support */ 11028 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && 11029 (pf->flags & I40E_FLAG_MSIX_ENABLED) && 11030 !test_bit(__I40E_BAD_EEPROM, &pf->state)) { 11031 if (pci_num_vf(pdev)) 11032 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; 11033 } 11034 #endif 11035 err = i40e_setup_pf_switch(pf, false); 11036 if (err) { 11037 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); 11038 goto err_vsis; 11039 } 11040 11041 /* Make sure flow control is set according to current settings */ 11042 err = i40e_set_fc(hw, &set_fc_aq_fail, true); 11043 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET) 11044 dev_dbg(&pf->pdev->dev, 11045 "Set fc with err %s aq_err %s on get_phy_cap\n", 11046 i40e_stat_str(hw, err), 11047 i40e_aq_str(hw, hw->aq.asq_last_status)); 11048 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET) 11049 dev_dbg(&pf->pdev->dev, 11050 "Set fc with err %s aq_err %s on set_phy_config\n", 11051 i40e_stat_str(hw, err), 11052 i40e_aq_str(hw, hw->aq.asq_last_status)); 11053 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE) 11054 dev_dbg(&pf->pdev->dev, 11055 "Set fc with err %s aq_err %s on get_link_info\n", 11056 i40e_stat_str(hw, err), 11057 i40e_aq_str(hw, hw->aq.asq_last_status)); 11058 11059 /* if FDIR VSI was set up, start it now */ 11060 for (i = 0; i < pf->num_alloc_vsi; i++) { 11061 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { 11062 i40e_vsi_open(pf->vsi[i]); 11063 break; 11064 } 11065 } 11066 11067 /* The driver only wants link up/down and module qualification 11068 * reports from firmware. Note the negative logic. 11069 */ 11070 err = i40e_aq_set_phy_int_mask(&pf->hw, 11071 ~(I40E_AQ_EVENT_LINK_UPDOWN | 11072 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL); 11073 if (err) 11074 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n", 11075 i40e_stat_str(&pf->hw, err), 11076 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 11077 11078 /* Reconfigure hardware for allowing smaller MSS in the case 11079 * of TSO, so that we avoid the MDD being fired and causing 11080 * a reset in the case of small MSS+TSO. 11081 */ 11082 val = rd32(hw, I40E_REG_MSS); 11083 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) { 11084 val &= ~I40E_REG_MSS_MIN_MASK; 11085 val |= I40E_64BYTE_MSS; 11086 wr32(hw, I40E_REG_MSS, val); 11087 } 11088 11089 if (pf->flags & I40E_FLAG_RESTART_AUTONEG) { 11090 msleep(75); 11091 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); 11092 if (err) 11093 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n", 11094 i40e_stat_str(&pf->hw, err), 11095 i40e_aq_str(&pf->hw, 11096 pf->hw.aq.asq_last_status)); 11097 } 11098 /* The main driver is (mostly) up and happy. We need to set this state 11099 * before setting up the misc vector or we get a race and the vector 11100 * ends up disabled forever. 11101 */ 11102 clear_bit(__I40E_DOWN, &pf->state); 11103 11104 /* In case of MSIX we are going to setup the misc vector right here 11105 * to handle admin queue events etc. In case of legacy and MSI 11106 * the misc functionality and queue processing is combined in 11107 * the same vector and that gets setup at open. 
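 * (This is also why i40e_vsi_setup_vectors() returns early without
 * reserving extra vectors when MSI-X is not enabled.)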
11108 */ 11109 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 11110 err = i40e_setup_misc_vector(pf); 11111 if (err) { 11112 dev_info(&pdev->dev, 11113 "setup of misc vector failed: %d\n", err); 11114 goto err_vsis; 11115 } 11116 } 11117 11118 #ifdef CONFIG_PCI_IOV 11119 /* prep for VF support */ 11120 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && 11121 (pf->flags & I40E_FLAG_MSIX_ENABLED) && 11122 !test_bit(__I40E_BAD_EEPROM, &pf->state)) { 11123 /* disable link interrupts for VFs */ 11124 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM); 11125 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK; 11126 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val); 11127 i40e_flush(hw); 11128 11129 if (pci_num_vf(pdev)) { 11130 dev_info(&pdev->dev, 11131 "Active VFs found, allocating resources.\n"); 11132 err = i40e_alloc_vfs(pf, pci_num_vf(pdev)); 11133 if (err) 11134 dev_info(&pdev->dev, 11135 "Error %d allocating resources for existing VFs\n", 11136 err); 11137 } 11138 } 11139 #endif /* CONFIG_PCI_IOV */ 11140 11141 if (pf->flags & I40E_FLAG_IWARP_ENABLED) { 11142 pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile, 11143 pf->num_iwarp_msix, 11144 I40E_IWARP_IRQ_PILE_ID); 11145 if (pf->iwarp_base_vector < 0) { 11146 dev_info(&pdev->dev, 11147 "failed to get tracking for %d vectors for IWARP err=%d\n", 11148 pf->num_iwarp_msix, pf->iwarp_base_vector); 11149 pf->flags &= ~I40E_FLAG_IWARP_ENABLED; 11150 } 11151 } 11152 11153 i40e_dbg_pf_init(pf); 11154 11155 /* tell the firmware that we're starting */ 11156 i40e_send_version(pf); 11157 11158 /* since everything's happy, start the service_task timer */ 11159 mod_timer(&pf->service_timer, 11160 round_jiffies(jiffies + pf->service_timer_period)); 11161 11162 /* add this PF to client device list and launch a client service task */ 11163 err = i40e_lan_add_device(pf); 11164 if (err) 11165 dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n", 11166 err); 11167 11168 #ifdef I40E_FCOE 11169 /* create FCoE interface */ 11170 i40e_fcoe_vsi_setup(pf); 11171 11172 #endif 11173 #define PCI_SPEED_SIZE 8 11174 #define PCI_WIDTH_SIZE 8 11175 /* Devices on the IOSF bus do not have this information 11176 * and will report PCI Gen 1 x 1 by default so don't bother 11177 * checking them. 
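 * (Keyed off the I40E_FLAG_NO_PCI_LINK_CHECK flag tested below.)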
11178 */ 11179 if (!(pf->flags & I40E_FLAG_NO_PCI_LINK_CHECK)) { 11180 char speed[PCI_SPEED_SIZE] = "Unknown"; 11181 char width[PCI_WIDTH_SIZE] = "Unknown"; 11182 11183 /* Get the negotiated link width and speed from PCI config 11184 * space 11185 */ 11186 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, 11187 &link_status); 11188 11189 i40e_set_pci_config_data(hw, link_status); 11190 11191 switch (hw->bus.speed) { 11192 case i40e_bus_speed_8000: 11193 strncpy(speed, "8.0", PCI_SPEED_SIZE); break; 11194 case i40e_bus_speed_5000: 11195 strncpy(speed, "5.0", PCI_SPEED_SIZE); break; 11196 case i40e_bus_speed_2500: 11197 strncpy(speed, "2.5", PCI_SPEED_SIZE); break; 11198 default: 11199 break; 11200 } 11201 switch (hw->bus.width) { 11202 case i40e_bus_width_pcie_x8: 11203 strncpy(width, "8", PCI_WIDTH_SIZE); break; 11204 case i40e_bus_width_pcie_x4: 11205 strncpy(width, "4", PCI_WIDTH_SIZE); break; 11206 case i40e_bus_width_pcie_x2: 11207 strncpy(width, "2", PCI_WIDTH_SIZE); break; 11208 case i40e_bus_width_pcie_x1: 11209 strncpy(width, "1", PCI_WIDTH_SIZE); break; 11210 default: 11211 break; 11212 } 11213 11214 dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n", 11215 speed, width); 11216 11217 if (hw->bus.width < i40e_bus_width_pcie_x8 || 11218 hw->bus.speed < i40e_bus_speed_8000) { 11219 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n"); 11220 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n"); 11221 } 11222 } 11223 11224 /* get the requested speeds from the fw */ 11225 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL); 11226 if (err) 11227 dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n", 11228 i40e_stat_str(&pf->hw, err), 11229 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 11230 pf->hw.phy.link_info.requested_speeds = abilities.link_speed; 11231 11232 /* get the supported phy types from the fw */ 11233 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL); 11234 if (err) 11235 dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n", 11236 i40e_stat_str(&pf->hw, err), 11237 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 11238 pf->hw.phy.phy_types = le32_to_cpu(abilities.phy_type); 11239 11240 /* Add a filter to drop all Flow control frames from any VSI from being 11241 * transmitted. By doing so we stop a malicious VF from sending out 11242 * PAUSE or PFC frames and potentially controlling traffic for other 11243 * PF/VF VSIs. 11244 * The FW can still send Flow control frames if enabled. 
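 * (Flow control frames use the MAC control ethertype 0x8808; the filter
 * below is anchored on pf->main_vsi_seid.)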
11245 */
11246 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
11247 pf->main_vsi_seid);
11248
11249 if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
11250 (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
11251 pf->flags |= I40E_FLAG_HAVE_10GBASET_PHY;
11252
11253 /* print a string summarizing features */
11254 i40e_print_features(pf);
11255
11256 return 0;
11257
11258 /* Unwind what we've done if something failed in the setup */
11259 err_vsis:
11260 set_bit(__I40E_DOWN, &pf->state);
11261 i40e_clear_interrupt_scheme(pf);
11262 kfree(pf->vsi);
11263 err_switch_setup:
11264 i40e_reset_interrupt_capability(pf);
11265 del_timer_sync(&pf->service_timer);
11266 err_mac_addr:
11267 err_configure_lan_hmc:
11268 (void)i40e_shutdown_lan_hmc(hw);
11269 err_init_lan_hmc:
11270 kfree(pf->qp_pile);
11271 err_sw_init:
11272 err_adminq_setup:
11273 (void)i40e_shutdown_adminq(hw);
11274 err_pf_reset:
11275 iounmap(hw->hw_addr);
11276 err_ioremap:
11277 kfree(pf);
11278 err_pf_alloc:
11279 pci_disable_pcie_error_reporting(pdev);
11280 pci_release_selected_regions(pdev,
11281 pci_select_bars(pdev, IORESOURCE_MEM));
11282 err_pci_reg:
11283 err_dma:
11284 pci_disable_device(pdev);
11285 return err;
11286 }
11287
11288 /**
11289 * i40e_remove - Device removal routine
11290 * @pdev: PCI device information struct
11291 *
11292 * i40e_remove is called by the PCI subsystem to alert the driver
11293 * that it should release a PCI device. This could be caused by a
11294 * Hot-Plug event, or because the driver is going to be removed from
11295 * memory.
11296 **/
11297 static void i40e_remove(struct pci_dev *pdev)
11298 {
11299 struct i40e_pf *pf = pci_get_drvdata(pdev);
11300 struct i40e_hw *hw = &pf->hw;
11301 i40e_status ret_code;
11302 int i;
11303
11304 i40e_dbg_pf_exit(pf);
11305
11306 i40e_ptp_stop(pf);
11307
11308 /* Disable RSS in hw */
11309 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
11310 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
11311
11312 /* no more scheduling of any task */
11313 set_bit(__I40E_SUSPENDED, &pf->state);
11314 set_bit(__I40E_DOWN, &pf->state);
11315 del_timer_sync(&pf->service_timer);
11316 cancel_work_sync(&pf->service_task);
11317
11318 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
11319 i40e_free_vfs(pf);
11320 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
11321 }
11322
11323 i40e_fdir_teardown(pf);
11324
11325 /* If there is a switch structure or any orphans, remove them.
11326 * This will leave only the PF's VSI remaining.
11327 */
11328 for (i = 0; i < I40E_MAX_VEB; i++) {
11329 if (!pf->veb[i])
11330 continue;
11331
11332 if (pf->veb[i]->uplink_seid == pf->mac_seid ||
11333 pf->veb[i]->uplink_seid == 0)
11334 i40e_switch_branch_release(pf->veb[i]);
11335 }
11336
11337 /* Now we can shutdown the PF's VSI, just before we kill
11338 * adminq and hmc.
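 * (i40e_vsi_release() also unregisters and frees the netdev and frees
 * the VSI's rings and q_vectors; see that function above.)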
	/* Now we can shutdown the PF's VSI, just before we kill
	 * adminq and hmc.
	 */
	if (pf->vsi[pf->lan_vsi])
		i40e_vsi_release(pf->vsi[pf->lan_vsi]);

	/* remove attached clients */
	ret_code = i40e_lan_del_device(pf);
	if (ret_code)
		dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
			 ret_code);

	/* shutdown and destroy the HMC */
	if (hw->hmc.hmc_obj) {
		ret_code = i40e_shutdown_lan_hmc(hw);
		if (ret_code)
			dev_warn(&pdev->dev,
				 "Failed to destroy the HMC resources: %d\n",
				 ret_code);
	}

	/* shutdown the adminq */
	ret_code = i40e_shutdown_adminq(hw);
	if (ret_code)
		dev_warn(&pdev->dev,
			 "Failed to destroy the Admin Queue resources: %d\n",
			 ret_code);

	/* destroy the locks only once, here */
	mutex_destroy(&hw->aq.arq_mutex);
	mutex_destroy(&hw->aq.asq_mutex);

	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
	i40e_clear_interrupt_scheme(pf);
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i]) {
			i40e_vsi_clear_rings(pf->vsi[i]);
			i40e_vsi_clear(pf->vsi[i]);
			pf->vsi[i] = NULL;
		}
	}

	for (i = 0; i < I40E_MAX_VEB; i++) {
		kfree(pf->veb[i]);
		pf->veb[i] = NULL;
	}

	kfree(pf->qp_pile);
	kfree(pf->vsi);

	iounmap(hw->hw_addr);
	kfree(pf);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
}

/**
 * i40e_pci_error_detected - warning that something funky happened in PCI land
 * @pdev: PCI device information struct
 * @error: the PCI channel state indicating the type of error
 *
 * Called to warn that something happened and the error handling steps
 * are in progress. Allows the driver to quiesce things and be ready
 * for remediation.
 **/
static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
						enum pci_channel_state error)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_info(&pdev->dev, "%s: error %d\n", __func__, error);

	/* shutdown all operations */
	if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
		rtnl_lock();
		i40e_prep_for_reset(pf);
		rtnl_unlock();
	}

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
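
/* For reference: with PCI_ERS_RESULT_NEED_RESET returned above, the AER
 * core drives recovery as error_detected() -> slot_reset() -> resume();
 * the quiesced device is only rebuilt in the resume() callback.
 */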
/**
 * i40e_pci_error_slot_reset - a PCI slot reset just happened
 * @pdev: PCI device information struct
 *
 * Called to find out if the driver can work with the device now that
 * the pci slot has been reset. If a basic connection seems good
 * (registers are readable and have sane content) then return a
 * happy little PCI_ERS_RESULT_xxx.
 **/
static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	int err;
	u32 reg;

	dev_dbg(&pdev->dev, "%s\n", __func__);
	if (pci_enable_device_mem(pdev)) {
		dev_info(&pdev->dev,
			 "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);
		pci_wake_from_d3(pdev, false);

		/* Reading the global reset trigger register doubles as a
		 * sanity check: if no reset is pending and the register is
		 * readable, the device should be usable again.
		 */
		reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		if (reg == 0)
			result = PCI_ERS_RESULT_RECOVERED;
		else
			result = PCI_ERS_RESULT_DISCONNECT;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_info(&pdev->dev,
			 "pci_cleanup_aer_uncorrect_error_status failed 0x%x\n",
			 err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * i40e_pci_error_resume - restart operations after PCI error recovery
 * @pdev: PCI device information struct
 *
 * Called to allow the driver to bring things back up after PCI error
 * and/or reset recovery has finished.
 **/
static void i40e_pci_error_resume(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s\n", __func__);
	if (test_bit(__I40E_SUSPENDED, &pf->state))
		return;

	rtnl_lock();
	i40e_handle_reset_warning(pf);
	rtnl_unlock();
}

/**
 * i40e_shutdown - PCI callback for shutting down
 * @pdev: PCI device information struct
 **/
static void i40e_shutdown(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	set_bit(__I40E_SUSPENDED, &pf->state);
	set_bit(__I40E_DOWN, &pf->state);
	rtnl_lock();
	i40e_prep_for_reset(pf);
	rtnl_unlock();

	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);
	i40e_fdir_teardown(pf);

	/* The service task may have restarted parts of the device between
	 * the first quiesce above and cancel_work_sync(), so prep for
	 * reset once more and re-arm the wake registers afterwards.
	 */
	rtnl_lock();
	i40e_prep_for_reset(pf);
	rtnl_unlock();

	wr32(hw, I40E_PFPM_APM,
	     (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC,
	     (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	i40e_clear_interrupt_scheme(pf);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, pf->wol_en);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_PM
/**
 * i40e_suspend - PCI callback for moving to D3
 * @pdev: PCI device information struct
 * @state: the PM target state; unused, we always go to D3hot
 **/
static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	set_bit(__I40E_SUSPENDED, &pf->state);
	set_bit(__I40E_DOWN, &pf->state);

	rtnl_lock();
	i40e_prep_for_reset(pf);
	rtnl_unlock();

	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
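
	/* With the wake registers armed above (APM wake plus the
	 * magic-packet wake filter when WoL is enabled, going by the
	 * register and bit names), tell the PCI core whether this device
	 * may wake the system, then drop to D3hot.
	 */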
	pci_wake_from_d3(pdev, pf->wol_en);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

/**
 * i40e_resume - PCI callback for waking up from D3
 * @pdev: PCI device information struct
 **/
static int i40e_resume(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* pci_restore_state() clears dev->state_saved, so
	 * call pci_save_state() again to restore it.
	 */
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	/* no wakeup events while running */
	pci_wake_from_d3(pdev, false);

	/* handling the reset will rebuild the device state */
	if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
		clear_bit(__I40E_DOWN, &pf->state);
		rtnl_lock();
		i40e_reset_and_rebuild(pf, false);
		rtnl_unlock();
	}

	return 0;
}

#endif /* CONFIG_PM */

static const struct pci_error_handlers i40e_err_handler = {
	.error_detected = i40e_pci_error_detected,
	.slot_reset = i40e_pci_error_slot_reset,
	.resume = i40e_pci_error_resume,
};

static struct pci_driver i40e_driver = {
	.name = i40e_driver_name,
	.id_table = i40e_pci_tbl,
	.probe = i40e_probe,
	.remove = i40e_remove,
#ifdef CONFIG_PM
	.suspend = i40e_suspend,
	.resume = i40e_resume,
#endif
	.shutdown = i40e_shutdown,
	.err_handler = &i40e_err_handler,
	.sriov_configure = i40e_pci_sriov_configure,
};

/**
 * i40e_init_module - Driver registration routine
 *
 * i40e_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init i40e_init_module(void)
{
	int err;

	pr_info("%s: %s - version %s\n", i40e_driver_name,
		i40e_driver_string, i40e_driver_version_str);
	pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);

	/* We will see if a single-threaded workqueue per module is enough
	 * for now; it can't be any worse than the shared system workqueue
	 * that this work used before.
	 */
	i40e_wq = create_singlethread_workqueue(i40e_driver_name);
	if (!i40e_wq) {
		pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
		return -ENOMEM;
	}

	i40e_dbg_init();
	err = pci_register_driver(&i40e_driver);
	if (err) {
		/* don't leak the workqueue or debugfs entries on failure */
		i40e_dbg_exit();
		destroy_workqueue(i40e_wq);
	}
	return err;
}
module_init(i40e_init_module);

/**
 * i40e_exit_module - Driver exit cleanup routine
 *
 * i40e_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit i40e_exit_module(void)
{
	pci_unregister_driver(&i40e_driver);
	destroy_workqueue(i40e_wq);
	i40e_dbg_exit();
}
module_exit(i40e_exit_module);