// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include <linux/net/intel/libie/rx.h>

#include "iavf.h"
#include "iavf_prototype.h"
/* All iavf tracepoints are defined by the include below, which must
 * be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "iavf_trace.h"

static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
static int iavf_close(struct net_device *netdev);
static void iavf_init_get_resources(struct iavf_adapter *adapter);
static int iavf_check_reset_complete(struct iavf_hw *hw);

char iavf_driver_name[] = "iavf";
static const char iavf_driver_string[] =
	"Intel(R) Ethernet Adaptive Virtual Function Network Driver";

static const char iavf_copyright[] =
	"Copyright (c) 2013 - 2018 Intel Corporation.";

/* iavf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id iavf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);

MODULE_ALIAS("i40evf");
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
MODULE_IMPORT_NS(LIBETH);
MODULE_IMPORT_NS(LIBIE);
MODULE_LICENSE("GPL v2");

static const struct net_device_ops iavf_netdev_ops;

int iavf_status_to_errno(enum iavf_status status)
{
	switch (status) {
	case IAVF_SUCCESS:
		return 0;
	case IAVF_ERR_PARAM:
	case IAVF_ERR_MAC_TYPE:
	case IAVF_ERR_INVALID_MAC_ADDR:
	case IAVF_ERR_INVALID_LINK_SETTINGS:
	case IAVF_ERR_INVALID_PD_ID:
	case IAVF_ERR_INVALID_QP_ID:
	case IAVF_ERR_INVALID_CQ_ID:
	case IAVF_ERR_INVALID_CEQ_ID:
	case IAVF_ERR_INVALID_AEQ_ID:
	case IAVF_ERR_INVALID_SIZE:
	case IAVF_ERR_INVALID_ARP_INDEX:
	case IAVF_ERR_INVALID_FPM_FUNC_ID:
	case IAVF_ERR_QP_INVALID_MSG_SIZE:
	case IAVF_ERR_INVALID_FRAG_COUNT:
	case IAVF_ERR_INVALID_ALIGNMENT:
	case IAVF_ERR_INVALID_PUSH_PAGE_INDEX:
	case IAVF_ERR_INVALID_IMM_DATA_SIZE:
	case IAVF_ERR_INVALID_VF_ID:
	case IAVF_ERR_INVALID_HMCFN_ID:
	case IAVF_ERR_INVALID_PBLE_INDEX:
	case IAVF_ERR_INVALID_SD_INDEX:
	case IAVF_ERR_INVALID_PAGE_DESC_INDEX:
	case IAVF_ERR_INVALID_SD_TYPE:
	case IAVF_ERR_INVALID_HMC_OBJ_INDEX:
	case IAVF_ERR_INVALID_HMC_OBJ_COUNT:
	case IAVF_ERR_INVALID_SRQ_ARM_LIMIT:
		return -EINVAL;
	case IAVF_ERR_NVM:
	case IAVF_ERR_NVM_CHECKSUM:
	case IAVF_ERR_PHY:
	case IAVF_ERR_CONFIG:
	case IAVF_ERR_UNKNOWN_PHY:
	case IAVF_ERR_LINK_SETUP:
	case IAVF_ERR_ADAPTER_STOPPED:
	case IAVF_ERR_PRIMARY_REQUESTS_PENDING:
	case IAVF_ERR_AUTONEG_NOT_COMPLETE:
	case IAVF_ERR_RESET_FAILED:
	case IAVF_ERR_BAD_PTR:
	case IAVF_ERR_SWFW_SYNC:
	case IAVF_ERR_QP_TOOMANY_WRS_POSTED:
	case IAVF_ERR_QUEUE_EMPTY:
	case IAVF_ERR_FLUSHED_QUEUE:
	case IAVF_ERR_OPCODE_MISMATCH:
	case IAVF_ERR_CQP_COMPL_ERROR:
	case IAVF_ERR_BACKING_PAGE_ERROR:
	case IAVF_ERR_NO_PBLCHUNKS_AVAILABLE:
	case IAVF_ERR_MEMCPY_FAILED:
	case IAVF_ERR_SRQ_ENABLED:
	case IAVF_ERR_ADMIN_QUEUE_ERROR:
	case IAVF_ERR_ADMIN_QUEUE_FULL:
	case IAVF_ERR_BAD_RDMA_CQE:
	case IAVF_ERR_NVM_BLANK_MODE:
	case IAVF_ERR_PE_DOORBELL_NOT_ENABLED:
	case IAVF_ERR_DIAG_TEST_FAILED:
	case IAVF_ERR_FIRMWARE_API_VERSION:
	case IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
		return -EIO;
	case IAVF_ERR_DEVICE_NOT_SUPPORTED:
		return -ENODEV;
	case IAVF_ERR_NO_AVAILABLE_VSI:
	case IAVF_ERR_RING_FULL:
		return -ENOSPC;
	case IAVF_ERR_NO_MEMORY:
		return -ENOMEM;
	case IAVF_ERR_TIMEOUT:
	case IAVF_ERR_ADMIN_QUEUE_TIMEOUT:
		return -ETIMEDOUT;
	case IAVF_ERR_NOT_IMPLEMENTED:
	case IAVF_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case IAVF_ERR_ADMIN_QUEUE_NO_WORK:
		return -EALREADY;
	case IAVF_ERR_NOT_READY:
		return -EBUSY;
	case IAVF_ERR_BUF_TOO_SHORT:
		return -EMSGSIZE;
	}

	return -EIO;
}

int virtchnl_status_to_errno(enum virtchnl_status_code v_status)
{
	switch (v_status) {
	case VIRTCHNL_STATUS_SUCCESS:
		return 0;
	case VIRTCHNL_STATUS_ERR_PARAM:
	case VIRTCHNL_STATUS_ERR_INVALID_VF_ID:
		return -EINVAL;
	case VIRTCHNL_STATUS_ERR_NO_MEMORY:
		return -ENOMEM;
	case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH:
	case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR:
	case VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR:
		return -EIO;
	case VIRTCHNL_STATUS_ERR_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	}

	return -EIO;
}

/**
 * iavf_pdev_to_adapter - go from pci_dev to adapter
 * @pdev: pci_dev pointer
 */
static struct iavf_adapter *iavf_pdev_to_adapter(struct pci_dev *pdev)
{
	return netdev_priv(pci_get_drvdata(pdev));
}

/**
 * iavf_is_reset_in_progress - Check if a reset is in progress
 * @adapter: board private structure
 */
static bool iavf_is_reset_in_progress(struct iavf_adapter *adapter)
{
	if (adapter->state == __IAVF_RESETTING ||
	    adapter->flags & (IAVF_FLAG_RESET_PENDING |
			      IAVF_FLAG_RESET_NEEDED))
		return true;

	return false;
}

/**
 * iavf_wait_for_reset - Wait for reset to finish.
 * @adapter: board private structure
 *
 * Returns 0 if reset finished successfully, negative on timeout or interrupt.
 */
int iavf_wait_for_reset(struct iavf_adapter *adapter)
{
	int ret = wait_event_interruptible_timeout(adapter->reset_waitqueue,
						   !iavf_is_reset_in_progress(adapter),
						   msecs_to_jiffies(5000));

	/* If ret < 0 then it means wait was interrupted.
	 * If ret == 0 then it means we got a timeout while waiting
	 * for reset to finish.
	 * If ret > 0 it means reset has finished.
	 */
	if (ret > 0)
		return 0;
	else if (ret < 0)
		return -EINTR;
	else
		return -EBUSY;
}

/**
 * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
					 struct iavf_dma_mem *mem,
					 u64 size, u32 alignment)
{
	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

	if (!mem)
		return IAVF_ERR_PARAM;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
				     (dma_addr_t *)&mem->pa, GFP_KERNEL);
	if (mem->va)
		return 0;
	else
		return IAVF_ERR_NO_MEMORY;
}

/**
 * iavf_free_dma_mem - wrapper for DMA memory freeing
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
enum iavf_status iavf_free_dma_mem(struct iavf_hw *hw, struct iavf_dma_mem *mem)
{
	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

	if (!mem || !mem->va)
		return IAVF_ERR_PARAM;
	dma_free_coherent(&adapter->pdev->dev, mem->size,
			  mem->va, (dma_addr_t)mem->pa);
	return 0;
}

/**
 * iavf_allocate_virt_mem - virt memory alloc wrapper
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/
enum iavf_status iavf_allocate_virt_mem(struct iavf_hw *hw,
					struct iavf_virt_mem *mem, u32 size)
{
	if (!mem)
		return IAVF_ERR_PARAM;

	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (mem->va)
		return 0;
	else
		return IAVF_ERR_NO_MEMORY;
}

/**
 * iavf_free_virt_mem - virt memory free wrapper
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
void iavf_free_virt_mem(struct iavf_hw *hw, struct iavf_virt_mem *mem)
{
	kfree(mem->va);
}

/**
 * iavf_schedule_reset - Set the flags and schedule a reset event
 * @adapter: board private structure
 * @flags: IAVF_FLAG_RESET_PENDING or IAVF_FLAG_RESET_NEEDED
 **/
void iavf_schedule_reset(struct iavf_adapter *adapter, u64 flags)
{
	if (!test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section) &&
	    !(adapter->flags &
	      (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
		adapter->flags |= flags;
		queue_work(adapter->wq, &adapter->reset_task);
	}
}

/**
 * iavf_schedule_aq_request - Set the flags and schedule aq request
 * @adapter: board private structure
 * @flags: requested aq flags
 **/
void iavf_schedule_aq_request(struct iavf_adapter *adapter, u64 flags)
{
	adapter->aq_required |= flags;
	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
}

/**
 * iavf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: queue number that is timing out
 **/
static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	adapter->tx_timeout_count++;
	iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
}

/**
 * iavf_misc_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void iavf_misc_irq_disable(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	wr32(hw, IAVF_VFINT_DYN_CTL01, 0);

	iavf_flush(hw);

	synchronize_irq(adapter->msix_entries[0].vector);
}

/**
 * iavf_misc_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void iavf_misc_irq_enable(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK |
				       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
	wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);

	iavf_flush(hw);
}

/**
 * iavf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void iavf_irq_disable(struct iavf_adapter *adapter)
{
	int i;
	struct iavf_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0);
		synchronize_irq(adapter->msix_entries[i].vector);
	}
	iavf_flush(hw);
}

/**
 * iavf_irq_enable_queues - Enable interrupt for all queues
 * @adapter: board private structure
 **/
static void iavf_irq_enable_queues(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	int i;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
		     IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
		     IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
	}
}

/**
 * iavf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 * @flush: boolean value whether to run rd32()
 **/
void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
{
	struct iavf_hw *hw = &adapter->hw;

	iavf_misc_irq_enable(adapter);
	iavf_irq_enable_queues(adapter);

	if (flush)
		iavf_flush(hw);
}

/**
 * iavf_msix_aq - Interrupt handler for vector 0
 * @irq: interrupt number
 * @data: pointer to netdev
 **/
static irqreturn_t iavf_msix_aq(int irq, void *data)
{
	struct net_device *netdev = data;
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_hw *hw = &adapter->hw;

	/* handle non-queue interrupts, these reads clear the registers */
	rd32(hw, IAVF_VFINT_ICR01);
	rd32(hw, IAVF_VFINT_ICR0_ENA1);

	if (adapter->state != __IAVF_REMOVE)
		/* schedule work on the private workqueue */
		queue_work(adapter->wq, &adapter->adminq_task);

	return IRQ_HANDLED;
}

/**
 * iavf_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t iavf_msix_clean_rings(int irq, void *data)
{
	struct iavf_q_vector *q_vector = data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * iavf_map_vector_to_rxq - associate irqs with rx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @r_idx: queue number
 **/
static void
iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
{
	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx];
	struct iavf_hw *hw = &adapter->hw;

	rx_ring->q_vector = q_vector;
	rx_ring->next = q_vector->rx.ring;
	rx_ring->vsi = &adapter->vsi;
	q_vector->rx.ring = rx_ring;
	q_vector->rx.count++;
	q_vector->rx.next_update = jiffies + 1;
	q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
	q_vector->ring_mask |= BIT(r_idx);
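	/* Editorial note as a sketch of intent, not taken from this file: the
	 * ITRN1 interval registers appear to count in 2 usec units, which is
	 * why the ITR value kept in usecs is shifted right by one before the
	 * write below.
	 */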
	wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
	     q_vector->rx.current_itr >> 1);
	q_vector->rx.current_itr = q_vector->rx.target_itr;
}

/**
 * iavf_map_vector_to_txq - associate irqs with tx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @t_idx: queue number
 **/
static void
iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
{
	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx];
	struct iavf_hw *hw = &adapter->hw;

	tx_ring->q_vector = q_vector;
	tx_ring->next = q_vector->tx.ring;
	tx_ring->vsi = &adapter->vsi;
	q_vector->tx.ring = tx_ring;
	q_vector->tx.count++;
	q_vector->tx.next_update = jiffies + 1;
	q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
	q_vector->num_ringpairs++;
	wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
	     q_vector->tx.target_itr >> 1);
	q_vector->tx.current_itr = q_vector->tx.target_itr;
}

/**
 * iavf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code. Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible. You would add new
 * mapping configurations in here.
 **/
static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
{
	int rings_remaining = adapter->num_active_queues;
	int ridx = 0, vidx = 0;
	int q_vectors;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (; ridx < rings_remaining; ridx++) {
		iavf_map_vector_to_rxq(adapter, vidx, ridx);
		iavf_map_vector_to_txq(adapter, vidx, ridx);

		/* In the case where we have more queues than vectors, continue
		 * round-robin on vectors until all queues are mapped.
		 */
		if (++vidx >= q_vectors)
			vidx = 0;
	}

	adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
}

/**
 * iavf_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 **/
static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify,
				     const cpumask_t *mask)
{
	struct iavf_q_vector *q_vector =
		container_of(notify, struct iavf_q_vector, affinity_notify);

	cpumask_copy(&q_vector->affinity_mask, mask);
}

/**
 * iavf_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 **/
static void iavf_irq_affinity_release(struct kref *ref) {}

/**
 * iavf_request_traffic_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 * @basename: device basename
 *
 * Allocates MSI-X vectors for tx and rx handling, and requests
 * interrupts from the kernel.
 **/
static int
iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
{
	unsigned int vector, q_vectors;
	unsigned int rx_int_idx = 0, tx_int_idx = 0;
	int irq_num, err;
	int cpu;

	iavf_irq_disable(adapter);
	/* Decrement for Other and TCP Timer vectors */
	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		struct iavf_q_vector *q_vector = &adapter->q_vectors[vector];

		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-TxRx-%u", basename, rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-rx-%u", basename, rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-tx-%u", basename, tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(irq_num,
				  iavf_msix_clean_rings,
				  0,
				  q_vector->name,
				  q_vector);
		if (err) {
			dev_info(&adapter->pdev->dev,
				 "Request_irq failed, error: %d\n", err);
			goto free_queue_irqs;
		}
		/* register for affinity change notifications */
		q_vector->affinity_notify.notify = iavf_irq_affinity_notify;
		q_vector->affinity_notify.release =
						iavf_irq_affinity_release;
		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
		/* Spread the IRQ affinity hints across online CPUs. Note that
		 * get_cpu_mask returns a mask with a permanent lifetime so
		 * it's safe to use as a hint for irq_update_affinity_hint.
		 */
		cpu = cpumask_local_spread(q_vector->v_idx, -1);
		irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_update_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
	return err;
}

/**
 * iavf_request_misc_irq - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
 * vector is only for the admin queue, and stays active even when the netdev
 * is closed.
 **/
static int iavf_request_misc_irq(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	snprintf(adapter->misc_vector_name,
		 sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx",
		 dev_name(&adapter->pdev->dev));
	err = request_irq(adapter->msix_entries[0].vector,
			  &iavf_msix_aq, 0,
			  adapter->misc_vector_name, netdev);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"request_irq for %s failed: %d\n",
			adapter->misc_vector_name, err);
		free_irq(adapter->msix_entries[0].vector, netdev);
	}
	return err;
}

/**
 * iavf_free_traffic_irqs - Free MSI-X interrupts
 * @adapter: board private structure
 *
 * Frees all MSI-X vectors other than 0.
 **/
static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
{
	int vector, irq_num, q_vectors;

	if (!adapter->msix_entries)
		return;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_update_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
}

/**
 * iavf_free_misc_irq - Free MSI-X miscellaneous vector
 * @adapter: board private structure
 *
 * Frees MSI-X vector 0.
 **/
static void iavf_free_misc_irq(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (!adapter->msix_entries)
		return;

	free_irq(adapter->msix_entries[0].vector, netdev);
}

/**
 * iavf_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void iavf_configure_tx(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	int i;

	for (i = 0; i < adapter->num_active_queues; i++)
		adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i);
}

/**
 * iavf_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void iavf_configure_rx(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	for (u32 i = 0; i < adapter->num_active_queues; i++)
		adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
}

/**
 * iavf_find_vlan - Search filter list for specific vlan filter
 * @adapter: board private structure
 * @vlan: vlan tag
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/
static struct
iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter,
				 struct iavf_vlan vlan)
{
	struct iavf_vlan_filter *f;

	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->vlan.vid == vlan.vid &&
		    f->vlan.tpid == vlan.tpid)
			return f;
	}

	return NULL;
}

/**
 * iavf_add_vlan - Add a vlan filter to the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
static struct
iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
				struct iavf_vlan vlan)
{
	struct iavf_vlan_filter *f = NULL;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_vlan(adapter, vlan);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			goto clearout;

		f->vlan = vlan;

		list_add_tail(&f->list, &adapter->vlan_filter_list);
		f->state = IAVF_VLAN_ADD;
		adapter->num_vlan_filters++;
		iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_VLAN_FILTER);
	}

clearout:
	spin_unlock_bh(&adapter->mac_vlan_list_lock);
	return f;
}

/**
 * iavf_del_vlan - Remove a vlan filter from the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 **/
static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
{
	struct iavf_vlan_filter *f;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_vlan(adapter, vlan);
	if (f) {
		f->state = IAVF_VLAN_REMOVE;
		iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_VLAN_FILTER);
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);
}

/**
 * iavf_restore_filters
 * @adapter: board private structure
 *
 * Restore existing non MAC filters when VF netdev comes back up
 **/
static void iavf_restore_filters(struct iavf_adapter *adapter)
{
	struct iavf_vlan_filter *f;

	/* re-add all VLAN filters */
	spin_lock_bh(&adapter->mac_vlan_list_lock);

	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->state == IAVF_VLAN_INACTIVE)
			f->state = IAVF_VLAN_ADD;
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);
	adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
}

/**
 * iavf_get_num_vlans_added - get number of VLANs added
 * @adapter: board private structure
 */
u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter)
{
	return adapter->num_vlan_filters;
}

/**
 * iavf_get_max_vlans_allowed - get maximum VLANs allowed for this VF
 * @adapter: board private structure
 *
 * This depends on the negotiated VLAN capability. For VIRTCHNL_VF_OFFLOAD_VLAN,
 * do not impose a limit as that maintains current behavior and for
 * VIRTCHNL_VF_OFFLOAD_VLAN_V2, use the maximum allowed sent from the PF.
 **/
static u16 iavf_get_max_vlans_allowed(struct iavf_adapter *adapter)
{
	/* don't impose any limit for VIRTCHNL_VF_OFFLOAD_VLAN since there has
	 * never been a limit on the VF driver side
	 */
	if (VLAN_ALLOWED(adapter))
		return VLAN_N_VID;
	else if (VLAN_V2_ALLOWED(adapter))
		return adapter->vlan_v2_caps.filtering.max_filters;

	return 0;
}

/**
 * iavf_max_vlans_added - check if maximum VLANs allowed already exist
 * @adapter: board private structure
 **/
static bool iavf_max_vlans_added(struct iavf_adapter *adapter)
{
	if (iavf_get_num_vlans_added(adapter) <
	    iavf_get_max_vlans_allowed(adapter))
		return false;

	return true;
}

/**
 * iavf_vlan_rx_add_vid - Add a VLAN filter to a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 **/
static int iavf_vlan_rx_add_vid(struct net_device *netdev,
				__always_unused __be16 proto, u16 vid)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	/* Do not track VLAN 0 filter, always added by the PF on VF init */
	if (!vid)
		return 0;

	if (!VLAN_FILTERING_ALLOWED(adapter))
		return -EIO;

	if (iavf_max_vlans_added(adapter)) {
		netdev_err(netdev, "Max allowed VLAN filters %u. Remove existing VLANs or disable filtering via Ethtool if supported.\n",
			   iavf_get_max_vlans_allowed(adapter));
		return -EIO;
	}

	if (!iavf_add_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto))))
		return -ENOMEM;

	return 0;
}

/**
 * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 **/
static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
				 __always_unused __be16 proto, u16 vid)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	/* We do not track VLAN 0 filter */
	if (!vid)
		return 0;

	iavf_del_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto)));
	return 0;
}

/**
 * iavf_find_filter - Search filter list for specific mac filter
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/
static struct
iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,
				  const u8 *macaddr)
{
	struct iavf_mac_filter *f;

	if (!macaddr)
		return NULL;

	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (ether_addr_equal(macaddr, f->macaddr))
			return f;
	}
	return NULL;
}
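/* Orienting note (editorial): MAC filter changes are only staged on
 * mac_filter_list here, under mac_vlan_list_lock, and flagged via
 * aq_required; the watchdog task later sends the corresponding virtchnl
 * add/del messages to the PF.
 */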
/**
 * iavf_add_filter - Add a mac filter to the filter list
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
					const u8 *macaddr)
{
	struct iavf_mac_filter *f;

	if (!macaddr)
		return NULL;

	f = iavf_find_filter(adapter, macaddr);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return f;

		ether_addr_copy(f->macaddr, macaddr);

		list_add_tail(&f->list, &adapter->mac_filter_list);
		f->add = true;
		f->add_handled = false;
		f->is_new_mac = true;
		f->is_primary = ether_addr_equal(macaddr, adapter->hw.mac.addr);
		adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
	} else {
		f->remove = false;
	}

	return f;
}

/**
 * iavf_replace_primary_mac - Replace current primary address
 * @adapter: board private structure
 * @new_mac: new MAC address to be applied
 *
 * Replace current dev_addr and send request to PF for removal of previous
 * primary MAC address filter and addition of new primary MAC filter.
 * Return 0 for success, -ENOMEM for failure.
 *
 * Do not call this with mac_vlan_list_lock!
 **/
static int iavf_replace_primary_mac(struct iavf_adapter *adapter,
				    const u8 *new_mac)
{
	struct iavf_hw *hw = &adapter->hw;
	struct iavf_mac_filter *new_f;
	struct iavf_mac_filter *old_f;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	new_f = iavf_add_filter(adapter, new_mac);
	if (!new_f) {
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return -ENOMEM;
	}

	old_f = iavf_find_filter(adapter, hw->mac.addr);
	if (old_f) {
		old_f->is_primary = false;
		old_f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
	}
	/* Always send the request to add if changing primary MAC,
	 * even if filter is already present on the list
	 */
	new_f->is_primary = true;
	new_f->add = true;
	ether_addr_copy(hw->mac.addr, new_mac);

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	/* schedule the watchdog task to immediately process the request */
	iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_MAC_FILTER);
	return 0;
}

/**
 * iavf_is_mac_set_handled - wait for a response to set MAC from PF
 * @netdev: network interface device structure
 * @macaddr: MAC address to set
 *
 * Returns true on success, false on failure
 */
static bool iavf_is_mac_set_handled(struct net_device *netdev,
				    const u8 *macaddr)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_mac_filter *f;
	bool ret = false;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_filter(adapter, macaddr);

	if (!f || (!f->add && f->add_handled))
		ret = true;

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	return ret;
}

/**
 * iavf_set_mac - NDO callback to set port MAC address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 */
static int iavf_set_mac(struct net_device *netdev, void *p)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int ret;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = iavf_replace_primary_mac(adapter, addr->sa_data);

	if (ret)
		return ret;

	ret = wait_event_interruptible_timeout(adapter->vc_waitqueue,
					       iavf_is_mac_set_handled(netdev, addr->sa_data),
					       msecs_to_jiffies(2500));

	/* If ret < 0 then it means wait was interrupted.
	 * If ret == 0 then it means we got a timeout.
	 * else it means we got response for set MAC from PF,
	 * check if netdev MAC was updated to requested MAC,
	 * if yes then set MAC succeeded otherwise it failed return -EACCES
	 */
	if (ret < 0)
		return ret;

	if (!ret)
		return -EAGAIN;

	if (!ether_addr_equal(netdev->dev_addr, addr->sa_data))
		return -EACCES;

	return 0;
}

/**
 * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 */
static int iavf_addr_sync(struct net_device *netdev, const u8 *addr)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	if (iavf_add_filter(adapter, addr))
		return 0;
	else
		return -ENOMEM;
}

/**
 * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
 * @netdev: the netdevice
 * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 */
static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_mac_filter *f;

	/* Under some circumstances, we might receive a request to delete
	 * our own device address from our uc list. Because we store the
	 * device address in the VSI's MAC/VLAN filter list, we need to ignore
	 * such requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	f = iavf_find_filter(adapter, addr);
	if (f) {
		f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
	}
	return 0;
}

/**
 * iavf_promiscuous_mode_changed - check if promiscuous mode bits changed
 * @adapter: device specific adapter
 */
bool iavf_promiscuous_mode_changed(struct iavf_adapter *adapter)
{
	return (adapter->current_netdev_promisc_flags ^ adapter->netdev->flags) &
		(IFF_PROMISC | IFF_ALLMULTI);
}

/**
 * iavf_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
static void iavf_set_rx_mode(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	spin_lock_bh(&adapter->mac_vlan_list_lock);
	__dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
	__dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	spin_lock_bh(&adapter->current_netdev_promisc_flags_lock);
	if (iavf_promiscuous_mode_changed(adapter))
		adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
	spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
}

/**
 * iavf_napi_enable_all - enable NAPI on all queue vectors
 * @adapter: board private structure
 **/
static void iavf_napi_enable_all(struct iavf_adapter *adapter)
{
	int q_idx;
	struct iavf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		struct napi_struct *napi;

		q_vector = &adapter->q_vectors[q_idx];
		napi = &q_vector->napi;
		napi_enable(napi);
	}
}

/**
 * iavf_napi_disable_all - disable NAPI on all queue vectors
 * @adapter: board private structure
 **/
static void iavf_napi_disable_all(struct iavf_adapter *adapter)
{
	int q_idx;
	struct iavf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = &adapter->q_vectors[q_idx];
		napi_disable(&q_vector->napi);
	}
}

/**
 * iavf_configure - set up transmit and receive data structures
 * @adapter: board private structure
 **/
static void iavf_configure(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	iavf_set_rx_mode(netdev);

	iavf_configure_tx(adapter);
	iavf_configure_rx(adapter);
	adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES;

	for (i = 0; i < adapter->num_active_queues; i++) {
		struct iavf_ring *ring = &adapter->rx_rings[i];

		iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring));
	}
}
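/* Orienting note (editorial): iavf_configure() only caches tail pointers,
 * fills the Rx buffers and sets IAVF_FLAG_AQ_CONFIGURE_QUEUES; the ring
 * configuration itself is sent to the PF over virtchnl when the watchdog
 * task processes that flag.
 */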
/**
 * iavf_up_complete - Finish the last steps of bringing up a connection
 * @adapter: board private structure
 *
 * Expects to be called while holding crit_lock.
 **/
static void iavf_up_complete(struct iavf_adapter *adapter)
{
	iavf_change_state(adapter, __IAVF_RUNNING);
	clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);

	iavf_napi_enable_all(adapter);

	iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ENABLE_QUEUES);
}

/**
 * iavf_clear_mac_vlan_filters - Remove mac and vlan filters not sent to PF
 * yet and mark others to be removed.
 * @adapter: board private structure
 **/
static void iavf_clear_mac_vlan_filters(struct iavf_adapter *adapter)
{
	struct iavf_vlan_filter *vlf, *vlftmp;
	struct iavf_mac_filter *f, *ftmp;

	spin_lock_bh(&adapter->mac_vlan_list_lock);
	/* clear the sync flag on all filters */
	__dev_uc_unsync(adapter->netdev, NULL);
	__dev_mc_unsync(adapter->netdev, NULL);

	/* remove all MAC filters */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list,
				 list) {
		if (f->add) {
			list_del(&f->list);
			kfree(f);
		} else {
			f->remove = true;
		}
	}

	/* disable all VLAN filters */
	list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
				 list)
		vlf->state = IAVF_VLAN_DISABLE;

	spin_unlock_bh(&adapter->mac_vlan_list_lock);
}

/**
 * iavf_clear_cloud_filters - Remove cloud filters not sent to PF yet and
 * mark others to be removed.
 * @adapter: board private structure
 **/
static void iavf_clear_cloud_filters(struct iavf_adapter *adapter)
{
	struct iavf_cloud_filter *cf, *cftmp;

	/* remove all cloud filters */
	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
				 list) {
		if (cf->add) {
			list_del(&cf->list);
			kfree(cf);
			adapter->num_cloud_filters--;
		} else {
			cf->del = true;
		}
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);
}

/**
 * iavf_clear_fdir_filters - Remove fdir filters not sent to PF yet and mark
 * others to be removed.
 * @adapter: board private structure
 **/
static void iavf_clear_fdir_filters(struct iavf_adapter *adapter)
{
	struct iavf_fdir_fltr *fdir;

	/* remove all Flow Director filters */
	spin_lock_bh(&adapter->fdir_fltr_lock);
	list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
		if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
			/* Cancel a request, keep filter as inactive */
			fdir->state = IAVF_FDIR_FLTR_INACTIVE;
		} else if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING ||
			   fdir->state == IAVF_FDIR_FLTR_ACTIVE) {
			/* Disable filters which are active or have a pending
			 * request to PF to be added
			 */
			fdir->state = IAVF_FDIR_FLTR_DIS_REQUEST;
		}
	}
	spin_unlock_bh(&adapter->fdir_fltr_lock);
}

/**
 * iavf_clear_adv_rss_conf - Remove adv rss conf not sent to PF yet and mark
 * others to be removed.
 * @adapter: board private structure
 **/
static void iavf_clear_adv_rss_conf(struct iavf_adapter *adapter)
{
	struct iavf_adv_rss *rss, *rsstmp;

	/* remove all advanced RSS configuration */
	spin_lock_bh(&adapter->adv_rss_lock);
	list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
				 list) {
		if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) {
			list_del(&rss->list);
			kfree(rss);
		} else {
			rss->state = IAVF_ADV_RSS_DEL_REQUEST;
		}
	}
	spin_unlock_bh(&adapter->adv_rss_lock);
}

/**
 * iavf_down - Shutdown the connection processing
 * @adapter: board private structure
 *
 * Expects to be called while holding crit_lock.
 **/
void iavf_down(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->state <= __IAVF_DOWN_PENDING)
		return;

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);
	adapter->link_up = false;
	iavf_napi_disable_all(adapter);
	iavf_irq_disable(adapter);

	iavf_clear_mac_vlan_filters(adapter);
	iavf_clear_cloud_filters(adapter);
	iavf_clear_fdir_filters(adapter);
	iavf_clear_adv_rss_conf(adapter);

	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
		return;

	if (!test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) {
		/* cancel any current operation */
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		/* Schedule operations to close down the HW. Don't wait
		 * here for this to complete. The watchdog is still running
		 * and it will take care of this.
		 */
		if (!list_empty(&adapter->mac_filter_list))
			adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
		if (!list_empty(&adapter->vlan_filter_list))
			adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
		if (!list_empty(&adapter->cloud_filter_list))
			adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
		if (!list_empty(&adapter->fdir_list_head))
			adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
		if (!list_empty(&adapter->adv_rss_list_head))
			adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
	}

	iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DISABLE_QUEUES);
}

/**
 * iavf_acquire_msix_vectors - Setup the MSIX capability
 * @adapter: board private structure
 * @vectors: number of vectors to request
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Returns 0 on success, negative on failure
 **/
static int
iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 3 (vector_threshold):
	 * 0) Other (Admin Queue and link, mostly)
	 * 1) TxQ[0] Cleanup
	 * 2) RxQ[0] Cleanup
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
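	/* Orienting note (editorial): pci_enable_msix_range() returns the
	 * number of vectors actually granted (at least vector_threshold) or
	 * a negative errno; the granted count becomes num_msix_vectors below.
	 */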
	err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
				    vector_threshold, vectors);
	if (err < 0) {
		dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
		return err;
	}

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_msix_q_vectors + NONQ_VECS, or the number of
	 * vectors we were allocated.
	 */
	adapter->num_msix_vectors = err;
	return 0;
}

/**
 * iavf_free_queues - Free memory for all rings
 * @adapter: board private structure to initialize
 *
 * Free all of the memory associated with queue pairs.
 **/
static void iavf_free_queues(struct iavf_adapter *adapter)
{
	if (!adapter->vsi_res)
		return;
	adapter->num_active_queues = 0;
	kfree(adapter->tx_rings);
	adapter->tx_rings = NULL;
	kfree(adapter->rx_rings);
	adapter->rx_rings = NULL;
}

/**
 * iavf_set_queue_vlan_tag_loc - set location for VLAN tag offload
 * @adapter: board private structure
 *
 * Based on negotiated capabilities, the VLAN tag needs to be inserted and/or
 * stripped in certain descriptor fields. Instead of checking the offload
 * capability bits in the hot path, cache the location in the ring specific
 * flags.
 */
void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_active_queues; i++) {
		struct iavf_ring *tx_ring = &adapter->tx_rings[i];
		struct iavf_ring *rx_ring = &adapter->rx_rings[i];

		/* prevent multiple L2TAG bits being set after VFR */
		tx_ring->flags &=
			~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
			  IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2);
		rx_ring->flags &=
			~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
			  IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2);

		if (VLAN_ALLOWED(adapter)) {
			tx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
			rx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
		} else if (VLAN_V2_ALLOWED(adapter)) {
			struct virtchnl_vlan_supported_caps *stripping_support;
			struct virtchnl_vlan_supported_caps *insertion_support;

			stripping_support =
				&adapter->vlan_v2_caps.offloads.stripping_support;
			insertion_support =
				&adapter->vlan_v2_caps.offloads.insertion_support;

			if (stripping_support->outer) {
				if (stripping_support->outer &
				    VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
					rx_ring->flags |=
						IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
				else if (stripping_support->outer &
					 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
					rx_ring->flags |=
						IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
			} else if (stripping_support->inner) {
				if (stripping_support->inner &
				    VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
					rx_ring->flags |=
						IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
				else if (stripping_support->inner &
					 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
					rx_ring->flags |=
						IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
			}

			if (insertion_support->outer) {
				if (insertion_support->outer &
				    VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
					tx_ring->flags |=
						IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
				else if (insertion_support->outer &
					 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
					tx_ring->flags |=
						IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
			} else if (insertion_support->inner) {
				if (insertion_support->inner &
				    VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
					tx_ring->flags |=
						IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
				else if (insertion_support->inner &
					 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
					tx_ring->flags |=
						IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
			}
		}
	}
}

/**
 * iavf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time. The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int iavf_alloc_queues(struct iavf_adapter *adapter)
{
	int i, num_active_queues;

	/* If we're in reset reallocating queues we don't actually know yet for
	 * certain the PF gave us the number of queues we asked for but we'll
	 * assume it did. Once basic reset is finished we'll confirm once we
	 * start negotiating config with PF.
	 */
	if (adapter->num_req_queues)
		num_active_queues = adapter->num_req_queues;
	else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
		 adapter->num_tc)
		num_active_queues = adapter->ch_config.total_qps;
	else
		num_active_queues = min_t(int,
					  adapter->vsi_res->num_queue_pairs,
					  (int)(num_online_cpus()));

	adapter->tx_rings = kcalloc(num_active_queues,
				    sizeof(struct iavf_ring), GFP_KERNEL);
	if (!adapter->tx_rings)
		goto err_out;
	adapter->rx_rings = kcalloc(num_active_queues,
				    sizeof(struct iavf_ring), GFP_KERNEL);
	if (!adapter->rx_rings)
		goto err_out;

	for (i = 0; i < num_active_queues; i++) {
		struct iavf_ring *tx_ring;
		struct iavf_ring *rx_ring;

		tx_ring = &adapter->tx_rings[i];

		tx_ring->queue_index = i;
		tx_ring->netdev = adapter->netdev;
		tx_ring->dev = &adapter->pdev->dev;
		tx_ring->count = adapter->tx_desc_count;
		tx_ring->itr_setting = IAVF_ITR_TX_DEF;
		if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE)
			tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR;

		rx_ring = &adapter->rx_rings[i];
		rx_ring->queue_index = i;
		rx_ring->netdev = adapter->netdev;
		rx_ring->count = adapter->rx_desc_count;
		rx_ring->itr_setting = IAVF_ITR_RX_DEF;
	}

	adapter->num_active_queues = num_active_queues;

	iavf_set_queue_vlan_tag_loc(adapter);

	return 0;

err_out:
	iavf_free_queues(adapter);
	return -ENOMEM;
}

/**
 * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int iavf_set_interrupt_capability(struct iavf_adapter *adapter)
{
	int vector, v_budget;
	int pairs = 0;
	int err = 0;

	if (!adapter->vsi_res) {
		err = -EIO;
		goto out;
	}
	pairs = adapter->num_active_queues;

	/* It's easy to be greedy for MSI-X vectors, but it really doesn't do
	 * us much good if we have more vectors than CPUs. However, we already
	 * limit the total number of queues by the number of CPUs so we do not
	 * need any further limiting here.
	 */
	v_budget = min_t(int, pairs + NONQ_VECS,
			 (int)adapter->vf_res->max_vectors);

	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto out;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	err = iavf_acquire_msix_vectors(adapter, v_budget);
	if (!err)
		iavf_schedule_finish_config(adapter);

out:
	return err;
}

/**
 * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int iavf_config_rss_aq(struct iavf_adapter *adapter)
{
	struct iavf_aqc_get_set_rss_key_data *rss_key =
		(struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key;
	struct iavf_hw *hw = &adapter->hw;
	enum iavf_status status;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
			adapter->current_op);
		return -EBUSY;
	}

	status = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
	if (status) {
		dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
			iavf_stat_str(hw, status),
			iavf_aq_str(hw, hw->aq.asq_last_status));
		return iavf_status_to_errno(status);
	}

	status = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
				     adapter->rss_lut, adapter->rss_lut_size);
	if (status) {
		dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
			iavf_stat_str(hw, status),
			iavf_aq_str(hw, hw->aq.asq_last_status));
		return iavf_status_to_errno(status);
	}

	return 0;
}

/**
 * iavf_config_rss_reg - Configure RSS keys and lut by writing registers
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_config_rss_reg(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	u32 *dw;
	u16 i;

	dw = (u32 *)adapter->rss_key;
	for (i = 0; i <= adapter->rss_key_size / 4; i++)
		wr32(hw, IAVF_VFQF_HKEY(i), dw[i]);

	dw = (u32 *)adapter->rss_lut;
	for (i = 0; i <= adapter->rss_lut_size / 4; i++)
		wr32(hw, IAVF_VFQF_HLUT(i), dw[i]);

	iavf_flush(hw);

	return 0;
}

/**
 * iavf_config_rss - Configure RSS keys and lut
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
int iavf_config_rss(struct iavf_adapter *adapter)
{
	if (RSS_PF(adapter)) {
		adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT |
					IAVF_FLAG_AQ_SET_RSS_KEY;
		return 0;
	} else if (RSS_AQ(adapter)) {
		return iavf_config_rss_aq(adapter);
	} else {
		return iavf_config_rss_reg(adapter);
	}
}
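/* Orienting note (editorial): which path iavf_config_rss() takes depends on
 * the negotiated capabilities: RSS_PF defers key/LUT programming to the PF
 * via virtchnl, RSS_AQ uses admin queue commands, and otherwise the VF
 * writes the RSS registers directly.
 */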
/**
 * iavf_fill_rss_lut - Fill the lut with default values
 * @adapter: board private structure
 **/
static void iavf_fill_rss_lut(struct iavf_adapter *adapter)
{
	u16 i;

	for (i = 0; i < adapter->rss_lut_size; i++)
		adapter->rss_lut[i] = i % adapter->num_active_queues;
}

/**
 * iavf_init_rss - Prepare for RSS
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int iavf_init_rss(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	if (!RSS_PF(adapter)) {
		/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
		if (adapter->vf_res->vf_cap_flags &
		    VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
			adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED;
		else
			adapter->hena = IAVF_DEFAULT_RSS_HENA;

		wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena);
		wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32));
	}

	iavf_fill_rss_lut(adapter);
	netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);

	return iavf_config_rss(adapter);
}

/**
 * iavf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 **/
static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
{
	int q_idx = 0, num_q_vectors;
	struct iavf_q_vector *q_vector;

	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
	adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
				     GFP_KERNEL);
	if (!adapter->q_vectors)
		return -ENOMEM;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		q_vector = &adapter->q_vectors[q_idx];
		q_vector->adapter = adapter;
		q_vector->vsi = &adapter->vsi;
		q_vector->v_idx = q_idx;
		q_vector->reg_idx = q_idx;
		cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
		netif_napi_add(adapter->netdev, &q_vector->napi,
			       iavf_napi_poll);
	}

	return 0;
}

/**
 * iavf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void iavf_free_q_vectors(struct iavf_adapter *adapter)
{
	int q_idx, num_q_vectors;

	if (!adapter->q_vectors)
		return;

	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];

		netif_napi_del(&q_vector->napi);
	}
	kfree(adapter->q_vectors);
	adapter->q_vectors = NULL;
}

/**
 * iavf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 *
 **/
static void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
{
	if (!adapter->msix_entries)
		return;

	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}

/**
 * iavf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 *
 **/
static int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
{
	int err;

	err = iavf_alloc_queues(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	err = iavf_set_interrupt_capability(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = iavf_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	/* If we've made it so far while ADq flag being ON, then we haven't
	 * bailed out anywhere in middle. And ADq isn't just enabled but actual
	 * resources have been allocated in the reset path.
	 * Now we can truly claim that ADq is enabled.
	 */
	if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
	    adapter->num_tc)
		dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created",
			 adapter->num_tc);

	dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
		 (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
		 adapter->num_active_queues);

	return 0;
err_alloc_q_vectors:
	iavf_reset_interrupt_capability(adapter);
err_set_interrupt:
	iavf_free_queues(adapter);
err_alloc_queues:
	return err;
}

/**
 * iavf_free_interrupt_scheme - Undo what iavf_init_interrupt_scheme does
 * @adapter: board private structure
 **/
static void iavf_free_interrupt_scheme(struct iavf_adapter *adapter)
{
	iavf_free_q_vectors(adapter);
	iavf_reset_interrupt_capability(adapter);
	iavf_free_queues(adapter);
}

/**
 * iavf_free_rss - Free memory used by RSS structs
 * @adapter: board private structure
 **/
static void iavf_free_rss(struct iavf_adapter *adapter)
{
	kfree(adapter->rss_key);
	adapter->rss_key = NULL;

	kfree(adapter->rss_lut);
	adapter->rss_lut = NULL;
}

/**
 * iavf_reinit_interrupt_scheme - Reallocate queues and vectors
 * @adapter: board private structure
 * @running: true if adapter->state == __IAVF_RUNNING
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter, bool running)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (running)
		iavf_free_traffic_irqs(adapter);
	iavf_free_misc_irq(adapter);
	iavf_free_interrupt_scheme(adapter);

	err = iavf_init_interrupt_scheme(adapter);
	if (err)
		goto err;

	netif_tx_stop_all_queues(netdev);

	err = iavf_request_misc_irq(adapter);
	if (err)
		goto err;

	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);

	iavf_map_rings_to_vectors(adapter);
err:
	return err;
}

/**
 * iavf_finish_config - do all netdev work that needs RTNL
 * @work: our work_struct
 *
 * Do work that needs both RTNL and crit_lock.
1968 **/ 1969 static void iavf_finish_config(struct work_struct *work) 1970 { 1971 struct iavf_adapter *adapter; 1972 int pairs, err; 1973 1974 adapter = container_of(work, struct iavf_adapter, finish_config); 1975 1976 /* Always take RTNL first to prevent circular lock dependency */ 1977 rtnl_lock(); 1978 mutex_lock(&adapter->crit_lock); 1979 1980 if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) && 1981 adapter->netdev->reg_state == NETREG_REGISTERED && 1982 !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) { 1983 netdev_update_features(adapter->netdev); 1984 adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES; 1985 } 1986 1987 switch (adapter->state) { 1988 case __IAVF_DOWN: 1989 if (adapter->netdev->reg_state != NETREG_REGISTERED) { 1990 err = register_netdevice(adapter->netdev); 1991 if (err) { 1992 dev_err(&adapter->pdev->dev, "Unable to register netdev (%d)\n", 1993 err); 1994 1995 /* go back and try again. */ 1996 iavf_free_rss(adapter); 1997 iavf_free_misc_irq(adapter); 1998 iavf_reset_interrupt_capability(adapter); 1999 iavf_change_state(adapter, 2000 __IAVF_INIT_CONFIG_ADAPTER); 2001 goto out; 2002 } 2003 } 2004 2005 /* Set the real number of queues when reset occurs while 2006 * state == __IAVF_DOWN 2007 */ 2008 fallthrough; 2009 case __IAVF_RUNNING: 2010 pairs = adapter->num_active_queues; 2011 netif_set_real_num_rx_queues(adapter->netdev, pairs); 2012 netif_set_real_num_tx_queues(adapter->netdev, pairs); 2013 break; 2014 2015 default: 2016 break; 2017 } 2018 2019 out: 2020 mutex_unlock(&adapter->crit_lock); 2021 rtnl_unlock(); 2022 } 2023 2024 /** 2025 * iavf_schedule_finish_config - queue the finish_config work 2026 * @adapter: board private structure 2027 **/ 2028 void iavf_schedule_finish_config(struct iavf_adapter *adapter) 2029 { 2030 if (!test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) 2031 queue_work(adapter->wq, &adapter->finish_config); 2032 } 2033 2034 /** 2035 * iavf_process_aq_command - process aq_required flags 2036 * and send an AQ command 2037 * @adapter: pointer to iavf adapter structure 2038 * 2039 * Returns 0 on success, 2040 * an error code if no command was sent, 2041 * or an error code if the command failed.
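 *
 * At most one pending aq_required flag is serviced per call; the watchdog
 * task keeps rescheduling itself while adapter->aq_required is non-zero, so
 * remaining flags are handled on subsequent invocations.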
2042 **/ 2043 static int iavf_process_aq_command(struct iavf_adapter *adapter) 2044 { 2045 if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG) 2046 return iavf_send_vf_config_msg(adapter); 2047 if (adapter->aq_required & IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS) 2048 return iavf_send_vf_offload_vlan_v2_msg(adapter); 2049 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) { 2050 iavf_disable_queues(adapter); 2051 return 0; 2052 } 2053 2054 if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) { 2055 iavf_map_queues(adapter); 2056 return 0; 2057 } 2058 2059 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) { 2060 iavf_add_ether_addrs(adapter); 2061 return 0; 2062 } 2063 2064 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) { 2065 iavf_add_vlans(adapter); 2066 return 0; 2067 } 2068 2069 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) { 2070 iavf_del_ether_addrs(adapter); 2071 return 0; 2072 } 2073 2074 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) { 2075 iavf_del_vlans(adapter); 2076 return 0; 2077 } 2078 2079 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) { 2080 iavf_enable_vlan_stripping(adapter); 2081 return 0; 2082 } 2083 2084 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) { 2085 iavf_disable_vlan_stripping(adapter); 2086 return 0; 2087 } 2088 2089 if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) { 2090 iavf_configure_queues(adapter); 2091 return 0; 2092 } 2093 2094 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) { 2095 iavf_enable_queues(adapter); 2096 return 0; 2097 } 2098 2099 if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) { 2100 /* This message goes straight to the firmware, not the 2101 * PF, so we don't have to set current_op as we will 2102 * not get a response through the ARQ. 
2103 */ 2104 adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS; 2105 return 0; 2106 } 2107 if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) { 2108 iavf_get_hena(adapter); 2109 return 0; 2110 } 2111 if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) { 2112 iavf_set_hena(adapter); 2113 return 0; 2114 } 2115 if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) { 2116 iavf_set_rss_key(adapter); 2117 return 0; 2118 } 2119 if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) { 2120 iavf_set_rss_lut(adapter); 2121 return 0; 2122 } 2123 if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_HFUNC) { 2124 iavf_set_rss_hfunc(adapter); 2125 return 0; 2126 } 2127 2128 if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE) { 2129 iavf_set_promiscuous(adapter); 2130 return 0; 2131 } 2132 2133 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) { 2134 iavf_enable_channels(adapter); 2135 return 0; 2136 } 2137 2138 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) { 2139 iavf_disable_channels(adapter); 2140 return 0; 2141 } 2142 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) { 2143 iavf_add_cloud_filter(adapter); 2144 return 0; 2145 } 2146 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) { 2147 iavf_del_cloud_filter(adapter); 2148 return 0; 2149 } 2150 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) { 2151 iavf_add_fdir_filter(adapter); 2152 return IAVF_SUCCESS; 2153 } 2154 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_FDIR_FILTER) { 2155 iavf_del_fdir_filter(adapter); 2156 return IAVF_SUCCESS; 2157 } 2158 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_ADV_RSS_CFG) { 2159 iavf_add_adv_rss_cfg(adapter); 2160 return 0; 2161 } 2162 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_ADV_RSS_CFG) { 2163 iavf_del_adv_rss_cfg(adapter); 2164 return 0; 2165 } 2166 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING) { 2167 iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021Q); 2168 return 0; 2169 } 2170 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING) { 2171 iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021AD); 2172 return 0; 2173 } 2174 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING) { 2175 iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021Q); 2176 return 0; 2177 } 2178 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING) { 2179 iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021AD); 2180 return 0; 2181 } 2182 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION) { 2183 iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021Q); 2184 return 0; 2185 } 2186 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION) { 2187 iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021AD); 2188 return 0; 2189 } 2190 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION) { 2191 iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021Q); 2192 return 0; 2193 } 2194 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION) { 2195 iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021AD); 2196 return 0; 2197 } 2198 2199 if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) { 2200 iavf_request_stats(adapter); 2201 return 0; 2202 } 2203 2204 return -EAGAIN; 2205 } 2206 2207 /** 2208 * iavf_set_vlan_offload_features - set VLAN offload configuration 2209 * @adapter: board private structure 2210 * @prev_features: previous features used for comparison 2211 * @features: updated features used for configuration 2212 * 2213 * Set the aq_required bit(s) based on the requested features passed in to 2214 * configure 
VLAN stripping and/or VLAN insertion if supported. Also, schedule 2215 * the watchdog if any changes are requested to expedite the request via 2216 * virtchnl. 2217 **/ 2218 static void 2219 iavf_set_vlan_offload_features(struct iavf_adapter *adapter, 2220 netdev_features_t prev_features, 2221 netdev_features_t features) 2222 { 2223 bool enable_stripping = true, enable_insertion = true; 2224 u16 vlan_ethertype = 0; 2225 u64 aq_required = 0; 2226 2227 /* keep cases separate because one ethertype for offloads can be 2228 * disabled at the same time as another is disabled, so check for an 2229 * enabled ethertype first, then check for disabled. Default to 2230 * ETH_P_8021Q so an ethertype is specified if disabling insertion and 2231 * stripping. 2232 */ 2233 if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) 2234 vlan_ethertype = ETH_P_8021AD; 2235 else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) 2236 vlan_ethertype = ETH_P_8021Q; 2237 else if (prev_features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) 2238 vlan_ethertype = ETH_P_8021AD; 2239 else if (prev_features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) 2240 vlan_ethertype = ETH_P_8021Q; 2241 else 2242 vlan_ethertype = ETH_P_8021Q; 2243 2244 if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX))) 2245 enable_stripping = false; 2246 if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX))) 2247 enable_insertion = false; 2248 2249 if (VLAN_ALLOWED(adapter)) { 2250 /* VIRTCHNL_VF_OFFLOAD_VLAN only has support for toggling VLAN 2251 * stripping via virtchnl. VLAN insertion can be toggled on the 2252 * netdev, but it doesn't require a virtchnl message 2253 */ 2254 if (enable_stripping) 2255 aq_required |= IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; 2256 else 2257 aq_required |= IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; 2258 2259 } else if (VLAN_V2_ALLOWED(adapter)) { 2260 switch (vlan_ethertype) { 2261 case ETH_P_8021Q: 2262 if (enable_stripping) 2263 aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING; 2264 else 2265 aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING; 2266 2267 if (enable_insertion) 2268 aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION; 2269 else 2270 aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION; 2271 break; 2272 case ETH_P_8021AD: 2273 if (enable_stripping) 2274 aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING; 2275 else 2276 aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING; 2277 2278 if (enable_insertion) 2279 aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION; 2280 else 2281 aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION; 2282 break; 2283 } 2284 } 2285 2286 if (aq_required) 2287 iavf_schedule_aq_request(adapter, aq_required); 2288 } 2289 2290 /** 2291 * iavf_startup - first step of driver startup 2292 * @adapter: board private structure 2293 * 2294 * Function process __IAVF_STARTUP driver state. 
2295 * When success the state is changed to __IAVF_INIT_VERSION_CHECK 2296 * when fails the state is changed to __IAVF_INIT_FAILED 2297 **/ 2298 static void iavf_startup(struct iavf_adapter *adapter) 2299 { 2300 struct pci_dev *pdev = adapter->pdev; 2301 struct iavf_hw *hw = &adapter->hw; 2302 enum iavf_status status; 2303 int ret; 2304 2305 WARN_ON(adapter->state != __IAVF_STARTUP); 2306 2307 /* driver loaded, probe complete */ 2308 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; 2309 adapter->flags &= ~IAVF_FLAG_RESET_PENDING; 2310 2311 ret = iavf_check_reset_complete(hw); 2312 if (ret) { 2313 dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n", 2314 ret); 2315 goto err; 2316 } 2317 hw->aq.num_arq_entries = IAVF_AQ_LEN; 2318 hw->aq.num_asq_entries = IAVF_AQ_LEN; 2319 hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE; 2320 hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE; 2321 2322 status = iavf_init_adminq(hw); 2323 if (status) { 2324 dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", 2325 status); 2326 goto err; 2327 } 2328 ret = iavf_send_api_ver(adapter); 2329 if (ret) { 2330 dev_err(&pdev->dev, "Unable to send to PF (%d)\n", ret); 2331 iavf_shutdown_adminq(hw); 2332 goto err; 2333 } 2334 iavf_change_state(adapter, __IAVF_INIT_VERSION_CHECK); 2335 return; 2336 err: 2337 iavf_change_state(adapter, __IAVF_INIT_FAILED); 2338 } 2339 2340 /** 2341 * iavf_init_version_check - second step of driver startup 2342 * @adapter: board private structure 2343 * 2344 * Function process __IAVF_INIT_VERSION_CHECK driver state. 2345 * When success the state is changed to __IAVF_INIT_GET_RESOURCES 2346 * when fails the state is changed to __IAVF_INIT_FAILED 2347 **/ 2348 static void iavf_init_version_check(struct iavf_adapter *adapter) 2349 { 2350 struct pci_dev *pdev = adapter->pdev; 2351 struct iavf_hw *hw = &adapter->hw; 2352 int err = -EAGAIN; 2353 2354 WARN_ON(adapter->state != __IAVF_INIT_VERSION_CHECK); 2355 2356 if (!iavf_asq_done(hw)) { 2357 dev_err(&pdev->dev, "Admin queue command never completed\n"); 2358 iavf_shutdown_adminq(hw); 2359 iavf_change_state(adapter, __IAVF_STARTUP); 2360 goto err; 2361 } 2362 2363 /* aq msg sent, awaiting reply */ 2364 err = iavf_verify_api_ver(adapter); 2365 if (err) { 2366 if (err == -EALREADY) 2367 err = iavf_send_api_ver(adapter); 2368 else 2369 dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n", 2370 adapter->pf_version.major, 2371 adapter->pf_version.minor, 2372 VIRTCHNL_VERSION_MAJOR, 2373 VIRTCHNL_VERSION_MINOR); 2374 goto err; 2375 } 2376 err = iavf_send_vf_config_msg(adapter); 2377 if (err) { 2378 dev_err(&pdev->dev, "Unable to send config request (%d)\n", 2379 err); 2380 goto err; 2381 } 2382 iavf_change_state(adapter, __IAVF_INIT_GET_RESOURCES); 2383 return; 2384 err: 2385 iavf_change_state(adapter, __IAVF_INIT_FAILED); 2386 } 2387 2388 /** 2389 * iavf_parse_vf_resource_msg - parse response from VIRTCHNL_OP_GET_VF_RESOURCES 2390 * @adapter: board private structure 2391 */ 2392 int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter) 2393 { 2394 int i, num_req_queues = adapter->num_req_queues; 2395 struct iavf_vsi *vsi = &adapter->vsi; 2396 2397 for (i = 0; i < adapter->vf_res->num_vsis; i++) { 2398 if (adapter->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV) 2399 adapter->vsi_res = &adapter->vf_res->vsi_res[i]; 2400 } 2401 if (!adapter->vsi_res) { 2402 dev_err(&adapter->pdev->dev, "No LAN VSI found\n"); 2403 return -ENODEV; 2404 } 2405 2406 if (num_req_queues && 2407 num_req_queues > adapter->vsi_res->num_queue_pairs) { 
2408 /* Problem. The PF gave us fewer queues than what we had 2409 * negotiated in our request. Need a reset to see if we can't 2410 * get back to a working state. 2411 */ 2412 dev_err(&adapter->pdev->dev, 2413 "Requested %d queues, but PF only gave us %d.\n", 2414 num_req_queues, 2415 adapter->vsi_res->num_queue_pairs); 2416 adapter->flags |= IAVF_FLAG_REINIT_MSIX_NEEDED; 2417 adapter->num_req_queues = adapter->vsi_res->num_queue_pairs; 2418 iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED); 2419 2420 return -EAGAIN; 2421 } 2422 adapter->num_req_queues = 0; 2423 adapter->vsi.id = adapter->vsi_res->vsi_id; 2424 2425 adapter->vsi.back = adapter; 2426 adapter->vsi.base_vector = 1; 2427 vsi->netdev = adapter->netdev; 2428 vsi->qs_handle = adapter->vsi_res->qset_handle; 2429 if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { 2430 adapter->rss_key_size = adapter->vf_res->rss_key_size; 2431 adapter->rss_lut_size = adapter->vf_res->rss_lut_size; 2432 } else { 2433 adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE; 2434 adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE; 2435 } 2436 2437 return 0; 2438 } 2439 2440 /** 2441 * iavf_init_get_resources - third step of driver startup 2442 * @adapter: board private structure 2443 * 2444 * Function process __IAVF_INIT_GET_RESOURCES driver state and 2445 * finishes driver initialization procedure. 2446 * When success the state is changed to __IAVF_DOWN 2447 * when fails the state is changed to __IAVF_INIT_FAILED 2448 **/ 2449 static void iavf_init_get_resources(struct iavf_adapter *adapter) 2450 { 2451 struct pci_dev *pdev = adapter->pdev; 2452 struct iavf_hw *hw = &adapter->hw; 2453 int err; 2454 2455 WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES); 2456 /* aq msg sent, awaiting reply */ 2457 if (!adapter->vf_res) { 2458 adapter->vf_res = kzalloc(IAVF_VIRTCHNL_VF_RESOURCE_SIZE, 2459 GFP_KERNEL); 2460 if (!adapter->vf_res) { 2461 err = -ENOMEM; 2462 goto err; 2463 } 2464 } 2465 err = iavf_get_vf_config(adapter); 2466 if (err == -EALREADY) { 2467 err = iavf_send_vf_config_msg(adapter); 2468 goto err; 2469 } else if (err == -EINVAL) { 2470 /* We only get -EINVAL if the device is in a very bad 2471 * state or if we've been disabled for previous bad 2472 * behavior. Either way, we're done now. 2473 */ 2474 iavf_shutdown_adminq(hw); 2475 dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n"); 2476 return; 2477 } 2478 if (err) { 2479 dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err); 2480 goto err_alloc; 2481 } 2482 2483 err = iavf_parse_vf_resource_msg(adapter); 2484 if (err) { 2485 dev_err(&pdev->dev, "Failed to parse VF resource message from PF (%d)\n", 2486 err); 2487 goto err_alloc; 2488 } 2489 /* Some features require additional messages to negotiate extended 2490 * capabilities. These are processed in sequence by the 2491 * __IAVF_INIT_EXTENDED_CAPS driver state. 2492 */ 2493 adapter->extended_caps = IAVF_EXTENDED_CAPS; 2494 2495 iavf_change_state(adapter, __IAVF_INIT_EXTENDED_CAPS); 2496 return; 2497 2498 err_alloc: 2499 kfree(adapter->vf_res); 2500 adapter->vf_res = NULL; 2501 err: 2502 iavf_change_state(adapter, __IAVF_INIT_FAILED); 2503 } 2504 2505 /** 2506 * iavf_init_send_offload_vlan_v2_caps - part of initializing VLAN V2 caps 2507 * @adapter: board private structure 2508 * 2509 * Function processes send of the extended VLAN V2 capability message to the 2510 * PF. Must clear IAVF_EXTENDED_CAP_RECV_VLAN_V2 if the message is not sent, 2511 * e.g. 
due to PF not negotiating VIRTCHNL_VF_OFFLOAD_VLAN_V2. 2512 */ 2513 static void iavf_init_send_offload_vlan_v2_caps(struct iavf_adapter *adapter) 2514 { 2515 int ret; 2516 2517 WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2)); 2518 2519 ret = iavf_send_vf_offload_vlan_v2_msg(adapter); 2520 if (ret && ret == -EOPNOTSUPP) { 2521 /* PF does not support VIRTCHNL_VF_OFFLOAD_V2. In this case, 2522 * we did not send the capability exchange message and do not 2523 * expect a response. 2524 */ 2525 adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2; 2526 } 2527 2528 /* We sent the message, so move on to the next step */ 2529 adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_VLAN_V2; 2530 } 2531 2532 /** 2533 * iavf_init_recv_offload_vlan_v2_caps - part of initializing VLAN V2 caps 2534 * @adapter: board private structure 2535 * 2536 * Function processes receipt of the extended VLAN V2 capability message from 2537 * the PF. 2538 **/ 2539 static void iavf_init_recv_offload_vlan_v2_caps(struct iavf_adapter *adapter) 2540 { 2541 int ret; 2542 2543 WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2)); 2544 2545 memset(&adapter->vlan_v2_caps, 0, sizeof(adapter->vlan_v2_caps)); 2546 2547 ret = iavf_get_vf_vlan_v2_caps(adapter); 2548 if (ret) 2549 goto err; 2550 2551 /* We've processed receipt of the VLAN V2 caps message */ 2552 adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2; 2553 return; 2554 err: 2555 /* We didn't receive a reply. Make sure we try sending again when 2556 * __IAVF_INIT_FAILED attempts to recover. 2557 */ 2558 adapter->extended_caps |= IAVF_EXTENDED_CAP_SEND_VLAN_V2; 2559 iavf_change_state(adapter, __IAVF_INIT_FAILED); 2560 } 2561 2562 /** 2563 * iavf_init_process_extended_caps - Part of driver startup 2564 * @adapter: board private structure 2565 * 2566 * Function processes __IAVF_INIT_EXTENDED_CAPS driver state. This state 2567 * handles negotiating capabilities for features which require an additional 2568 * message. 2569 * 2570 * Once all extended capabilities exchanges are finished, the driver will 2571 * transition into __IAVF_INIT_CONFIG_ADAPTER. 2572 */ 2573 static void iavf_init_process_extended_caps(struct iavf_adapter *adapter) 2574 { 2575 WARN_ON(adapter->state != __IAVF_INIT_EXTENDED_CAPS); 2576 2577 /* Process capability exchange for VLAN V2 */ 2578 if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2) { 2579 iavf_init_send_offload_vlan_v2_caps(adapter); 2580 return; 2581 } else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2) { 2582 iavf_init_recv_offload_vlan_v2_caps(adapter); 2583 return; 2584 } 2585 2586 /* When we reach here, no further extended capabilities exchanges are 2587 * necessary, so we finally transition into __IAVF_INIT_CONFIG_ADAPTER 2588 */ 2589 iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER); 2590 } 2591 2592 /** 2593 * iavf_init_config_adapter - last part of driver startup 2594 * @adapter: board private structure 2595 * 2596 * After all the supported capabilities are negotiated, then the 2597 * __IAVF_INIT_CONFIG_ADAPTER state will finish driver initialization. 
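 * This covers assigning netdev_ops and ethtool ops, the MTU limits, the MAC
 * address, the default descriptor counts, the interrupt scheme, the misc IRQ
 * and the initial RSS configuration.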
2598 */ 2599 static void iavf_init_config_adapter(struct iavf_adapter *adapter) 2600 { 2601 struct net_device *netdev = adapter->netdev; 2602 struct pci_dev *pdev = adapter->pdev; 2603 int err; 2604 2605 WARN_ON(adapter->state != __IAVF_INIT_CONFIG_ADAPTER); 2606 2607 if (iavf_process_config(adapter)) 2608 goto err; 2609 2610 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 2611 2612 adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED; 2613 2614 netdev->netdev_ops = &iavf_netdev_ops; 2615 iavf_set_ethtool_ops(netdev); 2616 netdev->watchdog_timeo = 5 * HZ; 2617 2618 netdev->min_mtu = ETH_MIN_MTU; 2619 netdev->max_mtu = LIBIE_MAX_MTU; 2620 2621 if (!is_valid_ether_addr(adapter->hw.mac.addr)) { 2622 dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n", 2623 adapter->hw.mac.addr); 2624 eth_hw_addr_random(netdev); 2625 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); 2626 } else { 2627 eth_hw_addr_set(netdev, adapter->hw.mac.addr); 2628 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); 2629 } 2630 2631 adapter->tx_desc_count = IAVF_DEFAULT_TXD; 2632 adapter->rx_desc_count = IAVF_DEFAULT_RXD; 2633 err = iavf_init_interrupt_scheme(adapter); 2634 if (err) 2635 goto err_sw_init; 2636 iavf_map_rings_to_vectors(adapter); 2637 if (adapter->vf_res->vf_cap_flags & 2638 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) 2639 adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE; 2640 2641 err = iavf_request_misc_irq(adapter); 2642 if (err) 2643 goto err_sw_init; 2644 2645 netif_carrier_off(netdev); 2646 adapter->link_up = false; 2647 netif_tx_stop_all_queues(netdev); 2648 2649 dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr); 2650 if (netdev->features & NETIF_F_GRO) 2651 dev_info(&pdev->dev, "GRO is enabled\n"); 2652 2653 iavf_change_state(adapter, __IAVF_DOWN); 2654 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 2655 2656 iavf_misc_irq_enable(adapter); 2657 wake_up(&adapter->down_waitqueue); 2658 2659 adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL); 2660 adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL); 2661 if (!adapter->rss_key || !adapter->rss_lut) { 2662 err = -ENOMEM; 2663 goto err_mem; 2664 } 2665 if (RSS_AQ(adapter)) 2666 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; 2667 else 2668 iavf_init_rss(adapter); 2669 2670 if (VLAN_V2_ALLOWED(adapter)) 2671 /* request initial VLAN offload settings */ 2672 iavf_set_vlan_offload_features(adapter, 0, netdev->features); 2673 2674 iavf_schedule_finish_config(adapter); 2675 return; 2676 2677 err_mem: 2678 iavf_free_rss(adapter); 2679 iavf_free_misc_irq(adapter); 2680 err_sw_init: 2681 iavf_reset_interrupt_capability(adapter); 2682 err: 2683 iavf_change_state(adapter, __IAVF_INIT_FAILED); 2684 } 2685 2686 /** 2687 * iavf_watchdog_task - Periodic call-back task 2688 * @work: pointer to work_struct 2689 **/ 2690 static void iavf_watchdog_task(struct work_struct *work) 2691 { 2692 struct iavf_adapter *adapter = container_of(work, 2693 struct iavf_adapter, 2694 watchdog_task.work); 2695 struct iavf_hw *hw = &adapter->hw; 2696 u32 reg_val; 2697 2698 if (!mutex_trylock(&adapter->crit_lock)) { 2699 if (adapter->state == __IAVF_REMOVE) 2700 return; 2701 2702 goto restart_watchdog; 2703 } 2704 2705 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) 2706 iavf_change_state(adapter, __IAVF_COMM_FAILED); 2707 2708 switch (adapter->state) { 2709 case __IAVF_STARTUP: 2710 iavf_startup(adapter); 2711 mutex_unlock(&adapter->crit_lock); 2712 queue_delayed_work(adapter->wq, &adapter->watchdog_task, 2713 msecs_to_jiffies(30)); 2714 return; 2715 case 
__IAVF_INIT_VERSION_CHECK: 2716 iavf_init_version_check(adapter); 2717 mutex_unlock(&adapter->crit_lock); 2718 queue_delayed_work(adapter->wq, &adapter->watchdog_task, 2719 msecs_to_jiffies(30)); 2720 return; 2721 case __IAVF_INIT_GET_RESOURCES: 2722 iavf_init_get_resources(adapter); 2723 mutex_unlock(&adapter->crit_lock); 2724 queue_delayed_work(adapter->wq, &adapter->watchdog_task, 2725 msecs_to_jiffies(1)); 2726 return; 2727 case __IAVF_INIT_EXTENDED_CAPS: 2728 iavf_init_process_extended_caps(adapter); 2729 mutex_unlock(&adapter->crit_lock); 2730 queue_delayed_work(adapter->wq, &adapter->watchdog_task, 2731 msecs_to_jiffies(1)); 2732 return; 2733 case __IAVF_INIT_CONFIG_ADAPTER: 2734 iavf_init_config_adapter(adapter); 2735 mutex_unlock(&adapter->crit_lock); 2736 queue_delayed_work(adapter->wq, &adapter->watchdog_task, 2737 msecs_to_jiffies(1)); 2738 return; 2739 case __IAVF_INIT_FAILED: 2740 if (test_bit(__IAVF_IN_REMOVE_TASK, 2741 &adapter->crit_section)) { 2742 /* Do not update the state and do not reschedule 2743 * watchdog task, iavf_remove should handle this state 2744 * as it can loop forever 2745 */ 2746 mutex_unlock(&adapter->crit_lock); 2747 return; 2748 } 2749 if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) { 2750 dev_err(&adapter->pdev->dev, 2751 "Failed to communicate with PF; waiting before retry\n"); 2752 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; 2753 iavf_shutdown_adminq(hw); 2754 mutex_unlock(&adapter->crit_lock); 2755 queue_delayed_work(adapter->wq, 2756 &adapter->watchdog_task, (5 * HZ)); 2757 return; 2758 } 2759 /* Try again from failed step*/ 2760 iavf_change_state(adapter, adapter->last_state); 2761 mutex_unlock(&adapter->crit_lock); 2762 queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ); 2763 return; 2764 case __IAVF_COMM_FAILED: 2765 if (test_bit(__IAVF_IN_REMOVE_TASK, 2766 &adapter->crit_section)) { 2767 /* Set state to __IAVF_INIT_FAILED and perform remove 2768 * steps. Remove IAVF_FLAG_PF_COMMS_FAILED so the task 2769 * doesn't bring the state back to __IAVF_COMM_FAILED. 2770 */ 2771 iavf_change_state(adapter, __IAVF_INIT_FAILED); 2772 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; 2773 mutex_unlock(&adapter->crit_lock); 2774 return; 2775 } 2776 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & 2777 IAVF_VFGEN_RSTAT_VFR_STATE_MASK; 2778 if (reg_val == VIRTCHNL_VFR_VFACTIVE || 2779 reg_val == VIRTCHNL_VFR_COMPLETED) { 2780 /* A chance for redemption! */ 2781 dev_err(&adapter->pdev->dev, 2782 "Hardware came out of reset. Attempting reinit.\n"); 2783 /* When init task contacts the PF and 2784 * gets everything set up again, it'll restart the 2785 * watchdog for us. Down, boy. Sit. Stay. Woof. 
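 * (Moving back to __IAVF_STARTUP makes the next watchdog pass run
 * iavf_startup(), which re-initializes the admin queue and resends the
 * API version message.)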
2786 */ 2787 iavf_change_state(adapter, __IAVF_STARTUP); 2788 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; 2789 } 2790 adapter->aq_required = 0; 2791 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 2792 mutex_unlock(&adapter->crit_lock); 2793 queue_delayed_work(adapter->wq, 2794 &adapter->watchdog_task, 2795 msecs_to_jiffies(10)); 2796 return; 2797 case __IAVF_RESETTING: 2798 mutex_unlock(&adapter->crit_lock); 2799 queue_delayed_work(adapter->wq, &adapter->watchdog_task, 2800 HZ * 2); 2801 return; 2802 case __IAVF_DOWN: 2803 case __IAVF_DOWN_PENDING: 2804 case __IAVF_TESTING: 2805 case __IAVF_RUNNING: 2806 if (adapter->current_op) { 2807 if (!iavf_asq_done(hw)) { 2808 dev_dbg(&adapter->pdev->dev, 2809 "Admin queue timeout\n"); 2810 iavf_send_api_ver(adapter); 2811 } 2812 } else { 2813 int ret = iavf_process_aq_command(adapter); 2814 2815 /* An error will be returned if no commands were 2816 * processed; use this opportunity to update stats 2817 * if the error isn't -ENOTSUPP 2818 */ 2819 if (ret && ret != -EOPNOTSUPP && 2820 adapter->state == __IAVF_RUNNING) 2821 iavf_request_stats(adapter); 2822 } 2823 if (adapter->state == __IAVF_RUNNING) 2824 iavf_detect_recover_hung(&adapter->vsi); 2825 break; 2826 case __IAVF_REMOVE: 2827 default: 2828 mutex_unlock(&adapter->crit_lock); 2829 return; 2830 } 2831 2832 /* check for hw reset */ 2833 reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK; 2834 if (!reg_val) { 2835 adapter->aq_required = 0; 2836 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 2837 dev_err(&adapter->pdev->dev, "Hardware reset detected\n"); 2838 iavf_schedule_reset(adapter, IAVF_FLAG_RESET_PENDING); 2839 mutex_unlock(&adapter->crit_lock); 2840 queue_delayed_work(adapter->wq, 2841 &adapter->watchdog_task, HZ * 2); 2842 return; 2843 } 2844 2845 mutex_unlock(&adapter->crit_lock); 2846 restart_watchdog: 2847 if (adapter->state >= __IAVF_DOWN) 2848 queue_work(adapter->wq, &adapter->adminq_task); 2849 if (adapter->aq_required) 2850 queue_delayed_work(adapter->wq, &adapter->watchdog_task, 2851 msecs_to_jiffies(20)); 2852 else 2853 queue_delayed_work(adapter->wq, &adapter->watchdog_task, 2854 HZ * 2); 2855 } 2856 2857 /** 2858 * iavf_disable_vf - disable VF 2859 * @adapter: board private structure 2860 * 2861 * Set communication failed flag and free all resources. 2862 * NOTE: This function is expected to be called with crit_lock being held. 2863 **/ 2864 static void iavf_disable_vf(struct iavf_adapter *adapter) 2865 { 2866 struct iavf_mac_filter *f, *ftmp; 2867 struct iavf_vlan_filter *fv, *fvtmp; 2868 struct iavf_cloud_filter *cf, *cftmp; 2869 2870 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; 2871 2872 /* We don't use netif_running() because it may be true prior to 2873 * ndo_open() returning, so we can't assume it means all our open 2874 * tasks have finished, since we're not holding the rtnl_lock here. 
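 * Checking adapter->state against __IAVF_RUNNING below is used as the
 * indicator instead.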
2875 */ 2876 if (adapter->state == __IAVF_RUNNING) { 2877 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 2878 netif_carrier_off(adapter->netdev); 2879 netif_tx_disable(adapter->netdev); 2880 adapter->link_up = false; 2881 iavf_napi_disable_all(adapter); 2882 iavf_irq_disable(adapter); 2883 iavf_free_traffic_irqs(adapter); 2884 iavf_free_all_tx_resources(adapter); 2885 iavf_free_all_rx_resources(adapter); 2886 } 2887 2888 spin_lock_bh(&adapter->mac_vlan_list_lock); 2889 2890 /* Delete all of the filters */ 2891 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 2892 list_del(&f->list); 2893 kfree(f); 2894 } 2895 2896 list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) { 2897 list_del(&fv->list); 2898 kfree(fv); 2899 } 2900 adapter->num_vlan_filters = 0; 2901 2902 spin_unlock_bh(&adapter->mac_vlan_list_lock); 2903 2904 spin_lock_bh(&adapter->cloud_filter_list_lock); 2905 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { 2906 list_del(&cf->list); 2907 kfree(cf); 2908 adapter->num_cloud_filters--; 2909 } 2910 spin_unlock_bh(&adapter->cloud_filter_list_lock); 2911 2912 iavf_free_misc_irq(adapter); 2913 iavf_free_interrupt_scheme(adapter); 2914 memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE); 2915 iavf_shutdown_adminq(&adapter->hw); 2916 adapter->flags &= ~IAVF_FLAG_RESET_PENDING; 2917 iavf_change_state(adapter, __IAVF_DOWN); 2918 wake_up(&adapter->down_waitqueue); 2919 dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n"); 2920 } 2921 2922 /** 2923 * iavf_reset_task - Call-back task to handle hardware reset 2924 * @work: pointer to work_struct 2925 * 2926 * During reset we need to shut down and reinitialize the admin queue 2927 * before we can use it to communicate with the PF again. We also clear 2928 * and reinit the rings because that context is lost as well. 2929 **/ 2930 static void iavf_reset_task(struct work_struct *work) 2931 { 2932 struct iavf_adapter *adapter = container_of(work, 2933 struct iavf_adapter, 2934 reset_task); 2935 struct virtchnl_vf_resource *vfres = adapter->vf_res; 2936 struct net_device *netdev = adapter->netdev; 2937 struct iavf_hw *hw = &adapter->hw; 2938 struct iavf_mac_filter *f, *ftmp; 2939 struct iavf_cloud_filter *cf; 2940 enum iavf_status status; 2941 u32 reg_val; 2942 int i = 0, err; 2943 bool running; 2944 2945 /* When device is being removed it doesn't make sense to run the reset 2946 * task, just return in such a case. 2947 */ 2948 if (!mutex_trylock(&adapter->crit_lock)) { 2949 if (adapter->state != __IAVF_REMOVE) 2950 queue_work(adapter->wq, &adapter->reset_task); 2951 2952 return; 2953 } 2954 2955 iavf_misc_irq_disable(adapter); 2956 if (adapter->flags & IAVF_FLAG_RESET_NEEDED) { 2957 adapter->flags &= ~IAVF_FLAG_RESET_NEEDED; 2958 /* Restart the AQ here. If we have been reset but didn't 2959 * detect it, or if the PF had to reinit, our AQ will be hosed. 
2960 */ 2961 iavf_shutdown_adminq(hw); 2962 iavf_init_adminq(hw); 2963 iavf_request_reset(adapter); 2964 } 2965 adapter->flags |= IAVF_FLAG_RESET_PENDING; 2966 2967 /* poll until we see the reset actually happen */ 2968 for (i = 0; i < IAVF_RESET_WAIT_DETECTED_COUNT; i++) { 2969 reg_val = rd32(hw, IAVF_VF_ARQLEN1) & 2970 IAVF_VF_ARQLEN1_ARQENABLE_MASK; 2971 if (!reg_val) 2972 break; 2973 usleep_range(5000, 10000); 2974 } 2975 if (i == IAVF_RESET_WAIT_DETECTED_COUNT) { 2976 dev_info(&adapter->pdev->dev, "Never saw reset\n"); 2977 goto continue_reset; /* act like the reset happened */ 2978 } 2979 2980 /* wait until the reset is complete and the PF is responding to us */ 2981 for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) { 2982 /* sleep first to make sure a minimum wait time is met */ 2983 msleep(IAVF_RESET_WAIT_MS); 2984 2985 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & 2986 IAVF_VFGEN_RSTAT_VFR_STATE_MASK; 2987 if (reg_val == VIRTCHNL_VFR_VFACTIVE) 2988 break; 2989 } 2990 2991 pci_set_master(adapter->pdev); 2992 pci_restore_msi_state(adapter->pdev); 2993 2994 if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) { 2995 dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n", 2996 reg_val); 2997 iavf_disable_vf(adapter); 2998 mutex_unlock(&adapter->crit_lock); 2999 return; /* Do not attempt to reinit. It's dead, Jim. */ 3000 } 3001 3002 continue_reset: 3003 /* We don't use netif_running() because it may be true prior to 3004 * ndo_open() returning, so we can't assume it means all our open 3005 * tasks have finished, since we're not holding the rtnl_lock here. 3006 */ 3007 running = adapter->state == __IAVF_RUNNING; 3008 3009 if (running) { 3010 netif_carrier_off(netdev); 3011 netif_tx_stop_all_queues(netdev); 3012 adapter->link_up = false; 3013 iavf_napi_disable_all(adapter); 3014 } 3015 iavf_irq_disable(adapter); 3016 3017 iavf_change_state(adapter, __IAVF_RESETTING); 3018 adapter->flags &= ~IAVF_FLAG_RESET_PENDING; 3019 3020 /* free the Tx/Rx rings and descriptors, might be better to just 3021 * re-use them sometime in the future 3022 */ 3023 iavf_free_all_rx_resources(adapter); 3024 iavf_free_all_tx_resources(adapter); 3025 3026 adapter->flags |= IAVF_FLAG_QUEUES_DISABLED; 3027 /* kill and reinit the admin queue */ 3028 iavf_shutdown_adminq(hw); 3029 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 3030 status = iavf_init_adminq(hw); 3031 if (status) { 3032 dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n", 3033 status); 3034 goto reset_err; 3035 } 3036 adapter->aq_required = 0; 3037 3038 if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) || 3039 (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) { 3040 err = iavf_reinit_interrupt_scheme(adapter, running); 3041 if (err) 3042 goto reset_err; 3043 } 3044 3045 if (RSS_AQ(adapter)) { 3046 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; 3047 } else { 3048 err = iavf_init_rss(adapter); 3049 if (err) 3050 goto reset_err; 3051 } 3052 3053 adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG; 3054 /* always set since VIRTCHNL_OP_GET_VF_RESOURCES has not been 3055 * sent/received yet, so VLAN_V2_ALLOWED() is not reliable here, 3056 * however the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS won't be sent until 3057 * VIRTCHNL_OP_GET_VF_RESOURCES and VIRTCHNL_VF_OFFLOAD_VLAN_V2 have 3058 * been successfully sent and negotiated 3059 */ 3060 adapter->aq_required |= IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS; 3061 adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS; 3062 3063 spin_lock_bh(&adapter->mac_vlan_list_lock); 3064 3065 /* Delete filter for the current MAC
address, it could have 3066 * been changed by the PF via administratively set MAC. 3067 * Will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES. 3068 */ 3069 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 3070 if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) { 3071 list_del(&f->list); 3072 kfree(f); 3073 } 3074 } 3075 /* re-add all MAC filters */ 3076 list_for_each_entry(f, &adapter->mac_filter_list, list) { 3077 f->add = true; 3078 } 3079 spin_unlock_bh(&adapter->mac_vlan_list_lock); 3080 3081 /* check if TCs are running and re-add all cloud filters */ 3082 spin_lock_bh(&adapter->cloud_filter_list_lock); 3083 if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && 3084 adapter->num_tc) { 3085 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 3086 cf->add = true; 3087 } 3088 } 3089 spin_unlock_bh(&adapter->cloud_filter_list_lock); 3090 3091 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; 3092 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; 3093 iavf_misc_irq_enable(adapter); 3094 3095 mod_delayed_work(adapter->wq, &adapter->watchdog_task, 2); 3096 3097 /* We were running when the reset started, so we need to restore some 3098 * state here. 3099 */ 3100 if (running) { 3101 /* allocate transmit descriptors */ 3102 err = iavf_setup_all_tx_resources(adapter); 3103 if (err) 3104 goto reset_err; 3105 3106 /* allocate receive descriptors */ 3107 err = iavf_setup_all_rx_resources(adapter); 3108 if (err) 3109 goto reset_err; 3110 3111 if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) || 3112 (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) { 3113 err = iavf_request_traffic_irqs(adapter, netdev->name); 3114 if (err) 3115 goto reset_err; 3116 3117 adapter->flags &= ~IAVF_FLAG_REINIT_MSIX_NEEDED; 3118 } 3119 3120 iavf_configure(adapter); 3121 3122 /* iavf_up_complete() will switch device back 3123 * to __IAVF_RUNNING 3124 */ 3125 iavf_up_complete(adapter); 3126 3127 iavf_irq_enable(adapter, true); 3128 } else { 3129 iavf_change_state(adapter, __IAVF_DOWN); 3130 wake_up(&adapter->down_waitqueue); 3131 } 3132 3133 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; 3134 3135 wake_up(&adapter->reset_waitqueue); 3136 mutex_unlock(&adapter->crit_lock); 3137 3138 return; 3139 reset_err: 3140 if (running) { 3141 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 3142 iavf_free_traffic_irqs(adapter); 3143 } 3144 iavf_disable_vf(adapter); 3145 3146 mutex_unlock(&adapter->crit_lock); 3147 dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); 3148 } 3149 3150 /** 3151 * iavf_adminq_task - worker thread to clean the admin queue 3152 * @work: pointer to work_struct containing our data 3153 **/ 3154 static void iavf_adminq_task(struct work_struct *work) 3155 { 3156 struct iavf_adapter *adapter = 3157 container_of(work, struct iavf_adapter, adminq_task); 3158 struct iavf_hw *hw = &adapter->hw; 3159 struct iavf_arq_event_info event; 3160 enum virtchnl_ops v_op; 3161 enum iavf_status ret, v_ret; 3162 u32 val, oldval; 3163 u16 pending; 3164 3165 if (!mutex_trylock(&adapter->crit_lock)) { 3166 if (adapter->state == __IAVF_REMOVE) 3167 return; 3168 3169 queue_work(adapter->wq, &adapter->adminq_task); 3170 goto out; 3171 } 3172 3173 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) 3174 goto unlock; 3175 3176 event.buf_len = IAVF_MAX_AQ_BUF_SIZE; 3177 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); 3178 if (!event.msg_buf) 3179 goto unlock; 3180 3181 do { 3182 ret = iavf_clean_arq_element(hw, &event, &pending); 3183 v_op = (enum 
virtchnl_ops)le32_to_cpu(event.desc.cookie_high); 3184 v_ret = (enum iavf_status)le32_to_cpu(event.desc.cookie_low); 3185 3186 if (ret || !v_op) 3187 break; /* No event to process or error cleaning ARQ */ 3188 3189 iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf, 3190 event.msg_len); 3191 if (pending != 0) 3192 memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE); 3193 } while (pending); 3194 3195 if (iavf_is_reset_in_progress(adapter)) 3196 goto freedom; 3197 3198 /* check for error indications */ 3199 val = rd32(hw, IAVF_VF_ARQLEN1); 3200 if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */ 3201 goto freedom; 3202 oldval = val; 3203 if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) { 3204 dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n"); 3205 val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK; 3206 } 3207 if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) { 3208 dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n"); 3209 val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK; 3210 } 3211 if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) { 3212 dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n"); 3213 val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK; 3214 } 3215 if (oldval != val) 3216 wr32(hw, IAVF_VF_ARQLEN1, val); 3217 3218 val = rd32(hw, IAVF_VF_ATQLEN1); 3219 oldval = val; 3220 if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) { 3221 dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n"); 3222 val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK; 3223 } 3224 if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) { 3225 dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n"); 3226 val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK; 3227 } 3228 if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) { 3229 dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n"); 3230 val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK; 3231 } 3232 if (oldval != val) 3233 wr32(hw, IAVF_VF_ATQLEN1, val); 3234 3235 freedom: 3236 kfree(event.msg_buf); 3237 unlock: 3238 mutex_unlock(&adapter->crit_lock); 3239 out: 3240 /* re-enable Admin queue interrupt cause */ 3241 iavf_misc_irq_enable(adapter); 3242 } 3243 3244 /** 3245 * iavf_free_all_tx_resources - Free Tx Resources for All Queues 3246 * @adapter: board private structure 3247 * 3248 * Free all transmit software resources 3249 **/ 3250 void iavf_free_all_tx_resources(struct iavf_adapter *adapter) 3251 { 3252 int i; 3253 3254 if (!adapter->tx_rings) 3255 return; 3256 3257 for (i = 0; i < adapter->num_active_queues; i++) 3258 if (adapter->tx_rings[i].desc) 3259 iavf_free_tx_resources(&adapter->tx_rings[i]); 3260 } 3261 3262 /** 3263 * iavf_setup_all_tx_resources - allocate all queues Tx resources 3264 * @adapter: board private structure 3265 * 3266 * If this function returns with an error, then it's possible one or 3267 * more of the rings is populated (while the rest are not). It is the 3268 * callers duty to clean those orphaned rings. 
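 * iavf_free_all_tx_resources() above can be used for that cleanup, since it
 * skips rings whose descriptors were never allocated.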
3269 * 3270 * Return 0 on success, negative on failure 3271 **/ 3272 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter) 3273 { 3274 int i, err = 0; 3275 3276 for (i = 0; i < adapter->num_active_queues; i++) { 3277 adapter->tx_rings[i].count = adapter->tx_desc_count; 3278 err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]); 3279 if (!err) 3280 continue; 3281 dev_err(&adapter->pdev->dev, 3282 "Allocation for Tx Queue %u failed\n", i); 3283 break; 3284 } 3285 3286 return err; 3287 } 3288 3289 /** 3290 * iavf_setup_all_rx_resources - allocate all queues Rx resources 3291 * @adapter: board private structure 3292 * 3293 * If this function returns with an error, then it's possible one or 3294 * more of the rings is populated (while the rest are not). It is the 3295 * callers duty to clean those orphaned rings. 3296 * 3297 * Return 0 on success, negative on failure 3298 **/ 3299 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter) 3300 { 3301 int i, err = 0; 3302 3303 for (i = 0; i < adapter->num_active_queues; i++) { 3304 adapter->rx_rings[i].count = adapter->rx_desc_count; 3305 err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]); 3306 if (!err) 3307 continue; 3308 dev_err(&adapter->pdev->dev, 3309 "Allocation for Rx Queue %u failed\n", i); 3310 break; 3311 } 3312 return err; 3313 } 3314 3315 /** 3316 * iavf_free_all_rx_resources - Free Rx Resources for All Queues 3317 * @adapter: board private structure 3318 * 3319 * Free all receive software resources 3320 **/ 3321 void iavf_free_all_rx_resources(struct iavf_adapter *adapter) 3322 { 3323 int i; 3324 3325 if (!adapter->rx_rings) 3326 return; 3327 3328 for (i = 0; i < adapter->num_active_queues; i++) 3329 if (adapter->rx_rings[i].desc) 3330 iavf_free_rx_resources(&adapter->rx_rings[i]); 3331 } 3332 3333 /** 3334 * iavf_validate_tx_bandwidth - validate the max Tx bandwidth 3335 * @adapter: board private structure 3336 * @max_tx_rate: max Tx bw for a tc 3337 **/ 3338 static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter, 3339 u64 max_tx_rate) 3340 { 3341 int speed = 0, ret = 0; 3342 3343 if (ADV_LINK_SUPPORT(adapter)) { 3344 if (adapter->link_speed_mbps < U32_MAX) { 3345 speed = adapter->link_speed_mbps; 3346 goto validate_bw; 3347 } else { 3348 dev_err(&adapter->pdev->dev, "Unknown link speed\n"); 3349 return -EINVAL; 3350 } 3351 } 3352 3353 switch (adapter->link_speed) { 3354 case VIRTCHNL_LINK_SPEED_40GB: 3355 speed = SPEED_40000; 3356 break; 3357 case VIRTCHNL_LINK_SPEED_25GB: 3358 speed = SPEED_25000; 3359 break; 3360 case VIRTCHNL_LINK_SPEED_20GB: 3361 speed = SPEED_20000; 3362 break; 3363 case VIRTCHNL_LINK_SPEED_10GB: 3364 speed = SPEED_10000; 3365 break; 3366 case VIRTCHNL_LINK_SPEED_5GB: 3367 speed = SPEED_5000; 3368 break; 3369 case VIRTCHNL_LINK_SPEED_2_5GB: 3370 speed = SPEED_2500; 3371 break; 3372 case VIRTCHNL_LINK_SPEED_1GB: 3373 speed = SPEED_1000; 3374 break; 3375 case VIRTCHNL_LINK_SPEED_100MB: 3376 speed = SPEED_100; 3377 break; 3378 default: 3379 break; 3380 } 3381 3382 validate_bw: 3383 if (max_tx_rate > speed) { 3384 dev_err(&adapter->pdev->dev, 3385 "Invalid tx rate specified\n"); 3386 ret = -EINVAL; 3387 } 3388 3389 return ret; 3390 } 3391 3392 /** 3393 * iavf_validate_ch_config - validate queue mapping info 3394 * @adapter: board private structure 3395 * @mqprio_qopt: queue parameters 3396 * 3397 * This function validates if the config provided by the user to 3398 * configure queue channels is valid or not. Returns 0 on a valid 3399 * config. 
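 *
 * Illustrative example (hypothetical values): with num_tc = 2,
 * count = {4, 4} and offset = {0, 4}, num_qps adds up to 8 and must not
 * exceed num_active_queues; each non-zero max_rate is converted to Mbps
 * and must be at least IAVF_MBPS_QUANTA and a whole multiple of it.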
3400 **/ 3401 static int iavf_validate_ch_config(struct iavf_adapter *adapter, 3402 struct tc_mqprio_qopt_offload *mqprio_qopt) 3403 { 3404 u64 total_max_rate = 0; 3405 u32 tx_rate_rem = 0; 3406 int i, num_qps = 0; 3407 u64 tx_rate = 0; 3408 int ret = 0; 3409 3410 if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS || 3411 mqprio_qopt->qopt.num_tc < 1) 3412 return -EINVAL; 3413 3414 for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) { 3415 if (!mqprio_qopt->qopt.count[i] || 3416 mqprio_qopt->qopt.offset[i] != num_qps) 3417 return -EINVAL; 3418 if (mqprio_qopt->min_rate[i]) { 3419 dev_err(&adapter->pdev->dev, 3420 "Invalid min tx rate (greater than 0) specified for TC%d\n", 3421 i); 3422 return -EINVAL; 3423 } 3424 3425 /* convert to Mbps */ 3426 tx_rate = div_u64(mqprio_qopt->max_rate[i], 3427 IAVF_MBPS_DIVISOR); 3428 3429 if (mqprio_qopt->max_rate[i] && 3430 tx_rate < IAVF_MBPS_QUANTA) { 3431 dev_err(&adapter->pdev->dev, 3432 "Invalid max tx rate for TC%d, minimum %dMbps\n", 3433 i, IAVF_MBPS_QUANTA); 3434 return -EINVAL; 3435 } 3436 3437 (void)div_u64_rem(tx_rate, IAVF_MBPS_QUANTA, &tx_rate_rem); 3438 3439 if (tx_rate_rem != 0) { 3440 dev_err(&adapter->pdev->dev, 3441 "Invalid max tx rate for TC%d, not divisible by %d\n", 3442 i, IAVF_MBPS_QUANTA); 3443 return -EINVAL; 3444 } 3445 3446 total_max_rate += tx_rate; 3447 num_qps += mqprio_qopt->qopt.count[i]; 3448 } 3449 if (num_qps > adapter->num_active_queues) { 3450 dev_err(&adapter->pdev->dev, 3451 "Cannot support requested number of queues\n"); 3452 return -EINVAL; 3453 } 3454 3455 ret = iavf_validate_tx_bandwidth(adapter, total_max_rate); 3456 return ret; 3457 } 3458 3459 /** 3460 * iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes 3461 * @adapter: board private structure 3462 **/ 3463 static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter) 3464 { 3465 struct iavf_cloud_filter *cf, *cftmp; 3466 3467 spin_lock_bh(&adapter->cloud_filter_list_lock); 3468 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, 3469 list) { 3470 list_del(&cf->list); 3471 kfree(cf); 3472 adapter->num_cloud_filters--; 3473 } 3474 spin_unlock_bh(&adapter->cloud_filter_list_lock); 3475 } 3476 3477 /** 3478 * iavf_is_tc_config_same - Compare the mqprio TC config with the 3479 * TC config already configured on this adapter. 3480 * @adapter: board private structure 3481 * @mqprio_qopt: TC config received from kernel. 3482 * 3483 * This function compares the TC config received from the kernel 3484 * with the config already configured on the adapter. 3485 * 3486 * Return: True if configuration is same, false otherwise. 3487 **/ 3488 static bool iavf_is_tc_config_same(struct iavf_adapter *adapter, 3489 struct tc_mqprio_qopt *mqprio_qopt) 3490 { 3491 struct virtchnl_channel_info *ch = &adapter->ch_config.ch_info[0]; 3492 int i; 3493 3494 if (adapter->num_tc != mqprio_qopt->num_tc) 3495 return false; 3496 3497 for (i = 0; i < adapter->num_tc; i++) { 3498 if (ch[i].count != mqprio_qopt->count[i] || 3499 ch[i].offset != mqprio_qopt->offset[i]) 3500 return false; 3501 } 3502 return true; 3503 } 3504 3505 /** 3506 * __iavf_setup_tc - configure multiple traffic classes 3507 * @netdev: network interface device structure 3508 * @type_data: tc offload data 3509 * 3510 * This function processes the config information provided by the 3511 * user to configure traffic classes/queue channels and packages the 3512 * information to request the PF to setup traffic classes. 3513 * 3514 * Returns 0 on success. 
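 *
 * A request of this kind typically originates from a tc mqprio command in
 * channel mode, e.g. (illustrative only, device name and rates are
 * placeholders):
 *   tc qdisc add dev <vf-netdev> root mqprio num_tc 2 map 0 0 0 0 1 1 1 1 \
 *      queues 4@0 4@4 hw 1 mode channel shaper bw_rlimit max_rate 1Gbit 2Gbit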
3515 **/ 3516 static int __iavf_setup_tc(struct net_device *netdev, void *type_data) 3517 { 3518 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; 3519 struct iavf_adapter *adapter = netdev_priv(netdev); 3520 struct virtchnl_vf_resource *vfres = adapter->vf_res; 3521 u8 num_tc = 0, total_qps = 0; 3522 int ret = 0, netdev_tc = 0; 3523 u64 max_tx_rate; 3524 u16 mode; 3525 int i; 3526 3527 num_tc = mqprio_qopt->qopt.num_tc; 3528 mode = mqprio_qopt->mode; 3529 3530 /* delete queue_channel */ 3531 if (!mqprio_qopt->qopt.hw) { 3532 if (adapter->ch_config.state == __IAVF_TC_RUNNING) { 3533 /* reset the tc configuration */ 3534 netdev_reset_tc(netdev); 3535 adapter->num_tc = 0; 3536 netif_tx_stop_all_queues(netdev); 3537 netif_tx_disable(netdev); 3538 iavf_del_all_cloud_filters(adapter); 3539 adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS; 3540 total_qps = adapter->orig_num_active_queues; 3541 goto exit; 3542 } else { 3543 return -EINVAL; 3544 } 3545 } 3546 3547 /* add queue channel */ 3548 if (mode == TC_MQPRIO_MODE_CHANNEL) { 3549 if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) { 3550 dev_err(&adapter->pdev->dev, "ADq not supported\n"); 3551 return -EOPNOTSUPP; 3552 } 3553 if (adapter->ch_config.state != __IAVF_TC_INVALID) { 3554 dev_err(&adapter->pdev->dev, "TC configuration already exists\n"); 3555 return -EINVAL; 3556 } 3557 3558 ret = iavf_validate_ch_config(adapter, mqprio_qopt); 3559 if (ret) 3560 return ret; 3561 /* Return if same TC config is requested */ 3562 if (iavf_is_tc_config_same(adapter, &mqprio_qopt->qopt)) 3563 return 0; 3564 adapter->num_tc = num_tc; 3565 3566 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) { 3567 if (i < num_tc) { 3568 adapter->ch_config.ch_info[i].count = 3569 mqprio_qopt->qopt.count[i]; 3570 adapter->ch_config.ch_info[i].offset = 3571 mqprio_qopt->qopt.offset[i]; 3572 total_qps += mqprio_qopt->qopt.count[i]; 3573 max_tx_rate = mqprio_qopt->max_rate[i]; 3574 /* convert to Mbps */ 3575 max_tx_rate = div_u64(max_tx_rate, 3576 IAVF_MBPS_DIVISOR); 3577 adapter->ch_config.ch_info[i].max_tx_rate = 3578 max_tx_rate; 3579 } else { 3580 adapter->ch_config.ch_info[i].count = 1; 3581 adapter->ch_config.ch_info[i].offset = 0; 3582 } 3583 } 3584 3585 /* Take snapshot of original config such as "num_active_queues" 3586 * It is used later when delete ADQ flow is exercised, so that 3587 * once delete ADQ flow completes, VF shall go back to its 3588 * original queue configuration 3589 */ 3590 3591 adapter->orig_num_active_queues = adapter->num_active_queues; 3592 3593 /* Store queue info based on TC so that VF gets configured 3594 * with correct number of queues when VF completes ADQ config 3595 * flow 3596 */ 3597 adapter->ch_config.total_qps = total_qps; 3598 3599 netif_tx_stop_all_queues(netdev); 3600 netif_tx_disable(netdev); 3601 adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS; 3602 netdev_reset_tc(netdev); 3603 /* Report the tc mapping up the stack */ 3604 netdev_set_num_tc(adapter->netdev, num_tc); 3605 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) { 3606 u16 qcount = mqprio_qopt->qopt.count[i]; 3607 u16 qoffset = mqprio_qopt->qopt.offset[i]; 3608 3609 if (i < num_tc) 3610 netdev_set_tc_queue(netdev, netdev_tc++, qcount, 3611 qoffset); 3612 } 3613 } 3614 exit: 3615 if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) 3616 return 0; 3617 3618 netif_set_real_num_rx_queues(netdev, total_qps); 3619 netif_set_real_num_tx_queues(netdev, total_qps); 3620 3621 return ret; 3622 } 3623 3624 /** 3625 * iavf_parse_cls_flower - Parse tc flower filters 
provided by kernel 3626 * @adapter: board private structure 3627 * @f: pointer to struct flow_cls_offload 3628 * @filter: pointer to cloud filter structure 3629 */ 3630 static int iavf_parse_cls_flower(struct iavf_adapter *adapter, 3631 struct flow_cls_offload *f, 3632 struct iavf_cloud_filter *filter) 3633 { 3634 struct flow_rule *rule = flow_cls_offload_flow_rule(f); 3635 struct flow_dissector *dissector = rule->match.dissector; 3636 u16 n_proto_mask = 0; 3637 u16 n_proto_key = 0; 3638 u8 field_flags = 0; 3639 u16 addr_type = 0; 3640 u16 n_proto = 0; 3641 int i = 0; 3642 struct virtchnl_filter *vf = &filter->f; 3643 3644 if (dissector->used_keys & 3645 ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | 3646 BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | 3647 BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | 3648 BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) | 3649 BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | 3650 BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | 3651 BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) | 3652 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID))) { 3653 dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%llx\n", 3654 dissector->used_keys); 3655 return -EOPNOTSUPP; 3656 } 3657 3658 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) { 3659 struct flow_match_enc_keyid match; 3660 3661 flow_rule_match_enc_keyid(rule, &match); 3662 if (match.mask->keyid != 0) 3663 field_flags |= IAVF_CLOUD_FIELD_TEN_ID; 3664 } 3665 3666 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { 3667 struct flow_match_basic match; 3668 3669 flow_rule_match_basic(rule, &match); 3670 n_proto_key = ntohs(match.key->n_proto); 3671 n_proto_mask = ntohs(match.mask->n_proto); 3672 3673 if (n_proto_key == ETH_P_ALL) { 3674 n_proto_key = 0; 3675 n_proto_mask = 0; 3676 } 3677 n_proto = n_proto_key & n_proto_mask; 3678 if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) 3679 return -EINVAL; 3680 if (n_proto == ETH_P_IPV6) { 3681 /* specify flow type as TCP IPv6 */ 3682 vf->flow_type = VIRTCHNL_TCP_V6_FLOW; 3683 } 3684 3685 if (match.key->ip_proto != IPPROTO_TCP) { 3686 dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n"); 3687 return -EINVAL; 3688 } 3689 } 3690 3691 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 3692 struct flow_match_eth_addrs match; 3693 3694 flow_rule_match_eth_addrs(rule, &match); 3695 3696 /* use is_broadcast and is_zero to check for all 0xf or 0 */ 3697 if (!is_zero_ether_addr(match.mask->dst)) { 3698 if (is_broadcast_ether_addr(match.mask->dst)) { 3699 field_flags |= IAVF_CLOUD_FIELD_OMAC; 3700 } else { 3701 dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n", 3702 match.mask->dst); 3703 return -EINVAL; 3704 } 3705 } 3706 3707 if (!is_zero_ether_addr(match.mask->src)) { 3708 if (is_broadcast_ether_addr(match.mask->src)) { 3709 field_flags |= IAVF_CLOUD_FIELD_IMAC; 3710 } else { 3711 dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n", 3712 match.mask->src); 3713 return -EINVAL; 3714 } 3715 } 3716 3717 if (!is_zero_ether_addr(match.key->dst)) 3718 if (is_valid_ether_addr(match.key->dst) || 3719 is_multicast_ether_addr(match.key->dst)) { 3720 /* set the mask if a valid dst_mac address */ 3721 for (i = 0; i < ETH_ALEN; i++) 3722 vf->mask.tcp_spec.dst_mac[i] |= 0xff; 3723 ether_addr_copy(vf->data.tcp_spec.dst_mac, 3724 match.key->dst); 3725 } 3726 3727 if (!is_zero_ether_addr(match.key->src)) 3728 if (is_valid_ether_addr(match.key->src) || 3729 is_multicast_ether_addr(match.key->src)) { 3730 /* set the mask if a valid dst_mac address */ 3731 for (i = 0; i < ETH_ALEN; i++) 3732 vf->mask.tcp_spec.src_mac[i] |= 
0xff; 3733 ether_addr_copy(vf->data.tcp_spec.src_mac, 3734 match.key->src); 3735 } 3736 } 3737 3738 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { 3739 struct flow_match_vlan match; 3740 3741 flow_rule_match_vlan(rule, &match); 3742 if (match.mask->vlan_id) { 3743 if (match.mask->vlan_id == VLAN_VID_MASK) { 3744 field_flags |= IAVF_CLOUD_FIELD_IVLAN; 3745 } else { 3746 dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n", 3747 match.mask->vlan_id); 3748 return -EINVAL; 3749 } 3750 } 3751 vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff); 3752 vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id); 3753 } 3754 3755 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { 3756 struct flow_match_control match; 3757 3758 flow_rule_match_control(rule, &match); 3759 addr_type = match.key->addr_type; 3760 3761 if (flow_rule_has_control_flags(match.mask->flags, 3762 f->common.extack)) 3763 return -EOPNOTSUPP; 3764 } 3765 3766 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { 3767 struct flow_match_ipv4_addrs match; 3768 3769 flow_rule_match_ipv4_addrs(rule, &match); 3770 if (match.mask->dst) { 3771 if (match.mask->dst == cpu_to_be32(0xffffffff)) { 3772 field_flags |= IAVF_CLOUD_FIELD_IIP; 3773 } else { 3774 dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n", 3775 be32_to_cpu(match.mask->dst)); 3776 return -EINVAL; 3777 } 3778 } 3779 3780 if (match.mask->src) { 3781 if (match.mask->src == cpu_to_be32(0xffffffff)) { 3782 field_flags |= IAVF_CLOUD_FIELD_IIP; 3783 } else { 3784 dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n", 3785 be32_to_cpu(match.mask->src)); 3786 return -EINVAL; 3787 } 3788 } 3789 3790 if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) { 3791 dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n"); 3792 return -EINVAL; 3793 } 3794 if (match.key->dst) { 3795 vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff); 3796 vf->data.tcp_spec.dst_ip[0] = match.key->dst; 3797 } 3798 if (match.key->src) { 3799 vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff); 3800 vf->data.tcp_spec.src_ip[0] = match.key->src; 3801 } 3802 } 3803 3804 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { 3805 struct flow_match_ipv6_addrs match; 3806 3807 flow_rule_match_ipv6_addrs(rule, &match); 3808 3809 /* validate mask, make sure it is not IPV6_ADDR_ANY */ 3810 if (ipv6_addr_any(&match.mask->dst)) { 3811 dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n", 3812 IPV6_ADDR_ANY); 3813 return -EINVAL; 3814 } 3815 3816 /* src and dest IPv6 address should not be LOOPBACK 3817 * (0:0:0:0:0:0:0:1) which can be represented as ::1 3818 */ 3819 if (ipv6_addr_loopback(&match.key->dst) || 3820 ipv6_addr_loopback(&match.key->src)) { 3821 dev_err(&adapter->pdev->dev, 3822 "ipv6 addr should not be loopback\n"); 3823 return -EINVAL; 3824 } 3825 if (!ipv6_addr_any(&match.mask->dst) || 3826 !ipv6_addr_any(&match.mask->src)) 3827 field_flags |= IAVF_CLOUD_FIELD_IIP; 3828 3829 for (i = 0; i < 4; i++) 3830 vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff); 3831 memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32, 3832 sizeof(vf->data.tcp_spec.dst_ip)); 3833 for (i = 0; i < 4; i++) 3834 vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff); 3835 memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32, 3836 sizeof(vf->data.tcp_spec.src_ip)); 3837 } 3838 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { 3839 struct flow_match_ports match; 3840 3841 flow_rule_match_ports(rule, &match); 3842 if (match.mask->src) { 3843 if (match.mask->src == cpu_to_be16(0xffff)) { 
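/* as with the address matches above, only a fully specified (all-ones) port mask is accepted; partial masks are rejected */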
3844 field_flags |= IAVF_CLOUD_FIELD_IIP; 3845 } else { 3846 dev_err(&adapter->pdev->dev, "Bad src port mask %u\n", 3847 be16_to_cpu(match.mask->src)); 3848 return -EINVAL; 3849 } 3850 } 3851 3852 if (match.mask->dst) { 3853 if (match.mask->dst == cpu_to_be16(0xffff)) { 3854 field_flags |= IAVF_CLOUD_FIELD_IIP; 3855 } else { 3856 dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n", 3857 be16_to_cpu(match.mask->dst)); 3858 return -EINVAL; 3859 } 3860 } 3861 if (match.key->dst) { 3862 vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff); 3863 vf->data.tcp_spec.dst_port = match.key->dst; 3864 } 3865 3866 if (match.key->src) { 3867 vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff); 3868 vf->data.tcp_spec.src_port = match.key->src; 3869 } 3870 } 3871 vf->field_flags = field_flags; 3872 3873 return 0; 3874 } 3875 3876 /** 3877 * iavf_handle_tclass - Forward to a traffic class on the device 3878 * @adapter: board private structure 3879 * @tc: traffic class index on the device 3880 * @filter: pointer to cloud filter structure 3881 */ 3882 static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc, 3883 struct iavf_cloud_filter *filter) 3884 { 3885 if (tc == 0) 3886 return 0; 3887 if (tc < adapter->num_tc) { 3888 if (!filter->f.data.tcp_spec.dst_port) { 3889 dev_err(&adapter->pdev->dev, 3890 "Specify destination port to redirect to traffic class other than TC0\n"); 3891 return -EINVAL; 3892 } 3893 } 3894 /* redirect to a traffic class on the same device */ 3895 filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT; 3896 filter->f.action_meta = tc; 3897 return 0; 3898 } 3899 3900 /** 3901 * iavf_find_cf - Find the cloud filter in the list 3902 * @adapter: Board private structure 3903 * @cookie: filter specific cookie 3904 * 3905 * Returns ptr to the filter object or NULL. Must be called while holding the 3906 * cloud_filter_list_lock. 
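 * The cookie is compared with memcmp(), so it must match the stored cookie exactly.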
3907 */ 3908 static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter, 3909 unsigned long *cookie) 3910 { 3911 struct iavf_cloud_filter *filter = NULL; 3912 3913 if (!cookie) 3914 return NULL; 3915 3916 list_for_each_entry(filter, &adapter->cloud_filter_list, list) { 3917 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie))) 3918 return filter; 3919 } 3920 return NULL; 3921 } 3922 3923 /** 3924 * iavf_configure_clsflower - Add tc flower filters 3925 * @adapter: board private structure 3926 * @cls_flower: Pointer to struct flow_cls_offload 3927 */ 3928 static int iavf_configure_clsflower(struct iavf_adapter *adapter, 3929 struct flow_cls_offload *cls_flower) 3930 { 3931 int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid); 3932 struct iavf_cloud_filter *filter = NULL; 3933 int err = -EINVAL, count = 50; 3934 3935 if (tc < 0) { 3936 dev_err(&adapter->pdev->dev, "Invalid traffic class\n"); 3937 return -EINVAL; 3938 } 3939 3940 filter = kzalloc(sizeof(*filter), GFP_KERNEL); 3941 if (!filter) 3942 return -ENOMEM; 3943 3944 while (!mutex_trylock(&adapter->crit_lock)) { 3945 if (--count == 0) { 3946 kfree(filter); 3947 return err; 3948 } 3949 udelay(1); 3950 } 3951 3952 filter->cookie = cls_flower->cookie; 3953 3954 /* bail out here if filter already exists */ 3955 spin_lock_bh(&adapter->cloud_filter_list_lock); 3956 if (iavf_find_cf(adapter, &cls_flower->cookie)) { 3957 dev_err(&adapter->pdev->dev, "Failed to add TC Flower filter, it already exists\n"); 3958 err = -EEXIST; 3959 goto spin_unlock; 3960 } 3961 spin_unlock_bh(&adapter->cloud_filter_list_lock); 3962 3963 /* set the mask to all zeroes to begin with */ 3964 memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec)); 3965 /* start out with flow type and eth type IPv4 to begin with */ 3966 filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW; 3967 err = iavf_parse_cls_flower(adapter, cls_flower, filter); 3968 if (err) 3969 goto err; 3970 3971 err = iavf_handle_tclass(adapter, tc, filter); 3972 if (err) 3973 goto err; 3974 3975 /* add filter to the list */ 3976 spin_lock_bh(&adapter->cloud_filter_list_lock); 3977 list_add_tail(&filter->list, &adapter->cloud_filter_list); 3978 adapter->num_cloud_filters++; 3979 filter->add = true; 3980 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; 3981 spin_unlock: 3982 spin_unlock_bh(&adapter->cloud_filter_list_lock); 3983 err: 3984 if (err) 3985 kfree(filter); 3986 3987 mutex_unlock(&adapter->crit_lock); 3988 return err; 3989 } 3990 3991 /** 3992 * iavf_delete_clsflower - Remove tc flower filters 3993 * @adapter: board private structure 3994 * @cls_flower: Pointer to struct flow_cls_offload 3995 */ 3996 static int iavf_delete_clsflower(struct iavf_adapter *adapter, 3997 struct flow_cls_offload *cls_flower) 3998 { 3999 struct iavf_cloud_filter *filter = NULL; 4000 int err = 0; 4001 4002 spin_lock_bh(&adapter->cloud_filter_list_lock); 4003 filter = iavf_find_cf(adapter, &cls_flower->cookie); 4004 if (filter) { 4005 filter->del = true; 4006 adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER; 4007 } else { 4008 err = -EINVAL; 4009 } 4010 spin_unlock_bh(&adapter->cloud_filter_list_lock); 4011 4012 return err; 4013 } 4014 4015 /** 4016 * iavf_setup_tc_cls_flower - flower classifier offloads 4017 * @adapter: board private structure 4018 * @cls_flower: pointer to flow_cls_offload struct with flow info 4019 */ 4020 static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter, 4021 struct flow_cls_offload *cls_flower) 4022 { 4023 switch (cls_flower->command) { 
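/* only rule add (REPLACE) and delete (DESTROY) are implemented; FLOW_CLS_STATS is reported as unsupported */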
4024 case FLOW_CLS_REPLACE: 4025 return iavf_configure_clsflower(adapter, cls_flower); 4026 case FLOW_CLS_DESTROY: 4027 return iavf_delete_clsflower(adapter, cls_flower); 4028 case FLOW_CLS_STATS: 4029 return -EOPNOTSUPP; 4030 default: 4031 return -EOPNOTSUPP; 4032 } 4033 } 4034 4035 /** 4036 * iavf_setup_tc_block_cb - block callback for tc 4037 * @type: type of offload 4038 * @type_data: offload data 4039 * @cb_priv: 4040 * 4041 * This function is the block callback for traffic classes 4042 **/ 4043 static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 4044 void *cb_priv) 4045 { 4046 struct iavf_adapter *adapter = cb_priv; 4047 4048 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data)) 4049 return -EOPNOTSUPP; 4050 4051 switch (type) { 4052 case TC_SETUP_CLSFLOWER: 4053 return iavf_setup_tc_cls_flower(cb_priv, type_data); 4054 default: 4055 return -EOPNOTSUPP; 4056 } 4057 } 4058 4059 static LIST_HEAD(iavf_block_cb_list); 4060 4061 /** 4062 * iavf_setup_tc - configure multiple traffic classes 4063 * @netdev: network interface device structure 4064 * @type: type of offload 4065 * @type_data: tc offload data 4066 * 4067 * This function is the callback to ndo_setup_tc in the 4068 * netdev_ops. 4069 * 4070 * Returns 0 on success 4071 **/ 4072 static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type, 4073 void *type_data) 4074 { 4075 struct iavf_adapter *adapter = netdev_priv(netdev); 4076 4077 switch (type) { 4078 case TC_SETUP_QDISC_MQPRIO: 4079 return __iavf_setup_tc(netdev, type_data); 4080 case TC_SETUP_BLOCK: 4081 return flow_block_cb_setup_simple(type_data, 4082 &iavf_block_cb_list, 4083 iavf_setup_tc_block_cb, 4084 adapter, adapter, true); 4085 default: 4086 return -EOPNOTSUPP; 4087 } 4088 } 4089 4090 /** 4091 * iavf_restore_fdir_filters 4092 * @adapter: board private structure 4093 * 4094 * Restore existing FDIR filters when VF netdev comes back up. 4095 **/ 4096 static void iavf_restore_fdir_filters(struct iavf_adapter *adapter) 4097 { 4098 struct iavf_fdir_fltr *f; 4099 4100 spin_lock_bh(&adapter->fdir_fltr_lock); 4101 list_for_each_entry(f, &adapter->fdir_list_head, list) { 4102 if (f->state == IAVF_FDIR_FLTR_DIS_REQUEST) { 4103 /* Cancel a request, keep filter as active */ 4104 f->state = IAVF_FDIR_FLTR_ACTIVE; 4105 } else if (f->state == IAVF_FDIR_FLTR_DIS_PENDING || 4106 f->state == IAVF_FDIR_FLTR_INACTIVE) { 4107 /* Add filters which are inactive or have a pending 4108 * request to PF to be deleted 4109 */ 4110 f->state = IAVF_FDIR_FLTR_ADD_REQUEST; 4111 adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER; 4112 } 4113 } 4114 spin_unlock_bh(&adapter->fdir_fltr_lock); 4115 } 4116 4117 /** 4118 * iavf_open - Called when a network interface is made active 4119 * @netdev: network interface device structure 4120 * 4121 * Returns 0 on success, negative value on failure 4122 * 4123 * The open entry point is called when a network interface is made 4124 * active by the system (IFF_UP). At this point all resources needed 4125 * for transmit and receive operations are allocated, the interrupt 4126 * handler is registered with the OS, the watchdog is started, 4127 * and the stack is notified that the interface is ready. 
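 * The entire open sequence runs with the adapter's crit_lock held.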
4128 **/ 4129 static int iavf_open(struct net_device *netdev) 4130 { 4131 struct iavf_adapter *adapter = netdev_priv(netdev); 4132 int err; 4133 4134 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) { 4135 dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n"); 4136 return -EIO; 4137 } 4138 4139 while (!mutex_trylock(&adapter->crit_lock)) { 4140 /* If we are in __IAVF_INIT_CONFIG_ADAPTER state the crit_lock 4141 * is already taken and iavf_open is called from an upper 4142 * device's notifier reacting on NETDEV_REGISTER event. 4143 * We have to leave here to avoid dead lock. 4144 */ 4145 if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER) 4146 return -EBUSY; 4147 4148 usleep_range(500, 1000); 4149 } 4150 4151 if (adapter->state != __IAVF_DOWN) { 4152 err = -EBUSY; 4153 goto err_unlock; 4154 } 4155 4156 if (adapter->state == __IAVF_RUNNING && 4157 !test_bit(__IAVF_VSI_DOWN, adapter->vsi.state)) { 4158 dev_dbg(&adapter->pdev->dev, "VF is already open.\n"); 4159 err = 0; 4160 goto err_unlock; 4161 } 4162 4163 /* allocate transmit descriptors */ 4164 err = iavf_setup_all_tx_resources(adapter); 4165 if (err) 4166 goto err_setup_tx; 4167 4168 /* allocate receive descriptors */ 4169 err = iavf_setup_all_rx_resources(adapter); 4170 if (err) 4171 goto err_setup_rx; 4172 4173 /* clear any pending interrupts, may auto mask */ 4174 err = iavf_request_traffic_irqs(adapter, netdev->name); 4175 if (err) 4176 goto err_req_irq; 4177 4178 spin_lock_bh(&adapter->mac_vlan_list_lock); 4179 4180 iavf_add_filter(adapter, adapter->hw.mac.addr); 4181 4182 spin_unlock_bh(&adapter->mac_vlan_list_lock); 4183 4184 /* Restore filters that were removed with IFF_DOWN */ 4185 iavf_restore_filters(adapter); 4186 iavf_restore_fdir_filters(adapter); 4187 4188 iavf_configure(adapter); 4189 4190 iavf_up_complete(adapter); 4191 4192 iavf_irq_enable(adapter, true); 4193 4194 mutex_unlock(&adapter->crit_lock); 4195 4196 return 0; 4197 4198 err_req_irq: 4199 iavf_down(adapter); 4200 iavf_free_traffic_irqs(adapter); 4201 err_setup_rx: 4202 iavf_free_all_rx_resources(adapter); 4203 err_setup_tx: 4204 iavf_free_all_tx_resources(adapter); 4205 err_unlock: 4206 mutex_unlock(&adapter->crit_lock); 4207 4208 return err; 4209 } 4210 4211 /** 4212 * iavf_close - Disables a network interface 4213 * @netdev: network interface device structure 4214 * 4215 * Returns 0, this is not allowed to fail 4216 * 4217 * The close entry point is called when an interface is de-activated 4218 * by the OS. The hardware is still under the drivers control, but 4219 * needs to be disabled. All IRQs except vector 0 (reserved for admin queue) 4220 * are freed, along with all transmit and receive resources. 4221 **/ 4222 static int iavf_close(struct net_device *netdev) 4223 { 4224 struct iavf_adapter *adapter = netdev_priv(netdev); 4225 u64 aq_to_restore; 4226 int status; 4227 4228 mutex_lock(&adapter->crit_lock); 4229 4230 if (adapter->state <= __IAVF_DOWN_PENDING) { 4231 mutex_unlock(&adapter->crit_lock); 4232 return 0; 4233 } 4234 4235 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 4236 /* We cannot send IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS before 4237 * IAVF_FLAG_AQ_DISABLE_QUEUES because in such case there is rtnl 4238 * deadlock with adminq_task() until iavf_close timeouts. We must send 4239 * IAVF_FLAG_AQ_GET_CONFIG before IAVF_FLAG_AQ_DISABLE_QUEUES to make 4240 * disable queues possible for vf. 
Give only necessary flags to 4241 * iavf_down and save other to set them right before iavf_close() 4242 * returns, when IAVF_FLAG_AQ_DISABLE_QUEUES will be already sent and 4243 * iavf will be in DOWN state. 4244 */ 4245 aq_to_restore = adapter->aq_required; 4246 adapter->aq_required &= IAVF_FLAG_AQ_GET_CONFIG; 4247 4248 /* Remove flags which we do not want to send after close or we want to 4249 * send before disable queues. 4250 */ 4251 aq_to_restore &= ~(IAVF_FLAG_AQ_GET_CONFIG | 4252 IAVF_FLAG_AQ_ENABLE_QUEUES | 4253 IAVF_FLAG_AQ_CONFIGURE_QUEUES | 4254 IAVF_FLAG_AQ_ADD_VLAN_FILTER | 4255 IAVF_FLAG_AQ_ADD_MAC_FILTER | 4256 IAVF_FLAG_AQ_ADD_CLOUD_FILTER | 4257 IAVF_FLAG_AQ_ADD_FDIR_FILTER | 4258 IAVF_FLAG_AQ_ADD_ADV_RSS_CFG); 4259 4260 iavf_down(adapter); 4261 iavf_change_state(adapter, __IAVF_DOWN_PENDING); 4262 iavf_free_traffic_irqs(adapter); 4263 4264 mutex_unlock(&adapter->crit_lock); 4265 4266 /* We explicitly don't free resources here because the hardware is 4267 * still active and can DMA into memory. Resources are cleared in 4268 * iavf_virtchnl_completion() after we get confirmation from the PF 4269 * driver that the rings have been stopped. 4270 * 4271 * Also, we wait for state to transition to __IAVF_DOWN before 4272 * returning. State change occurs in iavf_virtchnl_completion() after 4273 * VF resources are released (which occurs after PF driver processes and 4274 * responds to admin queue commands). 4275 */ 4276 4277 status = wait_event_timeout(adapter->down_waitqueue, 4278 adapter->state == __IAVF_DOWN, 4279 msecs_to_jiffies(500)); 4280 if (!status) 4281 netdev_warn(netdev, "Device resources not yet released\n"); 4282 4283 mutex_lock(&adapter->crit_lock); 4284 adapter->aq_required |= aq_to_restore; 4285 mutex_unlock(&adapter->crit_lock); 4286 return 0; 4287 } 4288 4289 /** 4290 * iavf_change_mtu - Change the Maximum Transfer Unit 4291 * @netdev: network interface device structure 4292 * @new_mtu: new value for maximum frame size 4293 * 4294 * Returns 0 on success, negative on failure 4295 **/ 4296 static int iavf_change_mtu(struct net_device *netdev, int new_mtu) 4297 { 4298 struct iavf_adapter *adapter = netdev_priv(netdev); 4299 int ret = 0; 4300 4301 netdev_dbg(netdev, "changing MTU from %d to %d\n", 4302 netdev->mtu, new_mtu); 4303 WRITE_ONCE(netdev->mtu, new_mtu); 4304 4305 if (netif_running(netdev)) { 4306 iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED); 4307 ret = iavf_wait_for_reset(adapter); 4308 if (ret < 0) 4309 netdev_warn(netdev, "MTU change interrupted waiting for reset"); 4310 else if (ret) 4311 netdev_warn(netdev, "MTU change timed out waiting for reset"); 4312 } 4313 4314 return ret; 4315 } 4316 4317 /** 4318 * iavf_disable_fdir - disable Flow Director and clear existing filters 4319 * @adapter: board private structure 4320 **/ 4321 static void iavf_disable_fdir(struct iavf_adapter *adapter) 4322 { 4323 struct iavf_fdir_fltr *fdir, *fdirtmp; 4324 bool del_filters = false; 4325 4326 adapter->flags &= ~IAVF_FLAG_FDIR_ENABLED; 4327 4328 /* remove all Flow Director filters */ 4329 spin_lock_bh(&adapter->fdir_fltr_lock); 4330 list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, 4331 list) { 4332 if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST || 4333 fdir->state == IAVF_FDIR_FLTR_INACTIVE) { 4334 /* Delete filters not registered in PF */ 4335 list_del(&fdir->list); 4336 kfree(fdir); 4337 adapter->fdir_active_fltr--; 4338 } else if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING || 4339 fdir->state == IAVF_FDIR_FLTR_DIS_REQUEST || 4340 fdir->state == 
IAVF_FDIR_FLTR_ACTIVE) { 4341 /* Filters registered in PF, schedule their deletion */ 4342 fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST; 4343 del_filters = true; 4344 } else if (fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) { 4345 /* Request to delete filter already sent to PF, change 4346 * state to DEL_PENDING to delete filter after PF's 4347 * response, not set as INACTIVE 4348 */ 4349 fdir->state = IAVF_FDIR_FLTR_DEL_PENDING; 4350 } 4351 } 4352 spin_unlock_bh(&adapter->fdir_fltr_lock); 4353 4354 if (del_filters) { 4355 adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER; 4356 mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); 4357 } 4358 } 4359 4360 #define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \ 4361 NETIF_F_HW_VLAN_CTAG_TX | \ 4362 NETIF_F_HW_VLAN_STAG_RX | \ 4363 NETIF_F_HW_VLAN_STAG_TX) 4364 4365 /** 4366 * iavf_set_features - set the netdev feature flags 4367 * @netdev: ptr to the netdev being adjusted 4368 * @features: the feature set that the stack is suggesting 4369 * Note: expects to be called while under rtnl_lock() 4370 **/ 4371 static int iavf_set_features(struct net_device *netdev, 4372 netdev_features_t features) 4373 { 4374 struct iavf_adapter *adapter = netdev_priv(netdev); 4375 4376 /* trigger update on any VLAN feature change */ 4377 if ((netdev->features & NETIF_VLAN_OFFLOAD_FEATURES) ^ 4378 (features & NETIF_VLAN_OFFLOAD_FEATURES)) 4379 iavf_set_vlan_offload_features(adapter, netdev->features, 4380 features); 4381 if (CRC_OFFLOAD_ALLOWED(adapter) && 4382 ((netdev->features & NETIF_F_RXFCS) ^ (features & NETIF_F_RXFCS))) 4383 iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED); 4384 4385 if ((netdev->features & NETIF_F_NTUPLE) ^ (features & NETIF_F_NTUPLE)) { 4386 if (features & NETIF_F_NTUPLE) 4387 adapter->flags |= IAVF_FLAG_FDIR_ENABLED; 4388 else 4389 iavf_disable_fdir(adapter); 4390 } 4391 4392 return 0; 4393 } 4394 4395 /** 4396 * iavf_features_check - Validate encapsulated packet conforms to limits 4397 * @skb: skb buff 4398 * @dev: This physical port's netdev 4399 * @features: Offload features that the stack believes apply 4400 **/ 4401 static netdev_features_t iavf_features_check(struct sk_buff *skb, 4402 struct net_device *dev, 4403 netdev_features_t features) 4404 { 4405 size_t len; 4406 4407 /* No point in doing any of this if neither checksum nor GSO are 4408 * being requested for this frame. We can rule out both by just 4409 * checking for CHECKSUM_PARTIAL 4410 */ 4411 if (skb->ip_summed != CHECKSUM_PARTIAL) 4412 return features; 4413 4414 /* We cannot support GSO if the MSS is going to be less than 4415 * 64 bytes. If it is then we need to drop support for GSO. 
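 * The header length checks that follow enforce hardware limits counted in 2-byte words (MACLEN, L4TUNLEN) and 4-byte dwords (IPLEN, EIPLEN).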
4416 */ 4417 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) 4418 features &= ~NETIF_F_GSO_MASK; 4419 4420 /* MACLEN can support at most 63 words */ 4421 len = skb_network_offset(skb); 4422 if (len & ~(63 * 2)) 4423 goto out_err; 4424 4425 /* IPLEN and EIPLEN can support at most 127 dwords */ 4426 len = skb_network_header_len(skb); 4427 if (len & ~(127 * 4)) 4428 goto out_err; 4429 4430 if (skb->encapsulation) { 4431 /* L4TUNLEN can support 127 words */ 4432 len = skb_inner_network_header(skb) - skb_transport_header(skb); 4433 if (len & ~(127 * 2)) 4434 goto out_err; 4435 4436 /* IPLEN can support at most 127 dwords */ 4437 len = skb_inner_transport_header(skb) - 4438 skb_inner_network_header(skb); 4439 if (len & ~(127 * 4)) 4440 goto out_err; 4441 } 4442 4443 /* No need to validate L4LEN as TCP is the only protocol with a 4444 * flexible value and we support all possible values supported 4445 * by TCP, which is at most 15 dwords 4446 */ 4447 4448 return features; 4449 out_err: 4450 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 4451 } 4452 4453 /** 4454 * iavf_get_netdev_vlan_hw_features - get NETDEV VLAN features that can toggle on/off 4455 * @adapter: board private structure 4456 * 4457 * Depending on whether VIRTCHNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2 4458 * were negotiated determine the VLAN features that can be toggled on and off. 4459 **/ 4460 static netdev_features_t 4461 iavf_get_netdev_vlan_hw_features(struct iavf_adapter *adapter) 4462 { 4463 netdev_features_t hw_features = 0; 4464 4465 if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags) 4466 return hw_features; 4467 4468 /* Enable VLAN features if supported */ 4469 if (VLAN_ALLOWED(adapter)) { 4470 hw_features |= (NETIF_F_HW_VLAN_CTAG_TX | 4471 NETIF_F_HW_VLAN_CTAG_RX); 4472 } else if (VLAN_V2_ALLOWED(adapter)) { 4473 struct virtchnl_vlan_caps *vlan_v2_caps = 4474 &adapter->vlan_v2_caps; 4475 struct virtchnl_vlan_supported_caps *stripping_support = 4476 &vlan_v2_caps->offloads.stripping_support; 4477 struct virtchnl_vlan_supported_caps *insertion_support = 4478 &vlan_v2_caps->offloads.insertion_support; 4479 4480 if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED && 4481 stripping_support->outer & VIRTCHNL_VLAN_TOGGLE) { 4482 if (stripping_support->outer & 4483 VIRTCHNL_VLAN_ETHERTYPE_8100) 4484 hw_features |= NETIF_F_HW_VLAN_CTAG_RX; 4485 if (stripping_support->outer & 4486 VIRTCHNL_VLAN_ETHERTYPE_88A8) 4487 hw_features |= NETIF_F_HW_VLAN_STAG_RX; 4488 } else if (stripping_support->inner != 4489 VIRTCHNL_VLAN_UNSUPPORTED && 4490 stripping_support->inner & VIRTCHNL_VLAN_TOGGLE) { 4491 if (stripping_support->inner & 4492 VIRTCHNL_VLAN_ETHERTYPE_8100) 4493 hw_features |= NETIF_F_HW_VLAN_CTAG_RX; 4494 } 4495 4496 if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED && 4497 insertion_support->outer & VIRTCHNL_VLAN_TOGGLE) { 4498 if (insertion_support->outer & 4499 VIRTCHNL_VLAN_ETHERTYPE_8100) 4500 hw_features |= NETIF_F_HW_VLAN_CTAG_TX; 4501 if (insertion_support->outer & 4502 VIRTCHNL_VLAN_ETHERTYPE_88A8) 4503 hw_features |= NETIF_F_HW_VLAN_STAG_TX; 4504 } else if (insertion_support->inner && 4505 insertion_support->inner & VIRTCHNL_VLAN_TOGGLE) { 4506 if (insertion_support->inner & 4507 VIRTCHNL_VLAN_ETHERTYPE_8100) 4508 hw_features |= NETIF_F_HW_VLAN_CTAG_TX; 4509 } 4510 } 4511 4512 if (CRC_OFFLOAD_ALLOWED(adapter)) 4513 hw_features |= NETIF_F_RXFCS; 4514 4515 return hw_features; 4516 } 4517 4518 /** 4519 * iavf_get_netdev_vlan_features - get the enabled NETDEV VLAN features 4520 
* @adapter: board private structure 4521 * 4522 * Depending on whether VIRTHCNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2 4523 * were negotiated determine the VLAN features that are enabled by default. 4524 **/ 4525 static netdev_features_t 4526 iavf_get_netdev_vlan_features(struct iavf_adapter *adapter) 4527 { 4528 netdev_features_t features = 0; 4529 4530 if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags) 4531 return features; 4532 4533 if (VLAN_ALLOWED(adapter)) { 4534 features |= NETIF_F_HW_VLAN_CTAG_FILTER | 4535 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX; 4536 } else if (VLAN_V2_ALLOWED(adapter)) { 4537 struct virtchnl_vlan_caps *vlan_v2_caps = 4538 &adapter->vlan_v2_caps; 4539 struct virtchnl_vlan_supported_caps *filtering_support = 4540 &vlan_v2_caps->filtering.filtering_support; 4541 struct virtchnl_vlan_supported_caps *stripping_support = 4542 &vlan_v2_caps->offloads.stripping_support; 4543 struct virtchnl_vlan_supported_caps *insertion_support = 4544 &vlan_v2_caps->offloads.insertion_support; 4545 u32 ethertype_init; 4546 4547 /* give priority to outer stripping and don't support both outer 4548 * and inner stripping 4549 */ 4550 ethertype_init = vlan_v2_caps->offloads.ethertype_init; 4551 if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) { 4552 if (stripping_support->outer & 4553 VIRTCHNL_VLAN_ETHERTYPE_8100 && 4554 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100) 4555 features |= NETIF_F_HW_VLAN_CTAG_RX; 4556 else if (stripping_support->outer & 4557 VIRTCHNL_VLAN_ETHERTYPE_88A8 && 4558 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8) 4559 features |= NETIF_F_HW_VLAN_STAG_RX; 4560 } else if (stripping_support->inner != 4561 VIRTCHNL_VLAN_UNSUPPORTED) { 4562 if (stripping_support->inner & 4563 VIRTCHNL_VLAN_ETHERTYPE_8100 && 4564 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100) 4565 features |= NETIF_F_HW_VLAN_CTAG_RX; 4566 } 4567 4568 /* give priority to outer insertion and don't support both outer 4569 * and inner insertion 4570 */ 4571 if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) { 4572 if (insertion_support->outer & 4573 VIRTCHNL_VLAN_ETHERTYPE_8100 && 4574 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100) 4575 features |= NETIF_F_HW_VLAN_CTAG_TX; 4576 else if (insertion_support->outer & 4577 VIRTCHNL_VLAN_ETHERTYPE_88A8 && 4578 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8) 4579 features |= NETIF_F_HW_VLAN_STAG_TX; 4580 } else if (insertion_support->inner != 4581 VIRTCHNL_VLAN_UNSUPPORTED) { 4582 if (insertion_support->inner & 4583 VIRTCHNL_VLAN_ETHERTYPE_8100 && 4584 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100) 4585 features |= NETIF_F_HW_VLAN_CTAG_TX; 4586 } 4587 4588 /* give priority to outer filtering and don't bother if both 4589 * outer and inner filtering are enabled 4590 */ 4591 ethertype_init = vlan_v2_caps->filtering.ethertype_init; 4592 if (filtering_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) { 4593 if (filtering_support->outer & 4594 VIRTCHNL_VLAN_ETHERTYPE_8100 && 4595 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100) 4596 features |= NETIF_F_HW_VLAN_CTAG_FILTER; 4597 if (filtering_support->outer & 4598 VIRTCHNL_VLAN_ETHERTYPE_88A8 && 4599 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8) 4600 features |= NETIF_F_HW_VLAN_STAG_FILTER; 4601 } else if (filtering_support->inner != 4602 VIRTCHNL_VLAN_UNSUPPORTED) { 4603 if (filtering_support->inner & 4604 VIRTCHNL_VLAN_ETHERTYPE_8100 && 4605 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100) 4606 features |= NETIF_F_HW_VLAN_CTAG_FILTER; 4607 if (filtering_support->inner & 4608 
VIRTCHNL_VLAN_ETHERTYPE_88A8 && 4609 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8) 4610 features |= NETIF_F_HW_VLAN_STAG_FILTER; 4611 } 4612 } 4613 4614 return features; 4615 } 4616 4617 #define IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested, allowed, feature_bit) \ 4618 (!(((requested) & (feature_bit)) && \ 4619 !((allowed) & (feature_bit)))) 4620 4621 /** 4622 * iavf_fix_netdev_vlan_features - fix NETDEV VLAN features based on support 4623 * @adapter: board private structure 4624 * @requested_features: stack requested NETDEV features 4625 **/ 4626 static netdev_features_t 4627 iavf_fix_netdev_vlan_features(struct iavf_adapter *adapter, 4628 netdev_features_t requested_features) 4629 { 4630 netdev_features_t allowed_features; 4631 4632 allowed_features = iavf_get_netdev_vlan_hw_features(adapter) | 4633 iavf_get_netdev_vlan_features(adapter); 4634 4635 if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features, 4636 allowed_features, 4637 NETIF_F_HW_VLAN_CTAG_TX)) 4638 requested_features &= ~NETIF_F_HW_VLAN_CTAG_TX; 4639 4640 if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features, 4641 allowed_features, 4642 NETIF_F_HW_VLAN_CTAG_RX)) 4643 requested_features &= ~NETIF_F_HW_VLAN_CTAG_RX; 4644 4645 if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features, 4646 allowed_features, 4647 NETIF_F_HW_VLAN_STAG_TX)) 4648 requested_features &= ~NETIF_F_HW_VLAN_STAG_TX; 4649 if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features, 4650 allowed_features, 4651 NETIF_F_HW_VLAN_STAG_RX)) 4652 requested_features &= ~NETIF_F_HW_VLAN_STAG_RX; 4653 4654 if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features, 4655 allowed_features, 4656 NETIF_F_HW_VLAN_CTAG_FILTER)) 4657 requested_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; 4658 4659 if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features, 4660 allowed_features, 4661 NETIF_F_HW_VLAN_STAG_FILTER)) 4662 requested_features &= ~NETIF_F_HW_VLAN_STAG_FILTER; 4663 4664 if ((requested_features & 4665 (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) && 4666 (requested_features & 4667 (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) && 4668 adapter->vlan_v2_caps.offloads.ethertype_match == 4669 VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION) { 4670 netdev_warn(adapter->netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n"); 4671 requested_features &= ~(NETIF_F_HW_VLAN_STAG_RX | 4672 NETIF_F_HW_VLAN_STAG_TX); 4673 } 4674 4675 return requested_features; 4676 } 4677 4678 /** 4679 * iavf_fix_strip_features - fix NETDEV CRC and VLAN strip features 4680 * @adapter: board private structure 4681 * @requested_features: stack requested NETDEV features 4682 * 4683 * Returns fixed-up features bits 4684 **/ 4685 static netdev_features_t 4686 iavf_fix_strip_features(struct iavf_adapter *adapter, 4687 netdev_features_t requested_features) 4688 { 4689 struct net_device *netdev = adapter->netdev; 4690 bool crc_offload_req, is_vlan_strip; 4691 netdev_features_t vlan_strip; 4692 int num_non_zero_vlan; 4693 4694 crc_offload_req = CRC_OFFLOAD_ALLOWED(adapter) && 4695 (requested_features & NETIF_F_RXFCS); 4696 num_non_zero_vlan = iavf_get_num_vlans_added(adapter); 4697 vlan_strip = (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX); 4698 is_vlan_strip = requested_features & vlan_strip; 4699 4700 if (!crc_offload_req) 4701 return requested_features; 4702 4703 if (!num_non_zero_vlan && (netdev->features & vlan_strip) && 4704 !(netdev->features & NETIF_F_RXFCS) && 
is_vlan_strip) { 4705 requested_features &= ~vlan_strip; 4706 netdev_info(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n"); 4707 return requested_features; 4708 } 4709 4710 if ((netdev->features & NETIF_F_RXFCS) && is_vlan_strip) { 4711 requested_features &= ~vlan_strip; 4712 if (!(netdev->features & vlan_strip)) 4713 netdev_info(netdev, "To enable VLAN stripping, first need to enable FCS/CRC stripping"); 4714 4715 return requested_features; 4716 } 4717 4718 if (num_non_zero_vlan && is_vlan_strip && 4719 !(netdev->features & NETIF_F_RXFCS)) { 4720 requested_features &= ~NETIF_F_RXFCS; 4721 netdev_info(netdev, "To disable FCS/CRC stripping, first need to disable VLAN stripping"); 4722 } 4723 4724 return requested_features; 4725 } 4726 4727 /** 4728 * iavf_fix_features - fix up the netdev feature bits 4729 * @netdev: our net device 4730 * @features: desired feature bits 4731 * 4732 * Returns fixed-up features bits 4733 **/ 4734 static netdev_features_t iavf_fix_features(struct net_device *netdev, 4735 netdev_features_t features) 4736 { 4737 struct iavf_adapter *adapter = netdev_priv(netdev); 4738 4739 features = iavf_fix_netdev_vlan_features(adapter, features); 4740 4741 if (!FDIR_FLTR_SUPPORT(adapter)) 4742 features &= ~NETIF_F_NTUPLE; 4743 4744 return iavf_fix_strip_features(adapter, features); 4745 } 4746 4747 static const struct net_device_ops iavf_netdev_ops = { 4748 .ndo_open = iavf_open, 4749 .ndo_stop = iavf_close, 4750 .ndo_start_xmit = iavf_xmit_frame, 4751 .ndo_set_rx_mode = iavf_set_rx_mode, 4752 .ndo_validate_addr = eth_validate_addr, 4753 .ndo_set_mac_address = iavf_set_mac, 4754 .ndo_change_mtu = iavf_change_mtu, 4755 .ndo_tx_timeout = iavf_tx_timeout, 4756 .ndo_vlan_rx_add_vid = iavf_vlan_rx_add_vid, 4757 .ndo_vlan_rx_kill_vid = iavf_vlan_rx_kill_vid, 4758 .ndo_features_check = iavf_features_check, 4759 .ndo_fix_features = iavf_fix_features, 4760 .ndo_set_features = iavf_set_features, 4761 .ndo_setup_tc = iavf_setup_tc, 4762 }; 4763 4764 /** 4765 * iavf_check_reset_complete - check that VF reset is complete 4766 * @hw: pointer to hw struct 4767 * 4768 * Returns 0 if device is ready to use, or -EBUSY if it's in reset. 4769 **/ 4770 static int iavf_check_reset_complete(struct iavf_hw *hw) 4771 { 4772 u32 rstat; 4773 int i; 4774 4775 for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) { 4776 rstat = rd32(hw, IAVF_VFGEN_RSTAT) & 4777 IAVF_VFGEN_RSTAT_VFR_STATE_MASK; 4778 if ((rstat == VIRTCHNL_VFR_VFACTIVE) || 4779 (rstat == VIRTCHNL_VFR_COMPLETED)) 4780 return 0; 4781 msleep(IAVF_RESET_WAIT_MS); 4782 } 4783 return -EBUSY; 4784 } 4785 4786 /** 4787 * iavf_process_config - Process the config information we got from the PF 4788 * @adapter: board private structure 4789 * 4790 * Verify that we have a valid config struct, and set up our netdev features 4791 * and our VSI struct. 
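 * Which VLAN offloads end up advertised depends on whether VIRTCHNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2 was negotiated with the PF.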
4792 **/ 4793 int iavf_process_config(struct iavf_adapter *adapter) 4794 { 4795 struct virtchnl_vf_resource *vfres = adapter->vf_res; 4796 netdev_features_t hw_vlan_features, vlan_features; 4797 struct net_device *netdev = adapter->netdev; 4798 netdev_features_t hw_enc_features; 4799 netdev_features_t hw_features; 4800 4801 hw_enc_features = NETIF_F_SG | 4802 NETIF_F_IP_CSUM | 4803 NETIF_F_IPV6_CSUM | 4804 NETIF_F_HIGHDMA | 4805 NETIF_F_SOFT_FEATURES | 4806 NETIF_F_TSO | 4807 NETIF_F_TSO_ECN | 4808 NETIF_F_TSO6 | 4809 NETIF_F_SCTP_CRC | 4810 NETIF_F_RXHASH | 4811 NETIF_F_RXCSUM | 4812 0; 4813 4814 /* advertise to stack only if offloads for encapsulated packets is 4815 * supported 4816 */ 4817 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) { 4818 hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL | 4819 NETIF_F_GSO_GRE | 4820 NETIF_F_GSO_GRE_CSUM | 4821 NETIF_F_GSO_IPXIP4 | 4822 NETIF_F_GSO_IPXIP6 | 4823 NETIF_F_GSO_UDP_TUNNEL_CSUM | 4824 NETIF_F_GSO_PARTIAL | 4825 0; 4826 4827 if (!(vfres->vf_cap_flags & 4828 VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)) 4829 netdev->gso_partial_features |= 4830 NETIF_F_GSO_UDP_TUNNEL_CSUM; 4831 4832 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; 4833 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; 4834 netdev->hw_enc_features |= hw_enc_features; 4835 } 4836 /* record features VLANs can make use of */ 4837 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID; 4838 4839 /* Write features and hw_features separately to avoid polluting 4840 * with, or dropping, features that are set when we registered. 4841 */ 4842 hw_features = hw_enc_features; 4843 4844 /* get HW VLAN features that can be toggled */ 4845 hw_vlan_features = iavf_get_netdev_vlan_hw_features(adapter); 4846 4847 /* Enable cloud filter if ADQ is supported */ 4848 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) 4849 hw_features |= NETIF_F_HW_TC; 4850 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO) 4851 hw_features |= NETIF_F_GSO_UDP_L4; 4852 4853 netdev->hw_features |= hw_features | hw_vlan_features; 4854 vlan_features = iavf_get_netdev_vlan_features(adapter); 4855 4856 netdev->features |= hw_features | vlan_features; 4857 4858 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) 4859 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 4860 4861 if (FDIR_FLTR_SUPPORT(adapter)) { 4862 netdev->hw_features |= NETIF_F_NTUPLE; 4863 netdev->features |= NETIF_F_NTUPLE; 4864 adapter->flags |= IAVF_FLAG_FDIR_ENABLED; 4865 } 4866 4867 netdev->priv_flags |= IFF_UNICAST_FLT; 4868 4869 /* Do not turn on offloads when they are requested to be turned off. 4870 * TSO needs minimum 576 bytes to work correctly. 4871 */ 4872 if (netdev->wanted_features) { 4873 if (!(netdev->wanted_features & NETIF_F_TSO) || 4874 netdev->mtu < 576) 4875 netdev->features &= ~NETIF_F_TSO; 4876 if (!(netdev->wanted_features & NETIF_F_TSO6) || 4877 netdev->mtu < 576) 4878 netdev->features &= ~NETIF_F_TSO6; 4879 if (!(netdev->wanted_features & NETIF_F_TSO_ECN)) 4880 netdev->features &= ~NETIF_F_TSO_ECN; 4881 if (!(netdev->wanted_features & NETIF_F_GRO)) 4882 netdev->features &= ~NETIF_F_GRO; 4883 if (!(netdev->wanted_features & NETIF_F_GSO)) 4884 netdev->features &= ~NETIF_F_GSO; 4885 } 4886 4887 return 0; 4888 } 4889 4890 /** 4891 * iavf_probe - Device Initialization Routine 4892 * @pdev: PCI device information struct 4893 * @ent: entry in iavf_pci_tbl 4894 * 4895 * Returns 0 on success, negative on failure 4896 * 4897 * iavf_probe initializes an adapter identified by a pci_dev structure. 
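 * Most of the initialization is deferred to the watchdog task queued at the end of this function.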
4898 * The OS initialization, configuring of the adapter private structure, 4899 * and a hardware reset occur. 4900 **/ 4901 static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 4902 { 4903 struct net_device *netdev; 4904 struct iavf_adapter *adapter = NULL; 4905 struct iavf_hw *hw = NULL; 4906 int err; 4907 4908 err = pci_enable_device(pdev); 4909 if (err) 4910 return err; 4911 4912 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 4913 if (err) { 4914 dev_err(&pdev->dev, 4915 "DMA configuration failed: 0x%x\n", err); 4916 goto err_dma; 4917 } 4918 4919 err = pci_request_regions(pdev, iavf_driver_name); 4920 if (err) { 4921 dev_err(&pdev->dev, 4922 "pci_request_regions failed 0x%x\n", err); 4923 goto err_pci_reg; 4924 } 4925 4926 pci_set_master(pdev); 4927 4928 netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter), 4929 IAVF_MAX_REQ_QUEUES); 4930 if (!netdev) { 4931 err = -ENOMEM; 4932 goto err_alloc_etherdev; 4933 } 4934 4935 SET_NETDEV_DEV(netdev, &pdev->dev); 4936 4937 pci_set_drvdata(pdev, netdev); 4938 adapter = netdev_priv(netdev); 4939 4940 adapter->netdev = netdev; 4941 adapter->pdev = pdev; 4942 4943 hw = &adapter->hw; 4944 hw->back = adapter; 4945 4946 adapter->wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, 4947 iavf_driver_name); 4948 if (!adapter->wq) { 4949 err = -ENOMEM; 4950 goto err_alloc_wq; 4951 } 4952 4953 adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1; 4954 iavf_change_state(adapter, __IAVF_STARTUP); 4955 4956 /* Call save state here because it relies on the adapter struct. */ 4957 pci_save_state(pdev); 4958 4959 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), 4960 pci_resource_len(pdev, 0)); 4961 if (!hw->hw_addr) { 4962 err = -EIO; 4963 goto err_ioremap; 4964 } 4965 hw->vendor_id = pdev->vendor; 4966 hw->device_id = pdev->device; 4967 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); 4968 hw->subsystem_vendor_id = pdev->subsystem_vendor; 4969 hw->subsystem_device_id = pdev->subsystem_device; 4970 hw->bus.device = PCI_SLOT(pdev->devfn); 4971 hw->bus.func = PCI_FUNC(pdev->devfn); 4972 hw->bus.bus_id = pdev->bus->number; 4973 4974 /* set up the locks for the AQ, do this only once in probe 4975 * and destroy them only once in remove 4976 */ 4977 mutex_init(&adapter->crit_lock); 4978 mutex_init(&hw->aq.asq_mutex); 4979 mutex_init(&hw->aq.arq_mutex); 4980 4981 spin_lock_init(&adapter->mac_vlan_list_lock); 4982 spin_lock_init(&adapter->cloud_filter_list_lock); 4983 spin_lock_init(&adapter->fdir_fltr_lock); 4984 spin_lock_init(&adapter->adv_rss_lock); 4985 spin_lock_init(&adapter->current_netdev_promisc_flags_lock); 4986 4987 INIT_LIST_HEAD(&adapter->mac_filter_list); 4988 INIT_LIST_HEAD(&adapter->vlan_filter_list); 4989 INIT_LIST_HEAD(&adapter->cloud_filter_list); 4990 INIT_LIST_HEAD(&adapter->fdir_list_head); 4991 INIT_LIST_HEAD(&adapter->adv_rss_list_head); 4992 4993 INIT_WORK(&adapter->reset_task, iavf_reset_task); 4994 INIT_WORK(&adapter->adminq_task, iavf_adminq_task); 4995 INIT_WORK(&adapter->finish_config, iavf_finish_config); 4996 INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task); 4997 4998 /* Setup the wait queue for indicating transition to down status */ 4999 init_waitqueue_head(&adapter->down_waitqueue); 5000 5001 /* Setup the wait queue for indicating transition to running state */ 5002 init_waitqueue_head(&adapter->reset_waitqueue); 5003 5004 /* Setup the wait queue for indicating virtchannel events */ 5005 init_waitqueue_head(&adapter->vc_waitqueue); 5006 5007 
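/* Kick off the init state machine from the watchdog; the delay scales with the PCI function number so that VFs sharing a device do not all start at once. */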
queue_delayed_work(adapter->wq, &adapter->watchdog_task, 5008 msecs_to_jiffies(5 * (pdev->devfn & 0x07))); 5009 /* Initialization goes on in the work. Do not add more of it below. */ 5010 return 0; 5011 5012 err_ioremap: 5013 destroy_workqueue(adapter->wq); 5014 err_alloc_wq: 5015 free_netdev(netdev); 5016 err_alloc_etherdev: 5017 pci_release_regions(pdev); 5018 err_pci_reg: 5019 err_dma: 5020 pci_disable_device(pdev); 5021 return err; 5022 } 5023 5024 /** 5025 * iavf_suspend - Power management suspend routine 5026 * @dev_d: device info pointer 5027 * 5028 * Called when the system (VM) is entering sleep/suspend. 5029 **/ 5030 static int iavf_suspend(struct device *dev_d) 5031 { 5032 struct net_device *netdev = dev_get_drvdata(dev_d); 5033 struct iavf_adapter *adapter = netdev_priv(netdev); 5034 5035 netif_device_detach(netdev); 5036 5037 mutex_lock(&adapter->crit_lock); 5038 5039 if (netif_running(netdev)) { 5040 rtnl_lock(); 5041 iavf_down(adapter); 5042 rtnl_unlock(); 5043 } 5044 iavf_free_misc_irq(adapter); 5045 iavf_reset_interrupt_capability(adapter); 5046 5047 mutex_unlock(&adapter->crit_lock); 5048 5049 return 0; 5050 } 5051 5052 /** 5053 * iavf_resume - Power management resume routine 5054 * @dev_d: device info pointer 5055 * 5056 * Called when the system (VM) is resumed from sleep/suspend. 5057 **/ 5058 static int iavf_resume(struct device *dev_d) 5059 { 5060 struct pci_dev *pdev = to_pci_dev(dev_d); 5061 struct iavf_adapter *adapter; 5062 u32 err; 5063 5064 adapter = iavf_pdev_to_adapter(pdev); 5065 5066 pci_set_master(pdev); 5067 5068 rtnl_lock(); 5069 err = iavf_set_interrupt_capability(adapter); 5070 if (err) { 5071 rtnl_unlock(); 5072 dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n"); 5073 return err; 5074 } 5075 err = iavf_request_misc_irq(adapter); 5076 rtnl_unlock(); 5077 if (err) { 5078 dev_err(&pdev->dev, "Cannot get interrupt vector.\n"); 5079 return err; 5080 } 5081 5082 queue_work(adapter->wq, &adapter->reset_task); 5083 5084 netif_device_attach(adapter->netdev); 5085 5086 return err; 5087 } 5088 5089 /** 5090 * iavf_remove - Device Removal Routine 5091 * @pdev: PCI device information struct 5092 * 5093 * iavf_remove is called by the PCI subsystem to alert the driver 5094 * that it should release a PCI device. The could be caused by a 5095 * Hot-Plug event, or because the driver is going to be removed from 5096 * memory. 5097 **/ 5098 static void iavf_remove(struct pci_dev *pdev) 5099 { 5100 struct iavf_fdir_fltr *fdir, *fdirtmp; 5101 struct iavf_vlan_filter *vlf, *vlftmp; 5102 struct iavf_cloud_filter *cf, *cftmp; 5103 struct iavf_adv_rss *rss, *rsstmp; 5104 struct iavf_mac_filter *f, *ftmp; 5105 struct iavf_adapter *adapter; 5106 struct net_device *netdev; 5107 struct iavf_hw *hw; 5108 5109 /* Don't proceed with remove if netdev is already freed */ 5110 netdev = pci_get_drvdata(pdev); 5111 if (!netdev) 5112 return; 5113 5114 adapter = iavf_pdev_to_adapter(pdev); 5115 hw = &adapter->hw; 5116 5117 if (test_and_set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) 5118 return; 5119 5120 /* Wait until port initialization is complete. 5121 * There are flows where register/unregister netdev may race. 
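 * The loop below polls the adapter state under crit_lock and proceeds only once it has settled in RUNNING, DOWN or INIT_FAILED, or returns early if a remove already completed.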
5122 */ 5123 while (1) { 5124 mutex_lock(&adapter->crit_lock); 5125 if (adapter->state == __IAVF_RUNNING || 5126 adapter->state == __IAVF_DOWN || 5127 adapter->state == __IAVF_INIT_FAILED) { 5128 mutex_unlock(&adapter->crit_lock); 5129 break; 5130 } 5131 /* Simply return if we already went through iavf_shutdown */ 5132 if (adapter->state == __IAVF_REMOVE) { 5133 mutex_unlock(&adapter->crit_lock); 5134 return; 5135 } 5136 5137 mutex_unlock(&adapter->crit_lock); 5138 usleep_range(500, 1000); 5139 } 5140 cancel_delayed_work_sync(&adapter->watchdog_task); 5141 cancel_work_sync(&adapter->finish_config); 5142 5143 if (netdev->reg_state == NETREG_REGISTERED) 5144 unregister_netdev(netdev); 5145 5146 mutex_lock(&adapter->crit_lock); 5147 dev_info(&adapter->pdev->dev, "Removing device\n"); 5148 iavf_change_state(adapter, __IAVF_REMOVE); 5149 5150 iavf_request_reset(adapter); 5151 msleep(50); 5152 /* If the FW isn't responding, kick it once, but only once. */ 5153 if (!iavf_asq_done(hw)) { 5154 iavf_request_reset(adapter); 5155 msleep(50); 5156 } 5157 5158 iavf_misc_irq_disable(adapter); 5159 /* Shut down all the garbage mashers on the detention level */ 5160 cancel_work_sync(&adapter->reset_task); 5161 cancel_delayed_work_sync(&adapter->watchdog_task); 5162 cancel_work_sync(&adapter->adminq_task); 5163 5164 adapter->aq_required = 0; 5165 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; 5166 5167 iavf_free_all_tx_resources(adapter); 5168 iavf_free_all_rx_resources(adapter); 5169 iavf_free_misc_irq(adapter); 5170 iavf_free_interrupt_scheme(adapter); 5171 5172 iavf_free_rss(adapter); 5173 5174 if (hw->aq.asq.count) 5175 iavf_shutdown_adminq(hw); 5176 5177 /* destroy the locks only once, here */ 5178 mutex_destroy(&hw->aq.arq_mutex); 5179 mutex_destroy(&hw->aq.asq_mutex); 5180 mutex_unlock(&adapter->crit_lock); 5181 mutex_destroy(&adapter->crit_lock); 5182 5183 iounmap(hw->hw_addr); 5184 pci_release_regions(pdev); 5185 kfree(adapter->vf_res); 5186 spin_lock_bh(&adapter->mac_vlan_list_lock); 5187 /* If we got removed before an up/down sequence, we've got a filter 5188 * hanging out there that we need to get rid of. 
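 * The VLAN, cloud, FDIR and advanced RSS filter lists below are drained for the same reason.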
5189 */ 5190 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 5191 list_del(&f->list); 5192 kfree(f); 5193 } 5194 list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list, 5195 list) { 5196 list_del(&vlf->list); 5197 kfree(vlf); 5198 } 5199 5200 spin_unlock_bh(&adapter->mac_vlan_list_lock); 5201 5202 spin_lock_bh(&adapter->cloud_filter_list_lock); 5203 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { 5204 list_del(&cf->list); 5205 kfree(cf); 5206 } 5207 spin_unlock_bh(&adapter->cloud_filter_list_lock); 5208 5209 spin_lock_bh(&adapter->fdir_fltr_lock); 5210 list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, list) { 5211 list_del(&fdir->list); 5212 kfree(fdir); 5213 } 5214 spin_unlock_bh(&adapter->fdir_fltr_lock); 5215 5216 spin_lock_bh(&adapter->adv_rss_lock); 5217 list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head, 5218 list) { 5219 list_del(&rss->list); 5220 kfree(rss); 5221 } 5222 spin_unlock_bh(&adapter->adv_rss_lock); 5223 5224 destroy_workqueue(adapter->wq); 5225 5226 pci_set_drvdata(pdev, NULL); 5227 5228 free_netdev(netdev); 5229 5230 pci_disable_device(pdev); 5231 } 5232 5233 /** 5234 * iavf_shutdown - Shutdown the device in preparation for a reboot 5235 * @pdev: pci device structure 5236 **/ 5237 static void iavf_shutdown(struct pci_dev *pdev) 5238 { 5239 iavf_remove(pdev); 5240 5241 if (system_state == SYSTEM_POWER_OFF) 5242 pci_set_power_state(pdev, PCI_D3hot); 5243 } 5244 5245 static DEFINE_SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume); 5246 5247 static struct pci_driver iavf_driver = { 5248 .name = iavf_driver_name, 5249 .id_table = iavf_pci_tbl, 5250 .probe = iavf_probe, 5251 .remove = iavf_remove, 5252 .driver.pm = pm_sleep_ptr(&iavf_pm_ops), 5253 .shutdown = iavf_shutdown, 5254 }; 5255 5256 /** 5257 * iavf_init_module - Driver Registration Routine 5258 * 5259 * iavf_init_module is the first routine called when the driver is 5260 * loaded. All it does is register with the PCI subsystem. 5261 **/ 5262 static int __init iavf_init_module(void) 5263 { 5264 pr_info("iavf: %s\n", iavf_driver_string); 5265 5266 pr_info("%s\n", iavf_copyright); 5267 5268 return pci_register_driver(&iavf_driver); 5269 } 5270 5271 module_init(iavf_init_module); 5272 5273 /** 5274 * iavf_exit_module - Driver Exit Cleanup Routine 5275 * 5276 * iavf_exit_module is called just before the driver is removed 5277 * from memory. 5278 **/ 5279 static void __exit iavf_exit_module(void) 5280 { 5281 pci_unregister_driver(&iavf_driver); 5282 } 5283 5284 module_exit(iavf_exit_module); 5285 5286 /* iavf_main.c */ 5287