1 /******************************************************************************* 2 3 Intel 82599 Virtual Function driver 4 Copyright(c) 1999 - 2012 Intel Corporation. 5 6 This program is free software; you can redistribute it and/or modify it 7 under the terms and conditions of the GNU General Public License, 8 version 2, as published by the Free Software Foundation. 9 10 This program is distributed in the hope it will be useful, but WITHOUT 11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 more details. 14 15 You should have received a copy of the GNU General Public License along with 16 this program; if not, write to the Free Software Foundation, Inc., 17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 19 The full GNU General Public License is included in this distribution in 20 the file called "COPYING". 21 22 Contact Information: 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 26 *******************************************************************************/ 27 28 29 /****************************************************************************** 30 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code 31 ******************************************************************************/ 32 33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 34 35 #include <linux/types.h> 36 #include <linux/bitops.h> 37 #include <linux/module.h> 38 #include <linux/pci.h> 39 #include <linux/netdevice.h> 40 #include <linux/vmalloc.h> 41 #include <linux/string.h> 42 #include <linux/in.h> 43 #include <linux/ip.h> 44 #include <linux/tcp.h> 45 #include <linux/sctp.h> 46 #include <linux/ipv6.h> 47 #include <linux/slab.h> 48 #include <net/checksum.h> 49 #include <net/ip6_checksum.h> 50 #include <linux/ethtool.h> 51 #include <linux/if.h> 52 #include <linux/if_vlan.h> 53 #include <linux/prefetch.h> 54 55 #include "ixgbevf.h" 56 57 const char ixgbevf_driver_name[] = "ixgbevf"; 58 static const char ixgbevf_driver_string[] = 59 "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver"; 60 61 #define DRV_VERSION "2.7.12-k" 62 const char ixgbevf_driver_version[] = DRV_VERSION; 63 static char ixgbevf_copyright[] = 64 "Copyright (c) 2009 - 2012 Intel Corporation."; 65 66 static const struct ixgbevf_info *ixgbevf_info_tbl[] = { 67 [board_82599_vf] = &ixgbevf_82599_vf_info, 68 [board_X540_vf] = &ixgbevf_X540_vf_info, 69 }; 70 71 /* ixgbevf_pci_tbl - PCI Device ID Table 72 * 73 * Wildcard entries (PCI_ANY_ID) should come last 74 * Last entry must be all 0s 75 * 76 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 77 * Class, Class Mask, private data (not used) } 78 */ 79 static struct pci_device_id ixgbevf_pci_tbl[] = { 80 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), 81 board_82599_vf}, 82 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), 83 board_X540_vf}, 84 85 /* required last entry */ 86 {0, } 87 }; 88 MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl); 89 90 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 91 MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver"); 92 MODULE_LICENSE("GPL"); 93 MODULE_VERSION(DRV_VERSION); 94 95 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) 96 static int debug = -1; 97 module_param(debug, int, 0); 98 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 99 100 /* forward decls */ 101 static void ixgbevf_set_itr(struct 
ixgbevf_q_vector *q_vector); 102 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter); 103 104 static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw, 105 struct ixgbevf_ring *rx_ring, 106 u32 val) 107 { 108 /* 109 * Force memory writes to complete before letting h/w 110 * know there are new descriptors to fetch. (Only 111 * applicable for weak-ordered memory model archs, 112 * such as IA-64). 113 */ 114 wmb(); 115 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val); 116 } 117 118 /** 119 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors 120 * @adapter: pointer to adapter struct 121 * @direction: 0 for Rx, 1 for Tx, -1 for other causes 122 * @queue: queue to map the corresponding interrupt to 123 * @msix_vector: the vector to map to the corresponding queue 124 */ 125 static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction, 126 u8 queue, u8 msix_vector) 127 { 128 u32 ivar, index; 129 struct ixgbe_hw *hw = &adapter->hw; 130 if (direction == -1) { 131 /* other causes */ 132 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 133 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); 134 ivar &= ~0xFF; 135 ivar |= msix_vector; 136 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar); 137 } else { 138 /* tx or rx causes */ 139 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 140 index = ((16 * (queue & 1)) + (8 * direction)); 141 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1)); 142 ivar &= ~(0xFF << index); 143 ivar |= (msix_vector << index); 144 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar); 145 } 146 } 147 148 static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring, 149 struct ixgbevf_tx_buffer 150 *tx_buffer_info) 151 { 152 if (tx_buffer_info->dma) { 153 if (tx_buffer_info->mapped_as_page) 154 dma_unmap_page(tx_ring->dev, 155 tx_buffer_info->dma, 156 tx_buffer_info->length, 157 DMA_TO_DEVICE); 158 else 159 dma_unmap_single(tx_ring->dev, 160 tx_buffer_info->dma, 161 tx_buffer_info->length, 162 DMA_TO_DEVICE); 163 tx_buffer_info->dma = 0; 164 } 165 if (tx_buffer_info->skb) { 166 dev_kfree_skb_any(tx_buffer_info->skb); 167 tx_buffer_info->skb = NULL; 168 } 169 tx_buffer_info->time_stamp = 0; 170 /* tx_buffer_info must be completely set up in the transmit path */ 171 } 172 173 #define IXGBE_MAX_TXD_PWR 14 174 #define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) 175 176 /* Tx Descriptors needed, worst case */ 177 #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD) 178 #define DESC_NEEDED (MAX_SKB_FRAGS + 4) 179 180 static void ixgbevf_tx_timeout(struct net_device *netdev); 181 182 /** 183 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes 184 * @q_vector: board private structure 185 * @tx_ring: tx ring to clean 186 **/ 187 static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, 188 struct ixgbevf_ring *tx_ring) 189 { 190 struct ixgbevf_adapter *adapter = q_vector->adapter; 191 union ixgbe_adv_tx_desc *tx_desc, *eop_desc; 192 struct ixgbevf_tx_buffer *tx_buffer_info; 193 unsigned int i, eop, count = 0; 194 unsigned int total_bytes = 0, total_packets = 0; 195 196 if (test_bit(__IXGBEVF_DOWN, &adapter->state)) 197 return true; 198 199 i = tx_ring->next_to_clean; 200 eop = tx_ring->tx_buffer_info[i].next_to_watch; 201 eop_desc = IXGBEVF_TX_DESC(tx_ring, eop); 202 203 while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) && 204 (count < tx_ring->count)) { 205 bool cleaned = false; 206 rmb(); /* read buffer_info after eop_desc */ 207 /* eop could change between read and DD-check */ 208 
if (unlikely(eop != tx_ring->tx_buffer_info[i].next_to_watch)) 209 goto cont_loop; 210 for ( ; !cleaned; count++) { 211 struct sk_buff *skb; 212 tx_desc = IXGBEVF_TX_DESC(tx_ring, i); 213 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 214 cleaned = (i == eop); 215 skb = tx_buffer_info->skb; 216 217 if (cleaned && skb) { 218 unsigned int segs, bytecount; 219 220 /* gso_segs is currently only valid for tcp */ 221 segs = skb_shinfo(skb)->gso_segs ?: 1; 222 /* multiply data chunks by size of headers */ 223 bytecount = ((segs - 1) * skb_headlen(skb)) + 224 skb->len; 225 total_packets += segs; 226 total_bytes += bytecount; 227 } 228 229 ixgbevf_unmap_and_free_tx_resource(tx_ring, 230 tx_buffer_info); 231 232 tx_desc->wb.status = 0; 233 234 i++; 235 if (i == tx_ring->count) 236 i = 0; 237 } 238 239 cont_loop: 240 eop = tx_ring->tx_buffer_info[i].next_to_watch; 241 eop_desc = IXGBEVF_TX_DESC(tx_ring, eop); 242 } 243 244 tx_ring->next_to_clean = i; 245 246 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) 247 if (unlikely(count && netif_carrier_ok(tx_ring->netdev) && 248 (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { 249 /* Make sure that anybody stopping the queue after this 250 * sees the new next_to_clean. 251 */ 252 smp_mb(); 253 if (__netif_subqueue_stopped(tx_ring->netdev, 254 tx_ring->queue_index) && 255 !test_bit(__IXGBEVF_DOWN, &adapter->state)) { 256 netif_wake_subqueue(tx_ring->netdev, 257 tx_ring->queue_index); 258 ++adapter->restart_queue; 259 } 260 } 261 262 u64_stats_update_begin(&tx_ring->syncp); 263 tx_ring->total_bytes += total_bytes; 264 tx_ring->total_packets += total_packets; 265 u64_stats_update_end(&tx_ring->syncp); 266 q_vector->tx.total_bytes += total_bytes; 267 q_vector->tx.total_packets += total_packets; 268 269 return count < tx_ring->count; 270 } 271 272 /** 273 * ixgbevf_receive_skb - Send a completed packet up the stack 274 * @q_vector: structure containing interrupt and ring information 275 * @skb: packet to send up 276 * @status: hardware indication of status of receive 277 * @rx_desc: rx descriptor 278 **/ 279 static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector, 280 struct sk_buff *skb, u8 status, 281 union ixgbe_adv_rx_desc *rx_desc) 282 { 283 struct ixgbevf_adapter *adapter = q_vector->adapter; 284 bool is_vlan = (status & IXGBE_RXD_STAT_VP); 285 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); 286 287 if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans)) 288 __vlan_hwaccel_put_tag(skb, tag); 289 290 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) 291 napi_gro_receive(&q_vector->napi, skb); 292 else 293 netif_rx(skb); 294 } 295 296 /** 297 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum 298 * @ring: pointer to Rx descriptor ring structure 299 * @status_err: hardware indication of status of receive 300 * @skb: skb currently being received and modified 301 **/ 302 static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring, 303 u32 status_err, struct sk_buff *skb) 304 { 305 skb_checksum_none_assert(skb); 306 307 /* Rx csum disabled */ 308 if (!(ring->netdev->features & NETIF_F_RXCSUM)) 309 return; 310 311 /* if IP and error */ 312 if ((status_err & IXGBE_RXD_STAT_IPCS) && 313 (status_err & IXGBE_RXDADV_ERR_IPE)) { 314 ring->hw_csum_rx_error++; 315 return; 316 } 317 318 if (!(status_err & IXGBE_RXD_STAT_L4CS)) 319 return; 320 321 if (status_err & IXGBE_RXDADV_ERR_TCPE) { 322 ring->hw_csum_rx_error++; 323 return; 324 } 325 326 /* It must be a TCP or UDP packet with a valid checksum */ 327 skb->ip_summed = 
CHECKSUM_UNNECESSARY; 328 ring->hw_csum_rx_good++; 329 } 330 331 /** 332 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split 333 * @adapter: address of board private structure 334 **/ 335 static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter, 336 struct ixgbevf_ring *rx_ring, 337 int cleaned_count) 338 { 339 struct pci_dev *pdev = adapter->pdev; 340 union ixgbe_adv_rx_desc *rx_desc; 341 struct ixgbevf_rx_buffer *bi; 342 unsigned int i = rx_ring->next_to_use; 343 344 bi = &rx_ring->rx_buffer_info[i]; 345 346 while (cleaned_count--) { 347 rx_desc = IXGBEVF_RX_DESC(rx_ring, i); 348 349 if (!bi->skb) { 350 struct sk_buff *skb; 351 352 skb = netdev_alloc_skb_ip_align(rx_ring->netdev, 353 rx_ring->rx_buf_len); 354 if (!skb) { 355 adapter->alloc_rx_buff_failed++; 356 goto no_buffers; 357 } 358 bi->skb = skb; 359 360 bi->dma = dma_map_single(&pdev->dev, skb->data, 361 rx_ring->rx_buf_len, 362 DMA_FROM_DEVICE); 363 if (dma_mapping_error(&pdev->dev, bi->dma)) { 364 dev_kfree_skb(skb); 365 bi->skb = NULL; 366 dev_err(&pdev->dev, "RX DMA map failed\n"); 367 break; 368 } 369 } 370 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); 371 372 i++; 373 if (i == rx_ring->count) 374 i = 0; 375 bi = &rx_ring->rx_buffer_info[i]; 376 } 377 378 no_buffers: 379 if (rx_ring->next_to_use != i) { 380 rx_ring->next_to_use = i; 381 ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i); 382 } 383 } 384 385 static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter, 386 u32 qmask) 387 { 388 struct ixgbe_hw *hw = &adapter->hw; 389 390 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask); 391 } 392 393 static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, 394 struct ixgbevf_ring *rx_ring, 395 int budget) 396 { 397 struct ixgbevf_adapter *adapter = q_vector->adapter; 398 struct pci_dev *pdev = adapter->pdev; 399 union ixgbe_adv_rx_desc *rx_desc, *next_rxd; 400 struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer; 401 struct sk_buff *skb; 402 unsigned int i; 403 u32 len, staterr; 404 int cleaned_count = 0; 405 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 406 407 i = rx_ring->next_to_clean; 408 rx_desc = IXGBEVF_RX_DESC(rx_ring, i); 409 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 410 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 411 412 while (staterr & IXGBE_RXD_STAT_DD) { 413 if (!budget) 414 break; 415 budget--; 416 417 rmb(); /* read descriptor and rx_buffer_info after status DD */ 418 len = le16_to_cpu(rx_desc->wb.upper.length); 419 skb = rx_buffer_info->skb; 420 prefetch(skb->data - NET_IP_ALIGN); 421 rx_buffer_info->skb = NULL; 422 423 if (rx_buffer_info->dma) { 424 dma_unmap_single(&pdev->dev, rx_buffer_info->dma, 425 rx_ring->rx_buf_len, 426 DMA_FROM_DEVICE); 427 rx_buffer_info->dma = 0; 428 skb_put(skb, len); 429 } 430 431 i++; 432 if (i == rx_ring->count) 433 i = 0; 434 435 next_rxd = IXGBEVF_RX_DESC(rx_ring, i); 436 prefetch(next_rxd); 437 cleaned_count++; 438 439 next_buffer = &rx_ring->rx_buffer_info[i]; 440 441 if (!(staterr & IXGBE_RXD_STAT_EOP)) { 442 skb->next = next_buffer->skb; 443 IXGBE_CB(skb->next)->prev = skb; 444 adapter->non_eop_descs++; 445 goto next_desc; 446 } 447 448 /* we should not be chaining buffers, if we did drop the skb */ 449 if (IXGBE_CB(skb)->prev) { 450 do { 451 struct sk_buff *this = skb; 452 skb = IXGBE_CB(skb)->prev; 453 dev_kfree_skb(this); 454 } while (skb); 455 goto next_desc; 456 } 457 458 /* ERR_MASK will only have valid bits if EOP set */ 459 if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) { 460 
dev_kfree_skb_irq(skb); 461 goto next_desc; 462 } 463 464 ixgbevf_rx_checksum(rx_ring, staterr, skb); 465 466 /* probably a little skewed due to removing CRC */ 467 total_rx_bytes += skb->len; 468 total_rx_packets++; 469 470 /* 471 * Work around issue of some types of VM to VM loop back 472 * packets not getting split correctly 473 */ 474 if (staterr & IXGBE_RXD_STAT_LB) { 475 u32 header_fixup_len = skb_headlen(skb); 476 if (header_fixup_len < 14) 477 skb_push(skb, header_fixup_len); 478 } 479 skb->protocol = eth_type_trans(skb, rx_ring->netdev); 480 481 /* Workaround hardware that can't do proper VEPA multicast 482 * source pruning. 483 */ 484 if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) && 485 !(compare_ether_addr(adapter->netdev->dev_addr, 486 eth_hdr(skb)->h_source))) { 487 dev_kfree_skb_irq(skb); 488 goto next_desc; 489 } 490 491 ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc); 492 493 next_desc: 494 rx_desc->wb.upper.status_error = 0; 495 496 /* return some buffers to hardware, one at a time is too slow */ 497 if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) { 498 ixgbevf_alloc_rx_buffers(adapter, rx_ring, 499 cleaned_count); 500 cleaned_count = 0; 501 } 502 503 /* use prefetched values */ 504 rx_desc = next_rxd; 505 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 506 507 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 508 } 509 510 rx_ring->next_to_clean = i; 511 cleaned_count = IXGBE_DESC_UNUSED(rx_ring); 512 513 if (cleaned_count) 514 ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count); 515 516 u64_stats_update_begin(&rx_ring->syncp); 517 rx_ring->total_packets += total_rx_packets; 518 rx_ring->total_bytes += total_rx_bytes; 519 u64_stats_update_end(&rx_ring->syncp); 520 q_vector->rx.total_packets += total_rx_packets; 521 q_vector->rx.total_bytes += total_rx_bytes; 522 523 return !!budget; 524 } 525 526 /** 527 * ixgbevf_poll - NAPI polling callback 528 * @napi: napi struct with our device's info in it 529 * @budget: amount of work the driver is allowed to do this pass, in packets 530 * 531 * This function will clean one or more rings associated with a 532 * q_vector.
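 *
 * Tx rings attached to the vector are cleaned first; the Rx budget is
 * then split evenly across the vector's Rx rings (never below one packet
 * per ring). If any ring reports unfinished work the full budget is
 * returned so NAPI keeps polling; otherwise polling is completed and the
 * vector's interrupt is re-enabled.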
533 **/ 534 static int ixgbevf_poll(struct napi_struct *napi, int budget) 535 { 536 struct ixgbevf_q_vector *q_vector = 537 container_of(napi, struct ixgbevf_q_vector, napi); 538 struct ixgbevf_adapter *adapter = q_vector->adapter; 539 struct ixgbevf_ring *ring; 540 int per_ring_budget; 541 bool clean_complete = true; 542 543 ixgbevf_for_each_ring(ring, q_vector->tx) 544 clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring); 545 546 /* attempt to distribute budget to each queue fairly, but don't allow 547 * the budget to go below 1 because we'll exit polling */ 548 if (q_vector->rx.count > 1) 549 per_ring_budget = max(budget/q_vector->rx.count, 1); 550 else 551 per_ring_budget = budget; 552 553 adapter->flags |= IXGBE_FLAG_IN_NETPOLL; 554 ixgbevf_for_each_ring(ring, q_vector->rx) 555 clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring, 556 per_ring_budget); 557 adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL; 558 559 /* If all work not completed, return budget and keep polling */ 560 if (!clean_complete) 561 return budget; 562 /* all work done, exit the polling mode */ 563 napi_complete(napi); 564 if (adapter->rx_itr_setting & 1) 565 ixgbevf_set_itr(q_vector); 566 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) 567 ixgbevf_irq_enable_queues(adapter, 568 1 << q_vector->v_idx); 569 570 return 0; 571 } 572 573 /** 574 * ixgbevf_write_eitr - write VTEITR register in hardware specific way 575 * @q_vector: structure containing interrupt and ring information 576 */ 577 static void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector) 578 { 579 struct ixgbevf_adapter *adapter = q_vector->adapter; 580 struct ixgbe_hw *hw = &adapter->hw; 581 int v_idx = q_vector->v_idx; 582 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR; 583 584 /* 585 * set the WDIS bit to not clear the timer bits and cause an 586 * immediate assertion of the interrupt 587 */ 588 itr_reg |= IXGBE_EITR_CNT_WDIS; 589 590 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg); 591 } 592 593 /** 594 * ixgbevf_configure_msix - Configure MSI-X hardware 595 * @adapter: board private structure 596 * 597 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X 598 * interrupts. 599 **/ 600 static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter) 601 { 602 struct ixgbevf_q_vector *q_vector; 603 int q_vectors, v_idx; 604 605 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 606 adapter->eims_enable_mask = 0; 607 608 /* 609 * Populate the IVAR table and set the ITR values to the 610 * corresponding register. 
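 * Each VTIVAR register holds four one-byte entries (an Rx and a Tx
 * cause for a pair of queues); ixgbevf_set_ivar() selects the byte at
 * bit offset 16 * (queue & 1) + 8 * direction and ORs in
 * IXGBE_IVAR_ALLOC_VAL, so e.g. the Rx cause of queue 3 lands in bits
 * 23:16 of VTIVAR(1). The "other" (mailbox/link) cause is routed
 * through VTIVAR_MISC instead.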
611 */ 612 for (v_idx = 0; v_idx < q_vectors; v_idx++) { 613 struct ixgbevf_ring *ring; 614 q_vector = adapter->q_vector[v_idx]; 615 616 ixgbevf_for_each_ring(ring, q_vector->rx) 617 ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx); 618 619 ixgbevf_for_each_ring(ring, q_vector->tx) 620 ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx); 621 622 if (q_vector->tx.ring && !q_vector->rx.ring) { 623 /* tx only vector */ 624 if (adapter->tx_itr_setting == 1) 625 q_vector->itr = IXGBE_10K_ITR; 626 else 627 q_vector->itr = adapter->tx_itr_setting; 628 } else { 629 /* rx or rx/tx vector */ 630 if (adapter->rx_itr_setting == 1) 631 q_vector->itr = IXGBE_20K_ITR; 632 else 633 q_vector->itr = adapter->rx_itr_setting; 634 } 635 636 /* add q_vector eims value to global eims_enable_mask */ 637 adapter->eims_enable_mask |= 1 << v_idx; 638 639 ixgbevf_write_eitr(q_vector); 640 } 641 642 ixgbevf_set_ivar(adapter, -1, 1, v_idx); 643 /* setup eims_other and add value to global eims_enable_mask */ 644 adapter->eims_other = 1 << v_idx; 645 adapter->eims_enable_mask |= adapter->eims_other; 646 } 647 648 enum latency_range { 649 lowest_latency = 0, 650 low_latency = 1, 651 bulk_latency = 2, 652 latency_invalid = 255 653 }; 654 655 /** 656 * ixgbevf_update_itr - update the dynamic ITR value based on statistics 657 * @q_vector: structure containing interrupt and ring information 658 * @ring_container: structure containing ring performance data 659 * 660 * Stores a new ITR value based on packets and byte 661 * counts during the last interrupt. The advantage of per interrupt 662 * computation is faster updates and more accurate ITR for the current 663 * traffic pattern. Constants in this function were computed 664 * based on theoretical maximum wire speed and thresholds were set based 665 * on testing data as well as attempting to minimize response time 666 * while increasing bulk throughput. 667 **/ 668 static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector, 669 struct ixgbevf_ring_container *ring_container) 670 { 671 int bytes = ring_container->total_bytes; 672 int packets = ring_container->total_packets; 673 u32 timepassed_us; 674 u64 bytes_perint; 675 u8 itr_setting = ring_container->itr; 676 677 if (packets == 0) 678 return; 679 680 /* simple throttlerate management 681 * 0-20MB/s lowest (100000 ints/s) 682 * 20-100MB/s low (20000 ints/s) 683 * 100-1249MB/s bulk (8000 ints/s) 684 */ 685 /* what was last interrupt timeslice? 
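 * (q_vector->itr holds the raw EITR encoding; shifting it right by two
 * yields the previous interval in microseconds, so bytes_perint below
 * is approximately bytes per usec, i.e. the MB/s figure used by the
 * thresholds above.)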
*/ 686 timepassed_us = q_vector->itr >> 2; 687 bytes_perint = bytes / timepassed_us; /* bytes/usec */ 688 689 switch (itr_setting) { 690 case lowest_latency: 691 if (bytes_perint > 10) 692 itr_setting = low_latency; 693 break; 694 case low_latency: 695 if (bytes_perint > 20) 696 itr_setting = bulk_latency; 697 else if (bytes_perint <= 10) 698 itr_setting = lowest_latency; 699 break; 700 case bulk_latency: 701 if (bytes_perint <= 20) 702 itr_setting = low_latency; 703 break; 704 } 705 706 /* clear work counters since we have the values we need */ 707 ring_container->total_bytes = 0; 708 ring_container->total_packets = 0; 709 710 /* write updated itr to ring container */ 711 ring_container->itr = itr_setting; 712 } 713 714 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector) 715 { 716 u32 new_itr = q_vector->itr; 717 u8 current_itr; 718 719 ixgbevf_update_itr(q_vector, &q_vector->tx); 720 ixgbevf_update_itr(q_vector, &q_vector->rx); 721 722 current_itr = max(q_vector->rx.itr, q_vector->tx.itr); 723 724 switch (current_itr) { 725 /* counts and packets in update_itr are dependent on these numbers */ 726 case lowest_latency: 727 new_itr = IXGBE_100K_ITR; 728 break; 729 case low_latency: 730 new_itr = IXGBE_20K_ITR; 731 break; 732 case bulk_latency: 733 default: 734 new_itr = IXGBE_8K_ITR; 735 break; 736 } 737 738 if (new_itr != q_vector->itr) { 739 /* do an exponential smoothing */ 740 new_itr = (10 * new_itr * q_vector->itr) / 741 ((9 * new_itr) + q_vector->itr); 742 743 /* save the algorithm value here */ 744 q_vector->itr = new_itr; 745 746 ixgbevf_write_eitr(q_vector); 747 } 748 } 749 750 static irqreturn_t ixgbevf_msix_other(int irq, void *data) 751 { 752 struct ixgbevf_adapter *adapter = data; 753 struct ixgbe_hw *hw = &adapter->hw; 754 755 hw->mac.get_link_status = 1; 756 757 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) 758 mod_timer(&adapter->watchdog_timer, jiffies); 759 760 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other); 761 762 return IRQ_HANDLED; 763 } 764 765 /** 766 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues) 767 * @irq: unused 768 * @data: pointer to our q_vector struct for this interrupt vector 769 **/ 770 static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data) 771 { 772 struct ixgbevf_q_vector *q_vector = data; 773 774 /* EIAM disabled interrupts (on this vector) for us */ 775 if (q_vector->rx.ring || q_vector->tx.ring) 776 napi_schedule(&q_vector->napi); 777 778 return IRQ_HANDLED; 779 } 780 781 static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx, 782 int r_idx) 783 { 784 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx]; 785 786 a->rx_ring[r_idx].next = q_vector->rx.ring; 787 q_vector->rx.ring = &a->rx_ring[r_idx]; 788 q_vector->rx.count++; 789 } 790 791 static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx, 792 int t_idx) 793 { 794 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx]; 795 796 a->tx_ring[t_idx].next = q_vector->tx.ring; 797 q_vector->tx.ring = &a->tx_ring[t_idx]; 798 q_vector->tx.count++; 799 } 800 801 /** 802 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors 803 * @adapter: board private structure to initialize 804 * 805 * This function maps descriptor rings to the queue-specific vectors 806 * we were allotted through the MSI-X enabling code. Ideally, we'd have 807 * one vector per ring/queue, but on a constrained vector budget, we 808 * group the rings as "efficiently" as possible. 
You would add new 809 * mapping configurations in here. 810 **/ 811 static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter) 812 { 813 int q_vectors; 814 int v_start = 0; 815 int rxr_idx = 0, txr_idx = 0; 816 int rxr_remaining = adapter->num_rx_queues; 817 int txr_remaining = adapter->num_tx_queues; 818 int i, j; 819 int rqpv, tqpv; 820 int err = 0; 821 822 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 823 824 /* 825 * The ideal configuration... 826 * We have enough vectors to map one per queue. 827 */ 828 if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) { 829 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++) 830 map_vector_to_rxq(adapter, v_start, rxr_idx); 831 832 for (; txr_idx < txr_remaining; v_start++, txr_idx++) 833 map_vector_to_txq(adapter, v_start, txr_idx); 834 goto out; 835 } 836 837 /* 838 * If we don't have enough vectors for a 1-to-1 839 * mapping, we'll have to group them so there are 840 * multiple queues per vector. 841 */ 842 /* Re-adjusting *qpv takes care of the remainder. */ 843 for (i = v_start; i < q_vectors; i++) { 844 rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i); 845 for (j = 0; j < rqpv; j++) { 846 map_vector_to_rxq(adapter, i, rxr_idx); 847 rxr_idx++; 848 rxr_remaining--; 849 } 850 } 851 for (i = v_start; i < q_vectors; i++) { 852 tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i); 853 for (j = 0; j < tqpv; j++) { 854 map_vector_to_txq(adapter, i, txr_idx); 855 txr_idx++; 856 txr_remaining--; 857 } 858 } 859 860 out: 861 return err; 862 } 863 864 /** 865 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts 866 * @adapter: board private structure 867 * 868 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests 869 * interrupts from the kernel. 870 **/ 871 static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter) 872 { 873 struct net_device *netdev = adapter->netdev; 874 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 875 int vector, err; 876 int ri = 0, ti = 0; 877 878 for (vector = 0; vector < q_vectors; vector++) { 879 struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector]; 880 struct msix_entry *entry = &adapter->msix_entries[vector]; 881 882 if (q_vector->tx.ring && q_vector->rx.ring) { 883 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 884 "%s-%s-%d", netdev->name, "TxRx", ri++); 885 ti++; 886 } else if (q_vector->rx.ring) { 887 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 888 "%s-%s-%d", netdev->name, "rx", ri++); 889 } else if (q_vector->tx.ring) { 890 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 891 "%s-%s-%d", netdev->name, "tx", ti++); 892 } else { 893 /* skip this unused q_vector */ 894 continue; 895 } 896 err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0, 897 q_vector->name, q_vector); 898 if (err) { 899 hw_dbg(&adapter->hw, 900 "request_irq failed for MSIX interrupt " 901 "Error: %d\n", err); 902 goto free_queue_irqs; 903 } 904 } 905 906 err = request_irq(adapter->msix_entries[vector].vector, 907 &ixgbevf_msix_other, 0, netdev->name, adapter); 908 if (err) { 909 hw_dbg(&adapter->hw, 910 "request_irq for msix_other failed: %d\n", err); 911 goto free_queue_irqs; 912 } 913 914 return 0; 915 916 free_queue_irqs: 917 while (vector) { 918 vector--; 919 free_irq(adapter->msix_entries[vector].vector, 920 adapter->q_vector[vector]); 921 } 922 pci_disable_msix(adapter->pdev); 923 kfree(adapter->msix_entries); 924 adapter->msix_entries = NULL; 925 return err; 926 } 927 928 static inline void ixgbevf_reset_q_vectors(struct 
ixgbevf_adapter *adapter) 929 { 930 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 931 932 for (i = 0; i < q_vectors; i++) { 933 struct ixgbevf_q_vector *q_vector = adapter->q_vector[i]; 934 q_vector->rx.ring = NULL; 935 q_vector->tx.ring = NULL; 936 q_vector->rx.count = 0; 937 q_vector->tx.count = 0; 938 } 939 } 940 941 /** 942 * ixgbevf_request_irq - initialize interrupts 943 * @adapter: board private structure 944 * 945 * Attempts to configure interrupts using the best available 946 * capabilities of the hardware and kernel. 947 **/ 948 static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter) 949 { 950 int err = 0; 951 952 err = ixgbevf_request_msix_irqs(adapter); 953 954 if (err) 955 hw_dbg(&adapter->hw, 956 "request_irq failed, Error %d\n", err); 957 958 return err; 959 } 960 961 static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter) 962 { 963 int i, q_vectors; 964 965 q_vectors = adapter->num_msix_vectors; 966 i = q_vectors - 1; 967 968 free_irq(adapter->msix_entries[i].vector, adapter); 969 i--; 970 971 for (; i >= 0; i--) { 972 /* free only the irqs that were actually requested */ 973 if (!adapter->q_vector[i]->rx.ring && 974 !adapter->q_vector[i]->tx.ring) 975 continue; 976 977 free_irq(adapter->msix_entries[i].vector, 978 adapter->q_vector[i]); 979 } 980 981 ixgbevf_reset_q_vectors(adapter); 982 } 983 984 /** 985 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC 986 * @adapter: board private structure 987 **/ 988 static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter) 989 { 990 struct ixgbe_hw *hw = &adapter->hw; 991 int i; 992 993 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0); 994 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0); 995 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0); 996 997 IXGBE_WRITE_FLUSH(hw); 998 999 for (i = 0; i < adapter->num_msix_vectors; i++) 1000 synchronize_irq(adapter->msix_entries[i].vector); 1001 } 1002 1003 /** 1004 * ixgbevf_irq_enable - Enable default interrupt generation settings 1005 * @adapter: board private structure 1006 **/ 1007 static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter) 1008 { 1009 struct ixgbe_hw *hw = &adapter->hw; 1010 1011 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask); 1012 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask); 1013 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask); 1014 } 1015 1016 /** 1017 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset 1018 * @adapter: board private structure 1019 * 1020 * Configure the Tx unit of the MAC after a reset. 1021 **/ 1022 static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter) 1023 { 1024 u64 tdba; 1025 struct ixgbe_hw *hw = &adapter->hw; 1026 u32 i, j, tdlen, txctrl; 1027 1028 /* Setup the HW Tx Head and Tail descriptor pointers */ 1029 for (i = 0; i < adapter->num_tx_queues; i++) { 1030 struct ixgbevf_ring *ring = &adapter->tx_ring[i]; 1031 j = ring->reg_idx; 1032 tdba = ring->dma; 1033 tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc); 1034 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j), 1035 (tdba & DMA_BIT_MASK(32))); 1036 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32)); 1037 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen); 1038 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0); 1039 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0); 1040 adapter->tx_ring[i].head = IXGBE_VFTDH(j); 1041 adapter->tx_ring[i].tail = IXGBE_VFTDT(j); 1042 /* Disable Tx Head Writeback RO bit, since this hoses 1043 * bookkeeping if things aren't delivered in order. 
1044 */ 1045 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j)); 1046 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; 1047 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl); 1048 } 1049 } 1050 1051 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 1052 1053 static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index) 1054 { 1055 struct ixgbevf_ring *rx_ring; 1056 struct ixgbe_hw *hw = &adapter->hw; 1057 u32 srrctl; 1058 1059 rx_ring = &adapter->rx_ring[index]; 1060 1061 srrctl = IXGBE_SRRCTL_DROP_EN; 1062 1063 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 1064 1065 srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >> 1066 IXGBE_SRRCTL_BSIZEPKT_SHIFT; 1067 1068 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl); 1069 } 1070 1071 static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter) 1072 { 1073 struct ixgbe_hw *hw = &adapter->hw; 1074 struct net_device *netdev = adapter->netdev; 1075 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 1076 int i; 1077 u16 rx_buf_len; 1078 1079 /* notify the PF of our intent to use this size of frame */ 1080 ixgbevf_rlpml_set_vf(hw, max_frame); 1081 1082 /* PF will allow an extra 4 bytes past for vlan tagged frames */ 1083 max_frame += VLAN_HLEN; 1084 1085 /* 1086 * Allocate buffer sizes that fit well into 32K and 1087 * take into account max frame size of 9.5K 1088 */ 1089 if ((hw->mac.type == ixgbe_mac_X540_vf) && 1090 (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)) 1091 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; 1092 else if (max_frame <= IXGBEVF_RXBUFFER_2K) 1093 rx_buf_len = IXGBEVF_RXBUFFER_2K; 1094 else if (max_frame <= IXGBEVF_RXBUFFER_4K) 1095 rx_buf_len = IXGBEVF_RXBUFFER_4K; 1096 else if (max_frame <= IXGBEVF_RXBUFFER_8K) 1097 rx_buf_len = IXGBEVF_RXBUFFER_8K; 1098 else 1099 rx_buf_len = IXGBEVF_RXBUFFER_10K; 1100 1101 for (i = 0; i < adapter->num_rx_queues; i++) 1102 adapter->rx_ring[i].rx_buf_len = rx_buf_len; 1103 } 1104 1105 /** 1106 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset 1107 * @adapter: board private structure 1108 * 1109 * Configure the Rx unit of the MAC after a reset. 
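 * Packet split stays disabled (VFPSRTYPE is cleared), receive buffer
 * sizes are chosen via ixgbevf_set_rx_buffer_len(), and each ring's
 * base address, length, head/tail pointers and SRRCTL are programmed.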
1110 **/ 1111 static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter) 1112 { 1113 u64 rdba; 1114 struct ixgbe_hw *hw = &adapter->hw; 1115 int i, j; 1116 u32 rdlen; 1117 1118 /* PSRTYPE must be initialized in 82599 */ 1119 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0); 1120 1121 /* set_rx_buffer_len must be called before ring initialization */ 1122 ixgbevf_set_rx_buffer_len(adapter); 1123 1124 rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc); 1125 /* Setup the HW Rx Head and Tail Descriptor Pointers and 1126 * the Base and Length of the Rx Descriptor Ring */ 1127 for (i = 0; i < adapter->num_rx_queues; i++) { 1128 rdba = adapter->rx_ring[i].dma; 1129 j = adapter->rx_ring[i].reg_idx; 1130 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j), 1131 (rdba & DMA_BIT_MASK(32))); 1132 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32)); 1133 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen); 1134 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0); 1135 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0); 1136 adapter->rx_ring[i].head = IXGBE_VFRDH(j); 1137 adapter->rx_ring[i].tail = IXGBE_VFRDT(j); 1138 1139 ixgbevf_configure_srrctl(adapter, j); 1140 } 1141 } 1142 1143 static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid) 1144 { 1145 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1146 struct ixgbe_hw *hw = &adapter->hw; 1147 int err; 1148 1149 spin_lock_bh(&adapter->mbx_lock); 1150 1151 /* add VID to filter table */ 1152 err = hw->mac.ops.set_vfta(hw, vid, 0, true); 1153 1154 spin_unlock_bh(&adapter->mbx_lock); 1155 1156 /* translate error return types so the error makes sense */ 1157 if (err == IXGBE_ERR_MBX) 1158 return -EIO; 1159 1160 if (err == IXGBE_ERR_INVALID_ARGUMENT) 1161 return -EACCES; 1162 1163 set_bit(vid, adapter->active_vlans); 1164 1165 return err; 1166 } 1167 1168 static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 1169 { 1170 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1171 struct ixgbe_hw *hw = &adapter->hw; 1172 int err = -EOPNOTSUPP; 1173 1174 spin_lock_bh(&adapter->mbx_lock); 1175 1176 /* remove VID from filter table */ 1177 err = hw->mac.ops.set_vfta(hw, vid, 0, false); 1178 1179 spin_unlock_bh(&adapter->mbx_lock); 1180 1181 clear_bit(vid, adapter->active_vlans); 1182 1183 return err; 1184 } 1185 1186 static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter) 1187 { 1188 u16 vid; 1189 1190 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) 1191 ixgbevf_vlan_rx_add_vid(adapter->netdev, vid); 1192 } 1193 1194 static int ixgbevf_write_uc_addr_list(struct net_device *netdev) 1195 { 1196 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1197 struct ixgbe_hw *hw = &adapter->hw; 1198 int count = 0; 1199 1200 if ((netdev_uc_count(netdev)) > 10) { 1201 pr_err("Too many unicast filters - No Space\n"); 1202 return -ENOSPC; 1203 } 1204 1205 if (!netdev_uc_empty(netdev)) { 1206 struct netdev_hw_addr *ha; 1207 netdev_for_each_uc_addr(ha, netdev) { 1208 hw->mac.ops.set_uc_addr(hw, ++count, ha->addr); 1209 udelay(200); 1210 } 1211 } else { 1212 /* 1213 * If the list is empty then send a message to the PF driver to 1214 * clear all macvlans on this VF. 1215 */ 1216 hw->mac.ops.set_uc_addr(hw, 0, NULL); 1217 } 1218 1219 return count; 1220 } 1221 1222 /** 1223 * ixgbevf_set_rx_mode - Multicast and unicast set 1224 * @netdev: network interface device structure 1225 * 1226 * The set_rx_mode entry point is called whenever the multicast address 1227 * list, unicast address list or the network interface flags are updated.
1228 * This routine is responsible for configuring the hardware for proper 1229 * multicast mode and configuring requested unicast filters. 1230 **/ 1231 static void ixgbevf_set_rx_mode(struct net_device *netdev) 1232 { 1233 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1234 struct ixgbe_hw *hw = &adapter->hw; 1235 1236 spin_lock_bh(&adapter->mbx_lock); 1237 1238 /* reprogram multicast list */ 1239 hw->mac.ops.update_mc_addr_list(hw, netdev); 1240 1241 ixgbevf_write_uc_addr_list(netdev); 1242 1243 spin_unlock_bh(&adapter->mbx_lock); 1244 } 1245 1246 static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter) 1247 { 1248 int q_idx; 1249 struct ixgbevf_q_vector *q_vector; 1250 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1251 1252 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 1253 q_vector = adapter->q_vector[q_idx]; 1254 napi_enable(&q_vector->napi); 1255 } 1256 } 1257 1258 static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter) 1259 { 1260 int q_idx; 1261 struct ixgbevf_q_vector *q_vector; 1262 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1263 1264 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 1265 q_vector = adapter->q_vector[q_idx]; 1266 napi_disable(&q_vector->napi); 1267 } 1268 } 1269 1270 static void ixgbevf_configure(struct ixgbevf_adapter *adapter) 1271 { 1272 struct net_device *netdev = adapter->netdev; 1273 int i; 1274 1275 ixgbevf_set_rx_mode(netdev); 1276 1277 ixgbevf_restore_vlan(adapter); 1278 1279 ixgbevf_configure_tx(adapter); 1280 ixgbevf_configure_rx(adapter); 1281 for (i = 0; i < adapter->num_rx_queues; i++) { 1282 struct ixgbevf_ring *ring = &adapter->rx_ring[i]; 1283 ixgbevf_alloc_rx_buffers(adapter, ring, 1284 IXGBE_DESC_UNUSED(ring)); 1285 } 1286 } 1287 1288 #define IXGBE_MAX_RX_DESC_POLL 10 1289 static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter, 1290 int rxr) 1291 { 1292 struct ixgbe_hw *hw = &adapter->hw; 1293 int j = adapter->rx_ring[rxr].reg_idx; 1294 int k; 1295 1296 for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) { 1297 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE) 1298 break; 1299 else 1300 msleep(1); 1301 } 1302 if (k >= IXGBE_MAX_RX_DESC_POLL) { 1303 hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d " 1304 "not set within the polling period\n", rxr); 1305 } 1306 1307 ixgbevf_release_rx_desc(hw, &adapter->rx_ring[rxr], 1308 adapter->rx_ring[rxr].count - 1); 1309 } 1310 1311 static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter) 1312 { 1313 /* Only save pre-reset stats if there are some */ 1314 if (adapter->stats.vfgprc || adapter->stats.vfgptc) { 1315 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc - 1316 adapter->stats.base_vfgprc; 1317 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc - 1318 adapter->stats.base_vfgptc; 1319 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc - 1320 adapter->stats.base_vfgorc; 1321 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc - 1322 adapter->stats.base_vfgotc; 1323 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc - 1324 adapter->stats.base_vfmprc; 1325 } 1326 } 1327 1328 static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter) 1329 { 1330 struct ixgbe_hw *hw = &adapter->hw; 1331 1332 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC); 1333 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB); 1334 adapter->stats.last_vfgorc |= 1335 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32); 1336 adapter->stats.last_vfgptc = 
IXGBE_READ_REG(hw, IXGBE_VFGPTC); 1337 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB); 1338 adapter->stats.last_vfgotc |= 1339 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32); 1340 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC); 1341 1342 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc; 1343 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc; 1344 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc; 1345 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc; 1346 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc; 1347 } 1348 1349 static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) 1350 { 1351 struct ixgbe_hw *hw = &adapter->hw; 1352 int api[] = { ixgbe_mbox_api_11, 1353 ixgbe_mbox_api_10, 1354 ixgbe_mbox_api_unknown }; 1355 int err = 0, idx = 0; 1356 1357 spin_lock_bh(&adapter->mbx_lock); 1358 1359 while (api[idx] != ixgbe_mbox_api_unknown) { 1360 err = ixgbevf_negotiate_api_version(hw, api[idx]); 1361 if (!err) 1362 break; 1363 idx++; 1364 } 1365 1366 spin_unlock_bh(&adapter->mbx_lock); 1367 } 1368 1369 static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter) 1370 { 1371 struct net_device *netdev = adapter->netdev; 1372 struct ixgbe_hw *hw = &adapter->hw; 1373 int i, j = 0; 1374 int num_rx_rings = adapter->num_rx_queues; 1375 u32 txdctl, rxdctl; 1376 1377 for (i = 0; i < adapter->num_tx_queues; i++) { 1378 j = adapter->tx_ring[i].reg_idx; 1379 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j)); 1380 /* enable WTHRESH=8 descriptors, to encourage burst writeback */ 1381 txdctl |= (8 << 16); 1382 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl); 1383 } 1384 1385 for (i = 0; i < adapter->num_tx_queues; i++) { 1386 j = adapter->tx_ring[i].reg_idx; 1387 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j)); 1388 txdctl |= IXGBE_TXDCTL_ENABLE; 1389 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl); 1390 } 1391 1392 for (i = 0; i < num_rx_rings; i++) { 1393 j = adapter->rx_ring[i].reg_idx; 1394 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)); 1395 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME; 1396 if (hw->mac.type == ixgbe_mac_X540_vf) { 1397 rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK; 1398 rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) | 1399 IXGBE_RXDCTL_RLPML_EN); 1400 } 1401 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl); 1402 ixgbevf_rx_desc_queue_enable(adapter, i); 1403 } 1404 1405 ixgbevf_configure_msix(adapter); 1406 1407 spin_lock_bh(&adapter->mbx_lock); 1408 1409 if (is_valid_ether_addr(hw->mac.addr)) 1410 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); 1411 else 1412 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0); 1413 1414 spin_unlock_bh(&adapter->mbx_lock); 1415 1416 clear_bit(__IXGBEVF_DOWN, &adapter->state); 1417 ixgbevf_napi_enable_all(adapter); 1418 1419 /* enable transmits */ 1420 netif_tx_start_all_queues(netdev); 1421 1422 ixgbevf_save_reset_stats(adapter); 1423 ixgbevf_init_last_counter_stats(adapter); 1424 1425 hw->mac.get_link_status = 1; 1426 mod_timer(&adapter->watchdog_timer, jiffies); 1427 } 1428 1429 static int ixgbevf_reset_queues(struct ixgbevf_adapter *adapter) 1430 { 1431 struct ixgbe_hw *hw = &adapter->hw; 1432 struct ixgbevf_ring *rx_ring; 1433 unsigned int def_q = 0; 1434 unsigned int num_tcs = 0; 1435 unsigned int num_rx_queues = 1; 1436 int err, i; 1437 1438 spin_lock_bh(&adapter->mbx_lock); 1439 1440 /* fetch queue configuration from the PF */ 1441 err = ixgbevf_get_queues(hw, &num_tcs, &def_q); 1442 1443 spin_unlock_bh(&adapter->mbx_lock); 1444 1445 if (err) 1446 return err; 1447 1448 if 
(num_tcs > 1) { 1449 /* update default Tx ring register index */ 1450 adapter->tx_ring[0].reg_idx = def_q; 1451 1452 /* we need as many queues as traffic classes */ 1453 num_rx_queues = num_tcs; 1454 } 1455 1456 /* nothing to do if we have the correct number of queues */ 1457 if (adapter->num_rx_queues == num_rx_queues) 1458 return 0; 1459 1460 /* allocate new rings */ 1461 rx_ring = kcalloc(num_rx_queues, 1462 sizeof(struct ixgbevf_ring), GFP_KERNEL); 1463 if (!rx_ring) 1464 return -ENOMEM; 1465 1466 /* setup ring fields */ 1467 for (i = 0; i < num_rx_queues; i++) { 1468 rx_ring[i].count = adapter->rx_ring_count; 1469 rx_ring[i].queue_index = i; 1470 rx_ring[i].reg_idx = i; 1471 rx_ring[i].dev = &adapter->pdev->dev; 1472 rx_ring[i].netdev = adapter->netdev; 1473 1474 /* allocate resources on the ring */ 1475 err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]); 1476 if (err) { 1477 while (i) { 1478 i--; 1479 ixgbevf_free_rx_resources(adapter, &rx_ring[i]); 1480 } 1481 kfree(rx_ring); 1482 return err; 1483 } 1484 } 1485 1486 /* free the existing rings and queues */ 1487 ixgbevf_free_all_rx_resources(adapter); 1488 adapter->num_rx_queues = 0; 1489 kfree(adapter->rx_ring); 1490 1491 /* move new rings into position on the adapter struct */ 1492 adapter->rx_ring = rx_ring; 1493 adapter->num_rx_queues = num_rx_queues; 1494 1495 /* reset ring to vector mapping */ 1496 ixgbevf_reset_q_vectors(adapter); 1497 ixgbevf_map_rings_to_vectors(adapter); 1498 1499 return 0; 1500 } 1501 1502 void ixgbevf_up(struct ixgbevf_adapter *adapter) 1503 { 1504 struct ixgbe_hw *hw = &adapter->hw; 1505 1506 ixgbevf_negotiate_api(adapter); 1507 1508 ixgbevf_reset_queues(adapter); 1509 1510 ixgbevf_configure(adapter); 1511 1512 ixgbevf_up_complete(adapter); 1513 1514 /* clear any pending interrupts, may auto mask */ 1515 IXGBE_READ_REG(hw, IXGBE_VTEICR); 1516 1517 ixgbevf_irq_enable(adapter); 1518 } 1519 1520 /** 1521 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue 1522 * @adapter: board private structure 1523 * @rx_ring: ring to free buffers from 1524 **/ 1525 static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter, 1526 struct ixgbevf_ring *rx_ring) 1527 { 1528 struct pci_dev *pdev = adapter->pdev; 1529 unsigned long size; 1530 unsigned int i; 1531 1532 if (!rx_ring->rx_buffer_info) 1533 return; 1534 1535 /* Free all the Rx ring sk_buffs */ 1536 for (i = 0; i < rx_ring->count; i++) { 1537 struct ixgbevf_rx_buffer *rx_buffer_info; 1538 1539 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 1540 if (rx_buffer_info->dma) { 1541 dma_unmap_single(&pdev->dev, rx_buffer_info->dma, 1542 rx_ring->rx_buf_len, 1543 DMA_FROM_DEVICE); 1544 rx_buffer_info->dma = 0; 1545 } 1546 if (rx_buffer_info->skb) { 1547 struct sk_buff *skb = rx_buffer_info->skb; 1548 rx_buffer_info->skb = NULL; 1549 do { 1550 struct sk_buff *this = skb; 1551 skb = IXGBE_CB(skb)->prev; 1552 dev_kfree_skb(this); 1553 } while (skb); 1554 } 1555 } 1556 1557 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; 1558 memset(rx_ring->rx_buffer_info, 0, size); 1559 1560 /* Zero out the descriptor ring */ 1561 memset(rx_ring->desc, 0, rx_ring->size); 1562 1563 rx_ring->next_to_clean = 0; 1564 rx_ring->next_to_use = 0; 1565 1566 if (rx_ring->head) 1567 writel(0, adapter->hw.hw_addr + rx_ring->head); 1568 if (rx_ring->tail) 1569 writel(0, adapter->hw.hw_addr + rx_ring->tail); 1570 } 1571 1572 /** 1573 * ixgbevf_clean_tx_ring - Free Tx Buffers 1574 * @adapter: board private structure 1575 * @tx_ring: ring to be cleaned 1576 **/ 1577 static void 
ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter, 1578 struct ixgbevf_ring *tx_ring) 1579 { 1580 struct ixgbevf_tx_buffer *tx_buffer_info; 1581 unsigned long size; 1582 unsigned int i; 1583 1584 if (!tx_ring->tx_buffer_info) 1585 return; 1586 1587 /* Free all the Tx ring sk_buffs */ 1588 for (i = 0; i < tx_ring->count; i++) { 1589 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 1590 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); 1591 } 1592 1593 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; 1594 memset(tx_ring->tx_buffer_info, 0, size); 1595 1596 memset(tx_ring->desc, 0, tx_ring->size); 1597 1598 tx_ring->next_to_use = 0; 1599 tx_ring->next_to_clean = 0; 1600 1601 if (tx_ring->head) 1602 writel(0, adapter->hw.hw_addr + tx_ring->head); 1603 if (tx_ring->tail) 1604 writel(0, adapter->hw.hw_addr + tx_ring->tail); 1605 } 1606 1607 /** 1608 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues 1609 * @adapter: board private structure 1610 **/ 1611 static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter) 1612 { 1613 int i; 1614 1615 for (i = 0; i < adapter->num_rx_queues; i++) 1616 ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]); 1617 } 1618 1619 /** 1620 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues 1621 * @adapter: board private structure 1622 **/ 1623 static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter) 1624 { 1625 int i; 1626 1627 for (i = 0; i < adapter->num_tx_queues; i++) 1628 ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]); 1629 } 1630 1631 void ixgbevf_down(struct ixgbevf_adapter *adapter) 1632 { 1633 struct net_device *netdev = adapter->netdev; 1634 struct ixgbe_hw *hw = &adapter->hw; 1635 u32 txdctl; 1636 int i, j; 1637 1638 /* signal that we are down to the interrupt handler */ 1639 set_bit(__IXGBEVF_DOWN, &adapter->state); 1640 /* disable receives */ 1641 1642 netif_tx_disable(netdev); 1643 1644 msleep(10); 1645 1646 netif_tx_stop_all_queues(netdev); 1647 1648 ixgbevf_irq_disable(adapter); 1649 1650 ixgbevf_napi_disable_all(adapter); 1651 1652 del_timer_sync(&adapter->watchdog_timer); 1653 /* can't call flush scheduled work here because it can deadlock 1654 * if linkwatch_event tries to acquire the rtnl_lock which we are 1655 * holding */ 1656 while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK) 1657 msleep(1); 1658 1659 /* disable transmits in the hardware now that interrupts are off */ 1660 for (i = 0; i < adapter->num_tx_queues; i++) { 1661 j = adapter->tx_ring[i].reg_idx; 1662 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j)); 1663 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), 1664 (txdctl & ~IXGBE_TXDCTL_ENABLE)); 1665 } 1666 1667 netif_carrier_off(netdev); 1668 1669 if (!pci_channel_offline(adapter->pdev)) 1670 ixgbevf_reset(adapter); 1671 1672 ixgbevf_clean_all_tx_rings(adapter); 1673 ixgbevf_clean_all_rx_rings(adapter); 1674 } 1675 1676 void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter) 1677 { 1678 WARN_ON(in_interrupt()); 1679 1680 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state)) 1681 msleep(1); 1682 1683 ixgbevf_down(adapter); 1684 ixgbevf_up(adapter); 1685 1686 clear_bit(__IXGBEVF_RESETTING, &adapter->state); 1687 } 1688 1689 void ixgbevf_reset(struct ixgbevf_adapter *adapter) 1690 { 1691 struct ixgbe_hw *hw = &adapter->hw; 1692 struct net_device *netdev = adapter->netdev; 1693 1694 if (hw->mac.ops.reset_hw(hw)) 1695 hw_dbg(hw, "PF still resetting\n"); 1696 else 1697 hw->mac.ops.init_hw(hw); 1698 1699 if (is_valid_ether_addr(adapter->hw.mac.addr)) { 1700 
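/* hw->mac.addr may have been refreshed by the PF during the reset above,
 * so mirror it into both the current and permanent netdev addresses.
 */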
memcpy(netdev->dev_addr, adapter->hw.mac.addr, 1701 netdev->addr_len); 1702 memcpy(netdev->perm_addr, adapter->hw.mac.addr, 1703 netdev->addr_len); 1704 } 1705 } 1706 1707 static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter, 1708 int vectors) 1709 { 1710 int err = 0; 1711 int vector_threshold; 1712 1713 /* We'll want at least 2 (vector_threshold): 1714 * 1) TxQ[0] + RxQ[0] handler 1715 * 2) Other (Link Status Change, etc.) 1716 */ 1717 vector_threshold = MIN_MSIX_COUNT; 1718 1719 /* The more we get, the more we will assign to Tx/Rx Cleanup 1720 * for the separate queues...where Rx Cleanup >= Tx Cleanup. 1721 * Right now, we simply care about how many we'll get; we'll 1722 * set them up later while requesting irq's. 1723 */ 1724 while (vectors >= vector_threshold) { 1725 err = pci_enable_msix(adapter->pdev, adapter->msix_entries, 1726 vectors); 1727 if (!err || err < 0) /* Success or a nasty failure. */ 1728 break; 1729 else /* err == number of vectors we should try again with */ 1730 vectors = err; 1731 } 1732 1733 if (vectors < vector_threshold) 1734 err = -ENOMEM; 1735 1736 if (err) { 1737 dev_err(&adapter->pdev->dev, 1738 "Unable to allocate MSI-X interrupts\n"); 1739 kfree(adapter->msix_entries); 1740 adapter->msix_entries = NULL; 1741 } else { 1742 /* 1743 * Adjust for only the vectors we'll use, which is minimum 1744 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of 1745 * vectors we were allocated. 1746 */ 1747 adapter->num_msix_vectors = vectors; 1748 } 1749 1750 return err; 1751 } 1752 1753 /** 1754 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent 1755 * @adapter: board private structure to initialize 1756 * 1757 * This is the top level queue allocation routine. The order here is very 1758 * important, starting with the "most" number of features turned on at once, 1759 * and ending with the smallest set of features. This way large combinations 1760 * can be allocated if they're turned on, and smaller combinations are the 1761 * fallthrough conditions. 1762 * 1763 **/ 1764 static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter) 1765 { 1766 /* Start with base case */ 1767 adapter->num_rx_queues = 1; 1768 adapter->num_tx_queues = 1; 1769 } 1770 1771 /** 1772 * ixgbevf_alloc_queues - Allocate memory for all rings 1773 * @adapter: board private structure to initialize 1774 * 1775 * We allocate one ring per queue at run-time since we don't know the 1776 * number of queues at compile-time. The polling_netdev array is 1777 * intended for Multiqueue, but should work fine with a single queue. 
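 * Only the ring bookkeeping (count, queue_index, reg_idx and the
 * dev/netdev back-pointers) is filled in here; the descriptor memory
 * itself is allocated later, e.g. by ixgbevf_setup_rx_resources().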
1778 **/ 1779 static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter) 1780 { 1781 int i; 1782 1783 adapter->tx_ring = kcalloc(adapter->num_tx_queues, 1784 sizeof(struct ixgbevf_ring), GFP_KERNEL); 1785 if (!adapter->tx_ring) 1786 goto err_tx_ring_allocation; 1787 1788 adapter->rx_ring = kcalloc(adapter->num_rx_queues, 1789 sizeof(struct ixgbevf_ring), GFP_KERNEL); 1790 if (!adapter->rx_ring) 1791 goto err_rx_ring_allocation; 1792 1793 for (i = 0; i < adapter->num_tx_queues; i++) { 1794 adapter->tx_ring[i].count = adapter->tx_ring_count; 1795 adapter->tx_ring[i].queue_index = i; 1796 /* reg_idx may be remapped later by DCB config */ 1797 adapter->tx_ring[i].reg_idx = i; 1798 adapter->tx_ring[i].dev = &adapter->pdev->dev; 1799 adapter->tx_ring[i].netdev = adapter->netdev; 1800 } 1801 1802 for (i = 0; i < adapter->num_rx_queues; i++) { 1803 adapter->rx_ring[i].count = adapter->rx_ring_count; 1804 adapter->rx_ring[i].queue_index = i; 1805 adapter->rx_ring[i].reg_idx = i; 1806 adapter->rx_ring[i].dev = &adapter->pdev->dev; 1807 adapter->rx_ring[i].netdev = adapter->netdev; 1808 } 1809 1810 return 0; 1811 1812 err_rx_ring_allocation: 1813 kfree(adapter->tx_ring); 1814 err_tx_ring_allocation: 1815 return -ENOMEM; 1816 } 1817 1818 /** 1819 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported 1820 * @adapter: board private structure to initialize 1821 * 1822 * Attempt to configure the interrupts using the best available 1823 * capabilities of the hardware and the kernel. 1824 **/ 1825 static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) 1826 { 1827 struct net_device *netdev = adapter->netdev; 1828 int err = 0; 1829 int vector, v_budget; 1830 1831 /* 1832 * It's easy to be greedy for MSI-X vectors, but it really 1833 * doesn't do us much good if we have a lot more vectors 1834 * than CPU's. So let's be conservative and only ask for 1835 * (roughly) the same number of vectors as there are CPU's. 1836 * The default is to use pairs of vectors. 1837 */ 1838 v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues); 1839 v_budget = min_t(int, v_budget, num_online_cpus()); 1840 v_budget += NON_Q_VECTORS; 1841 1842 /* A failure in MSI-X entry allocation isn't fatal, but it does 1843 * mean we disable MSI-X capabilities of the adapter. */ 1844 adapter->msix_entries = kcalloc(v_budget, 1845 sizeof(struct msix_entry), GFP_KERNEL); 1846 if (!adapter->msix_entries) { 1847 err = -ENOMEM; 1848 goto out; 1849 } 1850 1851 for (vector = 0; vector < v_budget; vector++) 1852 adapter->msix_entries[vector].entry = vector; 1853 1854 err = ixgbevf_acquire_msix_vectors(adapter, v_budget); 1855 if (err) 1856 goto out; 1857 1858 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); 1859 if (err) 1860 goto out; 1861 1862 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); 1863 1864 out: 1865 return err; 1866 } 1867 1868 /** 1869 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors 1870 * @adapter: board private structure to initialize 1871 * 1872 * We allocate one q_vector per queue interrupt. If allocation fails we 1873 * return -ENOMEM. 
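 * One q_vector is allocated per MSI-X queue vector and registered with
 * NAPI (ixgbevf_poll, weight 64); rings are attached to the vectors
 * separately by ixgbevf_map_rings_to_vectors().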
1874 **/ 1875 static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter) 1876 { 1877 int q_idx, num_q_vectors; 1878 struct ixgbevf_q_vector *q_vector; 1879 1880 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1881 1882 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 1883 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL); 1884 if (!q_vector) 1885 goto err_out; 1886 q_vector->adapter = adapter; 1887 q_vector->v_idx = q_idx; 1888 netif_napi_add(adapter->netdev, &q_vector->napi, 1889 ixgbevf_poll, 64); 1890 adapter->q_vector[q_idx] = q_vector; 1891 } 1892 1893 return 0; 1894 1895 err_out: 1896 while (q_idx) { 1897 q_idx--; 1898 q_vector = adapter->q_vector[q_idx]; 1899 netif_napi_del(&q_vector->napi); 1900 kfree(q_vector); 1901 adapter->q_vector[q_idx] = NULL; 1902 } 1903 return -ENOMEM; 1904 } 1905 1906 /** 1907 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors 1908 * @adapter: board private structure to initialize 1909 * 1910 * This function frees the memory allocated to the q_vectors. In addition if 1911 * NAPI is enabled it will delete any references to the NAPI struct prior 1912 * to freeing the q_vector. 1913 **/ 1914 static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter) 1915 { 1916 int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1917 1918 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 1919 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx]; 1920 1921 adapter->q_vector[q_idx] = NULL; 1922 netif_napi_del(&q_vector->napi); 1923 kfree(q_vector); 1924 } 1925 } 1926 1927 /** 1928 * ixgbevf_reset_interrupt_capability - Reset MSIX setup 1929 * @adapter: board private structure 1930 * 1931 **/ 1932 static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter) 1933 { 1934 pci_disable_msix(adapter->pdev); 1935 kfree(adapter->msix_entries); 1936 adapter->msix_entries = NULL; 1937 } 1938 1939 /** 1940 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init 1941 * @adapter: board private structure to initialize 1942 * 1943 **/ 1944 static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter) 1945 { 1946 int err; 1947 1948 /* Number of supported queues */ 1949 ixgbevf_set_num_queues(adapter); 1950 1951 err = ixgbevf_set_interrupt_capability(adapter); 1952 if (err) { 1953 hw_dbg(&adapter->hw, 1954 "Unable to setup interrupt capabilities\n"); 1955 goto err_set_interrupt; 1956 } 1957 1958 err = ixgbevf_alloc_q_vectors(adapter); 1959 if (err) { 1960 hw_dbg(&adapter->hw, "Unable to allocate memory for queue " 1961 "vectors\n"); 1962 goto err_alloc_q_vectors; 1963 } 1964 1965 err = ixgbevf_alloc_queues(adapter); 1966 if (err) { 1967 pr_err("Unable to allocate memory for queues\n"); 1968 goto err_alloc_queues; 1969 } 1970 1971 hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, " 1972 "Tx Queue count = %u\n", 1973 (adapter->num_rx_queues > 1) ? 
"Enabled" : 1974 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues); 1975 1976 set_bit(__IXGBEVF_DOWN, &adapter->state); 1977 1978 return 0; 1979 err_alloc_queues: 1980 ixgbevf_free_q_vectors(adapter); 1981 err_alloc_q_vectors: 1982 ixgbevf_reset_interrupt_capability(adapter); 1983 err_set_interrupt: 1984 return err; 1985 } 1986 1987 /** 1988 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings 1989 * @adapter: board private structure to clear interrupt scheme on 1990 * 1991 * We go through and clear interrupt specific resources and reset the structure 1992 * to pre-load conditions 1993 **/ 1994 static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter) 1995 { 1996 adapter->num_tx_queues = 0; 1997 adapter->num_rx_queues = 0; 1998 1999 ixgbevf_free_q_vectors(adapter); 2000 ixgbevf_reset_interrupt_capability(adapter); 2001 } 2002 2003 /** 2004 * ixgbevf_sw_init - Initialize general software structures 2005 * (struct ixgbevf_adapter) 2006 * @adapter: board private structure to initialize 2007 * 2008 * ixgbevf_sw_init initializes the Adapter private data structure. 2009 * Fields are initialized based on PCI device information and 2010 * OS network device settings (MTU size). 2011 **/ 2012 static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter) 2013 { 2014 struct ixgbe_hw *hw = &adapter->hw; 2015 struct pci_dev *pdev = adapter->pdev; 2016 int err; 2017 2018 /* PCI config space info */ 2019 2020 hw->vendor_id = pdev->vendor; 2021 hw->device_id = pdev->device; 2022 hw->revision_id = pdev->revision; 2023 hw->subsystem_vendor_id = pdev->subsystem_vendor; 2024 hw->subsystem_device_id = pdev->subsystem_device; 2025 2026 hw->mbx.ops.init_params(hw); 2027 2028 /* assume legacy case in which PF would only give VF 2 queues */ 2029 hw->mac.max_tx_queues = 2; 2030 hw->mac.max_rx_queues = 2; 2031 2032 err = hw->mac.ops.reset_hw(hw); 2033 if (err) { 2034 dev_info(&pdev->dev, 2035 "PF still in reset state, assigning new address\n"); 2036 eth_hw_addr_random(adapter->netdev); 2037 memcpy(adapter->hw.mac.addr, adapter->netdev->dev_addr, 2038 adapter->netdev->addr_len); 2039 } else { 2040 err = hw->mac.ops.init_hw(hw); 2041 if (err) { 2042 pr_err("init_shared_code failed: %d\n", err); 2043 goto out; 2044 } 2045 memcpy(adapter->netdev->dev_addr, adapter->hw.mac.addr, 2046 adapter->netdev->addr_len); 2047 } 2048 2049 /* lock to protect mailbox accesses */ 2050 spin_lock_init(&adapter->mbx_lock); 2051 2052 /* Enable dynamic interrupt throttling rates */ 2053 adapter->rx_itr_setting = 1; 2054 adapter->tx_itr_setting = 1; 2055 2056 /* set default ring sizes */ 2057 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD; 2058 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD; 2059 2060 set_bit(__IXGBEVF_DOWN, &adapter->state); 2061 return 0; 2062 2063 out: 2064 return err; 2065 } 2066 2067 #define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \ 2068 { \ 2069 u32 current_counter = IXGBE_READ_REG(hw, reg); \ 2070 if (current_counter < last_counter) \ 2071 counter += 0x100000000LL; \ 2072 last_counter = current_counter; \ 2073 counter &= 0xFFFFFFFF00000000LL; \ 2074 counter |= current_counter; \ 2075 } 2076 2077 #define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \ 2078 { \ 2079 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \ 2080 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \ 2081 u64 current_counter = (current_counter_msb << 32) | \ 2082 current_counter_lsb; \ 2083 if (current_counter < last_counter) \ 2084 counter += 0x1000000000LL; 
\ 2085 last_counter = current_counter; \ 2086 counter &= 0xFFFFFFF000000000LL; \ 2087 counter |= current_counter; \ 2088 } 2089 /** 2090 * ixgbevf_update_stats - Update the board statistics counters. 2091 * @adapter: board private structure 2092 **/ 2093 void ixgbevf_update_stats(struct ixgbevf_adapter *adapter) 2094 { 2095 struct ixgbe_hw *hw = &adapter->hw; 2096 int i; 2097 2098 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc, 2099 adapter->stats.vfgprc); 2100 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc, 2101 adapter->stats.vfgptc); 2102 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, 2103 adapter->stats.last_vfgorc, 2104 adapter->stats.vfgorc); 2105 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, 2106 adapter->stats.last_vfgotc, 2107 adapter->stats.vfgotc); 2108 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc, 2109 adapter->stats.vfmprc); 2110 2111 for (i = 0; i < adapter->num_rx_queues; i++) { 2112 adapter->hw_csum_rx_error += 2113 adapter->rx_ring[i].hw_csum_rx_error; 2114 adapter->hw_csum_rx_good += 2115 adapter->rx_ring[i].hw_csum_rx_good; 2116 adapter->rx_ring[i].hw_csum_rx_error = 0; 2117 adapter->rx_ring[i].hw_csum_rx_good = 0; 2118 } 2119 } 2120 2121 /** 2122 * ixgbevf_watchdog - Timer Call-back 2123 * @data: pointer to adapter cast into an unsigned long 2124 **/ 2125 static void ixgbevf_watchdog(unsigned long data) 2126 { 2127 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data; 2128 struct ixgbe_hw *hw = &adapter->hw; 2129 u32 eics = 0; 2130 int i; 2131 2132 /* 2133 * Do the watchdog outside of interrupt context due to the lovely 2134 * delays that some of the newer hardware requires 2135 */ 2136 2137 if (test_bit(__IXGBEVF_DOWN, &adapter->state)) 2138 goto watchdog_short_circuit; 2139 2140 /* get one bit for every active tx/rx interrupt vector */ 2141 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { 2142 struct ixgbevf_q_vector *qv = adapter->q_vector[i]; 2143 if (qv->rx.ring || qv->tx.ring) 2144 eics |= 1 << i; 2145 } 2146 2147 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics); 2148 2149 watchdog_short_circuit: 2150 schedule_work(&adapter->watchdog_task); 2151 } 2152 2153 /** 2154 * ixgbevf_tx_timeout - Respond to a Tx Hang 2155 * @netdev: network interface device structure 2156 **/ 2157 static void ixgbevf_tx_timeout(struct net_device *netdev) 2158 { 2159 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2160 2161 /* Do the reset outside of interrupt context */ 2162 schedule_work(&adapter->reset_task); 2163 } 2164 2165 static void ixgbevf_reset_task(struct work_struct *work) 2166 { 2167 struct ixgbevf_adapter *adapter; 2168 adapter = container_of(work, struct ixgbevf_adapter, reset_task); 2169 2170 /* If we're already down or resetting, just bail */ 2171 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || 2172 test_bit(__IXGBEVF_RESETTING, &adapter->state)) 2173 return; 2174 2175 adapter->tx_timeout_count++; 2176 2177 ixgbevf_reinit_locked(adapter); 2178 } 2179 2180 /** 2181 * ixgbevf_watchdog_task - worker thread to bring link up 2182 * @work: pointer to work_struct containing our data 2183 **/ 2184 static void ixgbevf_watchdog_task(struct work_struct *work) 2185 { 2186 struct ixgbevf_adapter *adapter = container_of(work, 2187 struct ixgbevf_adapter, 2188 watchdog_task); 2189 struct net_device *netdev = adapter->netdev; 2190 struct ixgbe_hw *hw = &adapter->hw; 2191 u32 link_speed = adapter->link_speed; 2192 bool link_up = adapter->link_up; 2193 s32 need_reset; 2194 2195 
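	/* Flag the watchdog task as running; the flag is cleared again just
	 * before returning below. */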
adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK; 2196 2197 /* 2198 * Always check the link on the watchdog because we have 2199 * no LSC interrupt 2200 */ 2201 spin_lock_bh(&adapter->mbx_lock); 2202 2203 need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false); 2204 2205 spin_unlock_bh(&adapter->mbx_lock); 2206 2207 if (need_reset) { 2208 adapter->link_up = link_up; 2209 adapter->link_speed = link_speed; 2210 netif_carrier_off(netdev); 2211 netif_tx_stop_all_queues(netdev); 2212 schedule_work(&adapter->reset_task); 2213 goto pf_has_reset; 2214 } 2215 adapter->link_up = link_up; 2216 adapter->link_speed = link_speed; 2217 2218 if (link_up) { 2219 if (!netif_carrier_ok(netdev)) { 2220 hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n", 2221 (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? 2222 10 : 1); 2223 netif_carrier_on(netdev); 2224 netif_tx_wake_all_queues(netdev); 2225 } 2226 } else { 2227 adapter->link_up = false; 2228 adapter->link_speed = 0; 2229 if (netif_carrier_ok(netdev)) { 2230 hw_dbg(&adapter->hw, "NIC Link is Down\n"); 2231 netif_carrier_off(netdev); 2232 netif_tx_stop_all_queues(netdev); 2233 } 2234 } 2235 2236 ixgbevf_update_stats(adapter); 2237 2238 pf_has_reset: 2239 /* Reset the timer */ 2240 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) 2241 mod_timer(&adapter->watchdog_timer, 2242 round_jiffies(jiffies + (2 * HZ))); 2243 2244 adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK; 2245 } 2246 2247 /** 2248 * ixgbevf_free_tx_resources - Free Tx Resources per Queue 2249 * @adapter: board private structure 2250 * @tx_ring: Tx descriptor ring for a specific queue 2251 * 2252 * Free all transmit software resources 2253 **/ 2254 void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter, 2255 struct ixgbevf_ring *tx_ring) 2256 { 2257 struct pci_dev *pdev = adapter->pdev; 2258 2259 ixgbevf_clean_tx_ring(adapter, tx_ring); 2260 2261 vfree(tx_ring->tx_buffer_info); 2262 tx_ring->tx_buffer_info = NULL; 2263 2264 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, 2265 tx_ring->dma); 2266 2267 tx_ring->desc = NULL; 2268 } 2269 2270 /** 2271 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues 2272 * @adapter: board private structure 2273 * 2274 * Free all transmit software resources 2275 **/ 2276 static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter) 2277 { 2278 int i; 2279 2280 for (i = 0; i < adapter->num_tx_queues; i++) 2281 if (adapter->tx_ring[i].desc) 2282 ixgbevf_free_tx_resources(adapter, 2283 &adapter->tx_ring[i]); 2284 2285 } 2286 2287 /** 2288 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors) 2289 * @adapter: board private structure 2290 * @tx_ring: tx descriptor ring (for a specific queue) to setup 2291 * 2292 * Return 0 on success, negative on failure 2293 **/ 2294 int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter, 2295 struct ixgbevf_ring *tx_ring) 2296 { 2297 struct pci_dev *pdev = adapter->pdev; 2298 int size; 2299 2300 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; 2301 tx_ring->tx_buffer_info = vzalloc(size); 2302 if (!tx_ring->tx_buffer_info) 2303 goto err; 2304 2305 /* round up to nearest 4K */ 2306 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 2307 tx_ring->size = ALIGN(tx_ring->size, 4096); 2308 2309 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, 2310 &tx_ring->dma, GFP_KERNEL); 2311 if (!tx_ring->desc) 2312 goto err; 2313 2314 tx_ring->next_to_use = 0; 2315 tx_ring->next_to_clean = 0; 2316 return 0; 2317 2318 err: 2319 
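	/* vfree() accepts a NULL pointer, so this single error path covers
	 * both allocation failures above. */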
vfree(tx_ring->tx_buffer_info);
2320 tx_ring->tx_buffer_info = NULL;
2321 hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
2322 "descriptor ring\n");
2323 return -ENOMEM;
2324 }
2325
2326 /**
2327 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
2328 * @adapter: board private structure
2329 *
2330 * If this function returns with an error, then it's possible one or
2331 * more of the rings is populated (while the rest are not). It is the
2332 * caller's duty to clean those orphaned rings.
2333 *
2334 * Return 0 on success, negative on failure
2335 **/
2336 static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
2337 {
2338 int i, err = 0;
2339
2340 for (i = 0; i < adapter->num_tx_queues; i++) {
2341 err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
2342 if (!err)
2343 continue;
2344 hw_dbg(&adapter->hw,
2345 "Allocation for Tx Queue %u failed\n", i);
2346 break;
2347 }
2348
2349 return err;
2350 }
2351
2352 /**
2353 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
2354 * @adapter: board private structure
2355 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2356 *
2357 * Returns 0 on success, negative on failure
2358 **/
2359 int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
2360 struct ixgbevf_ring *rx_ring)
2361 {
2362 struct pci_dev *pdev = adapter->pdev;
2363 int size;
2364
2365 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
2366 rx_ring->rx_buffer_info = vzalloc(size);
2367 if (!rx_ring->rx_buffer_info)
2368 goto alloc_failed;
2369
2370 /* Round up to nearest 4K */
2371 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
2372 rx_ring->size = ALIGN(rx_ring->size, 4096);
2373
2374 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
2375 &rx_ring->dma, GFP_KERNEL);
2376
2377 if (!rx_ring->desc) {
2378 hw_dbg(&adapter->hw,
2379 "Unable to allocate memory for "
2380 "the receive descriptor ring\n");
2381 vfree(rx_ring->rx_buffer_info);
2382 rx_ring->rx_buffer_info = NULL;
2383 goto alloc_failed;
2384 }
2385
2386 rx_ring->next_to_clean = 0;
2387 rx_ring->next_to_use = 0;
2388
2389 return 0;
2390 alloc_failed:
2391 return -ENOMEM;
2392 }
2393
2394 /**
2395 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
2396 * @adapter: board private structure
2397 *
2398 * If this function returns with an error, then it's possible one or
2399 * more of the rings is populated (while the rest are not). It is the
2400 * caller's duty to clean those orphaned rings.
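 * (ixgbevf_open() handles this by freeing all Rx and Tx resources on its
 * error unwind path.)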
2401 * 2402 * Return 0 on success, negative on failure 2403 **/ 2404 static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter) 2405 { 2406 int i, err = 0; 2407 2408 for (i = 0; i < adapter->num_rx_queues; i++) { 2409 err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]); 2410 if (!err) 2411 continue; 2412 hw_dbg(&adapter->hw, 2413 "Allocation for Rx Queue %u failed\n", i); 2414 break; 2415 } 2416 return err; 2417 } 2418 2419 /** 2420 * ixgbevf_free_rx_resources - Free Rx Resources 2421 * @adapter: board private structure 2422 * @rx_ring: ring to clean the resources from 2423 * 2424 * Free all receive software resources 2425 **/ 2426 void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter, 2427 struct ixgbevf_ring *rx_ring) 2428 { 2429 struct pci_dev *pdev = adapter->pdev; 2430 2431 ixgbevf_clean_rx_ring(adapter, rx_ring); 2432 2433 vfree(rx_ring->rx_buffer_info); 2434 rx_ring->rx_buffer_info = NULL; 2435 2436 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, 2437 rx_ring->dma); 2438 2439 rx_ring->desc = NULL; 2440 } 2441 2442 /** 2443 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues 2444 * @adapter: board private structure 2445 * 2446 * Free all receive software resources 2447 **/ 2448 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter) 2449 { 2450 int i; 2451 2452 for (i = 0; i < adapter->num_rx_queues; i++) 2453 if (adapter->rx_ring[i].desc) 2454 ixgbevf_free_rx_resources(adapter, 2455 &adapter->rx_ring[i]); 2456 } 2457 2458 static int ixgbevf_setup_queues(struct ixgbevf_adapter *adapter) 2459 { 2460 struct ixgbe_hw *hw = &adapter->hw; 2461 struct ixgbevf_ring *rx_ring; 2462 unsigned int def_q = 0; 2463 unsigned int num_tcs = 0; 2464 unsigned int num_rx_queues = 1; 2465 int err, i; 2466 2467 spin_lock_bh(&adapter->mbx_lock); 2468 2469 /* fetch queue configuration from the PF */ 2470 err = ixgbevf_get_queues(hw, &num_tcs, &def_q); 2471 2472 spin_unlock_bh(&adapter->mbx_lock); 2473 2474 if (err) 2475 return err; 2476 2477 if (num_tcs > 1) { 2478 /* update default Tx ring register index */ 2479 adapter->tx_ring[0].reg_idx = def_q; 2480 2481 /* we need as many queues as traffic classes */ 2482 num_rx_queues = num_tcs; 2483 } 2484 2485 /* nothing to do if we have the correct number of queues */ 2486 if (adapter->num_rx_queues == num_rx_queues) 2487 return 0; 2488 2489 /* allocate new rings */ 2490 rx_ring = kcalloc(num_rx_queues, 2491 sizeof(struct ixgbevf_ring), GFP_KERNEL); 2492 if (!rx_ring) 2493 return -ENOMEM; 2494 2495 /* setup ring fields */ 2496 for (i = 0; i < num_rx_queues; i++) { 2497 rx_ring[i].count = adapter->rx_ring_count; 2498 rx_ring[i].queue_index = i; 2499 rx_ring[i].reg_idx = i; 2500 rx_ring[i].dev = &adapter->pdev->dev; 2501 rx_ring[i].netdev = adapter->netdev; 2502 } 2503 2504 /* free the existing ring and queues */ 2505 adapter->num_rx_queues = 0; 2506 kfree(adapter->rx_ring); 2507 2508 /* move new rings into position on the adapter struct */ 2509 adapter->rx_ring = rx_ring; 2510 adapter->num_rx_queues = num_rx_queues; 2511 2512 return 0; 2513 } 2514 2515 /** 2516 * ixgbevf_open - Called when a network interface is made active 2517 * @netdev: network interface device structure 2518 * 2519 * Returns 0 on success, negative value on failure 2520 * 2521 * The open entry point is called when a network interface is made 2522 * active by the system (IFF_UP). 
At this point all resources needed 2523 * for transmit and receive operations are allocated, the interrupt 2524 * handler is registered with the OS, the watchdog timer is started, 2525 * and the stack is notified that the interface is ready. 2526 **/ 2527 static int ixgbevf_open(struct net_device *netdev) 2528 { 2529 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2530 struct ixgbe_hw *hw = &adapter->hw; 2531 int err; 2532 2533 /* disallow open during test */ 2534 if (test_bit(__IXGBEVF_TESTING, &adapter->state)) 2535 return -EBUSY; 2536 2537 if (hw->adapter_stopped) { 2538 ixgbevf_reset(adapter); 2539 /* if adapter is still stopped then PF isn't up and 2540 * the vf can't start. */ 2541 if (hw->adapter_stopped) { 2542 err = IXGBE_ERR_MBX; 2543 pr_err("Unable to start - perhaps the PF Driver isn't " 2544 "up yet\n"); 2545 goto err_setup_reset; 2546 } 2547 } 2548 2549 ixgbevf_negotiate_api(adapter); 2550 2551 /* setup queue reg_idx and Rx queue count */ 2552 err = ixgbevf_setup_queues(adapter); 2553 if (err) 2554 goto err_setup_queues; 2555 2556 /* allocate transmit descriptors */ 2557 err = ixgbevf_setup_all_tx_resources(adapter); 2558 if (err) 2559 goto err_setup_tx; 2560 2561 /* allocate receive descriptors */ 2562 err = ixgbevf_setup_all_rx_resources(adapter); 2563 if (err) 2564 goto err_setup_rx; 2565 2566 ixgbevf_configure(adapter); 2567 2568 /* 2569 * Map the Tx/Rx rings to the vectors we were allotted. 2570 * if request_irq will be called in this function map_rings 2571 * must be called *before* up_complete 2572 */ 2573 ixgbevf_map_rings_to_vectors(adapter); 2574 2575 ixgbevf_up_complete(adapter); 2576 2577 /* clear any pending interrupts, may auto mask */ 2578 IXGBE_READ_REG(hw, IXGBE_VTEICR); 2579 err = ixgbevf_request_irq(adapter); 2580 if (err) 2581 goto err_req_irq; 2582 2583 ixgbevf_irq_enable(adapter); 2584 2585 return 0; 2586 2587 err_req_irq: 2588 ixgbevf_down(adapter); 2589 ixgbevf_free_irq(adapter); 2590 err_setup_rx: 2591 ixgbevf_free_all_rx_resources(adapter); 2592 err_setup_tx: 2593 ixgbevf_free_all_tx_resources(adapter); 2594 err_setup_queues: 2595 ixgbevf_reset(adapter); 2596 2597 err_setup_reset: 2598 2599 return err; 2600 } 2601 2602 /** 2603 * ixgbevf_close - Disables a network interface 2604 * @netdev: network interface device structure 2605 * 2606 * Returns 0, this is not allowed to fail 2607 * 2608 * The close entry point is called when an interface is de-activated 2609 * by the OS. The hardware is still under the drivers control, but 2610 * needs to be disabled. A global MAC reset is issued to stop the 2611 * hardware, and all transmit and receive resources are freed. 2612 **/ 2613 static int ixgbevf_close(struct net_device *netdev) 2614 { 2615 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2616 2617 ixgbevf_down(adapter); 2618 ixgbevf_free_irq(adapter); 2619 2620 ixgbevf_free_all_tx_resources(adapter); 2621 ixgbevf_free_all_rx_resources(adapter); 2622 2623 return 0; 2624 } 2625 2626 static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring, 2627 u32 vlan_macip_lens, u32 type_tucmd, 2628 u32 mss_l4len_idx) 2629 { 2630 struct ixgbe_adv_tx_context_desc *context_desc; 2631 u16 i = tx_ring->next_to_use; 2632 2633 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i); 2634 2635 i++; 2636 tx_ring->next_to_use = (i < tx_ring->count) ? 
i : 0; 2637 2638 /* set bits to identify this as an advanced context descriptor */ 2639 type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; 2640 2641 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); 2642 context_desc->seqnum_seed = 0; 2643 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); 2644 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); 2645 } 2646 2647 static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, 2648 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) 2649 { 2650 u32 vlan_macip_lens, type_tucmd; 2651 u32 mss_l4len_idx, l4len; 2652 2653 if (!skb_is_gso(skb)) 2654 return 0; 2655 2656 if (skb_header_cloned(skb)) { 2657 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2658 if (err) 2659 return err; 2660 } 2661 2662 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 2663 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; 2664 2665 if (skb->protocol == htons(ETH_P_IP)) { 2666 struct iphdr *iph = ip_hdr(skb); 2667 iph->tot_len = 0; 2668 iph->check = 0; 2669 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 2670 iph->daddr, 0, 2671 IPPROTO_TCP, 2672 0); 2673 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; 2674 } else if (skb_is_gso_v6(skb)) { 2675 ipv6_hdr(skb)->payload_len = 0; 2676 tcp_hdr(skb)->check = 2677 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 2678 &ipv6_hdr(skb)->daddr, 2679 0, IPPROTO_TCP, 0); 2680 } 2681 2682 /* compute header lengths */ 2683 l4len = tcp_hdrlen(skb); 2684 *hdr_len += l4len; 2685 *hdr_len = skb_transport_offset(skb) + l4len; 2686 2687 /* mss_l4len_id: use 1 as index for TSO */ 2688 mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT; 2689 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; 2690 mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT; 2691 2692 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ 2693 vlan_macip_lens = skb_network_header_len(skb); 2694 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; 2695 vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; 2696 2697 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, 2698 type_tucmd, mss_l4len_idx); 2699 2700 return 1; 2701 } 2702 2703 static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, 2704 struct sk_buff *skb, u32 tx_flags) 2705 { 2706 u32 vlan_macip_lens = 0; 2707 u32 mss_l4len_idx = 0; 2708 u32 type_tucmd = 0; 2709 2710 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2711 u8 l4_hdr = 0; 2712 switch (skb->protocol) { 2713 case __constant_htons(ETH_P_IP): 2714 vlan_macip_lens |= skb_network_header_len(skb); 2715 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; 2716 l4_hdr = ip_hdr(skb)->protocol; 2717 break; 2718 case __constant_htons(ETH_P_IPV6): 2719 vlan_macip_lens |= skb_network_header_len(skb); 2720 l4_hdr = ipv6_hdr(skb)->nexthdr; 2721 break; 2722 default: 2723 if (unlikely(net_ratelimit())) { 2724 dev_warn(tx_ring->dev, 2725 "partial checksum but proto=%x!\n", 2726 skb->protocol); 2727 } 2728 break; 2729 } 2730 2731 switch (l4_hdr) { 2732 case IPPROTO_TCP: 2733 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP; 2734 mss_l4len_idx = tcp_hdrlen(skb) << 2735 IXGBE_ADVTXD_L4LEN_SHIFT; 2736 break; 2737 case IPPROTO_SCTP: 2738 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP; 2739 mss_l4len_idx = sizeof(struct sctphdr) << 2740 IXGBE_ADVTXD_L4LEN_SHIFT; 2741 break; 2742 case IPPROTO_UDP: 2743 mss_l4len_idx = sizeof(struct udphdr) << 2744 IXGBE_ADVTXD_L4LEN_SHIFT; 2745 break; 2746 default: 2747 if (unlikely(net_ratelimit())) { 2748 dev_warn(tx_ring->dev, 2749 "partial checksum but l4 proto=%x!\n", 2750 l4_hdr); 2751 } 2752 break; 2753 } 2754 } 2755 2756 /* vlan_macip_lens: MACLEN, VLAN 
tag */ 2757 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; 2758 vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; 2759 2760 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, 2761 type_tucmd, mss_l4len_idx); 2762 2763 return (skb->ip_summed == CHECKSUM_PARTIAL); 2764 } 2765 2766 static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, 2767 struct sk_buff *skb, u32 tx_flags, 2768 unsigned int first) 2769 { 2770 struct ixgbevf_tx_buffer *tx_buffer_info; 2771 unsigned int len; 2772 unsigned int total = skb->len; 2773 unsigned int offset = 0, size; 2774 int count = 0; 2775 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 2776 unsigned int f; 2777 int i; 2778 2779 i = tx_ring->next_to_use; 2780 2781 len = min(skb_headlen(skb), total); 2782 while (len) { 2783 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2784 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD); 2785 2786 tx_buffer_info->length = size; 2787 tx_buffer_info->mapped_as_page = false; 2788 tx_buffer_info->dma = dma_map_single(tx_ring->dev, 2789 skb->data + offset, 2790 size, DMA_TO_DEVICE); 2791 if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma)) 2792 goto dma_error; 2793 tx_buffer_info->next_to_watch = i; 2794 2795 len -= size; 2796 total -= size; 2797 offset += size; 2798 count++; 2799 i++; 2800 if (i == tx_ring->count) 2801 i = 0; 2802 } 2803 2804 for (f = 0; f < nr_frags; f++) { 2805 const struct skb_frag_struct *frag; 2806 2807 frag = &skb_shinfo(skb)->frags[f]; 2808 len = min((unsigned int)skb_frag_size(frag), total); 2809 offset = 0; 2810 2811 while (len) { 2812 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2813 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD); 2814 2815 tx_buffer_info->length = size; 2816 tx_buffer_info->dma = 2817 skb_frag_dma_map(tx_ring->dev, frag, 2818 offset, size, DMA_TO_DEVICE); 2819 if (dma_mapping_error(tx_ring->dev, 2820 tx_buffer_info->dma)) 2821 goto dma_error; 2822 tx_buffer_info->mapped_as_page = true; 2823 tx_buffer_info->next_to_watch = i; 2824 2825 len -= size; 2826 total -= size; 2827 offset += size; 2828 count++; 2829 i++; 2830 if (i == tx_ring->count) 2831 i = 0; 2832 } 2833 if (total == 0) 2834 break; 2835 } 2836 2837 if (i == 0) 2838 i = tx_ring->count - 1; 2839 else 2840 i = i - 1; 2841 tx_ring->tx_buffer_info[i].skb = skb; 2842 tx_ring->tx_buffer_info[first].next_to_watch = i; 2843 tx_ring->tx_buffer_info[first].time_stamp = jiffies; 2844 2845 return count; 2846 2847 dma_error: 2848 dev_err(tx_ring->dev, "TX DMA map failed\n"); 2849 2850 /* clear timestamp and dma mappings for failed tx_buffer_info map */ 2851 tx_buffer_info->dma = 0; 2852 tx_buffer_info->next_to_watch = 0; 2853 count--; 2854 2855 /* clear timestamp and dma mappings for remaining portion of packet */ 2856 while (count >= 0) { 2857 count--; 2858 i--; 2859 if (i < 0) 2860 i += tx_ring->count; 2861 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2862 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); 2863 } 2864 2865 return count; 2866 } 2867 2868 static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags, 2869 int count, u32 paylen, u8 hdr_len) 2870 { 2871 union ixgbe_adv_tx_desc *tx_desc = NULL; 2872 struct ixgbevf_tx_buffer *tx_buffer_info; 2873 u32 olinfo_status = 0, cmd_type_len = 0; 2874 unsigned int i; 2875 2876 u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS; 2877 2878 cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA; 2879 2880 cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT; 2881 2882 if (tx_flags & IXGBE_TX_FLAGS_VLAN) 
2883 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE; 2884 2885 if (tx_flags & IXGBE_TX_FLAGS_CSUM) 2886 olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM; 2887 2888 if (tx_flags & IXGBE_TX_FLAGS_TSO) { 2889 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; 2890 2891 /* use index 1 context for tso */ 2892 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); 2893 if (tx_flags & IXGBE_TX_FLAGS_IPV4) 2894 olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM; 2895 } 2896 2897 /* 2898 * Check Context must be set if Tx switch is enabled, which it 2899 * always is for case where virtual functions are running 2900 */ 2901 olinfo_status |= IXGBE_ADVTXD_CC; 2902 2903 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT); 2904 2905 i = tx_ring->next_to_use; 2906 while (count--) { 2907 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2908 tx_desc = IXGBEVF_TX_DESC(tx_ring, i); 2909 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma); 2910 tx_desc->read.cmd_type_len = 2911 cpu_to_le32(cmd_type_len | tx_buffer_info->length); 2912 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 2913 i++; 2914 if (i == tx_ring->count) 2915 i = 0; 2916 } 2917 2918 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd); 2919 2920 tx_ring->next_to_use = i; 2921 } 2922 2923 static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) 2924 { 2925 struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev); 2926 2927 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); 2928 /* Herbert's original patch had: 2929 * smp_mb__after_netif_stop_queue(); 2930 * but since that doesn't exist yet, just open code it. */ 2931 smp_mb(); 2932 2933 /* We need to check again in a case another CPU has just 2934 * made room available. */ 2935 if (likely(IXGBE_DESC_UNUSED(tx_ring) < size)) 2936 return -EBUSY; 2937 2938 /* A reprieve! 
- use start_queue because it doesn't call schedule */ 2939 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); 2940 ++adapter->restart_queue; 2941 return 0; 2942 } 2943 2944 static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) 2945 { 2946 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size)) 2947 return 0; 2948 return __ixgbevf_maybe_stop_tx(tx_ring, size); 2949 } 2950 2951 static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 2952 { 2953 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2954 struct ixgbevf_ring *tx_ring; 2955 unsigned int first; 2956 unsigned int tx_flags = 0; 2957 u8 hdr_len = 0; 2958 int r_idx = 0, tso; 2959 u16 count = TXD_USE_COUNT(skb_headlen(skb)); 2960 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD 2961 unsigned short f; 2962 #endif 2963 u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL); 2964 if (!dst_mac || is_link_local_ether_addr(dst_mac)) { 2965 dev_kfree_skb(skb); 2966 return NETDEV_TX_OK; 2967 } 2968 2969 tx_ring = &adapter->tx_ring[r_idx]; 2970 2971 /* 2972 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, 2973 * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD, 2974 * + 2 desc gap to keep tail from touching head, 2975 * + 1 desc for context descriptor, 2976 * otherwise try next time 2977 */ 2978 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD 2979 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) 2980 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); 2981 #else 2982 count += skb_shinfo(skb)->nr_frags; 2983 #endif 2984 if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) { 2985 adapter->tx_busy++; 2986 return NETDEV_TX_BUSY; 2987 } 2988 2989 if (vlan_tx_tag_present(skb)) { 2990 tx_flags |= vlan_tx_tag_get(skb); 2991 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; 2992 tx_flags |= IXGBE_TX_FLAGS_VLAN; 2993 } 2994 2995 first = tx_ring->next_to_use; 2996 2997 if (skb->protocol == htons(ETH_P_IP)) 2998 tx_flags |= IXGBE_TX_FLAGS_IPV4; 2999 tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len); 3000 if (tso < 0) { 3001 dev_kfree_skb_any(skb); 3002 return NETDEV_TX_OK; 3003 } 3004 3005 if (tso) 3006 tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM; 3007 else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags)) 3008 tx_flags |= IXGBE_TX_FLAGS_CSUM; 3009 3010 ixgbevf_tx_queue(tx_ring, tx_flags, 3011 ixgbevf_tx_map(tx_ring, skb, tx_flags, first), 3012 skb->len, hdr_len); 3013 /* 3014 * Force memory writes to complete before letting h/w 3015 * know there are new descriptors to fetch. (Only 3016 * applicable for weak-ordered memory model archs, 3017 * such as IA-64). 
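 * The tail register write below then hands the new descriptors to the
 * device, and ixgbevf_maybe_stop_tx() stops the queue early if fewer than
 * DESC_NEEDED free descriptors remain for a future frame.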
3018 */ 3019 wmb(); 3020 3021 writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail); 3022 3023 ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED); 3024 3025 return NETDEV_TX_OK; 3026 } 3027 3028 /** 3029 * ixgbevf_set_mac - Change the Ethernet Address of the NIC 3030 * @netdev: network interface device structure 3031 * @p: pointer to an address structure 3032 * 3033 * Returns 0 on success, negative on failure 3034 **/ 3035 static int ixgbevf_set_mac(struct net_device *netdev, void *p) 3036 { 3037 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3038 struct ixgbe_hw *hw = &adapter->hw; 3039 struct sockaddr *addr = p; 3040 3041 if (!is_valid_ether_addr(addr->sa_data)) 3042 return -EADDRNOTAVAIL; 3043 3044 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 3045 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); 3046 3047 spin_lock_bh(&adapter->mbx_lock); 3048 3049 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); 3050 3051 spin_unlock_bh(&adapter->mbx_lock); 3052 3053 return 0; 3054 } 3055 3056 /** 3057 * ixgbevf_change_mtu - Change the Maximum Transfer Unit 3058 * @netdev: network interface device structure 3059 * @new_mtu: new value for maximum frame size 3060 * 3061 * Returns 0 on success, negative on failure 3062 **/ 3063 static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) 3064 { 3065 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3066 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 3067 int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE; 3068 3069 switch (adapter->hw.api_version) { 3070 case ixgbe_mbox_api_11: 3071 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; 3072 break; 3073 default: 3074 if (adapter->hw.mac.type == ixgbe_mac_X540_vf) 3075 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; 3076 break; 3077 } 3078 3079 /* MTU < 68 is an error and causes problems on some kernels */ 3080 if ((new_mtu < 68) || (max_frame > max_possible_frame)) 3081 return -EINVAL; 3082 3083 hw_dbg(&adapter->hw, "changing MTU from %d to %d\n", 3084 netdev->mtu, new_mtu); 3085 /* must set new MTU before calling down or up */ 3086 netdev->mtu = new_mtu; 3087 3088 if (netif_running(netdev)) 3089 ixgbevf_reinit_locked(adapter); 3090 3091 return 0; 3092 } 3093 3094 static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state) 3095 { 3096 struct net_device *netdev = pci_get_drvdata(pdev); 3097 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3098 #ifdef CONFIG_PM 3099 int retval = 0; 3100 #endif 3101 3102 netif_device_detach(netdev); 3103 3104 if (netif_running(netdev)) { 3105 rtnl_lock(); 3106 ixgbevf_down(adapter); 3107 ixgbevf_free_irq(adapter); 3108 ixgbevf_free_all_tx_resources(adapter); 3109 ixgbevf_free_all_rx_resources(adapter); 3110 rtnl_unlock(); 3111 } 3112 3113 ixgbevf_clear_interrupt_scheme(adapter); 3114 3115 #ifdef CONFIG_PM 3116 retval = pci_save_state(pdev); 3117 if (retval) 3118 return retval; 3119 3120 #endif 3121 pci_disable_device(pdev); 3122 3123 return 0; 3124 } 3125 3126 #ifdef CONFIG_PM 3127 static int ixgbevf_resume(struct pci_dev *pdev) 3128 { 3129 struct ixgbevf_adapter *adapter = pci_get_drvdata(pdev); 3130 struct net_device *netdev = adapter->netdev; 3131 u32 err; 3132 3133 pci_set_power_state(pdev, PCI_D0); 3134 pci_restore_state(pdev); 3135 /* 3136 * pci_restore_state clears dev->state_saved so call 3137 * pci_save_state to restore it. 
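	 * (The configuration space was just restored, so re-saving records
	 * the same values while marking the state as saved again.)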
3138 */ 3139 pci_save_state(pdev); 3140 3141 err = pci_enable_device_mem(pdev); 3142 if (err) { 3143 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); 3144 return err; 3145 } 3146 pci_set_master(pdev); 3147 3148 rtnl_lock(); 3149 err = ixgbevf_init_interrupt_scheme(adapter); 3150 rtnl_unlock(); 3151 if (err) { 3152 dev_err(&pdev->dev, "Cannot initialize interrupts\n"); 3153 return err; 3154 } 3155 3156 ixgbevf_reset(adapter); 3157 3158 if (netif_running(netdev)) { 3159 err = ixgbevf_open(netdev); 3160 if (err) 3161 return err; 3162 } 3163 3164 netif_device_attach(netdev); 3165 3166 return err; 3167 } 3168 3169 #endif /* CONFIG_PM */ 3170 static void ixgbevf_shutdown(struct pci_dev *pdev) 3171 { 3172 ixgbevf_suspend(pdev, PMSG_SUSPEND); 3173 } 3174 3175 static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev, 3176 struct rtnl_link_stats64 *stats) 3177 { 3178 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3179 unsigned int start; 3180 u64 bytes, packets; 3181 const struct ixgbevf_ring *ring; 3182 int i; 3183 3184 ixgbevf_update_stats(adapter); 3185 3186 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc; 3187 3188 for (i = 0; i < adapter->num_rx_queues; i++) { 3189 ring = &adapter->rx_ring[i]; 3190 do { 3191 start = u64_stats_fetch_begin_bh(&ring->syncp); 3192 bytes = ring->total_bytes; 3193 packets = ring->total_packets; 3194 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 3195 stats->rx_bytes += bytes; 3196 stats->rx_packets += packets; 3197 } 3198 3199 for (i = 0; i < adapter->num_tx_queues; i++) { 3200 ring = &adapter->tx_ring[i]; 3201 do { 3202 start = u64_stats_fetch_begin_bh(&ring->syncp); 3203 bytes = ring->total_bytes; 3204 packets = ring->total_packets; 3205 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 3206 stats->tx_bytes += bytes; 3207 stats->tx_packets += packets; 3208 } 3209 3210 return stats; 3211 } 3212 3213 static const struct net_device_ops ixgbevf_netdev_ops = { 3214 .ndo_open = ixgbevf_open, 3215 .ndo_stop = ixgbevf_close, 3216 .ndo_start_xmit = ixgbevf_xmit_frame, 3217 .ndo_set_rx_mode = ixgbevf_set_rx_mode, 3218 .ndo_get_stats64 = ixgbevf_get_stats, 3219 .ndo_validate_addr = eth_validate_addr, 3220 .ndo_set_mac_address = ixgbevf_set_mac, 3221 .ndo_change_mtu = ixgbevf_change_mtu, 3222 .ndo_tx_timeout = ixgbevf_tx_timeout, 3223 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid, 3224 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid, 3225 }; 3226 3227 static void ixgbevf_assign_netdev_ops(struct net_device *dev) 3228 { 3229 dev->netdev_ops = &ixgbevf_netdev_ops; 3230 ixgbevf_set_ethtool_ops(dev); 3231 dev->watchdog_timeo = 5 * HZ; 3232 } 3233 3234 /** 3235 * ixgbevf_probe - Device Initialization Routine 3236 * @pdev: PCI device information struct 3237 * @ent: entry in ixgbevf_pci_tbl 3238 * 3239 * Returns 0 on success, negative on failure 3240 * 3241 * ixgbevf_probe initializes an adapter identified by a pci_dev structure. 3242 * The OS initialization, configuring of the adapter private structure, 3243 * and a hardware reset occur. 
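 * Along the way the routine enables the PCI device, selects a 64-bit DMA
 * mask (falling back to 32-bit), maps BAR 0, copies the MAC and mailbox
 * operations for the detected board type, and registers the net_device.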
3244 **/ 3245 static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 3246 { 3247 struct net_device *netdev; 3248 struct ixgbevf_adapter *adapter = NULL; 3249 struct ixgbe_hw *hw = NULL; 3250 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data]; 3251 static int cards_found; 3252 int err, pci_using_dac; 3253 3254 err = pci_enable_device(pdev); 3255 if (err) 3256 return err; 3257 3258 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && 3259 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { 3260 pci_using_dac = 1; 3261 } else { 3262 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 3263 if (err) { 3264 err = dma_set_coherent_mask(&pdev->dev, 3265 DMA_BIT_MASK(32)); 3266 if (err) { 3267 dev_err(&pdev->dev, "No usable DMA " 3268 "configuration, aborting\n"); 3269 goto err_dma; 3270 } 3271 } 3272 pci_using_dac = 0; 3273 } 3274 3275 err = pci_request_regions(pdev, ixgbevf_driver_name); 3276 if (err) { 3277 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err); 3278 goto err_pci_reg; 3279 } 3280 3281 pci_set_master(pdev); 3282 3283 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter), 3284 MAX_TX_QUEUES); 3285 if (!netdev) { 3286 err = -ENOMEM; 3287 goto err_alloc_etherdev; 3288 } 3289 3290 SET_NETDEV_DEV(netdev, &pdev->dev); 3291 3292 pci_set_drvdata(pdev, netdev); 3293 adapter = netdev_priv(netdev); 3294 3295 adapter->netdev = netdev; 3296 adapter->pdev = pdev; 3297 hw = &adapter->hw; 3298 hw->back = adapter; 3299 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 3300 3301 /* 3302 * call save state here in standalone driver because it relies on 3303 * adapter struct to exist, and needs to call netdev_priv 3304 */ 3305 pci_save_state(pdev); 3306 3307 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), 3308 pci_resource_len(pdev, 0)); 3309 if (!hw->hw_addr) { 3310 err = -EIO; 3311 goto err_ioremap; 3312 } 3313 3314 ixgbevf_assign_netdev_ops(netdev); 3315 3316 adapter->bd_number = cards_found; 3317 3318 /* Setup hw api */ 3319 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); 3320 hw->mac.type = ii->mac; 3321 3322 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops, 3323 sizeof(struct ixgbe_mbx_operations)); 3324 3325 /* setup the private structure */ 3326 err = ixgbevf_sw_init(adapter); 3327 if (err) 3328 goto err_sw_init; 3329 3330 /* The HW MAC address was set and/or determined in sw_init */ 3331 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); 3332 3333 if (!is_valid_ether_addr(netdev->dev_addr)) { 3334 pr_err("invalid MAC address\n"); 3335 err = -EIO; 3336 goto err_sw_init; 3337 } 3338 3339 netdev->hw_features = NETIF_F_SG | 3340 NETIF_F_IP_CSUM | 3341 NETIF_F_IPV6_CSUM | 3342 NETIF_F_TSO | 3343 NETIF_F_TSO6 | 3344 NETIF_F_RXCSUM; 3345 3346 netdev->features = netdev->hw_features | 3347 NETIF_F_HW_VLAN_TX | 3348 NETIF_F_HW_VLAN_RX | 3349 NETIF_F_HW_VLAN_FILTER; 3350 3351 netdev->vlan_features |= NETIF_F_TSO; 3352 netdev->vlan_features |= NETIF_F_TSO6; 3353 netdev->vlan_features |= NETIF_F_IP_CSUM; 3354 netdev->vlan_features |= NETIF_F_IPV6_CSUM; 3355 netdev->vlan_features |= NETIF_F_SG; 3356 3357 if (pci_using_dac) 3358 netdev->features |= NETIF_F_HIGHDMA; 3359 3360 netdev->priv_flags |= IFF_UNICAST_FLT; 3361 3362 init_timer(&adapter->watchdog_timer); 3363 adapter->watchdog_timer.function = ixgbevf_watchdog; 3364 adapter->watchdog_timer.data = (unsigned long)adapter; 3365 3366 INIT_WORK(&adapter->reset_task, ixgbevf_reset_task); 3367 INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task); 3368 3369 err = 
ixgbevf_init_interrupt_scheme(adapter);
3370 if (err)
3371 goto err_sw_init;
3372
3373 strcpy(netdev->name, "eth%d");
3374
3375 err = register_netdev(netdev);
3376 if (err)
3377 goto err_register;
3378
3379 netif_carrier_off(netdev);
3380
3381 ixgbevf_init_last_counter_stats(adapter);
3382
3383 /* print the MAC address */
3384 hw_dbg(hw, "%pM\n", netdev->dev_addr);
3385
3386 hw_dbg(hw, "MAC: %d\n", hw->mac.type);
3387
3388 hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
3389 cards_found++;
3390 return 0;
3391
3392 err_register:
3393 ixgbevf_clear_interrupt_scheme(adapter);
3394 err_sw_init:
3395 ixgbevf_reset_interrupt_capability(adapter);
3396 iounmap(hw->hw_addr);
3397 err_ioremap:
3398 free_netdev(netdev);
3399 err_alloc_etherdev:
3400 pci_release_regions(pdev);
3401 err_pci_reg:
3402 err_dma:
3403 pci_disable_device(pdev);
3404 return err;
3405 }
3406
3407 /**
3408 * ixgbevf_remove - Device Removal Routine
3409 * @pdev: PCI device information struct
3410 *
3411 * ixgbevf_remove is called by the PCI subsystem to alert the driver
3412 * that it should release a PCI device. This could be caused by a
3413 * Hot-Plug event, or because the driver is going to be removed from
3414 * memory.
3415 **/
3416 static void ixgbevf_remove(struct pci_dev *pdev)
3417 {
3418 struct net_device *netdev = pci_get_drvdata(pdev);
3419 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3420
3421 set_bit(__IXGBEVF_DOWN, &adapter->state);
3422
3423 del_timer_sync(&adapter->watchdog_timer);
3424
3425 cancel_work_sync(&adapter->reset_task);
3426 cancel_work_sync(&adapter->watchdog_task);
3427
3428 if (netdev->reg_state == NETREG_REGISTERED)
3429 unregister_netdev(netdev);
3430
3431 ixgbevf_clear_interrupt_scheme(adapter);
3432 ixgbevf_reset_interrupt_capability(adapter);
3433
3434 iounmap(adapter->hw.hw_addr);
3435 pci_release_regions(pdev);
3436
3437 hw_dbg(&adapter->hw, "Remove complete\n");
3438
3439 kfree(adapter->tx_ring);
3440 kfree(adapter->rx_ring);
3441
3442 free_netdev(netdev);
3443
3444 pci_disable_device(pdev);
3445 }
3446
3447 /**
3448 * ixgbevf_io_error_detected - called when PCI error is detected
3449 * @pdev: Pointer to PCI device
3450 * @state: The current pci connection state
3451 *
3452 * This function is called after a PCI bus error affecting
3453 * this device has been detected.
3454 */
3455 static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
3456 pci_channel_state_t state)
3457 {
3458 struct net_device *netdev = pci_get_drvdata(pdev);
3459 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3460
3461 netif_device_detach(netdev);
3462
3463 if (state == pci_channel_io_perm_failure)
3464 return PCI_ERS_RESULT_DISCONNECT;
3465
3466 if (netif_running(netdev))
3467 ixgbevf_down(adapter);
3468
3469 pci_disable_device(pdev);
3470
3471 /* Request a slot reset. */
3472 return PCI_ERS_RESULT_NEED_RESET;
3473 }
3474
3475 /**
3476 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
3477 * @pdev: Pointer to PCI device
3478 *
3479 * Restart the card from scratch, as if from a cold-boot. Implementation
3480 * resembles the first half of the ixgbevf_resume routine.
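 * ixgbevf_io_resume() below then completes recovery by bringing the
 * interface back up and re-attaching the net_device.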
3481 */
3482 static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
3483 {
3484 struct net_device *netdev = pci_get_drvdata(pdev);
3485 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3486
3487 if (pci_enable_device_mem(pdev)) {
3488 dev_err(&pdev->dev,
3489 "Cannot re-enable PCI device after reset.\n");
3490 return PCI_ERS_RESULT_DISCONNECT;
3491 }
3492
3493 pci_set_master(pdev);
3494
3495 ixgbevf_reset(adapter);
3496
3497 return PCI_ERS_RESULT_RECOVERED;
3498 }
3499
3500 /**
3501 * ixgbevf_io_resume - called when traffic can start flowing again.
3502 * @pdev: Pointer to PCI device
3503 *
3504 * This callback is called when the error recovery driver tells us that
3505 * it's OK to resume normal operation. Implementation resembles the
3506 * second half of the ixgbevf_resume routine.
3507 */
3508 static void ixgbevf_io_resume(struct pci_dev *pdev)
3509 {
3510 struct net_device *netdev = pci_get_drvdata(pdev);
3511 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3512
3513 if (netif_running(netdev))
3514 ixgbevf_up(adapter);
3515
3516 netif_device_attach(netdev);
3517 }
3518
3519 /* PCI Error Recovery (ERS) */
3520 static const struct pci_error_handlers ixgbevf_err_handler = {
3521 .error_detected = ixgbevf_io_error_detected,
3522 .slot_reset = ixgbevf_io_slot_reset,
3523 .resume = ixgbevf_io_resume,
3524 };
3525
3526 static struct pci_driver ixgbevf_driver = {
3527 .name = ixgbevf_driver_name,
3528 .id_table = ixgbevf_pci_tbl,
3529 .probe = ixgbevf_probe,
3530 .remove = ixgbevf_remove,
3531 #ifdef CONFIG_PM
3532 /* Power Management Hooks */
3533 .suspend = ixgbevf_suspend,
3534 .resume = ixgbevf_resume,
3535 #endif
3536 .shutdown = ixgbevf_shutdown,
3537 .err_handler = &ixgbevf_err_handler
3538 };
3539
3540 /**
3541 * ixgbevf_init_module - Driver Registration Routine
3542 *
3543 * ixgbevf_init_module is the first routine called when the driver is
3544 * loaded. All it does is register with the PCI subsystem.
3545 **/
3546 static int __init ixgbevf_init_module(void)
3547 {
3548 int ret;
3549 pr_info("%s - version %s\n", ixgbevf_driver_string,
3550 ixgbevf_driver_version);
3551
3552 pr_info("%s\n", ixgbevf_copyright);
3553
3554 ret = pci_register_driver(&ixgbevf_driver);
3555 return ret;
3556 }
3557
3558 module_init(ixgbevf_init_module);
3559
3560 /**
3561 * ixgbevf_exit_module - Driver Exit Cleanup Routine
3562 *
3563 * ixgbevf_exit_module is called just before the driver is removed
3564 * from memory.
3565 **/
3566 static void __exit ixgbevf_exit_module(void)
3567 {
3568 pci_unregister_driver(&ixgbevf_driver);
3569 }
3570
3571 #ifdef DEBUG
3572 /**
3573 * ixgbevf_get_hw_dev_name - return device name string
3574 * used by hardware layer to print debugging information
3575 **/
3576 char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
3577 {
3578 struct ixgbevf_adapter *adapter = hw->back;
3579 return adapter->netdev->name;
3580 }
3581
3582 #endif
3583 module_exit(ixgbevf_exit_module);
3584
3585 /* ixgbevf_main.c */
3586
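/*
 * Illustrative sketch only, not used by the driver: the rollover handling
 * that the UPDATE_VF_COUNTER_32bit macro above performs, written out as a
 * plain helper (the function name and the u32 *last_counter parameter are
 * hypothetical).  When the 32-bit hardware register wraps, 2^32 is added to
 * the accumulated 64-bit value, and the low 32 bits are then replaced with
 * the latest reading.
 *
 *	static u64 vf_counter_roll32(u32 current_counter, u32 *last_counter,
 *				     u64 counter)
 *	{
 *		if (current_counter < *last_counter)
 *			counter += 0x100000000LL;
 *		*last_counter = current_counter;
 *		counter &= 0xFFFFFFFF00000000LL;
 *		counter |= current_counter;
 *		return counter;
 *	}
 */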