/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/


/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "ixgbevf.h"

const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "2.6.0-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
	"Copyright (c) 2009 - 2012 Intel Corporation.";

static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf] = &ixgbevf_82599_vf_info,
	[board_X540_vf]  = &ixgbevf_X540_vf_info,
};

/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbevf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF),
	board_82599_vf},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF),
	board_X540_vf},

	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/* forward decls */
static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
			       u32 itr_reg);

static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
					   struct ixgbevf_ring *rx_ring,
					   u32 val)
{
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
}

/*
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 *
 */
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
			     u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	if (direction == -1) {
		/* other causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {
		/* tx or rx causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
	}
}

static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
					       struct ixgbevf_tx_buffer
					       *tx_buffer_info)
{
	if (tx_buffer_info->dma) {
		if (tx_buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev,
				       tx_buffer_info->dma,
				       tx_buffer_info->length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev,
					 tx_buffer_info->dma,
					 tx_buffer_info->length,
					 DMA_TO_DEVICE);
		tx_buffer_info->dma = 0;
	}
	if (tx_buffer_info->skb) {
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	tx_buffer_info->time_stamp = 0;
	/* tx_buffer_info must be completely set up in the transmit path */
}

#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
			 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#ifdef MAX_SKB_FRAGS
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1)      /* for context */
#else
#define DESC_NEEDED TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD)
#endif

static void ixgbevf_tx_timeout(struct net_device *netdev);

/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 * @tx_ring: tx ring to clean
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
				 struct ixgbevf_ring *tx_ring)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned int i, eop, count = 0;
	unsigned int total_bytes = 0, total_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->tx_buffer_info[i].next_to_watch;
	eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
	       (count < tx_ring->work_limit)) {
		bool cleaned = false;
		rmb(); /* read buffer_info after eop_desc */
		/* eop could change between read and DD-check */
		if (unlikely(eop != tx_ring->tx_buffer_info[i].next_to_watch))
			goto cont_loop;
		for ( ; !cleaned; count++) {
			struct sk_buff *skb;
			tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			cleaned = (i == eop);
			skb = tx_buffer_info->skb;

			if (cleaned && skb) {
				unsigned int segs, bytecount;

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			ixgbevf_unmap_and_free_tx_resource(adapter,
							   tx_buffer_info);

			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

cont_loop:
		eop = tx_ring->tx_buffer_info[i].next_to_watch;
		eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(count && netif_carrier_ok(netdev) &&
		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
#ifdef HAVE_TX_MQ
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);
			++adapter->restart_queue;
		}
#else
		if (netif_queue_stopped(netdev) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
#endif
	}

	/* re-arm the interrupt */
	if ((count >= tx_ring->work_limit) &&
	    (!test_bit(__IXGBEVF_DOWN, &adapter->state))) {
		IXGBE_WRITE_REG(hw, IXGBE_VTEICS, tx_ring->v_idx);
	}

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);

	return count < tx_ring->work_limit;
}

/**
 * ixgbevf_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 * @rx_desc: rx descriptor
 **/
static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
				struct sk_buff *skb, u8 status,
				struct ixgbevf_ring *ring,
				union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	if (is_vlan && test_bit(tag, adapter->active_vlans))
		__vlan_hwaccel_put_tag(skb, tag);

	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
		napi_gro_receive(&q_vector->napi, skb);
	else
		netif_rx(skb);
}

/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
				       u32 status_err, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		adapter->hw_csum_rx_error++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		adapter->hw_csum_rx_error++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	adapter->hw_csum_rx_good++;
}

/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *rx_ring,
				     int cleaned_count)
{
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbevf_rx_buffer *bi;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;

	i = rx_ring->next_to_use;
	bi = &rx_ring->rx_buffer_info[i];

	while (cleaned_count--) {
		rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);

		if (!bi->page_dma &&
		    (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
			if (!bi->page) {
				bi->page = alloc_page(GFP_ATOMIC |
						      __GFP_COLD);
				if (!bi->page) {
					adapter->alloc_rx_page_failed++;
					goto no_buffers;
				}
				bi->page_offset = 0;
			} else {
				/* use a half page if we're re-using */
				bi->page_offset ^= (PAGE_SIZE / 2);
			}

			bi->page_dma = dma_map_page(&pdev->dev, bi->page,
						    bi->page_offset,
						    (PAGE_SIZE / 2),
						    DMA_FROM_DEVICE);
		}

		skb = bi->skb;
		if (!skb) {
			skb = netdev_alloc_skb(adapter->netdev,
					       bufsz);

			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}

			/*
			 * Make buffer alignment 2 beyond a 16 byte boundary
			 * this will result in a 16 byte aligned IP header after
			 * the 14 byte MAC header is removed
			 */
			skb_reserve(skb, NET_IP_ALIGN);

			bi->skb = skb;
		}
		if (!bi->dma) {
			bi->dma = dma_map_single(&pdev->dev, skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		bi = &rx_ring->rx_buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i-- == 0)
			i = (rx_ring->count - 1);

		ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
	}
}

static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
					     u64 qmask)
{
	u32 mask;
	struct ixgbe_hw *hw = &adapter->hw;

	mask = (qmask & 0xFFFFFFFF);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
}

static inline u16 ixgbevf_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
}

static inline u16 ixgbevf_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
}

static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *rx_ring,
				 int *work_done, int work_to_do)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i;
	u32 len, staterr;
	u16 hdr_info;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (staterr & IXGBE_RXD_STAT_DD) {
		u32 upper_len = 0;
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		rmb(); /* read descriptor and rx_buffer_info after status DD */
		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
			hdr_info = le16_to_cpu(ixgbevf_get_hdr_info(rx_desc));
			len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
			       IXGBE_RXDADV_HDRBUFLEN_SHIFT;
			if (hdr_info & IXGBE_RXDADV_SPH)
				adapter->rx_hdr_split++;
			if (len > IXGBEVF_RX_HDR_SIZE)
				len = IXGBEVF_RX_HDR_SIZE;
			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
		} else {
			len = le16_to_cpu(rx_desc->wb.upper.length);
		}
		cleaned = true;
		skb = rx_buffer_info->skb;
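		/* prefetch the cache line holding the start of the packet so
		 * that the header processing below does not stall on the
		 * first access
		 */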
		prefetch(skb->data - NET_IP_ALIGN);
		rx_buffer_info->skb = NULL;

		if (rx_buffer_info->dma) {
			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
			skb_put(skb, len);
		}

		if (upper_len) {
			dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
				       PAGE_SIZE / 2, DMA_FROM_DEVICE);
			rx_buffer_info->page_dma = 0;
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buffer_info->page,
					   rx_buffer_info->page_offset,
					   upper_len);

			if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
			    (page_count(rx_buffer_info->page) != 1))
				rx_buffer_info->page = NULL;
			else
				get_page(rx_buffer_info->page);

			skb->len += upper_len;
			skb->data_len += upper_len;
			skb->truesize += upper_len;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		cleaned_count++;

		next_buffer = &rx_ring->rx_buffer_info[i];

		if (!(staterr & IXGBE_RXD_STAT_EOP)) {
			if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
				rx_buffer_info->skb = next_buffer->skb;
				rx_buffer_info->dma = next_buffer->dma;
				next_buffer->skb = skb;
				next_buffer->dma = 0;
			} else {
				skb->next = next_buffer->skb;
				skb->next->prev = skb;
			}
			adapter->non_eop_descs++;
			goto next_desc;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_rx_checksum(adapter, staterr, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/*
		 * Work around issue of some types of VM to VM loop back
		 * packets not getting split correctly
		 */
		if (staterr & IXGBE_RXD_STAT_LB) {
			u32 header_fixup_len = skb_headlen(skb);
			if (header_fixup_len < 14)
				skb_push(skb, header_fixup_len);
		}
		skb->protocol = eth_type_trans(skb, adapter->netdev);

		ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
			ixgbevf_alloc_rx_buffers(adapter, rx_ring,
						 cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

	if (cleaned_count)
		ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);

	return cleaned;
}

/**
 * ixgbevf_clean_rxonly - msix (aka one shot) rx clean routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function is optimized for cleaning one queue only on a single
 * q_vector!!!
 **/
static int ixgbevf_clean_rxonly(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *rx_ring = NULL;
	int work_done = 0;
	long r_idx;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);

	ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);

	/* If all Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		if (adapter->itr_setting & 1)
			ixgbevf_set_itr_msix(q_vector);
		if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
			ixgbevf_irq_enable_queues(adapter, rx_ring->v_idx);
	}

	return work_done;
}

/**
 * ixgbevf_clean_rxonly_many - msix (aka one shot) rx clean routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean more than one rx queue associated with a
 * q_vector.
 **/
static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *rx_ring = NULL;
	int work_done = 0, i;
	long r_idx;
	u64 enable_mask = 0;

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	budget /= (q_vector->rxr_count ?: 1);
	budget = max(budget, 1);
	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
		enable_mask |= rx_ring->v_idx;
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
				      r_idx + 1);
	}

#ifndef HAVE_NETDEV_NAPI_LIST
	if (!netif_running(adapter->netdev))
		work_done = 0;

#endif
	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);

	/* If all Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		if (adapter->itr_setting & 1)
			ixgbevf_set_itr_msix(q_vector);
		if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
			ixgbevf_irq_enable_queues(adapter, enable_mask);
	}

	return work_done;
}


/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_q_vector *q_vector;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j, q_vectors, v_idx, r_idx;
	u32 mask;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		q_vector = adapter->q_vector[v_idx];
		/* XXX for_each_set_bit(...) */
		r_idx = find_first_bit(q_vector->rxr_idx,
				       adapter->num_rx_queues);

		for (i = 0; i < q_vector->rxr_count; i++) {
			j = adapter->rx_ring[r_idx].reg_idx;
			ixgbevf_set_ivar(adapter, 0, j, v_idx);
			r_idx = find_next_bit(q_vector->rxr_idx,
					      adapter->num_rx_queues,
					      r_idx + 1);
		}
		r_idx = find_first_bit(q_vector->txr_idx,
				       adapter->num_tx_queues);

		for (i = 0; i < q_vector->txr_count; i++) {
			j = adapter->tx_ring[r_idx].reg_idx;
			ixgbevf_set_ivar(adapter, 1, j, v_idx);
			r_idx = find_next_bit(q_vector->txr_idx,
					      adapter->num_tx_queues,
					      r_idx + 1);
		}

		/* if this is a tx only vector halve the interrupt rate */
		if (q_vector->txr_count && !q_vector->rxr_count)
			q_vector->eitr = (adapter->eitr_param >> 1);
		else if (q_vector->rxr_count)
			/* rx only */
			q_vector->eitr = adapter->eitr_param;

		ixgbevf_write_eitr(adapter, v_idx, q_vector->eitr);
	}

	ixgbevf_set_ivar(adapter, -1, 1, v_idx);

	/* set up to autoclear timer, and the vectors */
	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~IXGBE_EIMS_OTHER;
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @eitr: eitr setting (ints per sec) to give last timeslice
 * @itr_setting: current throttle rate in ints/second
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter,
			     u32 eitr, u8 itr_setting,
			     int packets, int bytes)
{
	unsigned int retval = itr_setting;
	u32 timepassed_us;
	u64 bytes_perint;

	if (packets == 0)
		goto update_itr_done;


	/* simple throttlerate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = 1000000/eitr;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > adapter->eitr_low)
			retval = low_latency;
		break;
	case low_latency:
		if (bytes_perint > adapter->eitr_high)
			retval = bulk_latency;
		else if (bytes_perint <= adapter->eitr_low)
			retval = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= adapter->eitr_high)
			retval = low_latency;
		break;
	}

update_itr_done:
	return retval;
}

/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @adapter: pointer to adapter struct
 * @v_idx: vector index into q_vector array
 * @itr_reg: new value to be written in *register* format, not ints/s
 *
 * This function is made to be called by ethtool and by the driver
 * when it needs to update VTEITR registers at runtime.  Hardware
 * specific quirks/differences are taken care of here.
 */
static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
			       u32 itr_reg)
{
	struct ixgbe_hw *hw = &adapter->hw;

	itr_reg = EITR_INTS_PER_SEC_TO_REG(itr_reg);

	/*
	 * set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
	 */
	itr_reg |= IXGBE_EITR_CNT_WDIS;

	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}

static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	u32 new_itr;
	u8 current_itr, ret_itr;
	int i, r_idx, v_idx = q_vector->v_idx;
	struct ixgbevf_ring *rx_ring, *tx_ring;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		tx_ring = &(adapter->tx_ring[r_idx]);
		ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
					     q_vector->tx_itr,
					     tx_ring->total_packets,
					     tx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
				    q_vector->tx_itr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
				      r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
					     q_vector->rx_itr,
					     rx_ring->total_packets,
					     rx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
				    q_vector->rx_itr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
				      r_idx + 1);
	}

	current_itr = max(q_vector->rx_itr, q_vector->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 100000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
	default:
		new_itr = 8000;
		break;
	}

	if (new_itr != q_vector->eitr) {
		u32 itr_reg;

		/* save the algorithm value here, not the smoothed one */
		q_vector->eitr = new_itr;
		/* do an exponential smoothing */
		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
		itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
		ixgbevf_write_eitr(adapter, v_idx, itr_reg);
	}
}

static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr;
	u32 msg;
	bool got_ack = false;

	eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);

	if (!hw->mbx.ops.check_for_ack(hw))
		got_ack = true;

	if (!hw->mbx.ops.check_for_msg(hw)) {
		hw->mbx.ops.read(hw, &msg, 1);

		if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
			mod_timer(&adapter->watchdog_timer,
				  round_jiffies(jiffies + 1));

		if (msg & IXGBE_VT_MSGTYPE_NACK)
			pr_warn("Last Request of type %2.2x to PF Nacked\n",
				msg & 0xFF);
		/*
		 * Restore the PFSTS bit in case someone is polling for a
		 * return message from the PF
		 */
		hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS;
	}

	/*
	 * checking for the ack clears the PFACK bit.  Place
	 * it back in the v2p_mailbox cache so that anyone
	 * polling for an ack will not miss it
	 */
	if (got_ack)
		hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;

	return IRQ_HANDLED;
}

static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *tx_ring;
	int i, r_idx;

	if (!q_vector->txr_count)
		return IRQ_HANDLED;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		tx_ring = &(adapter->tx_ring[r_idx]);
		tx_ring->total_bytes = 0;
		tx_ring->total_packets = 0;
		ixgbevf_clean_tx_irq(adapter, tx_ring);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
				      r_idx + 1);
	}

	if (adapter->itr_setting & 1)
		ixgbevf_set_itr_msix(q_vector);

	return IRQ_HANDLED;
}

/**
 * ixgbevf_msix_clean_rx - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbevf_ring *rx_ring;
	int r_idx;
	int i;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		rx_ring->total_bytes = 0;
		rx_ring->total_packets = 0;
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
				      r_idx + 1);
	}

	if (!q_vector->rxr_count)
		return IRQ_HANDLED;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);
	/* disable interrupts on this vector only */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, rx_ring->v_idx);
	napi_schedule(&q_vector->napi);


	return IRQ_HANDLED;
}

static irqreturn_t ixgbevf_msix_clean_many(int irq, void *data)
{
	ixgbevf_msix_clean_rx(irq, data);
	ixgbevf_msix_clean_tx(irq, data);

	return IRQ_HANDLED;
}

static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
				     int r_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	set_bit(r_idx, q_vector->rxr_idx);
	q_vector->rxr_count++;
	a->rx_ring[r_idx].v_idx = 1 << v_idx;
}

static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
				     int t_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	set_bit(t_idx, q_vector->txr_idx);
	q_vector->txr_count++;
	a->tx_ring[t_idx].v_idx = 1 << v_idx;
}

/**
 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
{
	int q_vectors;
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);
		goto out;
	}

	/*
	 * If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
	for (i = v_start; i < q_vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < q_vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}

/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	irqreturn_t (*handler)(int, void *);
	int i, vector, q_vectors, err;
	int ri = 0, ti = 0;

	/* Decrement for Other and TCP Timer vectors */
	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count)          \
					  ? &ixgbevf_msix_clean_many : \
			  (_v)->rxr_count ? &ixgbevf_msix_clean_rx   : \
			  (_v)->txr_count ? &ixgbevf_msix_clean_tx   : \
			  NULL)
	for (vector = 0; vector < q_vectors; vector++) {
		handler = SET_HANDLER(adapter->q_vector[vector]);

		if (handler == &ixgbevf_msix_clean_rx) {
			sprintf(adapter->name[vector], "%s-%s-%d",
				netdev->name, "rx", ri++);
		} else if (handler == &ixgbevf_msix_clean_tx) {
			sprintf(adapter->name[vector], "%s-%s-%d",
				netdev->name, "tx", ti++);
		} else if (handler == &ixgbevf_msix_clean_many) {
			sprintf(adapter->name[vector], "%s-%s-%d",
				netdev->name, "TxRx", vector);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(adapter->msix_entries[vector].vector,
				  handler, 0, adapter->name[vector],
				  adapter->q_vector[vector]);
		if (err) {
			hw_dbg(&adapter->hw,
			       "request_irq failed for MSIX interrupt "
			       "Error: %d\n", err);
			goto free_queue_irqs;
		}
	}

	sprintf(adapter->name[vector], "%s:mbx", netdev->name);
	err = request_irq(adapter->msix_entries[vector].vector,
			  &ixgbevf_msix_mbx, 0, adapter->name[vector], netdev);
	if (err) {
		hw_dbg(&adapter->hw,
		       "request_irq for msix_mbx failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	/* free in reverse order, passing the same dev_id that was handed to
	 * request_irq() for each queue vector
	 */
	for (i = vector - 1; i >= 0; i--)
		free_irq(adapter->msix_entries[i].vector,
			 adapter->q_vector[i]);
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
	return err;
}

static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < q_vectors; i++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
		bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
		bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
		q_vector->rxr_count = 0;
		q_vector->txr_count = 0;
		q_vector->eitr = adapter->eitr_param;
	}
}

/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
	int err = 0;

	err = ixgbevf_request_msix_irqs(adapter);

	if (err)
		hw_dbg(&adapter->hw,
		       "request_irq failed, Error %d\n", err);

	return err;
}

static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i, q_vectors;

	q_vectors = adapter->num_msix_vectors;

	i = q_vectors - 1;

	free_irq(adapter->msix_entries[i].vector, netdev);
	i--;

	for (; i >= 0; i--) {
		free_irq(adapter->msix_entries[i].vector,
			 adapter->q_vector[i]);
	}

	ixgbevf_reset_q_vectors(adapter);
}

/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
	int i;
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);

	IXGBE_WRITE_FLUSH(hw);

	for (i = 0; i < adapter->num_msix_vectors; i++)
		synchronize_irq(adapter->msix_entries[i].vector);
}

/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter,
				      bool queues, bool flush)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 mask;
	u64 qmask;

	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
	qmask = ~0;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);

	if (queues)
		ixgbevf_irq_enable_queues(adapter, qmask);

	if (flush)
		IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
	u64 tdba;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 i, j, tdlen, txctrl;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->tx_ring[i];
		j = ring->reg_idx;
		tdba = ring->dma;
		tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
				(tdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
		adapter->tx_ring[i].head = IXGBE_VFTDH(j);
		adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
		/* Disable Tx Head Writeback RO bit, since this hoses
		 * bookkeeping if things aren't delivered in order.
		 */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
	}
}

#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2

static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
	struct ixgbevf_ring *rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 srrctl;

	rx_ring = &adapter->rx_ring[index];

	srrctl = IXGBE_SRRCTL_DROP_EN;

	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
		u16 bufsz = IXGBEVF_RXBUFFER_2048;
		/* grow the amount we can receive on large page machines */
		if (bufsz < (PAGE_SIZE / 2))
			bufsz = (PAGE_SIZE / 2);
		/* cap the bufsz at our largest descriptor size */
		bufsz = min((u16)IXGBEVF_MAX_RXBUFFER, bufsz);

		srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
		srrctl |= ((IXGBEVF_RX_HDR_SIZE <<
			    IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
			   IXGBE_SRRCTL_BSIZEHDR_MASK);
	} else {
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
			srrctl |= IXGBEVF_RXBUFFER_2048 >>
				IXGBE_SRRCTL_BSIZEPKT_SHIFT;
		else
			srrctl |= rx_ring->rx_buf_len >>
				IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	}
	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}

/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
	u64 rdba;
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	int i, j;
	u32 rdlen;
	int rx_buf_len;

	/* Decide whether to use packet split mode or not */
	if (netdev->mtu > ETH_DATA_LEN) {
		if (adapter->flags & IXGBE_FLAG_RX_PS_CAPABLE)
			adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
		else
			adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
	} else {
		if (adapter->flags & IXGBE_FLAG_RX_1BUF_CAPABLE)
			adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
		else
			adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
	}

	/* Set the RX buffer length according to the mode */
	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
		/* PSRTYPE must be initialized in 82599 */
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
			      IXGBE_PSRTYPE_UDPHDR |
			      IXGBE_PSRTYPE_IPV4HDR |
			      IXGBE_PSRTYPE_IPV6HDR |
			      IXGBE_PSRTYPE_L2HDR;
		IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
		rx_buf_len = IXGBEVF_RX_HDR_SIZE;
	} else {
		IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
		if (netdev->mtu <= ETH_DATA_LEN)
			rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
		else
			rx_buf_len = ALIGN(max_frame, 1024);
	}

	rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		rdba = adapter->rx_ring[i].dma;
		j = adapter->rx_ring[i].reg_idx;
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
				(rdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
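		/* head and tail both start at zero; the tail is advanced once
		 * receive buffers are posted by ixgbevf_alloc_rx_buffers()
		 * during ixgbevf_configure()
		 */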
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
		adapter->rx_ring[i].head = IXGBE_VFRDH(j);
		adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
		adapter->rx_ring[i].rx_buf_len = rx_buf_len;

		ixgbevf_configure_srrctl(adapter, j);
	}
}

static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	/* add VID to filter table */
	if (hw->mac.ops.set_vfta)
		hw->mac.ops.set_vfta(hw, vid, 0, true);
	set_bit(vid, adapter->active_vlans);

	return 0;
}

static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	/* remove VID from filter table */
	if (hw->mac.ops.set_vfta)
		hw->mac.ops.set_vfta(hw, vid, 0, false);
	clear_bit(vid, adapter->active_vlans);

	return 0;
}

static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);
}

static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int count = 0;

	if ((netdev_uc_count(netdev)) > 10) {
		pr_err("Too many unicast filters - No Space\n");
		return -ENOSPC;
	}

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;
		netdev_for_each_uc_addr(ha, netdev) {
			hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
			udelay(200);
		}
	} else {
		/*
		 * If the list is empty then send message to PF driver to
		 * clear all macvlans on this VF.
		 */
		hw->mac.ops.set_uc_addr(hw, 0, NULL);
	}

	return count;
}

/**
 * ixgbevf_set_rx_mode - Multicast set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast mode.
 **/
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	/* reprogram multicast list */
	if (hw->mac.ops.update_mc_addr_list)
		hw->mac.ops.update_mc_addr_list(hw, netdev);

	ixgbevf_write_uc_addr_list(netdev);
}

static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		struct napi_struct *napi;
		q_vector = adapter->q_vector[q_idx];
		if (!q_vector->rxr_count)
			continue;
		napi = &q_vector->napi;
		if (q_vector->rxr_count > 1)
			napi->poll = &ixgbevf_clean_rxonly_many;

		napi_enable(napi);
	}
}

static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		if (!q_vector->rxr_count)
			continue;
		napi_disable(&q_vector->napi);
	}
}

static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	ixgbevf_set_rx_mode(netdev);

	ixgbevf_restore_vlan(adapter);

	ixgbevf_configure_tx(adapter);
	ixgbevf_configure_rx(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->rx_ring[i];
		ixgbevf_alloc_rx_buffers(adapter, ring, ring->count);
		ring->next_to_use = ring->count - 1;
		writel(ring->next_to_use, adapter->hw.hw_addr + ring->tail);
	}
}

#define IXGBE_MAX_RX_DESC_POLL 10
static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
						int rxr)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int j = adapter->rx_ring[rxr].reg_idx;
	int k;

	for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
		if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
			break;
		else
			msleep(1);
	}
	if (k >= IXGBE_MAX_RX_DESC_POLL) {
		hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
		       "not set within the polling period\n", rxr);
	}

	ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
				(adapter->rx_ring[rxr].count - 1));
}

static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
{
	/* Only save pre-reset stats if there are some */
	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
		adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
			adapter->stats.base_vfgprc;
		adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
			adapter->stats.base_vfgptc;
		adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
			adapter->stats.base_vfgorc;
		adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
			adapter->stats.base_vfgotc;
		adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
			adapter->stats.base_vfmprc;
	}
}

static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.last_vfgorc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.last_vfgotc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}

static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j = 0;
	int num_rx_rings = adapter->num_rx_queues;
	u32 txdctl, rxdctl;
	u32 msg[2];

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		/* enable WTHRESH=8 descriptors, to encourage burst writeback */
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	for (i = 0; i < num_rx_rings; i++) {
		j = adapter->rx_ring[i].reg_idx;
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		if (hw->mac.type == ixgbe_mac_X540_vf) {
			rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
			rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
				   IXGBE_RXDCTL_RLPML_EN);
		}
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		ixgbevf_rx_desc_queue_enable(adapter, i);
	}

	ixgbevf_configure_msix(adapter);

	if (hw->mac.ops.set_rar) {
		if (is_valid_ether_addr(hw->mac.addr))
			hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
		else
			hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
	}

	msg[0] = IXGBE_VF_SET_LPE;
	msg[1] = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	hw->mbx.ops.write_posted(hw, msg, 2);

	clear_bit(__IXGBEVF_DOWN, &adapter->state);
	ixgbevf_napi_enable_all(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(netdev);

	ixgbevf_save_reset_stats(adapter);
	ixgbevf_init_last_counter_stats(adapter);

	/* bring the link up in the watchdog, this could race with our first
	 * link up interrupt but shouldn't be a problem */
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->link_check_timeout = jiffies;
	mod_timer(&adapter->watchdog_timer, jiffies);
}

void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	ixgbevf_configure(adapter);

	ixgbevf_up_complete(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);

	ixgbevf_irq_enable(adapter, true, true);
}

/**
 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/
static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
				  struct ixgbevf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	if (!rx_ring->rx_buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ixgbevf_rx_buffer *rx_buffer_info;

		rx_buffer_info = &rx_ring->rx_buffer_info[i];
		if (rx_buffer_info->dma) {
			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
		}
		if (rx_buffer_info->skb) {
			struct sk_buff *skb = rx_buffer_info->skb;
			rx_buffer_info->skb = NULL;
			do {
				struct sk_buff *this = skb;
				skb = skb->prev;
				dev_kfree_skb(this);
			} while (skb);
		}
		if (!rx_buffer_info->page)
			continue;
		dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
			       PAGE_SIZE / 2, DMA_FROM_DEVICE);
		rx_buffer_info->page_dma = 0;
		put_page(rx_buffer_info->page);
		rx_buffer_info->page = NULL;
		rx_buffer_info->page_offset = 0;
	}

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	if (rx_ring->head)
		writel(0, adapter->hw.hw_addr + rx_ring->head);
	if (rx_ring->tail)
		writel(0, adapter->hw.hw_addr + rx_ring->tail);
}

/**
 * ixgbevf_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/
static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
				  struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->tx_buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */

	for (i = 0; i < tx_ring->count; i++) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
	}

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (tx_ring->head)
		writel(0, adapter->hw.hw_addr + tx_ring->head);
	if (tx_ring->tail)
		writel(0, adapter->hw.hw_addr + tx_ring->tail);
}

/**
 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
}

/**
 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}

void ixgbevf_down(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 txdctl;
	int i, j;

	/* signal that we are down to the interrupt handler */
	set_bit(__IXGBEVF_DOWN, &adapter->state);
	/* disable receives */

receives */ 1821 1822 netif_tx_disable(netdev); 1823 1824 msleep(10); 1825 1826 netif_tx_stop_all_queues(netdev); 1827 1828 ixgbevf_irq_disable(adapter); 1829 1830 ixgbevf_napi_disable_all(adapter); 1831 1832 del_timer_sync(&adapter->watchdog_timer); 1833 /* can't call flush scheduled work here because it can deadlock 1834 * if linkwatch_event tries to acquire the rtnl_lock which we are 1835 * holding */ 1836 while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK) 1837 msleep(1); 1838 1839 /* disable transmits in the hardware now that interrupts are off */ 1840 for (i = 0; i < adapter->num_tx_queues; i++) { 1841 j = adapter->tx_ring[i].reg_idx; 1842 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j)); 1843 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), 1844 (txdctl & ~IXGBE_TXDCTL_ENABLE)); 1845 } 1846 1847 netif_carrier_off(netdev); 1848 1849 if (!pci_channel_offline(adapter->pdev)) 1850 ixgbevf_reset(adapter); 1851 1852 ixgbevf_clean_all_tx_rings(adapter); 1853 ixgbevf_clean_all_rx_rings(adapter); 1854 } 1855 1856 void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter) 1857 { 1858 struct ixgbe_hw *hw = &adapter->hw; 1859 1860 WARN_ON(in_interrupt()); 1861 1862 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state)) 1863 msleep(1); 1864 1865 /* 1866 * Check if PF is up before re-init. If not then skip until 1867 * later when the PF is up and ready to service requests from 1868 * the VF via mailbox. If the VF is up and running then the 1869 * watchdog task will continue to schedule reset tasks until 1870 * the PF is up and running. 1871 */ 1872 if (!hw->mac.ops.reset_hw(hw)) { 1873 ixgbevf_down(adapter); 1874 ixgbevf_up(adapter); 1875 } 1876 1877 clear_bit(__IXGBEVF_RESETTING, &adapter->state); 1878 } 1879 1880 void ixgbevf_reset(struct ixgbevf_adapter *adapter) 1881 { 1882 struct ixgbe_hw *hw = &adapter->hw; 1883 struct net_device *netdev = adapter->netdev; 1884 1885 if (hw->mac.ops.reset_hw(hw)) 1886 hw_dbg(hw, "PF still resetting\n"); 1887 else 1888 hw->mac.ops.init_hw(hw); 1889 1890 if (is_valid_ether_addr(adapter->hw.mac.addr)) { 1891 memcpy(netdev->dev_addr, adapter->hw.mac.addr, 1892 netdev->addr_len); 1893 memcpy(netdev->perm_addr, adapter->hw.mac.addr, 1894 netdev->addr_len); 1895 } 1896 } 1897 1898 static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter, 1899 int vectors) 1900 { 1901 int err, vector_threshold; 1902 1903 /* We'll want at least 3 (vector_threshold): 1904 * 1) TxQ[0] Cleanup 1905 * 2) RxQ[0] Cleanup 1906 * 3) Other (Link Status Change, etc.) 1907 */ 1908 vector_threshold = MIN_MSIX_COUNT; 1909 1910 /* The more we get, the more we will assign to Tx/Rx Cleanup 1911 * for the separate queues...where Rx Cleanup >= Tx Cleanup. 1912 * Right now, we simply care about how many we'll get; we'll 1913 * set them up later while requesting irq's. 1914 */ 1915 while (vectors >= vector_threshold) { 1916 err = pci_enable_msix(adapter->pdev, adapter->msix_entries, 1917 vectors); 1918 if (!err) /* Success in acquiring all requested vectors. */ 1919 break; 1920 else if (err < 0) 1921 vectors = 0; /* Nasty failure, quit now */ 1922 else /* err == number of vectors we should try again with */ 1923 vectors = err; 1924 } 1925 1926 if (vectors < vector_threshold) { 1927 /* Can't allocate enough MSI-X interrupts? Oh well. 1928 * This just means we'll go with either a single MSI 1929 * vector or fall back to legacy interrupts. 
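	 *
	 * We land here when the retry loop above bottomed out.  For
	 * reference, pci_enable_msix() returns 0 once every requested
	 * vector is granted, a negative value on a hard failure, or the
	 * number of vectors actually available, which the loop simply
	 * requests again.  With hypothetical numbers: asking for 4 and
	 * being offered only 2 leaves us below MIN_MSIX_COUNT, so the
	 * MSI-X entries are released below.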
1930 */ 1931 hw_dbg(&adapter->hw, 1932 "Unable to allocate MSI-X interrupts\n"); 1933 kfree(adapter->msix_entries); 1934 adapter->msix_entries = NULL; 1935 } else { 1936 /* 1937 * Adjust for only the vectors we'll use, which is minimum 1938 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of 1939 * vectors we were allocated. 1940 */ 1941 adapter->num_msix_vectors = vectors; 1942 } 1943 } 1944 1945 /* 1946 * ixgbevf_set_num_queues: Allocate queues for device, feature dependent 1947 * @adapter: board private structure to initialize 1948 * 1949 * This is the top level queue allocation routine. The order here is very 1950 * important, starting with the "most" number of features turned on at once, 1951 * and ending with the smallest set of features. This way large combinations 1952 * can be allocated if they're turned on, and smaller combinations are the 1953 * fallthrough conditions. 1954 * 1955 **/ 1956 static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter) 1957 { 1958 /* Start with base case */ 1959 adapter->num_rx_queues = 1; 1960 adapter->num_tx_queues = 1; 1961 adapter->num_rx_pools = adapter->num_rx_queues; 1962 adapter->num_rx_queues_per_pool = 1; 1963 } 1964 1965 /** 1966 * ixgbevf_alloc_queues - Allocate memory for all rings 1967 * @adapter: board private structure to initialize 1968 * 1969 * We allocate one ring per queue at run-time since we don't know the 1970 * number of queues at compile-time. The polling_netdev array is 1971 * intended for Multiqueue, but should work fine with a single queue. 1972 **/ 1973 static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter) 1974 { 1975 int i; 1976 1977 adapter->tx_ring = kcalloc(adapter->num_tx_queues, 1978 sizeof(struct ixgbevf_ring), GFP_KERNEL); 1979 if (!adapter->tx_ring) 1980 goto err_tx_ring_allocation; 1981 1982 adapter->rx_ring = kcalloc(adapter->num_rx_queues, 1983 sizeof(struct ixgbevf_ring), GFP_KERNEL); 1984 if (!adapter->rx_ring) 1985 goto err_rx_ring_allocation; 1986 1987 for (i = 0; i < adapter->num_tx_queues; i++) { 1988 adapter->tx_ring[i].count = adapter->tx_ring_count; 1989 adapter->tx_ring[i].queue_index = i; 1990 adapter->tx_ring[i].reg_idx = i; 1991 } 1992 1993 for (i = 0; i < adapter->num_rx_queues; i++) { 1994 adapter->rx_ring[i].count = adapter->rx_ring_count; 1995 adapter->rx_ring[i].queue_index = i; 1996 adapter->rx_ring[i].reg_idx = i; 1997 } 1998 1999 return 0; 2000 2001 err_rx_ring_allocation: 2002 kfree(adapter->tx_ring); 2003 err_tx_ring_allocation: 2004 return -ENOMEM; 2005 } 2006 2007 /** 2008 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported 2009 * @adapter: board private structure to initialize 2010 * 2011 * Attempt to configure the interrupts using the best available 2012 * capabilities of the hardware and the kernel. 2013 **/ 2014 static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) 2015 { 2016 int err = 0; 2017 int vector, v_budget; 2018 2019 /* 2020 * It's easy to be greedy for MSI-X vectors, but it really 2021 * doesn't do us much good if we have a lot more vectors 2022 * than CPU's. So let's be conservative and only ask for 2023 * (roughly) twice the number of vectors as there are CPU's. 2024 */ 2025 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues, 2026 (int)(num_online_cpus() * 2)) + NON_Q_VECTORS; 2027 2028 /* A failure in MSI-X entry allocation isn't fatal, but it does 2029 * mean we disable MSI-X capabilities of the adapter. 
*/ 2030 adapter->msix_entries = kcalloc(v_budget, 2031 sizeof(struct msix_entry), GFP_KERNEL); 2032 if (!adapter->msix_entries) { 2033 err = -ENOMEM; 2034 goto out; 2035 } 2036 2037 for (vector = 0; vector < v_budget; vector++) 2038 adapter->msix_entries[vector].entry = vector; 2039 2040 ixgbevf_acquire_msix_vectors(adapter, v_budget); 2041 2042 out: 2043 return err; 2044 } 2045 2046 /** 2047 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors 2048 * @adapter: board private structure to initialize 2049 * 2050 * We allocate one q_vector per queue interrupt. If allocation fails we 2051 * return -ENOMEM. 2052 **/ 2053 static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter) 2054 { 2055 int q_idx, num_q_vectors; 2056 struct ixgbevf_q_vector *q_vector; 2057 int napi_vectors; 2058 int (*poll)(struct napi_struct *, int); 2059 2060 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 2061 napi_vectors = adapter->num_rx_queues; 2062 poll = &ixgbevf_clean_rxonly; 2063 2064 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 2065 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL); 2066 if (!q_vector) 2067 goto err_out; 2068 q_vector->adapter = adapter; 2069 q_vector->v_idx = q_idx; 2070 q_vector->eitr = adapter->eitr_param; 2071 if (q_idx < napi_vectors) 2072 netif_napi_add(adapter->netdev, &q_vector->napi, 2073 (*poll), 64); 2074 adapter->q_vector[q_idx] = q_vector; 2075 } 2076 2077 return 0; 2078 2079 err_out: 2080 while (q_idx) { 2081 q_idx--; 2082 q_vector = adapter->q_vector[q_idx]; 2083 netif_napi_del(&q_vector->napi); 2084 kfree(q_vector); 2085 adapter->q_vector[q_idx] = NULL; 2086 } 2087 return -ENOMEM; 2088 } 2089 2090 /** 2091 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors 2092 * @adapter: board private structure to initialize 2093 * 2094 * This function frees the memory allocated to the q_vectors. In addition if 2095 * NAPI is enabled it will delete any references to the NAPI struct prior 2096 * to freeing the q_vector. 
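 * The teardown mirrors ixgbevf_alloc_q_vectors(): each slot in
 * adapter->q_vector is cleared, netif_napi_del() is called only for the
 * vectors that had NAPI registered (q_idx < napi_vectors), and the
 * q_vector memory is then freed.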
2097 **/ 2098 static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter) 2099 { 2100 int q_idx, num_q_vectors; 2101 int napi_vectors; 2102 2103 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 2104 napi_vectors = adapter->num_rx_queues; 2105 2106 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 2107 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx]; 2108 2109 adapter->q_vector[q_idx] = NULL; 2110 if (q_idx < napi_vectors) 2111 netif_napi_del(&q_vector->napi); 2112 kfree(q_vector); 2113 } 2114 } 2115 2116 /** 2117 * ixgbevf_reset_interrupt_capability - Reset MSIX setup 2118 * @adapter: board private structure 2119 * 2120 **/ 2121 static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter) 2122 { 2123 pci_disable_msix(adapter->pdev); 2124 kfree(adapter->msix_entries); 2125 adapter->msix_entries = NULL; 2126 } 2127 2128 /** 2129 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init 2130 * @adapter: board private structure to initialize 2131 * 2132 **/ 2133 static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter) 2134 { 2135 int err; 2136 2137 /* Number of supported queues */ 2138 ixgbevf_set_num_queues(adapter); 2139 2140 err = ixgbevf_set_interrupt_capability(adapter); 2141 if (err) { 2142 hw_dbg(&adapter->hw, 2143 "Unable to setup interrupt capabilities\n"); 2144 goto err_set_interrupt; 2145 } 2146 2147 err = ixgbevf_alloc_q_vectors(adapter); 2148 if (err) { 2149 hw_dbg(&adapter->hw, "Unable to allocate memory for queue " 2150 "vectors\n"); 2151 goto err_alloc_q_vectors; 2152 } 2153 2154 err = ixgbevf_alloc_queues(adapter); 2155 if (err) { 2156 pr_err("Unable to allocate memory for queues\n"); 2157 goto err_alloc_queues; 2158 } 2159 2160 hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, " 2161 "Tx Queue count = %u\n", 2162 (adapter->num_rx_queues > 1) ? "Enabled" : 2163 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues); 2164 2165 set_bit(__IXGBEVF_DOWN, &adapter->state); 2166 2167 return 0; 2168 err_alloc_queues: 2169 ixgbevf_free_q_vectors(adapter); 2170 err_alloc_q_vectors: 2171 ixgbevf_reset_interrupt_capability(adapter); 2172 err_set_interrupt: 2173 return err; 2174 } 2175 2176 /** 2177 * ixgbevf_sw_init - Initialize general software structures 2178 * (struct ixgbevf_adapter) 2179 * @adapter: board private structure to initialize 2180 * 2181 * ixgbevf_sw_init initializes the Adapter private data structure. 2182 * Fields are initialized based on PCI device information and 2183 * OS network device settings (MTU size). 
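 * If the PF has not completed its own reset yet, reset_hw() fails here and a
 * random locally administered MAC address (eth_hw_addr_random) is used until
 * the PF can assign one; otherwise the address provided by the PF is copied
 * into the net_device.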
2184 **/ 2185 static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter) 2186 { 2187 struct ixgbe_hw *hw = &adapter->hw; 2188 struct pci_dev *pdev = adapter->pdev; 2189 int err; 2190 2191 /* PCI config space info */ 2192 2193 hw->vendor_id = pdev->vendor; 2194 hw->device_id = pdev->device; 2195 hw->revision_id = pdev->revision; 2196 hw->subsystem_vendor_id = pdev->subsystem_vendor; 2197 hw->subsystem_device_id = pdev->subsystem_device; 2198 2199 hw->mbx.ops.init_params(hw); 2200 hw->mac.max_tx_queues = MAX_TX_QUEUES; 2201 hw->mac.max_rx_queues = MAX_RX_QUEUES; 2202 err = hw->mac.ops.reset_hw(hw); 2203 if (err) { 2204 dev_info(&pdev->dev, 2205 "PF still in reset state, assigning new address\n"); 2206 eth_hw_addr_random(adapter->netdev); 2207 memcpy(adapter->hw.mac.addr, adapter->netdev->dev_addr, 2208 adapter->netdev->addr_len); 2209 } else { 2210 err = hw->mac.ops.init_hw(hw); 2211 if (err) { 2212 pr_err("init_shared_code failed: %d\n", err); 2213 goto out; 2214 } 2215 memcpy(adapter->netdev->dev_addr, adapter->hw.mac.addr, 2216 adapter->netdev->addr_len); 2217 } 2218 2219 /* Enable dynamic interrupt throttling rates */ 2220 adapter->eitr_param = 20000; 2221 adapter->itr_setting = 1; 2222 2223 /* set defaults for eitr in MegaBytes */ 2224 adapter->eitr_low = 10; 2225 adapter->eitr_high = 20; 2226 2227 /* set default ring sizes */ 2228 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD; 2229 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD; 2230 2231 /* enable rx csum by default */ 2232 adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED; 2233 2234 set_bit(__IXGBEVF_DOWN, &adapter->state); 2235 return 0; 2236 2237 out: 2238 return err; 2239 } 2240 2241 #define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \ 2242 { \ 2243 u32 current_counter = IXGBE_READ_REG(hw, reg); \ 2244 if (current_counter < last_counter) \ 2245 counter += 0x100000000LL; \ 2246 last_counter = current_counter; \ 2247 counter &= 0xFFFFFFFF00000000LL; \ 2248 counter |= current_counter; \ 2249 } 2250 2251 #define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \ 2252 { \ 2253 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \ 2254 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \ 2255 u64 current_counter = (current_counter_msb << 32) | \ 2256 current_counter_lsb; \ 2257 if (current_counter < last_counter) \ 2258 counter += 0x1000000000LL; \ 2259 last_counter = current_counter; \ 2260 counter &= 0xFFFFFFF000000000LL; \ 2261 counter |= current_counter; \ 2262 } 2263 /** 2264 * ixgbevf_update_stats - Update the board statistics counters. 
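 *
 * The UPDATE_VF_COUNTER_* macros above accumulate the free-running VF
 * hardware counters into 64-bit software counters across rollover.  A
 * worked example for the 32-bit case, with hypothetical values:
 *
 *	last_vfgprc = 0xFFFFFFF0, VFGPRC now reads 0x00000010
 *	current < last, so vfgprc += 0x100000000
 *	vfgprc = (vfgprc & 0xFFFFFFFF00000000) | 0x00000010
 *
 * so the running count keeps increasing across the 32-bit wrap.  The 36-bit
 * variant does the same for the split LSB/MSB octet counters, stepping by
 * 0x1000000000 (2^36) instead.
 *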
2265 * @adapter: board private structure 2266 **/ 2267 void ixgbevf_update_stats(struct ixgbevf_adapter *adapter) 2268 { 2269 struct ixgbe_hw *hw = &adapter->hw; 2270 2271 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc, 2272 adapter->stats.vfgprc); 2273 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc, 2274 adapter->stats.vfgptc); 2275 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, 2276 adapter->stats.last_vfgorc, 2277 adapter->stats.vfgorc); 2278 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, 2279 adapter->stats.last_vfgotc, 2280 adapter->stats.vfgotc); 2281 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc, 2282 adapter->stats.vfmprc); 2283 } 2284 2285 /** 2286 * ixgbevf_watchdog - Timer Call-back 2287 * @data: pointer to adapter cast into an unsigned long 2288 **/ 2289 static void ixgbevf_watchdog(unsigned long data) 2290 { 2291 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data; 2292 struct ixgbe_hw *hw = &adapter->hw; 2293 u64 eics = 0; 2294 int i; 2295 2296 /* 2297 * Do the watchdog outside of interrupt context due to the lovely 2298 * delays that some of the newer hardware requires 2299 */ 2300 2301 if (test_bit(__IXGBEVF_DOWN, &adapter->state)) 2302 goto watchdog_short_circuit; 2303 2304 /* get one bit for every active tx/rx interrupt vector */ 2305 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { 2306 struct ixgbevf_q_vector *qv = adapter->q_vector[i]; 2307 if (qv->rxr_count || qv->txr_count) 2308 eics |= (1 << i); 2309 } 2310 2311 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, (u32)eics); 2312 2313 watchdog_short_circuit: 2314 schedule_work(&adapter->watchdog_task); 2315 } 2316 2317 /** 2318 * ixgbevf_tx_timeout - Respond to a Tx Hang 2319 * @netdev: network interface device structure 2320 **/ 2321 static void ixgbevf_tx_timeout(struct net_device *netdev) 2322 { 2323 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2324 2325 /* Do the reset outside of interrupt context */ 2326 schedule_work(&adapter->reset_task); 2327 } 2328 2329 static void ixgbevf_reset_task(struct work_struct *work) 2330 { 2331 struct ixgbevf_adapter *adapter; 2332 adapter = container_of(work, struct ixgbevf_adapter, reset_task); 2333 2334 /* If we're already down or resetting, just bail */ 2335 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || 2336 test_bit(__IXGBEVF_RESETTING, &adapter->state)) 2337 return; 2338 2339 adapter->tx_timeout_count++; 2340 2341 ixgbevf_reinit_locked(adapter); 2342 } 2343 2344 /** 2345 * ixgbevf_watchdog_task - worker thread to bring link up 2346 * @work: pointer to work_struct containing our data 2347 **/ 2348 static void ixgbevf_watchdog_task(struct work_struct *work) 2349 { 2350 struct ixgbevf_adapter *adapter = container_of(work, 2351 struct ixgbevf_adapter, 2352 watchdog_task); 2353 struct net_device *netdev = adapter->netdev; 2354 struct ixgbe_hw *hw = &adapter->hw; 2355 u32 link_speed = adapter->link_speed; 2356 bool link_up = adapter->link_up; 2357 2358 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK; 2359 2360 /* 2361 * Always check the link on the watchdog because we have 2362 * no LSC interrupt 2363 */ 2364 if (hw->mac.ops.check_link) { 2365 if ((hw->mac.ops.check_link(hw, &link_speed, 2366 &link_up, false)) != 0) { 2367 adapter->link_up = link_up; 2368 adapter->link_speed = link_speed; 2369 netif_carrier_off(netdev); 2370 netif_tx_stop_all_queues(netdev); 2371 schedule_work(&adapter->reset_task); 2372 goto pf_has_reset; 2373 } 2374 } else { 2375 /* always assume link is up, 
if no check link 2376 * function */ 2377 link_speed = IXGBE_LINK_SPEED_10GB_FULL; 2378 link_up = true; 2379 } 2380 adapter->link_up = link_up; 2381 adapter->link_speed = link_speed; 2382 2383 if (link_up) { 2384 if (!netif_carrier_ok(netdev)) { 2385 hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n", 2386 (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? 2387 10 : 1); 2388 netif_carrier_on(netdev); 2389 netif_tx_wake_all_queues(netdev); 2390 } 2391 } else { 2392 adapter->link_up = false; 2393 adapter->link_speed = 0; 2394 if (netif_carrier_ok(netdev)) { 2395 hw_dbg(&adapter->hw, "NIC Link is Down\n"); 2396 netif_carrier_off(netdev); 2397 netif_tx_stop_all_queues(netdev); 2398 } 2399 } 2400 2401 ixgbevf_update_stats(adapter); 2402 2403 pf_has_reset: 2404 /* Reset the timer */ 2405 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) 2406 mod_timer(&adapter->watchdog_timer, 2407 round_jiffies(jiffies + (2 * HZ))); 2408 2409 adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK; 2410 } 2411 2412 /** 2413 * ixgbevf_free_tx_resources - Free Tx Resources per Queue 2414 * @adapter: board private structure 2415 * @tx_ring: Tx descriptor ring for a specific queue 2416 * 2417 * Free all transmit software resources 2418 **/ 2419 void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter, 2420 struct ixgbevf_ring *tx_ring) 2421 { 2422 struct pci_dev *pdev = adapter->pdev; 2423 2424 ixgbevf_clean_tx_ring(adapter, tx_ring); 2425 2426 vfree(tx_ring->tx_buffer_info); 2427 tx_ring->tx_buffer_info = NULL; 2428 2429 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, 2430 tx_ring->dma); 2431 2432 tx_ring->desc = NULL; 2433 } 2434 2435 /** 2436 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues 2437 * @adapter: board private structure 2438 * 2439 * Free all transmit software resources 2440 **/ 2441 static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter) 2442 { 2443 int i; 2444 2445 for (i = 0; i < adapter->num_tx_queues; i++) 2446 if (adapter->tx_ring[i].desc) 2447 ixgbevf_free_tx_resources(adapter, 2448 &adapter->tx_ring[i]); 2449 2450 } 2451 2452 /** 2453 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors) 2454 * @adapter: board private structure 2455 * @tx_ring: tx descriptor ring (for a specific queue) to setup 2456 * 2457 * Return 0 on success, negative on failure 2458 **/ 2459 int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter, 2460 struct ixgbevf_ring *tx_ring) 2461 { 2462 struct pci_dev *pdev = adapter->pdev; 2463 int size; 2464 2465 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; 2466 tx_ring->tx_buffer_info = vzalloc(size); 2467 if (!tx_ring->tx_buffer_info) 2468 goto err; 2469 2470 /* round up to nearest 4K */ 2471 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 2472 tx_ring->size = ALIGN(tx_ring->size, 4096); 2473 2474 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, 2475 &tx_ring->dma, GFP_KERNEL); 2476 if (!tx_ring->desc) 2477 goto err; 2478 2479 tx_ring->next_to_use = 0; 2480 tx_ring->next_to_clean = 0; 2481 tx_ring->work_limit = tx_ring->count; 2482 return 0; 2483 2484 err: 2485 vfree(tx_ring->tx_buffer_info); 2486 tx_ring->tx_buffer_info = NULL; 2487 hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit " 2488 "descriptor ring\n"); 2489 return -ENOMEM; 2490 } 2491 2492 /** 2493 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources 2494 * @adapter: board private structure 2495 * 2496 * If this function returns with an error, then it's possible one or 2497 * more of the rings 
is populated (while the rest are not). It is the 2498 * callers duty to clean those orphaned rings. 2499 * 2500 * Return 0 on success, negative on failure 2501 **/ 2502 static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter) 2503 { 2504 int i, err = 0; 2505 2506 for (i = 0; i < adapter->num_tx_queues; i++) { 2507 err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]); 2508 if (!err) 2509 continue; 2510 hw_dbg(&adapter->hw, 2511 "Allocation for Tx Queue %u failed\n", i); 2512 break; 2513 } 2514 2515 return err; 2516 } 2517 2518 /** 2519 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors) 2520 * @adapter: board private structure 2521 * @rx_ring: rx descriptor ring (for a specific queue) to setup 2522 * 2523 * Returns 0 on success, negative on failure 2524 **/ 2525 int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter, 2526 struct ixgbevf_ring *rx_ring) 2527 { 2528 struct pci_dev *pdev = adapter->pdev; 2529 int size; 2530 2531 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; 2532 rx_ring->rx_buffer_info = vzalloc(size); 2533 if (!rx_ring->rx_buffer_info) 2534 goto alloc_failed; 2535 2536 /* Round up to nearest 4K */ 2537 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); 2538 rx_ring->size = ALIGN(rx_ring->size, 4096); 2539 2540 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, 2541 &rx_ring->dma, GFP_KERNEL); 2542 2543 if (!rx_ring->desc) { 2544 hw_dbg(&adapter->hw, 2545 "Unable to allocate memory for " 2546 "the receive descriptor ring\n"); 2547 vfree(rx_ring->rx_buffer_info); 2548 rx_ring->rx_buffer_info = NULL; 2549 goto alloc_failed; 2550 } 2551 2552 rx_ring->next_to_clean = 0; 2553 rx_ring->next_to_use = 0; 2554 2555 return 0; 2556 alloc_failed: 2557 return -ENOMEM; 2558 } 2559 2560 /** 2561 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources 2562 * @adapter: board private structure 2563 * 2564 * If this function returns with an error, then it's possible one or 2565 * more of the rings is populated (while the rest are not). It is the 2566 * callers duty to clean those orphaned rings. 
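 * (ixgbevf_open() does exactly that on failure: its error path calls
 * ixgbevf_free_all_rx_resources(), which skips any ring whose descriptor
 * memory was never allocated.)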
2567 * 2568 * Return 0 on success, negative on failure 2569 **/ 2570 static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter) 2571 { 2572 int i, err = 0; 2573 2574 for (i = 0; i < adapter->num_rx_queues; i++) { 2575 err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]); 2576 if (!err) 2577 continue; 2578 hw_dbg(&adapter->hw, 2579 "Allocation for Rx Queue %u failed\n", i); 2580 break; 2581 } 2582 return err; 2583 } 2584 2585 /** 2586 * ixgbevf_free_rx_resources - Free Rx Resources 2587 * @adapter: board private structure 2588 * @rx_ring: ring to clean the resources from 2589 * 2590 * Free all receive software resources 2591 **/ 2592 void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter, 2593 struct ixgbevf_ring *rx_ring) 2594 { 2595 struct pci_dev *pdev = adapter->pdev; 2596 2597 ixgbevf_clean_rx_ring(adapter, rx_ring); 2598 2599 vfree(rx_ring->rx_buffer_info); 2600 rx_ring->rx_buffer_info = NULL; 2601 2602 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, 2603 rx_ring->dma); 2604 2605 rx_ring->desc = NULL; 2606 } 2607 2608 /** 2609 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues 2610 * @adapter: board private structure 2611 * 2612 * Free all receive software resources 2613 **/ 2614 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter) 2615 { 2616 int i; 2617 2618 for (i = 0; i < adapter->num_rx_queues; i++) 2619 if (adapter->rx_ring[i].desc) 2620 ixgbevf_free_rx_resources(adapter, 2621 &adapter->rx_ring[i]); 2622 } 2623 2624 /** 2625 * ixgbevf_open - Called when a network interface is made active 2626 * @netdev: network interface device structure 2627 * 2628 * Returns 0 on success, negative value on failure 2629 * 2630 * The open entry point is called when a network interface is made 2631 * active by the system (IFF_UP). At this point all resources needed 2632 * for transmit and receive operations are allocated, the interrupt 2633 * handler is registered with the OS, the watchdog timer is started, 2634 * and the stack is notified that the interface is ready. 2635 **/ 2636 static int ixgbevf_open(struct net_device *netdev) 2637 { 2638 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2639 struct ixgbe_hw *hw = &adapter->hw; 2640 int err; 2641 2642 /* disallow open during test */ 2643 if (test_bit(__IXGBEVF_TESTING, &adapter->state)) 2644 return -EBUSY; 2645 2646 if (hw->adapter_stopped) { 2647 ixgbevf_reset(adapter); 2648 /* if adapter is still stopped then PF isn't up and 2649 * the vf can't start. */ 2650 if (hw->adapter_stopped) { 2651 err = IXGBE_ERR_MBX; 2652 pr_err("Unable to start - perhaps the PF Driver isn't " 2653 "up yet\n"); 2654 goto err_setup_reset; 2655 } 2656 } 2657 2658 /* allocate transmit descriptors */ 2659 err = ixgbevf_setup_all_tx_resources(adapter); 2660 if (err) 2661 goto err_setup_tx; 2662 2663 /* allocate receive descriptors */ 2664 err = ixgbevf_setup_all_rx_resources(adapter); 2665 if (err) 2666 goto err_setup_rx; 2667 2668 ixgbevf_configure(adapter); 2669 2670 /* 2671 * Map the Tx/Rx rings to the vectors we were allotted. 
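	 * (as noted in ixgbevf_acquire_msix_vectors(), that is typically one
	 * vector for TxQ[0] cleanup, one for RxQ[0] cleanup and one for other
	 * causes such as link status changes).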
2672 * if request_irq will be called in this function map_rings 2673 * must be called *before* up_complete 2674 */ 2675 ixgbevf_map_rings_to_vectors(adapter); 2676 2677 ixgbevf_up_complete(adapter); 2678 2679 /* clear any pending interrupts, may auto mask */ 2680 IXGBE_READ_REG(hw, IXGBE_VTEICR); 2681 err = ixgbevf_request_irq(adapter); 2682 if (err) 2683 goto err_req_irq; 2684 2685 ixgbevf_irq_enable(adapter, true, true); 2686 2687 return 0; 2688 2689 err_req_irq: 2690 ixgbevf_down(adapter); 2691 ixgbevf_free_irq(adapter); 2692 err_setup_rx: 2693 ixgbevf_free_all_rx_resources(adapter); 2694 err_setup_tx: 2695 ixgbevf_free_all_tx_resources(adapter); 2696 ixgbevf_reset(adapter); 2697 2698 err_setup_reset: 2699 2700 return err; 2701 } 2702 2703 /** 2704 * ixgbevf_close - Disables a network interface 2705 * @netdev: network interface device structure 2706 * 2707 * Returns 0, this is not allowed to fail 2708 * 2709 * The close entry point is called when an interface is de-activated 2710 * by the OS. The hardware is still under the drivers control, but 2711 * needs to be disabled. A global MAC reset is issued to stop the 2712 * hardware, and all transmit and receive resources are freed. 2713 **/ 2714 static int ixgbevf_close(struct net_device *netdev) 2715 { 2716 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2717 2718 ixgbevf_down(adapter); 2719 ixgbevf_free_irq(adapter); 2720 2721 ixgbevf_free_all_tx_resources(adapter); 2722 ixgbevf_free_all_rx_resources(adapter); 2723 2724 return 0; 2725 } 2726 2727 static int ixgbevf_tso(struct ixgbevf_adapter *adapter, 2728 struct ixgbevf_ring *tx_ring, 2729 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) 2730 { 2731 struct ixgbe_adv_tx_context_desc *context_desc; 2732 unsigned int i; 2733 int err; 2734 struct ixgbevf_tx_buffer *tx_buffer_info; 2735 u32 vlan_macip_lens = 0, type_tucmd_mlhl; 2736 u32 mss_l4len_idx, l4len; 2737 2738 if (skb_is_gso(skb)) { 2739 if (skb_header_cloned(skb)) { 2740 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2741 if (err) 2742 return err; 2743 } 2744 l4len = tcp_hdrlen(skb); 2745 *hdr_len += l4len; 2746 2747 if (skb->protocol == htons(ETH_P_IP)) { 2748 struct iphdr *iph = ip_hdr(skb); 2749 iph->tot_len = 0; 2750 iph->check = 0; 2751 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 2752 iph->daddr, 0, 2753 IPPROTO_TCP, 2754 0); 2755 adapter->hw_tso_ctxt++; 2756 } else if (skb_is_gso_v6(skb)) { 2757 ipv6_hdr(skb)->payload_len = 0; 2758 tcp_hdr(skb)->check = 2759 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 2760 &ipv6_hdr(skb)->daddr, 2761 0, IPPROTO_TCP, 0); 2762 adapter->hw_tso6_ctxt++; 2763 } 2764 2765 i = tx_ring->next_to_use; 2766 2767 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2768 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i); 2769 2770 /* VLAN MACLEN IPLEN */ 2771 if (tx_flags & IXGBE_TX_FLAGS_VLAN) 2772 vlan_macip_lens |= 2773 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK); 2774 vlan_macip_lens |= ((skb_network_offset(skb)) << 2775 IXGBE_ADVTXD_MACLEN_SHIFT); 2776 *hdr_len += skb_network_offset(skb); 2777 vlan_macip_lens |= 2778 (skb_transport_header(skb) - skb_network_header(skb)); 2779 *hdr_len += 2780 (skb_transport_header(skb) - skb_network_header(skb)); 2781 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); 2782 context_desc->seqnum_seed = 0; 2783 2784 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 2785 type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT | 2786 IXGBE_ADVTXD_DTYP_CTXT); 2787 2788 if (skb->protocol == htons(ETH_P_IP)) 2789 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; 2790 type_tucmd_mlhl |= 
IXGBE_ADVTXD_TUCMD_L4T_TCP; 2791 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); 2792 2793 /* MSS L4LEN IDX */ 2794 mss_l4len_idx = 2795 (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT); 2796 mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT); 2797 /* use index 1 for TSO */ 2798 mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT); 2799 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); 2800 2801 tx_buffer_info->time_stamp = jiffies; 2802 tx_buffer_info->next_to_watch = i; 2803 2804 i++; 2805 if (i == tx_ring->count) 2806 i = 0; 2807 tx_ring->next_to_use = i; 2808 2809 return true; 2810 } 2811 2812 return false; 2813 } 2814 2815 static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter, 2816 struct ixgbevf_ring *tx_ring, 2817 struct sk_buff *skb, u32 tx_flags) 2818 { 2819 struct ixgbe_adv_tx_context_desc *context_desc; 2820 unsigned int i; 2821 struct ixgbevf_tx_buffer *tx_buffer_info; 2822 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0; 2823 2824 if (skb->ip_summed == CHECKSUM_PARTIAL || 2825 (tx_flags & IXGBE_TX_FLAGS_VLAN)) { 2826 i = tx_ring->next_to_use; 2827 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2828 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i); 2829 2830 if (tx_flags & IXGBE_TX_FLAGS_VLAN) 2831 vlan_macip_lens |= (tx_flags & 2832 IXGBE_TX_FLAGS_VLAN_MASK); 2833 vlan_macip_lens |= (skb_network_offset(skb) << 2834 IXGBE_ADVTXD_MACLEN_SHIFT); 2835 if (skb->ip_summed == CHECKSUM_PARTIAL) 2836 vlan_macip_lens |= (skb_transport_header(skb) - 2837 skb_network_header(skb)); 2838 2839 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); 2840 context_desc->seqnum_seed = 0; 2841 2842 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT | 2843 IXGBE_ADVTXD_DTYP_CTXT); 2844 2845 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2846 switch (skb->protocol) { 2847 case __constant_htons(ETH_P_IP): 2848 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; 2849 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 2850 type_tucmd_mlhl |= 2851 IXGBE_ADVTXD_TUCMD_L4T_TCP; 2852 break; 2853 case __constant_htons(ETH_P_IPV6): 2854 /* XXX what about other V6 headers?? 
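			 * (only the first next-header value in the fixed
			 * IPv6 header is checked, so TCP carried behind an
			 * extension header such as routing or fragment is
			 * not recognised here)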
*/ 2855 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 2856 type_tucmd_mlhl |= 2857 IXGBE_ADVTXD_TUCMD_L4T_TCP; 2858 break; 2859 default: 2860 if (unlikely(net_ratelimit())) { 2861 pr_warn("partial checksum but " 2862 "proto=%x!\n", skb->protocol); 2863 } 2864 break; 2865 } 2866 } 2867 2868 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); 2869 /* use index zero for tx checksum offload */ 2870 context_desc->mss_l4len_idx = 0; 2871 2872 tx_buffer_info->time_stamp = jiffies; 2873 tx_buffer_info->next_to_watch = i; 2874 2875 adapter->hw_csum_tx_good++; 2876 i++; 2877 if (i == tx_ring->count) 2878 i = 0; 2879 tx_ring->next_to_use = i; 2880 2881 return true; 2882 } 2883 2884 return false; 2885 } 2886 2887 static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter, 2888 struct ixgbevf_ring *tx_ring, 2889 struct sk_buff *skb, u32 tx_flags, 2890 unsigned int first) 2891 { 2892 struct pci_dev *pdev = adapter->pdev; 2893 struct ixgbevf_tx_buffer *tx_buffer_info; 2894 unsigned int len; 2895 unsigned int total = skb->len; 2896 unsigned int offset = 0, size; 2897 int count = 0; 2898 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 2899 unsigned int f; 2900 int i; 2901 2902 i = tx_ring->next_to_use; 2903 2904 len = min(skb_headlen(skb), total); 2905 while (len) { 2906 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2907 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD); 2908 2909 tx_buffer_info->length = size; 2910 tx_buffer_info->mapped_as_page = false; 2911 tx_buffer_info->dma = dma_map_single(&adapter->pdev->dev, 2912 skb->data + offset, 2913 size, DMA_TO_DEVICE); 2914 if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma)) 2915 goto dma_error; 2916 tx_buffer_info->time_stamp = jiffies; 2917 tx_buffer_info->next_to_watch = i; 2918 2919 len -= size; 2920 total -= size; 2921 offset += size; 2922 count++; 2923 i++; 2924 if (i == tx_ring->count) 2925 i = 0; 2926 } 2927 2928 for (f = 0; f < nr_frags; f++) { 2929 const struct skb_frag_struct *frag; 2930 2931 frag = &skb_shinfo(skb)->frags[f]; 2932 len = min((unsigned int)skb_frag_size(frag), total); 2933 offset = 0; 2934 2935 while (len) { 2936 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2937 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD); 2938 2939 tx_buffer_info->length = size; 2940 tx_buffer_info->dma = 2941 skb_frag_dma_map(&adapter->pdev->dev, frag, 2942 offset, size, DMA_TO_DEVICE); 2943 tx_buffer_info->mapped_as_page = true; 2944 if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma)) 2945 goto dma_error; 2946 tx_buffer_info->time_stamp = jiffies; 2947 tx_buffer_info->next_to_watch = i; 2948 2949 len -= size; 2950 total -= size; 2951 offset += size; 2952 count++; 2953 i++; 2954 if (i == tx_ring->count) 2955 i = 0; 2956 } 2957 if (total == 0) 2958 break; 2959 } 2960 2961 if (i == 0) 2962 i = tx_ring->count - 1; 2963 else 2964 i = i - 1; 2965 tx_ring->tx_buffer_info[i].skb = skb; 2966 tx_ring->tx_buffer_info[first].next_to_watch = i; 2967 2968 return count; 2969 2970 dma_error: 2971 dev_err(&pdev->dev, "TX DMA map failed\n"); 2972 2973 /* clear timestamp and dma mappings for failed tx_buffer_info map */ 2974 tx_buffer_info->dma = 0; 2975 tx_buffer_info->time_stamp = 0; 2976 tx_buffer_info->next_to_watch = 0; 2977 count--; 2978 2979 /* clear timestamp and dma mappings for remaining portion of packet */ 2980 while (count >= 0) { 2981 count--; 2982 i--; 2983 if (i < 0) 2984 i += tx_ring->count; 2985 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2986 ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info); 2987 } 2988 2989 return 
count; 2990 } 2991 2992 static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter, 2993 struct ixgbevf_ring *tx_ring, int tx_flags, 2994 int count, u32 paylen, u8 hdr_len) 2995 { 2996 union ixgbe_adv_tx_desc *tx_desc = NULL; 2997 struct ixgbevf_tx_buffer *tx_buffer_info; 2998 u32 olinfo_status = 0, cmd_type_len = 0; 2999 unsigned int i; 3000 3001 u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS; 3002 3003 cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA; 3004 3005 cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT; 3006 3007 if (tx_flags & IXGBE_TX_FLAGS_VLAN) 3008 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE; 3009 3010 if (tx_flags & IXGBE_TX_FLAGS_TSO) { 3011 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; 3012 3013 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 3014 IXGBE_ADVTXD_POPTS_SHIFT; 3015 3016 /* use index 1 context for tso */ 3017 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); 3018 if (tx_flags & IXGBE_TX_FLAGS_IPV4) 3019 olinfo_status |= IXGBE_TXD_POPTS_IXSM << 3020 IXGBE_ADVTXD_POPTS_SHIFT; 3021 3022 } else if (tx_flags & IXGBE_TX_FLAGS_CSUM) 3023 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 3024 IXGBE_ADVTXD_POPTS_SHIFT; 3025 3026 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT); 3027 3028 i = tx_ring->next_to_use; 3029 while (count--) { 3030 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 3031 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); 3032 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma); 3033 tx_desc->read.cmd_type_len = 3034 cpu_to_le32(cmd_type_len | tx_buffer_info->length); 3035 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 3036 i++; 3037 if (i == tx_ring->count) 3038 i = 0; 3039 } 3040 3041 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd); 3042 3043 /* 3044 * Force memory writes to complete before letting h/w 3045 * know there are new descriptors to fetch. (Only 3046 * applicable for weak-ordered memory model archs, 3047 * such as IA-64). 3048 */ 3049 wmb(); 3050 3051 tx_ring->next_to_use = i; 3052 writel(i, adapter->hw.hw_addr + tx_ring->tail); 3053 } 3054 3055 static int __ixgbevf_maybe_stop_tx(struct net_device *netdev, 3056 struct ixgbevf_ring *tx_ring, int size) 3057 { 3058 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3059 3060 netif_stop_subqueue(netdev, tx_ring->queue_index); 3061 /* Herbert's original patch had: 3062 * smp_mb__after_netif_stop_queue(); 3063 * but since that doesn't exist yet, just open code it. */ 3064 smp_mb(); 3065 3066 /* We need to check again in a case another CPU has just 3067 * made room available. */ 3068 if (likely(IXGBE_DESC_UNUSED(tx_ring) < size)) 3069 return -EBUSY; 3070 3071 /* A reprieve! 
- use start_queue because it doesn't call schedule */ 3072 netif_start_subqueue(netdev, tx_ring->queue_index); 3073 ++adapter->restart_queue; 3074 return 0; 3075 } 3076 3077 static int ixgbevf_maybe_stop_tx(struct net_device *netdev, 3078 struct ixgbevf_ring *tx_ring, int size) 3079 { 3080 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size)) 3081 return 0; 3082 return __ixgbevf_maybe_stop_tx(netdev, tx_ring, size); 3083 } 3084 3085 static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 3086 { 3087 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3088 struct ixgbevf_ring *tx_ring; 3089 unsigned int first; 3090 unsigned int tx_flags = 0; 3091 u8 hdr_len = 0; 3092 int r_idx = 0, tso; 3093 int count = 0; 3094 3095 unsigned int f; 3096 3097 tx_ring = &adapter->tx_ring[r_idx]; 3098 3099 if (vlan_tx_tag_present(skb)) { 3100 tx_flags |= vlan_tx_tag_get(skb); 3101 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; 3102 tx_flags |= IXGBE_TX_FLAGS_VLAN; 3103 } 3104 3105 /* four things can cause us to need a context descriptor */ 3106 if (skb_is_gso(skb) || 3107 (skb->ip_summed == CHECKSUM_PARTIAL) || 3108 (tx_flags & IXGBE_TX_FLAGS_VLAN)) 3109 count++; 3110 3111 count += TXD_USE_COUNT(skb_headlen(skb)); 3112 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) 3113 count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f])); 3114 3115 if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count)) { 3116 adapter->tx_busy++; 3117 return NETDEV_TX_BUSY; 3118 } 3119 3120 first = tx_ring->next_to_use; 3121 3122 if (skb->protocol == htons(ETH_P_IP)) 3123 tx_flags |= IXGBE_TX_FLAGS_IPV4; 3124 tso = ixgbevf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len); 3125 if (tso < 0) { 3126 dev_kfree_skb_any(skb); 3127 return NETDEV_TX_OK; 3128 } 3129 3130 if (tso) 3131 tx_flags |= IXGBE_TX_FLAGS_TSO; 3132 else if (ixgbevf_tx_csum(adapter, tx_ring, skb, tx_flags) && 3133 (skb->ip_summed == CHECKSUM_PARTIAL)) 3134 tx_flags |= IXGBE_TX_FLAGS_CSUM; 3135 3136 ixgbevf_tx_queue(adapter, tx_ring, tx_flags, 3137 ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first), 3138 skb->len, hdr_len); 3139 3140 ixgbevf_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED); 3141 3142 return NETDEV_TX_OK; 3143 } 3144 3145 /** 3146 * ixgbevf_set_mac - Change the Ethernet Address of the NIC 3147 * @netdev: network interface device structure 3148 * @p: pointer to an address structure 3149 * 3150 * Returns 0 on success, negative on failure 3151 **/ 3152 static int ixgbevf_set_mac(struct net_device *netdev, void *p) 3153 { 3154 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3155 struct ixgbe_hw *hw = &adapter->hw; 3156 struct sockaddr *addr = p; 3157 3158 if (!is_valid_ether_addr(addr->sa_data)) 3159 return -EADDRNOTAVAIL; 3160 3161 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 3162 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); 3163 3164 if (hw->mac.ops.set_rar) 3165 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); 3166 3167 return 0; 3168 } 3169 3170 /** 3171 * ixgbevf_change_mtu - Change the Maximum Transfer Unit 3172 * @netdev: network interface device structure 3173 * @new_mtu: new value for maximum frame size 3174 * 3175 * Returns 0 on success, negative on failure 3176 **/ 3177 static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) 3178 { 3179 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3180 struct ixgbe_hw *hw = &adapter->hw; 3181 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 3182 int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE; 3183 u32 msg[2]; 3184 3185 if (adapter->hw.mac.type == 
ixgbe_mac_X540_vf) 3186 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; 3187 3188 /* MTU < 68 is an error and causes problems on some kernels */ 3189 if ((new_mtu < 68) || (max_frame > max_possible_frame)) 3190 return -EINVAL; 3191 3192 hw_dbg(&adapter->hw, "changing MTU from %d to %d\n", 3193 netdev->mtu, new_mtu); 3194 /* must set new MTU before calling down or up */ 3195 netdev->mtu = new_mtu; 3196 3197 if (!netif_running(netdev)) { 3198 msg[0] = IXGBE_VF_SET_LPE; 3199 msg[1] = max_frame; 3200 hw->mbx.ops.write_posted(hw, msg, 2); 3201 } 3202 3203 if (netif_running(netdev)) 3204 ixgbevf_reinit_locked(adapter); 3205 3206 return 0; 3207 } 3208 3209 static void ixgbevf_shutdown(struct pci_dev *pdev) 3210 { 3211 struct net_device *netdev = pci_get_drvdata(pdev); 3212 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3213 3214 netif_device_detach(netdev); 3215 3216 if (netif_running(netdev)) { 3217 ixgbevf_down(adapter); 3218 ixgbevf_free_irq(adapter); 3219 ixgbevf_free_all_tx_resources(adapter); 3220 ixgbevf_free_all_rx_resources(adapter); 3221 } 3222 3223 #ifdef CONFIG_PM 3224 pci_save_state(pdev); 3225 #endif 3226 3227 pci_disable_device(pdev); 3228 } 3229 3230 static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev, 3231 struct rtnl_link_stats64 *stats) 3232 { 3233 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3234 unsigned int start; 3235 u64 bytes, packets; 3236 const struct ixgbevf_ring *ring; 3237 int i; 3238 3239 ixgbevf_update_stats(adapter); 3240 3241 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc; 3242 3243 for (i = 0; i < adapter->num_rx_queues; i++) { 3244 ring = &adapter->rx_ring[i]; 3245 do { 3246 start = u64_stats_fetch_begin_bh(&ring->syncp); 3247 bytes = ring->total_bytes; 3248 packets = ring->total_packets; 3249 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 3250 stats->rx_bytes += bytes; 3251 stats->rx_packets += packets; 3252 } 3253 3254 for (i = 0; i < adapter->num_tx_queues; i++) { 3255 ring = &adapter->tx_ring[i]; 3256 do { 3257 start = u64_stats_fetch_begin_bh(&ring->syncp); 3258 bytes = ring->total_bytes; 3259 packets = ring->total_packets; 3260 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 3261 stats->tx_bytes += bytes; 3262 stats->tx_packets += packets; 3263 } 3264 3265 return stats; 3266 } 3267 3268 static int ixgbevf_set_features(struct net_device *netdev, 3269 netdev_features_t features) 3270 { 3271 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3272 3273 if (features & NETIF_F_RXCSUM) 3274 adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED; 3275 else 3276 adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED; 3277 3278 return 0; 3279 } 3280 3281 static const struct net_device_ops ixgbe_netdev_ops = { 3282 .ndo_open = ixgbevf_open, 3283 .ndo_stop = ixgbevf_close, 3284 .ndo_start_xmit = ixgbevf_xmit_frame, 3285 .ndo_set_rx_mode = ixgbevf_set_rx_mode, 3286 .ndo_get_stats64 = ixgbevf_get_stats, 3287 .ndo_validate_addr = eth_validate_addr, 3288 .ndo_set_mac_address = ixgbevf_set_mac, 3289 .ndo_change_mtu = ixgbevf_change_mtu, 3290 .ndo_tx_timeout = ixgbevf_tx_timeout, 3291 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid, 3292 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid, 3293 .ndo_set_features = ixgbevf_set_features, 3294 }; 3295 3296 static void ixgbevf_assign_netdev_ops(struct net_device *dev) 3297 { 3298 dev->netdev_ops = &ixgbe_netdev_ops; 3299 ixgbevf_set_ethtool_ops(dev); 3300 dev->watchdog_timeo = 5 * HZ; 3301 } 3302 3303 /** 3304 * ixgbevf_probe - Device Initialization Routine 
3305 * @pdev: PCI device information struct 3306 * @ent: entry in ixgbevf_pci_tbl 3307 * 3308 * Returns 0 on success, negative on failure 3309 * 3310 * ixgbevf_probe initializes an adapter identified by a pci_dev structure. 3311 * The OS initialization, configuring of the adapter private structure, 3312 * and a hardware reset occur. 3313 **/ 3314 static int __devinit ixgbevf_probe(struct pci_dev *pdev, 3315 const struct pci_device_id *ent) 3316 { 3317 struct net_device *netdev; 3318 struct ixgbevf_adapter *adapter = NULL; 3319 struct ixgbe_hw *hw = NULL; 3320 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data]; 3321 static int cards_found; 3322 int err, pci_using_dac; 3323 3324 err = pci_enable_device(pdev); 3325 if (err) 3326 return err; 3327 3328 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && 3329 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { 3330 pci_using_dac = 1; 3331 } else { 3332 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 3333 if (err) { 3334 err = dma_set_coherent_mask(&pdev->dev, 3335 DMA_BIT_MASK(32)); 3336 if (err) { 3337 dev_err(&pdev->dev, "No usable DMA " 3338 "configuration, aborting\n"); 3339 goto err_dma; 3340 } 3341 } 3342 pci_using_dac = 0; 3343 } 3344 3345 err = pci_request_regions(pdev, ixgbevf_driver_name); 3346 if (err) { 3347 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err); 3348 goto err_pci_reg; 3349 } 3350 3351 pci_set_master(pdev); 3352 3353 #ifdef HAVE_TX_MQ 3354 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter), 3355 MAX_TX_QUEUES); 3356 #else 3357 netdev = alloc_etherdev(sizeof(struct ixgbevf_adapter)); 3358 #endif 3359 if (!netdev) { 3360 err = -ENOMEM; 3361 goto err_alloc_etherdev; 3362 } 3363 3364 SET_NETDEV_DEV(netdev, &pdev->dev); 3365 3366 pci_set_drvdata(pdev, netdev); 3367 adapter = netdev_priv(netdev); 3368 3369 adapter->netdev = netdev; 3370 adapter->pdev = pdev; 3371 hw = &adapter->hw; 3372 hw->back = adapter; 3373 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 3374 3375 /* 3376 * call save state here in standalone driver because it relies on 3377 * adapter struct to exist, and needs to call netdev_priv 3378 */ 3379 pci_save_state(pdev); 3380 3381 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), 3382 pci_resource_len(pdev, 0)); 3383 if (!hw->hw_addr) { 3384 err = -EIO; 3385 goto err_ioremap; 3386 } 3387 3388 ixgbevf_assign_netdev_ops(netdev); 3389 3390 adapter->bd_number = cards_found; 3391 3392 /* Setup hw api */ 3393 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); 3394 hw->mac.type = ii->mac; 3395 3396 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops, 3397 sizeof(struct ixgbe_mbx_operations)); 3398 3399 adapter->flags &= ~IXGBE_FLAG_RX_PS_CAPABLE; 3400 adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; 3401 adapter->flags |= IXGBE_FLAG_RX_1BUF_CAPABLE; 3402 3403 /* setup the private structure */ 3404 err = ixgbevf_sw_init(adapter); 3405 if (err) 3406 goto err_sw_init; 3407 3408 /* The HW MAC address was set and/or determined in sw_init */ 3409 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); 3410 3411 if (!is_valid_ether_addr(netdev->dev_addr)) { 3412 pr_err("invalid MAC address\n"); 3413 err = -EIO; 3414 goto err_sw_init; 3415 } 3416 3417 netdev->hw_features = NETIF_F_SG | 3418 NETIF_F_IP_CSUM | 3419 NETIF_F_IPV6_CSUM | 3420 NETIF_F_TSO | 3421 NETIF_F_TSO6 | 3422 NETIF_F_RXCSUM; 3423 3424 netdev->features = netdev->hw_features | 3425 NETIF_F_HW_VLAN_TX | 3426 NETIF_F_HW_VLAN_RX | 3427 NETIF_F_HW_VLAN_FILTER; 3428 3429 netdev->vlan_features |= NETIF_F_TSO; 3430 
netdev->vlan_features |= NETIF_F_TSO6; 3431 netdev->vlan_features |= NETIF_F_IP_CSUM; 3432 netdev->vlan_features |= NETIF_F_IPV6_CSUM; 3433 netdev->vlan_features |= NETIF_F_SG; 3434 3435 if (pci_using_dac) 3436 netdev->features |= NETIF_F_HIGHDMA; 3437 3438 netdev->priv_flags |= IFF_UNICAST_FLT; 3439 3440 init_timer(&adapter->watchdog_timer); 3441 adapter->watchdog_timer.function = ixgbevf_watchdog; 3442 adapter->watchdog_timer.data = (unsigned long)adapter; 3443 3444 INIT_WORK(&adapter->reset_task, ixgbevf_reset_task); 3445 INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task); 3446 3447 err = ixgbevf_init_interrupt_scheme(adapter); 3448 if (err) 3449 goto err_sw_init; 3450 3451 /* pick up the PCI bus settings for reporting later */ 3452 if (hw->mac.ops.get_bus_info) 3453 hw->mac.ops.get_bus_info(hw); 3454 3455 strcpy(netdev->name, "eth%d"); 3456 3457 err = register_netdev(netdev); 3458 if (err) 3459 goto err_register; 3460 3461 adapter->netdev_registered = true; 3462 3463 netif_carrier_off(netdev); 3464 3465 ixgbevf_init_last_counter_stats(adapter); 3466 3467 /* print the MAC address */ 3468 hw_dbg(hw, "%pM\n", netdev->dev_addr); 3469 3470 hw_dbg(hw, "MAC: %d\n", hw->mac.type); 3471 3472 hw_dbg(hw, "LRO is disabled\n"); 3473 3474 hw_dbg(hw, "Intel(R) 82599 Virtual Function\n"); 3475 cards_found++; 3476 return 0; 3477 3478 err_register: 3479 err_sw_init: 3480 ixgbevf_reset_interrupt_capability(adapter); 3481 iounmap(hw->hw_addr); 3482 err_ioremap: 3483 free_netdev(netdev); 3484 err_alloc_etherdev: 3485 pci_release_regions(pdev); 3486 err_pci_reg: 3487 err_dma: 3488 pci_disable_device(pdev); 3489 return err; 3490 } 3491 3492 /** 3493 * ixgbevf_remove - Device Removal Routine 3494 * @pdev: PCI device information struct 3495 * 3496 * ixgbevf_remove is called by the PCI subsystem to alert the driver 3497 * that it should release a PCI device. The could be caused by a 3498 * Hot-Plug event, or because the driver is going to be removed from 3499 * memory. 3500 **/ 3501 static void __devexit ixgbevf_remove(struct pci_dev *pdev) 3502 { 3503 struct net_device *netdev = pci_get_drvdata(pdev); 3504 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3505 3506 set_bit(__IXGBEVF_DOWN, &adapter->state); 3507 3508 del_timer_sync(&adapter->watchdog_timer); 3509 3510 cancel_work_sync(&adapter->reset_task); 3511 cancel_work_sync(&adapter->watchdog_task); 3512 3513 if (adapter->netdev_registered) { 3514 unregister_netdev(netdev); 3515 adapter->netdev_registered = false; 3516 } 3517 3518 ixgbevf_reset_interrupt_capability(adapter); 3519 3520 iounmap(adapter->hw.hw_addr); 3521 pci_release_regions(pdev); 3522 3523 hw_dbg(&adapter->hw, "Remove complete\n"); 3524 3525 kfree(adapter->tx_ring); 3526 kfree(adapter->rx_ring); 3527 3528 free_netdev(netdev); 3529 3530 pci_disable_device(pdev); 3531 } 3532 3533 static struct pci_driver ixgbevf_driver = { 3534 .name = ixgbevf_driver_name, 3535 .id_table = ixgbevf_pci_tbl, 3536 .probe = ixgbevf_probe, 3537 .remove = __devexit_p(ixgbevf_remove), 3538 .shutdown = ixgbevf_shutdown, 3539 }; 3540 3541 /** 3542 * ixgbevf_init_module - Driver Registration Routine 3543 * 3544 * ixgbevf_init_module is the first routine called when the driver is 3545 * loaded. All it does is register with the PCI subsystem. 
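 * A verbose load for debugging might look like "modprobe ixgbevf debug=16"
 * (hypothetical invocation); the "debug" module parameter defined above is
 * passed to netif_msg_init() when each adapter is probed.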
3546  **/
3547 static int __init ixgbevf_init_module(void)
3548 {
3549 	int ret;
3550 	pr_info("%s - version %s\n", ixgbevf_driver_string,
3551 		ixgbevf_driver_version);
3552 
3553 	pr_info("%s\n", ixgbevf_copyright);
3554 
3555 	ret = pci_register_driver(&ixgbevf_driver);
3556 	return ret;
3557 }
3558 
3559 module_init(ixgbevf_init_module);
3560 
3561 /**
3562  * ixgbevf_exit_module - Driver Exit Cleanup Routine
3563  *
3564  * ixgbevf_exit_module is called just before the driver is removed
3565  * from memory.
3566  **/
3567 static void __exit ixgbevf_exit_module(void)
3568 {
3569 	pci_unregister_driver(&ixgbevf_driver);
3570 }
3571 
3572 #ifdef DEBUG
3573 /**
3574  * ixgbevf_get_hw_dev_name - return device name string
3575  * used by hardware layer to print debugging information
3576  **/
3577 char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
3578 {
3579 	struct ixgbevf_adapter *adapter = hw->back;
3580 	return adapter->netdev->name;
3581 }
3582 
3583 #endif
3584 module_exit(ixgbevf_exit_module);
3585 
3586 /* ixgbevf_main.c */
3587 