xref: /linux/drivers/net/ethernet/intel/igbvf/netdev.c (revision f2ee442115c9b6219083c019939a9cc0c9abb2f8)
1 /*******************************************************************************
2 
3   Intel(R) 82576 Virtual Function Linux driver
4   Copyright(c) 2009 - 2010 Intel Corporation.
5 
6   This program is free software; you can redistribute it and/or modify it
7   under the terms and conditions of the GNU General Public License,
8   version 2, as published by the Free Software Foundation.
9 
10   This program is distributed in the hope it will be useful, but WITHOUT
11   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13   more details.
14 
15   You should have received a copy of the GNU General Public License along with
16   this program; if not, write to the Free Software Foundation, Inc.,
17   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 
19   The full GNU General Public License is included in this distribution in
20   the file called "COPYING".
21 
22   Contact Information:
23   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 
26 *******************************************************************************/
27 
28 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/init.h>
31 #include <linux/pci.h>
32 #include <linux/vmalloc.h>
33 #include <linux/pagemap.h>
34 #include <linux/delay.h>
35 #include <linux/netdevice.h>
36 #include <linux/tcp.h>
37 #include <linux/ipv6.h>
38 #include <linux/slab.h>
39 #include <net/checksum.h>
40 #include <net/ip6_checksum.h>
41 #include <linux/mii.h>
42 #include <linux/ethtool.h>
43 #include <linux/if_vlan.h>
44 #include <linux/prefetch.h>
45 
46 #include "igbvf.h"
47 
48 #define DRV_VERSION "2.0.1-k"
49 char igbvf_driver_name[] = "igbvf";
50 const char igbvf_driver_version[] = DRV_VERSION;
51 static const char igbvf_driver_string[] =
52 		  "Intel(R) Gigabit Virtual Function Network Driver";
53 static const char igbvf_copyright[] =
54 		  "Copyright (c) 2009 - 2011 Intel Corporation.";
55 
56 static int igbvf_poll(struct napi_struct *napi, int budget);
57 static void igbvf_reset(struct igbvf_adapter *);
58 static void igbvf_set_interrupt_capability(struct igbvf_adapter *);
59 static void igbvf_reset_interrupt_capability(struct igbvf_adapter *);
60 
61 static struct igbvf_info igbvf_vf_info = {
62 	.mac                    = e1000_vfadapt,
63 	.flags                  = 0,
64 	.pba                    = 10,
65 	.init_ops               = e1000_init_function_pointers_vf,
66 };
67 
68 static struct igbvf_info igbvf_i350_vf_info = {
69 	.mac			= e1000_vfadapt_i350,
70 	.flags			= 0,
71 	.pba			= 10,
72 	.init_ops		= e1000_init_function_pointers_vf,
73 };
74 
75 static const struct igbvf_info *igbvf_info_tbl[] = {
76 	[board_vf]              = &igbvf_vf_info,
77 	[board_i350_vf]		= &igbvf_i350_vf_info,
78 };
79 
80 /**
81  * igbvf_desc_unused - calculate if we have unused descriptors
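 *
 * Worked example: with a 256-entry ring, next_to_clean = 10 and
 * next_to_use = 200, the function returns 256 + 10 - 200 - 1 = 65.  One
 * slot is always left unused so an empty ring can be told apart from a
 * full one.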
82  **/
83 static int igbvf_desc_unused(struct igbvf_ring *ring)
84 {
85 	if (ring->next_to_clean > ring->next_to_use)
86 		return ring->next_to_clean - ring->next_to_use - 1;
87 
88 	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
89 }
90 
91 /**
92  * igbvf_receive_skb - helper function to handle Rx indications
93  * @adapter: board private structure
 * @netdev: pointer to the net device structure
94  * @status: descriptor status field as written by hardware
95  * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
96  * @skb: pointer to sk_buff to be indicated to stack
97  **/
98 static void igbvf_receive_skb(struct igbvf_adapter *adapter,
99                               struct net_device *netdev,
100                               struct sk_buff *skb,
101                               u32 status, u16 vlan)
102 {
103 	if (status & E1000_RXD_STAT_VP) {
104 		u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
105 		if (test_bit(vid, adapter->active_vlans))
106 			__vlan_hwaccel_put_tag(skb, vid);
107 	}
108 	netif_receive_skb(skb);
109 }
110 
111 static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
112                                          u32 status_err, struct sk_buff *skb)
113 {
114 	skb_checksum_none_assert(skb);
115 
116 	/* The Ignore Checksum bit is set, or checksum is disabled via ethtool */
117 	if ((status_err & E1000_RXD_STAT_IXSM) ||
118 	    (adapter->flags & IGBVF_FLAG_RX_CSUM_DISABLED))
119 		return;
120 
121 	/* TCP/UDP checksum error bit is set */
122 	if (status_err &
123 	    (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
124 		/* let the stack verify checksum errors */
125 		adapter->hw_csum_err++;
126 		return;
127 	}
128 
129 	/* It must be a TCP or UDP packet with a valid checksum */
130 	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
131 		skb->ip_summed = CHECKSUM_UNNECESSARY;
132 
133 	adapter->hw_csum_good++;
134 }
135 
136 /**
137  * igbvf_alloc_rx_buffers - Replace used receive buffers; packet split
138  * @rx_ring: address of ring structure to repopulate
139  * @cleaned_count: number of buffers to repopulate
140  **/
141 static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
142                                    int cleaned_count)
143 {
144 	struct igbvf_adapter *adapter = rx_ring->adapter;
145 	struct net_device *netdev = adapter->netdev;
146 	struct pci_dev *pdev = adapter->pdev;
147 	union e1000_adv_rx_desc *rx_desc;
148 	struct igbvf_buffer *buffer_info;
149 	struct sk_buff *skb;
150 	unsigned int i;
151 	int bufsz;
152 
153 	i = rx_ring->next_to_use;
154 	buffer_info = &rx_ring->buffer_info[i];
155 
156 	if (adapter->rx_ps_hdr_size)
157 		bufsz = adapter->rx_ps_hdr_size;
158 	else
159 		bufsz = adapter->rx_buffer_len;
160 
161 	while (cleaned_count--) {
162 		rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);
163 
164 		if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
165 			if (!buffer_info->page) {
166 				buffer_info->page = alloc_page(GFP_ATOMIC);
167 				if (!buffer_info->page) {
168 					adapter->alloc_rx_buff_failed++;
169 					goto no_buffers;
170 				}
171 				buffer_info->page_offset = 0;
172 			} else {
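				/* Reuse the other half of the same page: each
				 * page is split into two half-page receive
				 * buffers, and XOR-ing the offset with
				 * PAGE_SIZE / 2 flips between the two halves.
				 */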
173 				buffer_info->page_offset ^= PAGE_SIZE / 2;
174 			}
175 			buffer_info->page_dma =
176 				dma_map_page(&pdev->dev, buffer_info->page,
177 				             buffer_info->page_offset,
178 				             PAGE_SIZE / 2,
179 					     DMA_FROM_DEVICE);
180 		}
181 
182 		if (!buffer_info->skb) {
183 			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
184 			if (!skb) {
185 				adapter->alloc_rx_buff_failed++;
186 				goto no_buffers;
187 			}
188 
189 			buffer_info->skb = skb;
190 			buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
191 			                                  bufsz,
192 							  DMA_FROM_DEVICE);
193 		}
194 		/* Refresh the desc even if buffer_addrs didn't change because
195 		 * each write-back erases this info. */
196 		if (adapter->rx_ps_hdr_size) {
197 			rx_desc->read.pkt_addr =
198 			     cpu_to_le64(buffer_info->page_dma);
199 			rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
200 		} else {
201 			rx_desc->read.pkt_addr =
202 			     cpu_to_le64(buffer_info->dma);
203 			rx_desc->read.hdr_addr = 0;
204 		}
205 
206 		i++;
207 		if (i == rx_ring->count)
208 			i = 0;
209 		buffer_info = &rx_ring->buffer_info[i];
210 	}
211 
212 no_buffers:
213 	if (rx_ring->next_to_use != i) {
214 		rx_ring->next_to_use = i;
215 		if (i == 0)
216 			i = (rx_ring->count - 1);
217 		else
218 			i--;
219 
220 		/* Force memory writes to complete before letting h/w
221 		 * know there are new descriptors to fetch.  (Only
222 		 * applicable for weak-ordered memory model archs,
223 		 * such as IA-64). */
224 		wmb();
225 		writel(i, adapter->hw.hw_addr + rx_ring->tail);
226 	}
227 }
228 
229 /**
230  * igbvf_clean_rx_irq - Send received data up the network stack; legacy
231  * @adapter: board private structure
 * @work_done: incremented by the number of packets processed
 * @work_to_do: NAPI budget limiting how many packets may be cleaned
232  *
233  * The return value indicates whether actual cleaning was done; there
234  * is no guarantee that everything was cleaned.
235  **/
236 static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
237                                int *work_done, int work_to_do)
238 {
239 	struct igbvf_ring *rx_ring = adapter->rx_ring;
240 	struct net_device *netdev = adapter->netdev;
241 	struct pci_dev *pdev = adapter->pdev;
242 	union e1000_adv_rx_desc *rx_desc, *next_rxd;
243 	struct igbvf_buffer *buffer_info, *next_buffer;
244 	struct sk_buff *skb;
245 	bool cleaned = false;
246 	int cleaned_count = 0;
247 	unsigned int total_bytes = 0, total_packets = 0;
248 	unsigned int i;
249 	u32 length, hlen, staterr;
250 
251 	i = rx_ring->next_to_clean;
252 	rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);
253 	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
254 
255 	while (staterr & E1000_RXD_STAT_DD) {
256 		if (*work_done >= work_to_do)
257 			break;
258 		(*work_done)++;
259 		rmb(); /* read descriptor and rx_buffer_info after status DD */
260 
261 		buffer_info = &rx_ring->buffer_info[i];
262 
263 		/* HW will not DMA in data larger than the given buffer, even
264 		 * if it parses the (NFS, of course) header to be larger.  In
265 		 * that case, it fills the header buffer and spills the rest
266 		 * into the page.
267 		 */
268 		hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) &
269 		  E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
270 		if (hlen > adapter->rx_ps_hdr_size)
271 			hlen = adapter->rx_ps_hdr_size;
272 
273 		length = le16_to_cpu(rx_desc->wb.upper.length);
274 		cleaned = true;
275 		cleaned_count++;
276 
277 		skb = buffer_info->skb;
278 		prefetch(skb->data - NET_IP_ALIGN);
279 		buffer_info->skb = NULL;
280 		if (!adapter->rx_ps_hdr_size) {
281 			dma_unmap_single(&pdev->dev, buffer_info->dma,
282 			                 adapter->rx_buffer_len,
283 					 DMA_FROM_DEVICE);
284 			buffer_info->dma = 0;
285 			skb_put(skb, length);
286 			goto send_up;
287 		}
288 
289 		if (!skb_shinfo(skb)->nr_frags) {
290 			dma_unmap_single(&pdev->dev, buffer_info->dma,
291 			                 adapter->rx_ps_hdr_size,
292 					 DMA_FROM_DEVICE);
293 			skb_put(skb, hlen);
294 		}
295 
296 		if (length) {
297 			dma_unmap_page(&pdev->dev, buffer_info->page_dma,
298 			               PAGE_SIZE / 2,
299 				       DMA_FROM_DEVICE);
300 			buffer_info->page_dma = 0;
301 
302 			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
303 			                   buffer_info->page,
304 			                   buffer_info->page_offset,
305 			                   length);
306 
307 			if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
308 			    (page_count(buffer_info->page) != 1))
309 				buffer_info->page = NULL;
310 			else
311 				get_page(buffer_info->page);
312 
313 			skb->len += length;
314 			skb->data_len += length;
315 			skb->truesize += PAGE_SIZE / 2;
316 		}
317 send_up:
318 		i++;
319 		if (i == rx_ring->count)
320 			i = 0;
321 		next_rxd = IGBVF_RX_DESC_ADV(*rx_ring, i);
322 		prefetch(next_rxd);
323 		next_buffer = &rx_ring->buffer_info[i];
324 
325 		if (!(staterr & E1000_RXD_STAT_EOP)) {
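		/* If this descriptor is not the end of the packet, hand the
		 * partially assembled skb forward to the next buffer_info so
		 * the next loop iteration keeps appending to it.
		 */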
326 			buffer_info->skb = next_buffer->skb;
327 			buffer_info->dma = next_buffer->dma;
328 			next_buffer->skb = skb;
329 			next_buffer->dma = 0;
330 			goto next_desc;
331 		}
332 
333 		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
334 			dev_kfree_skb_irq(skb);
335 			goto next_desc;
336 		}
337 
338 		total_bytes += skb->len;
339 		total_packets++;
340 
341 		igbvf_rx_checksum_adv(adapter, staterr, skb);
342 
343 		skb->protocol = eth_type_trans(skb, netdev);
344 
345 		igbvf_receive_skb(adapter, netdev, skb, staterr,
346 		                  rx_desc->wb.upper.vlan);
347 
348 next_desc:
349 		rx_desc->wb.upper.status_error = 0;
350 
351 		/* return some buffers to hardware, one at a time is too slow */
352 		if (cleaned_count >= IGBVF_RX_BUFFER_WRITE) {
353 			igbvf_alloc_rx_buffers(rx_ring, cleaned_count);
354 			cleaned_count = 0;
355 		}
356 
357 		/* use prefetched values */
358 		rx_desc = next_rxd;
359 		buffer_info = next_buffer;
360 
361 		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
362 	}
363 
364 	rx_ring->next_to_clean = i;
365 	cleaned_count = igbvf_desc_unused(rx_ring);
366 
367 	if (cleaned_count)
368 		igbvf_alloc_rx_buffers(rx_ring, cleaned_count);
369 
370 	adapter->total_rx_packets += total_packets;
371 	adapter->total_rx_bytes += total_bytes;
372 	adapter->net_stats.rx_bytes += total_bytes;
373 	adapter->net_stats.rx_packets += total_packets;
374 	return cleaned;
375 }
376 
377 static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
378                             struct igbvf_buffer *buffer_info)
379 {
380 	if (buffer_info->dma) {
381 		if (buffer_info->mapped_as_page)
382 			dma_unmap_page(&adapter->pdev->dev,
383 				       buffer_info->dma,
384 				       buffer_info->length,
385 				       DMA_TO_DEVICE);
386 		else
387 			dma_unmap_single(&adapter->pdev->dev,
388 					 buffer_info->dma,
389 					 buffer_info->length,
390 					 DMA_TO_DEVICE);
391 		buffer_info->dma = 0;
392 	}
393 	if (buffer_info->skb) {
394 		dev_kfree_skb_any(buffer_info->skb);
395 		buffer_info->skb = NULL;
396 	}
397 	buffer_info->time_stamp = 0;
398 }
399 
400 /**
401  * igbvf_setup_tx_resources - allocate Tx resources (Descriptors)
402  * @adapter: board private structure
 * @tx_ring: Tx descriptor ring to set up
403  *
404  * Return 0 on success, negative on failure
405  **/
406 int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
407                              struct igbvf_ring *tx_ring)
408 {
409 	struct pci_dev *pdev = adapter->pdev;
410 	int size;
411 
412 	size = sizeof(struct igbvf_buffer) * tx_ring->count;
413 	tx_ring->buffer_info = vzalloc(size);
414 	if (!tx_ring->buffer_info)
415 		goto err;
416 
417 	/* round up to nearest 4K */
418 	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
419 	tx_ring->size = ALIGN(tx_ring->size, 4096);
420 
421 	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
422 					   &tx_ring->dma, GFP_KERNEL);
423 
424 	if (!tx_ring->desc)
425 		goto err;
426 
427 	tx_ring->adapter = adapter;
428 	tx_ring->next_to_use = 0;
429 	tx_ring->next_to_clean = 0;
430 
431 	return 0;
432 err:
433 	vfree(tx_ring->buffer_info);
434 	dev_err(&adapter->pdev->dev,
435 	        "Unable to allocate memory for the transmit descriptor ring\n");
436 	return -ENOMEM;
437 }
438 
439 /**
440  * igbvf_setup_rx_resources - allocate Rx resources (Descriptors)
441  * @adapter: board private structure
442  *
443  * Returns 0 on success, negative on failure
444  **/
445 int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
446 			     struct igbvf_ring *rx_ring)
447 {
448 	struct pci_dev *pdev = adapter->pdev;
449 	int size, desc_len;
450 
451 	size = sizeof(struct igbvf_buffer) * rx_ring->count;
452 	rx_ring->buffer_info = vzalloc(size);
453 	if (!rx_ring->buffer_info)
454 		goto err;
455 
456 	desc_len = sizeof(union e1000_adv_rx_desc);
457 
458 	/* Round up to nearest 4K */
459 	rx_ring->size = rx_ring->count * desc_len;
460 	rx_ring->size = ALIGN(rx_ring->size, 4096);
461 
462 	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
463 					   &rx_ring->dma, GFP_KERNEL);
464 
465 	if (!rx_ring->desc)
466 		goto err;
467 
468 	rx_ring->next_to_clean = 0;
469 	rx_ring->next_to_use = 0;
470 
471 	rx_ring->adapter = adapter;
472 
473 	return 0;
474 
475 err:
476 	vfree(rx_ring->buffer_info);
477 	rx_ring->buffer_info = NULL;
478 	dev_err(&adapter->pdev->dev,
479 	        "Unable to allocate memory for the receive descriptor ring\n");
480 	return -ENOMEM;
481 }
482 
483 /**
484  * igbvf_clean_tx_ring - Free Tx Buffers
485  * @tx_ring: ring to be cleaned
486  **/
487 static void igbvf_clean_tx_ring(struct igbvf_ring *tx_ring)
488 {
489 	struct igbvf_adapter *adapter = tx_ring->adapter;
490 	struct igbvf_buffer *buffer_info;
491 	unsigned long size;
492 	unsigned int i;
493 
494 	if (!tx_ring->buffer_info)
495 		return;
496 
497 	/* Free all the Tx ring sk_buffs */
498 	for (i = 0; i < tx_ring->count; i++) {
499 		buffer_info = &tx_ring->buffer_info[i];
500 		igbvf_put_txbuf(adapter, buffer_info);
501 	}
502 
503 	size = sizeof(struct igbvf_buffer) * tx_ring->count;
504 	memset(tx_ring->buffer_info, 0, size);
505 
506 	/* Zero out the descriptor ring */
507 	memset(tx_ring->desc, 0, tx_ring->size);
508 
509 	tx_ring->next_to_use = 0;
510 	tx_ring->next_to_clean = 0;
511 
512 	writel(0, adapter->hw.hw_addr + tx_ring->head);
513 	writel(0, adapter->hw.hw_addr + tx_ring->tail);
514 }
515 
516 /**
517  * igbvf_free_tx_resources - Free Tx Resources per Queue
518  * @tx_ring: ring to free resources from
519  *
520  * Free all transmit software resources
521  **/
522 void igbvf_free_tx_resources(struct igbvf_ring *tx_ring)
523 {
524 	struct pci_dev *pdev = tx_ring->adapter->pdev;
525 
526 	igbvf_clean_tx_ring(tx_ring);
527 
528 	vfree(tx_ring->buffer_info);
529 	tx_ring->buffer_info = NULL;
530 
531 	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
532 			  tx_ring->dma);
533 
534 	tx_ring->desc = NULL;
535 }
536 
537 /**
538  * igbvf_clean_rx_ring - Free Rx Buffers per Queue
539  * @rx_ring: ring whose buffers are to be freed
540  **/
541 static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
542 {
543 	struct igbvf_adapter *adapter = rx_ring->adapter;
544 	struct igbvf_buffer *buffer_info;
545 	struct pci_dev *pdev = adapter->pdev;
546 	unsigned long size;
547 	unsigned int i;
548 
549 	if (!rx_ring->buffer_info)
550 		return;
551 
552 	/* Free all the Rx ring sk_buffs */
553 	for (i = 0; i < rx_ring->count; i++) {
554 		buffer_info = &rx_ring->buffer_info[i];
555 		if (buffer_info->dma) {
556 			if (adapter->rx_ps_hdr_size) {
557 				dma_unmap_single(&pdev->dev, buffer_info->dma,
558 				                 adapter->rx_ps_hdr_size,
559 						 DMA_FROM_DEVICE);
560 			} else {
561 				dma_unmap_single(&pdev->dev, buffer_info->dma,
562 				                 adapter->rx_buffer_len,
563 						 DMA_FROM_DEVICE);
564 			}
565 			buffer_info->dma = 0;
566 		}
567 
568 		if (buffer_info->skb) {
569 			dev_kfree_skb(buffer_info->skb);
570 			buffer_info->skb = NULL;
571 		}
572 
573 		if (buffer_info->page) {
574 			if (buffer_info->page_dma)
575 				dma_unmap_page(&pdev->dev,
576 					       buffer_info->page_dma,
577 				               PAGE_SIZE / 2,
578 					       DMA_FROM_DEVICE);
579 			put_page(buffer_info->page);
580 			buffer_info->page = NULL;
581 			buffer_info->page_dma = 0;
582 			buffer_info->page_offset = 0;
583 		}
584 	}
585 
586 	size = sizeof(struct igbvf_buffer) * rx_ring->count;
587 	memset(rx_ring->buffer_info, 0, size);
588 
589 	/* Zero out the descriptor ring */
590 	memset(rx_ring->desc, 0, rx_ring->size);
591 
592 	rx_ring->next_to_clean = 0;
593 	rx_ring->next_to_use = 0;
594 
595 	writel(0, adapter->hw.hw_addr + rx_ring->head);
596 	writel(0, adapter->hw.hw_addr + rx_ring->tail);
597 }
598 
599 /**
600  * igbvf_free_rx_resources - Free Rx Resources
601  * @rx_ring: ring to clean the resources from
602  *
603  * Free all receive software resources
604  **/
606 void igbvf_free_rx_resources(struct igbvf_ring *rx_ring)
607 {
608 	struct pci_dev *pdev = rx_ring->adapter->pdev;
609 
610 	igbvf_clean_rx_ring(rx_ring);
611 
612 	vfree(rx_ring->buffer_info);
613 	rx_ring->buffer_info = NULL;
614 
615 	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
616 	                  rx_ring->dma);
617 	rx_ring->desc = NULL;
618 }
619 
620 /**
621  * igbvf_update_itr - update the dynamic ITR value based on statistics
622  * @adapter: pointer to adapter
623  * @itr_setting: current adapter->itr
624  * @packets: the number of packets during this measurement interval
625  * @bytes: the number of bytes during this measurement interval
626  *
627  *      Stores a new ITR value based on packets and byte
628  *      counts during the last interrupt.  The advantage of per interrupt
629  *      computation is faster updates and more accurate ITR for the current
630  *      traffic pattern.  Constants in this function were computed
631  *      based on theoretical maximum wire speed and thresholds were set based
632  *      on testing data as well as attempting to minimize response time
633  *      while increasing bulk throughput.  This functionality is controlled
634  *      by the InterruptThrottleRate module parameter.
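 *
 *      Illustrative example: in low_latency mode an interval carrying
 *      40 packets and 12000 bytes moves the ring to lowest_latency,
 *      while a bytes/packets ratio above 8000 (large TSO bursts) pushes
 *      it to bulk_latency.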
635  **/
636 static unsigned int igbvf_update_itr(struct igbvf_adapter *adapter,
637                                      u16 itr_setting, int packets,
638                                      int bytes)
639 {
640 	unsigned int retval = itr_setting;
641 
642 	if (packets == 0)
643 		goto update_itr_done;
644 
645 	switch (itr_setting) {
646 	case lowest_latency:
647 		/* handle TSO and jumbo frames */
648 		if (bytes/packets > 8000)
649 			retval = bulk_latency;
650 		else if ((packets < 5) && (bytes > 512))
651 			retval = low_latency;
652 		break;
653 	case low_latency:  /* 50 usec aka 20000 ints/s */
654 		if (bytes > 10000) {
655 			/* this if handles the TSO accounting */
656 			if (bytes/packets > 8000)
657 				retval = bulk_latency;
658 			else if ((packets < 10) || ((bytes/packets) > 1200))
659 				retval = bulk_latency;
660 			else if (packets > 35)
661 				retval = lowest_latency;
662 		} else if (bytes/packets > 2000) {
663 			retval = bulk_latency;
664 		} else if (packets <= 2 && bytes < 512) {
665 			retval = lowest_latency;
666 		}
667 		break;
668 	case bulk_latency: /* 250 usec aka 4000 ints/s */
669 		if (bytes > 25000) {
670 			if (packets > 35)
671 				retval = low_latency;
672 		} else if (bytes < 6000) {
673 			retval = low_latency;
674 		}
675 		break;
676 	}
677 
678 update_itr_done:
679 	return retval;
680 }
681 
682 static void igbvf_set_itr(struct igbvf_adapter *adapter)
683 {
684 	struct e1000_hw *hw = &adapter->hw;
685 	u16 current_itr;
686 	u32 new_itr = adapter->itr;
687 
688 	adapter->tx_itr = igbvf_update_itr(adapter, adapter->tx_itr,
689 	                                   adapter->total_tx_packets,
690 	                                   adapter->total_tx_bytes);
691 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
692 	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
693 		adapter->tx_itr = low_latency;
694 
695 	adapter->rx_itr = igbvf_update_itr(adapter, adapter->rx_itr,
696 	                                   adapter->total_rx_packets,
697 	                                   adapter->total_rx_bytes);
698 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
699 	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
700 		adapter->rx_itr = low_latency;
701 
702 	current_itr = max(adapter->rx_itr, adapter->tx_itr);
703 
704 	switch (current_itr) {
705 	/* counts and packets in update_itr are dependent on these numbers */
706 	case lowest_latency:
707 		new_itr = 70000;
708 		break;
709 	case low_latency:
710 		new_itr = 20000; /* aka hwitr = ~200 */
711 		break;
712 	case bulk_latency:
713 		new_itr = 4000;
714 		break;
715 	default:
716 		break;
717 	}
718 
719 	if (new_itr != adapter->itr) {
720 		/*
721 		 * this attempts to bias the interrupt rate towards Bulk
722 		 * by adding intermediate steps when interrupt rate is
723 		 * increasing
724 		 */
725 		new_itr = new_itr > adapter->itr ?
726 		             min(adapter->itr + (new_itr >> 2), new_itr) :
727 		             new_itr;
728 		adapter->itr = new_itr;
729 		adapter->rx_ring->itr_val = 1952;
730 
731 		if (adapter->msix_entries)
732 			adapter->rx_ring->set_itr = 1;
733 		else
734 			ew32(ITR, 1952);
735 	}
736 }
737 
738 /**
739  * igbvf_clean_tx_irq - Reclaim resources after transmit completes
740  * @tx_ring: ring to be cleaned
741  * Returns true if the ring is completely cleaned
742  **/
743 static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
744 {
745 	struct igbvf_adapter *adapter = tx_ring->adapter;
746 	struct net_device *netdev = adapter->netdev;
747 	struct igbvf_buffer *buffer_info;
748 	struct sk_buff *skb;
749 	union e1000_adv_tx_desc *tx_desc, *eop_desc;
750 	unsigned int total_bytes = 0, total_packets = 0;
751 	unsigned int i, eop, count = 0;
752 	bool cleaned = false;
753 
754 	i = tx_ring->next_to_clean;
755 	eop = tx_ring->buffer_info[i].next_to_watch;
756 	eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
757 
758 	while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
759 	       (count < tx_ring->count)) {
760 		rmb();	/* read buffer_info after eop_desc status */
761 		for (cleaned = false; !cleaned; count++) {
762 			tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
763 			buffer_info = &tx_ring->buffer_info[i];
764 			cleaned = (i == eop);
765 			skb = buffer_info->skb;
766 
767 			if (skb) {
768 				unsigned int segs, bytecount;
769 
770 				/* gso_segs is currently only valid for tcp */
771 				segs = skb_shinfo(skb)->gso_segs ?: 1;
772 				/* multiply data chunks by size of headers */
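				/* For TSO skbs the headers go out on the wire
				 * once per segment, so the transmitted byte
				 * count is roughly skb->len plus (segs - 1)
				 * copies of the linear header area.
				 */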
773 				bytecount = ((segs - 1) * skb_headlen(skb)) +
774 				            skb->len;
775 				total_packets += segs;
776 				total_bytes += bytecount;
777 			}
778 
779 			igbvf_put_txbuf(adapter, buffer_info);
780 			tx_desc->wb.status = 0;
781 
782 			i++;
783 			if (i == tx_ring->count)
784 				i = 0;
785 		}
786 		eop = tx_ring->buffer_info[i].next_to_watch;
787 		eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
788 	}
789 
790 	tx_ring->next_to_clean = i;
791 
792 	if (unlikely(count &&
793 	             netif_carrier_ok(netdev) &&
794 	             igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) {
795 		/* Make sure that anybody stopping the queue after this
796 		 * sees the new next_to_clean.
797 		 */
798 		smp_mb();
799 		if (netif_queue_stopped(netdev) &&
800 		    !(test_bit(__IGBVF_DOWN, &adapter->state))) {
801 			netif_wake_queue(netdev);
802 			++adapter->restart_queue;
803 		}
804 	}
805 
806 	adapter->net_stats.tx_bytes += total_bytes;
807 	adapter->net_stats.tx_packets += total_packets;
808 	return count < tx_ring->count;
809 }
810 
811 static irqreturn_t igbvf_msix_other(int irq, void *data)
812 {
813 	struct net_device *netdev = data;
814 	struct igbvf_adapter *adapter = netdev_priv(netdev);
815 	struct e1000_hw *hw = &adapter->hw;
816 
817 	adapter->int_counter1++;
818 
819 	netif_carrier_off(netdev);
820 	hw->mac.get_link_status = 1;
821 	if (!test_bit(__IGBVF_DOWN, &adapter->state))
822 		mod_timer(&adapter->watchdog_timer, jiffies + 1);
823 
824 	ew32(EIMS, adapter->eims_other);
825 
826 	return IRQ_HANDLED;
827 }
828 
829 static irqreturn_t igbvf_intr_msix_tx(int irq, void *data)
830 {
831 	struct net_device *netdev = data;
832 	struct igbvf_adapter *adapter = netdev_priv(netdev);
833 	struct e1000_hw *hw = &adapter->hw;
834 	struct igbvf_ring *tx_ring = adapter->tx_ring;
835 
837 	adapter->total_tx_bytes = 0;
838 	adapter->total_tx_packets = 0;
839 
840 	/* auto mask will automatically re-enable the interrupt when we write
841 	 * EICS */
842 	if (!igbvf_clean_tx_irq(tx_ring))
843 		/* Ring was not completely cleaned, so fire another interrupt */
844 		ew32(EICS, tx_ring->eims_value);
845 	else
846 		ew32(EIMS, tx_ring->eims_value);
847 
848 	return IRQ_HANDLED;
849 }
850 
851 static irqreturn_t igbvf_intr_msix_rx(int irq, void *data)
852 {
853 	struct net_device *netdev = data;
854 	struct igbvf_adapter *adapter = netdev_priv(netdev);
855 
856 	adapter->int_counter0++;
857 
858 	/* Write the ITR value calculated at the end of the
859 	 * previous interrupt.
860 	 */
861 	if (adapter->rx_ring->set_itr) {
862 		writel(adapter->rx_ring->itr_val,
863 		       adapter->hw.hw_addr + adapter->rx_ring->itr_register);
864 		adapter->rx_ring->set_itr = 0;
865 	}
866 
867 	if (napi_schedule_prep(&adapter->rx_ring->napi)) {
868 		adapter->total_rx_bytes = 0;
869 		adapter->total_rx_packets = 0;
870 		__napi_schedule(&adapter->rx_ring->napi);
871 	}
872 
873 	return IRQ_HANDLED;
874 }
875 
876 #define IGBVF_NO_QUEUE -1
877 
878 static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue,
879                                 int tx_queue, int msix_vector)
880 {
881 	struct e1000_hw *hw = &adapter->hw;
882 	u32 ivar, index;
883 
884 	/* 82576 uses a table-based method for assigning vectors.
885 	 * Each queue has a single entry in the table to which we write
886 	 * a vector number along with a "valid" bit.  Sadly, the layout
887 	 * of the table is somewhat counterintuitive. */
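	/* For reference, the layout implied by the code below: within each
	 * 32-bit IVAR0 entry, even-numbered Rx queues use bits 7:0, even Tx
	 * queues bits 15:8, odd Rx queues bits 23:16 and odd Tx queues bits
	 * 31:24; each byte holds the vector number with E1000_IVAR_VALID set.
	 */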
888 	if (rx_queue > IGBVF_NO_QUEUE) {
889 		index = (rx_queue >> 1);
890 		ivar = array_er32(IVAR0, index);
891 		if (rx_queue & 0x1) {
892 			/* vector goes into third byte of register */
893 			ivar = ivar & 0xFF00FFFF;
894 			ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
895 		} else {
896 			/* vector goes into low byte of register */
897 			ivar = ivar & 0xFFFFFF00;
898 			ivar |= msix_vector | E1000_IVAR_VALID;
899 		}
900 		adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector;
901 		array_ew32(IVAR0, index, ivar);
902 	}
903 	if (tx_queue > IGBVF_NO_QUEUE) {
904 		index = (tx_queue >> 1);
905 		ivar = array_er32(IVAR0, index);
906 		if (tx_queue & 0x1) {
907 			/* vector goes into high byte of register */
908 			ivar = ivar & 0x00FFFFFF;
909 			ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
910 		} else {
911 			/* vector goes into second byte of register */
912 			ivar = ivar & 0xFFFF00FF;
913 			ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
914 		}
915 		adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector;
916 		array_ew32(IVAR0, index, ivar);
917 	}
918 }
919 
920 /**
921  * igbvf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
922  *
923  * igbvf_configure_msix sets up the hardware to properly
924  * generate MSI-X interrupts.
925  **/
926 static void igbvf_configure_msix(struct igbvf_adapter *adapter)
927 {
928 	u32 tmp;
929 	struct e1000_hw *hw = &adapter->hw;
930 	struct igbvf_ring *tx_ring = adapter->tx_ring;
931 	struct igbvf_ring *rx_ring = adapter->rx_ring;
932 	int vector = 0;
933 
934 	adapter->eims_enable_mask = 0;
935 
936 	igbvf_assign_vector(adapter, IGBVF_NO_QUEUE, 0, vector++);
937 	adapter->eims_enable_mask |= tx_ring->eims_value;
938 	if (tx_ring->itr_val)
939 		writel(tx_ring->itr_val,
940 		       hw->hw_addr + tx_ring->itr_register);
941 	else
942 		writel(1952, hw->hw_addr + tx_ring->itr_register);
943 
944 	igbvf_assign_vector(adapter, 0, IGBVF_NO_QUEUE, vector++);
945 	adapter->eims_enable_mask |= rx_ring->eims_value;
946 	if (rx_ring->itr_val)
947 		writel(rx_ring->itr_val,
948 		       hw->hw_addr + rx_ring->itr_register);
949 	else
950 		writel(1952, hw->hw_addr + rx_ring->itr_register);
951 
952 	/* set vector for other causes, i.e. link changes */
953 
954 	tmp = (vector++ | E1000_IVAR_VALID);
955 
956 	ew32(IVAR_MISC, tmp);
957 
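	/* With the three vectors used above, the masks below work out to
	 * eims_enable_mask = 0x7 (all vectors) and eims_other = 0x4 (the
	 * link/mailbox vector alone).
	 */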
958 	adapter->eims_enable_mask = (1 << (vector)) - 1;
959 	adapter->eims_other = 1 << (vector - 1);
960 	e1e_flush();
961 }
962 
963 static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter)
964 {
965 	if (adapter->msix_entries) {
966 		pci_disable_msix(adapter->pdev);
967 		kfree(adapter->msix_entries);
968 		adapter->msix_entries = NULL;
969 	}
970 }
971 
972 /**
973  * igbvf_set_interrupt_capability - set MSI or MSI-X if supported
 * @adapter: board private structure
974  *
975  * Attempt to configure interrupts using the best available
976  * capabilities of the hardware and kernel.
977  **/
978 static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter)
979 {
980 	int err = -ENOMEM;
981 	int i;
982 
983 	/* we allocate 3 vectors, 1 for Tx, 1 for Rx, and 1 for PF messages */
984 	adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry),
985 	                                GFP_KERNEL);
986 	if (adapter->msix_entries) {
987 		for (i = 0; i < 3; i++)
988 			adapter->msix_entries[i].entry = i;
989 
990 		err = pci_enable_msix(adapter->pdev,
991 		                      adapter->msix_entries, 3);
992 	}
993 
994 	if (err) {
995 		/* MSI-X failed */
996 		dev_err(&adapter->pdev->dev,
997 		        "Failed to initialize MSI-X interrupts.\n");
998 		igbvf_reset_interrupt_capability(adapter);
999 	}
1000 }
1001 
1002 /**
1003  * igbvf_request_msix - Initialize MSI-X interrupts
 * @adapter: board private structure
1004  *
1005  * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the
1006  * kernel.
1007  **/
1008 static int igbvf_request_msix(struct igbvf_adapter *adapter)
1009 {
1010 	struct net_device *netdev = adapter->netdev;
1011 	int err = 0, vector = 0;
1012 
1013 	if (strlen(netdev->name) < (IFNAMSIZ - 5)) {
1014 		sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name);
1015 		sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name);
1016 	} else {
1017 		memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
1018 		memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
1019 	}
1020 
1021 	err = request_irq(adapter->msix_entries[vector].vector,
1022 	                  igbvf_intr_msix_tx, 0, adapter->tx_ring->name,
1023 	                  netdev);
1024 	if (err)
1025 		goto out;
1026 
1027 	adapter->tx_ring->itr_register = E1000_EITR(vector);
1028 	adapter->tx_ring->itr_val = 1952;
1029 	vector++;
1030 
1031 	err = request_irq(adapter->msix_entries[vector].vector,
1032 	                  igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
1033 	                  netdev);
1034 	if (err)
1035 		goto out;
1036 
1037 	adapter->rx_ring->itr_register = E1000_EITR(vector);
1038 	adapter->rx_ring->itr_val = 1952;
1039 	vector++;
1040 
1041 	err = request_irq(adapter->msix_entries[vector].vector,
1042 	                  igbvf_msix_other, 0, netdev->name, netdev);
1043 	if (err)
1044 		goto out;
1045 
1046 	igbvf_configure_msix(adapter);
1047 	return 0;
1048 out:
1049 	return err;
1050 }
1051 
1052 /**
1053  * igbvf_alloc_queues - Allocate memory for all rings
1054  * @adapter: board private structure to initialize
1055  **/
1056 static int __devinit igbvf_alloc_queues(struct igbvf_adapter *adapter)
1057 {
1058 	struct net_device *netdev = adapter->netdev;
1059 
1060 	adapter->tx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
1061 	if (!adapter->tx_ring)
1062 		return -ENOMEM;
1063 
1064 	adapter->rx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
1065 	if (!adapter->rx_ring) {
1066 		kfree(adapter->tx_ring);
1067 		return -ENOMEM;
1068 	}
1069 
1070 	netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll, 64);
1071 
1072 	return 0;
1073 }
1074 
1075 /**
1076  * igbvf_request_irq - initialize interrupts
 * @adapter: board private structure
1077  *
1078  * Attempts to configure interrupts using the best available
1079  * capabilities of the hardware and kernel.
1080  **/
1081 static int igbvf_request_irq(struct igbvf_adapter *adapter)
1082 {
1083 	int err = -1;
1084 
1085 	/* igbvf supports msi-x only */
1086 	if (adapter->msix_entries)
1087 		err = igbvf_request_msix(adapter);
1088 
1089 	if (!err)
1090 		return err;
1091 
1092 	dev_err(&adapter->pdev->dev,
1093 	        "Unable to allocate interrupt, Error: %d\n", err);
1094 
1095 	return err;
1096 }
1097 
1098 static void igbvf_free_irq(struct igbvf_adapter *adapter)
1099 {
1100 	struct net_device *netdev = adapter->netdev;
1101 	int vector;
1102 
1103 	if (adapter->msix_entries) {
1104 		for (vector = 0; vector < 3; vector++)
1105 			free_irq(adapter->msix_entries[vector].vector, netdev);
1106 	}
1107 }
1108 
1109 /**
1110  * igbvf_irq_disable - Mask off interrupt generation on the NIC
1111  **/
1112 static void igbvf_irq_disable(struct igbvf_adapter *adapter)
1113 {
1114 	struct e1000_hw *hw = &adapter->hw;
1115 
1116 	ew32(EIMC, ~0);
1117 
1118 	if (adapter->msix_entries)
1119 		ew32(EIAC, 0);
1120 }
1121 
1122 /**
1123  * igbvf_irq_enable - Enable default interrupt generation settings
1124  **/
1125 static void igbvf_irq_enable(struct igbvf_adapter *adapter)
1126 {
1127 	struct e1000_hw *hw = &adapter->hw;
1128 
1129 	ew32(EIAC, adapter->eims_enable_mask);
1130 	ew32(EIAM, adapter->eims_enable_mask);
1131 	ew32(EIMS, adapter->eims_enable_mask);
1132 }
1133 
1134 /**
1135  * igbvf_poll - NAPI Rx polling callback
1136  * @napi: struct associated with this polling callback
1137  * @budget: amount of packets driver is allowed to process this poll
1138  **/
1139 static int igbvf_poll(struct napi_struct *napi, int budget)
1140 {
1141 	struct igbvf_ring *rx_ring = container_of(napi, struct igbvf_ring, napi);
1142 	struct igbvf_adapter *adapter = rx_ring->adapter;
1143 	struct e1000_hw *hw = &adapter->hw;
1144 	int work_done = 0;
1145 
1146 	igbvf_clean_rx_irq(adapter, &work_done, budget);
1147 
1148 	/* If not enough Rx work done, exit the polling mode */
1149 	if (work_done < budget) {
1150 		napi_complete(napi);
1151 
1152 		if (adapter->itr_setting & 3)
1153 			igbvf_set_itr(adapter);
1154 
1155 		if (!test_bit(__IGBVF_DOWN, &adapter->state))
1156 			ew32(EIMS, adapter->rx_ring->eims_value);
1157 	}
1158 
1159 	return work_done;
1160 }
1161 
1162 /**
1163  * igbvf_set_rlpml - set receive large packet maximum length
1164  * @adapter: board private structure
1165  *
1166  * Configure the maximum size of packets that will be received
1167  */
1168 static void igbvf_set_rlpml(struct igbvf_adapter *adapter)
1169 {
1170 	int max_frame_size;
1171 	struct e1000_hw *hw = &adapter->hw;
1172 
1173 	max_frame_size = adapter->max_frame_size + VLAN_TAG_SIZE;
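	/* VLAN_TAG_SIZE leaves room for one VLAN tag; the limit itself is
	 * owned by the PF, so e1000_rlpml_set_vf() requests the new value
	 * from the PF over the mailbox.
	 */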
1174 	e1000_rlpml_set_vf(hw, max_frame_size);
1175 }
1176 
1177 static void igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1178 {
1179 	struct igbvf_adapter *adapter = netdev_priv(netdev);
1180 	struct e1000_hw *hw = &adapter->hw;
1181 
1182 	if (hw->mac.ops.set_vfta(hw, vid, true))
1183 		dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid);
1184 	else
1185 		set_bit(vid, adapter->active_vlans);
1186 }
1187 
1188 static void igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1189 {
1190 	struct igbvf_adapter *adapter = netdev_priv(netdev);
1191 	struct e1000_hw *hw = &adapter->hw;
1192 
1193 	igbvf_irq_disable(adapter);
1194 
1195 	if (!test_bit(__IGBVF_DOWN, &adapter->state))
1196 		igbvf_irq_enable(adapter);
1197 
1198 	if (hw->mac.ops.set_vfta(hw, vid, false))
1199 		dev_err(&adapter->pdev->dev,
1200 		        "Failed to remove vlan id %d\n", vid);
1201 	else
1202 		clear_bit(vid, adapter->active_vlans);
1203 }
1204 
1205 static void igbvf_restore_vlan(struct igbvf_adapter *adapter)
1206 {
1207 	u16 vid;
1208 
1209 	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
1210 		igbvf_vlan_rx_add_vid(adapter->netdev, vid);
1211 }
1212 
1213 /**
1214  * igbvf_configure_tx - Configure Transmit Unit after Reset
1215  * @adapter: board private structure
1216  *
1217  * Configure the Tx unit of the MAC after a reset.
1218  **/
1219 static void igbvf_configure_tx(struct igbvf_adapter *adapter)
1220 {
1221 	struct e1000_hw *hw = &adapter->hw;
1222 	struct igbvf_ring *tx_ring = adapter->tx_ring;
1223 	u64 tdba;
1224 	u32 txdctl, dca_txctrl;
1225 
1226 	/* disable transmits */
1227 	txdctl = er32(TXDCTL(0));
1228 	ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
1229 	e1e_flush();
1230 	msleep(10);
1231 
1232 	/* Setup the HW Tx Head and Tail descriptor pointers */
1233 	ew32(TDLEN(0), tx_ring->count * sizeof(union e1000_adv_tx_desc));
1234 	tdba = tx_ring->dma;
1235 	ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
1236 	ew32(TDBAH(0), (tdba >> 32));
1237 	ew32(TDH(0), 0);
1238 	ew32(TDT(0), 0);
1239 	tx_ring->head = E1000_TDH(0);
1240 	tx_ring->tail = E1000_TDT(0);
1241 
1242 	/* Turn off Relaxed Ordering on head write-backs.  The writebacks
1243 	 * MUST be delivered in order or it will completely screw up
1244 	 * our bookkeeping.
1245 	 */
1246 	dca_txctrl = er32(DCA_TXCTRL(0));
1247 	dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
1248 	ew32(DCA_TXCTRL(0), dca_txctrl);
1249 
1250 	/* enable transmits */
1251 	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
1252 	ew32(TXDCTL(0), txdctl);
1253 
1254 	/* Setup Transmit Descriptor Settings for eop descriptor */
1255 	adapter->txd_cmd = E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_IFCS;
1256 
1257 	/* enable Report Status bit */
1258 	adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS;
1259 }
1260 
1261 /**
1262  * igbvf_setup_srrctl - configure the receive control registers
1263  * @adapter: Board private structure
1264  **/
1265 static void igbvf_setup_srrctl(struct igbvf_adapter *adapter)
1266 {
1267 	struct e1000_hw *hw = &adapter->hw;
1268 	u32 srrctl = 0;
1269 
1270 	srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK |
1271 	            E1000_SRRCTL_BSIZEHDR_MASK |
1272 	            E1000_SRRCTL_BSIZEPKT_MASK);
1273 
1274 	/* Enable queue drop to avoid head of line blocking */
1275 	srrctl |= E1000_SRRCTL_DROP_EN;
1276 
1277 	/* Setup buffer sizes */
1278 	srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >>
1279 	          E1000_SRRCTL_BSIZEPKT_SHIFT;
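	/* BSIZEPKT is programmed in 1 KB units, hence the 1024-byte
	 * alignment and the shift above.
	 */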
1280 
1281 	if (adapter->rx_buffer_len < 2048) {
1282 		adapter->rx_ps_hdr_size = 0;
1283 		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
1284 	} else {
1285 		adapter->rx_ps_hdr_size = 128;
1286 		srrctl |= adapter->rx_ps_hdr_size <<
1287 		          E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
1288 		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
1289 	}
1290 
1291 	ew32(SRRCTL(0), srrctl);
1292 }
1293 
1294 /**
1295  * igbvf_configure_rx - Configure Receive Unit after Reset
1296  * @adapter: board private structure
1297  *
1298  * Configure the Rx unit of the MAC after a reset.
1299  **/
1300 static void igbvf_configure_rx(struct igbvf_adapter *adapter)
1301 {
1302 	struct e1000_hw *hw = &adapter->hw;
1303 	struct igbvf_ring *rx_ring = adapter->rx_ring;
1304 	u64 rdba;
1305 	u32 rdlen, rxdctl;
1306 
1307 	/* disable receives */
1308 	rxdctl = er32(RXDCTL(0));
1309 	ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
1310 	e1e_flush();
1311 	msleep(10);
1312 
1313 	rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc);
1314 
1315 	/*
1316 	 * Setup the HW Rx Head and Tail Descriptor Pointers and
1317 	 * the Base and Length of the Rx Descriptor Ring
1318 	 */
1319 	rdba = rx_ring->dma;
1320 	ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32)));
1321 	ew32(RDBAH(0), (rdba >> 32));
1322 	ew32(RDLEN(0), rx_ring->count * sizeof(union e1000_adv_rx_desc));
1323 	rx_ring->head = E1000_RDH(0);
1324 	rx_ring->tail = E1000_RDT(0);
1325 	ew32(RDH(0), 0);
1326 	ew32(RDT(0), 0);
1327 
1328 	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
1329 	rxdctl &= 0xFFF00000;
1330 	rxdctl |= IGBVF_RX_PTHRESH;
1331 	rxdctl |= IGBVF_RX_HTHRESH << 8;
1332 	rxdctl |= IGBVF_RX_WTHRESH << 16;
1333 
1334 	igbvf_set_rlpml(adapter);
1335 
1336 	/* enable receives */
1337 	ew32(RXDCTL(0), rxdctl);
1338 }
1339 
1340 /**
1341  * igbvf_set_multi - Multicast and Promiscuous mode set
1342  * @netdev: network interface device structure
1343  *
1344  * The set_multi entry point is called whenever the multicast address
1345  * list or the network interface flags are updated.  This routine is
1346  * responsible for configuring the hardware for proper multicast,
1347  * promiscuous mode, and all-multi behavior.
1348  **/
1349 static void igbvf_set_multi(struct net_device *netdev)
1350 {
1351 	struct igbvf_adapter *adapter = netdev_priv(netdev);
1352 	struct e1000_hw *hw = &adapter->hw;
1353 	struct netdev_hw_addr *ha;
1354 	u8  *mta_list = NULL;
1355 	int i;
1356 
1357 	if (!netdev_mc_empty(netdev)) {
1358 		mta_list = kmalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
1359 		if (!mta_list) {
1360 			dev_err(&adapter->pdev->dev,
1361 			        "failed to allocate multicast filter list\n");
1362 			return;
1363 		}
1364 	}
1365 
1366 	/* prepare a packed array of only addresses. */
1367 	i = 0;
1368 	netdev_for_each_mc_addr(ha, netdev)
1369 		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
1370 
1371 	hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0);
1372 	kfree(mta_list);
1373 }
1374 
1375 /**
1376  * igbvf_configure - configure the hardware for Rx and Tx
1377  * @adapter: private board structure
1378  **/
1379 static void igbvf_configure(struct igbvf_adapter *adapter)
1380 {
1381 	igbvf_set_multi(adapter->netdev);
1382 
1383 	igbvf_restore_vlan(adapter);
1384 
1385 	igbvf_configure_tx(adapter);
1386 	igbvf_setup_srrctl(adapter);
1387 	igbvf_configure_rx(adapter);
1388 	igbvf_alloc_rx_buffers(adapter->rx_ring,
1389 	                       igbvf_desc_unused(adapter->rx_ring));
1390 }
1391 
1392 /* igbvf_reset - bring the hardware into a known good state
1393  *
1394  * This function boots the hardware and enables some settings that
1395  * require a configuration cycle of the hardware - those cannot be
1396  * set/changed during runtime. After reset the device needs to be
1397  * properly configured for Rx, Tx etc.
1398  */
1399 static void igbvf_reset(struct igbvf_adapter *adapter)
1400 {
1401 	struct e1000_mac_info *mac = &adapter->hw.mac;
1402 	struct net_device *netdev = adapter->netdev;
1403 	struct e1000_hw *hw = &adapter->hw;
1404 
1405 	/* Allow time for pending master requests to run */
1406 	if (mac->ops.reset_hw(hw))
1407 		dev_err(&adapter->pdev->dev, "PF still resetting\n");
1408 
1409 	mac->ops.init_hw(hw);
1410 
1411 	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
1412 		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
1413 		       netdev->addr_len);
1414 		memcpy(netdev->perm_addr, adapter->hw.mac.addr,
1415 		       netdev->addr_len);
1416 	}
1417 
1418 	adapter->last_reset = jiffies;
1419 }
1420 
1421 int igbvf_up(struct igbvf_adapter *adapter)
1422 {
1423 	struct e1000_hw *hw = &adapter->hw;
1424 
1425 	/* hardware has been reset, we need to reload some things */
1426 	igbvf_configure(adapter);
1427 
1428 	clear_bit(__IGBVF_DOWN, &adapter->state);
1429 
1430 	napi_enable(&adapter->rx_ring->napi);
1431 	if (adapter->msix_entries)
1432 		igbvf_configure_msix(adapter);
1433 
1434 	/* Clear any pending interrupts. */
1435 	er32(EICR);
1436 	igbvf_irq_enable(adapter);
1437 
1438 	/* start the watchdog */
1439 	hw->mac.get_link_status = 1;
1440 	mod_timer(&adapter->watchdog_timer, jiffies + 1);
1441 
1443 	return 0;
1444 }
1445 
1446 void igbvf_down(struct igbvf_adapter *adapter)
1447 {
1448 	struct net_device *netdev = adapter->netdev;
1449 	struct e1000_hw *hw = &adapter->hw;
1450 	u32 rxdctl, txdctl;
1451 
1452 	/*
1453 	 * signal that we're down so the interrupt handler does not
1454 	 * reschedule our watchdog timer
1455 	 */
1456 	set_bit(__IGBVF_DOWN, &adapter->state);
1457 
1458 	/* disable receives in the hardware */
1459 	rxdctl = er32(RXDCTL(0));
1460 	ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
1461 
1462 	netif_stop_queue(netdev);
1463 
1464 	/* disable transmits in the hardware */
1465 	txdctl = er32(TXDCTL(0));
1466 	ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
1467 
1468 	/* flush both disables and wait for them to finish */
1469 	e1e_flush();
1470 	msleep(10);
1471 
1472 	napi_disable(&adapter->rx_ring->napi);
1473 
1474 	igbvf_irq_disable(adapter);
1475 
1476 	del_timer_sync(&adapter->watchdog_timer);
1477 
1478 	netif_carrier_off(netdev);
1479 
1480 	/* record the stats before reset */
1481 	igbvf_update_stats(adapter);
1482 
1483 	adapter->link_speed = 0;
1484 	adapter->link_duplex = 0;
1485 
1486 	igbvf_reset(adapter);
1487 	igbvf_clean_tx_ring(adapter->tx_ring);
1488 	igbvf_clean_rx_ring(adapter->rx_ring);
1489 }
1490 
1491 void igbvf_reinit_locked(struct igbvf_adapter *adapter)
1492 {
1493 	might_sleep();
1494 	while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
1495 		msleep(1);
1496 	igbvf_down(adapter);
1497 	igbvf_up(adapter);
1498 	clear_bit(__IGBVF_RESETTING, &adapter->state);
1499 }
1500 
1501 /**
1502  * igbvf_sw_init - Initialize general software structures (struct igbvf_adapter)
1503  * @adapter: board private structure to initialize
1504  *
1505  * igbvf_sw_init initializes the Adapter private data structure.
1506  * Fields are initialized based on PCI device information and
1507  * OS network device settings (MTU size).
1508  **/
1509 static int __devinit igbvf_sw_init(struct igbvf_adapter *adapter)
1510 {
1511 	struct net_device *netdev = adapter->netdev;
1512 	s32 rc;
1513 
1514 	adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
1515 	adapter->rx_ps_hdr_size = 0;
1516 	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1517 	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
1518 
1519 	adapter->tx_int_delay = 8;
1520 	adapter->tx_abs_int_delay = 32;
1521 	adapter->rx_int_delay = 0;
1522 	adapter->rx_abs_int_delay = 8;
1523 	adapter->itr_setting = 3;
1524 	adapter->itr = 20000;
1525 
1526 	/* Set various function pointers */
1527 	adapter->ei->init_ops(&adapter->hw);
1528 
1529 	rc = adapter->hw.mac.ops.init_params(&adapter->hw);
1530 	if (rc)
1531 		return rc;
1532 
1533 	rc = adapter->hw.mbx.ops.init_params(&adapter->hw);
1534 	if (rc)
1535 		return rc;
1536 
1537 	igbvf_set_interrupt_capability(adapter);
1538 
1539 	if (igbvf_alloc_queues(adapter))
1540 		return -ENOMEM;
1541 
1542 	spin_lock_init(&adapter->tx_queue_lock);
1543 
1544 	/* Explicitly disable IRQ since the NIC can be in any state. */
1545 	igbvf_irq_disable(adapter);
1546 
1547 	spin_lock_init(&adapter->stats_lock);
1548 
1549 	set_bit(__IGBVF_DOWN, &adapter->state);
1550 	return 0;
1551 }
1552 
1553 static void igbvf_initialize_last_counter_stats(struct igbvf_adapter *adapter)
1554 {
1555 	struct e1000_hw *hw = &adapter->hw;
1556 
1557 	adapter->stats.last_gprc = er32(VFGPRC);
1558 	adapter->stats.last_gorc = er32(VFGORC);
1559 	adapter->stats.last_gptc = er32(VFGPTC);
1560 	adapter->stats.last_gotc = er32(VFGOTC);
1561 	adapter->stats.last_mprc = er32(VFMPRC);
1562 	adapter->stats.last_gotlbc = er32(VFGOTLBC);
1563 	adapter->stats.last_gptlbc = er32(VFGPTLBC);
1564 	adapter->stats.last_gorlbc = er32(VFGORLBC);
1565 	adapter->stats.last_gprlbc = er32(VFGPRLBC);
1566 
1567 	adapter->stats.base_gprc = er32(VFGPRC);
1568 	adapter->stats.base_gorc = er32(VFGORC);
1569 	adapter->stats.base_gptc = er32(VFGPTC);
1570 	adapter->stats.base_gotc = er32(VFGOTC);
1571 	adapter->stats.base_mprc = er32(VFMPRC);
1572 	adapter->stats.base_gotlbc = er32(VFGOTLBC);
1573 	adapter->stats.base_gptlbc = er32(VFGPTLBC);
1574 	adapter->stats.base_gorlbc = er32(VFGORLBC);
1575 	adapter->stats.base_gprlbc = er32(VFGPRLBC);
1576 }
1577 
1578 /**
1579  * igbvf_open - Called when a network interface is made active
1580  * @netdev: network interface device structure
1581  *
1582  * Returns 0 on success, negative value on failure
1583  *
1584  * The open entry point is called when a network interface is made
1585  * active by the system (IFF_UP).  At this point all resources needed
1586  * for transmit and receive operations are allocated, the interrupt
1587  * handler is registered with the OS, the watchdog timer is started,
1588  * and the stack is notified that the interface is ready.
1589  **/
1590 static int igbvf_open(struct net_device *netdev)
1591 {
1592 	struct igbvf_adapter *adapter = netdev_priv(netdev);
1593 	struct e1000_hw *hw = &adapter->hw;
1594 	int err;
1595 
1596 	/* disallow open during test */
1597 	if (test_bit(__IGBVF_TESTING, &adapter->state))
1598 		return -EBUSY;
1599 
1600 	/* allocate transmit descriptors */
1601 	err = igbvf_setup_tx_resources(adapter, adapter->tx_ring);
1602 	if (err)
1603 		goto err_setup_tx;
1604 
1605 	/* allocate receive descriptors */
1606 	err = igbvf_setup_rx_resources(adapter, adapter->rx_ring);
1607 	if (err)
1608 		goto err_setup_rx;
1609 
1610 	/*
1611 	 * before we allocate an interrupt, we must be ready to handle it.
1612 	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1613 	 * as soon as we call pci_request_irq, so we have to set up our
1614 	 * clean_rx handler before we do so.
1615 	 */
1616 	igbvf_configure(adapter);
1617 
1618 	err = igbvf_request_irq(adapter);
1619 	if (err)
1620 		goto err_req_irq;
1621 
1622 	/* From here on the code is the same as igbvf_up() */
1623 	clear_bit(__IGBVF_DOWN, &adapter->state);
1624 
1625 	napi_enable(&adapter->rx_ring->napi);
1626 
1627 	/* clear any pending interrupts */
1628 	er32(EICR);
1629 
1630 	igbvf_irq_enable(adapter);
1631 
1632 	/* start the watchdog */
1633 	hw->mac.get_link_status = 1;
1634 	mod_timer(&adapter->watchdog_timer, jiffies + 1);
1635 
1636 	return 0;
1637 
1638 err_req_irq:
1639 	igbvf_free_rx_resources(adapter->rx_ring);
1640 err_setup_rx:
1641 	igbvf_free_tx_resources(adapter->tx_ring);
1642 err_setup_tx:
1643 	igbvf_reset(adapter);
1644 
1645 	return err;
1646 }
1647 
1648 /**
1649  * igbvf_close - Disables a network interface
1650  * @netdev: network interface device structure
1651  *
1652  * Returns 0, this is not allowed to fail
1653  *
1654  * The close entry point is called when an interface is de-activated
1655  * by the OS.  The hardware is still under the drivers control, but
1656  * needs to be disabled.  A global MAC reset is issued to stop the
1657  * hardware, and all transmit and receive resources are freed.
1658  **/
1659 static int igbvf_close(struct net_device *netdev)
1660 {
1661 	struct igbvf_adapter *adapter = netdev_priv(netdev);
1662 
1663 	WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
1664 	igbvf_down(adapter);
1665 
1666 	igbvf_free_irq(adapter);
1667 
1668 	igbvf_free_tx_resources(adapter->tx_ring);
1669 	igbvf_free_rx_resources(adapter->rx_ring);
1670 
1671 	return 0;
1672 }

1673 /**
1674  * igbvf_set_mac - Change the Ethernet Address of the NIC
1675  * @netdev: network interface device structure
1676  * @p: pointer to an address structure
1677  *
1678  * Returns 0 on success, negative on failure
1679  **/
1680 static int igbvf_set_mac(struct net_device *netdev, void *p)
1681 {
1682 	struct igbvf_adapter *adapter = netdev_priv(netdev);
1683 	struct e1000_hw *hw = &adapter->hw;
1684 	struct sockaddr *addr = p;
1685 
1686 	if (!is_valid_ether_addr(addr->sa_data))
1687 		return -EADDRNOTAVAIL;
1688 
1689 	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
1690 
1691 	hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
1692 
1693 	if (memcmp(addr->sa_data, hw->mac.addr, ETH_ALEN))
1694 		return -EADDRNOTAVAIL;
1695 
1696 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1697 
1698 	return 0;
1699 }
1700 
1701 #define UPDATE_VF_COUNTER(reg, name)                                    \
1702 	{                                                               \
1703 		u32 current_counter = er32(reg);                        \
1704 		if (current_counter < adapter->stats.last_##name)       \
1705 			adapter->stats.name += 0x100000000LL;           \
1706 		adapter->stats.last_##name = current_counter;           \
1707 		adapter->stats.name &= 0xFFFFFFFF00000000LL;            \
1708 		adapter->stats.name |= current_counter;                 \
1709 	}
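
/* The VF statistics registers are 32-bit rolling counters; the macro above
 * widens them to 64 bits in software by adding 2^32 whenever the current
 * hardware value wraps below the last recorded one.  Illustrative example:
 * a counter read as 0xFFFFFFF0 and later as 0x00000010 accumulates to
 * 0x100000010.
 */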
1710 
1711 /**
1712  * igbvf_update_stats - Update the board statistics counters
1713  * @adapter: board private structure
1714 **/
1715 void igbvf_update_stats(struct igbvf_adapter *adapter)
1716 {
1717 	struct e1000_hw *hw = &adapter->hw;
1718 	struct pci_dev *pdev = adapter->pdev;
1719 
1720 	/*
1721 	 * Prevent stats update while adapter is being reset, link is down
1722 	 * or if the pci connection is down.
1723 	 */
1724 	if (adapter->link_speed == 0)
1725 		return;
1726 
1727 	if (test_bit(__IGBVF_RESETTING, &adapter->state))
1728 		return;
1729 
1730 	if (pci_channel_offline(pdev))
1731 		return;
1732 
1733 	UPDATE_VF_COUNTER(VFGPRC, gprc);
1734 	UPDATE_VF_COUNTER(VFGORC, gorc);
1735 	UPDATE_VF_COUNTER(VFGPTC, gptc);
1736 	UPDATE_VF_COUNTER(VFGOTC, gotc);
1737 	UPDATE_VF_COUNTER(VFMPRC, mprc);
1738 	UPDATE_VF_COUNTER(VFGOTLBC, gotlbc);
1739 	UPDATE_VF_COUNTER(VFGPTLBC, gptlbc);
1740 	UPDATE_VF_COUNTER(VFGORLBC, gorlbc);
1741 	UPDATE_VF_COUNTER(VFGPRLBC, gprlbc);
1742 
1743 	/* Fill out the OS statistics structure */
1744 	adapter->net_stats.multicast = adapter->stats.mprc;
1745 }
1746 
1747 static void igbvf_print_link_info(struct igbvf_adapter *adapter)
1748 {
1749 	dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s\n",
1750 	         adapter->link_speed,
1751 	         ((adapter->link_duplex == FULL_DUPLEX) ?
1752 	          "Full Duplex" : "Half Duplex"));
1753 }
1754 
1755 static bool igbvf_has_link(struct igbvf_adapter *adapter)
1756 {
1757 	struct e1000_hw *hw = &adapter->hw;
1758 	s32 ret_val = E1000_SUCCESS;
1759 	bool link_active;
1760 
1761 	/* If interface is down, stay link down */
1762 	if (test_bit(__IGBVF_DOWN, &adapter->state))
1763 		return false;
1764 
1765 	ret_val = hw->mac.ops.check_for_link(hw);
1766 	link_active = !hw->mac.get_link_status;
1767 
1768 	/* if check for link returns error we will need to reset */
1769 	if (ret_val && time_after(jiffies, adapter->last_reset + (10 * HZ)))
1770 		schedule_work(&adapter->reset_task);
1771 
1772 	return link_active;
1773 }
1774 
1775 /**
1776  * igbvf_watchdog - Timer Call-back
1777  * @data: pointer to adapter cast into an unsigned long
1778  **/
1779 static void igbvf_watchdog(unsigned long data)
1780 {
1781 	struct igbvf_adapter *adapter = (struct igbvf_adapter *) data;
1782 
1783 	/* Do the rest outside of interrupt context */
1784 	schedule_work(&adapter->watchdog_task);
1785 }
1786 
1787 static void igbvf_watchdog_task(struct work_struct *work)
1788 {
1789 	struct igbvf_adapter *adapter = container_of(work,
1790 	                                             struct igbvf_adapter,
1791 	                                             watchdog_task);
1792 	struct net_device *netdev = adapter->netdev;
1793 	struct e1000_mac_info *mac = &adapter->hw.mac;
1794 	struct igbvf_ring *tx_ring = adapter->tx_ring;
1795 	struct e1000_hw *hw = &adapter->hw;
1796 	u32 link;
1797 	int tx_pending = 0;
1798 
1799 	link = igbvf_has_link(adapter);
1800 
1801 	if (link) {
1802 		if (!netif_carrier_ok(netdev)) {
1803 			mac->ops.get_link_up_info(&adapter->hw,
1804 			                          &adapter->link_speed,
1805 			                          &adapter->link_duplex);
1806 			igbvf_print_link_info(adapter);
1807 
1808 			netif_carrier_on(netdev);
1809 			netif_wake_queue(netdev);
1810 		}
1811 	} else {
1812 		if (netif_carrier_ok(netdev)) {
1813 			adapter->link_speed = 0;
1814 			adapter->link_duplex = 0;
1815 			dev_info(&adapter->pdev->dev, "Link is Down\n");
1816 			netif_carrier_off(netdev);
1817 			netif_stop_queue(netdev);
1818 		}
1819 	}
1820 
1821 	if (netif_carrier_ok(netdev)) {
1822 		igbvf_update_stats(adapter);
1823 	} else {
1824 		tx_pending = (igbvf_desc_unused(tx_ring) + 1 <
1825 		              tx_ring->count);
1826 		if (tx_pending) {
1827 			/*
1828 			 * We've lost link, so the controller stops DMA,
1829 			 * but we've got queued Tx work that's never going
1830 			 * to get done, so reset controller to flush Tx.
1831 			 * (Do the reset outside of interrupt context).
1832 			 */
1833 			adapter->tx_timeout_count++;
1834 			schedule_work(&adapter->reset_task);
1835 		}
1836 	}
1837 
1838 	/* Cause software interrupt to ensure Rx ring is cleaned */
1839 	ew32(EICS, adapter->rx_ring->eims_value);
1840 
1841 	/* Reset the timer */
1842 	if (!test_bit(__IGBVF_DOWN, &adapter->state))
1843 		mod_timer(&adapter->watchdog_timer,
1844 			  round_jiffies(jiffies + (2 * HZ)));
1845 }
1846 
1847 #define IGBVF_TX_FLAGS_CSUM             0x00000001
1848 #define IGBVF_TX_FLAGS_VLAN             0x00000002
1849 #define IGBVF_TX_FLAGS_TSO              0x00000004
1850 #define IGBVF_TX_FLAGS_IPV4             0x00000008
1851 #define IGBVF_TX_FLAGS_VLAN_MASK        0xffff0000
1852 #define IGBVF_TX_FLAGS_VLAN_SHIFT       16
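
/*
 * The low bits of tx_flags carry per-packet feature flags; when
 * IGBVF_TX_FLAGS_VLAN is set, the 802.1Q tag is stored in the upper 16 bits
 * (IGBVF_TX_FLAGS_VLAN_MASK / _SHIFT) and copied into the context
 * descriptor's vlan_macip_lens field by igbvf_tso()/igbvf_tx_csum().
 */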
1853 
1854 static int igbvf_tso(struct igbvf_adapter *adapter,
1855                      struct igbvf_ring *tx_ring,
1856                      struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
1857 {
1858 	struct e1000_adv_tx_context_desc *context_desc;
1859 	unsigned int i;
1860 	int err;
1861 	struct igbvf_buffer *buffer_info;
1862 	u32 info = 0, tu_cmd = 0;
1863 	u32 mss_l4len_idx, l4len;
1864 	*hdr_len = 0;
1865 
1866 	if (skb_header_cloned(skb)) {
1867 		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1868 		if (err) {
1869 			dev_err(&adapter->pdev->dev,
1870 			        "igbvf_tso returning an error\n");
1871 			return err;
1872 		}
1873 	}
1874 
1875 	l4len = tcp_hdrlen(skb);
1876 	*hdr_len += l4len;
1877 
1878 	if (skb->protocol == htons(ETH_P_IP)) {
1879 		struct iphdr *iph = ip_hdr(skb);
1880 		iph->tot_len = 0;
1881 		iph->check = 0;
1882 		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
1883 		                                         iph->daddr, 0,
1884 		                                         IPPROTO_TCP,
1885 		                                         0);
1886 	} else if (skb_is_gso_v6(skb)) {
1887 		ipv6_hdr(skb)->payload_len = 0;
1888 		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1889 		                                       &ipv6_hdr(skb)->daddr,
1890 		                                       0, IPPROTO_TCP, 0);
1891 	}
1892 
1893 	i = tx_ring->next_to_use;
1894 
1895 	buffer_info = &tx_ring->buffer_info[i];
1896 	context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);
1897 	/* VLAN MACLEN IPLEN */
1898 	if (tx_flags & IGBVF_TX_FLAGS_VLAN)
1899 		info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);
1900 	info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
1901 	*hdr_len += skb_network_offset(skb);
1902 	info |= (skb_transport_header(skb) - skb_network_header(skb));
1903 	*hdr_len += (skb_transport_header(skb) - skb_network_header(skb));
1904 	context_desc->vlan_macip_lens = cpu_to_le32(info);
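	/*
	 * *hdr_len is now the full MAC + IP + TCP header length; it is
	 * subtracted from skb->len in igbvf_tx_queue_adv() to give the TSO
	 * payload length reported to hardware.
	 */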
1905 
1906 	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
1907 	tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
1908 
1909 	if (skb->protocol == htons(ETH_P_IP))
1910 		tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
1911 	tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
1912 
1913 	context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
1914 
1915 	/* MSS L4LEN IDX */
1916 	mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
1917 	mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
1918 
1919 	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
1920 	context_desc->seqnum_seed = 0;
1921 
1922 	buffer_info->time_stamp = jiffies;
1923 	buffer_info->next_to_watch = i;
1924 	buffer_info->dma = 0;
1925 	i++;
1926 	if (i == tx_ring->count)
1927 		i = 0;
1928 
1929 	tx_ring->next_to_use = i;
1930 
1931 	return true;
1932 }
1933 
1934 static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
1935                                  struct igbvf_ring *tx_ring,
1936                                  struct sk_buff *skb, u32 tx_flags)
1937 {
1938 	struct e1000_adv_tx_context_desc *context_desc;
1939 	unsigned int i;
1940 	struct igbvf_buffer *buffer_info;
1941 	u32 info = 0, tu_cmd = 0;
1942 
1943 	if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
1944 	    (tx_flags & IGBVF_TX_FLAGS_VLAN)) {
1945 		i = tx_ring->next_to_use;
1946 		buffer_info = &tx_ring->buffer_info[i];
1947 		context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);
1948 
1949 		if (tx_flags & IGBVF_TX_FLAGS_VLAN)
1950 			info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);
1951 
1952 		info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
1953 		if (skb->ip_summed == CHECKSUM_PARTIAL)
1954 			info |= (skb_transport_header(skb) -
1955 			         skb_network_header(skb));
1956 
1957 
1958 		context_desc->vlan_macip_lens = cpu_to_le32(info);
1959 
1960 		tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
1961 
1962 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
1963 			switch (skb->protocol) {
1964 			case __constant_htons(ETH_P_IP):
1965 				tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
1966 				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1967 					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
1968 				break;
1969 			case __constant_htons(ETH_P_IPV6):
1970 				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1971 					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
1972 				break;
1973 			default:
1974 				break;
1975 			}
1976 		}
1977 
1978 		context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
1979 		context_desc->seqnum_seed = 0;
1980 		context_desc->mss_l4len_idx = 0;
1981 
1982 		buffer_info->time_stamp = jiffies;
1983 		buffer_info->next_to_watch = i;
1984 		buffer_info->dma = 0;
1985 		i++;
1986 		if (i == tx_ring->count)
1987 			i = 0;
1988 		tx_ring->next_to_use = i;
1989 
1990 		return true;
1991 	}
1992 
1993 	return false;
1994 }
1995 
1996 static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
1997 {
1998 	struct igbvf_adapter *adapter = netdev_priv(netdev);
1999 
2000 	/* if there are enough descriptors then we don't need to worry */
2001 	if (igbvf_desc_unused(adapter->tx_ring) >= size)
2002 		return 0;
2003 
2004 	netif_stop_queue(netdev);
2005 
2006 	smp_mb();
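	/*
	 * The barrier ensures the queue-stop is visible before we re-read
	 * the free descriptor count, so a wakeup from the Tx cleanup path
	 * cannot slip in between and be lost.
	 */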
2007 
2008 	/* We need to check again just in case room has been made available */
2009 	if (igbvf_desc_unused(adapter->tx_ring) < size)
2010 		return -EBUSY;
2011 
2012 	netif_wake_queue(netdev);
2013 
2014 	++adapter->restart_queue;
2015 	return 0;
2016 }
2017 
2018 #define IGBVF_MAX_TXD_PWR       16
2019 #define IGBVF_MAX_DATA_PER_TXD  (1 << IGBVF_MAX_TXD_PWR)
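
/*
 * A single data descriptor can describe just under 64KB; the BUG_ON()s in
 * igbvf_tx_map_adv() enforce this limit for both the linear head and each
 * fragment.
 */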
2020 
2021 static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
2022                                    struct igbvf_ring *tx_ring,
2023                                    struct sk_buff *skb,
2024                                    unsigned int first)
2025 {
2026 	struct igbvf_buffer *buffer_info;
2027 	struct pci_dev *pdev = adapter->pdev;
2028 	unsigned int len = skb_headlen(skb);
2029 	unsigned int count = 0, i;
2030 	unsigned int f;
2031 
2032 	i = tx_ring->next_to_use;
2033 
2034 	buffer_info = &tx_ring->buffer_info[i];
2035 	BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
2036 	buffer_info->length = len;
2037 	/* set time_stamp *before* dma to help avoid a possible race */
2038 	buffer_info->time_stamp = jiffies;
2039 	buffer_info->next_to_watch = i;
2040 	buffer_info->mapped_as_page = false;
2041 	buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len,
2042 					  DMA_TO_DEVICE);
2043 	if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2044 		goto dma_error;
2045 
2046 
2047 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
2048 		const struct skb_frag_struct *frag;
2049 
2050 		count++;
2051 		i++;
2052 		if (i == tx_ring->count)
2053 			i = 0;
2054 
2055 		frag = &skb_shinfo(skb)->frags[f];
2056 		len = skb_frag_size(frag);
2057 
2058 		buffer_info = &tx_ring->buffer_info[i];
2059 		BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
2060 		buffer_info->length = len;
2061 		buffer_info->time_stamp = jiffies;
2062 		buffer_info->next_to_watch = i;
2063 		buffer_info->mapped_as_page = true;
2064 		buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len,
2065 						DMA_TO_DEVICE);
2066 		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2067 			goto dma_error;
2068 	}
2069 
2070 	tx_ring->buffer_info[i].skb = skb;
2071 	tx_ring->buffer_info[first].next_to_watch = i;
2072 
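	/* count was bumped once per fragment; the final increment accounts
	 * for the skb->data buffer, so the caller sees the total number of
	 * descriptors consumed (0 indicates a mapping failure) */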
2073 	return ++count;
2074 
2075 dma_error:
2076 	dev_err(&pdev->dev, "TX DMA map failed\n");
2077 
2078 	/* clear timestamp and dma mappings for failed buffer_info mapping */
2079 	buffer_info->dma = 0;
2080 	buffer_info->time_stamp = 0;
2081 	buffer_info->length = 0;
2082 	buffer_info->next_to_watch = 0;
2083 	buffer_info->mapped_as_page = false;
2084 	if (count)
2085 		count--;
2086 
2087 	/* clear timestamp and dma mappings for remaining portion of packet */
2088 	while (count--) {
2089 		if (i == 0)
2090 			i += tx_ring->count;
2091 		i--;
2092 		buffer_info = &tx_ring->buffer_info[i];
2093 		igbvf_put_txbuf(adapter, buffer_info);
2094 	}
2095 
2096 	return 0;
2097 }
2098 
2099 static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
2100                                       struct igbvf_ring *tx_ring,
2101                                       int tx_flags, int count, u32 paylen,
2102                                       u8 hdr_len)
2103 {
2104 	union e1000_adv_tx_desc *tx_desc = NULL;
2105 	struct igbvf_buffer *buffer_info;
2106 	u32 olinfo_status = 0, cmd_type_len;
2107 	unsigned int i;
2108 
2109 	cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
2110 	                E1000_ADVTXD_DCMD_DEXT);
2111 
2112 	if (tx_flags & IGBVF_TX_FLAGS_VLAN)
2113 		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
2114 
2115 	if (tx_flags & IGBVF_TX_FLAGS_TSO) {
2116 		cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
2117 
2118 		/* insert tcp checksum */
2119 		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
2120 
2121 		/* insert ip checksum */
2122 		if (tx_flags & IGBVF_TX_FLAGS_IPV4)
2123 			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
2124 
2125 	} else if (tx_flags & IGBVF_TX_FLAGS_CSUM) {
2126 		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
2127 	}
2128 
2129 	olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
2130 
2131 	i = tx_ring->next_to_use;
2132 	while (count--) {
2133 		buffer_info = &tx_ring->buffer_info[i];
2134 		tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
2135 		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
2136 		tx_desc->read.cmd_type_len =
2137 		         cpu_to_le32(cmd_type_len | buffer_info->length);
2138 		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
2139 		i++;
2140 		if (i == tx_ring->count)
2141 			i = 0;
2142 	}
2143 
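	/* adapter->txd_cmd (typically EOP and Report Status) is ORed into the
	 * last descriptor only, so hardware writes back completion status
	 * once per packet rather than once per buffer */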
2144 	tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
2145 	/* Force memory writes to complete before letting h/w
2146 	 * know there are new descriptors to fetch.  (Only
2147 	 * applicable for weak-ordered memory model archs,
2148 	 * such as IA-64). */
2149 	wmb();
2150 
2151 	tx_ring->next_to_use = i;
2152 	writel(i, adapter->hw.hw_addr + tx_ring->tail);
2153 	/* we need this if more than one processor can write to our tail
2154 	 * at a time; it synchronizes IO on IA64/Altix systems */
2155 	mmiowb();
2156 }
2157 
2158 static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
2159 					     struct net_device *netdev,
2160 					     struct igbvf_ring *tx_ring)
2161 {
2162 	struct igbvf_adapter *adapter = netdev_priv(netdev);
2163 	unsigned int first, tx_flags = 0;
2164 	u8 hdr_len = 0;
2165 	int count = 0;
2166 	int tso = 0;
2167 
2168 	if (test_bit(__IGBVF_DOWN, &adapter->state)) {
2169 		dev_kfree_skb_any(skb);
2170 		return NETDEV_TX_OK;
2171 	}
2172 
2173 	if (skb->len <= 0) {
2174 		dev_kfree_skb_any(skb);
2175 		return NETDEV_TX_OK;
2176 	}
2177 
2178 	/*
2179 	 * need: count + 4 descriptors:
2180 	 *       + 2 desc gap to keep tail from touching head,
2181 	 *       + 1 desc for skb->data,
2182 	 *       + 1 desc for context descriptor,
2183 	 * otherwise try next time
2184 	 */
2185 	if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) {
2186 		/* this is a hard error */
2187 		return NETDEV_TX_BUSY;
2188 	}
2189 
2190 	if (vlan_tx_tag_present(skb)) {
2191 		tx_flags |= IGBVF_TX_FLAGS_VLAN;
2192 		tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT);
2193 	}
2194 
2195 	if (skb->protocol == htons(ETH_P_IP))
2196 		tx_flags |= IGBVF_TX_FLAGS_IPV4;
2197 
2198 	first = tx_ring->next_to_use;
2199 
2200 	tso = skb_is_gso(skb) ?
2201 		igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len) : 0;
2202 	if (unlikely(tso < 0)) {
2203 		dev_kfree_skb_any(skb);
2204 		return NETDEV_TX_OK;
2205 	}
2206 
2207 	if (tso)
2208 		tx_flags |= IGBVF_TX_FLAGS_TSO;
2209 	else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
2210 	         (skb->ip_summed == CHECKSUM_PARTIAL))
2211 		tx_flags |= IGBVF_TX_FLAGS_CSUM;
2212 
2213 	/*
2214 	 * count reflects descriptors mapped, if 0 then mapping error
2215 	 * count reflects descriptors mapped; if 0 then a mapping error
2216 	 */
2217 	count = igbvf_tx_map_adv(adapter, tx_ring, skb, first);
2218 
2219 	if (count) {
2220 		igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count,
2221 		                   skb->len, hdr_len);
2222 		/* Make sure there is space in the ring for the next send. */
2223 		igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4);
2224 	} else {
2225 		dev_kfree_skb_any(skb);
2226 		tx_ring->buffer_info[first].time_stamp = 0;
2227 		tx_ring->next_to_use = first;
2228 	}
2229 
2230 	return NETDEV_TX_OK;
2231 }
2232 
2233 static netdev_tx_t igbvf_xmit_frame(struct sk_buff *skb,
2234 				    struct net_device *netdev)
2235 {
2236 	struct igbvf_adapter *adapter = netdev_priv(netdev);
2237 	struct igbvf_ring *tx_ring;
2238 
2239 	if (test_bit(__IGBVF_DOWN, &adapter->state)) {
2240 		dev_kfree_skb_any(skb);
2241 		return NETDEV_TX_OK;
2242 	}
2243 
2244 	tx_ring = &adapter->tx_ring[0];
2245 
2246 	return igbvf_xmit_frame_ring_adv(skb, netdev, tx_ring);
2247 }
2248 
2249 /**
2250  * igbvf_tx_timeout - Respond to a Tx Hang
2251  * @netdev: network interface device structure
2252  **/
2253 static void igbvf_tx_timeout(struct net_device *netdev)
2254 {
2255 	struct igbvf_adapter *adapter = netdev_priv(netdev);
2256 
2257 	/* Do the reset outside of interrupt context */
2258 	adapter->tx_timeout_count++;
2259 	schedule_work(&adapter->reset_task);
2260 }
2261 
2262 static void igbvf_reset_task(struct work_struct *work)
2263 {
2264 	struct igbvf_adapter *adapter;
2265 	adapter = container_of(work, struct igbvf_adapter, reset_task);
2266 
2267 	igbvf_reinit_locked(adapter);
2268 }
2269 
2270 /**
2271  * igbvf_get_stats - Get System Network Statistics
2272  * @netdev: network interface device structure
2273  *
2274  * Returns the address of the device statistics structure.
2275  * The statistics are actually updated from the timer callback.
2276  **/
2277 static struct net_device_stats *igbvf_get_stats(struct net_device *netdev)
2278 {
2279 	struct igbvf_adapter *adapter = netdev_priv(netdev);
2280 
2281 	/* only return the current stats */
2282 	return &adapter->net_stats;
2283 }
2284 
2285 /**
2286  * igbvf_change_mtu - Change the Maximum Transfer Unit
2287  * @netdev: network interface device structure
2288  * @new_mtu: new value for maximum frame size
2289  *
2290  * Returns 0 on success, negative on failure
2291  **/
2292 static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
2293 {
2294 	struct igbvf_adapter *adapter = netdev_priv(netdev);
2295 	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
2296 
2297 	if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
2298 		dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
2299 		return -EINVAL;
2300 	}
2301 
2302 #define MAX_STD_JUMBO_FRAME_SIZE 9234
2303 	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
2304 		dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
2305 		return -EINVAL;
2306 	}
2307 
2308 	while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
2309 		msleep(1);
2310 	/* igbvf_down has a dependency on max_frame_size */
2311 	adapter->max_frame_size = max_frame;
2312 	if (netif_running(netdev))
2313 		igbvf_down(adapter);
2314 
2315 	/*
2316 	 * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
2317 	 * means we reserve 2 more, this pushes us to allocate from the next
2318 	 * larger slab size.
2319 	 * i.e. RXBUFFER_2048 --> size-4096 slab
2320 	 * However with the new *_jumbo_rx* routines, jumbo receives will use
2321 	 * fragmented skbs
2322 	 */
2323 
2324 	if (max_frame <= 1024)
2325 		adapter->rx_buffer_len = 1024;
2326 	else if (max_frame <= 2048)
2327 		adapter->rx_buffer_len = 2048;
2328 	else
2329 #if (PAGE_SIZE / 2) > 16384
2330 		adapter->rx_buffer_len = 16384;
2331 #else
2332 		adapter->rx_buffer_len = PAGE_SIZE / 2;
2333 #endif
2334 
2335 
2336 	/* adjust allocation if LPE protects us, and we aren't using SBP */
2337 	if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
2338 	     (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
2339 		adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN +
2340 		                         ETH_FCS_LEN;
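
	/*
	 * Example on a 4KB-page system: the default 1500-byte MTU gives a
	 * max_frame of 1518, which the adjustment above trims to a 1522-byte
	 * buffer; a 9000-byte jumbo MTU gives max_frame 9018, so
	 * rx_buffer_len stays at PAGE_SIZE / 2 (2048) and oversized frames
	 * rely on the fragmented-skb receive path noted above.
	 */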
2341 
2342 	dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
2343 	         netdev->mtu, new_mtu);
2344 	netdev->mtu = new_mtu;
2345 
2346 	if (netif_running(netdev))
2347 		igbvf_up(adapter);
2348 	else
2349 		igbvf_reset(adapter);
2350 
2351 	clear_bit(__IGBVF_RESETTING, &adapter->state);
2352 
2353 	return 0;
2354 }
2355 
2356 static int igbvf_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2357 {
2358 	switch (cmd) {
2359 	default:
2360 		return -EOPNOTSUPP;
2361 	}
2362 }
2363 
2364 static int igbvf_suspend(struct pci_dev *pdev, pm_message_t state)
2365 {
2366 	struct net_device *netdev = pci_get_drvdata(pdev);
2367 	struct igbvf_adapter *adapter = netdev_priv(netdev);
2368 #ifdef CONFIG_PM
2369 	int retval = 0;
2370 #endif
2371 
2372 	netif_device_detach(netdev);
2373 
2374 	if (netif_running(netdev)) {
2375 		WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
2376 		igbvf_down(adapter);
2377 		igbvf_free_irq(adapter);
2378 	}
2379 
2380 #ifdef CONFIG_PM
2381 	retval = pci_save_state(pdev);
2382 	if (retval)
2383 		return retval;
2384 #endif
2385 
2386 	pci_disable_device(pdev);
2387 
2388 	return 0;
2389 }
2390 
2391 #ifdef CONFIG_PM
2392 static int igbvf_resume(struct pci_dev *pdev)
2393 {
2394 	struct net_device *netdev = pci_get_drvdata(pdev);
2395 	struct igbvf_adapter *adapter = netdev_priv(netdev);
2396 	int err;
2397 
2398 	pci_restore_state(pdev);
2399 	err = pci_enable_device_mem(pdev);
2400 	if (err) {
2401 		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
2402 		return err;
2403 	}
2404 
2405 	pci_set_master(pdev);
2406 
2407 	if (netif_running(netdev)) {
2408 		err = igbvf_request_irq(adapter);
2409 		if (err)
2410 			return err;
2411 	}
2412 
2413 	igbvf_reset(adapter);
2414 
2415 	if (netif_running(netdev))
2416 		igbvf_up(adapter);
2417 
2418 	netif_device_attach(netdev);
2419 
2420 	return 0;
2421 }
2422 #endif
2423 
2424 static void igbvf_shutdown(struct pci_dev *pdev)
2425 {
2426 	igbvf_suspend(pdev, PMSG_SUSPEND);
2427 }
2428 
2429 #ifdef CONFIG_NET_POLL_CONTROLLER
2430 /*
2431  * Polling 'interrupt' - used by things like netconsole to send skbs
2432  * without having to re-enable interrupts. It's not called while
2433  * the interrupt routine is executing.
2434  */
2435 static void igbvf_netpoll(struct net_device *netdev)
2436 {
2437 	struct igbvf_adapter *adapter = netdev_priv(netdev);
2438 
2439 	disable_irq(adapter->pdev->irq);
2440 
2441 	igbvf_clean_tx_irq(adapter->tx_ring);
2442 
2443 	enable_irq(adapter->pdev->irq);
2444 }
2445 #endif
2446 
2447 /**
2448  * igbvf_io_error_detected - called when PCI error is detected
2449  * @pdev: Pointer to PCI device
2450  * @state: The current pci connection state
2451  *
2452  * This function is called after a PCI bus error affecting
2453  * this device has been detected.
2454  */
2455 static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev,
2456                                                 pci_channel_state_t state)
2457 {
2458 	struct net_device *netdev = pci_get_drvdata(pdev);
2459 	struct igbvf_adapter *adapter = netdev_priv(netdev);
2460 
2461 	netif_device_detach(netdev);
2462 
2463 	if (state == pci_channel_io_perm_failure)
2464 		return PCI_ERS_RESULT_DISCONNECT;
2465 
2466 	if (netif_running(netdev))
2467 		igbvf_down(adapter);
2468 	pci_disable_device(pdev);
2469 
2470 	/* Request a slot reset. */
2471 	return PCI_ERS_RESULT_NEED_RESET;
2472 }
2473 
2474 /**
2475  * igbvf_io_slot_reset - called after the pci bus has been reset.
2476  * @pdev: Pointer to PCI device
2477  *
2478  * Restart the card from scratch, as if from a cold-boot. Implementation
2479  * Restart the card from scratch, as if from a cold boot. Implementation
2480  * resembles the first half of the igbvf_resume routine.
2481 static pci_ers_result_t igbvf_io_slot_reset(struct pci_dev *pdev)
2482 {
2483 	struct net_device *netdev = pci_get_drvdata(pdev);
2484 	struct igbvf_adapter *adapter = netdev_priv(netdev);
2485 
2486 	if (pci_enable_device_mem(pdev)) {
2487 		dev_err(&pdev->dev,
2488 			"Cannot re-enable PCI device after reset.\n");
2489 		return PCI_ERS_RESULT_DISCONNECT;
2490 	}
2491 	pci_set_master(pdev);
2492 
2493 	igbvf_reset(adapter);
2494 
2495 	return PCI_ERS_RESULT_RECOVERED;
2496 }
2497 
2498 /**
2499  * igbvf_io_resume - called when traffic can start flowing again.
2500  * @pdev: Pointer to PCI device
2501  *
2502  * This callback is called when the error recovery driver tells us that
2503  * it's OK to resume normal operation. Implementation resembles the
2504  * second half of the igbvf_resume routine.
2505  */
2506 static void igbvf_io_resume(struct pci_dev *pdev)
2507 {
2508 	struct net_device *netdev = pci_get_drvdata(pdev);
2509 	struct igbvf_adapter *adapter = netdev_priv(netdev);
2510 
2511 	if (netif_running(netdev)) {
2512 		if (igbvf_up(adapter)) {
2513 			dev_err(&pdev->dev,
2514 				"can't bring device back up after reset\n");
2515 			return;
2516 		}
2517 	}
2518 
2519 	netif_device_attach(netdev);
2520 }
2521 
2522 static void igbvf_print_device_info(struct igbvf_adapter *adapter)
2523 {
2524 	struct e1000_hw *hw = &adapter->hw;
2525 	struct net_device *netdev = adapter->netdev;
2526 	struct pci_dev *pdev = adapter->pdev;
2527 
2528 	if (hw->mac.type == e1000_vfadapt_i350)
2529 		dev_info(&pdev->dev, "Intel(R) I350 Virtual Function\n");
2530 	else
2531 		dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n");
2532 	dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr);
2533 }
2534 
2535 static int igbvf_set_features(struct net_device *netdev, u32 features)
2536 {
2537 	struct igbvf_adapter *adapter = netdev_priv(netdev);
2538 
2539 	if (features & NETIF_F_RXCSUM)
2540 		adapter->flags &= ~IGBVF_FLAG_RX_CSUM_DISABLED;
2541 	else
2542 		adapter->flags |= IGBVF_FLAG_RX_CSUM_DISABLED;
2543 
2544 	return 0;
2545 }
2546 
2547 static const struct net_device_ops igbvf_netdev_ops = {
2548 	.ndo_open                       = igbvf_open,
2549 	.ndo_stop                       = igbvf_close,
2550 	.ndo_start_xmit                 = igbvf_xmit_frame,
2551 	.ndo_get_stats                  = igbvf_get_stats,
2552 	.ndo_set_rx_mode		= igbvf_set_multi,
2553 	.ndo_set_mac_address            = igbvf_set_mac,
2554 	.ndo_change_mtu                 = igbvf_change_mtu,
2555 	.ndo_do_ioctl                   = igbvf_ioctl,
2556 	.ndo_tx_timeout                 = igbvf_tx_timeout,
2557 	.ndo_vlan_rx_add_vid            = igbvf_vlan_rx_add_vid,
2558 	.ndo_vlan_rx_kill_vid           = igbvf_vlan_rx_kill_vid,
2559 #ifdef CONFIG_NET_POLL_CONTROLLER
2560 	.ndo_poll_controller            = igbvf_netpoll,
2561 #endif
2562 	.ndo_set_features               = igbvf_set_features,
2563 };
2564 
2565 /**
2566  * igbvf_probe - Device Initialization Routine
2567  * @pdev: PCI device information struct
2568  * @ent: entry in igbvf_pci_tbl
2569  *
2570  * Returns 0 on success, negative on failure
2571  *
2572  * igbvf_probe initializes an adapter identified by a pci_dev structure.
2573  * The OS initialization, configuring of the adapter private structure,
2574  * and a hardware reset occur.
2575  **/
2576 static int __devinit igbvf_probe(struct pci_dev *pdev,
2577                                  const struct pci_device_id *ent)
2578 {
2579 	struct net_device *netdev;
2580 	struct igbvf_adapter *adapter;
2581 	struct e1000_hw *hw;
2582 	const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data];
2583 
2584 	static int cards_found;
2585 	int err, pci_using_dac;
2586 
2587 	err = pci_enable_device_mem(pdev);
2588 	if (err)
2589 		return err;
2590 
2591 	pci_using_dac = 0;
2592 	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
2593 	if (!err) {
2594 		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
2595 		if (!err)
2596 			pci_using_dac = 1;
2597 	} else {
2598 		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
2599 		if (err) {
2600 			err = dma_set_coherent_mask(&pdev->dev,
2601 						    DMA_BIT_MASK(32));
2602 			if (err) {
2603 				dev_err(&pdev->dev, "No usable DMA "
2604 				        "configuration, aborting\n");
2605 				goto err_dma;
2606 			}
2607 		}
2608 	}
2609 
2610 	err = pci_request_regions(pdev, igbvf_driver_name);
2611 	if (err)
2612 		goto err_pci_reg;
2613 
2614 	pci_set_master(pdev);
2615 
2616 	err = -ENOMEM;
2617 	netdev = alloc_etherdev(sizeof(struct igbvf_adapter));
2618 	if (!netdev)
2619 		goto err_alloc_etherdev;
2620 
2621 	SET_NETDEV_DEV(netdev, &pdev->dev);
2622 
2623 	pci_set_drvdata(pdev, netdev);
2624 	adapter = netdev_priv(netdev);
2625 	hw = &adapter->hw;
2626 	adapter->netdev = netdev;
2627 	adapter->pdev = pdev;
2628 	adapter->ei = ei;
2629 	adapter->pba = ei->pba;
2630 	adapter->flags = ei->flags;
2631 	adapter->hw.back = adapter;
2632 	adapter->hw.mac.type = ei->mac;
2633 	adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;
2634 
2635 	/* PCI config space info */
2636 
2637 	hw->vendor_id = pdev->vendor;
2638 	hw->device_id = pdev->device;
2639 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
2640 	hw->subsystem_device_id = pdev->subsystem_device;
2641 	hw->revision_id = pdev->revision;
2642 
2643 	err = -EIO;
2644 	adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0),
2645 	                              pci_resource_len(pdev, 0));
2646 
2647 	if (!adapter->hw.hw_addr)
2648 		goto err_ioremap;
2649 
2650 	if (ei->get_variants) {
2651 		err = ei->get_variants(adapter);
2652 		if (err)
2653 			goto err_ioremap;
2654 	}
2655 
2656 	/* setup adapter struct */
2657 	err = igbvf_sw_init(adapter);
2658 	if (err)
2659 		goto err_sw_init;
2660 
2661 	/* construct the net_device struct */
2662 	netdev->netdev_ops = &igbvf_netdev_ops;
2663 
2664 	igbvf_set_ethtool_ops(netdev);
2665 	netdev->watchdog_timeo = 5 * HZ;
2666 	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
2667 
2668 	adapter->bd_number = cards_found++;
2669 
2670 	netdev->hw_features = NETIF_F_SG |
2671 	                   NETIF_F_IP_CSUM |
2672 			   NETIF_F_IPV6_CSUM |
2673 			   NETIF_F_TSO |
2674 			   NETIF_F_TSO6 |
2675 			   NETIF_F_RXCSUM;
2676 
2677 	netdev->features = netdev->hw_features |
2678 	                   NETIF_F_HW_VLAN_TX |
2679 	                   NETIF_F_HW_VLAN_RX |
2680 	                   NETIF_F_HW_VLAN_FILTER;
2681 
2682 	if (pci_using_dac)
2683 		netdev->features |= NETIF_F_HIGHDMA;
2684 
2685 	netdev->vlan_features |= NETIF_F_TSO;
2686 	netdev->vlan_features |= NETIF_F_TSO6;
2687 	netdev->vlan_features |= NETIF_F_IP_CSUM;
2688 	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
2689 	netdev->vlan_features |= NETIF_F_SG;
2690 
2691 	/* reset the controller to put the device in a known good state */
2692 	err = hw->mac.ops.reset_hw(hw);
2693 	if (err) {
2694 		dev_info(&pdev->dev,
2695 			 "PF still in reset state, assigning new address."
2696 			 " Is the PF interface up?\n");
2697 		dev_hw_addr_random(adapter->netdev, hw->mac.addr);
2698 	} else {
2699 		err = hw->mac.ops.read_mac_addr(hw);
2700 		if (err) {
2701 			dev_err(&pdev->dev, "Error reading MAC address\n");
2702 			goto err_hw_init;
2703 		}
2704 	}
2705 
2706 	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
2707 	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
2708 
2709 	if (!is_valid_ether_addr(netdev->perm_addr)) {
2710 		dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
2711 		        netdev->dev_addr);
2712 		err = -EIO;
2713 		goto err_hw_init;
2714 	}
2715 
2716 	setup_timer(&adapter->watchdog_timer, &igbvf_watchdog,
2717 	            (unsigned long) adapter);
2718 
2719 	INIT_WORK(&adapter->reset_task, igbvf_reset_task);
2720 	INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task);
2721 
2722 	/* ring size defaults */
2723 	adapter->rx_ring->count = 1024;
2724 	adapter->tx_ring->count = 1024;
2725 
2726 	/* reset the hardware with the new settings */
2727 	igbvf_reset(adapter);
2728 
2729 	strcpy(netdev->name, "eth%d");
2730 	err = register_netdev(netdev);
2731 	if (err)
2732 		goto err_hw_init;
2733 
2734 	/* tell the stack to leave us alone until igbvf_open() is called */
2735 	netif_carrier_off(netdev);
2736 	netif_stop_queue(netdev);
2737 
2738 	igbvf_print_device_info(adapter);
2739 
2740 	igbvf_initialize_last_counter_stats(adapter);
2741 
2742 	return 0;
2743 
2744 err_hw_init:
2745 	kfree(adapter->tx_ring);
2746 	kfree(adapter->rx_ring);
2747 err_sw_init:
2748 	igbvf_reset_interrupt_capability(adapter);
2749 	iounmap(adapter->hw.hw_addr);
2750 err_ioremap:
2751 	free_netdev(netdev);
2752 err_alloc_etherdev:
2753 	pci_release_regions(pdev);
2754 err_pci_reg:
2755 err_dma:
2756 	pci_disable_device(pdev);
2757 	return err;
2758 }
2759 
2760 /**
2761  * igbvf_remove - Device Removal Routine
2762  * @pdev: PCI device information struct
2763  *
2764  * igbvf_remove is called by the PCI subsystem to alert the driver
2765  * that it should release a PCI device.  This could be caused by a
2766  * Hot-Plug event, or because the driver is going to be removed from
2767  * memory.
2768  **/
2769 static void __devexit igbvf_remove(struct pci_dev *pdev)
2770 {
2771 	struct net_device *netdev = pci_get_drvdata(pdev);
2772 	struct igbvf_adapter *adapter = netdev_priv(netdev);
2773 	struct e1000_hw *hw = &adapter->hw;
2774 
2775 	/*
2776 	 * The watchdog timer may be rescheduled, so explicitly
2777 	 * prevent it from being rescheduled.
2778 	 */
2779 	set_bit(__IGBVF_DOWN, &adapter->state);
2780 	del_timer_sync(&adapter->watchdog_timer);
2781 
2782 	cancel_work_sync(&adapter->reset_task);
2783 	cancel_work_sync(&adapter->watchdog_task);
2784 
2785 	unregister_netdev(netdev);
2786 
2787 	igbvf_reset_interrupt_capability(adapter);
2788 
2789 	/*
2790 	 * it is important to delete the napi struct prior to freeing the
2791 	 * rx ring so that you do not end up with null pointer refs
2792 	 */
2793 	netif_napi_del(&adapter->rx_ring->napi);
2794 	kfree(adapter->tx_ring);
2795 	kfree(adapter->rx_ring);
2796 
2797 	iounmap(hw->hw_addr);
2798 	if (hw->flash_address)
2799 		iounmap(hw->flash_address);
2800 	pci_release_regions(pdev);
2801 
2802 	free_netdev(netdev);
2803 
2804 	pci_disable_device(pdev);
2805 }
2806 
2807 /* PCI Error Recovery (ERS) */
2808 static struct pci_error_handlers igbvf_err_handler = {
2809 	.error_detected = igbvf_io_error_detected,
2810 	.slot_reset = igbvf_io_slot_reset,
2811 	.resume = igbvf_io_resume,
2812 };
2813 
2814 static DEFINE_PCI_DEVICE_TABLE(igbvf_pci_tbl) = {
2815 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf },
2816 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_VF), board_i350_vf },
2817 	{ } /* terminate list */
2818 };
2819 MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl);
2820 
2821 /* PCI Device API Driver */
2822 static struct pci_driver igbvf_driver = {
2823 	.name     = igbvf_driver_name,
2824 	.id_table = igbvf_pci_tbl,
2825 	.probe    = igbvf_probe,
2826 	.remove   = __devexit_p(igbvf_remove),
2827 #ifdef CONFIG_PM
2828 	/* Power Management Hooks */
2829 	.suspend  = igbvf_suspend,
2830 	.resume   = igbvf_resume,
2831 #endif
2832 	.shutdown = igbvf_shutdown,
2833 	.err_handler = &igbvf_err_handler
2834 };
2835 
2836 /**
2837  * igbvf_init_module - Driver Registration Routine
2838  *
2839  * igbvf_init_module is the first routine called when the driver is
2840  * loaded. All it does is register with the PCI subsystem.
2841  **/
2842 static int __init igbvf_init_module(void)
2843 {
2844 	int ret;
2845 	printk(KERN_INFO "%s - version %s\n",
2846 	       igbvf_driver_string, igbvf_driver_version);
2847 	printk(KERN_INFO "%s\n", igbvf_copyright);
2848 
2849 	ret = pci_register_driver(&igbvf_driver);
2850 
2851 	return ret;
2852 }
2853 module_init(igbvf_init_module);
2854 
2855 /**
2856  * igbvf_exit_module - Driver Exit Cleanup Routine
2857  *
2858  * igbvf_exit_module is called just before the driver is removed
2859  * from memory.
2860  **/
2861 static void __exit igbvf_exit_module(void)
2862 {
2863 	pci_unregister_driver(&igbvf_driver);
2864 }
2865 module_exit(igbvf_exit_module);
2866 
2867 
2868 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
2869 MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver");
2870 MODULE_LICENSE("GPL");
2871 MODULE_VERSION(DRV_VERSION);
2872 
2873 /* netdev.c */
2874