xref: /linux/drivers/net/ethernet/intel/igb/e1000_mac.c (revision b8265621f4888af9494e1d685620871ec81bc33d)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2007 - 2018 Intel Corporation. */
3 
4 #include <linux/if_ether.h>
5 #include <linux/delay.h>
6 #include <linux/pci.h>
7 #include <linux/netdevice.h>
8 #include <linux/etherdevice.h>
9 
10 #include "e1000_mac.h"
11 
12 #include "igb.h"
13 
14 static s32 igb_set_default_fc(struct e1000_hw *hw);
15 static void igb_set_fc_watermarks(struct e1000_hw *hw);
16 
17 /**
18  *  igb_get_bus_info_pcie - Get PCIe bus information
19  *  @hw: pointer to the HW structure
20  *
21  *  Determines and stores the system bus information for a particular
22  *  network interface.  The following bus information is determined and stored:
23  *  bus speed, bus width, type (PCIe), and PCIe function.
24  **/
25 s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
26 {
27 	struct e1000_bus_info *bus = &hw->bus;
28 	s32 ret_val;
29 	u32 reg;
30 	u16 pcie_link_status;
31 
32 	bus->type = e1000_bus_type_pci_express;
33 
34 	ret_val = igb_read_pcie_cap_reg(hw,
35 					PCI_EXP_LNKSTA,
36 					&pcie_link_status);
37 	if (ret_val) {
38 		bus->width = e1000_bus_width_unknown;
39 		bus->speed = e1000_bus_speed_unknown;
40 	} else {
41 		switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) {
42 		case PCI_EXP_LNKSTA_CLS_2_5GB:
43 			bus->speed = e1000_bus_speed_2500;
44 			break;
45 		case PCI_EXP_LNKSTA_CLS_5_0GB:
46 			bus->speed = e1000_bus_speed_5000;
47 			break;
48 		default:
49 			bus->speed = e1000_bus_speed_unknown;
50 			break;
51 		}
52 
53 		bus->width = (enum e1000_bus_width)((pcie_link_status &
54 						     PCI_EXP_LNKSTA_NLW) >>
55 						     PCI_EXP_LNKSTA_NLW_SHIFT);
56 	}
57 
58 	reg = rd32(E1000_STATUS);
59 	bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
60 
61 	return 0;
62 }
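
/* Illustrative sketch, not called by the driver: how the two PCI_EXP_LNKSTA
 * fields used above decode from a raw Link Status word.  The helper name and
 * the sample value are hypothetical; the masks and shift come from
 * <uapi/linux/pci_regs.h>.  A raw value of 0x1042, for example, carries a
 * current link speed code of 2 (5.0 GT/s) and a negotiated width of 4 lanes.
 */
static inline u16 igb_example_lnksta_width(u16 pcie_link_status)
{
	/* Negotiated link width in lanes (x1, x2, x4, ...) */
	return (pcie_link_status & PCI_EXP_LNKSTA_NLW) >>
	       PCI_EXP_LNKSTA_NLW_SHIFT;
}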
63 
64 /**
65  *  igb_clear_vfta - Clear VLAN filter table
66  *  @hw: pointer to the HW structure
67  *
68  *  Clears the register array which contains the VLAN filter table by
69  *  setting all the values to 0.
70  **/
71 void igb_clear_vfta(struct e1000_hw *hw)
72 {
73 	u32 offset;
74 
75 	for (offset = E1000_VLAN_FILTER_TBL_SIZE; offset--;)
76 		hw->mac.ops.write_vfta(hw, offset, 0);
77 }
78 
79 /**
80  *  igb_write_vfta - Write value to VLAN filter table
81  *  @hw: pointer to the HW structure
82  *  @offset: register offset in VLAN filter table
83  *  @value: register value written to VLAN filter table
84  *
85  *  Writes value at the given offset in the register array which stores
86  *  the VLAN filter table.
87  **/
88 void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
89 {
90 	struct igb_adapter *adapter = hw->back;
91 
92 	array_wr32(E1000_VFTA, offset, value);
93 	wrfl();
94 
95 	adapter->shadow_vfta[offset] = value;
96 }
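
/* Illustrative usage sketch with a hypothetical caller, not part of the
 * driver: enabling VLAN 100 through the VFTA while keeping the shadow copy
 * consistent (igb_write_vfta() above updates shadow_vfta itself).
 */
static inline void igb_example_enable_vlan_100(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	u32 offset = 100 / 32;				/* VFTA register 3 */
	u32 value = adapter->shadow_vfta[offset] | BIT(100 % 32); /* bit 4 */

	hw->mac.ops.write_vfta(hw, offset, value);
}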
97 
98 /**
99  *  igb_init_rx_addrs - Initialize receive addresses
100  *  @hw: pointer to the HW structure
101  *  @rar_count: total number of supported Receive Address Registers
102  *
103  *  Sets up the receive address registers by setting the base receive address
104  *  register to the device's MAC address and clearing all the other receive
105  *  address registers to 0.
106  **/
107 void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
108 {
109 	u32 i;
110 	u8 mac_addr[ETH_ALEN] = {0};
111 
112 	/* Setup the receive address */
113 	hw_dbg("Programming MAC Address into RAR[0]\n");
114 
115 	hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
116 
117 	/* Zero out the other (rar_count - 1) receive addresses */
118 	hw_dbg("Clearing RAR[1-%u]\n", rar_count-1);
119 	for (i = 1; i < rar_count; i++)
120 		hw->mac.ops.rar_set(hw, mac_addr, i);
121 }
122 
123 /**
124  *  igb_find_vlvf_slot - find the VLAN id or the first empty slot
125  *  @hw: pointer to hardware structure
126  *  @vlan: VLAN id to write to VLAN filter
127  *  @vlvf_bypass: skip VLVF if no match is found
128  *
129  *  Return the VLVF index where this VLAN id should be placed, or
130  *  -E1000_ERR_NO_SPACE when no usable slot is available
131  **/
132 static s32 igb_find_vlvf_slot(struct e1000_hw *hw, u32 vlan, bool vlvf_bypass)
133 {
134 	s32 regindex, first_empty_slot;
135 	u32 bits;
136 
137 	/* short cut the special case */
138 	if (vlan == 0)
139 		return 0;
140 
141 	/* if vlvf_bypass is set we don't want to use an empty slot; we
142 	 * will simply bypass the VLVF if there are no entries present in the
143 	 * VLVF that contain our VLAN
144 	 */
145 	first_empty_slot = vlvf_bypass ? -E1000_ERR_NO_SPACE : 0;
146 
147 	/* Search for the VLAN id in the VLVF entries. Save off the first empty
148 	 * slot found along the way.
149 	 *
150 	 * pre-decrement loop covering (E1000_VLVF_ARRAY_SIZE - 1) .. 1
151 	 */
152 	for (regindex = E1000_VLVF_ARRAY_SIZE; --regindex > 0;) {
153 		bits = rd32(E1000_VLVF(regindex)) & E1000_VLVF_VLANID_MASK;
154 		if (bits == vlan)
155 			return regindex;
156 		if (!first_empty_slot && !bits)
157 			first_empty_slot = regindex;
158 	}
159 
160 	return first_empty_slot ? : -E1000_ERR_NO_SPACE;
161 }
162 
163 /**
164  *  igb_vfta_set - enable or disable vlan in VLAN filter table
165  *  @hw: pointer to the HW structure
166  *  @vlan: VLAN id to add or remove
167  *  @vind: VMDq output index that maps queue to VLAN id
168  *  @vlan_on: if true add filter, if false remove
 *  @vlvf_bypass: skip VLVF if no match is found
169  *
170  *  Sets or clears a bit in the VLAN filter table array based on VLAN id
171  *  and whether we are adding or removing the filter
172  **/
173 s32 igb_vfta_set(struct e1000_hw *hw, u32 vlan, u32 vind,
174 		 bool vlan_on, bool vlvf_bypass)
175 {
176 	struct igb_adapter *adapter = hw->back;
177 	u32 regidx, vfta_delta, vfta, bits;
178 	s32 vlvf_index;
179 
180 	if ((vlan > 4095) || (vind > 7))
181 		return -E1000_ERR_PARAM;
182 
183 	/* this is a 2 part operation - first the VFTA, then the
184 	 * VLVF and VLVFB if VT Mode is set
185 	 * We don't write the VFTA until we know the VLVF part succeeded.
186 	 */
187 
188 	/* Part 1
189 	 * The VFTA is a bitstring made up of 128 32-bit registers
190 	 * that enable the particular VLAN id, much like the MTA:
191 	 *    bits[11-5]: which register
192 	 *    bits[4-0]:  which bit in the register
193 	 */
194 	regidx = vlan / 32;
195 	vfta_delta = BIT(vlan % 32);
196 	vfta = adapter->shadow_vfta[regidx];
197 
198 	/* vfta_delta represents the difference between the current value
199 	 * of vfta and the value we want in the register.  Since the diff
200 	 * is an XOR mask we can just update vfta using an XOR.
201 	 */
202 	vfta_delta &= vlan_on ? ~vfta : vfta;
203 	vfta ^= vfta_delta;
204 
205 	/* Part 2
206 	 * If VT Mode is set
207 	 *   Either vlan_on
208 	 *     make sure the VLAN is in VLVF
209 	 *     set the vind bit in the matching VLVFB
210 	 *   Or !vlan_on
211 	 *     clear the pool bit and possibly the vind
212 	 */
213 	if (!adapter->vfs_allocated_count)
214 		goto vfta_update;
215 
216 	vlvf_index = igb_find_vlvf_slot(hw, vlan, vlvf_bypass);
217 	if (vlvf_index < 0) {
218 		if (vlvf_bypass)
219 			goto vfta_update;
220 		return vlvf_index;
221 	}
222 
223 	bits = rd32(E1000_VLVF(vlvf_index));
224 
225 	/* set the pool bit */
226 	bits |= BIT(E1000_VLVF_POOLSEL_SHIFT + vind);
227 	if (vlan_on)
228 		goto vlvf_update;
229 
230 	/* clear the pool bit */
231 	bits ^= BIT(E1000_VLVF_POOLSEL_SHIFT + vind);
232 
233 	if (!(bits & E1000_VLVF_POOLSEL_MASK)) {
234 		/* Clear VFTA first, then disable VLVF.  Otherwise
235 		 * we run the risk of stray packets leaking into
236 		 * the PF via the default pool
237 		 */
238 		if (vfta_delta)
239 			hw->mac.ops.write_vfta(hw, regidx, vfta);
240 
241 		/* disable VLVF and clear remaining bit from pool */
242 		wr32(E1000_VLVF(vlvf_index), 0);
243 
244 		return 0;
245 	}
246 
247 	/* If there are still bits set in the VLVFB registers
248 	 * for the VLAN ID indicated we need to see if the
249 	 * caller is requesting that we clear the VFTA entry bit.
250 	 * If the caller has requested that we clear the VFTA
251 	 * entry bit but there are still pools/VFs using this VLAN
252 	 * ID entry then ignore the request.  We're not worried
253 	 * about the case where we're turning the VFTA VLAN ID
254 	 * entry bit on, only when requested to turn it off as
255 	 * there may be multiple pools and/or VFs using the
256 	 * VLAN ID entry.  In that case we cannot clear the
257 	 * VFTA bit until all pools/VFs using that VLAN ID have also
258 	 * been cleared.  This will be indicated by "bits" being
259 	 * zero.
260 	 */
261 	vfta_delta = 0;
262 
263 vlvf_update:
264 	/* record pool change and enable VLAN ID if not already enabled */
265 	wr32(E1000_VLVF(vlvf_index), bits | vlan | E1000_VLVF_VLANID_ENABLE);
266 
267 vfta_update:
268 	/* bit was set/cleared before we started */
269 	if (vfta_delta)
270 		hw->mac.ops.write_vfta(hw, regidx, vfta);
271 
272 	return 0;
273 }
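
/* Illustrative sketch, not called by the driver: the XOR-mask update used in
 * Part 1 of igb_vfta_set() above, written out as a standalone helper with a
 * hypothetical name.  When the requested state already matches the current
 * bit, vfta_delta collapses to 0 and the register value is left untouched.
 */
static inline u32 igb_example_vfta_apply(u32 vfta, u32 vlan, bool vlan_on)
{
	u32 vfta_delta = BIT(vlan % 32);

	/* keep the delta only if the bit actually needs to change */
	vfta_delta &= vlan_on ? ~vfta : vfta;

	return vfta ^ vfta_delta;
}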
274 
275 /**
276  *  igb_check_alt_mac_addr - Check for alternate MAC addr
277  *  @hw: pointer to the HW structure
278  *
279  *  Checks the NVM for an alternate MAC address.  An alternate MAC address
280  *  can be set up by pre-boot software and must be treated like a permanent
281  *  address that overrides the actual permanent MAC address.  If an
282  *  alternate MAC address is found it is saved in the hw struct and
283  *  programmed into RAR0; if none is present, RAR0 is left unchanged.  An
284  *  error is returned only if the NVM cannot be read.
285  **/
286 s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
287 {
288 	u32 i;
289 	s32 ret_val = 0;
290 	u16 offset, nvm_alt_mac_addr_offset, nvm_data;
291 	u8 alt_mac_addr[ETH_ALEN];
292 
293 	/* Alternate MAC address is handled by the option ROM for 82580
294 	 * and newer. SW support not required.
295 	 */
296 	if (hw->mac.type >= e1000_82580)
297 		goto out;
298 
299 	ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
300 				 &nvm_alt_mac_addr_offset);
301 	if (ret_val) {
302 		hw_dbg("NVM Read Error\n");
303 		goto out;
304 	}
305 
306 	if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
307 	    (nvm_alt_mac_addr_offset == 0x0000))
308 		/* There is no Alternate MAC Address */
309 		goto out;
310 
311 	if (hw->bus.func == E1000_FUNC_1)
312 		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
313 	if (hw->bus.func == E1000_FUNC_2)
314 		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2;
315 
316 	if (hw->bus.func == E1000_FUNC_3)
317 		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3;
318 	for (i = 0; i < ETH_ALEN; i += 2) {
319 		offset = nvm_alt_mac_addr_offset + (i >> 1);
320 		ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
321 		if (ret_val) {
322 			hw_dbg("NVM Read Error\n");
323 			goto out;
324 		}
325 
326 		alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
327 		alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
328 	}
329 
330 	/* if multicast bit is set, the alternate address will not be used */
331 	if (is_multicast_ether_addr(alt_mac_addr)) {
332 		hw_dbg("Ignoring Alternate Mac Address with MC bit set\n");
333 		goto out;
334 	}
335 
336 	/* We have a valid alternate MAC address, and we want to treat it the
337 	 * same as the normal permanent MAC address stored by the HW into the
338 	 * RAR. Do this by mapping this address into RAR0.
339 	 */
340 	hw->mac.ops.rar_set(hw, alt_mac_addr, 0);
341 
342 out:
343 	return ret_val;
344 }
345 
346 /**
347  *  igb_rar_set - Set receive address register
348  *  @hw: pointer to the HW structure
349  *  @addr: pointer to the receive address
350  *  @index: receive address array register
351  *
352  *  Sets the receive address array register at index to the address passed
353  *  in by addr.
354  **/
355 void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
356 {
357 	u32 rar_low, rar_high;
358 
359 	/* HW expects these in little endian so we reverse the byte order
360 	 * from network order (big endian) to little endian
361 	 */
362 	rar_low = ((u32) addr[0] |
363 		   ((u32) addr[1] << 8) |
364 		    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
365 
366 	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
367 
368 	/* If the MAC address is zero, no need to set the AV bit */
369 	if (rar_low || rar_high)
370 		rar_high |= E1000_RAH_AV;
371 
372 	/* Some bridges will combine consecutive 32-bit writes into
373 	 * a single burst write, which will malfunction on some parts.
374 	 * The flushes avoid this.
375 	 */
376 	wr32(E1000_RAL(index), rar_low);
377 	wrfl();
378 	wr32(E1000_RAH(index), rar_high);
379 	wrfl();
380 }
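
/* Worked example of the packing above (illustrative only, hypothetical
 * helper name): for the address 00:1b:21:aa:bb:cc this produces
 * rar_low = 0xaa211b00 and rar_high = 0x0000ccbb, before the AV bit is
 * OR'd into rar_high.
 */
static inline void igb_example_pack_rar(const u8 *addr, u32 *rar_low,
					u32 *rar_high)
{
	*rar_low = (u32)addr[0] | ((u32)addr[1] << 8) |
		   ((u32)addr[2] << 16) | ((u32)addr[3] << 24);
	*rar_high = (u32)addr[4] | ((u32)addr[5] << 8);
}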
381 
382 /**
383  *  igb_mta_set - Set multicast filter table address
384  *  @hw: pointer to the HW structure
385  *  @hash_value: determines the MTA register and bit to set
386  *
387  *  The multicast table address is a register array of 32-bit registers.
388  *  The hash_value is used to determine what register the bit is in, the
389  *  current value is read, the new bit is OR'd in and the new value is
390  *  written back into the register.
391  **/
392 void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
393 {
394 	u32 hash_bit, hash_reg, mta;
395 
396 	/* The MTA is a register array of 32-bit registers. It is
397 	 * treated like an array of (32*mta_reg_count) bits.  We want to
398 	 * set bit BitArray[hash_value]. So we figure out what register
399 	 * the bit is in, read it, OR in the new bit, then write
400 	 * back the new value.  The (hw->mac.mta_reg_count - 1) serves as a
401 	 * mask to bits 31:5 of the hash value which gives us the
402 	 * register we're modifying.  The hash bit within that register
403 	 * is determined by the lower 5 bits of the hash value.
404 	 */
405 	hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
406 	hash_bit = hash_value & 0x1F;
407 
408 	mta = array_rd32(E1000_MTA, hash_reg);
409 
410 	mta |= BIT(hash_bit);
411 
412 	array_wr32(E1000_MTA, hash_reg, mta);
413 	wrfl();
414 }
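
/* Illustrative sketch, not called by the driver (hypothetical helper name):
 * the register/bit split used above.  With a 128-register MTA, hash value
 * 0x563 selects register (0x563 >> 5) & 0x7F = 43 and bit 0x563 & 0x1F = 3.
 */
static inline void igb_example_mta_index(struct e1000_hw *hw, u32 hash_value,
					 u32 *hash_reg, u32 *hash_bit)
{
	*hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
	*hash_bit = hash_value & 0x1F;
}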
415 
416 /**
417  *  igb_hash_mc_addr - Generate a multicast hash value
418  *  @hw: pointer to the HW structure
419  *  @mc_addr: pointer to a multicast address
420  *
421  *  Generates a multicast address hash value which is used to determine
422  *  the multicast filter table array address and new table value.  See
423  *  igb_mta_set()
424  **/
425 static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
426 {
427 	u32 hash_value, hash_mask;
428 	u8 bit_shift = 0;
429 
430 	/* Register count multiplied by bits per register */
431 	hash_mask = (hw->mac.mta_reg_count * 32) - 1;
432 
433 	/* For a mc_filter_type of 0, bit_shift is the number of left-shifts
434 	 * where 0xFF would still fall within the hash mask.
435 	 */
436 	while (hash_mask >> bit_shift != 0xFF)
437 		bit_shift++;
438 
439 	/* The portion of the address that is used for the hash table
440 	 * is determined by the mc_filter_type setting.
441 	 * The algorithm is such that there is a total of 8 bits of shifting.
442 	 * The bit_shift for a mc_filter_type of 0 represents the number of
443 	 * left-shifts where the MSB of mc_addr[5] would still fall within
444 	 * the hash_mask.  Case 0 does this exactly.  Since there are a total
445 	 * of 8 bits of shifting, then mc_addr[4] will shift right the
446 	 * remaining number of bits. Thus 8 - bit_shift.  The rest of the
447 	 * cases are a variation of this algorithm...essentially raising the
448 	 * number of bits to shift mc_addr[5] left, while still keeping the
449 	 * 8-bit shifting total.
450 	 *
451 	 * For example, given the following Destination MAC Address and an
452 	 * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
453 	 * we can see that the bit_shift for case 0 is 4.  These are the hash
454 	 * values resulting from each mc_filter_type...
455 	 * [0] [1] [2] [3] [4] [5]
456 	 * 01  AA  00  12  34  56
457 	 * LSB                 MSB
458 	 *
459 	 * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
460 	 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
461 	 * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
462 	 * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
463 	 */
464 	switch (hw->mac.mc_filter_type) {
465 	default:
466 	case 0:
467 		break;
468 	case 1:
469 		bit_shift += 1;
470 		break;
471 	case 2:
472 		bit_shift += 2;
473 		break;
474 	case 3:
475 		bit_shift += 4;
476 		break;
477 	}
478 
479 	hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
480 				  (((u16) mc_addr[5]) << bit_shift)));
481 
482 	return hash_value;
483 }
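
/* Illustrative sketch, not called by the driver (hypothetical helper name):
 * the case-0 hash for a 128-register (4096-bit) MTA, where bit_shift is 4.
 * With mc_addr[4] = 0x34 and mc_addr[5] = 0x56 this returns 0x563, matching
 * the worked example in the comment above.
 */
static inline u32 igb_example_mc_hash_case0(const u8 *mc_addr)
{
	u32 hash_mask = (128 * 32) - 1;		/* 0xFFF */

	return hash_mask & ((mc_addr[4] >> 4) | ((u16)mc_addr[5] << 4));
}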
484 
485 /**
486  *  igb_update_mc_addr_list - Update Multicast addresses
487  *  @hw: pointer to the HW structure
488  *  @mc_addr_list: array of multicast addresses to program
489  *  @mc_addr_count: number of multicast addresses to program
490  *
491  *  Updates the entire Multicast Table Array.
492  *  The caller must have a packed mc_addr_list of multicast addresses.
493  **/
494 void igb_update_mc_addr_list(struct e1000_hw *hw,
495 			     u8 *mc_addr_list, u32 mc_addr_count)
496 {
497 	u32 hash_value, hash_bit, hash_reg;
498 	int i;
499 
500 	/* clear mta_shadow */
501 	memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
502 
503 	/* update mta_shadow from mc_addr_list */
504 	for (i = 0; (u32) i < mc_addr_count; i++) {
505 		hash_value = igb_hash_mc_addr(hw, mc_addr_list);
506 
507 		hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
508 		hash_bit = hash_value & 0x1F;
509 
510 		hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit);
511 		mc_addr_list += (ETH_ALEN);
512 	}
513 
514 	/* replace the entire MTA table */
515 	for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
516 		array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]);
517 	wrfl();
518 }
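
/* Illustrative usage sketch with a hypothetical caller, not part of the
 * driver: the function expects a packed array of 6-byte addresses, i.e.
 * address N starts at byte N * ETH_ALEN.
 */
static inline void igb_example_program_two_mc_addrs(struct e1000_hw *hw)
{
	u8 list[2 * ETH_ALEN] = {
		0x01, 0x00, 0x5e, 0x00, 0x00, 0x01,	/* 224.0.0.1, all hosts */
		0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb,	/* 224.0.0.251, mDNS */
	};

	igb_update_mc_addr_list(hw, list, 2);
}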
519 
520 /**
521  *  igb_clear_hw_cntrs_base - Clear base hardware counters
522  *  @hw: pointer to the HW structure
523  *
524  *  Clears the base hardware counters by reading the counter registers.
525  **/
526 void igb_clear_hw_cntrs_base(struct e1000_hw *hw)
527 {
528 	rd32(E1000_CRCERRS);
529 	rd32(E1000_SYMERRS);
530 	rd32(E1000_MPC);
531 	rd32(E1000_SCC);
532 	rd32(E1000_ECOL);
533 	rd32(E1000_MCC);
534 	rd32(E1000_LATECOL);
535 	rd32(E1000_COLC);
536 	rd32(E1000_DC);
537 	rd32(E1000_SEC);
538 	rd32(E1000_RLEC);
539 	rd32(E1000_XONRXC);
540 	rd32(E1000_XONTXC);
541 	rd32(E1000_XOFFRXC);
542 	rd32(E1000_XOFFTXC);
543 	rd32(E1000_FCRUC);
544 	rd32(E1000_GPRC);
545 	rd32(E1000_BPRC);
546 	rd32(E1000_MPRC);
547 	rd32(E1000_GPTC);
548 	rd32(E1000_GORCL);
549 	rd32(E1000_GORCH);
550 	rd32(E1000_GOTCL);
551 	rd32(E1000_GOTCH);
552 	rd32(E1000_RNBC);
553 	rd32(E1000_RUC);
554 	rd32(E1000_RFC);
555 	rd32(E1000_ROC);
556 	rd32(E1000_RJC);
557 	rd32(E1000_TORL);
558 	rd32(E1000_TORH);
559 	rd32(E1000_TOTL);
560 	rd32(E1000_TOTH);
561 	rd32(E1000_TPR);
562 	rd32(E1000_TPT);
563 	rd32(E1000_MPTC);
564 	rd32(E1000_BPTC);
565 }
566 
567 /**
568  *  igb_check_for_copper_link - Check for link (Copper)
569  *  @hw: pointer to the HW structure
570  *
571  *  Checks to see if the link status of the hardware has changed.  If a
572  *  change in link status has been detected, then we read the PHY registers
573  *  to get the current speed/duplex if link exists.
574  **/
575 s32 igb_check_for_copper_link(struct e1000_hw *hw)
576 {
577 	struct e1000_mac_info *mac = &hw->mac;
578 	s32 ret_val;
579 	bool link;
580 
581 	/* We only want to go out to the PHY registers to see if Auto-Neg
582 	 * has completed and/or if our link status has changed.  The
583 	 * get_link_status flag is set upon receiving a Link Status
584 	 * Change or Rx Sequence Error interrupt.
585 	 */
586 	if (!mac->get_link_status) {
587 		ret_val = 0;
588 		goto out;
589 	}
590 
591 	/* First we want to see if the MII Status Register reports
592 	 * link.  If so, then we want to get the current speed/duplex
593 	 * of the PHY.
594 	 */
595 	ret_val = igb_phy_has_link(hw, 1, 0, &link);
596 	if (ret_val)
597 		goto out;
598 
599 	if (!link)
600 		goto out; /* No link detected */
601 
602 	mac->get_link_status = false;
603 
604 	/* Check if there was a downshift; this must be checked
605 	 * immediately after link-up
606 	 */
607 	igb_check_downshift(hw);
608 
609 	/* If we are forcing speed/duplex, then we simply return since
610 	 * we have already determined whether we have link or not.
611 	 */
612 	if (!mac->autoneg) {
613 		ret_val = -E1000_ERR_CONFIG;
614 		goto out;
615 	}
616 
617 	/* Auto-Neg is enabled.  Auto Speed Detection takes care
618 	 * of MAC speed/duplex configuration.  So we only need to
619 	 * configure Collision Distance in the MAC.
620 	 */
621 	igb_config_collision_dist(hw);
622 
623 	/* Configure Flow Control now that Auto-Neg has completed.
624 	 * First, we need to restore the desired flow control
625 	 * settings because we may have had to re-autoneg with a
626 	 * different link partner.
627 	 */
628 	ret_val = igb_config_fc_after_link_up(hw);
629 	if (ret_val)
630 		hw_dbg("Error configuring flow control\n");
631 
632 out:
633 	return ret_val;
634 }
635 
636 /**
637  *  igb_setup_link - Setup flow control and link settings
638  *  @hw: pointer to the HW structure
639  *
640  *  Determines which flow control settings to use, then configures flow
641  *  control.  Calls the appropriate media-specific link configuration
642  *  function.  Assuming the adapter has a valid link partner, a valid link
643  *  should be established.  Assumes the hardware has previously been reset
644  *  and the transmitter and receiver are not enabled.
645  **/
646 s32 igb_setup_link(struct e1000_hw *hw)
647 {
648 	s32 ret_val = 0;
649 
650 	/* In the case of the phy reset being blocked, we already have a link.
651 	 * We do not need to set it up again.
652 	 */
653 	if (igb_check_reset_block(hw))
654 		goto out;
655 
656 	/* If requested flow control is set to default, set flow control
657 	 * based on the EEPROM flow control settings.
658 	 */
659 	if (hw->fc.requested_mode == e1000_fc_default) {
660 		ret_val = igb_set_default_fc(hw);
661 		if (ret_val)
662 			goto out;
663 	}
664 
665 	/* We want to save off the original Flow Control configuration just
666 	 * in case we get disconnected and then reconnected into a different
667 	 * hub or switch with different Flow Control capabilities.
668 	 */
669 	hw->fc.current_mode = hw->fc.requested_mode;
670 
671 	hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);
672 
673 	/* Call the necessary media_type subroutine to configure the link. */
674 	ret_val = hw->mac.ops.setup_physical_interface(hw);
675 	if (ret_val)
676 		goto out;
677 
678 	/* Initialize the flow control address, type, and PAUSE timer
679 	 * registers to their default values.  This is done even if flow
680 	 * control is disabled, because it does not hurt anything to
681 	 * initialize these registers.
682 	 */
683 	hw_dbg("Initializing the Flow Control address, type and timer regs\n");
684 	wr32(E1000_FCT, FLOW_CONTROL_TYPE);
685 	wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
686 	wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);
687 
688 	wr32(E1000_FCTTV, hw->fc.pause_time);
689 
690 	igb_set_fc_watermarks(hw);
691 
692 out:
693 
694 	return ret_val;
695 }
696 
697 /**
698  *  igb_config_collision_dist - Configure collision distance
699  *  @hw: pointer to the HW structure
700  *
701  *  Configures the collision distance to the default value and is used
702  *  during link setup. Currently no func pointer exists and all
703  *  implementations are handled in the generic version of this function.
704  **/
705 void igb_config_collision_dist(struct e1000_hw *hw)
706 {
707 	u32 tctl;
708 
709 	tctl = rd32(E1000_TCTL);
710 
711 	tctl &= ~E1000_TCTL_COLD;
712 	tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
713 
714 	wr32(E1000_TCTL, tctl);
715 	wrfl();
716 }
717 
718 /**
719  *  igb_set_fc_watermarks - Set flow control high/low watermarks
720  *  @hw: pointer to the HW structure
721  *
722  *  Sets the flow control high/low threshold (watermark) registers.  If
723  *  flow control XON frame transmission is enabled, then set XON frame
724  *  transmission as well.
725  **/
726 static void igb_set_fc_watermarks(struct e1000_hw *hw)
727 {
728 	u32 fcrtl = 0, fcrth = 0;
729 
730 	/* Set the flow control receive threshold registers.  Normally,
731 	 * these registers will be set to a default threshold that may be
732 	 * adjusted later by the driver's runtime code.  However, if the
733 	 * ability to transmit pause frames is not enabled, then these
734 	 * registers will be set to 0.
735 	 */
736 	if (hw->fc.current_mode & e1000_fc_tx_pause) {
737 		/* We need to set up the Receive Threshold high and low water
738 		 * marks as well as (optionally) enabling the transmission of
739 		 * XON frames.
740 		 */
741 		fcrtl = hw->fc.low_water;
742 		if (hw->fc.send_xon)
743 			fcrtl |= E1000_FCRTL_XONE;
744 
745 		fcrth = hw->fc.high_water;
746 	}
747 	wr32(E1000_FCRTL, fcrtl);
748 	wr32(E1000_FCRTH, fcrth);
749 }
750 
751 /**
752  *  igb_set_default_fc - Set flow control default values
753  *  @hw: pointer to the HW structure
754  *
755  *  Read the EEPROM for the default values for flow control and store the
756  *  values.
757  **/
758 static s32 igb_set_default_fc(struct e1000_hw *hw)
759 {
760 	s32 ret_val = 0;
761 	u16 lan_offset;
762 	u16 nvm_data;
763 
764 	/* Read and store word 0x0F of the EEPROM. This word contains bits
765 	 * that determine the hardware's default PAUSE (flow control) mode,
766 	 * a bit that determines whether the HW defaults to enabling or
767 	 * disabling auto-negotiation, and the direction of the
768 	 * SW defined pins. If there is no SW override of the flow
769 	 * control setting, then the variable hw->fc will
770 	 * be initialized based on a value in the EEPROM.
771 	 */
772 	if (hw->mac.type == e1000_i350)
773 		lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func);
774 	else
775 		lan_offset = 0;
776 
777 	ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG + lan_offset,
778 				   1, &nvm_data);
779 	if (ret_val) {
780 		hw_dbg("NVM Read Error\n");
781 		goto out;
782 	}
783 
784 	if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
785 		hw->fc.requested_mode = e1000_fc_none;
786 	else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR)
787 		hw->fc.requested_mode = e1000_fc_tx_pause;
788 	else
789 		hw->fc.requested_mode = e1000_fc_full;
790 
791 out:
792 	return ret_val;
793 }
794 
795 /**
796  *  igb_force_mac_fc - Force the MAC's flow control settings
797  *  @hw: pointer to the HW structure
798  *
799  *  Force the MAC's flow control settings.  Sets the TFCE and RFCE bits in the
800  *  device control register to reflect the adapter settings.  TFCE and RFCE
801  *  need to be explicitly set by software when a copper PHY is used because
802  *  autonegotiation is managed by the PHY rather than the MAC.  Software must
803  *  also configure these bits when link is forced on a fiber connection.
804  **/
805 s32 igb_force_mac_fc(struct e1000_hw *hw)
806 {
807 	u32 ctrl;
808 	s32 ret_val = 0;
809 
810 	ctrl = rd32(E1000_CTRL);
811 
812 	/* Because we didn't get link via the internal auto-negotiation
813 	 * mechanism (we either forced link or we got link via PHY
814 	 * auto-neg), we have to manually enable/disable transmit and
815 	 * receive flow control.
816 	 *
817 	 * The "Case" statement below enables/disables flow control
818 	 * according to the "hw->fc.current_mode" parameter.
819 	 *
820 	 * The possible values of the "fc" parameter are:
821 	 *      0:  Flow control is completely disabled
822 	 *      1:  Rx flow control is enabled (we can receive pause
823 	 *          frames but not send pause frames).
824 	 *      2:  Tx flow control is enabled (we can send pause
825 	 *          frames but we do not receive pause frames).
826 	 *      3:  Both Rx and Tx flow control (symmetric) is enabled.
827 	 *  other:  No other values should be possible at this point.
828 	 */
829 	hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);
830 
831 	switch (hw->fc.current_mode) {
832 	case e1000_fc_none:
833 		ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
834 		break;
835 	case e1000_fc_rx_pause:
836 		ctrl &= (~E1000_CTRL_TFCE);
837 		ctrl |= E1000_CTRL_RFCE;
838 		break;
839 	case e1000_fc_tx_pause:
840 		ctrl &= (~E1000_CTRL_RFCE);
841 		ctrl |= E1000_CTRL_TFCE;
842 		break;
843 	case e1000_fc_full:
844 		ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
845 		break;
846 	default:
847 		hw_dbg("Flow control param set incorrectly\n");
848 		ret_val = -E1000_ERR_CONFIG;
849 		goto out;
850 	}
851 
852 	wr32(E1000_CTRL, ctrl);
853 
854 out:
855 	return ret_val;
856 }
857 
858 /**
859  *  igb_config_fc_after_link_up - Configures flow control after link
860  *  @hw: pointer to the HW structure
861  *
862  *  Checks the status of auto-negotiation after link up to ensure that the
863  *  speed and duplex were not forced.  If the link needed to be forced, then
864  *  flow control needs to be forced also.  If auto-negotiation is enabled
865  *  and did not fail, then we configure flow control based on our link
866  *  partner.
867  **/
868 s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
869 {
870 	struct e1000_mac_info *mac = &hw->mac;
871 	s32 ret_val = 0;
872 	u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg;
873 	u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
874 	u16 speed, duplex;
875 
876 	/* Check for the case where we have fiber media and auto-neg failed
877 	 * so we had to force link.  In this case, we need to force the
878 	 * configuration of the MAC to match the "fc" parameter.
879 	 */
880 	if (mac->autoneg_failed) {
881 		if (hw->phy.media_type == e1000_media_type_internal_serdes)
882 			ret_val = igb_force_mac_fc(hw);
883 	} else {
884 		if (hw->phy.media_type == e1000_media_type_copper)
885 			ret_val = igb_force_mac_fc(hw);
886 	}
887 
888 	if (ret_val) {
889 		hw_dbg("Error forcing flow control settings\n");
890 		goto out;
891 	}
892 
893 	/* Check for the case where we have copper media and auto-neg is
894 	 * enabled.  In this case, we need to check and see if Auto-Neg
895 	 * has completed, and if so, how the PHY and link partner has
896 	 * flow control configured.
897 	 */
898 	if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
899 		/* Read the MII Status Register and check to see if AutoNeg
900 		 * has completed.  We read this twice because this reg has
901 		 * some "sticky" (latched) bits.
902 		 */
903 		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
904 						   &mii_status_reg);
905 		if (ret_val)
906 			goto out;
907 		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
908 						   &mii_status_reg);
909 		if (ret_val)
910 			goto out;
911 
912 		if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
913 			hw_dbg("Copper PHY and Auto Neg has not completed.\n");
914 			goto out;
915 		}
916 
917 		/* The AutoNeg process has completed, so we now need to
918 		 * read both the Auto Negotiation Advertisement
919 		 * Register (Address 4) and the Auto_Negotiation Base
920 		 * Page Ability Register (Address 5) to determine how
921 		 * flow control was negotiated.
922 		 */
923 		ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
924 					    &mii_nway_adv_reg);
925 		if (ret_val)
926 			goto out;
927 		ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
928 					    &mii_nway_lp_ability_reg);
929 		if (ret_val)
930 			goto out;
931 
932 		/* Two bits in the Auto Negotiation Advertisement Register
933 		 * (Address 4) and two bits in the Auto Negotiation Base
934 		 * Page Ability Register (Address 5) determine flow control
935 		 * for both the PHY and the link partner.  The following
936 		 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
937 		 * 1999, describes these PAUSE resolution bits and how flow
938 		 * control is determined based upon these settings.
939 		 * NOTE:  DC = Don't Care
940 		 *
941 		 *   LOCAL DEVICE  |   LINK PARTNER
942 		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
943 		 *-------|---------|-------|---------|--------------------
944 		 *   0   |    0    |  DC   |   DC    | e1000_fc_none
945 		 *   0   |    1    |   0   |   DC    | e1000_fc_none
946 		 *   0   |    1    |   1   |    0    | e1000_fc_none
947 		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
948 		 *   1   |    0    |   0   |   DC    | e1000_fc_none
949 		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
950 		 *   1   |    1    |   0   |    0    | e1000_fc_none
951 		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
952 		 *
953 		 * Are both PAUSE bits set to 1?  If so, this implies
954 		 * Symmetric Flow Control is enabled at both ends.  The
955 		 * ASM_DIR bits are irrelevant per the spec.
956 		 *
957 		 * For Symmetric Flow Control:
958 		 *
959 		 *   LOCAL DEVICE  |   LINK PARTNER
960 		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
961 		 *-------|---------|-------|---------|--------------------
962 		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
963 		 *
964 		 */
965 		if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
966 		    (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
967 			/* Now we need to check if the user selected Rx ONLY
968 			 * of pause frames.  In this case, we had to advertise
969 			 * FULL flow control because we could not advertise Rx
970 			 * ONLY. Hence, we must now check to see if we need to
971 			 * turn OFF the TRANSMISSION of PAUSE frames.
972 			 */
973 			if (hw->fc.requested_mode == e1000_fc_full) {
974 				hw->fc.current_mode = e1000_fc_full;
975 				hw_dbg("Flow Control = FULL.\n");
976 			} else {
977 				hw->fc.current_mode = e1000_fc_rx_pause;
978 				hw_dbg("Flow Control = RX PAUSE frames only.\n");
979 			}
980 		}
981 		/* For receiving PAUSE frames ONLY.
982 		 *
983 		 *   LOCAL DEVICE  |   LINK PARTNER
984 		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
985 		 *-------|---------|-------|---------|--------------------
986 		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
987 		 */
988 		else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
989 			  (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
990 			  (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
991 			  (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
992 			hw->fc.current_mode = e1000_fc_tx_pause;
993 			hw_dbg("Flow Control = TX PAUSE frames only.\n");
994 		}
995 		/* For transmitting PAUSE frames ONLY.
996 		 *
997 		 *   LOCAL DEVICE  |   LINK PARTNER
998 		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
999 		 *-------|---------|-------|---------|--------------------
1000 		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
1001 		 */
1002 		else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
1003 			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
1004 			 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
1005 			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
1006 			hw->fc.current_mode = e1000_fc_rx_pause;
1007 			hw_dbg("Flow Control = RX PAUSE frames only.\n");
1008 		}
1009 		/* Per the IEEE spec, at this point flow control should be
1010 		 * disabled.  However, we want to consider that we could
1011 		 * be connected to a legacy switch that doesn't advertise
1012 		 * desired flow control, but can be forced on the link
1013 		 * partner.  So if we advertised no flow control, that is
1014 		 * what we will resolve to.  If we advertised some kind of
1015 		 * receive capability (Rx Pause Only or Full Flow Control)
1016 		 * and the link partner advertised none, we will configure
1017 		 * ourselves to enable Rx Flow Control only.  We can do
1018 		 * this safely for two reasons:  If the link partner really
1019 		 * didn't want flow control enabled, and we enable Rx, no
1020 		 * harm done since we won't be receiving any PAUSE frames
1021 		 * anyway.  If the intent on the link partner was to have
1022 		 * flow control enabled, then by us enabling RX only, we
1023 		 * can at least receive pause frames and process them.
1024 		 * This is a good idea because in most cases, since we are
1025 		 * predominantly a server NIC, more often than not we will
1026 		 * be asked to delay transmission of packets rather than to ask
1027 		 * our link partner to pause transmission of frames.
1028 		 */
1029 		else if ((hw->fc.requested_mode == e1000_fc_none) ||
1030 			 (hw->fc.requested_mode == e1000_fc_tx_pause) ||
1031 			 (hw->fc.strict_ieee)) {
1032 			hw->fc.current_mode = e1000_fc_none;
1033 			hw_dbg("Flow Control = NONE.\n");
1034 		} else {
1035 			hw->fc.current_mode = e1000_fc_rx_pause;
1036 			hw_dbg("Flow Control = RX PAUSE frames only.\n");
1037 		}
1038 
1039 		/* Now we need to do one last check...  If we auto-
1040 		 * negotiated to HALF DUPLEX, flow control should not be
1041 		 * enabled per IEEE 802.3 spec.
1042 		 */
1043 		ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
1044 		if (ret_val) {
1045 			hw_dbg("Error getting link speed and duplex\n");
1046 			goto out;
1047 		}
1048 
1049 		if (duplex == HALF_DUPLEX)
1050 			hw->fc.current_mode = e1000_fc_none;
1051 
1052 		/* Now we call a subroutine to actually force the MAC
1053 		 * controller to use the correct flow control settings.
1054 		 */
1055 		ret_val = igb_force_mac_fc(hw);
1056 		if (ret_val) {
1057 			hw_dbg("Error forcing flow control settings\n");
1058 			goto out;
1059 		}
1060 	}
1061 	/* Check for the case where we have SerDes media and auto-neg is
1062 	 * enabled.  In this case, we need to check and see if Auto-Neg
1063 	 * has completed, and if so, how the PHY and link partner has
1064 	 * flow control configured.
1065 	 */
1066 	if ((hw->phy.media_type == e1000_media_type_internal_serdes)
1067 		&& mac->autoneg) {
1068 		/* Read the PCS_LSTS and check to see if AutoNeg
1069 		 * has completed.
1070 		 */
1071 		pcs_status_reg = rd32(E1000_PCS_LSTAT);
1072 
1073 		if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
1074 			hw_dbg("PCS Auto Neg has not completed.\n");
1075 			return ret_val;
1076 		}
1077 
1078 		/* The AutoNeg process has completed, so we now need to
1079 		 * read both the Auto Negotiation Advertisement
1080 		 * Register (PCS_ANADV) and the Auto_Negotiation Base
1081 		 * Page Ability Register (PCS_LPAB) to determine how
1082 		 * flow control was negotiated.
1083 		 */
1084 		pcs_adv_reg = rd32(E1000_PCS_ANADV);
1085 		pcs_lp_ability_reg = rd32(E1000_PCS_LPAB);
1086 
1087 		/* Two bits in the Auto Negotiation Advertisement Register
1088 		 * (PCS_ANADV) and two bits in the Auto Negotiation Base
1089 		 * Page Ability Register (PCS_LPAB) determine flow control
1090 		 * for both the PHY and the link partner.  The following
1091 		 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
1092 		 * 1999, describes these PAUSE resolution bits and how flow
1093 		 * control is determined based upon these settings.
1094 		 * NOTE:  DC = Don't Care
1095 		 *
1096 		 *   LOCAL DEVICE  |   LINK PARTNER
1097 		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
1098 		 *-------|---------|-------|---------|--------------------
1099 		 *   0   |    0    |  DC   |   DC    | e1000_fc_none
1100 		 *   0   |    1    |   0   |   DC    | e1000_fc_none
1101 		 *   0   |    1    |   1   |    0    | e1000_fc_none
1102 		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
1103 		 *   1   |    0    |   0   |   DC    | e1000_fc_none
1104 		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
1105 		 *   1   |    1    |   0   |    0    | e1000_fc_none
1106 		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
1107 		 *
1108 		 * Are both PAUSE bits set to 1?  If so, this implies
1109 		 * Symmetric Flow Control is enabled at both ends.  The
1110 		 * ASM_DIR bits are irrelevant per the spec.
1111 		 *
1112 		 * For Symmetric Flow Control:
1113 		 *
1114 		 *   LOCAL DEVICE  |   LINK PARTNER
1115 		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1116 		 *-------|---------|-------|---------|--------------------
1117 		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
1118 		 *
1119 		 */
1120 		if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
1121 		    (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) {
1122 			/* Now we need to check if the user selected Rx ONLY
1123 			 * of pause frames.  In this case, we had to advertise
1124 			 * FULL flow control because we could not advertise Rx
1125 			 * ONLY. Hence, we must now check to see if we need to
1126 			 * turn OFF the TRANSMISSION of PAUSE frames.
1127 			 */
1128 			if (hw->fc.requested_mode == e1000_fc_full) {
1129 				hw->fc.current_mode = e1000_fc_full;
1130 				hw_dbg("Flow Control = FULL.\n");
1131 			} else {
1132 				hw->fc.current_mode = e1000_fc_rx_pause;
1133 				hw_dbg("Flow Control = Rx PAUSE frames only.\n");
1134 			}
1135 		}
1136 		/* For receiving PAUSE frames ONLY.
1137 		 *
1138 		 *   LOCAL DEVICE  |   LINK PARTNER
1139 		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1140 		 *-------|---------|-------|---------|--------------------
1141 		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
1142 		 */
1143 		else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) &&
1144 			  (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
1145 			  (pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
1146 			  (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
1147 			hw->fc.current_mode = e1000_fc_tx_pause;
1148 			hw_dbg("Flow Control = Tx PAUSE frames only.\n");
1149 		}
1150 		/* For transmitting PAUSE frames ONLY.
1151 		 *
1152 		 *   LOCAL DEVICE  |   LINK PARTNER
1153 		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1154 		 *-------|---------|-------|---------|--------------------
1155 		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
1156 		 */
1157 		else if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
1158 			 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
1159 			 !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
1160 			 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
1161 			hw->fc.current_mode = e1000_fc_rx_pause;
1162 			hw_dbg("Flow Control = Rx PAUSE frames only.\n");
1163 		} else {
1164 			/* Per the IEEE spec, at this point flow control
1165 			 * should be disabled.
1166 			 */
1167 			hw->fc.current_mode = e1000_fc_none;
1168 			hw_dbg("Flow Control = NONE.\n");
1169 		}
1170 
1171 		/* Now we call a subroutine to actually force the MAC
1172 		 * controller to use the correct flow control settings.
1173 		 */
1174 		pcs_ctrl_reg = rd32(E1000_PCS_LCTL);
1175 		pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
1176 		wr32(E1000_PCS_LCTL, pcs_ctrl_reg);
1177 
1178 		ret_val = igb_force_mac_fc(hw);
1179 		if (ret_val) {
1180 			hw_dbg("Error forcing flow control settings\n");
1181 			return ret_val;
1182 		}
1183 	}
1184 
1185 out:
1186 	return ret_val;
1187 }
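
/* Illustrative sketch, not used by the driver (hypothetical helper name,
 * return type assumes the driver's enum e1000_fc_mode): the PAUSE/ASM_DIR
 * resolution table above expressed as a pure function of the already
 * extracted local and link-partner bits.  Note that the real code also
 * downgrades e1000_fc_full to e1000_fc_rx_pause when the user requested
 * only Rx pause, and forces e1000_fc_none on half duplex.
 */
static inline enum e1000_fc_mode igb_example_resolve_fc(bool loc_pause,
							bool loc_asm,
							bool lp_pause,
							bool lp_asm)
{
	if (loc_pause && lp_pause)
		return e1000_fc_full;
	if (!loc_pause && loc_asm && lp_pause && lp_asm)
		return e1000_fc_tx_pause;
	if (loc_pause && loc_asm && !lp_pause && lp_asm)
		return e1000_fc_rx_pause;
	return e1000_fc_none;
}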
1188 
1189 /**
1190  *  igb_get_speed_and_duplex_copper - Retrieve current speed/duplex
1191  *  @hw: pointer to the HW structure
1192  *  @speed: stores the current speed
1193  *  @duplex: stores the current duplex
1194  *
1195  *  Read the status register for the current speed/duplex and store the current
1196  *  speed and duplex for copper connections.
1197  **/
1198 s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
1199 				      u16 *duplex)
1200 {
1201 	u32 status;
1202 
1203 	status = rd32(E1000_STATUS);
1204 	if (status & E1000_STATUS_SPEED_1000) {
1205 		*speed = SPEED_1000;
1206 		hw_dbg("1000 Mbps, ");
1207 	} else if (status & E1000_STATUS_SPEED_100) {
1208 		*speed = SPEED_100;
1209 		hw_dbg("100 Mbps, ");
1210 	} else {
1211 		*speed = SPEED_10;
1212 		hw_dbg("10 Mbps, ");
1213 	}
1214 
1215 	if (status & E1000_STATUS_FD) {
1216 		*duplex = FULL_DUPLEX;
1217 		hw_dbg("Full Duplex\n");
1218 	} else {
1219 		*duplex = HALF_DUPLEX;
1220 		hw_dbg("Half Duplex\n");
1221 	}
1222 
1223 	return 0;
1224 }
1225 
1226 /**
1227  *  igb_get_hw_semaphore - Acquire hardware semaphore
1228  *  @hw: pointer to the HW structure
1229  *
1230  *  Acquire the HW semaphore to access the PHY or NVM
1231  **/
1232 s32 igb_get_hw_semaphore(struct e1000_hw *hw)
1233 {
1234 	u32 swsm;
1235 	s32 ret_val = 0;
1236 	s32 timeout = hw->nvm.word_size + 1;
1237 	s32 i = 0;
1238 
1239 	/* Get the SW semaphore */
1240 	while (i < timeout) {
1241 		swsm = rd32(E1000_SWSM);
1242 		if (!(swsm & E1000_SWSM_SMBI))
1243 			break;
1244 
1245 		udelay(50);
1246 		i++;
1247 	}
1248 
1249 	if (i == timeout) {
1250 		hw_dbg("Driver can't access device - SMBI bit is set.\n");
1251 		ret_val = -E1000_ERR_NVM;
1252 		goto out;
1253 	}
1254 
1255 	/* Get the FW semaphore. */
1256 	for (i = 0; i < timeout; i++) {
1257 		swsm = rd32(E1000_SWSM);
1258 		wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
1259 
1260 		/* Semaphore acquired if bit latched */
1261 		if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
1262 			break;
1263 
1264 		udelay(50);
1265 	}
1266 
1267 	if (i == timeout) {
1268 		/* Release semaphores */
1269 		igb_put_hw_semaphore(hw);
1270 		hw_dbg("Driver can't access the NVM\n");
1271 		ret_val = -E1000_ERR_NVM;
1272 		goto out;
1273 	}
1274 
1275 out:
1276 	return ret_val;
1277 }
1278 
1279 /**
1280  *  igb_put_hw_semaphore - Release hardware semaphore
1281  *  @hw: pointer to the HW structure
1282  *
1283  *  Release hardware semaphore used to access the PHY or NVM
1284  **/
1285 void igb_put_hw_semaphore(struct e1000_hw *hw)
1286 {
1287 	u32 swsm;
1288 
1289 	swsm = rd32(E1000_SWSM);
1290 
1291 	swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
1292 
1293 	wr32(E1000_SWSM, swsm);
1294 }
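
/* Illustrative usage sketch with a hypothetical caller, not part of the
 * driver: the semaphore acquired by igb_get_hw_semaphore() is always paired
 * with igb_put_hw_semaphore() on every path around the protected access.
 */
static inline s32 igb_example_locked_nvm_read(struct e1000_hw *hw, u16 offset,
					      u16 *data)
{
	s32 ret_val;

	ret_val = igb_get_hw_semaphore(hw);
	if (ret_val)
		return ret_val;

	ret_val = hw->nvm.ops.read(hw, offset, 1, data);

	igb_put_hw_semaphore(hw);
	return ret_val;
}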
1295 
1296 /**
1297  *  igb_get_auto_rd_done - Check for auto read completion
1298  *  @hw: pointer to the HW structure
1299  *
1300  *  Check EEPROM for Auto Read done bit.
1301  **/
1302 s32 igb_get_auto_rd_done(struct e1000_hw *hw)
1303 {
1304 	s32 i = 0;
1305 	s32 ret_val = 0;
1306 
1307 
1308 	while (i < AUTO_READ_DONE_TIMEOUT) {
1309 		if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD)
1310 			break;
1311 		usleep_range(1000, 2000);
1312 		i++;
1313 	}
1314 
1315 	if (i == AUTO_READ_DONE_TIMEOUT) {
1316 		hw_dbg("Auto read by HW from NVM has not completed.\n");
1317 		ret_val = -E1000_ERR_RESET;
1318 		goto out;
1319 	}
1320 
1321 out:
1322 	return ret_val;
1323 }
1324 
1325 /**
1326  *  igb_valid_led_default - Verify a valid default LED config
1327  *  @hw: pointer to the HW structure
1328  *  @data: pointer to where the default LED configuration word is returned
1329  *
1330  *  Read the EEPROM for the current default LED configuration.  If the
1331  *  LED configuration is not valid, set to a valid LED configuration.
1332  **/
1333 static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
1334 {
1335 	s32 ret_val;
1336 
1337 	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
1338 	if (ret_val) {
1339 		hw_dbg("NVM Read Error\n");
1340 		goto out;
1341 	}
1342 
1343 	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
1344 		switch (hw->phy.media_type) {
1345 		case e1000_media_type_internal_serdes:
1346 			*data = ID_LED_DEFAULT_82575_SERDES;
1347 			break;
1348 		case e1000_media_type_copper:
1349 		default:
1350 			*data = ID_LED_DEFAULT;
1351 			break;
1352 		}
1353 	}
1354 out:
1355 	return ret_val;
1356 }
1357 
1358 /**
1359  *  igb_id_led_init - Store LED configurations from the NVM
1360  *  @hw: pointer to the HW structure
1361  *
1362  **/
1363 s32 igb_id_led_init(struct e1000_hw *hw)
1364 {
1365 	struct e1000_mac_info *mac = &hw->mac;
1366 	s32 ret_val;
1367 	const u32 ledctl_mask = 0x000000FF;
1368 	const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
1369 	const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
1370 	u16 data, i, temp;
1371 	const u16 led_mask = 0x0F;
1372 
1373 	/* i210 and i211 devices have different LED mechanism */
1374 	/* i210 and i211 devices have a different LED mechanism */
1375 	    (hw->mac.type == e1000_i211))
1376 		ret_val = igb_valid_led_default_i210(hw, &data);
1377 	else
1378 		ret_val = igb_valid_led_default(hw, &data);
1379 
1380 	if (ret_val)
1381 		goto out;
1382 
1383 	mac->ledctl_default = rd32(E1000_LEDCTL);
1384 	mac->ledctl_mode1 = mac->ledctl_default;
1385 	mac->ledctl_mode2 = mac->ledctl_default;
1386 
1387 	for (i = 0; i < 4; i++) {
1388 		temp = (data >> (i << 2)) & led_mask;
1389 		switch (temp) {
1390 		case ID_LED_ON1_DEF2:
1391 		case ID_LED_ON1_ON2:
1392 		case ID_LED_ON1_OFF2:
1393 			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
1394 			mac->ledctl_mode1 |= ledctl_on << (i << 3);
1395 			break;
1396 		case ID_LED_OFF1_DEF2:
1397 		case ID_LED_OFF1_ON2:
1398 		case ID_LED_OFF1_OFF2:
1399 			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
1400 			mac->ledctl_mode1 |= ledctl_off << (i << 3);
1401 			break;
1402 		default:
1403 			/* Do nothing */
1404 			break;
1405 		}
1406 		switch (temp) {
1407 		case ID_LED_DEF1_ON2:
1408 		case ID_LED_ON1_ON2:
1409 		case ID_LED_OFF1_ON2:
1410 			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
1411 			mac->ledctl_mode2 |= ledctl_on << (i << 3);
1412 			break;
1413 		case ID_LED_DEF1_OFF2:
1414 		case ID_LED_ON1_OFF2:
1415 		case ID_LED_OFF1_OFF2:
1416 			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
1417 			mac->ledctl_mode2 |= ledctl_off << (i << 3);
1418 			break;
1419 		default:
1420 			/* Do nothing */
1421 			break;
1422 		}
1423 	}
1424 
1425 out:
1426 	return ret_val;
1427 }
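
/* Illustrative sketch, not called by the driver (hypothetical helper name):
 * nibble i of the ID LED word selects the behaviour of LED i, and the
 * corresponding LEDCTL mode field lives in byte i of the register, hence
 * the (i << 2) and (i << 3) shifts above.  For i = 2 the nibble is
 * (data >> 8) & 0xF and the mode bits land at LEDCTL bits 23:16.
 */
static inline u16 igb_example_id_led_nibble(u16 data, unsigned int i)
{
	return (data >> (i << 2)) & 0x0F;
}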
1428 
1429 /**
1430  *  igb_cleanup_led - Set LED config to default operation
1431  *  @hw: pointer to the HW structure
1432  *
1433  *  Remove the current LED configuration and set the LED configuration
1434  *  to the default value, saved from the EEPROM.
1435  **/
1436 s32 igb_cleanup_led(struct e1000_hw *hw)
1437 {
1438 	wr32(E1000_LEDCTL, hw->mac.ledctl_default);
1439 	return 0;
1440 }
1441 
1442 /**
1443  *  igb_blink_led - Blink LED
1444  *  @hw: pointer to the HW structure
1445  *
1446  *  Blink the LEDs which are set to be on.
1447  **/
1448 s32 igb_blink_led(struct e1000_hw *hw)
1449 {
1450 	u32 ledctl_blink = 0;
1451 	u32 i;
1452 
1453 	if (hw->phy.media_type == e1000_media_type_fiber) {
1454 		/* always blink LED0 for PCI-E fiber */
1455 		ledctl_blink = E1000_LEDCTL_LED0_BLINK |
1456 		     (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
1457 	} else {
1458 		/* Set the blink bit for each LED that's "on" (0x0E)
1459 		 * (or "off" if inverted) in ledctl_mode2.  The blink
1460 		 * logic in hardware only works when mode is set to "on"
1461 		 * so it must be changed accordingly when the mode is
1462 		 * "off" and inverted.
1463 		 */
1464 		ledctl_blink = hw->mac.ledctl_mode2;
1465 		for (i = 0; i < 32; i += 8) {
1466 			u32 mode = (hw->mac.ledctl_mode2 >> i) &
1467 			    E1000_LEDCTL_LED0_MODE_MASK;
1468 			u32 led_default = hw->mac.ledctl_default >> i;
1469 
1470 			if ((!(led_default & E1000_LEDCTL_LED0_IVRT) &&
1471 			     (mode == E1000_LEDCTL_MODE_LED_ON)) ||
1472 			    ((led_default & E1000_LEDCTL_LED0_IVRT) &&
1473 			     (mode == E1000_LEDCTL_MODE_LED_OFF))) {
1474 				ledctl_blink &=
1475 				    ~(E1000_LEDCTL_LED0_MODE_MASK << i);
1476 				ledctl_blink |= (E1000_LEDCTL_LED0_BLINK |
1477 						 E1000_LEDCTL_MODE_LED_ON) << i;
1478 			}
1479 		}
1480 	}
1481 
1482 	wr32(E1000_LEDCTL, ledctl_blink);
1483 
1484 	return 0;
1485 }
1486 
1487 /**
1488  *  igb_led_off - Turn LED off
1489  *  @hw: pointer to the HW structure
1490  *
1491  *  Turn LED off.
1492  **/
1493 s32 igb_led_off(struct e1000_hw *hw)
1494 {
1495 	switch (hw->phy.media_type) {
1496 	case e1000_media_type_copper:
1497 		wr32(E1000_LEDCTL, hw->mac.ledctl_mode1);
1498 		break;
1499 	default:
1500 		break;
1501 	}
1502 
1503 	return 0;
1504 }
1505 
1506 /**
1507  *  igb_disable_pcie_master - Disables PCI-express master access
1508  *  @hw: pointer to the HW structure
1509  *
1510  *  Returns 0 if successful, else returns -E1000_ERR_MASTER_REQUESTS_PENDING
1511  *  (-10) if the master disable bit has not caused the master requests to be
1512  *  disabled.
1513  *
1514  *  Disables PCI-Express master access and verifies there are no pending
1515  *  requests.
1516  **/
1517 s32 igb_disable_pcie_master(struct e1000_hw *hw)
1518 {
1519 	u32 ctrl;
1520 	s32 timeout = MASTER_DISABLE_TIMEOUT;
1521 	s32 ret_val = 0;
1522 
1523 	if (hw->bus.type != e1000_bus_type_pci_express)
1524 		goto out;
1525 
1526 	ctrl = rd32(E1000_CTRL);
1527 	ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
1528 	wr32(E1000_CTRL, ctrl);
1529 
1530 	while (timeout) {
1531 		if (!(rd32(E1000_STATUS) &
1532 		      E1000_STATUS_GIO_MASTER_ENABLE))
1533 			break;
1534 		udelay(100);
1535 		timeout--;
1536 	}
1537 
1538 	if (!timeout) {
1539 		hw_dbg("Master requests are pending.\n");
1540 		ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
1541 		goto out;
1542 	}
1543 
1544 out:
1545 	return ret_val;
1546 }
1547 
1548 /**
1549  *  igb_validate_mdi_setting - Verify MDI/MDIx settings
1550  *  @hw: pointer to the HW structure
1551  *
1552  *  Verify that when not using auto-negotiation, MDI/MDIX is correctly
1553  *  set; an invalid setting is forced back to MDI mode.
1554  **/
1555 s32 igb_validate_mdi_setting(struct e1000_hw *hw)
1556 {
1557 	s32 ret_val = 0;
1558 
1559 	/* All MDI settings are supported on 82580 and newer. */
1560 	if (hw->mac.type >= e1000_82580)
1561 		goto out;
1562 
1563 	if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
1564 		hw_dbg("Invalid MDI setting detected\n");
1565 		hw->phy.mdix = 1;
1566 		ret_val = -E1000_ERR_CONFIG;
1567 		goto out;
1568 	}
1569 
1570 out:
1571 	return ret_val;
1572 }
1573 
1574 /**
1575  *  igb_write_8bit_ctrl_reg - Write an 8-bit CTRL register
1576  *  @hw: pointer to the HW structure
1577  *  @reg: 32bit register offset such as E1000_SCTL
1578  *  @offset: register offset to write to
1579  *  @data: data to write at register offset
1580  *
1581  *  Writes an address/data control type register.  There are several of these
1582  *  and they all have the format address << 8 | data and bit 31 is polled for
1583  *  completion.
1584  **/
1585 s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
1586 			      u32 offset, u8 data)
1587 {
1588 	u32 i, regvalue = 0;
1589 	s32 ret_val = 0;
1590 
1591 	/* Set up the address and data */
1592 	regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
1593 	wr32(reg, regvalue);
1594 
1595 	/* Poll the ready bit to see if the write completed */
1596 	for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
1597 		udelay(5);
1598 		regvalue = rd32(reg);
1599 		if (regvalue & E1000_GEN_CTL_READY)
1600 			break;
1601 	}
1602 	if (!(regvalue & E1000_GEN_CTL_READY)) {
1603 		hw_dbg("Reg %08x did not indicate ready\n", reg);
1604 		ret_val = -E1000_ERR_PHY;
1605 		goto out;
1606 	}
1607 
1608 out:
1609 	return ret_val;
1610 }
1611 
1612 /**
1613  *  igb_enable_mng_pass_thru - Enable processing of ARPs
1614  *  @hw: pointer to the HW structure
1615  *
1616  *  Verifies that the hardware needs to leave the interface enabled so that
1617  *  frames can be directed to and from the management interface.
1618  **/
1619 bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
1620 {
1621 	u32 manc;
1622 	u32 fwsm, factps;
1623 	bool ret_val = false;
1624 
1625 	if (!hw->mac.asf_firmware_present)
1626 		goto out;
1627 
1628 	manc = rd32(E1000_MANC);
1629 
1630 	if (!(manc & E1000_MANC_RCV_TCO_EN))
1631 		goto out;
1632 
1633 	if (hw->mac.arc_subsystem_valid) {
1634 		fwsm = rd32(E1000_FWSM);
1635 		factps = rd32(E1000_FACTPS);
1636 
1637 		if (!(factps & E1000_FACTPS_MNGCG) &&
1638 		    ((fwsm & E1000_FWSM_MODE_MASK) ==
1639 		     (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
1640 			ret_val = true;
1641 			goto out;
1642 		}
1643 	} else {
1644 		if ((manc & E1000_MANC_SMBUS_EN) &&
1645 		    !(manc & E1000_MANC_ASF_EN)) {
1646 			ret_val = true;
1647 			goto out;
1648 		}
1649 	}
1650 
1651 out:
1652 	return ret_val;
1653 }
1654