1 /******************************************************************************
2 SPDX-License-Identifier: BSD-3-Clause
3
4 Copyright (c) 2001-2020, Intel Corporation
5 All rights reserved.
6
7 Redistribution and use in source and binary forms, with or without
8 modification, are permitted provided that the following conditions are met:
9
10 1. Redistributions of source code must retain the above copyright notice,
11 this list of conditions and the following disclaimer.
12
13 2. Redistributions in binary form must reproduce the above copyright
14 notice, this list of conditions and the following disclaimer in the
15 documentation and/or other materials provided with the distribution.
16
17 3. Neither the name of the Intel Corporation nor the names of its
18 contributors may be used to endorse or promote products derived from
19 this software without specific prior written permission.
20
21 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 POSSIBILITY OF SUCH DAMAGE.
32
33 ******************************************************************************/
34
35 #include "e1000_api.h"
36
37 static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw);
38 static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
39 static void e1000_config_collision_dist_generic(struct e1000_hw *hw);
40
41 /**
42 * e1000_init_mac_ops_generic - Initialize MAC function pointers
43 * @hw: pointer to the HW structure
44 *
45  * Sets up the function pointers to no-op functions
46 **/
47 void e1000_init_mac_ops_generic(struct e1000_hw *hw)
48 {
49 struct e1000_mac_info *mac = &hw->mac;
50 DEBUGFUNC("e1000_init_mac_ops_generic");
51
52 /* General Setup */
53 mac->ops.init_params = e1000_null_ops_generic;
54 mac->ops.init_hw = e1000_null_ops_generic;
55 mac->ops.reset_hw = e1000_null_ops_generic;
56 mac->ops.setup_physical_interface = e1000_null_ops_generic;
57 mac->ops.get_bus_info = e1000_null_ops_generic;
58 mac->ops.set_lan_id = e1000_set_lan_id_multi_port_pcie;
59 mac->ops.read_mac_addr = e1000_read_mac_addr_generic;
60 mac->ops.config_collision_dist = e1000_config_collision_dist_generic;
61 mac->ops.clear_hw_cntrs = e1000_null_mac_generic;
62 /* LED */
63 mac->ops.cleanup_led = e1000_null_ops_generic;
64 mac->ops.setup_led = e1000_null_ops_generic;
65 mac->ops.blink_led = e1000_null_ops_generic;
66 mac->ops.led_on = e1000_null_ops_generic;
67 mac->ops.led_off = e1000_null_ops_generic;
68 /* LINK */
69 mac->ops.setup_link = e1000_null_ops_generic;
70 mac->ops.get_link_up_info = e1000_null_link_info;
71 mac->ops.check_for_link = e1000_null_ops_generic;
72 mac->ops.set_obff_timer = e1000_null_set_obff_timer;
73 /* Management */
74 mac->ops.check_mng_mode = e1000_null_mng_mode;
75 /* VLAN, MC, etc. */
76 mac->ops.update_mc_addr_list = e1000_null_update_mc;
77 mac->ops.clear_vfta = e1000_null_mac_generic;
78 mac->ops.write_vfta = e1000_null_write_vfta;
79 mac->ops.rar_set = e1000_rar_set_generic;
80 mac->ops.validate_mdi_setting = e1000_validate_mdi_setting_generic;
81 }
82
83 /**
84 * e1000_null_ops_generic - No-op function, returns 0
85 * @hw: pointer to the HW structure
86 **/
87 s32 e1000_null_ops_generic(struct e1000_hw E1000_UNUSEDARG *hw)
88 {
89 DEBUGFUNC("e1000_null_ops_generic");
90 return E1000_SUCCESS;
91 }
92
93 /**
94 * e1000_null_mac_generic - No-op function, return void
95 * @hw: pointer to the HW structure
96 **/
97 void e1000_null_mac_generic(struct e1000_hw E1000_UNUSEDARG *hw)
98 {
99 DEBUGFUNC("e1000_null_mac_generic");
100 return;
101 }
102
103 /**
104 * e1000_null_link_info - No-op function, return 0
105 * @hw: pointer to the HW structure
106 * @s: dummy variable
107 * @d: dummy variable
108 **/
109 s32 e1000_null_link_info(struct e1000_hw E1000_UNUSEDARG *hw,
110 u16 E1000_UNUSEDARG *s, u16 E1000_UNUSEDARG *d)
111 {
112 DEBUGFUNC("e1000_null_link_info");
113 return E1000_SUCCESS;
114 }
115
116 /**
117 * e1000_null_mng_mode - No-op function, return false
118 * @hw: pointer to the HW structure
119 **/
120 bool e1000_null_mng_mode(struct e1000_hw E1000_UNUSEDARG *hw)
121 {
122 DEBUGFUNC("e1000_null_mng_mode");
123 return false;
124 }
125
126 /**
127 * e1000_null_update_mc - No-op function, return void
128 * @hw: pointer to the HW structure
129 * @h: dummy variable
130 * @a: dummy variable
131 **/
132 void e1000_null_update_mc(struct e1000_hw E1000_UNUSEDARG *hw,
133 u8 E1000_UNUSEDARG *h, u32 E1000_UNUSEDARG a)
134 {
135 DEBUGFUNC("e1000_null_update_mc");
136 return;
137 }
138
139 /**
140 * e1000_null_write_vfta - No-op function, return void
141 * @hw: pointer to the HW structure
142 * @a: dummy variable
143 * @b: dummy variable
144 **/
145 void e1000_null_write_vfta(struct e1000_hw E1000_UNUSEDARG *hw,
146 u32 E1000_UNUSEDARG a, u32 E1000_UNUSEDARG b)
147 {
148 DEBUGFUNC("e1000_null_write_vfta");
149 return;
150 }
151
152 /**
153 * e1000_null_rar_set - No-op function, return 0
154 * @hw: pointer to the HW structure
155 * @h: dummy variable
156 * @a: dummy variable
157 **/
158 int e1000_null_rar_set(struct e1000_hw E1000_UNUSEDARG *hw,
159 u8 E1000_UNUSEDARG *h, u32 E1000_UNUSEDARG a)
160 {
161 DEBUGFUNC("e1000_null_rar_set");
162 return E1000_SUCCESS;
163 }
164
165 /**
166 * e1000_null_set_obff_timer - No-op function, return 0
167 * @hw: pointer to the HW structure
168 **/
169 s32 e1000_null_set_obff_timer(struct e1000_hw E1000_UNUSEDARG *hw,
170 u32 E1000_UNUSEDARG a)
171 {
172 DEBUGFUNC("e1000_null_set_obff_timer");
173 return E1000_SUCCESS;
174 }
175
176 /**
177 * e1000_get_bus_info_pci_generic - Get PCI(x) bus information
178 * @hw: pointer to the HW structure
179 *
180 * Determines and stores the system bus information for a particular
181 * network interface. The following bus information is determined and stored:
182 * bus speed, bus width, type (PCI/PCIx), and PCI(-x) function.
183 **/
184 s32 e1000_get_bus_info_pci_generic(struct e1000_hw *hw)
185 {
186 struct e1000_mac_info *mac = &hw->mac;
187 struct e1000_bus_info *bus = &hw->bus;
188 u32 status = E1000_READ_REG(hw, E1000_STATUS);
189 s32 ret_val = E1000_SUCCESS;
190
191 DEBUGFUNC("e1000_get_bus_info_pci_generic");
192
193 /* PCI or PCI-X? */
194 bus->type = (status & E1000_STATUS_PCIX_MODE)
195 ? e1000_bus_type_pcix
196 : e1000_bus_type_pci;
197
198 /* Bus speed */
199 if (bus->type == e1000_bus_type_pci) {
200 bus->speed = (status & E1000_STATUS_PCI66)
201 ? e1000_bus_speed_66
202 : e1000_bus_speed_33;
203 } else {
204 switch (status & E1000_STATUS_PCIX_SPEED) {
205 case E1000_STATUS_PCIX_SPEED_66:
206 bus->speed = e1000_bus_speed_66;
207 break;
208 case E1000_STATUS_PCIX_SPEED_100:
209 bus->speed = e1000_bus_speed_100;
210 break;
211 case E1000_STATUS_PCIX_SPEED_133:
212 bus->speed = e1000_bus_speed_133;
213 break;
214 default:
215 bus->speed = e1000_bus_speed_reserved;
216 break;
217 }
218 }
219
220 /* Bus width */
221 bus->width = (status & E1000_STATUS_BUS64)
222 ? e1000_bus_width_64
223 : e1000_bus_width_32;
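	/* Example decode (hypothetical STATUS value): with PCIX_MODE set,
	 * PCIX_SPEED reporting 133 and BUS64 set, the fields above record a
	 * 64-bit, 133 MHz PCI-X bus.
	 */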
224
225 /* Which PCI(-X) function? */
226 mac->ops.set_lan_id(hw);
227
228 return ret_val;
229 }
230
231 /**
232 * e1000_get_bus_info_pcie_generic - Get PCIe bus information
233 * @hw: pointer to the HW structure
234 *
235 * Determines and stores the system bus information for a particular
236 * network interface. The following bus information is determined and stored:
237 * bus speed, bus width, type (PCIe), and PCIe function.
238 **/
239 s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw)
240 {
241 struct e1000_mac_info *mac = &hw->mac;
242 struct e1000_bus_info *bus = &hw->bus;
243 s32 ret_val;
244 u16 pcie_link_status;
245
246 DEBUGFUNC("e1000_get_bus_info_pcie_generic");
247
248 bus->type = e1000_bus_type_pci_express;
249
250 ret_val = e1000_read_pcie_cap_reg(hw, PCIE_LINK_STATUS,
251 &pcie_link_status);
252 if (ret_val) {
253 bus->width = e1000_bus_width_unknown;
254 bus->speed = e1000_bus_speed_unknown;
255 } else {
256 switch (pcie_link_status & PCIE_LINK_SPEED_MASK) {
257 case PCIE_LINK_SPEED_2500:
258 bus->speed = e1000_bus_speed_2500;
259 break;
260 case PCIE_LINK_SPEED_5000:
261 bus->speed = e1000_bus_speed_5000;
262 break;
263 default:
264 bus->speed = e1000_bus_speed_unknown;
265 break;
266 }
267
268 bus->width = (enum e1000_bus_width)((pcie_link_status &
269 PCIE_LINK_WIDTH_MASK) >> PCIE_LINK_WIDTH_SHIFT);
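		/* The negotiated width field maps directly onto the
		 * e1000_bus_width enum, e.g. a link-status width field of 4
		 * yields e1000_bus_width_pcie_x4 (an assumption based on the
		 * usual PCIE_LINK_WIDTH_MASK/enum definitions).
		 */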
270 }
271
272 mac->ops.set_lan_id(hw);
273
274 return E1000_SUCCESS;
275 }
276
277 /**
278 * e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
279 *
280 * @hw: pointer to the HW structure
281 *
282 * Determines the LAN function id by reading memory-mapped registers
283 * and swaps the port value if requested.
284 **/
285 static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw)
286 {
287 struct e1000_bus_info *bus = &hw->bus;
288 u32 reg;
289
290 /* The status register reports the correct function number
291 * for the device regardless of function swap state.
292 */
293 reg = E1000_READ_REG(hw, E1000_STATUS);
294 bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
295 }
296
297 /**
298 * e1000_set_lan_id_multi_port_pci - Set LAN id for PCI multiple port devices
299 * @hw: pointer to the HW structure
300 *
301 * Determines the LAN function id by reading PCI config space.
302 **/
303 void e1000_set_lan_id_multi_port_pci(struct e1000_hw *hw)
304 {
305 struct e1000_bus_info *bus = &hw->bus;
306 u16 pci_header_type;
307 u32 status;
308
309 e1000_read_pci_cfg(hw, PCI_HEADER_TYPE_REGISTER, &pci_header_type);
310 if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) {
311 status = E1000_READ_REG(hw, E1000_STATUS);
312 bus->func = (status & E1000_STATUS_FUNC_MASK)
313 >> E1000_STATUS_FUNC_SHIFT;
314 } else {
315 bus->func = 0;
316 }
317 }
318
319 /**
320 * e1000_set_lan_id_single_port - Set LAN id for a single port device
321 * @hw: pointer to the HW structure
322 *
323 * Sets the LAN function id to zero for a single port device.
324 **/
325 void e1000_set_lan_id_single_port(struct e1000_hw *hw)
326 {
327 struct e1000_bus_info *bus = &hw->bus;
328
329 bus->func = 0;
330 }
331
332 /**
333 * e1000_clear_vfta_generic - Clear VLAN filter table
334 * @hw: pointer to the HW structure
335 *
336 * Clears the register array which contains the VLAN filter table by
337 * setting all the values to 0.
338 **/
339 void e1000_clear_vfta_generic(struct e1000_hw *hw)
340 {
341 u32 offset;
342
343 DEBUGFUNC("e1000_clear_vfta_generic");
344
345 for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
346 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0);
347 E1000_WRITE_FLUSH(hw);
348 }
349 }
350
351 /**
352 * e1000_write_vfta_generic - Write value to VLAN filter table
353 * @hw: pointer to the HW structure
354 * @offset: register offset in VLAN filter table
355 * @value: register value written to VLAN filter table
356 *
357 * Writes value at the given offset in the register array which stores
358 * the VLAN filter table.
359 **/
360 void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value)
361 {
362 DEBUGFUNC("e1000_write_vfta_generic");
363
364 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
365 E1000_WRITE_FLUSH(hw);
366 }
367
368 /**
369  * e1000_init_rx_addrs_generic - Initialize receive addresses
370 * @hw: pointer to the HW structure
371 * @rar_count: receive address registers
372 *
373  * Set up the receive address registers by setting the base receive address
374  * register to the device's MAC address and clearing all the other receive
375 * address registers to 0.
376 **/
377 void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count)
378 {
379 u32 i;
380 u8 mac_addr[ETHER_ADDR_LEN] = {0};
381
382 DEBUGFUNC("e1000_init_rx_addrs_generic");
383
384 /* Setup the receive address */
385 DEBUGOUT("Programming MAC Address into RAR[0]\n");
386
387 hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
388
389 /* Zero out the other (rar_entry_count - 1) receive addresses */
390 DEBUGOUT1("Clearing RAR[1-%u]\n", rar_count-1);
391 for (i = 1; i < rar_count; i++)
392 hw->mac.ops.rar_set(hw, mac_addr, i);
393 }
394
395 /**
396 * e1000_check_alt_mac_addr_generic - Check for alternate MAC addr
397 * @hw: pointer to the HW structure
398 *
399 * Checks the nvm for an alternate MAC address. An alternate MAC address
400  * can be set up by pre-boot software and must be treated like a permanent
401  * address, overriding the actual permanent MAC address. If an
402 * alternate MAC address is found it is programmed into RAR0, replacing
403 * the permanent address that was installed into RAR0 by the Si on reset.
404 * This function will return SUCCESS unless it encounters an error while
405 * reading the EEPROM.
406 **/
407 s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
408 {
409 u32 i;
410 s32 ret_val;
411 u16 offset, nvm_alt_mac_addr_offset, nvm_data;
412 u8 alt_mac_addr[ETHER_ADDR_LEN];
413
414 DEBUGFUNC("e1000_check_alt_mac_addr_generic");
415
416 ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &nvm_data);
417 if (ret_val)
418 return ret_val;
419
420 /* not supported on older hardware or 82573 */
421 if ((hw->mac.type < e1000_82571) || (hw->mac.type == e1000_82573))
422 return E1000_SUCCESS;
423
424 /* Alternate MAC address is handled by the option ROM for 82580
425 * and newer. SW support not required.
426 */
427 if (hw->mac.type >= e1000_82580)
428 return E1000_SUCCESS;
429
430 ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
431 &nvm_alt_mac_addr_offset);
432 if (ret_val) {
433 DEBUGOUT("NVM Read Error\n");
434 return ret_val;
435 }
436
437 if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
438 (nvm_alt_mac_addr_offset == 0x0000))
439 /* There is no Alternate MAC Address */
440 return E1000_SUCCESS;
441
442 if (hw->bus.func == E1000_FUNC_1)
443 nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
444 if (hw->bus.func == E1000_FUNC_2)
445 nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2;
446
447 if (hw->bus.func == E1000_FUNC_3)
448 nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3;
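	/* Each 16-bit NVM word read below supplies two bytes of the
	 * alternate address, low byte first.
	 */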
449 for (i = 0; i < ETHER_ADDR_LEN; i += 2) {
450 offset = nvm_alt_mac_addr_offset + (i >> 1);
451 ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
452 if (ret_val) {
453 DEBUGOUT("NVM Read Error\n");
454 return ret_val;
455 }
456
457 alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
458 alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
459 }
460
461 /* if multicast bit is set, the alternate address will not be used */
462 if (alt_mac_addr[0] & 0x01) {
463 DEBUGOUT("Ignoring Alternate Mac Address with MC bit set\n");
464 return E1000_SUCCESS;
465 }
466
467 /* We have a valid alternate MAC address, and we want to treat it the
468 * same as the normal permanent MAC address stored by the HW into the
469 * RAR. Do this by mapping this address into RAR0.
470 */
471 hw->mac.ops.rar_set(hw, alt_mac_addr, 0);
472
473 return E1000_SUCCESS;
474 }
475
476 /**
477 * e1000_rar_set_generic - Set receive address register
478 * @hw: pointer to the HW structure
479 * @addr: pointer to the receive address
480 * @index: receive address array register
481 *
482 * Sets the receive address array register at index to the address passed
483 * in by addr.
484 **/
485 int e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
486 {
487 u32 rar_low, rar_high;
488
489 DEBUGFUNC("e1000_rar_set_generic");
490
491 /* HW expects these in little endian so we reverse the byte order
492 * from network order (big endian) to little endian
493 */
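	/* Worked example (hypothetical address 00:1B:21:12:34:56):
	 * rar_low = 0x12211B00 (addr[0] in the least significant byte) and
	 * rar_high = 0x00005634 before the AV bit is OR'd in below.
	 */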
494 rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
495 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
496
497 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
498
499 /* If MAC address zero, no need to set the AV bit */
500 if (rar_low || rar_high)
501 rar_high |= E1000_RAH_AV;
502
503 /* Some bridges will combine consecutive 32-bit writes into
504 * a single burst write, which will malfunction on some parts.
505 * The flushes avoid this.
506 */
507 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
508 E1000_WRITE_FLUSH(hw);
509 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
510 E1000_WRITE_FLUSH(hw);
511
512 return E1000_SUCCESS;
513 }
514
515 /**
516 * e1000_hash_mc_addr_generic - Generate a multicast hash value
517 * @hw: pointer to the HW structure
518 * @mc_addr: pointer to a multicast address
519 *
520 * Generates a multicast address hash value which is used to determine
521 * the multicast filter table array address and new table value.
522 **/
523 u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr)
524 {
525 u32 hash_value, hash_mask;
526 u8 bit_shift = 0;
527
528 DEBUGFUNC("e1000_hash_mc_addr_generic");
529
530 /* Register count multiplied by bits per register */
531 hash_mask = (hw->mac.mta_reg_count * 32) - 1;
532
533 /* For a mc_filter_type of 0, bit_shift is the number of left-shifts
534 * where 0xFF would still fall within the hash mask.
535 */
536 while (hash_mask >> bit_shift != 0xFF)
537 bit_shift++;
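	/* e.g. with 128 MTA registers, hash_mask is 0xFFF and the loop above
	 * leaves bit_shift at 4, matching the worked example below.
	 */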
538
539 /* The portion of the address that is used for the hash table
540 * is determined by the mc_filter_type setting.
541 * The algorithm is such that there is a total of 8 bits of shifting.
542 * The bit_shift for a mc_filter_type of 0 represents the number of
543 * left-shifts where the MSB of mc_addr[5] would still fall within
544 * the hash_mask. Case 0 does this exactly. Since there are a total
545 * of 8 bits of shifting, then mc_addr[4] will shift right the
546 * remaining number of bits. Thus 8 - bit_shift. The rest of the
547 * cases are a variation of this algorithm...essentially raising the
548 * number of bits to shift mc_addr[5] left, while still keeping the
549 * 8-bit shifting total.
550 *
551 * For example, given the following Destination MAC Address and an
552 * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
553 * we can see that the bit_shift for case 0 is 4. These are the hash
554 * values resulting from each mc_filter_type...
555 * [0] [1] [2] [3] [4] [5]
556 * 01 AA 00 12 34 56
557 * LSB MSB
558 *
559 * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
560 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
561 * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
562 * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
563 */
564 switch (hw->mac.mc_filter_type) {
565 default:
566 case 0:
567 break;
568 case 1:
569 bit_shift += 1;
570 break;
571 case 2:
572 bit_shift += 2;
573 break;
574 case 3:
575 bit_shift += 4;
576 break;
577 }
578
579 hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
580 (((u16) mc_addr[5]) << bit_shift)));
581
582 return hash_value;
583 }
584
585 /**
586 * e1000_update_mc_addr_list_generic - Update Multicast addresses
587 * @hw: pointer to the HW structure
588 * @mc_addr_list: array of multicast addresses to program
589 * @mc_addr_count: number of multicast addresses to program
590 *
591 * Updates entire Multicast Table Array.
592 * The caller must have a packed mc_addr_list of multicast addresses.
593 **/
594 void e1000_update_mc_addr_list_generic(struct e1000_hw *hw,
595 u8 *mc_addr_list, u32 mc_addr_count)
596 {
597 u32 hash_value, hash_bit, hash_reg;
598 int i;
599
600 DEBUGFUNC("e1000_update_mc_addr_list_generic");
601
602 /* clear mta_shadow */
603 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
604
605 /* update mta_shadow from mc_addr_list */
606 for (i = 0; (u32) i < mc_addr_count; i++) {
607 hash_value = e1000_hash_mc_addr_generic(hw, mc_addr_list);
608
609 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
610 hash_bit = hash_value & 0x1F;
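		/* e.g. the case-0 hash 0x563 from the comment in
		 * e1000_hash_mc_addr_generic() lands in MTA register 43
		 * (0x563 >> 5, masked by the register count) as bit 3
		 * (0x563 & 0x1F).
		 */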
611
612 hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
613 mc_addr_list += (ETHER_ADDR_LEN);
614 }
615
616 /* replace the entire MTA table */
617 for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
618 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]);
619 E1000_WRITE_FLUSH(hw);
620 }
621
622 /**
623 * e1000_pcix_mmrbc_workaround_generic - Fix incorrect MMRBC value
624 * @hw: pointer to the HW structure
625 *
626 * In certain situations, a system BIOS may report that the PCIx maximum
627  * memory read byte count (MMRBC) value is higher than the actual
628  * value. We check the PCIx command register against the current PCIx
629  * status register and lower the MMRBC if needed.
630 **/
631 void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw)
632 {
633 u16 cmd_mmrbc;
634 u16 pcix_cmd;
635 u16 pcix_stat_hi_word;
636 u16 stat_mmrbc;
637
638 DEBUGFUNC("e1000_pcix_mmrbc_workaround_generic");
639
640 /* Workaround for PCI-X issue when BIOS sets MMRBC incorrectly */
641 if (hw->bus.type != e1000_bus_type_pcix)
642 return;
643
644 e1000_read_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd);
645 e1000_read_pci_cfg(hw, PCIX_STATUS_REGISTER_HI, &pcix_stat_hi_word);
646 cmd_mmrbc = (pcix_cmd & PCIX_COMMAND_MMRBC_MASK) >>
647 PCIX_COMMAND_MMRBC_SHIFT;
648 stat_mmrbc = (pcix_stat_hi_word & PCIX_STATUS_HI_MMRBC_MASK) >>
649 PCIX_STATUS_HI_MMRBC_SHIFT;
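	/* A reported maximum of 4K is treated as 2K so the command register
	 * is never programmed above 2K.
	 */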
650 if (stat_mmrbc == PCIX_STATUS_HI_MMRBC_4K)
651 stat_mmrbc = PCIX_STATUS_HI_MMRBC_2K;
652 if (cmd_mmrbc > stat_mmrbc) {
653 pcix_cmd &= ~PCIX_COMMAND_MMRBC_MASK;
654 pcix_cmd |= stat_mmrbc << PCIX_COMMAND_MMRBC_SHIFT;
655 e1000_write_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd);
656 }
657 }
658
659 /**
660 * e1000_clear_hw_cntrs_base_generic - Clear base hardware counters
661 * @hw: pointer to the HW structure
662 *
663 * Clears the base hardware counters by reading the counter registers.
664 **/
665 void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw)
666 {
667 DEBUGFUNC("e1000_clear_hw_cntrs_base_generic");
668
669 E1000_READ_REG(hw, E1000_CRCERRS);
670 E1000_READ_REG(hw, E1000_SYMERRS);
671 E1000_READ_REG(hw, E1000_MPC);
672 E1000_READ_REG(hw, E1000_SCC);
673 E1000_READ_REG(hw, E1000_ECOL);
674 E1000_READ_REG(hw, E1000_MCC);
675 E1000_READ_REG(hw, E1000_LATECOL);
676 E1000_READ_REG(hw, E1000_COLC);
677 E1000_READ_REG(hw, E1000_DC);
678 E1000_READ_REG(hw, E1000_SEC);
679 E1000_READ_REG(hw, E1000_RLEC);
680 E1000_READ_REG(hw, E1000_XONRXC);
681 E1000_READ_REG(hw, E1000_XONTXC);
682 E1000_READ_REG(hw, E1000_XOFFRXC);
683 E1000_READ_REG(hw, E1000_XOFFTXC);
684 E1000_READ_REG(hw, E1000_FCRUC);
685 E1000_READ_REG(hw, E1000_GPRC);
686 E1000_READ_REG(hw, E1000_BPRC);
687 E1000_READ_REG(hw, E1000_MPRC);
688 E1000_READ_REG(hw, E1000_GPTC);
689 E1000_READ_REG(hw, E1000_GORCL);
690 E1000_READ_REG(hw, E1000_GORCH);
691 E1000_READ_REG(hw, E1000_GOTCL);
692 E1000_READ_REG(hw, E1000_GOTCH);
693 E1000_READ_REG(hw, E1000_RNBC);
694 E1000_READ_REG(hw, E1000_RUC);
695 E1000_READ_REG(hw, E1000_RFC);
696 E1000_READ_REG(hw, E1000_ROC);
697 E1000_READ_REG(hw, E1000_RJC);
698 E1000_READ_REG(hw, E1000_TORL);
699 E1000_READ_REG(hw, E1000_TORH);
700 E1000_READ_REG(hw, E1000_TOTL);
701 E1000_READ_REG(hw, E1000_TOTH);
702 E1000_READ_REG(hw, E1000_TPR);
703 E1000_READ_REG(hw, E1000_TPT);
704 E1000_READ_REG(hw, E1000_MPTC);
705 E1000_READ_REG(hw, E1000_BPTC);
706 }
707
708 /**
709 * e1000_check_for_copper_link_generic - Check for link (Copper)
710 * @hw: pointer to the HW structure
711 *
712  * Checks to see if the link status of the hardware has changed. If a
713 * change in link status has been detected, then we read the PHY registers
714 * to get the current speed/duplex if link exists.
715 **/
716 s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw)
717 {
718 struct e1000_mac_info *mac = &hw->mac;
719 s32 ret_val;
720 bool link;
721
722 DEBUGFUNC("e1000_check_for_copper_link");
723
724 /* We only want to go out to the PHY registers to see if Auto-Neg
725 * has completed and/or if our link status has changed. The
726 * get_link_status flag is set upon receiving a Link Status
727 * Change or Rx Sequence Error interrupt.
728 */
729 if (!mac->get_link_status)
730 return E1000_SUCCESS;
731
732 /* First we want to see if the MII Status Register reports
733 * link. If so, then we want to get the current speed/duplex
734 * of the PHY.
735 */
736 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
737 if (ret_val)
738 return ret_val;
739
740 if (!link)
741 return E1000_SUCCESS; /* No link detected */
742
743 mac->get_link_status = false;
744
745 /* Check if there was DownShift, must be checked
746 * immediately after link-up
747 */
748 e1000_check_downshift_generic(hw);
749
750 /* If we are forcing speed/duplex, then we simply return since
751 * we have already determined whether we have link or not.
752 */
753 if (!mac->autoneg)
754 return -E1000_ERR_CONFIG;
755
756 /* Auto-Neg is enabled. Auto Speed Detection takes care
757 * of MAC speed/duplex configuration. So we only need to
758 * configure Collision Distance in the MAC.
759 */
760 mac->ops.config_collision_dist(hw);
761
762 /* Configure Flow Control now that Auto-Neg has completed.
763 * First, we need to restore the desired flow control
764 * settings because we may have had to re-autoneg with a
765 * different link partner.
766 */
767 ret_val = e1000_config_fc_after_link_up_generic(hw);
768 if (ret_val)
769 DEBUGOUT("Error configuring flow control\n");
770
771 return ret_val;
772 }
773
774 /**
775 * e1000_check_for_fiber_link_generic - Check for link (Fiber)
776 * @hw: pointer to the HW structure
777 *
778 * Checks for link up on the hardware. If link is not up and we have
779 * a signal, then we need to force link up.
780 **/
781 s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw)
782 {
783 struct e1000_mac_info *mac = &hw->mac;
784 u32 rxcw;
785 u32 ctrl;
786 u32 status;
787 s32 ret_val;
788
789 DEBUGFUNC("e1000_check_for_fiber_link_generic");
790
791 ctrl = E1000_READ_REG(hw, E1000_CTRL);
792 status = E1000_READ_REG(hw, E1000_STATUS);
793 rxcw = E1000_READ_REG(hw, E1000_RXCW);
794
795 /* If we don't have link (auto-negotiation failed or link partner
796 * cannot auto-negotiate), the cable is plugged in (we have signal),
797 * and our link partner is not trying to auto-negotiate with us (we
798 * are receiving idles or data), we need to force link up. We also
799 * need to give auto-negotiation time to complete, in case the cable
800 * was just plugged in. The autoneg_failed flag does this.
801 */
802 /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
803 if ((ctrl & E1000_CTRL_SWDPIN1) && !(status & E1000_STATUS_LU) &&
804 !(rxcw & E1000_RXCW_C)) {
805 if (!mac->autoneg_failed) {
806 mac->autoneg_failed = true;
807 return E1000_SUCCESS;
808 }
809 DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
810
811 /* Disable auto-negotiation in the TXCW register */
812 E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE));
813
814 /* Force link-up and also force full-duplex. */
815 ctrl = E1000_READ_REG(hw, E1000_CTRL);
816 ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
817 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
818
819 /* Configure Flow Control after forcing link up. */
820 ret_val = e1000_config_fc_after_link_up_generic(hw);
821 if (ret_val) {
822 DEBUGOUT("Error configuring flow control\n");
823 return ret_val;
824 }
825 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
826 /* If we are forcing link and we are receiving /C/ ordered
827 * sets, re-enable auto-negotiation in the TXCW register
828 * and disable forced link in the Device Control register
829 * in an attempt to auto-negotiate with our link partner.
830 */
831 DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
832 E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
833 E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU));
834
835 mac->serdes_has_link = true;
836 }
837
838 return E1000_SUCCESS;
839 }
840
841 /**
842 * e1000_check_for_serdes_link_generic - Check for link (Serdes)
843 * @hw: pointer to the HW structure
844 *
845 * Checks for link up on the hardware. If link is not up and we have
846 * a signal, then we need to force link up.
847 **/
848 s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
849 {
850 struct e1000_mac_info *mac = &hw->mac;
851 u32 rxcw;
852 u32 ctrl;
853 u32 status;
854 s32 ret_val;
855
856 DEBUGFUNC("e1000_check_for_serdes_link_generic");
857
858 ctrl = E1000_READ_REG(hw, E1000_CTRL);
859 status = E1000_READ_REG(hw, E1000_STATUS);
860 rxcw = E1000_READ_REG(hw, E1000_RXCW);
861
862 /* If we don't have link (auto-negotiation failed or link partner
863 * cannot auto-negotiate), and our link partner is not trying to
864 * auto-negotiate with us (we are receiving idles or data),
865 * we need to force link up. We also need to give auto-negotiation
866 * time to complete.
867 */
868 /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
869 if (!(status & E1000_STATUS_LU) && !(rxcw & E1000_RXCW_C)) {
870 if (!mac->autoneg_failed) {
871 mac->autoneg_failed = true;
872 return E1000_SUCCESS;
873 }
874 DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
875
876 /* Disable auto-negotiation in the TXCW register */
877 E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE));
878
879 /* Force link-up and also force full-duplex. */
880 ctrl = E1000_READ_REG(hw, E1000_CTRL);
881 ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
882 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
883
884 /* Configure Flow Control after forcing link up. */
885 ret_val = e1000_config_fc_after_link_up_generic(hw);
886 if (ret_val) {
887 DEBUGOUT("Error configuring flow control\n");
888 return ret_val;
889 }
890 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
891 /* If we are forcing link and we are receiving /C/ ordered
892 * sets, re-enable auto-negotiation in the TXCW register
893 * and disable forced link in the Device Control register
894 * in an attempt to auto-negotiate with our link partner.
895 */
896 DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
897 E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
898 E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU));
899
900 mac->serdes_has_link = true;
901 } else if (!(E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW))) {
902 /* If we force link for non-auto-negotiation switch, check
903 * link status based on MAC synchronization for internal
904 * serdes media type.
905 */
906 /* SYNCH bit and IV bit are sticky. */
907 usec_delay(10);
908 rxcw = E1000_READ_REG(hw, E1000_RXCW);
909 if (rxcw & E1000_RXCW_SYNCH) {
910 if (!(rxcw & E1000_RXCW_IV)) {
911 mac->serdes_has_link = true;
912 DEBUGOUT("SERDES: Link up - forced.\n");
913 }
914 } else {
915 mac->serdes_has_link = false;
916 DEBUGOUT("SERDES: Link down - force failed.\n");
917 }
918 }
919
920 if (E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW)) {
921 status = E1000_READ_REG(hw, E1000_STATUS);
922 if (status & E1000_STATUS_LU) {
923 /* SYNCH bit and IV bit are sticky, so reread rxcw. */
924 usec_delay(10);
925 rxcw = E1000_READ_REG(hw, E1000_RXCW);
926 if (rxcw & E1000_RXCW_SYNCH) {
927 if (!(rxcw & E1000_RXCW_IV)) {
928 mac->serdes_has_link = true;
929 DEBUGOUT("SERDES: Link up - autoneg completed successfully.\n");
930 } else {
931 mac->serdes_has_link = false;
932 DEBUGOUT("SERDES: Link down - invalid codewords detected in autoneg.\n");
933 }
934 } else {
935 mac->serdes_has_link = false;
936 DEBUGOUT("SERDES: Link down - no sync.\n");
937 }
938 } else {
939 mac->serdes_has_link = false;
940 DEBUGOUT("SERDES: Link down - autoneg failed\n");
941 }
942 }
943
944 return E1000_SUCCESS;
945 }
946
947 /**
948 * e1000_set_default_fc_generic - Set flow control default values
949 * @hw: pointer to the HW structure
950 *
951 * Read the EEPROM for the default values for flow control and store the
952 * values.
953 **/
954 s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
955 {
956 s32 ret_val;
957 u16 nvm_data;
958 u16 nvm_offset = 0;
959
960 DEBUGFUNC("e1000_set_default_fc_generic");
961
962 /* Read and store word 0x0F of the EEPROM. This word contains bits
963 * that determine the hardware's default PAUSE (flow control) mode,
964 * a bit that determines whether the HW defaults to enabling or
965 * disabling auto-negotiation, and the direction of the
966 * SW defined pins. If there is no SW over-ride of the flow
967 * control setting, then the variable hw->fc will
968 * be initialized based on a value in the EEPROM.
969 */
970 if (hw->mac.type == e1000_i350) {
971 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func);
972 ret_val = hw->nvm.ops.read(hw,
973 NVM_INIT_CONTROL2_REG +
974 nvm_offset,
975 1, &nvm_data);
976 } else {
977 ret_val = hw->nvm.ops.read(hw,
978 NVM_INIT_CONTROL2_REG,
979 1, &nvm_data);
980 }
981
982
983 if (ret_val) {
984 DEBUGOUT("NVM Read Error\n");
985 return ret_val;
986 }
987
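	/* Decode the PAUSE bits: neither bit set selects no flow control,
	 * ASM_DIR alone selects Tx-only pause, anything else selects full
	 * (symmetric) flow control.
	 */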
988 if (!(nvm_data & NVM_WORD0F_PAUSE_MASK))
989 hw->fc.requested_mode = e1000_fc_none;
990 else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
991 NVM_WORD0F_ASM_DIR)
992 hw->fc.requested_mode = e1000_fc_tx_pause;
993 else
994 hw->fc.requested_mode = e1000_fc_full;
995
996 return E1000_SUCCESS;
997 }
998
999 /**
1000 * e1000_setup_link_generic - Setup flow control and link settings
1001 * @hw: pointer to the HW structure
1002 *
1003 * Determines which flow control settings to use, then configures flow
1004 * control. Calls the appropriate media-specific link configuration
1005 * function. Assuming the adapter has a valid link partner, a valid link
1006 * should be established. Assumes the hardware has previously been reset
1007 * and the transmitter and receiver are not enabled.
1008 **/
1009 s32 e1000_setup_link_generic(struct e1000_hw *hw)
1010 {
1011 s32 ret_val;
1012
1013 DEBUGFUNC("e1000_setup_link_generic");
1014
1015 /* In the case of the phy reset being blocked, we already have a link.
1016 * We do not need to set it up again.
1017 */
1018 if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
1019 return E1000_SUCCESS;
1020
1021 /* If requested flow control is set to default, set flow control
1022 * based on the EEPROM flow control settings.
1023 */
1024 if (hw->fc.requested_mode == e1000_fc_default) {
1025 ret_val = e1000_set_default_fc_generic(hw);
1026 if (ret_val)
1027 return ret_val;
1028 }
1029
1030 /* Save off the requested flow control mode for use later. Depending
1031 * on the link partner's capabilities, we may or may not use this mode.
1032 */
1033 hw->fc.current_mode = hw->fc.requested_mode;
1034
1035 DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
1036 hw->fc.current_mode);
1037
1038 /* Call the necessary media_type subroutine to configure the link. */
1039 ret_val = hw->mac.ops.setup_physical_interface(hw);
1040 if (ret_val)
1041 return ret_val;
1042
1043 /* Initialize the flow control address, type, and PAUSE timer
1044 * registers to their default values. This is done even if flow
1045 * control is disabled, because it does not hurt anything to
1046 * initialize these registers.
1047 */
1048 DEBUGOUT("Initializing the Flow Control address, type and timer regs\n");
1049 E1000_WRITE_REG(hw, E1000_FCT, FLOW_CONTROL_TYPE);
1050 E1000_WRITE_REG(hw, E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
1051 E1000_WRITE_REG(hw, E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);
1052
1053 E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
1054
1055 return e1000_set_fc_watermarks_generic(hw);
1056 }
1057
1058 /**
1059 * e1000_commit_fc_settings_generic - Configure flow control
1060 * @hw: pointer to the HW structure
1061 *
1062 * Write the flow control settings to the Transmit Config Word Register (TXCW)
1063  * based on the flow control settings in e1000_mac_info.
1064 **/
1065 s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
1066 {
1067 struct e1000_mac_info *mac = &hw->mac;
1068 u32 txcw;
1069
1070 DEBUGFUNC("e1000_commit_fc_settings_generic");
1071
1072 /* Check for a software override of the flow control settings, and
1073 * setup the device accordingly. If auto-negotiation is enabled, then
1074 * software will have to set the "PAUSE" bits to the correct value in
1075 * the Transmit Config Word Register (TXCW) and re-start auto-
1076 * negotiation. However, if auto-negotiation is disabled, then
1077 * software will have to manually configure the two flow control enable
1078 * bits in the CTRL register.
1079 *
1080 * The possible values of the "fc" parameter are:
1081 * 0: Flow control is completely disabled
1082 * 1: Rx flow control is enabled (we can receive pause frames,
1083 * but not send pause frames).
1084 * 2: Tx flow control is enabled (we can send pause frames but we
1085 * do not support receiving pause frames).
1086 * 3: Both Rx and Tx flow control (symmetric) are enabled.
1087 */
1088 switch (hw->fc.current_mode) {
1089 case e1000_fc_none:
1090 /* Flow control completely disabled by a software over-ride. */
1091 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
1092 break;
1093 case e1000_fc_rx_pause:
1094 /* Rx Flow control is enabled and Tx Flow control is disabled
1095 * by a software over-ride. Since there really isn't a way to
1096 * advertise that we are capable of Rx Pause ONLY, we will
1097 * advertise that we support both symmetric and asymmetric Rx
1098 * PAUSE. Later, we will disable the adapter's ability to send
1099 * PAUSE frames.
1100 */
1101 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
1102 break;
1103 case e1000_fc_tx_pause:
1104 /* Tx Flow control is enabled, and Rx Flow control is disabled,
1105 * by a software over-ride.
1106 */
1107 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
1108 break;
1109 case e1000_fc_full:
1110 /* Flow control (both Rx and Tx) is enabled by a software
1111 * over-ride.
1112 */
1113 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
1114 break;
1115 default:
1116 DEBUGOUT("Flow control param set incorrectly\n");
1117 return -E1000_ERR_CONFIG;
1118 break;
1119 }
1120
1121 E1000_WRITE_REG(hw, E1000_TXCW, txcw);
1122 mac->txcw = txcw;
1123
1124 return E1000_SUCCESS;
1125 }
1126
1127 /**
1128 * e1000_poll_fiber_serdes_link_generic - Poll for link up
1129 * @hw: pointer to the HW structure
1130 *
1131 * Polls for link up by reading the status register, if link fails to come
1132 * up with auto-negotiation, then the link is forced if a signal is detected.
1133 **/
1134 s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
1135 {
1136 struct e1000_mac_info *mac = &hw->mac;
1137 u32 i, status;
1138 s32 ret_val;
1139
1140 DEBUGFUNC("e1000_poll_fiber_serdes_link_generic");
1141
1142 /* If we have a signal (the cable is plugged in, or assumed true for
1143 * serdes media) then poll for a "Link-Up" indication in the Device
1144 * Status Register. Time-out if a link isn't seen in 500 milliseconds
1145 	 * (Auto-negotiation should complete in less than 500
1146 * milliseconds even if the other end is doing it in SW).
1147 */
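	/* FIBER_LINK_UP_LIMIT iterations of 10 ms implement the ~500 ms
	 * budget described above (assuming the usual definition of 50).
	 */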
1148 for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
1149 msec_delay(10);
1150 status = E1000_READ_REG(hw, E1000_STATUS);
1151 if (status & E1000_STATUS_LU)
1152 break;
1153 }
1154 if (i == FIBER_LINK_UP_LIMIT) {
1155 DEBUGOUT("Never got a valid link from auto-neg!!!\n");
1156 mac->autoneg_failed = true;
1157 /* AutoNeg failed to achieve a link, so we'll call
1158 * mac->check_for_link. This routine will force the
1159 * link up if we detect a signal. This will allow us to
1160 * communicate with non-autonegotiating link partners.
1161 */
1162 ret_val = mac->ops.check_for_link(hw);
1163 if (ret_val) {
1164 DEBUGOUT("Error while checking for link\n");
1165 return ret_val;
1166 }
1167 mac->autoneg_failed = false;
1168 } else {
1169 mac->autoneg_failed = false;
1170 DEBUGOUT("Valid Link Found\n");
1171 }
1172
1173 return E1000_SUCCESS;
1174 }
1175
1176 /**
1177 * e1000_setup_fiber_serdes_link_generic - Setup link for fiber/serdes
1178 * @hw: pointer to the HW structure
1179 *
1180 * Configures collision distance and flow control for fiber and serdes
1181 * links. Upon successful setup, poll for link.
1182 **/
1183 s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw)
1184 {
1185 u32 ctrl;
1186 s32 ret_val;
1187
1188 DEBUGFUNC("e1000_setup_fiber_serdes_link_generic");
1189
1190 ctrl = E1000_READ_REG(hw, E1000_CTRL);
1191
1192 /* Take the link out of reset */
1193 ctrl &= ~E1000_CTRL_LRST;
1194
1195 hw->mac.ops.config_collision_dist(hw);
1196
1197 ret_val = e1000_commit_fc_settings_generic(hw);
1198 if (ret_val)
1199 return ret_val;
1200
1201 /* Since auto-negotiation is enabled, take the link out of reset (the
1202 * link will be in reset, because we previously reset the chip). This
1203 * will restart auto-negotiation. If auto-negotiation is successful
1204 * then the link-up status bit will be set and the flow control enable
1205 * bits (RFCE and TFCE) will be set according to their negotiated value.
1206 */
1207 DEBUGOUT("Auto-negotiation enabled\n");
1208
1209 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
1210 E1000_WRITE_FLUSH(hw);
1211 msec_delay(1);
1212
1213 /* For these adapters, the SW definable pin 1 is set when the optics
1214 * detect a signal. If we have a signal, then poll for a "Link-Up"
1215 * indication.
1216 */
1217 if (hw->phy.media_type == e1000_media_type_internal_serdes ||
1218 (E1000_READ_REG(hw, E1000_CTRL) & E1000_CTRL_SWDPIN1)) {
1219 ret_val = e1000_poll_fiber_serdes_link_generic(hw);
1220 } else {
1221 DEBUGOUT("No signal detected\n");
1222 }
1223
1224 return ret_val;
1225 }
1226
1227 /**
1228 * e1000_config_collision_dist_generic - Configure collision distance
1229 * @hw: pointer to the HW structure
1230 *
1231 * Configures the collision distance to the default value and is used
1232 * during link setup.
1233 **/
1234 static void e1000_config_collision_dist_generic(struct e1000_hw *hw)
1235 {
1236 u32 tctl;
1237
1238 DEBUGFUNC("e1000_config_collision_dist_generic");
1239
1240 tctl = E1000_READ_REG(hw, E1000_TCTL);
1241
1242 tctl &= ~E1000_TCTL_COLD;
1243 tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
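	/* E1000_COLLISION_DISTANCE shifted into the COLD field gives the
	 * default collision distance (63 byte times, assuming the usual
	 * define).
	 */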
1244
1245 E1000_WRITE_REG(hw, E1000_TCTL, tctl);
1246 E1000_WRITE_FLUSH(hw);
1247 }
1248
1249 /**
1250 * e1000_set_fc_watermarks_generic - Set flow control high/low watermarks
1251 * @hw: pointer to the HW structure
1252 *
1253 * Sets the flow control high/low threshold (watermark) registers. If
1254 * flow control XON frame transmission is enabled, then set XON frame
1255 * transmission as well.
1256 **/
1257 s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw)
1258 {
1259 u32 fcrtl = 0, fcrth = 0;
1260
1261 DEBUGFUNC("e1000_set_fc_watermarks_generic");
1262
1263 /* Set the flow control receive threshold registers. Normally,
1264 * these registers will be set to a default threshold that may be
1265 * adjusted later by the driver's runtime code. However, if the
1266 * ability to transmit pause frames is not enabled, then these
1267 * registers will be set to 0.
1268 */
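	/* Note: this test relies on the e1000_fc_* enumeration encoding Tx
	 * pause in bit 1, so both e1000_fc_tx_pause and e1000_fc_full pass
	 * the check below (an assumption based on the usual enum layout).
	 */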
1269 if (hw->fc.current_mode & e1000_fc_tx_pause) {
1270 /* We need to set up the Receive Threshold high and low water
1271 * marks as well as (optionally) enabling the transmission of
1272 * XON frames.
1273 */
1274 fcrtl = hw->fc.low_water;
1275 if (hw->fc.send_xon)
1276 fcrtl |= E1000_FCRTL_XONE;
1277
1278 fcrth = hw->fc.high_water;
1279 }
1280 E1000_WRITE_REG(hw, E1000_FCRTL, fcrtl);
1281 E1000_WRITE_REG(hw, E1000_FCRTH, fcrth);
1282
1283 return E1000_SUCCESS;
1284 }
1285
1286 /**
1287 * e1000_force_mac_fc_generic - Force the MAC's flow control settings
1288 * @hw: pointer to the HW structure
1289 *
1290 * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the
1291 * device control register to reflect the adapter settings. TFCE and RFCE
1292 * need to be explicitly set by software when a copper PHY is used because
1293 * autonegotiation is managed by the PHY rather than the MAC. Software must
1294 * also configure these bits when link is forced on a fiber connection.
1295 **/
1296 s32 e1000_force_mac_fc_generic(struct e1000_hw *hw)
1297 {
1298 u32 ctrl;
1299
1300 DEBUGFUNC("e1000_force_mac_fc_generic");
1301
1302 ctrl = E1000_READ_REG(hw, E1000_CTRL);
1303
1304 /* Because we didn't get link via the internal auto-negotiation
1305 * mechanism (we either forced link or we got link via PHY
1306 	 * auto-neg), we have to manually enable/disable transmit and
1307 * receive flow control.
1308 *
1309 * The "Case" statement below enables/disable flow control
1310 * according to the "hw->fc.current_mode" parameter.
1311 *
1312 * The possible values of the "fc" parameter are:
1313 * 0: Flow control is completely disabled
1314 * 1: Rx flow control is enabled (we can receive pause
1315 * frames but not send pause frames).
1316 * 2: Tx flow control is enabled (we can send pause frames
1317 	 *          but we do not receive pause frames).
1318 * 3: Both Rx and Tx flow control (symmetric) is enabled.
1319 * other: No other values should be possible at this point.
1320 */
1321 DEBUGOUT1("hw->fc.current_mode = %u\n", hw->fc.current_mode);
1322
1323 switch (hw->fc.current_mode) {
1324 case e1000_fc_none:
1325 ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
1326 break;
1327 case e1000_fc_rx_pause:
1328 ctrl &= (~E1000_CTRL_TFCE);
1329 ctrl |= E1000_CTRL_RFCE;
1330 break;
1331 case e1000_fc_tx_pause:
1332 ctrl &= (~E1000_CTRL_RFCE);
1333 ctrl |= E1000_CTRL_TFCE;
1334 break;
1335 case e1000_fc_full:
1336 ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
1337 break;
1338 default:
1339 DEBUGOUT("Flow control param set incorrectly\n");
1340 return -E1000_ERR_CONFIG;
1341 }
1342
1343 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
1344
1345 return E1000_SUCCESS;
1346 }
1347
1348 /**
1349 * e1000_config_fc_after_link_up_generic - Configures flow control after link
1350 * @hw: pointer to the HW structure
1351 *
1352 * Checks the status of auto-negotiation after link up to ensure that the
1353 * speed and duplex were not forced. If the link needed to be forced, then
1354 * flow control needs to be forced also. If auto-negotiation is enabled
1355 * and did not fail, then we configure flow control based on our link
1356 * partner.
1357 **/
1358 s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw)
1359 {
1360 struct e1000_mac_info *mac = &hw->mac;
1361 s32 ret_val = E1000_SUCCESS;
1362 u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg;
1363 u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
1364 u16 speed, duplex;
1365
1366 DEBUGFUNC("e1000_config_fc_after_link_up_generic");
1367
1368 /* Check for the case where we have fiber media and auto-neg failed
1369 * so we had to force link. In this case, we need to force the
1370 * configuration of the MAC to match the "fc" parameter.
1371 */
1372 if (mac->autoneg_failed) {
1373 if (hw->phy.media_type == e1000_media_type_fiber ||
1374 hw->phy.media_type == e1000_media_type_internal_serdes)
1375 ret_val = e1000_force_mac_fc_generic(hw);
1376 } else {
1377 if (hw->phy.media_type == e1000_media_type_copper)
1378 ret_val = e1000_force_mac_fc_generic(hw);
1379 }
1380
1381 if (ret_val) {
1382 DEBUGOUT("Error forcing flow control settings\n");
1383 return ret_val;
1384 }
1385
1386 /* Check for the case where we have copper media and auto-neg is
1387 * enabled. In this case, we need to check and see if Auto-Neg
1388 * has completed, and if so, how the PHY and link partner has
1389 * flow control configured.
1390 */
1391 if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
1392 /* Read the MII Status Register and check to see if AutoNeg
1393 * has completed. We read this twice because this reg has
1394 * some "sticky" (latched) bits.
1395 */
1396 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg);
1397 if (ret_val)
1398 return ret_val;
1399 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg);
1400 if (ret_val)
1401 return ret_val;
1402
1403 if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
1404 DEBUGOUT("Copper PHY and Auto Neg has not completed.\n");
1405 return ret_val;
1406 }
1407
1408 /* The AutoNeg process has completed, so we now need to
1409 * read both the Auto Negotiation Advertisement
1410 * Register (Address 4) and the Auto_Negotiation Base
1411 * Page Ability Register (Address 5) to determine how
1412 * flow control was negotiated.
1413 */
1414 ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
1415 &mii_nway_adv_reg);
1416 if (ret_val)
1417 return ret_val;
1418 ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
1419 &mii_nway_lp_ability_reg);
1420 if (ret_val)
1421 return ret_val;
1422
1423 /* Two bits in the Auto Negotiation Advertisement Register
1424 * (Address 4) and two bits in the Auto Negotiation Base
1425 * Page Ability Register (Address 5) determine flow control
1426 * for both the PHY and the link partner. The following
1427 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
1428 * 1999, describes these PAUSE resolution bits and how flow
1429 * control is determined based upon these settings.
1430 * NOTE: DC = Don't Care
1431 *
1432 * LOCAL DEVICE | LINK PARTNER
1433 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
1434 *-------|---------|-------|---------|--------------------
1435 * 0 | 0 | DC | DC | e1000_fc_none
1436 * 0 | 1 | 0 | DC | e1000_fc_none
1437 * 0 | 1 | 1 | 0 | e1000_fc_none
1438 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
1439 * 1 | 0 | 0 | DC | e1000_fc_none
1440 * 1 | DC | 1 | DC | e1000_fc_full
1441 * 1 | 1 | 0 | 0 | e1000_fc_none
1442 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
1443 *
1444 * Are both PAUSE bits set to 1? If so, this implies
1445 * Symmetric Flow Control is enabled at both ends. The
1446 * ASM_DIR bits are irrelevant per the spec.
1447 *
1448 * For Symmetric Flow Control:
1449 *
1450 * LOCAL DEVICE | LINK PARTNER
1451 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1452 *-------|---------|-------|---------|--------------------
1453 * 1 | DC | 1 | DC | E1000_fc_full
1454 *
1455 */
1456 if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
1457 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
1458 /* Now we need to check if the user selected Rx ONLY
1459 * of pause frames. In this case, we had to advertise
1460 * FULL flow control because we could not advertise Rx
1461 * ONLY. Hence, we must now check to see if we need to
1462 * turn OFF the TRANSMISSION of PAUSE frames.
1463 */
1464 if (hw->fc.requested_mode == e1000_fc_full) {
1465 hw->fc.current_mode = e1000_fc_full;
1466 DEBUGOUT("Flow Control = FULL.\n");
1467 } else {
1468 hw->fc.current_mode = e1000_fc_rx_pause;
1469 DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
1470 }
1471 }
1472 /* For receiving PAUSE frames ONLY.
1473 *
1474 * LOCAL DEVICE | LINK PARTNER
1475 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1476 *-------|---------|-------|---------|--------------------
1477 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
1478 */
1479 else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
1480 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
1481 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
1482 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
1483 hw->fc.current_mode = e1000_fc_tx_pause;
1484 DEBUGOUT("Flow Control = Tx PAUSE frames only.\n");
1485 }
1486 /* For transmitting PAUSE frames ONLY.
1487 *
1488 * LOCAL DEVICE | LINK PARTNER
1489 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1490 *-------|---------|-------|---------|--------------------
1491 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
1492 */
1493 else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
1494 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
1495 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
1496 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
1497 hw->fc.current_mode = e1000_fc_rx_pause;
1498 DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
1499 } else {
1500 /* Per the IEEE spec, at this point flow control
1501 * should be disabled.
1502 */
1503 hw->fc.current_mode = e1000_fc_none;
1504 DEBUGOUT("Flow Control = NONE.\n");
1505 }
1506
1507 /* Now we need to do one last check... If we auto-
1508 * negotiated to HALF DUPLEX, flow control should not be
1509 * enabled per IEEE 802.3 spec.
1510 */
1511 ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
1512 if (ret_val) {
1513 DEBUGOUT("Error getting link speed and duplex\n");
1514 return ret_val;
1515 }
1516
1517 if (duplex == HALF_DUPLEX)
1518 hw->fc.current_mode = e1000_fc_none;
1519
1520 /* Now we call a subroutine to actually force the MAC
1521 * controller to use the correct flow control settings.
1522 */
1523 ret_val = e1000_force_mac_fc_generic(hw);
1524 if (ret_val) {
1525 DEBUGOUT("Error forcing flow control settings\n");
1526 return ret_val;
1527 }
1528 }
1529
1530 /* Check for the case where we have SerDes media and auto-neg is
1531 * enabled. In this case, we need to check and see if Auto-Neg
1532 * has completed, and if so, how the PHY and link partner has
1533 * flow control configured.
1534 */
1535 if ((hw->phy.media_type == e1000_media_type_internal_serdes) &&
1536 mac->autoneg) {
1537 /* Read the PCS_LSTS and check to see if AutoNeg
1538 * has completed.
1539 */
1540 pcs_status_reg = E1000_READ_REG(hw, E1000_PCS_LSTAT);
1541
1542 if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
1543 DEBUGOUT("PCS Auto Neg has not completed.\n");
1544 return ret_val;
1545 }
1546
1547 /* The AutoNeg process has completed, so we now need to
1548 * read both the Auto Negotiation Advertisement
1549 * Register (PCS_ANADV) and the Auto_Negotiation Base
1550 * Page Ability Register (PCS_LPAB) to determine how
1551 * flow control was negotiated.
1552 */
1553 pcs_adv_reg = E1000_READ_REG(hw, E1000_PCS_ANADV);
1554 pcs_lp_ability_reg = E1000_READ_REG(hw, E1000_PCS_LPAB);
1555
1556 /* Two bits in the Auto Negotiation Advertisement Register
1557 * (PCS_ANADV) and two bits in the Auto Negotiation Base
1558 * Page Ability Register (PCS_LPAB) determine flow control
1559 * for both the PHY and the link partner. The following
1560 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
1561 * 1999, describes these PAUSE resolution bits and how flow
1562 * control is determined based upon these settings.
1563 * NOTE: DC = Don't Care
1564 *
1565 * LOCAL DEVICE | LINK PARTNER
1566 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
1567 *-------|---------|-------|---------|--------------------
1568 * 0 | 0 | DC | DC | e1000_fc_none
1569 * 0 | 1 | 0 | DC | e1000_fc_none
1570 * 0 | 1 | 1 | 0 | e1000_fc_none
1571 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
1572 * 1 | 0 | 0 | DC | e1000_fc_none
1573 * 1 | DC | 1 | DC | e1000_fc_full
1574 * 1 | 1 | 0 | 0 | e1000_fc_none
1575 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
1576 *
1577 * Are both PAUSE bits set to 1? If so, this implies
1578 * Symmetric Flow Control is enabled at both ends. The
1579 * ASM_DIR bits are irrelevant per the spec.
1580 *
1581 * For Symmetric Flow Control:
1582 *
1583 * LOCAL DEVICE | LINK PARTNER
1584 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1585 *-------|---------|-------|---------|--------------------
1586 * 1 | DC | 1 | DC | e1000_fc_full
1587 *
1588 */
1589 if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
1590 (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) {
1591 			/* Now we need to check if the user selected Rx-only
1592 			 * PAUSE frames. In this case, we had to advertise
1593 * FULL flow control because we could not advertise Rx
1594 * ONLY. Hence, we must now check to see if we need to
1595 * turn OFF the TRANSMISSION of PAUSE frames.
1596 */
1597 if (hw->fc.requested_mode == e1000_fc_full) {
1598 hw->fc.current_mode = e1000_fc_full;
1599 DEBUGOUT("Flow Control = FULL.\n");
1600 } else {
1601 hw->fc.current_mode = e1000_fc_rx_pause;
1602 DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
1603 }
1604 }
1605 		/* For transmitting PAUSE frames ONLY (link partner honors PAUSE).
1606 *
1607 * LOCAL DEVICE | LINK PARTNER
1608 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1609 *-------|---------|-------|---------|--------------------
1610 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
1611 */
1612 else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) &&
1613 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
1614 (pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
1615 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
1616 hw->fc.current_mode = e1000_fc_tx_pause;
1617 DEBUGOUT("Flow Control = Tx PAUSE frames only.\n");
1618 }
1619 		/* For receiving PAUSE frames ONLY (link partner sends PAUSE).
1620 *
1621 * LOCAL DEVICE | LINK PARTNER
1622 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1623 *-------|---------|-------|---------|--------------------
1624 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
1625 */
1626 else if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
1627 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
1628 !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
1629 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
1630 hw->fc.current_mode = e1000_fc_rx_pause;
1631 DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
1632 } else {
1633 /* Per the IEEE spec, at this point flow control
1634 * should be disabled.
1635 */
1636 hw->fc.current_mode = e1000_fc_none;
1637 DEBUGOUT("Flow Control = NONE.\n");
1638 }
1639
1640 /* Now we call a subroutine to actually force the MAC
1641 * controller to use the correct flow control settings.
1642 */
1643 pcs_ctrl_reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
1644 pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
1645 E1000_WRITE_REG(hw, E1000_PCS_LCTL, pcs_ctrl_reg);
1646
1647 ret_val = e1000_force_mac_fc_generic(hw);
1648 if (ret_val) {
1649 DEBUGOUT("Error forcing flow control settings\n");
1650 return ret_val;
1651 }
1652 }
1653
1654 return E1000_SUCCESS;
1655 }
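
/* A minimal, illustrative sketch (kept out of the build with #if 0) of the
 * PAUSE/ASM_DIR resolution tables used in the function above.  It assumes
 * the e1000_fc_mode enumeration from the hardware header, and it omits the
 * extra step above that downgrades a symmetric result to Rx-only when the
 * user did not request full flow control.
 */
#if 0	/* illustrative sketch only */
static enum e1000_fc_mode
example_resolve_pause(bool local_pause, bool local_asm_dir,
		      bool lp_pause, bool lp_asm_dir)
{
	/* Both sides advertise PAUSE: symmetric flow control. */
	if (local_pause && lp_pause)
		return e1000_fc_full;

	/* We advertise ASM_DIR only, partner advertises PAUSE + ASM_DIR:
	 * we may send PAUSE frames but will not honor received ones.
	 */
	if (!local_pause && local_asm_dir && lp_pause && lp_asm_dir)
		return e1000_fc_tx_pause;

	/* We advertise PAUSE + ASM_DIR, partner advertises ASM_DIR only:
	 * we honor received PAUSE frames but do not send them.
	 */
	if (local_pause && local_asm_dir && !lp_pause && lp_asm_dir)
		return e1000_fc_rx_pause;

	/* Every other combination resolves to no flow control. */
	return e1000_fc_none;
}
#endif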
1656
1657 /**
1658 * e1000_get_speed_and_duplex_copper_generic - Retrieve current speed/duplex
1659 * @hw: pointer to the HW structure
1660 * @speed: stores the current speed
1661 * @duplex: stores the current duplex
1662 *
1663 * Read the status register for the current speed/duplex and store the current
1664 * speed and duplex for copper connections.
1665 **/
1666 s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed,
1667 u16 *duplex)
1668 {
1669 u32 status;
1670
1671 DEBUGFUNC("e1000_get_speed_and_duplex_copper_generic");
1672
1673 status = E1000_READ_REG(hw, E1000_STATUS);
1674 if (status & E1000_STATUS_SPEED_1000) {
1675 *speed = SPEED_1000;
1676 		DEBUGOUT("1000 Mbps, ");
1677 } else if (status & E1000_STATUS_SPEED_100) {
1678 *speed = SPEED_100;
1679 		DEBUGOUT("100 Mbps, ");
1680 } else {
1681 *speed = SPEED_10;
1682 		DEBUGOUT("10 Mbps, ");
1683 }
1684
1685 if (status & E1000_STATUS_FD) {
1686 *duplex = FULL_DUPLEX;
1687 DEBUGOUT("Full Duplex\n");
1688 } else {
1689 *duplex = HALF_DUPLEX;
1690 DEBUGOUT("Half Duplex\n");
1691 }
1692
1693 return E1000_SUCCESS;
1694 }
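
/* A hedged usage sketch (not compiled): callers normally reach this routine
 * through the mac.ops.get_link_up_info pointer, as the flow control code
 * above does; the wrapper name below is hypothetical.
 */
#if 0	/* illustrative sketch only */
static void example_report_copper_link(struct e1000_hw *hw)
{
	u16 speed, duplex;

	if (hw->mac.ops.get_link_up_info(hw, &speed, &duplex) !=
	    E1000_SUCCESS)
		return;

	DEBUGOUT1("Link speed: %u Mbps\n", (u32)speed);
	DEBUGOUT1("Duplex: %s\n", duplex == FULL_DUPLEX ? "full" : "half");
}
#endif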
1695
1696 /**
1697  * e1000_get_speed_and_duplex_fiber_serdes_generic - Retrieve current speed/duplex
1698 * @hw: pointer to the HW structure
1699 * @speed: stores the current speed
1700 * @duplex: stores the current duplex
1701 *
1702 * Sets the speed and duplex to gigabit full duplex (the only possible option)
1703 * for fiber/serdes links.
1704 **/
1705 s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw E1000_UNUSEDARG *hw,
1706 u16 *speed, u16 *duplex)
1707 {
1708 DEBUGFUNC("e1000_get_speed_and_duplex_fiber_serdes_generic");
1709
1710 *speed = SPEED_1000;
1711 *duplex = FULL_DUPLEX;
1712
1713 return E1000_SUCCESS;
1714 }
1715
1716 /**
1717 * e1000_get_auto_rd_done_generic - Check for auto read completion
1718 * @hw: pointer to the HW structure
1719 *
1720 * Check EEPROM for Auto Read done bit.
1721 **/
1722 s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw)
1723 {
1724 s32 i = 0;
1725
1726 DEBUGFUNC("e1000_get_auto_rd_done_generic");
1727
1728 while (i < AUTO_READ_DONE_TIMEOUT) {
1729 if (E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_AUTO_RD)
1730 break;
1731 msec_delay(1);
1732 i++;
1733 }
1734
1735 if (i == AUTO_READ_DONE_TIMEOUT) {
1736 DEBUGOUT("Auto read by HW from NVM has not completed.\n");
1737 return -E1000_ERR_RESET;
1738 }
1739
1740 return E1000_SUCCESS;
1741 }
1742
1743 /**
1744 * e1000_valid_led_default_generic - Verify a valid default LED config
1745 * @hw: pointer to the HW structure
1746  * @data: pointer to the returned default LED configuration word (from NVM)
1747 *
1748 * Read the EEPROM for the current default LED configuration. If the
1749 * LED configuration is not valid, set to a valid LED configuration.
1750 **/
1751 s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data)
1752 {
1753 s32 ret_val;
1754
1755 DEBUGFUNC("e1000_valid_led_default_generic");
1756
1757 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
1758 if (ret_val) {
1759 DEBUGOUT("NVM Read Error\n");
1760 return ret_val;
1761 }
1762
1763 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
1764 *data = ID_LED_DEFAULT;
1765
1766 return E1000_SUCCESS;
1767 }
1768
1769 /**
1770  * e1000_id_led_init_generic - Derive LEDCTL values from the NVM LED settings
1771 * @hw: pointer to the HW structure
1772 *
1773 **/
1774 s32 e1000_id_led_init_generic(struct e1000_hw *hw)
1775 {
1776 struct e1000_mac_info *mac = &hw->mac;
1777 s32 ret_val;
1778 const u32 ledctl_mask = 0x000000FF;
1779 const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
1780 const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
1781 u16 data, i, temp;
1782 const u16 led_mask = 0x0F;
1783
1784 DEBUGFUNC("e1000_id_led_init_generic");
1785
1786 ret_val = hw->nvm.ops.valid_led_default(hw, &data);
1787 if (ret_val)
1788 return ret_val;
1789
1790 mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
1791 mac->ledctl_mode1 = mac->ledctl_default;
1792 mac->ledctl_mode2 = mac->ledctl_default;
1793
1794 for (i = 0; i < 4; i++) {
1795 temp = (data >> (i << 2)) & led_mask;
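		/* LED i is described by NVM word bits [4*i+3:4*i], i.e.
		 * (i << 2) selects the nibble, while (i << 3) below selects
		 * the matching 8-bit field of LEDCTL.
		 */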
1796 switch (temp) {
1797 case ID_LED_ON1_DEF2:
1798 case ID_LED_ON1_ON2:
1799 case ID_LED_ON1_OFF2:
1800 mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
1801 mac->ledctl_mode1 |= ledctl_on << (i << 3);
1802 break;
1803 case ID_LED_OFF1_DEF2:
1804 case ID_LED_OFF1_ON2:
1805 case ID_LED_OFF1_OFF2:
1806 mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
1807 mac->ledctl_mode1 |= ledctl_off << (i << 3);
1808 break;
1809 default:
1810 /* Do nothing */
1811 break;
1812 }
1813 switch (temp) {
1814 case ID_LED_DEF1_ON2:
1815 case ID_LED_ON1_ON2:
1816 case ID_LED_OFF1_ON2:
1817 mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
1818 mac->ledctl_mode2 |= ledctl_on << (i << 3);
1819 break;
1820 case ID_LED_DEF1_OFF2:
1821 case ID_LED_ON1_OFF2:
1822 case ID_LED_OFF1_OFF2:
1823 mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
1824 mac->ledctl_mode2 |= ledctl_off << (i << 3);
1825 break;
1826 default:
1827 /* Do nothing */
1828 break;
1829 }
1830 }
1831
1832 return E1000_SUCCESS;
1833 }
1834
1835 /**
1836 * e1000_setup_led_generic - Configures SW controllable LED
1837 * @hw: pointer to the HW structure
1838 *
1839 * This prepares the SW controllable LED for use and saves the current state
1840 * of the LED so it can be later restored.
1841 **/
1842 s32 e1000_setup_led_generic(struct e1000_hw *hw)
1843 {
1844 u32 ledctl;
1845
1846 DEBUGFUNC("e1000_setup_led_generic");
1847
1848 if (hw->mac.ops.setup_led != e1000_setup_led_generic)
1849 return -E1000_ERR_CONFIG;
1850
1851 if (hw->phy.media_type == e1000_media_type_fiber) {
1852 ledctl = E1000_READ_REG(hw, E1000_LEDCTL);
1853 hw->mac.ledctl_default = ledctl;
1854 /* Turn off LED0 */
1855 ledctl &= ~(E1000_LEDCTL_LED0_IVRT | E1000_LEDCTL_LED0_BLINK |
1856 E1000_LEDCTL_LED0_MODE_MASK);
1857 ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
1858 E1000_LEDCTL_LED0_MODE_SHIFT);
1859 E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl);
1860 } else if (hw->phy.media_type == e1000_media_type_copper) {
1861 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
1862 }
1863
1864 return E1000_SUCCESS;
1865 }
1866
1867 /**
1868 * e1000_cleanup_led_generic - Set LED config to default operation
1869 * @hw: pointer to the HW structure
1870 *
1871 * Remove the current LED configuration and set the LED configuration
1872 * to the default value, saved from the EEPROM.
1873 **/
1874 s32 e1000_cleanup_led_generic(struct e1000_hw *hw)
1875 {
1876 DEBUGFUNC("e1000_cleanup_led_generic");
1877
1878 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
1879 return E1000_SUCCESS;
1880 }
1881
1882 /**
1883 * e1000_blink_led_generic - Blink LED
1884 * @hw: pointer to the HW structure
1885 *
1886 * Blink the LEDs which are set to be on.
1887 **/
1888 s32 e1000_blink_led_generic(struct e1000_hw *hw)
1889 {
1890 u32 ledctl_blink = 0;
1891 u32 i;
1892
1893 DEBUGFUNC("e1000_blink_led_generic");
1894
1895 if (hw->phy.media_type == e1000_media_type_fiber) {
1896 /* always blink LED0 for PCI-E fiber */
1897 ledctl_blink = E1000_LEDCTL_LED0_BLINK |
1898 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
1899 } else {
1900 /* Set the blink bit for each LED that's "on" (0x0E)
1901 * (or "off" if inverted) in ledctl_mode2. The blink
1902 * logic in hardware only works when mode is set to "on"
1903 * so it must be changed accordingly when the mode is
1904 * "off" and inverted.
1905 */
1906 ledctl_blink = hw->mac.ledctl_mode2;
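		/* LEDCTL holds one 8-bit configuration field per LED, so
		 * walk the register a byte at a time.
		 */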
1907 for (i = 0; i < 32; i += 8) {
1908 u32 mode = (hw->mac.ledctl_mode2 >> i) &
1909 E1000_LEDCTL_LED0_MODE_MASK;
1910 u32 led_default = hw->mac.ledctl_default >> i;
1911
1912 if ((!(led_default & E1000_LEDCTL_LED0_IVRT) &&
1913 (mode == E1000_LEDCTL_MODE_LED_ON)) ||
1914 ((led_default & E1000_LEDCTL_LED0_IVRT) &&
1915 (mode == E1000_LEDCTL_MODE_LED_OFF))) {
1916 ledctl_blink &=
1917 ~(E1000_LEDCTL_LED0_MODE_MASK << i);
1918 ledctl_blink |= (E1000_LEDCTL_LED0_BLINK |
1919 E1000_LEDCTL_MODE_LED_ON) << i;
1920 }
1921 }
1922 }
1923
1924 E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl_blink);
1925
1926 return E1000_SUCCESS;
1927 }
1928
1929 /**
1930 * e1000_led_on_generic - Turn LED on
1931 * @hw: pointer to the HW structure
1932 *
1933 * Turn LED on.
1934 **/
1935 s32 e1000_led_on_generic(struct e1000_hw *hw)
1936 {
1937 u32 ctrl;
1938
1939 DEBUGFUNC("e1000_led_on_generic");
1940
1941 switch (hw->phy.media_type) {
1942 case e1000_media_type_fiber:
1943 ctrl = E1000_READ_REG(hw, E1000_CTRL);
1944 ctrl &= ~E1000_CTRL_SWDPIN0;
1945 ctrl |= E1000_CTRL_SWDPIO0;
1946 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
1947 break;
1948 case e1000_media_type_copper:
1949 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
1950 break;
1951 default:
1952 break;
1953 }
1954
1955 return E1000_SUCCESS;
1956 }
1957
1958 /**
1959 * e1000_led_off_generic - Turn LED off
1960 * @hw: pointer to the HW structure
1961 *
1962 * Turn LED off.
1963 **/
1964 s32 e1000_led_off_generic(struct e1000_hw *hw)
1965 {
1966 u32 ctrl;
1967
1968 DEBUGFUNC("e1000_led_off_generic");
1969
1970 switch (hw->phy.media_type) {
1971 case e1000_media_type_fiber:
1972 ctrl = E1000_READ_REG(hw, E1000_CTRL);
1973 ctrl |= E1000_CTRL_SWDPIN0;
1974 ctrl |= E1000_CTRL_SWDPIO0;
1975 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
1976 break;
1977 case e1000_media_type_copper:
1978 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
1979 break;
1980 default:
1981 break;
1982 }
1983
1984 return E1000_SUCCESS;
1985 }
1986
1987 /**
1988 * e1000_set_pcie_no_snoop_generic - Set PCI-express capabilities
1989 * @hw: pointer to the HW structure
1990  * @no_snoop: bitmap of events for which snooping should be disabled
1991  *
1992  * Set the PCI-express (GCR) no-snoop bits for the events enabled in 'no_snoop'.
1993 **/
1994 void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop)
1995 {
1996 u32 gcr;
1997
1998 DEBUGFUNC("e1000_set_pcie_no_snoop_generic");
1999
2000 if (hw->bus.type != e1000_bus_type_pci_express)
2001 return;
2002
2003 if (no_snoop) {
2004 gcr = E1000_READ_REG(hw, E1000_GCR);
2005 gcr &= ~(PCIE_NO_SNOOP_ALL);
2006 gcr |= no_snoop;
2007 E1000_WRITE_REG(hw, E1000_GCR, gcr);
2008 }
2009 }
2010
2011 /**
2012 * e1000_disable_pcie_master_generic - Disables PCI-express master access
2013 * @hw: pointer to the HW structure
2014 *
2015  * Returns E1000_SUCCESS if successful, or -E1000_ERR_MASTER_REQUESTS_PENDING
2016  * (-10) if setting the master disable bit did not cause all pending master
2017  * requests to complete within the timeout.
2018 *
2019 * Disables PCI-Express master access and verifies there are no pending
2020 * requests.
2021 **/
2022 s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw)
2023 {
2024 u32 ctrl;
2025 s32 timeout = MASTER_DISABLE_TIMEOUT;
2026
2027 DEBUGFUNC("e1000_disable_pcie_master_generic");
2028
2029 if (hw->bus.type != e1000_bus_type_pci_express)
2030 return E1000_SUCCESS;
2031
2032 ctrl = E1000_READ_REG(hw, E1000_CTRL);
2033 ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
2034 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
2035
2036 while (timeout) {
2037 if (!(E1000_READ_REG(hw, E1000_STATUS) &
2038 E1000_STATUS_GIO_MASTER_ENABLE) ||
2039 E1000_REMOVED(hw->hw_addr))
2040 break;
2041 usec_delay(100);
2042 timeout--;
2043 }
2044
2045 if (!timeout) {
2046 DEBUGOUT("Master requests are pending.\n");
2047 return -E1000_ERR_MASTER_REQUESTS_PENDING;
2048 }
2049
2050 return E1000_SUCCESS;
2051 }
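
/* A hedged usage sketch (not compiled): reset paths typically quiesce
 * bus-master activity with this helper before issuing a device reset; the
 * wrapper name below is hypothetical.
 */
#if 0	/* illustrative sketch only */
static void example_quiesce_before_reset(struct e1000_hw *hw)
{
	/* A timeout only means requests are still pending; callers may
	 * log it and continue with the reset.
	 */
	if (e1000_disable_pcie_master_generic(hw))
		DEBUGOUT("PCI-E master disable polling timed out.\n");
}
#endif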
2052
2053 /**
2054 * e1000_reset_adaptive_generic - Reset Adaptive Interframe Spacing
2055 * @hw: pointer to the HW structure
2056 *
2057 * Reset the Adaptive Interframe Spacing throttle to default values.
2058 **/
2059 void e1000_reset_adaptive_generic(struct e1000_hw *hw)
2060 {
2061 struct e1000_mac_info *mac = &hw->mac;
2062
2063 DEBUGFUNC("e1000_reset_adaptive_generic");
2064
2065 if (!mac->adaptive_ifs) {
2066 DEBUGOUT("Not in Adaptive IFS mode!\n");
2067 return;
2068 }
2069
2070 mac->current_ifs_val = 0;
2071 mac->ifs_min_val = IFS_MIN;
2072 mac->ifs_max_val = IFS_MAX;
2073 mac->ifs_step_size = IFS_STEP;
2074 mac->ifs_ratio = IFS_RATIO;
2075
2076 mac->in_ifs_mode = false;
2077 E1000_WRITE_REG(hw, E1000_AIT, 0);
2078 }
2079
2080 /**
2081 * e1000_update_adaptive_generic - Update Adaptive Interframe Spacing
2082 * @hw: pointer to the HW structure
2083 *
2084 * Update the Adaptive Interframe Spacing Throttle value based on the
2085 * time between transmitted packets and time between collisions.
2086 **/
2087 void e1000_update_adaptive_generic(struct e1000_hw *hw)
2088 {
2089 struct e1000_mac_info *mac = &hw->mac;
2090
2091 DEBUGFUNC("e1000_update_adaptive_generic");
2092
2093 if (!mac->adaptive_ifs) {
2094 DEBUGOUT("Not in Adaptive IFS mode!\n");
2095 return;
2096 }
2097
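	/* Widen the interframe spacing (AIT) while collisions are high
	 * relative to transmitted traffic (collision_delta * ifs_ratio
	 * exceeding tx_packet_delta); reset it to the default once the
	 * ratio recovers and transmit activity falls to MIN_NUM_XMITS
	 * or fewer packets.
	 */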
2098 if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
2099 if (mac->tx_packet_delta > MIN_NUM_XMITS) {
2100 mac->in_ifs_mode = true;
2101 if (mac->current_ifs_val < mac->ifs_max_val) {
2102 if (!mac->current_ifs_val)
2103 mac->current_ifs_val = mac->ifs_min_val;
2104 else
2105 mac->current_ifs_val +=
2106 mac->ifs_step_size;
2107 E1000_WRITE_REG(hw, E1000_AIT,
2108 mac->current_ifs_val);
2109 }
2110 }
2111 } else {
2112 if (mac->in_ifs_mode &&
2113 (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
2114 mac->current_ifs_val = 0;
2115 mac->in_ifs_mode = false;
2116 E1000_WRITE_REG(hw, E1000_AIT, 0);
2117 }
2118 }
2119 }
2120
2121 /**
2122 * e1000_validate_mdi_setting_generic - Verify MDI/MDIx settings
2123 * @hw: pointer to the HW structure
2124 *
2125  * Verify that, when auto-negotiation is not in use, the MDI/MDIx setting is
2126  * valid; forced operation supports MDI mode only.
2127 **/
2128 static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw)
2129 {
2130 DEBUGFUNC("e1000_validate_mdi_setting_generic");
2131
2132 if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
2133 DEBUGOUT("Invalid MDI setting detected\n");
2134 hw->phy.mdix = 1;
2135 return -E1000_ERR_CONFIG;
2136 }
2137
2138 return E1000_SUCCESS;
2139 }
2140
2141 /**
2142 * e1000_validate_mdi_setting_crossover_generic - Verify MDI/MDIx settings
2143 * @hw: pointer to the HW structure
2144 *
2145 * Validate the MDI/MDIx setting, allowing for auto-crossover during forced
2146 * operation.
2147 **/
2148 s32 e1000_validate_mdi_setting_crossover_generic(struct e1000_hw E1000_UNUSEDARG *hw)
2149 {
2150 DEBUGFUNC("e1000_validate_mdi_setting_crossover_generic");
2151
2152 return E1000_SUCCESS;
2153 }
2154
2155 /**
2156  * e1000_write_8bit_ctrl_reg_generic - Write an 8-bit CTRL register
2157 * @hw: pointer to the HW structure
2158 * @reg: 32bit register offset such as E1000_SCTL
2159 * @offset: register offset to write to
2160 * @data: data to write at register offset
2161 *
2162 * Writes an address/data control type register. There are several of these
2163  * and they all have the format (address << 8) | data; bit 31 is polled for
2164  * completion.
2165 **/
2166 s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg,
2167 u32 offset, u8 data)
2168 {
2169 u32 i, regvalue = 0;
2170
2171 DEBUGFUNC("e1000_write_8bit_ctrl_reg_generic");
2172
2173 /* Set up the address and data */
2174 regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
2175 E1000_WRITE_REG(hw, reg, regvalue);
2176
2177 	/* Poll the ready bit to see if the write completed */
2178 for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
2179 usec_delay(5);
2180 regvalue = E1000_READ_REG(hw, reg);
2181 if (regvalue & E1000_GEN_CTL_READY)
2182 break;
2183 }
2184 if (!(regvalue & E1000_GEN_CTL_READY)) {
2185 DEBUGOUT1("Reg %08x did not indicate ready\n", reg);
2186 return -E1000_ERR_PHY;
2187 }
2188
2189 return E1000_SUCCESS;
2190 }
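
/* A hedged usage sketch (not compiled): write one address/data register of
 * this type.  E1000_SCTL is the register named in the kernel-doc above; the
 * offset and data values are made up for illustration.
 */
#if 0	/* illustrative sketch only */
static void example_write_sctl(struct e1000_hw *hw)
{
	if (e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x10, 0x5A))
		DEBUGOUT("SCTL write did not indicate ready\n");
}
#endif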
2191
2192 /**
2193 * e1000_get_hw_semaphore - Acquire hardware semaphore
2194 * @hw: pointer to the HW structure
2195 *
2196 * Acquire the HW semaphore to access the PHY or NVM
2197 **/
2198 s32 e1000_get_hw_semaphore(struct e1000_hw *hw)
2199 {
2200 u32 swsm;
2201 s32 fw_timeout = hw->nvm.word_size + 1;
2202 s32 sw_timeout = hw->nvm.word_size + 1;
2203 s32 i = 0;
2204
2205 DEBUGFUNC("e1000_get_hw_semaphore");
2206
2207 	/* 82571-specific workaround:
2208 	 * If we have timed out 3 times on trying to acquire
2209 * the inter-port SMBI semaphore, there is old code
2210 * operating on the other port, and it is not
2211 * releasing SMBI. Modify the number of times that
2212 * we try for the semaphore to interwork with this
2213 * older code.
2214 */
2215 if (hw->dev_spec._82571.smb_counter > 2)
2216 sw_timeout = 1;
2217
2218
2219 /* Get the SW semaphore */
2220 while (i < sw_timeout) {
2221 swsm = E1000_READ_REG(hw, E1000_SWSM);
2222 if (!(swsm & E1000_SWSM_SMBI))
2223 break;
2224
2225 usec_delay(50);
2226 i++;
2227 }
2228
2229 if (i == sw_timeout) {
2230 DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
2231 hw->dev_spec._82571.smb_counter++;
2232 }
2233
2234 /* In rare circumstances, the SW semaphore may already be held
2235 * unintentionally. Clear the semaphore once before giving up.
2236 */
2237 if (hw->dev_spec._82575.clear_semaphore_once) {
2238 hw->dev_spec._82575.clear_semaphore_once = false;
2239 e1000_put_hw_semaphore(hw);
2240 for (i = 0; i < fw_timeout; i++) {
2241 swsm = E1000_READ_REG(hw, E1000_SWSM);
2242 if (!(swsm & E1000_SWSM_SMBI))
2243 break;
2244
2245 usec_delay(50);
2246 }
2247 }
2248
2249 /* Get the FW semaphore. */
2250 for (i = 0; i < fw_timeout; i++) {
2251 swsm = E1000_READ_REG(hw, E1000_SWSM);
2252 E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
2253
2254 /* Semaphore acquired if bit latched */
2255 if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
2256 break;
2257
2258 usec_delay(50);
2259 }
2260
2261 if (i == fw_timeout) {
2262 /* Release semaphores */
2263 e1000_put_hw_semaphore(hw);
2264 DEBUGOUT("Driver can't access the NVM\n");
2265 return -E1000_ERR_NVM;
2266 }
2267
2268 return E1000_SUCCESS;
2269 }
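
/* A hedged usage sketch (not compiled): the semaphore is taken around
 * direct PHY/NVM accesses and must always be released on every path; the
 * wrapper name below is hypothetical.
 */
#if 0	/* illustrative sketch only */
static s32 example_nvm_critical_section(struct e1000_hw *hw)
{
	s32 ret_val = e1000_get_hw_semaphore(hw);

	if (ret_val)
		return ret_val;

	/* ... access the NVM or PHY here ... */

	e1000_put_hw_semaphore(hw);
	return E1000_SUCCESS;
}
#endif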
2270
2271 /**
2272 * e1000_put_hw_semaphore - Release hardware semaphore
2273 * @hw: pointer to the HW structure
2274 *
2275 * Release hardware semaphore used to access the PHY or NVM
2276 **/
2277 void e1000_put_hw_semaphore(struct e1000_hw *hw)
2278 {
2279 u32 swsm;
2280
2281 DEBUGFUNC("e1000_put_hw_semaphore");
2282
2283 swsm = E1000_READ_REG(hw, E1000_SWSM);
2284
2285 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
2286
2287 E1000_WRITE_REG(hw, E1000_SWSM, swsm);
2288 }
2289
2290
2291 /**
2292 * e1000_acquire_swfw_sync - Acquire SW/FW semaphore
2293 * @hw: pointer to the HW structure
2294 * @mask: specifies which semaphore to acquire
2295 *
2296 * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
2297 * will also specify which port we're acquiring the lock for.
2298 **/
2299 s32
2300 e1000_acquire_swfw_sync(struct e1000_hw *hw, u16 mask)
2301 {
2302 u32 swfw_sync;
2303 u32 swmask = mask;
2304 u32 fwmask = mask << 16;
2305 s32 ret_val = E1000_SUCCESS;
2306 s32 i = 0, timeout = 200;
2307
2308 DEBUGFUNC("e1000_acquire_swfw_sync");
2309 ASSERT_NO_LOCKS();
2310 while (i < timeout) {
2311 if (e1000_get_hw_semaphore(hw)) {
2312 ret_val = -E1000_ERR_SWFW_SYNC;
2313 goto out;
2314 }
2315
2316 swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
2317 if (!(swfw_sync & (fwmask | swmask)))
2318 break;
2319
2320 /*
2321 * Firmware currently using resource (fwmask)
2322 * or other software thread using resource (swmask)
2323 */
2324 e1000_put_hw_semaphore(hw);
2325 msec_delay_irq(5);
2326 i++;
2327 }
2328
2329 if (i == timeout) {
2330 DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
2331 ret_val = -E1000_ERR_SWFW_SYNC;
2332 goto out;
2333 }
2334
2335 swfw_sync |= swmask;
2336 E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
2337
2338 e1000_put_hw_semaphore(hw);
2339
2340 out:
2341 return ret_val;
2342 }
2343
2344 /**
2345 * e1000_release_swfw_sync - Release SW/FW semaphore
2346 * @hw: pointer to the HW structure
2347  * @mask: specifies which semaphore to release
2348 *
2349 * Release the SW/FW semaphore used to access the PHY or NVM. The mask
2350 * will also specify which port we're releasing the lock for.
2351 **/
2352 void
2353 e1000_release_swfw_sync(struct e1000_hw *hw, u16 mask)
2354 {
2355 u32 swfw_sync;
2356
2357 DEBUGFUNC("e1000_release_swfw_sync");
2358
2359 while (e1000_get_hw_semaphore(hw) != E1000_SUCCESS)
2360 ; /* Empty */
2361
2362 swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
2363 swfw_sync &= (u32)~mask;
2364 E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
2365
2366 e1000_put_hw_semaphore(hw);
2367 }
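
/* A hedged usage sketch (not compiled): acquire and release are always
 * paired on the same mask, which selects the shared resource (and port)
 * being locked; the wrapper name below is hypothetical.
 */
#if 0	/* illustrative sketch only */
static s32 example_swfw_locked_access(struct e1000_hw *hw, u16 mask)
{
	s32 ret_val = e1000_acquire_swfw_sync(hw, mask);

	if (ret_val)
		return ret_val;

	/* ... access the PHY/NVM resource selected by 'mask' ... */

	e1000_release_swfw_sync(hw, mask);
	return E1000_SUCCESS;
}
#endif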
2368
2369