/*-
 * Copyright 2021 Intel Corp
 * Copyright 2021 Rubicon Communications, LLC (Netgate)
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <sys/cdefs.h>
#include "igc_api.h"

static s32 igc_init_nvm_params_i225(struct igc_hw *hw);
static s32 igc_init_mac_params_i225(struct igc_hw *hw);
static s32 igc_init_phy_params_i225(struct igc_hw *hw);
static s32 igc_reset_hw_i225(struct igc_hw *hw);
static s32 igc_acquire_nvm_i225(struct igc_hw *hw);
static void igc_release_nvm_i225(struct igc_hw *hw);
static s32 igc_get_hw_semaphore_i225(struct igc_hw *hw);
static s32 __igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words,
				u16 *data);
static s32 igc_pool_flash_update_done_i225(struct igc_hw *hw);

/**
 * igc_init_nvm_params_i225 - Init NVM func ptrs.
 * @hw: pointer to the HW structure
 **/
static s32 igc_init_nvm_params_i225(struct igc_hw *hw)
{
	struct igc_nvm_info *nvm = &hw->nvm;
	u32 eecd = IGC_READ_REG(hw, IGC_EECD);
	u16 size;

	DEBUGFUNC("igc_init_nvm_params_i225");

	size = (u16)((eecd & IGC_EECD_SIZE_EX_MASK) >>
	    IGC_EECD_SIZE_EX_SHIFT);
	/*
	 * Added to a constant, "size" becomes the left-shift value
	 * for setting word_size.
	 */
	size += NVM_WORD_SIZE_BASE_SHIFT;

	/* Just in case size is out of range, cap it to the largest
	 * EEPROM size supported
	 */
	if (size > 15)
		size = 15;

	nvm->word_size = 1 << size;
	nvm->opcode_bits = 8;
	nvm->delay_usec = 1;
	nvm->type = igc_nvm_eeprom_spi;

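	/* SPI EEPROM geometry: parts that report the EECD address-bits flag
	 * use 32-byte pages and 16-bit addressing; smaller parts use 8-byte
	 * pages and 8-bit addressing.
	 */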
	nvm->page_size = eecd & IGC_EECD_ADDR_BITS ? 32 : 8;
	nvm->address_bits = eecd & IGC_EECD_ADDR_BITS ?
	    16 : 8;

	if (nvm->word_size == (1 << 15))
		nvm->page_size = 128;

	nvm->ops.acquire = igc_acquire_nvm_i225;
	nvm->ops.release = igc_release_nvm_i225;
	if (igc_get_flash_presence_i225(hw)) {
		hw->nvm.type = igc_nvm_flash_hw;
		nvm->ops.read = igc_read_nvm_srrd_i225;
		nvm->ops.write = igc_write_nvm_srwr_i225;
		nvm->ops.validate = igc_validate_nvm_checksum_i225;
		nvm->ops.update = igc_update_nvm_checksum_i225;
	} else {
		hw->nvm.type = igc_nvm_invm;
		nvm->ops.write = igc_null_write_nvm;
		nvm->ops.validate = igc_null_ops_generic;
		nvm->ops.update = igc_null_ops_generic;
	}

	return IGC_SUCCESS;
}

/**
 * igc_init_mac_params_i225 - Init MAC func ptrs.
 * @hw: pointer to the HW structure
 **/
static s32 igc_init_mac_params_i225(struct igc_hw *hw)
{
	struct igc_mac_info *mac = &hw->mac;
	struct igc_dev_spec_i225 *dev_spec = &hw->dev_spec._i225;

	DEBUGFUNC("igc_init_mac_params_i225");

	/* Initialize function pointer */
	igc_init_mac_ops_generic(hw);

	/* Set media type */
	hw->phy.media_type = igc_media_type_copper;
	/* Set mta register count */
	mac->mta_reg_count = 128;
	/* Set rar entry count */
	mac->rar_entry_count = IGC_RAR_ENTRIES_BASE;

	/* reset */
	mac->ops.reset_hw = igc_reset_hw_i225;
	/* hw initialization */
	mac->ops.init_hw = igc_init_hw_i225;
	/* link setup */
	mac->ops.setup_link = igc_setup_link_generic;
	/* check for link */
	mac->ops.check_for_link = igc_check_for_link_i225;
	/* link info */
	mac->ops.get_link_up_info = igc_get_speed_and_duplex_copper_generic;
	/* acquire SW_FW sync */
	mac->ops.acquire_swfw_sync = igc_acquire_swfw_sync_i225;
	/* release SW_FW sync */
	mac->ops.release_swfw_sync = igc_release_swfw_sync_i225;

	/* Allow a single clear of the SW semaphore on I225 */
	dev_spec->clear_semaphore_once = true;
	mac->ops.setup_physical_interface = igc_setup_copper_link_i225;

	/* Set if part includes ASF firmware */
	mac->asf_firmware_present = true;

	/* multicast address update */
	mac->ops.update_mc_addr_list = igc_update_mc_addr_list_generic;

	mac->ops.write_vfta = igc_write_vfta_generic;

	return IGC_SUCCESS;
}

/**
 * igc_init_phy_params_i225 - Init PHY func ptrs.
 * @hw: pointer to the HW structure
 **/
static s32 igc_init_phy_params_i225(struct igc_hw *hw)
{
	struct igc_phy_info *phy = &hw->phy;
	s32 ret_val = IGC_SUCCESS;

	DEBUGFUNC("igc_init_phy_params_i225");

	if (hw->phy.media_type != igc_media_type_copper) {
		phy->type = igc_phy_none;
		goto out;
	}

	phy->ops.power_up = igc_power_up_phy_copper;
	phy->ops.power_down = igc_power_down_phy_copper_base;

	phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT_2500;

	phy->reset_delay_us = 100;

	phy->ops.acquire = igc_acquire_phy_base;
	phy->ops.check_reset_block = igc_check_reset_block_generic;
	phy->ops.release = igc_release_phy_base;
	phy->ops.reset = igc_phy_hw_reset_generic;
	phy->ops.read_reg = igc_read_phy_reg_gpy;
	phy->ops.write_reg = igc_write_phy_reg_gpy;

	/* Make sure the PHY is in a good state. Several people have reported
	 * firmware leaving the PHY's page select register set to something
	 * other than the default of zero, which causes the PHY ID read to
	 * access something other than the intended register.
	 */
	ret_val = hw->phy.ops.reset(hw);
	if (ret_val)
		goto out;

	ret_val = igc_get_phy_id(hw);
	phy->type = igc_phy_i225;

out:
	return ret_val;
}

/**
 * igc_reset_hw_i225 - Reset hardware
 * @hw: pointer to the HW structure
 *
 * This resets the hardware into a known state.
 **/
static s32 igc_reset_hw_i225(struct igc_hw *hw)
{
	u32 ctrl;
	s32 ret_val;

	DEBUGFUNC("igc_reset_hw_i225");

	/*
	 * Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = igc_disable_pcie_master_generic(hw);
	if (ret_val)
		DEBUGOUT("PCI-E Master disable polling has failed.\n");

	DEBUGOUT("Masking off all interrupts\n");
	IGC_WRITE_REG(hw, IGC_IMC, 0xffffffff);

	IGC_WRITE_REG(hw, IGC_RCTL, 0);
	IGC_WRITE_REG(hw, IGC_TCTL, IGC_TCTL_PSP);
	IGC_WRITE_FLUSH(hw);

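	/* Give any in-flight receive and transmit activity time to quiesce
	 * before issuing the device reset.
	 */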
	msec_delay(10);

	ctrl = IGC_READ_REG(hw, IGC_CTRL);

	DEBUGOUT("Issuing a global reset to MAC\n");
	IGC_WRITE_REG(hw, IGC_CTRL, ctrl | IGC_CTRL_DEV_RST);

	ret_val = igc_get_auto_rd_done_generic(hw);
	if (ret_val) {
		/*
		 * When auto config read does not complete, do not
		 * return with an error. This can happen in situations
		 * where there is no eeprom and prevents getting link.
		 */
		DEBUGOUT("Auto Read Done did not complete\n");
	}

	/* Clear any pending interrupt events. */
	IGC_WRITE_REG(hw, IGC_IMC, 0xffffffff);
	IGC_READ_REG(hw, IGC_ICR);

	/* Install any alternate MAC address into RAR0 */
	ret_val = igc_check_alt_mac_addr_generic(hw);

	return ret_val;
}

/* igc_acquire_nvm_i225 - Request for access to EEPROM
 * @hw: pointer to the HW structure
 *
 * Acquire the necessary semaphores for exclusive access to the EEPROM.
 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
 * Return successful if access grant bit set, else clear the request for
 * EEPROM access and return -IGC_ERR_NVM (-1).
 */
static s32 igc_acquire_nvm_i225(struct igc_hw *hw)
{
	s32 ret_val;

	DEBUGFUNC("igc_acquire_nvm_i225");

	ret_val = igc_acquire_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);

	return ret_val;
}

/* igc_release_nvm_i225 - Release exclusive access to EEPROM
 * @hw: pointer to the HW structure
 *
 * Stop any current commands to the EEPROM and clear the EEPROM request bit,
 * then release the semaphores acquired.
 */
static void igc_release_nvm_i225(struct igc_hw *hw)
{
	DEBUGFUNC("igc_release_nvm_i225");

	igc_release_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);
}

/* igc_acquire_swfw_sync_i225 - Acquire SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to acquire
 *
 * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
 * will also specify which port we're acquiring the lock for.
 */
s32 igc_acquire_swfw_sync_i225(struct igc_hw *hw, u16 mask)
{
	u32 swfw_sync;
	u32 swmask = mask;
	u32 fwmask = mask << 16;
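	/* In SW_FW_SYNC the software semaphore bits occupy the low 16 bits
	 * and the corresponding firmware bits the high 16 bits, hence the
	 * shifted firmware mask.
	 */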
	s32 ret_val = IGC_SUCCESS;
	s32 i = 0, timeout = 200; /* FIXME: find real value to use here */

	DEBUGFUNC("igc_acquire_swfw_sync_i225");

	while (i < timeout) {
		if (igc_get_hw_semaphore_i225(hw)) {
			ret_val = -IGC_ERR_SWFW_SYNC;
			goto out;
		}

		swfw_sync = IGC_READ_REG(hw, IGC_SW_FW_SYNC);
		if (!(swfw_sync & (fwmask | swmask)))
			break;

		/* Firmware currently using resource (fwmask)
		 * or other software thread using resource (swmask)
		 */
		igc_put_hw_semaphore_generic(hw);
		msec_delay_irq(5);
		i++;
	}

	if (i == timeout) {
		DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
		ret_val = -IGC_ERR_SWFW_SYNC;
		goto out;
	}

	swfw_sync |= swmask;
	IGC_WRITE_REG(hw, IGC_SW_FW_SYNC, swfw_sync);

	igc_put_hw_semaphore_generic(hw);

out:
	return ret_val;
}

/* igc_release_swfw_sync_i225 - Release SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to release
 *
 * Release the SW/FW semaphore used to access the PHY or NVM. The mask
 * will also specify which port we're releasing the lock for.
 */
void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask)
{
	u32 swfw_sync;

	DEBUGFUNC("igc_release_swfw_sync_i225");

	while (igc_get_hw_semaphore_i225(hw) != IGC_SUCCESS)
		; /* Empty */

	swfw_sync = IGC_READ_REG(hw, IGC_SW_FW_SYNC);
	swfw_sync &= ~mask;
	IGC_WRITE_REG(hw, IGC_SW_FW_SYNC, swfw_sync);

	igc_put_hw_semaphore_generic(hw);
}

/*
 * igc_setup_copper_link_i225 - Configure copper link settings
 * @hw: pointer to the HW structure
 *
 * Configures the link for auto-neg or forced speed and duplex. Then we check
 * for link; once link is established, collision distance and flow control are
 * configured.
 */
s32 igc_setup_copper_link_i225(struct igc_hw *hw)
{
	u32 phpm_reg;
	s32 ret_val;
	u32 ctrl;

	DEBUGFUNC("igc_setup_copper_link_i225");

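	/* Set the link-up bit and clear the force-speed/duplex bits so that
	 * speed and duplex come from auto-negotiation.
	 */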
	ctrl = IGC_READ_REG(hw, IGC_CTRL);
	ctrl |= IGC_CTRL_SLU;
	ctrl &= ~(IGC_CTRL_FRCSPD | IGC_CTRL_FRCDPX);
	IGC_WRITE_REG(hw, IGC_CTRL, ctrl);

	phpm_reg = IGC_READ_REG(hw, IGC_I225_PHPM);
	phpm_reg &= ~IGC_I225_PHPM_GO_LINKD;
	IGC_WRITE_REG(hw, IGC_I225_PHPM, phpm_reg);

	ret_val = igc_setup_copper_link_generic(hw);

	return ret_val;
}

/* igc_get_hw_semaphore_i225 - Acquire hardware semaphore
 * @hw: pointer to the HW structure
 *
 * Acquire the HW semaphore to access the PHY or NVM
 */
static s32 igc_get_hw_semaphore_i225(struct igc_hw *hw)
{
	u32 swsm;
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	DEBUGFUNC("igc_get_hw_semaphore_i225");

	/* Get the SW semaphore */
	while (i < timeout) {
		swsm = IGC_READ_REG(hw, IGC_SWSM);
		if (!(swsm & IGC_SWSM_SMBI))
			break;

		usec_delay(50);
		i++;
	}

	if (i == timeout) {
		/* In rare circumstances, the SW semaphore may already be held
		 * unintentionally. Clear the semaphore once before giving up.
		 */
		if (hw->dev_spec._i225.clear_semaphore_once) {
			hw->dev_spec._i225.clear_semaphore_once = false;
			igc_put_hw_semaphore_generic(hw);
			for (i = 0; i < timeout; i++) {
				swsm = IGC_READ_REG(hw, IGC_SWSM);
				if (!(swsm & IGC_SWSM_SMBI))
					break;

				usec_delay(50);
			}
		}

		/* If we do not have the semaphore here, we have to give up. */
		if (i == timeout) {
			DEBUGOUT("Driver can't access device -\n");
			DEBUGOUT("SMBI bit is set.\n");
			return -IGC_ERR_NVM;
		}
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = IGC_READ_REG(hw, IGC_SWSM);
		IGC_WRITE_REG(hw, IGC_SWSM, swsm | IGC_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched */
		if (IGC_READ_REG(hw, IGC_SWSM) & IGC_SWSM_SWESMBI)
			break;

		usec_delay(50);
	}

	if (i == timeout) {
		/* Release semaphores */
		igc_put_hw_semaphore_generic(hw);
		DEBUGOUT("Driver can't access the NVM\n");
		return -IGC_ERR_NVM;
	}

	return IGC_SUCCESS;
}

/* igc_read_nvm_srrd_i225 - Reads Shadow Ram using EERD register
 * @hw: pointer to the HW structure
 * @offset: offset of word in the Shadow Ram to read
 * @words: number of words to read
 * @data: word read from the Shadow Ram
 *
 * Reads a 16 bit word from the Shadow Ram using the EERD register.
 * Uses necessary synchronization semaphores.
 */
s32 igc_read_nvm_srrd_i225(struct igc_hw *hw, u16 offset, u16 words,
    u16 *data)
{
	s32 status = IGC_SUCCESS;
	u16 i, count;

	DEBUGFUNC("igc_read_nvm_srrd_i225");

	/* We cannot hold synchronization semaphores for too long,
	 * because of forceful takeover procedure. However, it is more
	 * efficient to read in bursts than to synchronize access for
	 * each word.
	 */
	for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) {
		count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ?
		    IGC_EERD_EEWR_MAX_COUNT : (words - i);
		if (hw->nvm.ops.acquire(hw) == IGC_SUCCESS) {
			status = igc_read_nvm_eerd(hw, offset, count,
			    data + i);
			hw->nvm.ops.release(hw);
		} else {
			status = IGC_ERR_SWFW_SYNC;
		}

		if (status != IGC_SUCCESS)
			break;
	}

	return status;
}

/* igc_write_nvm_srwr_i225 - Write to Shadow RAM using EEWR
 * @hw: pointer to the HW structure
 * @offset: offset within the Shadow RAM to be written to
 * @words: number of words to write
 * @data: 16 bit word(s) to be written to the Shadow RAM
 *
 * Writes data to Shadow RAM at offset using EEWR register.
 *
 * If igc_update_nvm_checksum is not called after this function, the
 * data will not be committed to FLASH and the Shadow RAM will most likely
 * contain an invalid checksum.
 *
 * If error code is returned, data and Shadow RAM may be inconsistent - buffer
 * partially written.
 */
s32 igc_write_nvm_srwr_i225(struct igc_hw *hw, u16 offset, u16 words,
    u16 *data)
{
	s32 status = IGC_SUCCESS;
	u16 i, count;

	DEBUGFUNC("igc_write_nvm_srwr_i225");

	/* We cannot hold synchronization semaphores for too long,
	 * because of forceful takeover procedure. However, it is more
	 * efficient to write in bursts than to synchronize access for
	 * each word.
	 */
	for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) {
		count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ?
		    IGC_EERD_EEWR_MAX_COUNT : (words - i);
		if (hw->nvm.ops.acquire(hw) == IGC_SUCCESS) {
			status = __igc_write_nvm_srwr(hw, offset, count,
			    data + i);
			hw->nvm.ops.release(hw);
		} else {
			status = IGC_ERR_SWFW_SYNC;
		}

		if (status != IGC_SUCCESS)
			break;
	}

	return status;
}

/* __igc_write_nvm_srwr - Write to Shadow Ram using EEWR
 * @hw: pointer to the HW structure
 * @offset: offset within the Shadow Ram to be written to
 * @words: number of words to write
 * @data: 16 bit word(s) to be written to the Shadow Ram
 *
 * Writes data to Shadow Ram at offset using EEWR register.
 *
 * If igc_update_nvm_checksum is not called after this function, the
 * Shadow Ram will most likely contain an invalid checksum.
 */
static s32 __igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words,
    u16 *data)
{
	struct igc_nvm_info *nvm = &hw->nvm;
	u32 i, k, eewr = 0;
	u32 attempts = 100000;
	s32 ret_val = IGC_SUCCESS;

	DEBUGFUNC("__igc_write_nvm_srwr");

	/* A check for invalid values: offset too large, too many words,
	 * too many words for the offset, and not enough words.
	 */
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		DEBUGOUT("nvm parameter(s) out of bounds\n");
		ret_val = -IGC_ERR_NVM;
		goto out;
	}

	for (i = 0; i < words; i++) {
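		/* Pack the word address, the data word, and the start bit
		 * into a single SRWR command.
		 */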
		eewr = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) |
		    (data[i] << IGC_NVM_RW_REG_DATA) |
		    IGC_NVM_RW_REG_START;

		IGC_WRITE_REG(hw, IGC_SRWR, eewr);

		for (k = 0; k < attempts; k++) {
			if (IGC_NVM_RW_REG_DONE &
			    IGC_READ_REG(hw, IGC_SRWR)) {
				ret_val = IGC_SUCCESS;
				break;
			}
			usec_delay(5);
		}

		if (ret_val != IGC_SUCCESS) {
			DEBUGOUT("Shadow RAM write EEWR timed out\n");
			break;
		}
	}

out:
	return ret_val;
}

/* igc_validate_nvm_checksum_i225 - Validate EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
 * and then verifies that the sum of the EEPROM is equal to 0xBABA.
 */
s32 igc_validate_nvm_checksum_i225(struct igc_hw *hw)
{
	s32 status = IGC_SUCCESS;
	s32 (*read_op_ptr)(struct igc_hw *, u16, u16, u16 *);

	DEBUGFUNC("igc_validate_nvm_checksum_i225");

	if (hw->nvm.ops.acquire(hw) == IGC_SUCCESS) {
		/* Replace the semaphore-grabbing read function with one that
		 * skips the semaphore; we already hold it here.
		 */
		read_op_ptr = hw->nvm.ops.read;
		hw->nvm.ops.read = igc_read_nvm_eerd;

		status = igc_validate_nvm_checksum_generic(hw);

		/* Revert original read operation. */
		hw->nvm.ops.read = read_op_ptr;

		hw->nvm.ops.release(hw);
	} else {
		status = IGC_ERR_SWFW_SYNC;
	}

	return status;
}

/* igc_update_nvm_checksum_i225 - Update EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Updates the EEPROM checksum by reading/adding each word of the EEPROM
 * up to the checksum. Then calculates the EEPROM checksum and writes the
 * value to the EEPROM. Next commit EEPROM data onto the Flash.
 */
s32 igc_update_nvm_checksum_i225(struct igc_hw *hw)
{
	s32 ret_val;
	u16 checksum = 0;
	u16 i, nvm_data;

	DEBUGFUNC("igc_update_nvm_checksum_i225");

	/* Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	ret_val = igc_read_nvm_eerd(hw, 0, 1, &nvm_data);
	if (ret_val != IGC_SUCCESS) {
		DEBUGOUT("EEPROM read failed\n");
		goto out;
	}

	if (hw->nvm.ops.acquire(hw) == IGC_SUCCESS) {
		/* Do not use hw->nvm.ops.write, hw->nvm.ops.read
		 * because we do not want to take the synchronization
		 * semaphores twice here.
		 */

		for (i = 0; i < NVM_CHECKSUM_REG; i++) {
			ret_val = igc_read_nvm_eerd(hw, i, 1, &nvm_data);
			if (ret_val) {
				hw->nvm.ops.release(hw);
				DEBUGOUT("NVM Read Error while updating\n");
				DEBUGOUT("checksum.\n");
				goto out;
			}
			checksum += nvm_data;
		}
		checksum = (u16)NVM_SUM - checksum;
		ret_val = __igc_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
		    &checksum);
		if (ret_val != IGC_SUCCESS) {
			hw->nvm.ops.release(hw);
			DEBUGOUT("NVM Write Error while updating checksum.\n");
			goto out;
		}

		hw->nvm.ops.release(hw);

		ret_val = igc_update_flash_i225(hw);
	} else {
		ret_val = IGC_ERR_SWFW_SYNC;
	}
out:
	return ret_val;
}

/* igc_get_flash_presence_i225 - Check if flash device is detected.
 * @hw: pointer to the HW structure
 */
bool igc_get_flash_presence_i225(struct igc_hw *hw)
{
	u32 eec = 0;
	bool ret_val = false;

	DEBUGFUNC("igc_get_flash_presence_i225");

	eec = IGC_READ_REG(hw, IGC_EECD);

	if (eec & IGC_EECD_FLASH_DETECTED_I225)
		ret_val = true;

	return ret_val;
}

/* igc_set_flsw_flash_burst_counter_i225 - sets FLSW NVM Burst
 * Counter in FLSWCNT register.
 *
 * @hw: pointer to the HW structure
 * @burst_counter: size in bytes of the Flash burst to read or write
 */
s32 igc_set_flsw_flash_burst_counter_i225(struct igc_hw *hw,
    u32 burst_counter)
{
	s32 ret_val = IGC_SUCCESS;

	DEBUGFUNC("igc_set_flsw_flash_burst_counter_i225");

	/* Validate input data */
	if (burst_counter < IGC_I225_SHADOW_RAM_SIZE) {
		/* Write FLSWCNT - burst counter */
		IGC_WRITE_REG(hw, IGC_I225_FLSWCNT, burst_counter);
	} else {
		ret_val = IGC_ERR_INVALID_ARGUMENT;
	}

	return ret_val;
}

/* igc_write_erase_flash_command_i225 - write/erase to a sector
 * region on a given address.
 *
 * @hw: pointer to the HW structure
 * @opcode: opcode to be used for the write command
 * @address: the offset to write into the FLASH image
 */
s32 igc_write_erase_flash_command_i225(struct igc_hw *hw, u32 opcode,
    u32 address)
{
	u32 flswctl = 0;
	s32 timeout = IGC_NVM_GRANT_ATTEMPTS;
	s32 ret_val = IGC_SUCCESS;

	DEBUGFUNC("igc_write_erase_flash_command_i225");

	flswctl = IGC_READ_REG(hw, IGC_I225_FLSWCTL);
	/* Polling done bit on FLSWCTL register */
	while (timeout) {
		if (flswctl & IGC_FLSWCTL_DONE)
			break;
		usec_delay(5);
		flswctl = IGC_READ_REG(hw, IGC_I225_FLSWCTL);
		timeout--;
	}

	if (!timeout) {
		DEBUGOUT("Flash transaction was not done\n");
		return -IGC_ERR_NVM;
	}

	/* Build and issue command on FLSWCTL register */
	flswctl = address | opcode;
	IGC_WRITE_REG(hw, IGC_I225_FLSWCTL, flswctl);

	/* Check if issued command is valid on FLSWCTL register */
	flswctl = IGC_READ_REG(hw, IGC_I225_FLSWCTL);
	if (!(flswctl & IGC_FLSWCTL_CMDV)) {
		DEBUGOUT("Write flash command failed\n");
		ret_val = IGC_ERR_INVALID_ARGUMENT;
	}

	return ret_val;
}

/* igc_update_flash_i225 - Commit EEPROM to the flash
 * If fw_valid_bit is set, FW is active: setting the FLUPD bit in the EEC
 * register makes the FW load the internal shadow RAM into the flash.
 * Otherwise fw_valid_bit is 0; if FL_SECU.block_protected_sw = 0 the FW
 * is not active, so the SW is responsible for the shadow RAM dump.
 *
 * @hw: pointer to the HW structure
 */
s32 igc_update_flash_i225(struct igc_hw *hw)
{
	u16 current_offset_data = 0;
	u32 block_sw_protect = 1;
	u16 base_address = 0x0;
	u32 i, fw_valid_bit;
	u16 current_offset;
	s32 ret_val = 0;
	u32 flup;

	DEBUGFUNC("igc_update_flash_i225");

	block_sw_protect = IGC_READ_REG(hw, IGC_I225_FLSECU) &
	    IGC_FLSECU_BLK_SW_ACCESS_I225;
	fw_valid_bit = IGC_READ_REG(hw, IGC_FWSM) &
	    IGC_FWSM_FW_VALID_I225;
	if (fw_valid_bit) {
		ret_val = igc_pool_flash_update_done_i225(hw);
		if (ret_val == -IGC_ERR_NVM) {
			DEBUGOUT("Flash update time out\n");
			goto out;
		}

		flup = IGC_READ_REG(hw, IGC_EECD) | IGC_EECD_FLUPD_I225;
		IGC_WRITE_REG(hw, IGC_EECD, flup);

		ret_val = igc_pool_flash_update_done_i225(hw);
		if (ret_val == IGC_SUCCESS)
			DEBUGOUT("Flash update complete\n");
		else
			DEBUGOUT("Flash update time out\n");
	} else if (!block_sw_protect) {
		/* FW is not active and security protection is disabled;
		 * therefore, SW is in charge of the shadow RAM dump.
		 * Check which sector is valid. If sector 0 is valid,
		 * the base address remains 0x0; otherwise, sector 1 is
		 * valid and its base address is 0x1000.
		 */
		if (IGC_READ_REG(hw, IGC_EECD) & IGC_EECD_SEC1VAL_I225)
			base_address = 0x1000;

		/* Valid sector erase */
		ret_val = igc_write_erase_flash_command_i225(hw,
		    IGC_I225_ERASE_CMD_OPCODE,
		    base_address);
		if (!ret_val) {
			DEBUGOUT("Sector erase failed\n");
			goto out;
		}

		current_offset = base_address;

		/* Write */
		for (i = 0; i < IGC_I225_SHADOW_RAM_SIZE / 2; i++) {
			/* Set burst write length */
			ret_val = igc_set_flsw_flash_burst_counter_i225(hw,
			    0x2);
			if (ret_val != IGC_SUCCESS)
				break;

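			/* The flash controller takes byte addresses, so the
			 * word offset is doubled below.
			 */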
			/* Set address and opcode */
			ret_val = igc_write_erase_flash_command_i225(hw,
			    IGC_I225_WRITE_CMD_OPCODE,
			    2 * current_offset);
			if (ret_val != IGC_SUCCESS)
				break;

			ret_val = igc_read_nvm_eerd(hw, current_offset,
			    1, &current_offset_data);
			if (ret_val) {
				DEBUGOUT("Failed to read from EEPROM\n");
				goto out;
			}

			/* Write current_offset_data to FLSWDATA register */
			IGC_WRITE_REG(hw, IGC_I225_FLSWDATA,
			    current_offset_data);
			current_offset++;

			/* Wait till operation has finished */
			ret_val = igc_poll_eerd_eewr_done(hw,
			    IGC_NVM_POLL_READ);
			if (ret_val)
				break;

			usec_delay(1000);
		}
	}
out:
	return ret_val;
}

/* igc_pool_flash_update_done_i225 - Poll FLUDONE status.
 * @hw: pointer to the HW structure
 */
s32 igc_pool_flash_update_done_i225(struct igc_hw *hw)
{
	s32 ret_val = -IGC_ERR_NVM;
	u32 i, reg;

	DEBUGFUNC("igc_pool_flash_update_done_i225");

	for (i = 0; i < IGC_FLUDONE_ATTEMPTS; i++) {
		reg = IGC_READ_REG(hw, IGC_EECD);
		if (reg & IGC_EECD_FLUDONE_I225) {
			ret_val = IGC_SUCCESS;
			break;
		}
		usec_delay(5);
	}

	return ret_val;
}

/* igc_set_ltr_i225 - Set Latency Tolerance Reporting thresholds.
 * @hw: pointer to the HW structure
 * @link: bool indicating link status
 *
 * Set the LTR thresholds based on the link speed (Mbps), EEE, and DMAC
 * settings, otherwise specify that there is no LTR requirement.
 */
static s32 igc_set_ltr_i225(struct igc_hw *hw, bool link)
{
	u16 speed, duplex;
	u32 tw_system, ltrc, ltrv, ltr_min, ltr_max, scale_min, scale_max;
	s32 size;

	DEBUGFUNC("igc_set_ltr_i225");

	/* If we do not have link, LTR thresholds are zero. */
	if (link) {
		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);

		/* Check if using copper interface with EEE enabled or if the
		 * link speed is 10 Mbps.
		 */
		if ((hw->phy.media_type == igc_media_type_copper) &&
		    !(hw->dev_spec._i225.eee_disable) &&
		    (speed != SPEED_10)) {
			/* EEE enabled, so send LTRMAX threshold. */
			ltrc = IGC_READ_REG(hw, IGC_LTRC) |
			    IGC_LTRC_EEEMS_EN;
			IGC_WRITE_REG(hw, IGC_LTRC, ltrc);

			/* Calculate tw_system (nsec). */
			if (speed == SPEED_100) {
				tw_system = ((IGC_READ_REG(hw, IGC_EEE_SU) &
				    IGC_TW_SYSTEM_100_MASK) >>
				    IGC_TW_SYSTEM_100_SHIFT) * 500;
			} else {
				tw_system = (IGC_READ_REG(hw, IGC_EEE_SU) &
				    IGC_TW_SYSTEM_1000_MASK) * 500;
			}
		} else {
			tw_system = 0;
		}

		/* Get the Rx packet buffer size. */
		size = IGC_READ_REG(hw, IGC_RXPBS) &
		    IGC_RXPBS_SIZE_I225_MASK;

		/* Calculations vary based on DMAC settings. */
		if (IGC_READ_REG(hw, IGC_DMACR) & IGC_DMACR_DMAC_EN) {
			size -= (IGC_READ_REG(hw, IGC_DMACR) &
			    IGC_DMACR_DMACTHR_MASK) >>
			    IGC_DMACR_DMACTHR_SHIFT;
			/* Convert size to bits. */
			size *= 1024 * 8;
		} else {
			/* Convert size to bytes, subtract the MTU, and then
			 * convert the size to bits.
			 */
			size *= 1024;
			size -= hw->dev_spec._i225.mtu;
			size *= 8;
		}

		if (size < 0) {
			DEBUGOUT1("Invalid effective Rx buffer size %d\n",
			    size);
			return -IGC_ERR_CONFIG;
		}

		/* Calculate the thresholds. Since speed is in Mbps, simplify
		 * the calculation by multiplying size/speed by 1000 for result
		 * to be in nsec before dividing by the scale in nsec. Set the
		 * scale such that the LTR threshold fits in the register.
		 */
		ltr_min = (1000 * size) / speed;
		ltr_max = ltr_min + tw_system;
		scale_min = (ltr_min / 1024) < 1024 ? IGC_LTRMINV_SCALE_1024 :
		    IGC_LTRMINV_SCALE_32768;
		scale_max = (ltr_max / 1024) < 1024 ? IGC_LTRMAXV_SCALE_1024 :
		    IGC_LTRMAXV_SCALE_32768;
		ltr_min /= scale_min == IGC_LTRMINV_SCALE_1024 ? 1024 : 32768;
		ltr_max /= scale_max == IGC_LTRMAXV_SCALE_1024 ? 1024 : 32768;

		/* Only write the LTR thresholds if they differ from before. */
		ltrv = IGC_READ_REG(hw, IGC_LTRMINV);
		if (ltr_min != (ltrv & IGC_LTRMINV_LTRV_MASK)) {
			ltrv = IGC_LTRMINV_LSNP_REQ | ltr_min |
			    (scale_min << IGC_LTRMINV_SCALE_SHIFT);
			IGC_WRITE_REG(hw, IGC_LTRMINV, ltrv);
		}

		ltrv = IGC_READ_REG(hw, IGC_LTRMAXV);
		if (ltr_max != (ltrv & IGC_LTRMAXV_LTRV_MASK)) {
			ltrv = IGC_LTRMAXV_LSNP_REQ | ltr_max |
			    (scale_min << IGC_LTRMAXV_SCALE_SHIFT);
			IGC_WRITE_REG(hw, IGC_LTRMAXV, ltrv);
		}
	}

	return IGC_SUCCESS;
}

/* igc_check_for_link_i225 - Check for link
 * @hw: pointer to the HW structure
 *
 * Checks to see if the link status of the hardware has changed. If a
 * change in link status has been detected, then we read the PHY registers
 * to get the current speed/duplex if link exists.
 */
s32 igc_check_for_link_i225(struct igc_hw *hw)
{
	struct igc_mac_info *mac = &hw->mac;
	s32 ret_val;
	bool link = false;

	DEBUGFUNC("igc_check_for_link_i225");

	/* We only want to go out to the PHY registers to see if
	 * Auto-Neg has completed and/or if our link status has
	 * changed. The get_link_status flag is set upon receiving
	 * a Link Status Change or Rx Sequence Error interrupt.
	 */
	if (!mac->get_link_status) {
		ret_val = IGC_SUCCESS;
		goto out;
	}

	/* First we want to see if the MII Status Register reports
	 * link. If so, then we want to get the current speed/duplex
	 * of the PHY.
	 */
	ret_val = igc_phy_has_link_generic(hw, 1, 0, &link);
	if (ret_val)
		goto out;

	if (!link)
		goto out; /* No link detected */

	mac->get_link_status = false;

	/* Check if there was DownShift, must be checked
	 * immediately after link-up
	 */
	igc_check_downshift_generic(hw);

	/* If we are forcing speed/duplex, then we simply return since
	 * we have already determined whether we have link or not.
	 */
	if (!mac->autoneg)
		goto out;

	/* Auto-Neg is enabled. Auto Speed Detection takes care
	 * of MAC speed/duplex configuration. So we only need to
	 * configure Collision Distance in the MAC.
	 */
	mac->ops.config_collision_dist(hw);

	/* Configure Flow Control now that Auto-Neg has completed.
	 * First, we need to restore the desired flow control
	 * settings because we may have had to re-autoneg with a
	 * different link partner.
	 */
	ret_val = igc_config_fc_after_link_up_generic(hw);
	if (ret_val)
		DEBUGOUT("Error configuring flow control\n");
out:
	/* Now that we are aware of our link settings, we can set the LTR
	 * thresholds.
	 */
	ret_val = igc_set_ltr_i225(hw, link);

	return ret_val;
}

/* igc_init_function_pointers_i225 - Init func ptrs.
 * @hw: pointer to the HW structure
 *
 * Called to initialize all function pointers and parameters.
 */
void igc_init_function_pointers_i225(struct igc_hw *hw)
{
	igc_init_mac_ops_generic(hw);
	igc_init_phy_ops_generic(hw);
	igc_init_nvm_ops_generic(hw);
	hw->mac.ops.init_params = igc_init_mac_params_i225;
	hw->nvm.ops.init_params = igc_init_nvm_params_i225;
	hw->phy.ops.init_params = igc_init_phy_params_i225;
}

/* igc_init_hw_i225 - Init hw for I225
 * @hw: pointer to the HW structure
 *
 * Called to initialize hw for i225 hw family.
 */
s32 igc_init_hw_i225(struct igc_hw *hw)
{
	s32 ret_val;

	DEBUGFUNC("igc_init_hw_i225");

	ret_val = igc_init_hw_base(hw);
	return ret_val;
}

/*
 * igc_set_d0_lplu_state_i225 - Set Low-Power-Link-Up (LPLU) D0 state
 * @hw: pointer to the HW structure
 * @active: true to enable LPLU, false to disable
 *
 * Note: since I225 does not actually support LPLU, this function
 * simply enables/disables 1G and 2.5G speeds in D0.
 */
s32 igc_set_d0_lplu_state_i225(struct igc_hw *hw, bool active)
{
	u32 data;

	DEBUGFUNC("igc_set_d0_lplu_state_i225");

	data = IGC_READ_REG(hw, IGC_I225_PHPM);

	if (active) {
		data |= IGC_I225_PHPM_DIS_1000;
		data |= IGC_I225_PHPM_DIS_2500;
	} else {
		data &= ~IGC_I225_PHPM_DIS_1000;
		data &= ~IGC_I225_PHPM_DIS_2500;
	}

	IGC_WRITE_REG(hw, IGC_I225_PHPM, data);
	return IGC_SUCCESS;
}

/*
 * igc_set_d3_lplu_state_i225 - Set Low-Power-Link-Up (LPLU) D3 state
 * @hw: pointer to the HW structure
 * @active: true to enable LPLU, false to disable
 *
 * Note: since I225 does not actually support LPLU, this function
 * simply enables/disables 100M, 1G and 2.5G speeds in D3.
 */
s32 igc_set_d3_lplu_state_i225(struct igc_hw *hw, bool active)
{
	u32 data;

	DEBUGFUNC("igc_set_d3_lplu_state_i225");

	data = IGC_READ_REG(hw, IGC_I225_PHPM);

	if (active) {
		data |= IGC_I225_PHPM_DIS_100_D3;
		data |= IGC_I225_PHPM_DIS_1000_D3;
		data |= IGC_I225_PHPM_DIS_2500_D3;
	} else {
		data &= ~IGC_I225_PHPM_DIS_100_D3;
		data &= ~IGC_I225_PHPM_DIS_1000_D3;
		data &= ~IGC_I225_PHPM_DIS_2500_D3;
	}

	IGC_WRITE_REG(hw, IGC_I225_PHPM, data);
	return IGC_SUCCESS;
}

/**
 * igc_set_eee_i225 - Enable/disable EEE support
 * @hw: pointer to the HW structure
 * @adv2p5G: boolean flag enabling 2.5G EEE advertisement
 * @adv1G: boolean flag enabling 1G EEE advertisement
 * @adv100M: boolean flag enabling 100M EEE advertisement
 *
 * Enable/disable EEE based on setting in dev_spec structure.
 *
 **/
s32 igc_set_eee_i225(struct igc_hw *hw, bool adv2p5G, bool adv1G,
    bool adv100M)
{
	u32 ipcnfg, eeer;

	DEBUGFUNC("igc_set_eee_i225");

	if (hw->mac.type != igc_i225 ||
	    hw->phy.media_type != igc_media_type_copper)
		goto out;
	ipcnfg = IGC_READ_REG(hw, IGC_IPCNFG);
	eeer = IGC_READ_REG(hw, IGC_EEER);

	/* enable or disable per user setting */
	if (!(hw->dev_spec._i225.eee_disable)) {
		u32 eee_su = IGC_READ_REG(hw, IGC_EEE_SU);

		if (adv100M)
			ipcnfg |= IGC_IPCNFG_EEE_100M_AN;
		else
			ipcnfg &= ~IGC_IPCNFG_EEE_100M_AN;

		if (adv1G)
			ipcnfg |= IGC_IPCNFG_EEE_1G_AN;
		else
			ipcnfg &= ~IGC_IPCNFG_EEE_1G_AN;

		if (adv2p5G)
			ipcnfg |= IGC_IPCNFG_EEE_2_5G_AN;
		else
			ipcnfg &= ~IGC_IPCNFG_EEE_2_5G_AN;

		eeer |= (IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN |
		    IGC_EEER_LPI_FC);

		/* This bit should not be set in normal operation. */
		if (eee_su & IGC_EEE_SU_LPI_CLK_STP)
			DEBUGOUT("LPI Clock Stop Bit should not be set!\n");
	} else {
		ipcnfg &= ~(IGC_IPCNFG_EEE_2_5G_AN | IGC_IPCNFG_EEE_1G_AN |
		    IGC_IPCNFG_EEE_100M_AN);
		eeer &= ~(IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN |
		    IGC_EEER_LPI_FC);
	}
	IGC_WRITE_REG(hw, IGC_IPCNFG, ipcnfg);
	IGC_WRITE_REG(hw, IGC_EEER, eeer);
	IGC_READ_REG(hw, IGC_IPCNFG);
	IGC_READ_REG(hw, IGC_EEER);
out:

	return IGC_SUCCESS;
}