1 /*
2 * Copyright 2014-2017 Cavium, Inc.
3 * The contents of this file are subject to the terms of the Common Development
4 * and Distribution License, v.1, (the "License").
5 *
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the License at available
9 * at http://opensource.org/licenses/CDDL-1.0
10 *
11 * See the License for the specific language governing permissions and
12 * limitations under the License.
13 */
14
15
16 #include "lm5706.h"
17
18
19
20 /*******************************************************************************
21 * Description:
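 *    Synchronizes a driver state change with the bootcode: the reason
 *    code plus a rolling sequence number is written to the drv_mb
 *    mailbox in shared memory, then fw_mb is polled for the matching
 *    acknowledgement.  On a timeout (other than the WAIT0 stage) the
 *    firmware is notified via DRV_MSG_CODE_FW_TIMEOUT and fw_timed_out
 *    is latched so that later handshakes are skipped.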
22 *
23 * Return:
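 *    FALSE if the firmware acknowledged the message; TRUE if the
 *    handshake was skipped (5709 IKOS or an earlier timeout) or if it
 *    timed out.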
24 ******************************************************************************/
25 u8_t
fw_reset_sync(
27 lm_device_t *pdev,
28 lm_reason_t reason,
29 u32_t msg_data,
30 u32_t fw_ack_timeout_us) /* timeout in microseconds. */
31 {
32 u32_t cnt;
33 u32_t val;
34
    /* Skip the handshake on the 5709 IKOS emulation platform. */
36 if (CHIP_ID(pdev) == CHIP_ID_5709_IKOS)
37 {
38 return TRUE;
39 }
40
    /* If a previous handshake already timed out, don't wait again. */
42 if(pdev->vars.fw_timed_out)
43 {
44 return TRUE;
45 }
46
47 pdev->vars.fw_wr_seq++;
48 msg_data |= (pdev->vars.fw_wr_seq & DRV_MSG_SEQ);
49
50 switch(reason)
51 {
52 case LM_REASON_DRIVER_RESET:
53 msg_data |= DRV_MSG_CODE_RESET;
54 break;
55
56 case LM_REASON_DRIVER_UNLOAD:
57 msg_data |= DRV_MSG_CODE_UNLOAD;
58 break;
59
60 case LM_REASON_DRIVER_UNLOAD_POWER_DOWN:
61 msg_data |= DRV_MSG_CODE_UNLOAD_LNK_DN;
62 break;
63
64 case LM_REASON_DRIVER_SHUTDOWN:
65 msg_data |= DRV_MSG_CODE_SHUTDOWN;
66 break;
67
68 case LM_REASON_WOL_SUSPEND:
69 msg_data |= DRV_MSG_CODE_SUSPEND_WOL;
70 break;
71
72 case LM_REASON_NO_WOL_SUSPEND:
73 msg_data |= DRV_MSG_CODE_SUSPEND_NO_WOL;
74 break;
75
76 case LM_REASON_DIAG:
77 msg_data |= DRV_MSG_CODE_DIAG;
78 break;
79
80 default:
81 DbgBreakMsg("invalid reason code.\n");
82 break;
83 }
84
85 REG_WR_IND(
86 pdev,
87 pdev->hw_info.shmem_base +
88 OFFSETOF(shmem_region_t, drv_fw_mb.drv_mb),
89 msg_data);
90
91 val = 0;
92
93 /* wait for an acknowledgement. */
94 for(cnt = 0; cnt < fw_ack_timeout_us/5; cnt++)
95 {
96 mm_wait(pdev, 5);
97
98 REG_RD_IND(
99 pdev,
100 pdev->hw_info.shmem_base +
101 OFFSETOF(shmem_region_t, drv_fw_mb.fw_mb),
102 &val);
103 if((val & FW_MSG_ACK) == (msg_data & DRV_MSG_SEQ))
104 {
105 break;
106 }
107 }
108
109 if((val & FW_MSG_ACK) != (msg_data & DRV_MSG_SEQ))
110 {
111 if((msg_data & DRV_MSG_DATA) != DRV_MSG_DATA_WAIT0)
112 {
113 msg_data &= ~DRV_MSG_CODE;
114 msg_data |= DRV_MSG_CODE_FW_TIMEOUT;
115
116 REG_WR_IND(
117 pdev,
118 pdev->hw_info.shmem_base +
119 OFFSETOF(shmem_region_t, drv_fw_mb.drv_mb),
120 msg_data);
121
122 pdev->vars.fw_timed_out = TRUE;
123 pdev->fw_timed_out_cnt++;
124
125 DbgMessage(pdev, WARN, "firmware timed out.\n");
126 }
127
128 return TRUE;
129 }
130
131 return FALSE;
132 } /* fw_reset_sync */
133
134
135
136 /*******************************************************************************
137 * Description:
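 *    5706 A0 workaround: zeroes each usable physical context through
 *    virtual context 0 and then maps it behind its virtual context id,
 *    using the remap table below for the ids whose default physical
 *    context is bad.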
138 *
139 * Return:
140 ******************************************************************************/
141 STATIC void
init_context_5706_a0_wa(
143 lm_device_t *pdev)
144 {
145 u8_t vcid_to_pcid[96];
146 u32_t vcid_addr;
147 u32_t pcid_addr;
148 u32_t offset;
149 u32_t vcid;
150
    /* On A0 silicon certain context memory regions are not accessible
     * due to an address decoding problem.  A bad context is identified
     * by its pcid having bit 3 set.  This table maps each virtual
     * context id to a usable physical context id. */
155 vcid_to_pcid[0x00] = 0x00; vcid_to_pcid[0x01] = 0x01;
156 vcid_to_pcid[0x02] = 0x02; vcid_to_pcid[0x03] = 0x03;
157 vcid_to_pcid[0x04] = 0x04; vcid_to_pcid[0x05] = 0x05;
158 vcid_to_pcid[0x06] = 0x06; vcid_to_pcid[0x07] = 0x07;
159 vcid_to_pcid[0x08] = 0x60; vcid_to_pcid[0x09] = 0x61; /* bad entries. */
160 vcid_to_pcid[0x0a] = 0x62; vcid_to_pcid[0x0b] = 0x63; /* bad entries. */
161 vcid_to_pcid[0x0c] = 0x64; vcid_to_pcid[0x0d] = 0x65; /* bad entries. */
162 vcid_to_pcid[0x0e] = 0x66; vcid_to_pcid[0x0f] = 0x67; /* bad entries. */
163 vcid_to_pcid[0x10] = 0x10; vcid_to_pcid[0x11] = 0x11;
164 vcid_to_pcid[0x12] = 0x12; vcid_to_pcid[0x13] = 0x13;
165 vcid_to_pcid[0x14] = 0x14; vcid_to_pcid[0x15] = 0x15;
166 vcid_to_pcid[0x16] = 0x16; vcid_to_pcid[0x17] = 0x17;
167 vcid_to_pcid[0x18] = 0x70; vcid_to_pcid[0x19] = 0x71; /* bad entries. */
168 vcid_to_pcid[0x1a] = 0x72; vcid_to_pcid[0x1b] = 0x73; /* bad entries. */
169 vcid_to_pcid[0x1c] = 0x74; vcid_to_pcid[0x1d] = 0x75; /* bad entries. */
170 vcid_to_pcid[0x1e] = 0x76; vcid_to_pcid[0x1f] = 0x77; /* bad entries. */
171 vcid_to_pcid[0x20] = 0x20; vcid_to_pcid[0x21] = 0x21;
172 vcid_to_pcid[0x22] = 0x22; vcid_to_pcid[0x23] = 0x23;
173 vcid_to_pcid[0x24] = 0x24; vcid_to_pcid[0x25] = 0x25;
174 vcid_to_pcid[0x26] = 0x26; vcid_to_pcid[0x27] = 0x27;
175 vcid_to_pcid[0x28] = 0x80; vcid_to_pcid[0x29] = 0x81; /* bad entries. */
176 vcid_to_pcid[0x2a] = 0x82; vcid_to_pcid[0x2b] = 0x83; /* bad entries. */
177 vcid_to_pcid[0x2c] = 0x84; vcid_to_pcid[0x2d] = 0x85; /* bad entries. */
178 vcid_to_pcid[0x2e] = 0x86; vcid_to_pcid[0x2f] = 0x87; /* bad entries. */
179 vcid_to_pcid[0x30] = 0x30; vcid_to_pcid[0x31] = 0x31;
180 vcid_to_pcid[0x32] = 0x32; vcid_to_pcid[0x33] = 0x33;
181 vcid_to_pcid[0x34] = 0x34; vcid_to_pcid[0x35] = 0x35;
182 vcid_to_pcid[0x36] = 0x36; vcid_to_pcid[0x37] = 0x37;
183 vcid_to_pcid[0x38] = 0x90; vcid_to_pcid[0x39] = 0x91; /* bad entries. */
184 vcid_to_pcid[0x3a] = 0x92; vcid_to_pcid[0x3b] = 0x93; /* bad entries. */
185 vcid_to_pcid[0x3c] = 0x94; vcid_to_pcid[0x3d] = 0x95; /* bad entries. */
186 vcid_to_pcid[0x3e] = 0x96; vcid_to_pcid[0x3f] = 0x97; /* bad entries. */
187 vcid_to_pcid[0x40] = 0x40; vcid_to_pcid[0x41] = 0x41;
188 vcid_to_pcid[0x42] = 0x42; vcid_to_pcid[0x43] = 0x43;
189 vcid_to_pcid[0x44] = 0x44; vcid_to_pcid[0x45] = 0x45;
190 vcid_to_pcid[0x46] = 0x46; vcid_to_pcid[0x47] = 0x47;
191 vcid_to_pcid[0x48] = 0xa0; vcid_to_pcid[0x49] = 0xa1; /* bad entries. */
192 vcid_to_pcid[0x4a] = 0xa2; vcid_to_pcid[0x4b] = 0xa3; /* bad entries. */
193 vcid_to_pcid[0x4c] = 0xa4; vcid_to_pcid[0x4d] = 0xa5; /* bad entries. */
194 vcid_to_pcid[0x4e] = 0xa6; vcid_to_pcid[0x4f] = 0xa7; /* bad entries. */
195 vcid_to_pcid[0x50] = 0x50; vcid_to_pcid[0x51] = 0x51;
196 vcid_to_pcid[0x52] = 0x52; vcid_to_pcid[0x53] = 0x53;
197 vcid_to_pcid[0x54] = 0x54; vcid_to_pcid[0x55] = 0x55;
198 vcid_to_pcid[0x56] = 0x56; vcid_to_pcid[0x57] = 0x57;
199 vcid_to_pcid[0x58] = 0xb0; vcid_to_pcid[0x59] = 0xb1; /* bad entries. */
200 vcid_to_pcid[0x5a] = 0xb2; vcid_to_pcid[0x5b] = 0xb3; /* bad entries. */
201 vcid_to_pcid[0x5c] = 0xb4; vcid_to_pcid[0x5d] = 0xb5; /* bad entries. */
202 vcid_to_pcid[0x5e] = 0xb6; vcid_to_pcid[0x5f] = 0xb7; /* bad entries. */
203
204 vcid = sizeof(vcid_to_pcid);
205 while(vcid)
206 {
207 vcid--;
208
209 vcid_addr = GET_PCID_ADDR(vcid);
210 pcid_addr = GET_PCID_ADDR(vcid_to_pcid[vcid]);
211
        /* There may be residual data in the context that could cause
         * receive problems later.  The problem intermittently occurs
         * when the chip is reset while there is incoming traffic and
         * some other firmware is running.  To prevent it, zero out the
         * context before initializing the virtual-to-physical mapping.
         * We arbitrarily map virtual context 0x00 to each physical
         * context, one at a time, and zero it out.
         *
         * First map the physical context to virtual context 0, then
         * zero out the context. */
223 REG_WR(pdev, context.ctx_virt_addr, 0x00);
224 REG_WR(pdev, context.ctx_page_tbl, pcid_addr);
225
226 /* Zero out the context. */
227 for(offset = 0; offset < PHY_CTX_SIZE; offset += 4)
228 {
229 CTX_WR(pdev, 0x00, offset, 0);
230 }
231
        /* Now initialize the correct mapping of this virtual context
         * to its corresponding physical context. */
234 REG_WR(pdev, context.ctx_virt_addr, vcid_addr);
235 REG_WR(pdev, context.ctx_page_tbl, pcid_addr);
236 }
237 } /* init_context_5706_a0_wa */
238
239
240
241 /*******************************************************************************
242 * Description:
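 *    Zeroes each physical context through virtual context 0 and then
 *    sets up the identity virtual-to-physical context mapping.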
243 *
244 * Return:
245 ******************************************************************************/
246 STATIC void
init_context_5706(
248 lm_device_t *pdev)
249 {
250 u32_t vcid_addr;
251 u32_t offset;
252
    vcid_addr = GET_CID_ADDR(96);    /* This corresponds to 48 contexts. */
254
255 while(vcid_addr)
256 {
257 vcid_addr -= PHY_CTX_SIZE;
258
        /* There may be residual data in the context that could cause
         * receive problems later.  The problem intermittently occurs
         * when the chip is reset while there is incoming traffic and
         * some other firmware is running.  To prevent it, zero out the
         * context before initializing the virtual-to-physical mapping.
         * We arbitrarily map virtual context 0x00 to each physical
         * context, one at a time, and zero it out.
         *
         * First map the physical context to virtual context 0, then
         * zero out the context. */
270 REG_WR(pdev, context.ctx_virt_addr, 0x00);
271 REG_WR(pdev, context.ctx_page_tbl, vcid_addr);
272
273 /* Zero out the context. */
274 for(offset = 0; offset < PHY_CTX_SIZE; offset += 4)
275 {
276 CTX_WR(pdev, 0x00, offset, 0);
277 }
278
        /* Now initialize the correct mapping of this virtual context
         * to its corresponding physical context. */
281 REG_WR(pdev, context.ctx_virt_addr, vcid_addr);
282 REG_WR(pdev, context.ctx_page_tbl, vcid_addr);
283 }
284 } /* init_context_5706 */
285
286
287
288 /*******************************************************************************
289 * Description:
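 *    Sets up the 5709 host-memory-based context: every page of the
 *    ctx_mem blocks is zeroed and its physical address is written into
 *    the chip's host page table, polling for each write request to
 *    complete.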
290 *
291 * Return:
292 ******************************************************************************/
293 STATIC void
init_context_5709(
295 lm_device_t *pdev)
296 {
297 lm_address_t mem_phy;
298 u8_t *mem_virt;
299 u32_t mem_size;
300 u32_t page_idx;
301 u32_t idx;
302 u32_t cnt;
303 u32_t val;
304
305 DbgBreakIf(CHIP_NUM(pdev) != CHIP_NUM_5709);
306
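    /* Program the context command register.  The low bits (0x3001) are
     * assumed to enable the host-paged context memory and start its
     * initialization; the field at bit 16 carries log2(host page size)
     * minus 8 so the chip knows the page size used below. */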
307 val = 0x3001;
308 val |= (LM_PAGE_BITS - 8) << 16;
309 REG_WR(pdev, context.ctx_command, val);
310
311 page_idx = 0;
312
313 for(idx = 0; idx < NUM_CTX_MBLKS; idx++)
314 {
315 mem_virt = pdev->vars.ctx_mem[idx].start;
316 mem_phy = pdev->vars.ctx_mem[idx].start_phy;
317 mem_size = pdev->vars.ctx_mem[idx].size;
318
319 DbgBreakIf(mem_phy.as_u32.low & LM_PAGE_MASK);
320 DbgBreakIf(mem_size & LM_PAGE_MASK);
321
322 while(mem_size)
323 {
324 for(cnt = 0; cnt < LM_PAGE_SIZE; cnt += 4)
325 {
326 ((u32_t *) mem_virt)[cnt/4] = 0;
327 }
328
329 REG_WR(
330 pdev,
331 context.ctx_host_page_tbl_data0,
332 mem_phy.as_u32.low | CTX_HOST_PAGE_TBL_DATA0_VALID);
333 REG_WR(
334 pdev,
335 context.ctx_host_page_tbl_data1,
336 mem_phy.as_u32.high);
337 REG_WR(
338 pdev,
339 context.ctx_host_page_tbl_ctrl,
340 page_idx | CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
341
342 for(cnt = 0; cnt < 100; cnt++)
343 {
344 REG_RD(pdev, context.ctx_host_page_tbl_ctrl, &val);
345
346 if(!(val & CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
347 {
348 break;
349 }
350
351 mm_wait(pdev, 5);
352 }
353
354 DbgBreakIf(val & CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
355
356 mem_virt += LM_PAGE_SIZE;
357 LM_INC64(&mem_phy, LM_PAGE_SIZE);
358 mem_size -= LM_PAGE_SIZE;
359
360 page_idx++;
361 }
362 }
363 } /* init_context_5709 */
364
365
366
367 /*******************************************************************************
368 * Description:
 * This workaround must be applied right after a CORE clock reset
 * and before enabling other blocks that may try to allocate mbufs.
371 *
372 * Return:
373 ******************************************************************************/
374 STATIC void
alloc_bad_rbuf_5706_a0_wa(
376 lm_device_t *pdev)
377 {
378 u16_t good_mbuf[512];
379 u32_t good_mbuf_cnt;
380 u32_t val;
381
382 REG_WR(
383 pdev,
384 misc.misc_enable_set_bits,
385 MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
386
387 good_mbuf_cnt = 0;
388
389 /* Allocate a bunch of mbufs and save the good ones in an array. */
390 REG_RD_IND(pdev, OFFSETOF(reg_space_t, rbuf.rbuf_status1), &val);
391 while(val & RBUF_STATUS1_FREE_COUNT)
392 {
393 REG_WR_IND(
394 pdev,
395 OFFSETOF(reg_space_t, rbuf.rbuf_command),
396 RBUF_COMMAND_ALLOC_REQ_TE);
397
398 REG_RD_IND(
399 pdev,
400 OFFSETOF(reg_space_t, rbuf.rbuf_fw_buf_alloc),
401 &val);
402 val &= RBUF_FW_BUF_ALLOC_VALUE;
403
404 /* The addresses with Bit 9 set are bad memory blocks. */
405 if(!(val & (1 << 9)))
406 {
407 DbgBreakIf(good_mbuf_cnt >= sizeof(good_mbuf)/sizeof(u16_t));
408
409 good_mbuf[good_mbuf_cnt] = (u16_t) val;
410 good_mbuf_cnt++;
411 }
412
413 REG_RD_IND(pdev, OFFSETOF(reg_space_t, rbuf.rbuf_status1), &val);
414 }
415
    /* Free the good ones back to the mbuf pool, thus discarding
     * all the bad ones. */
418 while(good_mbuf_cnt)
419 {
420 good_mbuf_cnt--;
421
422 val = good_mbuf[good_mbuf_cnt];
423 val = (val << 9) | val | 1;
424
425 REG_WR_IND(pdev, OFFSETOF(reg_space_t, rbuf.rbuf_fw_buf_free), val);
426 }
427 } /* alloc_bad_rbuf_5706_a0_wa */
428
429
430
431 /*******************************************************************************
432 * Description:
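 *    Resets the chip core: quiesces DMA/bus mastering, handshakes the
 *    reset reason with the bootcode, issues the core clock (5706/5708)
 *    or software (5709) reset, waits for completion, and then restores
 *    the GRC windows, MAC address, backoff seed and receive mask, and
 *    applies various chip-specific workarounds.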
433 *
434 * Return:
435 ******************************************************************************/
436 void
lm_chip_reset(
438 lm_device_t *pdev,
439 lm_reason_t reason)
440 {
441 u32_t val;
442 u32_t idx;
443
444 DbgMessage(pdev, VERBOSE, "+++ lm_chip_reset\n");
445 pdev->chip_reset_cnt++;
446
    /* Quiesce the bus before a reset. */
448 if(CHIP_NUM(pdev) == CHIP_NUM_5706 || CHIP_NUM(pdev) == CHIP_NUM_5708)
449 {
450 REG_WR(
451 pdev,
452 misc.misc_enable_clr_bits,
453 MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
454 MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
455 MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE);
456 mm_wait(pdev, 5);
457 }
458 else
459 {
460 if(CHIP_ID(pdev) == CHIP_ID_5709_A0)
461 {
462 /* Disable bus_master. */
463 REG_RD_OFFSET(
464 pdev,
465 OFFSETOF(reg_space_t, pci_config.pcicfg_command),
466 &val);
467 val &= ~PCICFG_COMMAND_BUS_MASTER;
468 REG_WR_OFFSET(
469 pdev,
470 OFFSETOF(reg_space_t, pci_config.pcicfg_command),
471 val);
472 }
473 else
474 {
475 /* Disable DMA activities. */
476 REG_RD(pdev, misc.misc_new_core_ctl, &val);
477 val &= ~(1 << 16);
478 REG_WR(pdev, misc.misc_new_core_ctl, val);
479 }
480
        /* Wait until there are no pending transactions. */
482 for(idx = 0; idx < 1000; idx++)
483 {
484 REG_RD_OFFSET(
485 pdev,
486 OFFSETOF(reg_space_t, pci_config.pcicfg_device_control),
487 &val);
488 if((val & (PCICFG_DEVICE_STATUS_NO_PEND << 16)) == 0)
489 {
490 break;
491 }
492
493 mm_wait(pdev, 5);
494 }
495 }
496
497 /* Enable or disable remote phy. */
498 REG_RD_IND(
499 pdev,
500 pdev->hw_info.shmem_base +
501 OFFSETOF(shmem_region_t, drv_fw_cap_mb.fw_cap_mb),
502 &val);
503
504 if((val & CAPABILITY_SIGNATURE_MASK) == FW_CAP_SIGNATURE)
505 {
506 val = DRV_ACK_CAP_SIGNATURE;
507
508 if(pdev->params.enable_remote_phy)
509 {
510 if (LM_REASON_DIAG != reason)
511 {
512 val |= FW_CAP_REMOTE_PHY_CAPABLE;
513 }
514 else
515 {
516 val &= ~FW_CAP_REMOTE_PHY_CAPABLE;
517 }
518 }
519
520 REG_WR_IND(
521 pdev,
522 pdev->hw_info.shmem_base +
523 OFFSETOF(shmem_region_t, drv_fw_cap_mb.drv_ack_cap_mb),
524 val);
525 }
526
    /* Wait for the firmware to tell us it is ok to issue a reset. */
528 (void) fw_reset_sync(pdev, reason, DRV_MSG_DATA_WAIT0, FW_ACK_TIME_OUT_MS*1000);
529
530 /* Deposit a driver reset signature so the firmware knows
531 * that this is a soft reset. */
532 REG_WR_IND(
533 pdev,
534 pdev->hw_info.shmem_base +
535 OFFSETOF(shmem_region_t, drv_fw_mb.drv_reset_signature),
536 DRV_RESET_SIGNATURE);
537
538 /* Force the driver to wait for the acknowledgement from
539 * the firmware. */
540 pdev->vars.fw_timed_out = FALSE;
541
    /* Do a dummy read to force the chip to complete all current
     * transactions before we issue a reset.  This is a workaround
     * for A0.  If there are any pending transactions when a reset
     * occurs, the chip will lock up.  There must be one last read
     * before a core clock reset. */
547 REG_RD(pdev, misc.misc_id, &val);
548
549 /* Chip reset. */
550 if(CHIP_NUM(pdev) == CHIP_NUM_5706 || CHIP_NUM(pdev) == CHIP_NUM_5708)
551 {
552 REG_WR(
553 pdev,
554 pci_config.pcicfg_misc_config,
555 PCICFG_MISC_CONFIG_CORE_RST_REQ |
556 PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
557 PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
558
559 /* Wait for the firmware to configure for PLL bypass. This is a
560 * 5706 A0 workaround. Without the wait the system will lock up
561 * on the first register access in PCI-X mode and may intermittently
562 * do the same in PCI mode. */
563 if(CHIP_ID(pdev) == CHIP_ID_5706_A0 || CHIP_ID(pdev) == CHIP_ID_5706_A1)
564 {
            /* 15ms is how long it takes for the first stage of the
             * bootcode to load and set up the PLL bypass. */
567 for(idx = 0; idx < 1000; idx++)
568 {
569 mm_wait(pdev, 15);
570 }
571 }
572
        /* Reset takes approximately 3ms on the FPGA, which is 100 times
         * slower than the real chip.  IKOS is 10 times slower than the FPGA. */
575 for(idx = 0; idx < 5000; idx++)
576 {
577 REG_RD(pdev, pci_config.pcicfg_misc_config, &val);
578
579 mm_wait(pdev, 10);
580
581 if((val & (
582 PCICFG_MISC_CONFIG_CORE_RST_REQ |
583 PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
584 {
585 break;
586 }
587 }
588
589 DbgBreakIf(val & (
590 PCICFG_MISC_CONFIG_CORE_RST_REQ |
591 PCICFG_MISC_CONFIG_CORE_RST_BSY));
592 }
593 else
594 {
595 REG_WR(pdev, misc.misc_command, MISC_COMMAND_SW_RESET);
596
597 /* Flush the previous write and wait at least 500 nsec */
598 REG_RD( pdev, misc.misc_command, &val);
599 mm_wait(pdev, 1);
600
        /* Reset takes approximately 3ms on the FPGA, which is 100 times
         * slower than the real chip.  IKOS is 10 times slower than the FPGA. */
603 for(idx = 0; idx < 5000; idx++)
604 {
605 REG_RD(pdev, misc.misc_command, &val);
606
607 mm_wait(pdev, 10);
608
609 if((val & MISC_COMMAND_SW_RESET) == 0)
610 {
611 break;
612 }
613 }
614
615 DbgBreakIf(val & MISC_COMMAND_SW_RESET);
616
617 REG_WR(
618 pdev,
619 pci_config.pcicfg_misc_config,
620 PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
621 PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
622
623 if(CHIP_ID(pdev) == CHIP_ID_5709_A0)
624 {
625 REG_RD_OFFSET(
626 pdev,
627 OFFSETOF(reg_space_t, pci_config.pcicfg_command),
628 &val);
629 val |= PCICFG_COMMAND_BUS_MASTER;
630 REG_WR_OFFSET(
631 pdev,
632 OFFSETOF(reg_space_t, pci_config.pcicfg_command),
633 val);
634
635 /* cq#28345. */
636 REG_RD(pdev, tsch.tsch_ctx_access_cfg, &val);
637 val &= ~TSCH_CTX_ACCESS_CFG_L5_TCMD_PREFETCH_SIZE;
638 REG_WR(pdev, tsch.tsch_ctx_access_cfg, val);
639 }
640 else
641 {
642 if((reason == LM_REASON_DRIVER_RESET) || (reason == LM_REASON_DIAG))
643 {
644 /* Enable DMA activities. */
645 REG_RD(pdev, misc.misc_new_core_ctl, &val);
646 val |= (1 << 16);
647 REG_WR(pdev, misc.misc_new_core_ctl, val);
648 }
649 }
650
651 if(CHIP_ID(pdev) == CHIP_ID_5709_A0 ||
652 CHIP_ID(pdev) == CHIP_ID_5709_B0 ||
653 CHIP_ID(pdev) == CHIP_ID_5709_B1 ||
654 CHIP_ID(pdev) == CHIP_ID_5709_B2 ||
655 CHIP_ID(pdev) == CHIP_ID_5709_A1)
656 {
657 REG_RD(pdev, mq.mq_config, &val);
658 REG_WR(pdev, mq.mq_config, val | MQ_CONFIG_HALT_DIS);
659 }
660 }
661
662 DbgMessage1(pdev, INFORM, "Reset done, idx = %d\n", idx);
663
664 /* Wait for the firmware to finish its initialization. */
665 (void) fw_reset_sync(pdev, reason, DRV_MSG_DATA_WAIT1, FW_ACK_TIME_OUT_MS*1000);
666
667 /* Make sure byte swapping is properly configured. */
668 REG_RD(pdev, pci.pci_swap_diag0, &val);
669
670 DbgBreakIf(val != 0x01020304);
671
672 /* The emac block will lock up if the power_down_mode is enabled.
673 *
674 * This is now done by the bootcode.
675 *
676 * lm_mread(pdev, PHY_CTRL_REG, &val);
677 * if(val & PHY_CTRL_LOWER_POWER_MODE)
678 * {
679 * val &= ~PHY_CTRL_LOWER_POWER_MODE;
680 * lm_mwrite(pdev, PHY_CTRL_REG, val);
681 * } */
682
683 if(CHIP_NUM(pdev) == CHIP_NUM_5709)
684 {
685 /* make sure the MSI-X setting is preserved */
686 REG_WR(pdev,
687 pci.pci_grc_window_addr,
688 (pdev->hw_info.shmem_base & ~0x7fff) |
689 PCI_GRC_WINDOW_ADDR_SEP_WIN);
690
691 REG_WR(pdev,
692 pci.pci_grc_window1_addr,
693 (pdev->hw_info.shmem_base & ~0x7fff) + 0x6000 /*0x16e000 */);
694
695 REG_WR(pdev,
696 pci.pci_grc_window2_addr,
697 MSIX_TABLE_ADDR /*MSIX vector addr */);
698 REG_WR(pdev,
699 pci.pci_grc_window3_addr,
700 MSIX_PBA_ADDR /*MSIX PBA addr */);
701 REG_WR(pdev, pci.pci_msix_tbl_off_bir, PCI_GRC_WINDOW2_BASE);
702 REG_WR(pdev, pci.pci_msix_pba_off_bit, PCI_GRC_WINDOW3_BASE);
703 if(pdev->params.ena_large_grc_timeout)
704 {
            /* This workaround causes IBM minnow to reboot randomly. */
706 /* set large GRC timeout in MSIX mode */
707 REG_RD(pdev, misc.misc_eco_hw_ctl, &val);
708 val |= MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN;
709 REG_WR(pdev, misc.misc_eco_hw_ctl, val);
710 }
711 else
712 {
713 REG_RD(pdev, misc.misc_eco_hw_ctl, &val);
714 val &= ~MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN;
715 REG_WR(pdev, misc.misc_eco_hw_ctl, val);
716 }
717 }
718 else
719 {
720 /* Default 32k window. */
721 REG_WR(pdev, pci.pci_grc_window_addr, pdev->hw_info.shmem_base & ~0x7fff);
722 }
723
724 /* 5706A0 workaround. */
725 if(CHIP_ID(pdev) == CHIP_ID_5706_A0)
726 {
        /* Adjust the voltage regulator two steps lower.  The default
         * value of this register is 0x0000000e. */
729 REG_WR(pdev, misc.misc_vreg_control, 0x000000fa);
730
731 /* Remove bad rbuf memory from the free pool. */
732 alloc_bad_rbuf_5706_a0_wa(pdev);
733 }
734
735 REG_WR(
736 pdev,
737 timer.timer_sw_tmr_reload_value1,
738 pdev->params.tmr_reload_value1);
739
740 (void) lm_set_mac_addr(pdev, 0x0, pdev->params.mac_addr);
741
742 val = pdev->params.mac_addr[0] +
743 (pdev->params.mac_addr[1] << 8) +
744 (pdev->params.mac_addr[2] << 16) +
745 pdev->params.mac_addr[3] +
746 (pdev->params.mac_addr[4] << 8) +
747 (pdev->params.mac_addr[5] << 16);
748 REG_WR(pdev, emac.emac_backoff_seed, val);
749
750 (void) lm_set_rx_mask(
751 pdev,
752 RX_FILTER_USER_IDX0,
753 pdev->rx_info.mask[RX_FILTER_USER_IDX0]);
754
    /* The firmware relies on the driver to issue a periodic pulse to
     * determine when to enter OS-absent mode.  During debugging we may
     * not want the firmware to go into this mode. */
758 if(pdev->params.test_mode & TEST_MODE_DRIVER_PULSE_ALWAYS_ALIVE)
759 {
760 pdev->vars.drv_pulse_wr_seq++;
761
762 val = pdev->vars.drv_pulse_wr_seq | DRV_PULSE_ALWAYS_ALIVE;
763
764 REG_WR_IND(
765 pdev,
766 pdev->hw_info.shmem_base +
767 OFFSETOF(shmem_region_t, drv_fw_mb.drv_pulse_mb),
768 val);
769 }
770 } /* lm_chip_reset */
771
772
773
774 /*******************************************************************************
775 * Description:
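 *    Links the pages of a buffer descriptor chain into a ring: the
 *    "next" entry of each page points (physical and virtual address)
 *    to the following page, and the last page points back to the first.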
776 *
777 * Return:
778 ******************************************************************************/
779 void
lm_setup_bd_chain_ring(
781 u8_t *mem_virt,
782 lm_address_t mem_phy,
783 u32_t page_cnt)
784 {
785 lm_address_t start_mem_phy;
786 u8_t *start_mem_virt;
787 tx_bd_next_t *next_ptr;
788 u32_t idx;
789
790 DbgBreakIf(
791 ((u32_t) PTR_SUB(mem_virt, 0) & LM_PAGE_MASK) !=
792 (mem_phy.as_u32.low & LM_PAGE_MASK));
793
794 start_mem_phy = mem_phy;
795 start_mem_virt = mem_virt;
796
797 for(idx = 0; idx < page_cnt-1; idx++)
798 {
799 /* Increment mem_phy to the next page. */
800 LM_INC64(&mem_phy, LM_PAGE_SIZE);
801
802 next_ptr = &((tx_bd_next_t *) mem_virt)[MAX_BD_PER_PAGE];
803
804 /* Initialize the physical address of the next bd chain. */
805 next_ptr->tx_bd_next_paddr_hi = mem_phy.as_u32.high;
806 next_ptr->tx_bd_next_paddr_lo = mem_phy.as_u32.low;
807
808 /* Initialize the virtual address of the next bd chain. */
809 *((u8_t **) next_ptr->tx_bd_next_reserved) = mem_virt + LM_PAGE_SIZE;
810
811 /* Move to the next bd chain. */
812 mem_virt += LM_PAGE_SIZE;
813 }
814
815 next_ptr = &((tx_bd_next_t *) mem_virt)[MAX_BD_PER_PAGE];
816
817 next_ptr->tx_bd_next_paddr_hi = start_mem_phy.as_u32.high;
818 next_ptr->tx_bd_next_paddr_lo = start_mem_phy.as_u32.low;
819 *((u8_t **) next_ptr->tx_bd_next_reserved) = start_mem_virt;
820 } /* lm_setup_bd_chain_ring */
821
822
823
824 #ifndef EXCLUDE_KQE_SUPPORT
825 /*******************************************************************************
826 * Description:
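 *    Fills a page table with one physical page address per page,
 *    written as high word then low word, starting at page_base_phy and
 *    advancing by LM_PAGE_SIZE.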
827 *
828 * Return:
829 ******************************************************************************/
830 STATIC void
setup_page_table(
832 void *page_table,
833 u32_t page_cnt,
834 lm_address_t page_base_phy)
835 {
836 u32_t *page_entry;
837
838 page_entry = (u32_t *) page_table;
839 while(page_cnt)
840 {
841 /* Each entry needs to be in big endian format. */
842 *page_entry = page_base_phy.as_u32.high;
843 page_entry++;
844 *page_entry = page_base_phy.as_u32.low;
845 page_entry++;
846
847 LM_INC64(&page_base_phy, LM_PAGE_SIZE);
848
849 page_cnt--;
850 }
851 } /* setup_page_table */
852 #endif
853
854
855 #if INCLUDE_OFLD_SUPPORT
856 /*******************************************************************************
857 * Description:
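 *    Post-reset setup of the L4 offload resources: initializes the
 *    generic BD chain and its context, optionally the hcopy chain,
 *    programs the host coalescing statistics selectors and writes the
 *    enable_fast_iscsi_response setting to the COM scratchpad.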
858 *
859 * Return:
860 ******************************************************************************/
861 STATIC void
l4_reset_setup(
863 lm_device_t *pdev)
864 {
865 u32_t val;
866
867 lm_setup_bd_chain_ring(
868 (u8_t *) pdev->ofld.gen_chain.bd_chain_virt,
869 pdev->ofld.gen_chain.bd_chain_phy,
870 pdev->params.gen_bd_page_cnt);
871
872 pdev->ofld.gen_chain.prod_idx = 0;
873 pdev->ofld.gen_chain.prod_bseq = 0;
874 pdev->ofld.gen_chain.prod_bd = pdev->ofld.gen_chain.bd_chain_virt;
875
876 /* Don't count the last bd of a BD page. A full BD chain must
877 * have at least one empty entry. */
878 pdev->ofld.gen_chain.bd_left = pdev->params.gen_bd_page_cnt *
879 MAX_BD_PER_PAGE - 1;
880
881 DbgMessage2(pdev, INFORMrs, "gen_chain %p, bd_left %d\n",
882 pdev->ofld.gen_chain.bd_chain_virt,
883 pdev->ofld.gen_chain.bd_left);
884
885 /* Initialize the type, size, bd_pre_read. */
886 val = L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE << 24;
887 val |= (((sizeof(l2_bd_chain_context_t) + 0x1f) & ~0x1f) / 0x20) << 16;
888 val |= 0x2 << 8;
889 CTX_WR(
890 pdev,
891 pdev->ofld.gen_chain.cid_addr,
892 WORD_ALIGNED_OFFSETOF(l2_bd_chain_context_t, l2ctx_ctx_type),
893 val);
894
895 val = pdev->ofld.gen_chain.bd_chain_phy.as_u32.high;
896 CTX_WR(
897 pdev,
898 pdev->ofld.gen_chain.cid_addr,
899 WORD_ALIGNED_OFFSETOF(l2_bd_chain_context_t, l2ctx_nx_bdhaddr_hi),
900 val);
901
902 val = pdev->ofld.gen_chain.bd_chain_phy.as_u32.low;
903 CTX_WR(
904 pdev,
905 pdev->ofld.gen_chain.cid_addr,
906 WORD_ALIGNED_OFFSETOF(l2_bd_chain_context_t, l2ctx_nx_bdhaddr_lo),
907 val);
908
909 /* Set up the hcopy chain. */
910 if(pdev->params.hcopy_desc_cnt)
911 {
912 lm_setup_bd_chain_ring(
913 (u8_t *) pdev->ofld.hcopy_chain.bd_chain_virt,
914 pdev->ofld.hcopy_chain.bd_chain_phy,
915 pdev->params.hcopy_bd_page_cnt);
916
917 pdev->ofld.hcopy_chain.prod_bd =
918 pdev->ofld.hcopy_chain.bd_chain_virt;
919 pdev->ofld.hcopy_chain.prod_idx = 0;
920 pdev->ofld.hcopy_chain.con_idx = 0;
921 pdev->ofld.hcopy_chain.prod_bseq = 0;
922
923 /* Don't count the last bd of a BD page. A full BD chain must
924 * have at least one empty entry. */
925 pdev->ofld.hcopy_chain.bd_left = pdev->params.hcopy_bd_page_cnt *
926 MAX_BD_PER_PAGE - 1;
927
928 val = L4CTX_TYPE_TYPE_L2 << 24;
929 val |= (((sizeof(l4_context_t) + 0x1f) & ~0x1f) / 0x20) << 16;
930 CTX_WR(
931 pdev,
932 pdev->ofld.hcopy_chain.cid_addr,
933 WORD_ALIGNED_OFFSETOF(l4_context_t, l4ctx_ctx_type),
934 val);
935
936 val = (CCELL_CMD_TYPE_TYPE_L2 | ((LM_PAGE_BITS-8) << 4)) << 24;
937 val |= 8 << 16;
938 CTX_WR(
939 pdev,
940 pdev->ofld.hcopy_chain.cid_addr,
941 WORD_ALIGNED_OFFSETOF(l4_context_t, l4ctx_cmd),
942 val);
943
944 val = pdev->ofld.hcopy_chain.bd_chain_phy.as_u32.high;
945 CTX_WR(
946 pdev,
947 pdev->ofld.hcopy_chain.cid_addr,
948 WORD_ALIGNED_OFFSETOF(l4_context_t, l4ctx_cmd) +
949 WORD_ALIGNED_OFFSETOF(tcp_context_cmd_cell_te_t,
950 ccell_tbdr_bhaddr.hi),
951 val);
952
953 val = pdev->ofld.hcopy_chain.bd_chain_phy.as_u32.low;
954 CTX_WR(
955 pdev,
956 pdev->ofld.hcopy_chain.cid_addr,
957 WORD_ALIGNED_OFFSETOF(l4_context_t, l4ctx_cmd) +
958 WORD_ALIGNED_OFFSETOF(tcp_context_cmd_cell_te_t,
959 ccell_tbdr_bhaddr.lo),
960 val);
961 }
962
963 /* Setup statistics mapping. */
964 REG_WR(
965 pdev,
966 hc.hc_stat_gen_sel_0,
967 HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT0_TE | /* 0 - inseg */
968 (HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT1_TE << 8) | /* 1 - inerr */
969 (HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT2_TE << 16) | /* 2 - inrecv */
970 (HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT3_TE << 24)); /* 3 - inhdrerr */
971
972 REG_WR(
973 pdev,
974 hc.hc_stat_gen_sel_1,
975 HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT4_TE | /* 4 - indiscard */
976 (HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT5_TE << 8) | /* 5 - indeliver */
977 (HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT0_TE << 16) | /* 6 - outseg */
978 (HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT1_TE << 24)); /* 7 - retrans */
979
980 REG_WR(
981 pdev,
982 hc.hc_stat_gen_sel_2,
983 HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT2_TE | /* 8 - outreset */
984 (HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT3_TE << 8) | /* 9 - outreq */
985 (HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT4_TE << 16) | /* 10 - outdiscrd */
986 (HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT5_TE << 24)); /* 11 - outnorte */
987
988 /* set enable_iscsi_fast_response. */
989 REG_WR_IND(
990 pdev,
991 OFFSETOF(reg_space_t, com.com_scratch[0])+COM_HSI_OFFSETOFF(enable_fast_iscsi_response),
992 pdev->params.enable_fir);
993 } /* l4_reset_setup */
994 #endif
995
996
997
998 /*******************************************************************************
999 * Description:
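 *    Resets the software state of every L2 transmit chain and programs
 *    its context (type, command cell and BD chain base address) using
 *    the chip-appropriate context offsets.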
1000 *
1001 * Return:
1002 ******************************************************************************/
1003 STATIC void
init_l2txq(
1005 lm_device_t *pdev)
1006 {
1007 lm_tx_chain_t *txq;
1008 u32_t bd_page_cnt;
1009 u32_t offset;
1010 u32_t idx;
1011 u32_t val;
1012
1013 for(idx = 0; idx < sizeof(lm_tx_stats_t)/sizeof(u32_t); idx++)
1014 {
1015 ((u32_t *) &pdev->tx_info.stats)[idx] = 0;
1016 }
1017
1018 for(idx = 0; idx < pdev->tx_info.num_txq; idx++)
1019 {
1020 txq = &pdev->tx_info.chain[idx];
1021
1022 bd_page_cnt = pdev->params.l2_tx_bd_page_cnt[txq->idx];
1023
1024 txq->prod_idx = 0;
1025 txq->con_idx = 0;
1026 txq->prod_bseq = 0;
1027 txq->prod_bd = txq->bd_chain_virt;
1028 txq->bd_left = bd_page_cnt * MAX_BD_PER_PAGE - 1;
1029
1030 if(bd_page_cnt == 0)
1031 {
1032 continue;
1033 }
1034
1035 lm_setup_bd_chain_ring(
1036 (u8_t *) txq->bd_chain_virt,
1037 txq->bd_chain_phy,
1038 bd_page_cnt);
1039
1040 #ifndef L2_ONLY
1041 val = (L4CTX_TYPE_TYPE_L2 << 24) |
1042 (((sizeof(l4_context_t) + 0x1f) & ~0x1f) / 0x20) << 16;
1043 #else
        // This is equivalent to the above, but some constants/structures
        // are not defined for Solaris.
1046 val = (0x10 << 24) |
1047 (((80 * sizeof(u32_t) + 0x1f) & ~0x1f) / 0x20) << 16;
1048 #endif
1049
1050 if (CHIP_NUM(pdev) == CHIP_NUM_5709)
1051 {
1052 offset = 0x80;
1053 }
1054 else
1055 {
1056 // offset = WORD_ALIGNED_OFFSETOF(l4_context_t, l4ctx_ctx_type);
1057 offset = 0;
1058 }
1059
1060 CTX_WR(pdev, txq->cid_addr, offset, val);
1061
1062 if (CHIP_NUM(pdev) == CHIP_NUM_5709)
1063 {
1064 offset = 0x240;
1065 }
1066 else
1067 {
1068 // offset = WORD_ALIGNED_OFFSETOF(l4_context_t, l4ctx_cmd);
1069 offset = 34*sizeof(u32_t);
1070 }
1071
1072 val = (CCELL_CMD_TYPE_TYPE_L2 | ((LM_PAGE_BITS-8) << 4)) << 24;
1073 val |= 8 << 16;
1074 CTX_WR(pdev, txq->cid_addr, offset, val);
1075
1076 val = txq->bd_chain_phy.as_u32.high;
1077 CTX_WR(
1078 pdev,
1079 txq->cid_addr,
1080 offset + WORD_ALIGNED_OFFSETOF(
1081 tcp_context_cmd_cell_te_t, ccell_tbdr_bhaddr.hi),
1082 val);
1083
1084 val = txq->bd_chain_phy.as_u32.low;
1085 CTX_WR(
1086 pdev,
1087 txq->cid_addr,
1088 offset + WORD_ALIGNED_OFFSETOF(
1089 tcp_context_cmd_cell_te_t, ccell_tbdr_bhaddr.lo),
1090 val);
1091
1092 }
1093 } /* init_l2txq */
1094
1095
1096
1097 /*******************************************************************************
1098 * Description:
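 *    Resets the software state of every L2 receive chain and programs
 *    its BD chain context, restoring the previous max packet length if
 *    one was saved (e.g. across hibernation) or defaulting to MTU plus
 *    4 bytes of L2 CRC.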
1099 *
1100 * Return:
1101 ******************************************************************************/
1102 STATIC void
init_l2rxq(
1104 lm_device_t *pdev)
1105 {
1106 lm_rx_chain_t *rxq;
1107 u32_t bd_page_cnt;
1108 u32_t idx;
1109 u32_t val;
1110
1111 for(idx = 0; idx < sizeof(lm_rx_stats_t)/sizeof(u32_t); idx++)
1112 {
1113 ((u32_t *) &pdev->rx_info.stats)[idx] = 0;
1114 }
1115
1116 for(idx = 0; idx < pdev->rx_info.num_rxq; idx++)
1117 {
1118 rxq = &pdev->rx_info.chain[idx];
1119
1120 bd_page_cnt = pdev->params.l2_rx_bd_page_cnt[rxq->idx];
1121
1122 rxq->prod_idx = 0;
1123 rxq->con_idx = 0;
1124 rxq->prod_bseq = 0;
1125 rxq->prod_bd = rxq->bd_chain_virt;
1126 rxq->bd_left = bd_page_cnt * MAX_BD_PER_PAGE - 1;
1127
1128 if(bd_page_cnt == 0)
1129 {
1130 continue;
1131 }
1132
1133 lm_setup_bd_chain_ring(
1134 (u8_t *) rxq->bd_chain_virt,
1135 rxq->bd_chain_phy,
1136 bd_page_cnt);
1137
1138 val = L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE << 24;
1139 val |= (((sizeof(l2_bd_chain_context_t) + 0x1f) & ~0x1f) / 0x20) << 16;
1140 val |= 0x02 << 8;
1141 CTX_WR(
1142 pdev,
1143 rxq->cid_addr,
1144 WORD_ALIGNED_OFFSETOF(l2_bd_chain_context_t, l2ctx_ctx_type),
1145 val);
1146
1147 val = rxq->bd_chain_phy.as_u32.high;
1148 CTX_WR(
1149 pdev,
1150 rxq->cid_addr,
1151 WORD_ALIGNED_OFFSETOF(l2_bd_chain_context_t, l2ctx_nx_bdhaddr_hi),
1152 val);
1153
1154 val = rxq->bd_chain_phy.as_u32.low;
1155 CTX_WR(
1156 pdev,
1157 rxq->cid_addr,
1158 WORD_ALIGNED_OFFSETOF(l2_bd_chain_context_t, l2ctx_nx_bdhaddr_lo),
1159 val);
1160
        // In case we are coming out of hibernation, we need to restore the
        // previous MTU setting.  Otherwise, initialize the max packet
        // length to the default (i.e. the initial power-up value).
1164 CTX_WR(
1165 pdev,
1166 rxq->cid_addr,
1167 WORD_ALIGNED_OFFSETOF(l2_bd_chain_context_t, l2ctx_max_pkt_len),
1168 rxq->max_pkt_len ?
1169 rxq->max_pkt_len:
1170 pdev->params.mtu + 4); // + 4 L2CRC
1171
1172
1173 }
1174 } /* init_l2rxq */
1175
1176
1177
1178 #ifndef EXCLUDE_KQE_SUPPORT
1179 /*******************************************************************************
1180 * Description:
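 *    Initializes the kernel work queue (KWQ) and kernel completion
 *    queue (KCQ): builds their page tables and programs the krnlq
 *    contexts with the queue type, element counts and page table
 *    addresses.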
1181 *
1182 * Return:
1183 ******************************************************************************/
1184 STATIC void
init_kq(
1186 lm_device_t *pdev)
1187 {
1188 lm_kq_info_t *kq;
1189 u32_t page_cnt;
1190 u32_t val;
1191
1192 kq = &pdev->kq_info;
1193
1194 /* initialize kwq. */
1195 page_cnt = pdev->params.kwq_page_cnt;
1196 if(page_cnt)
1197 {
1198 kq->kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
1199 kq->kwqe_left = (LM_PAGE_SIZE/sizeof(kwqe_t)) * page_cnt - 1;
1200 kq->kwq_last_qe = kq->kwq_virt + kq->kwqe_left;
1201
1202 setup_page_table(kq->kwq_pgtbl_virt, page_cnt, kq->kwq_phy);
1203
1204 kq->kwq_prod_idx = 0;
1205 kq->kwq_con_idx = 0;
1206 kq->kwq_prod_qe = kq->kwq_virt;
1207 kq->kwq_con_qe = kq->kwq_virt;
1208 kq->kwqe_left = (LM_PAGE_SIZE/sizeof(kwqe_t)) * page_cnt - 1;
1209
1210 val = KRNLQ_TYPE_TYPE_KRNLQ << 24;
1211 val |= (((sizeof(krnlq_context_t) + 0x1f) & ~0x1f) / 0x20) << 16;
1212 val |= LM_PAGE_BITS-8;
1213 val |= KRNLQ_FLAGS_QE_SELF_SEQ;
1214 CTX_WR(
1215 pdev,
1216 kq->kwq_cid_addr,
1217 WORD_ALIGNED_OFFSETOF(krnlq_context_t, krnlq_type),
1218 val);
1219
1220 val = (LM_PAGE_SIZE/sizeof(kwqe_t) - 1) << 16;
1221 CTX_WR(
1222 pdev,
1223 kq->kwq_cid_addr,
1224 WORD_ALIGNED_OFFSETOF(krnlq_context_t, krnlq_qe_self_seq_max),
1225 val);
1226
1227 val = (LM_PAGE_SIZE/sizeof(kwqe_t)) << 16;
1228 val |= pdev->params.kwq_page_cnt;
1229 CTX_WR(
1230 pdev,
1231 kq->kwq_cid_addr,
1232 WORD_ALIGNED_OFFSETOF(krnlq_context_t, krnlq_pgtbl_npages),
1233 val);
1234
1235 val = kq->kwq_pgtbl_phy.as_u32.high;
1236 CTX_WR(
1237 pdev,
1238 kq->kwq_cid_addr,
1239 WORD_ALIGNED_OFFSETOF(krnlq_context_t, krnlq_pgtbl_haddr_hi),
1240 val);
1241
1242 val = kq->kwq_pgtbl_phy.as_u32.low;
1243 CTX_WR(
1244 pdev,
1245 kq->kwq_cid_addr,
1246 WORD_ALIGNED_OFFSETOF(krnlq_context_t, krnlq_pgtbl_haddr_lo),
1247 val);
1248 }
1249
1250 /* initialize kcq. */
1251 page_cnt = pdev->params.kcq_page_cnt;
1252 if(page_cnt)
1253 {
1254 kq->kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
1255 kq->kcq_last_qe = kq->kcq_virt +
1256 (LM_PAGE_SIZE/sizeof(kcqe_t)) * page_cnt - 1;
1257
1258 setup_page_table(kq->kcq_pgtbl_virt, page_cnt, kq->kcq_phy);
1259
1260 kq->kcq_con_idx = 0;
1261 kq->history_kcq_con_idx = 0;
1262 kq->kcq_con_qe = kq->kcq_virt;
1263 kq->history_kcq_con_qe = kq->kcq_virt;
1264
1265 val = KRNLQ_TYPE_TYPE_KRNLQ << 24;
1266 val |= (((sizeof(krnlq_context_t) + 0x1f) & ~0x1f) / 0x20) << 16;
1267 val |= LM_PAGE_BITS-8;
1268 val |= KRNLQ_FLAGS_QE_SELF_SEQ;
1269 CTX_WR(
1270 pdev,
1271 kq->kcq_cid_addr,
1272 WORD_ALIGNED_OFFSETOF(krnlq_context_t, krnlq_type),
1273 val);
1274
1275 val = (LM_PAGE_SIZE/sizeof(kwqe_t) - 1) << 16;
1276 CTX_WR(
1277 pdev,
1278 kq->kcq_cid_addr,
1279 WORD_ALIGNED_OFFSETOF(krnlq_context_t, krnlq_qe_self_seq_max),
1280 val);
1281
1282 val = (LM_PAGE_SIZE/sizeof(kcqe_t)) << 16;
1283 val |= pdev->params.kcq_page_cnt;
1284 CTX_WR(
1285 pdev,
1286 kq->kcq_cid_addr,
1287 WORD_ALIGNED_OFFSETOF(krnlq_context_t, krnlq_pgtbl_npages),
1288 val);
1289
1290 val = kq->kcq_pgtbl_phy.as_u32.high;
1291 CTX_WR(
1292 pdev,
1293 kq->kcq_cid_addr,
1294 WORD_ALIGNED_OFFSETOF(krnlq_context_t, krnlq_pgtbl_haddr_hi),
1295 val);
1296
1297 val = kq->kcq_pgtbl_phy.as_u32.low;
1298 CTX_WR(
1299 pdev,
1300 kq->kcq_cid_addr,
1301 WORD_ALIGNED_OFFSETOF(krnlq_context_t, krnlq_pgtbl_haddr_lo),
1302 val);
1303 }
1304 } /* init_kq */
1305 #endif /* EXCLUDE_KQE_SUPPORT */
1306
1307 /*******************************************************************************
1308 * Description: Determines the flow control, MAC, and CU trip values
1309 *
1310 * xoff = processing_q_delay + propagation_delay + response_delay +
1311 * propagation_delay for return path + drop_margin_delay
1312 * xon = xoff + (mtu/mbuf_size)
1313 *
1314 * MAC_drop = drop_margin_low*mtu/mbuf_size
1315 * MAC_keep = drop_margin_high*mtu/mbuf_size
1316 *
1317 * CU_drop = (drop_margin_low+1)*mtu/mbuf_size
1318 * CU_keep = (drop_margin_high)*mtu/mbuf_size
1319 *
 * processing_q_delay = ((mtu+20)/(64+20)) + 1
1321 * propagation_delay = 1
1322 * response_time = 2 (quanta)
1323 * mbuf_size = 128
1324 * response_delay = (response_time*512)/(mbuf_size*8) + (mtu/mbuf_size)
1325 * drop_margin_low = 0.5
1326 * drop_margin_high = 2.5
1327 * drop_margin_mid = 1.5
1328 * drop_margin_delay = (mtu*drop_margin_mid/mbuf_size)
1329 *
1330 * Table:
1331 *
1332 * Return: Flow control, MAC, and CU trip values
1333 ******************************************************************************/
1334 typedef enum
1335 {
1336 TRIP_FLOW = 0,
1337 TRIP_MAC = 1,
1338 TRIP_CU = 2
1339 } trip_type_t;
1340
1341 STATIC void
get_trip_val(
1343 trip_type_t type,
1344 u32_t mtu,
1345 u32_t *val,
1346 u8_t enable_cu_rate_limiter,
1347 u8_t mbuf_cnt_adj)
1348 {
1349 #define NONJF_MTU_SIZE 1500
1350 #define MTU_STEP 500
1351
1352 const u32_t trip_tbl[3][2] = {
1353 /* Base value, Increment */
1354 { 0x00410036, 0x00140010 }, /* XOFF/XON setting */
1355 { 0x001e0006, 0x000a0002 }, /* MAC drop/keep trip setting */
1356 { 0x005e0052, 0x000a0006 } /* CU drop/keep trip setting */
1357 };
1358
1359 const u32_t isolate_rbuf_trip_tbl[3][2] = {
1360 /* Base value, Increment */
1361 { 0x0089007e, 0x00140010 }, /* XOFF/XON setting */
1362 { 0x0066004e, 0x000a0002 }, /* MAC drop/keep trip setting */
1363 { 0x0066004e, 0x000a0006 } /* CU drop/keep trip setting */
1364 };
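    /* Each table entry appears to pack two 16-bit thresholds: the upper
     * halfword is the XOFF/drop value, the lower halfword the XON/keep
     * value.  The increment row is applied once per 500-byte MTU step
     * for jumbo MTUs in the loop below. */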
1365
1366 if(type > TRIP_CU)
1367 type = 0; /* Crash prevention */
1368
1369 *val = 0;
1370 while(mtu > NONJF_MTU_SIZE + MTU_STEP)
1371 {
1372 if(enable_cu_rate_limiter)
1373 *val += isolate_rbuf_trip_tbl[type][1];
1374 else
1375 *val += trip_tbl[type][1];
1376
1377 mtu -= MTU_STEP;
1378 }
1379 if(enable_cu_rate_limiter)
1380 *val = *val + (isolate_rbuf_trip_tbl[type][0] - (mbuf_cnt_adj<<16 | mbuf_cnt_adj));
1381 else
1382 *val = *val + trip_tbl[type][0];
1383
1384 } /* get_trip_val */
1385
1386
1387
1388 /*******************************************************************************
1389 * Description:
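 *    Zeroes the status block pointed to by sb_ptr; the size depends on
 *    whether the chip uses the combined (5709) or the legacy status
 *    block layout.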
1390 *
1391 * Return:
1392 ******************************************************************************/
1393 STATIC void
zero_out_sb(
1395 lm_device_t *pdev,
1396 u32_t *sb_ptr)
1397 {
1398 u32_t sb_size;
1399 u32_t offset;
1400
1401 if(CHIP_NUM(pdev) == CHIP_NUM_5709)
1402 {
1403 sb_size = sizeof(status_blk_combined_t);
1404 }
1405 else
1406 {
1407 sb_size = sizeof(status_block_t);
1408 }
1409
1410 offset = 0;
1411
1412 while(offset < sb_size)
1413 {
1414 *sb_ptr = 0;
1415 sb_ptr++;
1416 offset += sizeof(u32_t);
1417 }
1418 } /* zero_out_sb */
1419
1420
1421
1422 /*******************************************************************************
1423 * Description:
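 *    Reduces the depth of the flow-through queues via the *_ftq_ctl
 *    registers; only intended for the IKOS and FPGA emulation
 *    platforms.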
1424 *
1425 * Return:
1426 ******************************************************************************/
1427 STATIC void
reduce_ftq_depth(
1429 lm_device_t *pdev)
1430 {
1431 DbgBreakIf(CHIP_REV(pdev) != CHIP_REV_IKOS &&
1432 CHIP_REV(pdev) != CHIP_REV_FPGA);
1433
1434 REG_WR_IND(
1435 pdev,
1436 OFFSETOF(reg_space_t, com.com_comxq_ftq_ctl),
1437 2 << 12);
1438 REG_WR_IND(
1439 pdev,
1440 OFFSETOF(reg_space_t, com.com_comtq_ftq_ctl),
1441 2 << 12);
1442 REG_WR_IND(
1443 pdev,
1444 OFFSETOF(reg_space_t, com.com_comq_ftq_ctl),
1445 2 << 12);
1446
1447 REG_WR_IND(
1448 pdev,
1449 OFFSETOF(reg_space_t, cp.cp_cpq_ftq_ctl),
1450 4 << 12);
1451
1452 REG_WR(pdev, csch.csch_ch_ftq_ctl, 8 << 12);
1453
1454 REG_WR_IND(
1455 pdev,
1456 OFFSETOF(reg_space_t, mcp.mcp_mcpq_ftq_ctl),
1457 32 << 12);
1458
1459 REG_WR(pdev, rdma.rdma_ftq_ctl, 2 << 12);
1460
1461 REG_WR(pdev, rlup.rlup_ftq_ctl, 8 << 12);
1462
1463 REG_WR(pdev, rv2p.rv2p_pftq_ctl, 2 << 12);
1464 REG_WR(pdev, rv2p.rv2p_tftq_ctl, 2 << 12);
1465 REG_WR(pdev, rv2p.rv2p_mftq_ctl, 4 << 12);
1466
1467 REG_WR_IND(
1468 pdev,
1469 OFFSETOF(reg_space_t, rxp.rxp_cftq_ctl),
1470 8 << 12);
1471 REG_WR_IND(
1472 pdev,
1473 OFFSETOF(reg_space_t, rxp.rxp_ftq_ctl),
1474 8 << 12);
1475
1476 REG_WR_IND(
1477 pdev,
1478 OFFSETOF(reg_space_t, tas.tas_ftq_ctl),
1479 16 << 12);
1480
1481 REG_WR(pdev, tbdr.tbdr_ftq_ctl, 2 << 12);
1482
1483 REG_WR(pdev, tdma.tdma_ftq_ctl, 2 << 12);
1484
1485 REG_WR_IND(
1486 pdev,
1487 OFFSETOF(reg_space_t, tpat.tpat_ftq_ctl),
1488 16 << 12);
1489
1490 REG_WR(pdev, tsch.tsch_ftq_ctl, 2 << 12);
1491
1492 REG_WR_IND(
1493 pdev,
1494 OFFSETOF(reg_space_t, txp.txp_ftq_ctl),
1495 2 << 12);
1496 } /* reduce_ftq_depth */
1497
1498
1499
1500 /*******************************************************************************
1501 * Description:
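 *    Configures the 5709 GRC windows and the MSI-X/MSI machinery for
 *    the selected interrupt mode: enables all MSI-X vectors, enables 16
 *    MSI messages, or (SIMD mode) replicates vector 0's address/data to
 *    vectors 1 through 8.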
1502 *
1503 * Return:
1504 ******************************************************************************/
1505 STATIC void
init_5709_for_msix(
1507 lm_device_t *pdev)
1508 {
1509 u32_t val;
1510
1511 DbgBreakIf(CHIP_NUM(pdev) != CHIP_NUM_5709);
1512
1513 REG_WR(pdev,
1514 pci.pci_grc_window_addr,
1515 (pdev->hw_info.shmem_base & ~0x7fff) |
1516 PCI_GRC_WINDOW_ADDR_SEP_WIN);
1517
1518 REG_WR(pdev,
1519 pci.pci_grc_window1_addr,
1520 (pdev->hw_info.shmem_base & ~0x7fff) + 0x6000 /*0x16e000 */);
1521
1522 REG_RD(pdev, pci_config.pcicfg_msix_control, &val);
1523 switch(pdev->vars.interrupt_mode)
1524 {
1525 case IRQ_MODE_MSIX_BASED:
1526 /* enable all msix vectors */
1527 REG_WR(pdev,
1528 hc.hc_msix_bit_vector,
1529 HC_MSIX_BIT_VECTOR_VAL);
1530 break;
1531
1532 case IRQ_MODE_MSI_BASED:
            /* Enable 16 messages so the hardware will
             * generate a maximum of 9 messages.
             */
1536 REG_RD(pdev,
1537 pci_config.pcicfg_msi_control,
1538 &val);
1539 val &= PCICFG_MSI_CONTROL_MENA;
1540 val |= PCICFG_MSI_CONTROL_MENA_16;
1541 REG_WR(pdev,
1542 pci_config.pcicfg_msi_control,
1543 (u16_t)val);
1544 break;
1545
1546 case IRQ_MODE_SIMD:
1547 /* tell the chip that we are in single isr/multiple dpc mode */
1548 if(val & PCICFG_MSIX_CONTROL_MSIX_ENABLE)
1549 {
1550 u32_t idx, addr_l, addr_h, vec_data;
1551
1552 REG_WR(pdev,
1553 hc.hc_msix_bit_vector,
1554 HC_MSIX_BIT_VECTOR_VAL);
1555
1556 REG_RD_IND(
1557 pdev,
1558 OFFSETOF(reg_space_t, hc1.hc1_msix_vector0_addr_l),
1559 &addr_l);
1560 REG_RD_IND(
1561 pdev,
1562 OFFSETOF(reg_space_t, hc1.hc1_msix_vector0_addr_h),
1563 &addr_h);
1564 REG_RD_IND(
1565 pdev,
1566 OFFSETOF(reg_space_t, hc1.hc1_msix_vector0_data),
1567 &vec_data);
1568 for(idx = 1; idx < 9; idx++)
1569 {
1570 REG_WR_IND(
1571 pdev,
1572 OFFSETOF(reg_space_t,
1573 hc1.hc1_msix_vector0_addr_l) +
1574 idx*4*sizeof(u32_t),
1575 addr_l);
1576 REG_WR_IND(
1577 pdev,
1578 OFFSETOF(reg_space_t,
1579 hc1.hc1_msix_vector0_addr_h) +
1580 idx*4*sizeof(u32_t),
1581 addr_h);
1582 REG_WR_IND(
1583 pdev,
1584 OFFSETOF(reg_space_t,
1585 hc1.hc1_msix_vector0_data) +
1586 idx*4*sizeof(u32_t),
1587 vec_data);
1588 }
1589 }
1590 else
1591 {
1592 REG_RD(pdev,
1593 pci_config.pcicfg_msi_control,
1594 &val);
1595 val &= ~PCICFG_MSI_CONTROL_MENA;
1596 REG_WR(pdev,
1597 pci_config.pcicfg_msi_control,
1598 (u16_t)val);
1599 }
1600 break;
1601
1602 case IRQ_MODE_LINE_BASED:
1603 /* do nothing */
1604 break;
1605
1606 default:
1607 DbgBreakMsg("Unknown interrupt mode\n");
1608 break;
1609 }
1610
1611 REG_WR(pdev,
1612 pci.pci_grc_window2_addr,
1613 MSIX_TABLE_ADDR /*MSIX vector addr */);
1614 REG_WR(pdev,
1615 pci.pci_grc_window3_addr,
1616 MSIX_PBA_ADDR /*MSIX PBA addr */);
1617 REG_WR(pdev, pci.pci_msix_tbl_off_bir, PCI_GRC_WINDOW2_BASE);
1618 REG_WR(pdev, pci.pci_msix_pba_off_bit, PCI_GRC_WINDOW3_BASE);
1619 } /* init_5709_for_msix */
1620
1621
1622
1623 /*******************************************************************************
1624 * Description:
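 *    Programs the host coalescing block: timer modes, the timer abort
 *    attention, status and statistics block addresses, the coalescing
 *    trip/tick parameters and the statistics collection interval, and
 *    issues a clear-statistics command.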
1625 *
1626 * Return:
1627 ******************************************************************************/
1628 STATIC void
init_hc(
1630 lm_device_t *pdev)
1631 {
1632 u32_t val;
1633
1634 /* Set HC timer mode. */
1635 REG_RD(pdev, hc.hc_config, &val);
1636 val &= ~(HC_CONFIG_RX_TMR_MODE | HC_CONFIG_TX_TMR_MODE |
1637 HC_CONFIG_COM_TMR_MODE | HC_CONFIG_CMD_TMR_MODE);
1638
1639 if(pdev->params.hc_timer_mode & HC_RX_TIMER_MODE)
1640 {
1641 val |= HC_CONFIG_RX_TMR_MODE;
1642 }
1643
1644 if(pdev->params.hc_timer_mode & HC_TX_TIMER_MODE)
1645 {
1646 val |= HC_CONFIG_TX_TMR_MODE;
1647 }
1648
1649 if(pdev->params.hc_timer_mode & HC_COM_TIMER_MODE)
1650 {
1651 val |= HC_CONFIG_COM_TMR_MODE;
1652 }
1653
1654 if(pdev->params.hc_timer_mode & HC_CMD_TIMER_MODE)
1655 {
1656 val |= HC_CONFIG_CMD_TMR_MODE;
1657 }
1658
1659 if(CHIP_NUM(pdev) == CHIP_NUM_5709)
1660 {
1661 val &= ~HC_CONFIG_SET_MASK_AT_RD;
1662 //if(pdev->vars.interrupt_mode > IRQ_MODE_SIMD)
1663 //{
1664 // val |= HC_CONFIG_ONE_SHOT;
1665 //}
1666 }
1667
1668 REG_WR(pdev, hc.hc_config, val);
1669
    /* Enable the timer abort attention, which is used to request that
     * the driver write a driver pulse to the firmware. */
1672 REG_RD(pdev, hc.hc_attn_bits_enable, &val);
1673 val |= STATUS_ATTN_BITS_TIMER_ABORT;
1674 REG_WR(pdev, hc.hc_attn_bits_enable, val);
1675
1676 /* Set HC parameters. */
1677 REG_WR(pdev, hc.hc_status_addr_l, pdev->vars.status_phy.as_u32.low);
1678 REG_WR(pdev, hc.hc_status_addr_h, pdev->vars.status_phy.as_u32.high);
1679
1680 REG_WR(pdev, hc.hc_statistics_addr_l, pdev->vars.stats_phy.as_u32.low);
1681 REG_WR(pdev, hc.hc_statistics_addr_h, pdev->vars.stats_phy.as_u32.high);
1682
1683 REG_WR(
1684 pdev,
1685 hc.hc_tx_quick_cons_trip,
1686 (pdev->params.tx_quick_cons_trip_int << 16) |
1687 pdev->params.tx_quick_cons_trip);
1688 REG_WR(
1689 pdev,
1690 hc.hc_rx_quick_cons_trip,
1691 (pdev->params.rx_quick_cons_trip_int << 16) |
1692 pdev->params.rx_quick_cons_trip);
1693 REG_WR(
1694 pdev,
1695 hc.hc_comp_prod_trip,
1696 (pdev->params.comp_prod_trip_int << 16) |
1697 pdev->params.comp_prod_trip);
1698 REG_WR(
1699 pdev,
1700 hc.hc_tx_ticks,
1701 (pdev->params.tx_ticks_int << 16) |
1702 pdev->params.tx_ticks);
1703 REG_WR(
1704 pdev,
1705 hc.hc_rx_ticks,
1706 (pdev->params.rx_ticks_int << 16) |
1707 pdev->params.rx_ticks);
1708 REG_WR(
1709 pdev,
1710 hc.hc_com_ticks,
1711 (pdev->params.com_ticks_int << 16) |
1712 pdev->params.com_ticks);
1713 REG_WR(
1714 pdev, hc.hc_cmd_ticks,
1715 (pdev->params.cmd_ticks_int << 16) |
1716 pdev->params.cmd_ticks);
1717
1718 val = pdev->params.stats_ticks;
1719 if(CHIP_REV(pdev) == CHIP_REV_IKOS)
1720 {
1721 val = val / 1000;
1722 if(val < 0x100)
1723 {
1724 val = 0x100;
1725 }
1726 }
1727 REG_WR(pdev, hc.hc_stats_ticks, val);
1728
1729 REG_WR(pdev, hc.hc_stat_collect_ticks, 0xbb8); /* 3ms */
1730 REG_WR(pdev, hc.hc_command, HC_COMMAND_CLR_STAT_NOW);
1731 } /* init_hc */
1732
1733
1734
1735 /*******************************************************************************
1736 * Description:
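 *    Runs init_hc() and then programs the per-status-block coalescing
 *    parameters for status blocks 1 through 8 on the 5709.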
1737 *
1738 * Return:
1739 ******************************************************************************/
1740 STATIC void
init_hc_for_5709(
1742 lm_device_t *pdev)
1743 {
1744 DbgBreakIf(CHIP_NUM(pdev) != CHIP_NUM_5709);
1745
1746 init_hc(pdev);
1747
1748 REG_WR(pdev, hc.hc_tx_quick_cons_trip_1, pdev->params.psb_tx_cons_trip);
1749 REG_WR(pdev, hc.hc_tx_ticks_1, pdev->params.psb_tx_ticks);
1750 REG_WR(pdev, hc.hc_rx_quick_cons_trip_1, pdev->params.psb_rx_cons_trip);
1751 REG_WR(pdev, hc.hc_rx_ticks_1, pdev->params.psb_rx_ticks);
1752 REG_WR(pdev, hc.hc_comp_prod_trip_1, pdev->params.psb_comp_prod_trip);
1753 REG_WR(pdev, hc.hc_com_ticks_1, pdev->params.psb_com_ticks);
1754 REG_WR(pdev, hc.hc_cmd_ticks_1, pdev->params.psb_cmd_ticks);
1755 REG_WR(pdev, hc.hc_periodic_ticks_1, pdev->params.psb_period_ticks);
1756 //if(pdev->vars.interrupt_mode > IRQ_MODE_SIMD)
1757 //{
1758 // REG_RD(pdev, hc.hc_sb_config_1, &val);
1759 // val |= HC_SB_CONFIG_1_ONE_SHOT;
1760 // REG_WR(pdev, hc.hc_sb_config_1, val);
1761 //}
1762
1763 REG_WR(pdev, hc.hc_tx_quick_cons_trip_2, pdev->params.psb_tx_cons_trip);
1764 REG_WR(pdev, hc.hc_tx_ticks_2, pdev->params.psb_tx_ticks);
1765 REG_WR(pdev, hc.hc_rx_quick_cons_trip_2, pdev->params.psb_rx_cons_trip);
1766 REG_WR(pdev, hc.hc_rx_ticks_2, pdev->params.psb_rx_ticks);
1767 REG_WR(pdev, hc.hc_comp_prod_trip_2, pdev->params.psb_comp_prod_trip);
1768 REG_WR(pdev, hc.hc_com_ticks_2, pdev->params.psb_com_ticks);
1769 REG_WR(pdev, hc.hc_cmd_ticks_2, pdev->params.psb_cmd_ticks);
1770 REG_WR(pdev, hc.hc_periodic_ticks_2, pdev->params.psb_period_ticks);
1771 //if(pdev->vars.interrupt_mode > IRQ_MODE_SIMD)
1772 //{
1773 // REG_RD(pdev, hc.hc_sb_config_2, &val);
1774 // val |= HC_SB_CONFIG_2_ONE_SHOT;
1775 // REG_WR(pdev, hc.hc_sb_config_2, val);
1776 //}
1777
1778 REG_WR(pdev, hc.hc_tx_quick_cons_trip_3, pdev->params.psb_tx_cons_trip);
1779 REG_WR(pdev, hc.hc_tx_ticks_3, pdev->params.psb_tx_ticks);
1780 REG_WR(pdev, hc.hc_rx_quick_cons_trip_3, pdev->params.psb_rx_cons_trip);
1781 REG_WR(pdev, hc.hc_rx_ticks_3, pdev->params.psb_rx_ticks);
1782 REG_WR(pdev, hc.hc_comp_prod_trip_3, pdev->params.psb_comp_prod_trip);
1783 REG_WR(pdev, hc.hc_com_ticks_3, pdev->params.psb_com_ticks);
1784 REG_WR(pdev, hc.hc_cmd_ticks_3, pdev->params.psb_cmd_ticks);
1785 REG_WR(pdev, hc.hc_periodic_ticks_3, pdev->params.psb_period_ticks);
1786 //if(pdev->vars.interrupt_mode > IRQ_MODE_SIMD)
1787 //{
1788 // REG_RD(pdev, hc.hc_sb_config_3, &val);
1789 // val |= HC_SB_CONFIG_3_ONE_SHOT;
1790 // REG_WR(pdev, hc.hc_sb_config_3, val);
1791 //}
1792
1793 REG_WR(pdev, hc.hc_tx_quick_cons_trip_4, pdev->params.psb_tx_cons_trip);
1794 REG_WR(pdev, hc.hc_tx_ticks_4, pdev->params.psb_tx_ticks);
1795 REG_WR(pdev, hc.hc_rx_quick_cons_trip_4, pdev->params.psb_rx_cons_trip);
1796 REG_WR(pdev, hc.hc_rx_ticks_4, pdev->params.psb_rx_ticks);
1797 REG_WR(pdev, hc.hc_comp_prod_trip_4, pdev->params.psb_comp_prod_trip);
1798 REG_WR(pdev, hc.hc_com_ticks_4, pdev->params.psb_com_ticks);
1799 REG_WR(pdev, hc.hc_cmd_ticks_4, pdev->params.psb_cmd_ticks);
1800 REG_WR(pdev, hc.hc_periodic_ticks_4, pdev->params.psb_period_ticks);
1801 //if(pdev->vars.interrupt_mode > IRQ_MODE_SIMD)
1802 //{
1803 // REG_RD(pdev, hc.hc_sb_config_4, &val);
1804 // val |= HC_SB_CONFIG_4_ONE_SHOT;
1805 // REG_WR(pdev, hc.hc_sb_config_4, val);
1806 //}
1807
1808 REG_WR(pdev, hc.hc_tx_quick_cons_trip_5, pdev->params.psb_tx_cons_trip);
1809 REG_WR(pdev, hc.hc_tx_ticks_5, pdev->params.psb_tx_ticks);
1810 REG_WR(pdev, hc.hc_rx_quick_cons_trip_5, pdev->params.psb_rx_cons_trip);
1811 REG_WR(pdev, hc.hc_rx_ticks_5, pdev->params.psb_rx_ticks);
1812 REG_WR(pdev, hc.hc_comp_prod_trip_5, pdev->params.psb_comp_prod_trip);
1813 REG_WR(pdev, hc.hc_com_ticks_5, pdev->params.psb_com_ticks);
1814 REG_WR(pdev, hc.hc_cmd_ticks_5, pdev->params.psb_cmd_ticks);
1815 REG_WR(pdev, hc.hc_periodic_ticks_5, pdev->params.psb_period_ticks);
1816 //if(pdev->vars.interrupt_mode > IRQ_MODE_SIMD)
1817 //{
1818 // REG_RD(pdev, hc.hc_sb_config_5, &val);
1819 // val |= HC_SB_CONFIG_5_ONE_SHOT;
1820 // REG_WR(pdev, hc.hc_sb_config_5, val);
1821 //}
1822
1823 REG_WR(pdev, hc.hc_tx_quick_cons_trip_6, pdev->params.psb_tx_cons_trip);
1824 REG_WR(pdev, hc.hc_tx_ticks_6, pdev->params.psb_tx_ticks);
1825 REG_WR(pdev, hc.hc_rx_quick_cons_trip_6, pdev->params.psb_rx_cons_trip);
1826 REG_WR(pdev, hc.hc_rx_ticks_6, pdev->params.psb_rx_ticks);
1827 REG_WR(pdev, hc.hc_comp_prod_trip_6, pdev->params.psb_comp_prod_trip);
1828 REG_WR(pdev, hc.hc_com_ticks_6, pdev->params.psb_com_ticks);
1829 REG_WR(pdev, hc.hc_cmd_ticks_6, pdev->params.psb_cmd_ticks);
1830 REG_WR(pdev, hc.hc_periodic_ticks_6, pdev->params.psb_period_ticks);
1831 //if(pdev->vars.interrupt_mode > IRQ_MODE_SIMD)
1832 //{
1833 // REG_RD(pdev, hc.hc_sb_config_6, &val);
1834 // val |= HC_SB_CONFIG_6_ONE_SHOT;
1835 // REG_WR(pdev, hc.hc_sb_config_6, val);
1836 //}
1837
1838 REG_WR(pdev, hc.hc_tx_quick_cons_trip_7, pdev->params.psb_tx_cons_trip);
1839 REG_WR(pdev, hc.hc_tx_ticks_7, pdev->params.psb_tx_ticks);
1840 REG_WR(pdev, hc.hc_rx_quick_cons_trip_7, pdev->params.psb_rx_cons_trip);
1841 REG_WR(pdev, hc.hc_rx_ticks_7, pdev->params.psb_rx_ticks);
1842 REG_WR(pdev, hc.hc_comp_prod_trip_7, pdev->params.psb_comp_prod_trip);
1843 REG_WR(pdev, hc.hc_com_ticks_7, pdev->params.psb_com_ticks);
1844 REG_WR(pdev, hc.hc_cmd_ticks_7, pdev->params.psb_cmd_ticks);
1845 REG_WR(pdev, hc.hc_periodic_ticks_7, pdev->params.psb_period_ticks);
1846 //if(pdev->vars.interrupt_mode > IRQ_MODE_SIMD)
1847 //{
1848 // REG_RD(pdev, hc.hc_sb_config_7, &val);
1849 // val |= HC_SB_CONFIG_7_ONE_SHOT;
1850 // REG_WR(pdev, hc.hc_sb_config_7, val);
1851 //}
1852
1853 REG_WR(pdev, hc.hc_tx_quick_cons_trip_8, pdev->params.psb_tx_cons_trip);
1854 REG_WR(pdev, hc.hc_tx_ticks_8, pdev->params.psb_tx_ticks);
1855 REG_WR(pdev, hc.hc_rx_quick_cons_trip_8, pdev->params.psb_rx_cons_trip);
1856 REG_WR(pdev, hc.hc_rx_ticks_8, pdev->params.psb_rx_ticks);
1857 REG_WR(pdev, hc.hc_comp_prod_trip_8, pdev->params.psb_comp_prod_trip);
1858 REG_WR(pdev, hc.hc_com_ticks_8, pdev->params.psb_com_ticks);
1859 REG_WR(pdev, hc.hc_cmd_ticks_8, pdev->params.psb_cmd_ticks);
1860 REG_WR(pdev, hc.hc_periodic_ticks_8, pdev->params.psb_period_ticks);
1861 //if(pdev->vars.interrupt_mode > IRQ_MODE_SIMD)
1862 //{
1863 // REG_RD(pdev, hc.hc_sb_config_8, &val);
1864 // val |= HC_SB_CONFIG_8_ONE_SHOT;
1865 // REG_WR(pdev, hc.hc_sb_config_8, val);
1866 //}
1867 } /* init_hc_for_5709 */
1868
1869
1870
1871 /*******************************************************************************
1872 * Description:
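 *    Host coalescing setup for the 57728: runs init_hc() and
 *    init_hc_for_5709(); the per-status-block select configuration
 *    that follows is compiled out.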
1873 *
1874 * Return:
1875 ******************************************************************************/
1876 STATIC void
init_hc_for_57728(
1878 lm_device_t *pdev)
1879 {
1880 init_hc(pdev);
1881 init_hc_for_5709(pdev);
1882
1883 #if X1V_havhavhav
1884 REG_WR(pdev, hc.hc_sb_haddr_0_lo, pdev->vars.status_phy.as_u32.low);
1885 REG_WR(pdev, hc.hc_sb_haddr_0_hi, pdev->vars.status_phy.as_u32.high);
1886
1887 REG_WR(pdev, hc.hc_sb_select_0_config,
1888 ENABLE | fid == 7 | param-sel = 0 | haddr_sel = 0 |
1889 haddr_idx_sel = 0);
1890 REG_WR(pdev, hc.hc_sb_select_1_config,
1891 ENABLE | fid == 1 | param-sel = 0 | haddr_sel = 0 |
1892 haddr_idx_sel = 7);
1893 REG_WR(pdev, hc.hc_sb_select_2_config,
1894 ENABLE | fid == 7 | param-sel = 0 | haddr_sel = 0 |
1895 haddr_idx_sel = 2);
1896 REG_WR(pdev, hc.hc_sb_select_3_config,
1897 ENABLE | fid == 7 | param-sel = 0 | haddr_sel = 0 |
1898 haddr_idx_sel = 3);
1899 REG_WR(pdev, hc.hc_sb_select_4_config,
1900 ENABLE | fid == 7 | param-sel = 0 | haddr_sel = 0 |
1901 haddr_idx_sel = 4);
1902 REG_WR(pdev, hc.hc_sb_select_5_config,
1903 ENABLE | fid == 7 | param-sel = 0 | haddr_sel = 0 |
1904 haddr_idx_sel = 5);
1905 REG_WR(pdev, hc.hc_sb_select_6_config,
1906 ENABLE | fid == 7 | param-sel = 0 | haddr_sel = 0 |
1907 haddr_idx_sel = 6);
1908 REG_WR(pdev, hc.hc_sb_select_7_config,
1909 ENABLE | fid == 7 | param-sel = 0 | haddr_sel = 0 |
1910 haddr_idx_sel = 7);
1911 REG_WR(pdev, hc.hc_sb_select_8_config,
1912 ENABLE | fid == 7 | param-sel = 0 | haddr_sel = 0 |
1913 haddr_idx_sel = 8);
1914 REG_WR(pdev, hc.hc_sb_select_8_config,
1915 ENABLE | fid == 7 | param-sel = 0 | haddr_sel = 0 |
1916 haddr_idx_sel = 9);
1917 REG_WR(pdev, hc.hc_sb_select_8_config,
1918 ENABLE | fid == 7 | param-sel = 0 | haddr_sel = 0 |
1919 haddr_idx_sel = 10);
1920 REG_WR(pdev, hc.hc_sb_select_8_config,
1921 ENABLE | fid == 7 | param-sel = 0 | haddr_sel = 0 |
1922 haddr_idx_sel = 11);
1923 REG_WR(pdev, hc.hc_sb_select_8_config,
1924 ENABLE | fid == 7 | param-sel = 0 | haddr_sel = 0 |
1925 haddr_idx_sel = 12);
1926 REG_WR(pdev, hc.hc_sb_select_8_config,
1927 ENABLE | fid == 7 | param-sel = 0 | haddr_sel = 0 |
1928 haddr_idx_sel = 13);
1929 REG_WR(pdev, hc.hc_sb_select_8_config,
1930 ENABLE | fid == 7 | param-sel = 0 | haddr_sel = 0 |
1931 haddr_idx_sel = 14);
1932 REG_WR(pdev, hc.hc_sb_select_8_config,
1933 ENABLE | fid == 7 | param-sel = 0 | haddr_sel = 0 |
1934 haddr_idx_sel = 15);
1935 REG_WR(pdev, hc.hc_sb_select_8_config,
1936 ENABLE | fid == 7 | param-sel = 0 | haddr_sel = 0 |
1937 haddr_idx_sel = 16);
1938 #endif
1939 } /* init_hc_for_57728 */
1940
1941
1942
1943 /*******************************************************************************
1944 * Description:
1945 *
1946 * Return:
1947 ******************************************************************************/
1948
1949 // Per the TetonII register spec, setting bits in the knl_???_mask1 and
1950 // knl_???_mask2 registers causes the corresponding engine (CP or RV2P) to
1951 // be activated when any word enabled by the mask is written. Mask1 covers
1952 // the first 128 bytes of the context and mask2 the second 128 bytes.
1953 // Each bit in the mask corresponds to a 32-bit word in the kernel area.
1954 // e.g. writing 0x2000 to mask2 activates the engine when context
1955 // location 0xB4 is written
1956 // (i.e. (0xB4 - 128)/sizeof(u32_t) = bit 13).
1957 #define KNL_L4_MASK(field) \
1958 (1<<(OFFSETOF(l4_context_t, l4ctx_l4_bd_chain_##field) & ~0x80)/sizeof(u32_t))
1959
1960 #define KNL_L5_MASK(field) \
1961 (1<<(OFFSETOF(l5_context_t, l5ctx_##field) & ~0x80)/sizeof(u32_t))
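/* Worked example (matches the note above): a field at context offset 0xB4
 * lies in the second 128 bytes, so (0xB4 & ~0x80) / sizeof(u32_t) = 13 and
 * the macro evaluates to (1 << 13) = 0x2000, i.e. bit 13 of mask2. */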
1962
1963 lm_status_t
1964 lm_reset_setup(
1965 lm_device_t *pdev,
1966 u32_t reset_reason)
1967 {
1968 u32_t val;
1969 u8_t mbuf_adj = 0;
1970
1971 lm_chip_reset(pdev, reset_reason);
1972
1973 /* The Teton family of chips does not support PCI-X relaxed ordering. */
1974 if(pdev->hw_info.bus_mode == BUS_MODE_PCIX)
1975 {
1976 REG_RD_OFFSET(
1977 pdev,
1978 OFFSETOF(reg_space_t, pci_config.pcicfg_pcix_cap_id),
1979 &val);
1980 val &= ~(PCICFG_PCIX_COMMAND_RELAX_ORDER << 16);
1981 REG_WR_OFFSET(
1982 pdev,
1983 OFFSETOF(reg_space_t, pci_config.pcicfg_pcix_cap_id),
1984 val);
1985 }
1986
1987 /* 5709 devices have interrupts enabled by default
1988 * after a hardware reset. Disable them.
1989 */
1990 lm_disable_int(pdev);
1991
1992 /* The linkready signal going to the MAC is qualified by a port
1993 * mode of GMII or MII. When the port mode is NONE, the linkready
1994 * signal is always deasserted even when the link is active. Thus,
1995 * for us to get a link change event, we need to set the port mode
1996 * to something other than NONE. This logic may change in future
1997 * versions of the chip.
1998 *
1999 * Also, when the port mode is set to NONE, register reads/writes
2000 * to the emac block (0x1408) will cause the TETON-II FPGA to
2001 * lock up. This is not seen with the original TETON FPGA. */
2002 REG_WR(pdev, emac.emac_mode, EMAC_MODE_EXT_LINK_POL | EMAC_MODE_PORT_GMII);
2003
2004 /* Set up the DMA configuration. The swap settings describe what the
2005 * device itself will do, not the net result you want, because there
2006 * could be additional swapping by intermediary devices (PCI bridges). */
2007 val = DMA_CONFIG_DATA_BYTE_SWAP_TE |
2008 DMA_CONFIG_DATA_WORD_SWAP_TE |
2009 DMA_CONFIG_CNTL_WORD_SWAP_TE |
2010 #ifdef BIG_ENDIAN
2011 DMA_CONFIG_CNTL_BYTE_SWAP_TE |
2012 #endif
2013 (pdev->params.num_rchans & 0xf) << 12 |
2014 (pdev->params.num_wchans & 0xf) << 16;
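    /* As implied by the shifts above, bits 15:12 hold the number of DMA
     * read channels and bits 19:16 the number of DMA write channels,
     * alongside the byte/word swap enables. */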
2015
2016 /* Workaround for data corruption on Intel 840/860 chipset. */
2017 if(pdev->params.ping_pong_dma)
2018 {
2019 val |= DMA_CONFIG_CNTL_PING_PONG_DMA_TE;
2020 }
2021
2022 /* Apply a workaround to avoid a race condition between DMA completion
2023 * and writes to DMA buffer memory. This configuration should be
2024 * enabled on all versions of the 5706. */
2025 val |= (0x2<<20) | (1<<11);
2026
2027 /* Enable delayed completion. */
2028 if(pdev->hw_info.bus_mode == BUS_MODE_PCIX &&
2029 pdev->hw_info.bus_speed == BUS_SPEED_133_MHZ &&
2030 CHIP_ID(pdev) != CHIP_ID_5706_A0)
2031 {
2032 val |= 1 << 23;
2033 }
2034
2035 /* Configure the clock ratio in the FPGA mode. */
2036 if(CHIP_REV(pdev) == CHIP_REV_FPGA)
2037 {
2038 val |= 0x100;
2039 }
2040
2041 REG_WR(pdev, dma.dma_config, val);
2042
2043 if(pdev->params.one_tdma)
2044 {
2045 REG_RD(pdev, tdma.tdma_config, &val);
2046 val |= TDMA_CONFIG_ONE_DMA;
2047 REG_WR(pdev, tdma.tdma_config, val);
2048 }
2049
2050 if(CHIP_REV(pdev) == CHIP_REV_FPGA)
2051 {
2052 REG_RD(pdev, pci.pci_config_2, &val);
2053 val &= ~0x02000000;
2054 REG_WR(pdev, pci.pci_config_2, val);
2055 }
2056
2057 /* We need to enable the context block so we can initialize context
2058 * memory.
2059 *
2060 * We also need to enable HC so it can record the link state and the
2061 * first status block update we get will reflect the current state.
2062 *
2063 * We need to enable RV2P in order to download the firmwares for
2064 * its two processors. */
2065 REG_WR(
2066 pdev,
2067 misc.misc_enable_set_bits,
2068 MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
2069 MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
2070 MISC_ENABLE_SET_BITS_DMA_ENGINE_ENABLE |
2071 MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
2072
2073 /* Initialize context mapping and zero out the quick contexts. The
2074 * context block must have already been enabled. */
2075 if(CHIP_ID(pdev) == CHIP_ID_5706_A0)
2076 {
2077 init_context_5706_a0_wa(pdev);
2078 }
2079 else if(CHIP_NUM(pdev) == CHIP_NUM_5706 || CHIP_NUM(pdev) == CHIP_NUM_5708)
2080 {
2081 init_context_5706(pdev);
2082 }
2083 else if(CHIP_NUM(pdev) == CHIP_NUM_5709)
2084 {
2085 init_context_5709(pdev);
2086 #if 0
2087 /* Temporary L4 fix. */
2088 // if(CHIP_ID(pdev) == CHIP_ID_5709_IKOS ||
2089 // CHIP_ID(pdev) == CHIP_ID_5709_FPGA)
2090 {
2091 REG_WR(pdev, mq.mq_map_l4_0, 0x8001c1b9);
2092 }
2093 #endif
2094
2095 REG_WR(pdev, mq.mq_map_l4_0, 0x80010db9);
2096 REG_WR(pdev, mq.mq_map_l4_4, 0x82810eb2);
2097 REG_WR(pdev, mq.mq_map_l4_5, 0x8f0113b4);
2098 }
2099 else
2100 {
2101 DbgBreakIf(1);
2102 }
2103
2104 if(pdev->params.test_mode & TEST_MODE_XDIAG_ISCSI)
2105 {
2106 lm_init_cpus(pdev, CPU_RV2P_1 | CPU_RV2P_2); /* other CPUs are loaded through TCL */
2107 }
2108 else
2109 {
2110 lm_init_cpus(pdev, CPU_ALL);
2111 }
2112
2113 if(CHIP_NUM(pdev) == CHIP_NUM_5709)
2114 {
2115 REG_RD_IND(
2116 pdev,
2117 OFFSETOF(reg_space_t, rxp.rxp_scratch[0])+
2118 RXP_HSI_OFFSETOFF(hw_filter_ctx_offset),
2119 &pdev->vars.hw_filter_ctx_offset);
2120
2121 init_5709_for_msix(pdev);
2122 }
2123
2124 lm_nvram_init(pdev, FALSE);
2125
2126 /* tcp_syn_dos_defense - let the firmware route all the packets with
2127 * TCP SYN bit set to rx chain #1. */
2128 REG_WR_IND(
2129 pdev,
2130 OFFSETOF(reg_space_t, rxp.rxp_scratch[0])+RXP_HSI_OFFSETOFF(tcp_syn_dos_defense),
2131 pdev->params.enable_syn_rcvq);
2132
2133 REG_RD(pdev, mq.mq_config, &val);
2134 val &= ~MQ_CONFIG_KNL_BYP_BLK_SIZE;
2135 switch((LM_PAGE_BITS - 8) << 4)
2136 {
2137 case MQ_CONFIG_KNL_BYP_BLK_SIZE_256:
2138 val |= MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
2139 break;
2140
2141 case MQ_CONFIG_KNL_BYP_BLK_SIZE_512:
2142 val |= MQ_CONFIG_KNL_BYP_BLK_SIZE_512;
2143 break;
2144
2145 case MQ_CONFIG_KNL_BYP_BLK_SIZE_1K:
2146 val |= MQ_CONFIG_KNL_BYP_BLK_SIZE_1K;
2147 break;
2148
2149 case MQ_CONFIG_KNL_BYP_BLK_SIZE_2K:
2150 val |= MQ_CONFIG_KNL_BYP_BLK_SIZE_2K;
2151 break;
2152
2153 case MQ_CONFIG_KNL_BYP_BLK_SIZE_4K:
2154 val |= MQ_CONFIG_KNL_BYP_BLK_SIZE_4K;
2155 break;
2156
2157 default:
2158 DbgBreakMsg("Unsupported page size.\n");
2159 break;
2160 }
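    /* For example, with 4KB host pages LM_PAGE_BITS is 12, so the switch
     * key is (12 - 8) << 4 = 0x40 and MQ_CONFIG_KNL_BYP_BLK_SIZE_4K is
     * selected (assuming the _4K constant encodes that same value). */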
2161
2162 if(pdev->params.bin_mq_mode)
2163 {
2164 DbgBreakIf(CHIP_NUM(pdev) != CHIP_NUM_5709);
2165
2166 val |= MQ_CONFIG_BIN_MQ_MODE;
2167 }
2168
2169 REG_WR(pdev, mq.mq_config, val);
2170
2171 /* Configure the end of the kernel mailboxq window and the start of the
2172 * kernel bypass mailboxq. */
2173 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
2174 REG_WR(pdev, mq.mq_knl_byp_wind_start, val);
2175 REG_WR(pdev, mq.mq_knl_wind_end, val);
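    /* Presumably each kernel context owns one MB_KERNEL_CTX_SIZE-byte
     * mailbox slot starting at offset 0x10000, so this single boundary is
     * both the end of the kernel window and the start of the bypass window. */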
2176
2177 /* Configure page size. */
2178 REG_RD(pdev, tbdr.tbdr_config, &val);
2179 val &= ~TBDR_CONFIG_PAGE_SIZE;
2180 val |= (LM_PAGE_BITS - 8) << 24 | 0x40;
2181 REG_WR(pdev, tbdr.tbdr_config, val);
2182
2183 /* Program the MTU. Also include 4 bytes for CRC32. */
2184 val = pdev->params.mtu+4;
2185 if(pdev->params.mtu > MAX_ETHERNET_PACKET_SIZE)
2186 {
2187 val |= EMAC_RX_MTU_SIZE_JUMBO_ENA;
2188 }
2189 REG_WR(pdev, emac.emac_rx_mtu_size, val);
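    /* For example, a standard 1500-byte MTU programs 1504 (MTU + CRC32);
     * the jumbo enable bit is set only when the MTU exceeds
     * MAX_ETHERNET_PACKET_SIZE. */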
2190
2191 if(pdev->vars.enable_cu_rate_limiter)
2192 {
2193 if(pdev->vars.cu_mbuf_cnt > 0x48)
2194 {
2195 /* only allow a cu mbuf cluster cnt up to 0x48 to accommodate a jumbo
2196 * frame size of 9018 (note: each mbuf cluster is 128 bytes) */
2197 pdev->vars.cu_mbuf_cnt = 0x48;
2198 }
2199
2200 if(pdev->vars.cu_mbuf_cnt == 0)
2201 {
2202 /* the chip default uses 8KB (0x40 clusters) of cu mbuf */
2203 mbuf_adj = 0x48 - 0x40;
2204 }
2205 else
2206 {
2207 mbuf_adj = 0x48 - pdev->vars.cu_mbuf_cnt;
2208 }
2209 }
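    /* With the cap above, 0x48 clusters * 128 bytes = 9216 bytes, enough
     * for a 9018-byte jumbo frame; the chip default of 0x40 clusters (8KB)
     * therefore yields mbuf_adj = 0x48 - 0x40 = 8. */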
2210 /* Set up the flow control trip values, JF (jumbo frame) or non-JF. */
2211 get_trip_val(
2212 TRIP_FLOW,
2213 pdev->params.mtu,
2214 &val,
2215 pdev->vars.enable_cu_rate_limiter,
2216 mbuf_adj);
2217
2218 REG_WR_IND(
2219 pdev,
2220 OFFSETOF(reg_space_t, rbuf.rbuf_config),
2221 val);
2222
2223 get_trip_val(
2224 TRIP_MAC,
2225 pdev->params.mtu,
2226 &val,
2227 pdev->vars.enable_cu_rate_limiter,
2228 mbuf_adj);
2229
2230 REG_WR_IND(
2231 pdev,
2232 OFFSETOF(reg_space_t, rbuf.rbuf_config2),
2233 val);
2234
2235 if(!pdev->vars.enable_cu_rate_limiter)
2236 {
2237 get_trip_val(TRIP_CU, pdev->params.mtu, &val, 0, 0);
2238 REG_WR_IND(
2239 pdev,
2240 OFFSETOF(reg_space_t, rbuf.rbuf_config3),
2241 val);
2242 }
2243 else
2244 {
2245 /* isolate catchup traffic rbuf from normal traffic */
2246 REG_RD_IND(
2247 pdev,
2248 OFFSETOF(reg_space_t, rbuf.rbuf_command),
2249 &val);
2250 val |= RBUF_COMMAND_CU_ISOLATE_XI;
2251 REG_WR_IND(
2252 pdev,
2253 OFFSETOF(reg_space_t, rbuf.rbuf_command),
2254 val);
2255
2256 REG_WR_IND(
2257 pdev,
2258 OFFSETOF(reg_space_t, rbuf.rbuf_config3),
2259 0);
2260 if(pdev->vars.cu_mbuf_cnt)
2261 {
2262 val = pdev->vars.cu_mbuf_cnt;
2263 REG_WR_IND(
2264 pdev,
2265 OFFSETOF(reg_space_t, rbuf.rbuf_cu_buffer_size),
2266 val);
2267 }
2268 else
2269 {
2270 /* get default cu_mbuf_cnt from chip */
2271 REG_RD_IND(
2272 pdev,
2273 OFFSETOF(reg_space_t, rbuf.rbuf_cu_buffer_size),
2274 &val);
2275 }
2276 /* account for the initial MBUF allocated by the RPC */
2277 val -= 1;
2278 val *= 128;
2279 REG_WR_IND(
2280 pdev,
2281 OFFSETOF(reg_space_t, com.com_scratch[0])+COM_HSI_OFFSETOFF(com_cu_buf_size),
2282 val);
2283 REG_WR_IND(
2284 pdev,
2285 OFFSETOF(reg_space_t, com.com_scratch[0])+COM_HSI_OFFSETOFF(cu_rate_limiter_enable),
2286 1);
2287 REG_WR_IND(
2288 pdev,
2289 OFFSETOF(reg_space_t, txp.txp_scratch[0])+TXP_HSI_OFFSETOFF(cu_rate_limiter_enable),
2290 1);
2291 REG_WR_IND(
2292 pdev,
2293 OFFSETOF(reg_space_t, rxp.rxp_scratch[0])+RXP_HSI_OFFSETOFF(cu_rate_limiter_enable),
2294 1);
2295 }
2296
2297 /* Set up how to generate a link change interrupt. */
2298 if(pdev->params.phy_int_mode == PHY_INT_MODE_MI_INTERRUPT)
2299 {
2300 REG_WR(pdev, emac.emac_attention_ena, EMAC_ATTENTION_ENA_MI_INT);
2301 }
2302 else if(pdev->params.phy_int_mode == PHY_INT_MODE_LINK_READY)
2303 {
2304 REG_WR(pdev, emac.emac_attention_ena, EMAC_ATTENTION_ENA_LINK);
2305 }
2306 else if(pdev->params.phy_int_mode == PHY_INT_MODE_AUTO_POLLING)
2307 {
2308 REG_WR(pdev, emac.emac_attention_ena, EMAC_ATTENTION_ENA_LINK);
2309
2310 REG_RD(pdev, emac.emac_mdio_mode, &val);
2311 val |= EMAC_MDIO_MODE_AUTO_POLL;
2312 REG_WR(pdev, emac.emac_mdio_mode, val);
2313 }
2314 else
2315 {
2316 DbgBreakMsg("Invalid phy_int_mode.\n");
2317 }
2318
2319 zero_out_sb(pdev, (u32_t *) pdev->vars.status_virt);
2320
2321 if(CHIP_NUM(pdev) == CHIP_NUM_5706 ||
2322 CHIP_NUM(pdev) == CHIP_NUM_5708)
2323 {
2324 init_hc(pdev);
2325 }
2326 else if(CHIP_NUM(pdev) == CHIP_NUM_5709)
2327 {
2328 init_hc_for_5709(pdev);
2329 }
2330 else if(CHIP_NUM(pdev) == CHIP_NUM_57728)
2331 {
2332 init_hc_for_57728(pdev);
2333 }
2334 else
2335 {
2336 DbgBreakMsg("### Invalid chip number.\n");
2337 }
2338
2339 if(CHIP_REV(pdev) == CHIP_REV_IKOS || CHIP_REV(pdev) == CHIP_REV_FPGA)
2340 {
2341 reduce_ftq_depth(pdev);
2342 }
2343
2344 init_l2txq(pdev);
2345 init_l2rxq(pdev);
2346
2347 #ifndef EXCLUDE_KQE_SUPPORT
2348 init_kq(pdev);
2349 #endif
2350
2351 #if INCLUDE_OFLD_SUPPORT
2352 l4_reset_setup(pdev);
2353 #endif
2354
2355 /* Enable Command Scheduler notification when we write to the
2356 * host producer index of the kernel contexts. */
2357 REG_WR(pdev, mq.mq_knl_cmd_mask1, KNL_L5_MASK(sq_pidx));
2358
2359 /* Enable Command Scheduler notification when we write to either
2360 * the Send Queue or Receive Queue producer indexes of the kernel
2361 * bypass contexts. */
2362 REG_WR(pdev, mq.mq_knl_byp_cmd_mask1, KNL_L5_MASK(cq_cidx)|
2363 KNL_L5_MASK(sq_pidx)|
2364 KNL_L5_MASK(rq_pidx));
2365 REG_WR(pdev, mq.mq_knl_byp_write_mask1, KNL_L5_MASK(cq_cidx)|
2366 KNL_L5_MASK(sq_pidx)|
2367 KNL_L5_MASK(rq_pidx));
2368
2369 /* Use the kernel mailbox for L5 contexts (iSCSI and RDMA). */
2370 REG_WR(pdev, mq.mq_knl_cmd_mask1, KNL_L5_MASK(cq_cidx)|
2371 KNL_L5_MASK(sq_pidx)|
2372 KNL_L5_MASK(rq_pidx));
2373 REG_WR(pdev, mq.mq_knl_write_mask1, KNL_L5_MASK(cq_cidx)|
2374 KNL_L5_MASK(sq_pidx)|
2375 KNL_L5_MASK(rq_pidx));
2376 #ifndef L2_ONLY
2377 if(CHIP_NUM(pdev) != CHIP_NUM_5709)
2378 {
2379 /* Notify CP when the driver posts an application buffer (i.e. writes to host_bseq). */
2380 REG_WR(pdev, mq.mq_knl_cmd_mask2, KNL_L4_MASK(host_bseq));
2381 }
2382 else // CHIP_NUM_5709
2383 {
2384 /* Notify RV2P when the driver posts an application buffer (i.e. writes to host_bseq). */
2385 REG_WR(pdev, mq.mq_knl_rx_v2p_mask2, KNL_L4_MASK(host_bseq));
2386 }
2387 #endif
2388 #ifndef EXCLUDE_KQE_SUPPORT
2389 /* fw_doorbell - These two processors poll the doorbell for a non-zero
2390 * value before running. This must be done after setting up the kernel
2391 * queue contexts. */
2392 if(pdev->params.kcq_page_cnt)
2393 {
2394 REG_WR_IND(pdev, OFFSETOF(reg_space_t, cp.cp_scratch[0])+CP_HSI_OFFSETOFF(fw_doorbell), 1);
2395 REG_WR_IND(pdev, OFFSETOF(reg_space_t, com.com_scratch[0])+COM_HSI_OFFSETOFF(fw_doorbell), 1);
2396
2397 mm_wait(pdev, 5);
2398 }
2399 #endif
2400
2401 /* get information from firmware-configured mq.mq_config2. */
2402 if(pdev->params.bin_mq_mode)
2403 {
2404 REG_RD(pdev, mq.mq_config2, &val);
2405
2406 pdev->hw_info.first_l4_l5_bin = (u16_t) (val & MQ_CONFIG2_FIRST_L4L5);
2407 pdev->hw_info.bin_size = (u8_t) (val & MQ_CONFIG2_CONT_SZ) >> 3;
2408 }
2409
2410 /* Configure page size and start the RV2P processors. */
2411 val = (LM_PAGE_BITS - 8) << 24;
2412 REG_WR(pdev, rv2p.rv2p_config, val);
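    /* As with tbdr_config above, bits 31:24 carry the page-size code
     * (LM_PAGE_BITS - 8), e.g. 0x04 for 4KB pages. */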
2413
2414 /* Set up the MAC for the current link settings. The HC should already
2415 * be enabled at this point so that it is aware of the current link
2416 * state and link acknowledgement (via the call below). The first
2417 * status block update we get will then reflect the current link state. */
2418 lm_service_phy_int(pdev, TRUE);
2419
2420 return LM_STATUS_SUCCESS;
2421 } /* lm_reset_setup */
2422
2423
2424
2425 #if INCLUDE_OFLD_SUPPORT
2426 /*******************************************************************************
2427 * Description:
2428 *
2429 * Return:
2430 ******************************************************************************/
2431 STATIC void
2432 enable_alt_catchup(
2433 lm_device_t *pdev)
2434 {
2435 l4_kwqe_enable_alt_catchup_t *alt_catchup_kwqe;
2436 kwqe_t *prod_qe;
2437 u16_t prod_idx;
2438
2439 pdev->kq_info.kwqe_left -= 1;
2440
2441 prod_qe = pdev->kq_info.kwq_prod_qe;
2442 prod_idx = pdev->kq_info.kwq_prod_idx;
2443
2444 alt_catchup_kwqe = (l4_kwqe_enable_alt_catchup_t *) prod_qe;
2445 alt_catchup_kwqe->tcp_hdr_flags = TCP_HDR_FLAGS_LAYER_MASK_L4;
2446 alt_catchup_kwqe->tcp_hdr_opcode = TCP_HDR_OPCODE_VALUE_ENABLE_ALT_CATCHUP;
2447
2448 /* Advance to the next KWQE. */
2449 if(prod_qe == pdev->kq_info.kwq_last_qe)
2450 {
2451 prod_qe = pdev->kq_info.kwq_virt;
2452 }
2453 else
2454 {
2455 prod_qe++;
2456 }
2457 prod_idx++;
2458
2459 pdev->kq_info.kwq_prod_qe = prod_qe;
2460 pdev->kq_info.kwq_prod_idx = prod_idx;
2461
2462 /* catchup_override - use cid 0x30 (catchup2) instead of tx1 for catchup. */
2463 REG_WR_IND(
2464 pdev,
2465 OFFSETOF(reg_space_t, tpat.tpat_scratch[0])+TPAT_HSI_OFFSETOFF(catchup_overide),
2466 1);
2467
2468 MBQ_WR16(
2469 pdev,
2470 GET_CID(pdev->kq_info.kwq_cid_addr),
2471 OFFSETOF(krnlq_context_t, krnlq_host_qidx),
2472 prod_idx);
2473 } /* enable_alt_catchup */
2474 #endif
2475
2476
2477
2478 /*******************************************************************************
2479 * Description:
2480 *
2481 * Return:
2482 ******************************************************************************/
2483 lm_status_t
2484 lm_reset_run(
2485 lm_device_t *pdev)
2486 {
2487 u32_t max_loop_cnt;
2488 u32_t idx;
2489
2490 /* Enable all the state machines including the processors. We could use
2491 * REG_WR(pdev, misc.misc_command, MISC_COMMAND_ENABLE_ALL) here, but
2492 * for now we don't want to enable the timer block yet. That needs
2493 * to be done by the firmware. */
2494 REG_WR(pdev, misc.misc_enable_set_bits, 0x15ffffff);
2495
2496 /* Allow the firmware time to run (see the delay note below). */
2497 max_loop_cnt = 1000;
2498 if(CHIP_REV(pdev) == CHIP_REV_IKOS)
2499 {
2500 max_loop_cnt = 25000;
2501 }
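    /* Assuming mm_wait() delays in microseconds (as in fw_reset_sync), this
     * loop waits roughly 1000 * 10us = 10ms on real silicon and
     * 25000 * 10us = 250ms on IKOS emulation. */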
2502
2503 for(idx = 0; idx < max_loop_cnt; idx++)
2504 {
2505 mm_wait(pdev, 10);
2506 }
2507
2508 #if INCLUDE_OFLD_SUPPORT
2509 /* 'tx4' (cid 30/31) for catchup. */
2510 if(pdev->tx_info.cu_idx != TX_CHAIN_IDX1)
2511 {
2512 enable_alt_catchup(pdev);
2513 }
2514 #endif
2515
2516 /* Force the first status block update so we can acknowledge the initial
2517 * link status and service any link change that occurred since we last
2518 * called lm_service_phy_int. We need to do this here so that we don't
2519 * have to service a link change event when we later receive a status
2520 * block update. */
2521 REG_WR(pdev, hc.hc_command, HC_COMMAND_COAL_NOW_WO_INT);
2522
2523 /* Wait for the status block. In the IKOS environment we need to
2524 * wait this long. This delay may be reduced significantly when running
2525 * on the real chip. */
2526 mm_wait(pdev, 20);
2527 if(CHIP_REV(pdev) == CHIP_REV_IKOS)
2528 {
2529 for(idx = 0; idx < 100; idx++)
2530 {
2531 mm_wait(pdev, 10);
2532 }
2533 }
2534
2535 /* Setup the MAC for the current link settings and acknowledge the
2536 * current link state if necessary. */
2537 lm_service_phy_int(pdev, FALSE);
2538
2539 /* Ensure the status block in host memory reflects the current link
2540 * state and link acknowledgement. */
2541 REG_WR(pdev, hc.hc_command, HC_COMMAND_COAL_NOW);
2542
2543 return LM_STATUS_SUCCESS;
2544 } /* lm_reset_run */
2545
2546
2547
2548 /*******************************************************************************
2549 * Description:
2550 *
2551 * Return:
2552 ******************************************************************************/
2553 lm_status_t
2554 lm_reset(
2555 lm_device_t *pdev,
2556 u32_t reset_reason)
2557 {
2558 lm_status_t status;
2559
2560 status = lm_reset_setup(pdev, reset_reason);
2561 if(status == LM_STATUS_SUCCESS)
2562 {
2563 status = lm_reset_run(pdev);
2564 }
2565
2566 return status;
2567 } /* lm_reset */
2568