/*******************************************************************************
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * Copyright 2014 QLogic Corporation
 * The contents of this file are subject to the terms of the
 * QLogic End User License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the License at
 * http://www.qlogic.com/Resources/Documents/DriverDownloadHelp/
 * QLogic_End_User_Software_License.txt
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 *
 * Module Description:
 *     This file contains functions that handle IGU access and SB management
 *
 ******************************************************************************/

#include "lm5710.h"
#include "577xx_int_offsets.h"
#include "bcmtype.h"

/* Reads IGU interrupt status register MSB / LSB */
static u32_t lm_read_isr32(
    lm_device_t *pdev,
    u32_t addr)
{
    u32 offset = IS_PFDEV(pdev) ? BAR_IGU_INTMEM : VF_BAR0_IGU_OFFSET;
    u32_t res = 0;
    u32_t value;
    do {
        /* Read the 32 bit value from BAR */
        LM_BAR_RD32_OFFSET(pdev, BAR_0, offset + addr, &value);
        DbgMessage(pdev, VERBOSEi, " ### lm_read_isr32 read address 0x%x value=0x%x\n", addr, value);
        DbgBreakIf(value == 0xffffffff);
        res |= value;
        /* Do one more iteration if we got the value for a legitimate "all ones" */
    } while (value == 0xefffffff);
    return res;
}

/* Reads IGU interrupt status register MSB / LSB */
static u64_t lm_read_isr64(
    lm_device_t *pdev,
    u32_t addr)
{
    u32 offset = IS_PFDEV(pdev) ? BAR_IGU_INTMEM : VF_BAR0_IGU_OFFSET;
    u64_t res = 0;
    u64_t value;
    do {
        /* Read the 64 bit value from BAR */
        LM_BAR_RD64_OFFSET(pdev, BAR_0, offset + addr, &value);
        DbgMessage(pdev, FATAL, " ### lm_read_isr64 read address 0x%x value=0x%x 0x%x\n", addr, (u32_t)(value >> 32), (u32_t)value);
        DbgBreakIf(value == 0xffffffffffffffffULL);
        res |= value;
        /* Do one more iteration if we got the value for a legitimate "all ones" */
    } while (value == 0xefffffffffffffffULL);
    DbgMessage(pdev, FATAL, " ### lm_read_isr64 res=0x%x 0x%x\n", (u32_t)(res >> 32), (u32_t)res);
    return res;
}

u64_t lm_igutest_get_isr32(struct _lm_device_t *pdev)
{
    u64_t intr_status = 0;
    intr_status = ((u64_t)lm_read_isr32(pdev, 8 * IGU_REG_SISR_MDPC_WMASK_MSB_UPPER) << 32) |
        lm_read_isr32(pdev, 8 * IGU_REG_SISR_MDPC_WMASK_LSB_UPPER);
    return intr_status;
}

u64_t
lm_igutest_get_isr64(struct _lm_device_t *pdev)
{
    return lm_read_isr64(pdev, 8 * IGU_REG_SISR_MDPC_WMASK_UPPER);
}

lm_interrupt_status_t
lm_get_interrupt_status_wo_mask(
    lm_device_t *pdev)
{
    lm_interrupt_status_t intr_status = 0;
    if (INTR_BLK_REQUIRE_CMD_CTRL(pdev)) {
        /* This is IGU GRC Access... need to write ctrl and then read data */
        REG_WR(pdev, IGU_REG_COMMAND_REG_CTRL, INTR_BLK_CMD_CTRL_RD_WOMASK(pdev));
    }
    intr_status = REG_RD(pdev, INTR_BLK_SIMD_ADDR_WOMASK(pdev));
    /* if above, need to read 64 bits from IGU...and take care of all-ones */
    ASSERT_STATIC(MAX_RSS_CHAINS <= 32);
    return intr_status;
}

lm_interrupt_status_t
lm_get_interrupt_status(
    lm_device_t *pdev)
{
    lm_interrupt_status_t intr_status = 0;

    if (INTR_BLK_REQUIRE_CMD_CTRL(pdev)) {
        /* This is IGU GRC Access... need to write ctrl and then read data */
        REG_WR(pdev, IGU_REG_COMMAND_REG_CTRL, INTR_BLK_CMD_CTRL_RD_WMASK(pdev));
    }
    intr_status = REG_RD(pdev, INTR_BLK_SIMD_ADDR_WMASK(pdev));
    /* if above, need to read 64 bits from IGU...and take care of all-ones */
    ASSERT_STATIC(MAX_RSS_CHAINS <= 32);
    return intr_status;
} /* lm_get_interrupt_status */
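
/*
 * Illustrative usage sketch only (compiled out): a single-ISR (SIMD) handler
 * would typically read the masked interrupt status and then service and
 * re-ack every status block whose bit is set. The exact bit layout of
 * lm_interrupt_status_t is interrupt-block specific, so treat the loop
 * below as an assumption-laden sketch, not the driver's actual dispatch.
 */
#if 0
static void sample_simd_isr(lm_device_t *pdev)
{
    lm_interrupt_status_t intr_status = lm_get_interrupt_status(pdev);
    u8_t rss_id;
    u8_t rx_rss_id, tx_rss_id;

    for (rss_id = 0; rss_id < MAX_RSS_CHAINS; rss_id++) {
        if (intr_status & (1 << rss_id)) {
            /* Updates the local ack copies and hints which services ran */
            lm_handle_igu_sb_id(pdev, rss_id, &rx_rss_id, &tx_rss_id);
            /* ... service rx/tx completions here (DPC context) ... */
            lm_int_ack_sb_enable(pdev, rss_id);
        }
    }
}
#endif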

lm_status_t lm_set_interrupt_moderation(struct _lm_device_t *pdev, u8_t is_enable)
{
    lm_status_t lm_status = LM_STATUS_SUCCESS;
    u8_t sb_id = 0;

    pdev->params.int_coalesing_mode_disabled_by_ndis = !is_enable;
    if (pdev->params.int_coalesing_mode == LM_INT_COAL_NONE) {
        DbgMessage(pdev, WARN, "HC is not supported (disabled) in driver\n");
        return LM_STATUS_SUCCESS;
    }
    if (IS_PFDEV(pdev))
    {
        LM_FOREACH_SB_ID(pdev, sb_id)
        {
            if ((lm_status = lm_set_hc_flag(pdev, sb_id, HC_INDEX_TOE_RX_CQ_CONS, is_enable)) != LM_STATUS_SUCCESS)
                break;
            if ((lm_status = lm_set_hc_flag(pdev, sb_id, HC_INDEX_TOE_TX_CQ_CONS, is_enable)) != LM_STATUS_SUCCESS)
                break;
            if ((lm_status = lm_set_hc_flag(pdev, sb_id, HC_INDEX_ETH_RX_CQ_CONS, is_enable)) != LM_STATUS_SUCCESS)
                break;
            if ((lm_status = lm_set_hc_flag(pdev, sb_id, HC_INDEX_ETH_TX_CQ_CONS_COS0, is_enable)) != LM_STATUS_SUCCESS)
                break;
            if ((lm_status = lm_set_hc_flag(pdev, sb_id, HC_INDEX_ETH_TX_CQ_CONS_COS1, is_enable)) != LM_STATUS_SUCCESS)
                break;
            if ((lm_status = lm_set_hc_flag(pdev, sb_id, HC_INDEX_ETH_TX_CQ_CONS_COS2, is_enable)) != LM_STATUS_SUCCESS)
                break;
        }
    }

    return lm_status;
}

void lm_set_igu_tmode(struct _lm_device_t *pdev, u8_t tmode)
{
    pdev->vars.is_igu_test_mode = tmode;
}

u8_t lm_get_igu_tmode(struct _lm_device_t *pdev)
{
    return pdev->vars.is_igu_test_mode;
}

void lm_set_interrupt_mode(struct _lm_device_t *pdev, u32_t mode)
{
    DbgBreakIf(mode > LM_INT_MODE_MIMD);
    pdev->params.interrupt_mode = mode;
}

u32_t lm_get_interrupt_mode(struct _lm_device_t *pdev)
{
    return pdev->params.interrupt_mode;
}

u8_t lm_get_num_fp_msix_messages(struct _lm_device_t *pdev)
{
    if (INTR_BLK_TYPE(pdev) == INTR_BLK_IGU) {
        if (pdev->vars.is_igu_test_mode) {
            DbgMessage(pdev, FATAL, "IGU test mode: returned %d fp-messages\n", pdev->hw_info.intr_blk_info.igu_info.igu_test_sb_cnt + pdev->hw_info.intr_blk_info.igu_info.igu_sb_cnt);
            return (pdev->hw_info.intr_blk_info.igu_info.igu_test_sb_cnt + pdev->hw_info.intr_blk_info.igu_info.igu_sb_cnt);
        }
        return pdev->hw_info.intr_blk_info.igu_info.igu_sb_cnt;
    } else {
        return pdev->params.sb_cnt;
    }
}

u8_t lm_get_base_msix_msg(struct _lm_device_t *pdev)
{
    if (IS_PFDEV(pdev)) {
        return 1;
    } else {
        return 0;
    }
}

u8_t lm_has_sp_msix_vector(struct _lm_device_t *pdev)
{
    if (IS_PFDEV(pdev)) {
        return TRUE;
    } else {
        return FALSE;
    }
}

lm_status_t lm_set_hc_flag(struct _lm_device_t *pdev, u8_t sb_id, u8_t idx, u8_t is_enable)
{
    lm_status_t lm_status = LM_STATUS_SUCCESS;
    struct hc_index_data * hc_index_entry;
    u8_t fw_sb_id;
    u8_t notify_fw = FALSE;

    if (CHIP_IS_E1x(pdev)) {
        hc_index_entry = pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e1x_sb_data.index_data + idx;
    } else {
        hc_index_entry = pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.index_data + idx;
    }
    if (pdev->params.int_coalesing_mode == LM_INT_COAL_PERIODIC_SYNC) {
        if (is_enable) {
            if (!(hc_index_entry->flags & HC_INDEX_DATA_HC_ENABLED) && hc_index_entry->timeout) {
                hc_index_entry->flags |= HC_INDEX_DATA_HC_ENABLED;
                notify_fw = TRUE;
            }
        } else {
            if (hc_index_entry->flags & HC_INDEX_DATA_HC_ENABLED) {
                hc_index_entry->flags &= ~HC_INDEX_DATA_HC_ENABLED;
                notify_fw = TRUE;
            }
        }
    }
    if (notify_fw) {
        fw_sb_id = LM_FW_SB_ID(pdev, sb_id);
        if (CHIP_IS_E1x(pdev)) {
            LM_INTMEM_WRITE8(PFDEV(pdev), (CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id)
                                           + OFFSETOF(struct hc_status_block_data_e1x, index_data)
                                           + sizeof(struct hc_index_data)*idx
                                           + OFFSETOF(struct hc_index_data, flags)),
                             hc_index_entry->flags, BAR_CSTRORM_INTMEM);
        } else {
            LM_INTMEM_WRITE8(PFDEV(pdev), (CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id)
                                           + OFFSETOF(struct hc_status_block_data_e2, index_data)
                                           + sizeof(struct hc_index_data)*idx
                                           + OFFSETOF(struct hc_index_data, flags)),
                             hc_index_entry->flags, BAR_CSTRORM_INTMEM);
        }
        DbgMessage(pdev, INFORMi, "HC set to %d for SB%d(index%d)\n", is_enable, sb_id, idx);
    } else {
        DbgMessage(pdev, INFORMi, "HC already set to %d for SB%d(index%d)\n", is_enable, sb_id, idx);
    }

    return lm_status;
}

void lm_update_def_hc_indices(lm_device_t *pdev, u8_t dummy_sb_id, u32_t *activity_flg)
{
    volatile struct hc_sp_status_block * sp_sb = NULL;
    volatile struct atten_sp_status_block * attn_sb = NULL;
    u16_t atomic_index = 0;

    *activity_flg = 0;

    DbgBreakIf(!pdev);

    //It's a default status block

    DbgMessage(pdev, INFORMi, "BEFORE update: hc_def_ack:%d, attn_def_ack:%d\n",
               pdev->vars.hc_def_ack,
               pdev->vars.attn_def_ack);

    sp_sb = lm_get_default_status_block(pdev);

    atomic_index = mm_le16_to_cpu(sp_sb->running_index);
    if (atomic_index != pdev->vars.hc_def_ack)
    {
        pdev->vars.hc_def_ack = atomic_index;
        (*activity_flg) |= LM_SP_ACTIVE;
    }

    attn_sb = lm_get_attention_status_block(pdev);

    atomic_index = mm_le16_to_cpu(attn_sb->attn_bits_index);
    if (atomic_index != pdev->vars.attn_def_ack)
    {
        pdev->vars.attn_def_ack = atomic_index;
        (*activity_flg) |= LM_DEF_ATTN_ACTIVE;
    }

    DbgMessage(pdev, INFORMi, "AFTER update: hc_def_ack:%d, attn_def_ack:%d\n",
               pdev->vars.hc_def_ack,
               pdev->vars.attn_def_ack);
}

void lm_update_fp_hc_indices(lm_device_t *pdev, u8_t igu_sb_id, u32_t *activity_flg, u8_t *drv_rss_id)
{
    u16_t atomic_index = 0;
    u8_t flags;
    u8_t drv_sb_id;

    *activity_flg = 0;
    drv_sb_id = igu_sb_id;

    DbgBreakIf(!(pdev && (drv_sb_id <= ARRSIZE(pdev->vars.status_blocks_arr))));
    DbgMessage(pdev, INFORMi, "lm_update_hc_indices: inside with sb_idx:%d\n", drv_sb_id);

    DbgBreakIf(!LM_SB_ID_VALID(pdev, drv_sb_id));

    flags = lm_query_storm_intr(pdev, igu_sb_id, &drv_sb_id);

    DbgMessage(pdev, INFORMi, "BEFORE update: c_hc_ack:%d\n", pdev->vars.c_hc_ack[drv_sb_id]);
    DbgMessage(pdev, INFORMi, "BEFORE update: u_hc_ack:%d\n", pdev->vars.u_hc_ack[drv_sb_id]);

    if (GET_FLAGS(flags, CSTORM_INTR_FLAG)) {
        atomic_index = lm_get_sb_running_index(pdev, drv_sb_id, SM_TX_ID);

        if (atomic_index != pdev->vars.c_hc_ack[drv_sb_id])
        {
            pdev->vars.c_hc_ack[drv_sb_id] = atomic_index;
            (*activity_flg) |= LM_NON_DEF_CSTORM_ACTIVE;
        }
    }

    if (GET_FLAGS(flags, USTORM_INTR_FLAG)) {
        atomic_index = lm_get_sb_running_index(pdev, drv_sb_id, SM_RX_ID);

        if (atomic_index != pdev->vars.u_hc_ack[drv_sb_id])
        {
            pdev->vars.u_hc_ack[drv_sb_id] = atomic_index;
            (*activity_flg) |= LM_NON_DEF_USTORM_ACTIVE;
            if ((pdev->params.ndsb_type == LM_SINGLE_SM) || (pdev->params.ndsb_type == LM_DOUBLE_SM_SINGLE_IGU)) {
                (*activity_flg) |= LM_NON_DEF_CSTORM_ACTIVE;
            }
        }
    }

    DbgMessage(pdev, INFORMi, "AFTER update: c_hc_ack:%d\n", pdev->vars.c_hc_ack[drv_sb_id]);
    DbgMessage(pdev, INFORMi, "AFTER update: u_hc_ack:%d\n", pdev->vars.u_hc_ack[drv_sb_id]);

    /* Fixme - doesn't have to be... */
    *drv_rss_id = drv_sb_id;
}

u8_t lm_is_def_sb_updated(lm_device_t *pdev)
{
    volatile struct hc_sp_status_block * sp_sb = NULL;
    volatile struct atten_sp_status_block * attn_sb = NULL;
    u8_t result = FALSE;
    u16_t hw_sb_idx = 0;

    DbgBreakIfFastPath(!pdev);
    if (!pdev || IS_VFDEV(pdev))
    {
        return FALSE;
    }

    DbgMessage(pdev, INFORMi, "lm_is_def_sb_updated() inside!\n");

    sp_sb = lm_get_default_status_block(pdev);
    //it is legitimate that only a subset of the storm indices changed relative to our local copy;
    //at least one changed storm index implies that we have work to do on this sb
    hw_sb_idx = mm_le16_to_cpu(sp_sb->running_index);
    if (hw_sb_idx != pdev->vars.hc_def_ack)
    {
        DbgMessage(pdev, INFORMi, "lm_is_sb_updated: sp running_index:%d, hc_def_ack:%d\n",
                   hw_sb_idx, pdev->vars.hc_def_ack);

        result = TRUE;
    }

    attn_sb = lm_get_attention_status_block(pdev);
    hw_sb_idx = mm_le16_to_cpu(attn_sb->attn_bits_index);
    if (hw_sb_idx != pdev->vars.attn_def_ack)
    {
        DbgMessage(pdev, INFORMi, "lm_is_sb_updated: def.attn_bits_index:%d attn_def_ack:%d\n",
                   hw_sb_idx, pdev->vars.attn_def_ack);

        result = TRUE;
    }

    DbgMessage(pdev, INFORMi, "lm_is_def_sb_updated: result:%s\n", result ? "TRUE" : "FALSE");

    return result;
}


u8_t lm_handle_igu_sb_id(lm_device_t *pdev, u8_t igu_sb_id, u8_t *rx_rss_id, u8_t *tx_rss_id)
{
    u16_t atomic_index = 0;
    u8_t drv_sb_id = 0;
    u8_t flags = 0;
    u8_t drv_rss_id = 0;

    drv_sb_id = igu_sb_id;

    if ((INTR_BLK_TYPE(pdev) == INTR_BLK_HC) || (IGU_U_NDSB_OFFSET(pdev) == 0)) {
        /* One Segment Per u/c */
        SET_FLAGS(flags, USTORM_INTR_FLAG);
        SET_FLAGS(flags, CSTORM_INTR_FLAG);
    } else {
        if (drv_sb_id >= IGU_U_NDSB_OFFSET(pdev)) {
            drv_sb_id -= IGU_U_NDSB_OFFSET(pdev);
            SET_FLAGS(flags, USTORM_INTR_FLAG);
            //DbgMessage(pdev, FATAL, "Ustorm drv_sb_id=%d\n", drv_sb_id);
        } else {
            SET_FLAGS(flags, CSTORM_INTR_FLAG);
            //DbgMessage(pdev, FATAL, "Cstorm drv_sb_id=%d\n", drv_sb_id);
        }
    }

    if (GET_FLAGS(flags, USTORM_INTR_FLAG)) {
        atomic_index = lm_get_sb_running_index(pdev, drv_sb_id, SM_RX_ID);

        if (atomic_index != pdev->vars.u_hc_ack[drv_sb_id]) {
            pdev->vars.u_hc_ack[drv_sb_id] = atomic_index;
        }

        drv_rss_id = drv_sb_id; /* FIXME: doesn't have to be... */
        //Check for Rx completions
        if (lm_is_rx_completion(pdev, drv_rss_id))
        {
            //DbgMessage(pdev, FATAL, "RX_completion=%d\n", drv_rss_id);
            SET_FLAGS(flags, SERV_RX_INTR_FLAG);
        }

#ifdef INCLUDE_L4_SUPPORT
        //Check for L4 Rx completions
        if (lm_toe_is_rx_completion(pdev, drv_rss_id))
        {
            lm_toe_service_rx_intr(pdev, drv_rss_id);
        }
#endif
    }
    if (GET_FLAGS(flags, CSTORM_INTR_FLAG)) {
        if (IGU_U_NDSB_OFFSET(pdev)) {
            atomic_index = lm_get_sb_running_index(pdev, drv_sb_id, SM_TX_ID);

            if (atomic_index != pdev->vars.c_hc_ack[drv_sb_id]) {
                pdev->vars.c_hc_ack[drv_sb_id] = atomic_index;
            }
        }
        drv_rss_id = drv_sb_id; /* FIXME: doesn't have to be... */
        //Check for Tx completions
        if (lm_is_tx_completion(pdev, drv_rss_id))
        {
            //DbgMessage(pdev, FATAL, "TX_completion=%d\n", drv_rss_id);
            SET_FLAGS(flags, SERV_TX_INTR_FLAG);
        }

#ifdef INCLUDE_L4_SUPPORT
        //Check for L4 Tx completions
        if (lm_toe_is_tx_completion(pdev, drv_rss_id))
        {
            lm_toe_service_tx_intr(pdev, drv_rss_id);
        }
#endif
    }
    *rx_rss_id = drv_rss_id;
    *tx_rss_id = drv_rss_id;

    return flags;
}

volatile struct host_hc_status_block_e2 * lm_get_e2_status_block(lm_device_t *pdev, u8_t rss_id)
{
    return pdev->vars.status_blocks_arr[rss_id].host_hc_status_block.e2_sb;
}

volatile struct host_hc_status_block_e1x * lm_get_e1x_status_block(lm_device_t *pdev, u8_t rss_id)
{
    return pdev->vars.status_blocks_arr[rss_id].host_hc_status_block.e1x_sb;
}

volatile struct hc_sp_status_block * lm_get_default_status_block(lm_device_t *pdev)
{
    return &pdev->vars.gen_sp_status_block.hc_sp_status_blk->sp_sb;
}

volatile struct atten_sp_status_block * lm_get_attention_status_block(lm_device_t *pdev)
{
    return &pdev->vars.gen_sp_status_block.hc_sp_status_blk->atten_status_block;
}

void print_sb_info(lm_device_t *pdev)
{
#if 0
    u8_t index = 0;
    volatile struct host_status_block *rss_sb = NULL;

    DbgBreakIf(!pdev);
    DbgMessage(pdev, INFORMi, "print_sb_info() inside!\n");
    //print info of all non-default status blocks
    for (index = 0; index < MAX_RSS_CHAINS; index++)
    {
        rss_sb = lm_get_status_block(pdev, index);

        DbgBreakIf(!rss_sb);
        DbgBreakIf(*(LM_RCQ(pdev, index).
                     hw_con_idx_ptr) != rss_sb->u_status_block.index_values[HC_INDEX_U_ETH_RX_CQ_CONS]);
        DbgBreakIf(*(LM_TXQ(pdev, index).hw_con_idx_ptr) != rss_sb->c_status_block.index_values[HC_INDEX_C_ETH_TX_CQ_CONS]);

        DbgMessage(pdev, INFORMi, "rss sb #%d: u_new_cons:%d, c_new_cons:%d, c_status idx:%d, c_sbID:%d, u_status idx:%d, u_sbID:%d\n",
                   index,
                   rss_sb->u_status_block.index_values[HC_INDEX_U_ETH_RX_CQ_CONS],
                   rss_sb->c_status_block.index_values[HC_INDEX_C_ETH_TX_CQ_CONS],
                   rss_sb->c_status_block.status_block_index,
                   rss_sb->c_status_block.status_block_id,
                   rss_sb->u_status_block.status_block_index,
                   rss_sb->u_status_block.status_block_id);

        DbgMessage(pdev, INFORMi, "____________________________________________________________\n");
    }
    //print info of the default status block
    DbgBreakIf(pdev->vars.gen_sp_status_block.hc_sp_status_blk == NULL);

    DbgMessage(pdev, INFORMi, "sp sb: c_status idx:%d, c_sbID:%d\n",
               pdev->vars.gen_sp_status_block.hc_sp_status_blk->sp_sb.running_index, pdev->vars.gen_sp_status_block.sb_data.igu_sb_id);

    DbgMessage(pdev, INFORMi, "____________________________________________________________\n");
#endif
}

/**
 * This function sets all the status-block ack values back to
 * zero. Must be called BEFORE initializing the igu + before
 * initializing status-blocks.
 *
 * @param pdev
 */
void lm_reset_sb_ack_values(struct _lm_device_t *pdev)
{
    //re-initialize all the local copy indices of sbs for load/unload scenarios
    pdev->vars.hc_def_ack = 0;

    //init attn state
    pdev->vars.attn_state = 0;

    pdev->vars.attn_def_ack = 0;

    mm_memset(pdev->vars.c_hc_ack, 0, sizeof(pdev->vars.c_hc_ack));
    mm_memset(pdev->vars.u_hc_ack, 0, sizeof(pdev->vars.u_hc_ack));
}

static void init_hc_attn_status_block(struct _lm_device_t *pdev,
                                      u8_t sb_id,
                                      lm_address_t *host_sb_addr)
{
    volatile struct atten_sp_status_block * attention_sb = NULL;
    //give the IGU the status block number(ID) of attention bits section.
    DbgBreakIf(!pdev);

    DbgMessage(pdev, INFORMi, "init_status_block: host_sb_addr_low:0x%x; host_sb_addr_high:0x%x\n",
               host_sb_addr->as_u32.low, host_sb_addr->as_u32.high);
    attention_sb = lm_get_attention_status_block(pdev);
    attention_sb->status_block_id = sb_id;
    //write to IGU the physical address where the attention bits lie
    REG_WR(pdev, HC_REG_ATTN_MSG0_ADDR_L + 8*PORT_ID(pdev), host_sb_addr->as_u32.low);
    REG_WR(pdev, HC_REG_ATTN_MSG0_ADDR_H + 8*PORT_ID(pdev), host_sb_addr->as_u32.high);
}

static void init_igu_attn_status_block(
    struct _lm_device_t *pdev,
    lm_address_t *host_sb_addr)
{
    //write to IGU the physical address where the attention bits lie
    REG_WR(pdev, IGU_REG_ATTN_MSG_ADDR_L, host_sb_addr->as_u32.low);
    REG_WR(pdev, IGU_REG_ATTN_MSG_ADDR_H, host_sb_addr->as_u32.high);

    DbgMessage(pdev, INFORMi, "init_attn_igu_status_block: host_sb_addr_low:0x%x; host_sb_addr_high:0x%x\n",
               host_sb_addr->as_u32.low, host_sb_addr->as_u32.high);
}

static void init_attn_status_block(struct _lm_device_t *pdev,
                                   u8_t sb_id,
                                   lm_address_t *host_sb_addr)
{
    if (INTR_BLK_TYPE(pdev) == INTR_BLK_HC) {
        init_hc_attn_status_block(pdev, sb_id, host_sb_addr);
    } else {
        init_igu_attn_status_block(pdev, host_sb_addr);
    }
}

static void lm_init_sp_status_block(struct _lm_device_t *pdev)
{
    lm_address_t sb_phy_addr;
    u8_t igu_sp_sb_index; /* igu Status Block constant identifier (0-135) */
    u8_t igu_seg_id;
    u8_t func;
    u8_t i;

    DbgBreakIf(!pdev);
    DbgBreakIf(IS_VFDEV(pdev));

    DbgBreakIf((CSTORM_SP_STATUS_BLOCK_SIZE % 4) != 0);
    DbgBreakIf((CSTORM_SP_STATUS_BLOCK_DATA_SIZE % 4) != 0);
    DbgBreakIf((CSTORM_SP_SYNC_BLOCK_SIZE % 4) != 0);
    func = FUNC_ID(pdev);

    if ((INTR_BLK_TYPE(pdev) == INTR_BLK_IGU) && (INTR_BLK_MODE(pdev) == INTR_BLK_MODE_NORM)) {
        igu_sp_sb_index = IGU_DSB_ID(pdev);
        igu_seg_id = IGU_SEG_ACCESS_DEF;
    } else {
        igu_sp_sb_index = DEF_STATUS_BLOCK_IGU_INDEX;
        igu_seg_id = HC_SEG_ACCESS_DEF;
    }

    sb_phy_addr = pdev->vars.gen_sp_status_block.blk_phy_address;

    init_attn_status_block(pdev, igu_sp_sb_index, &sb_phy_addr);

    LM_INC64(&sb_phy_addr, OFFSETOF(struct host_sp_status_block, sp_sb));

    /* CQ#46240: Disable the function in the status-block data before nullifying sync-line + status-block */
    LM_INTMEM_WRITE8(pdev, CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
                     SB_DISABLED, BAR_CSTRORM_INTMEM);

    REG_WR_DMAE_LEN_ZERO(pdev, CSEM_REG_FAST_MEMORY + CSTORM_SP_SYNC_BLOCK_OFFSET(func), CSTORM_SP_SYNC_BLOCK_SIZE/4);
    REG_WR_DMAE_LEN_ZERO(pdev, CSEM_REG_FAST_MEMORY + CSTORM_SP_STATUS_BLOCK_OFFSET(func), CSTORM_SP_STATUS_BLOCK_SIZE/4);

    pdev->vars.gen_sp_status_block.sb_data.host_sb_addr.lo = sb_phy_addr.as_u32.low;
    pdev->vars.gen_sp_status_block.sb_data.host_sb_addr.hi = sb_phy_addr.as_u32.high;
    pdev->vars.gen_sp_status_block.sb_data.igu_sb_id = igu_sp_sb_index;
    pdev->vars.gen_sp_status_block.sb_data.igu_seg_id = igu_seg_id;
    pdev->vars.gen_sp_status_block.sb_data.p_func.pf_id = func;
    pdev->vars.gen_sp_status_block.sb_data.p_func.vnic_id = VNIC_ID(pdev);
    pdev->vars.gen_sp_status_block.sb_data.p_func.vf_id = 0xff;
    pdev->vars.gen_sp_status_block.sb_data.p_func.vf_valid = FALSE;
    pdev->vars.gen_sp_status_block.sb_data.state = SB_ENABLED;

    for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32_t); i++) {
        LM_INTMEM_WRITE32(PFDEV(pdev), CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) + i*sizeof(u32_t), *((u32_t*)&pdev->vars.gen_sp_status_block.sb_data + i), BAR_CSTRORM_INTMEM);
    }
}

/* Initialize all the status blocks per port - overall: 1 default sb, 16 non-default sbs
 *
 * Parameters:
 * pdev - the LM device which holds the sbs
 * port - the port number
 */
void init_status_blocks(struct _lm_device_t *pdev)
{
    u8_t sb_id = 0;
    u8_t port = PORT_ID(pdev);
    u8_t group_idx;
    DbgMessage(pdev, INFORMi, "init_status_blocks() inside! func:%d\n", FUNC_ID(pdev));
    DbgBreakIf(!pdev);

    pdev->vars.num_attn_sig_regs =
        (CHIP_IS_E1x(pdev)) ? NUM_ATTN_REGS_E1X : NUM_ATTN_REGS_E2;

    //Read routing configuration for attn signal output of groups. Currently, only group 0,1,2 are wired.
    for (group_idx = 0; group_idx < MAX_DYNAMIC_ATTN_GRPS; group_idx++)
    {
        //group index
        pdev->vars.attn_groups_output[group_idx].attn_sig_dword[0] =
            REG_RD(pdev, (PORT_ID(pdev) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0) + group_idx*16);
        pdev->vars.attn_groups_output[group_idx].attn_sig_dword[1] =
            REG_RD(pdev, (PORT_ID(pdev) ? MISC_REG_AEU_ENABLE2_FUNC_1_OUT_0 : MISC_REG_AEU_ENABLE2_FUNC_0_OUT_0) + group_idx*16);
        pdev->vars.attn_groups_output[group_idx].attn_sig_dword[2] =
            REG_RD(pdev, (PORT_ID(pdev) ? MISC_REG_AEU_ENABLE3_FUNC_1_OUT_0 : MISC_REG_AEU_ENABLE3_FUNC_0_OUT_0) + group_idx*16);
        pdev->vars.attn_groups_output[group_idx].attn_sig_dword[3] =
            REG_RD(pdev, (PORT_ID(pdev) ? MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0 : MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0) + group_idx*16);
        if (pdev->vars.num_attn_sig_regs == 5) {
            /* enable5 is separate from the rest of the registers, and therefore the address skip is 4 and not 16 between the different groups */
            pdev->vars.attn_groups_output[group_idx].attn_sig_dword[4] =
                REG_RD(pdev, (PORT_ID(pdev) ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 : MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0) + group_idx*4);
        } else {
            pdev->vars.attn_groups_output[group_idx].attn_sig_dword[4] = 0;
        }

        DbgMessage(pdev, INFORMi, "lm_handle_deassertion_processing: group %d mask1:0x%x, mask2:0x%x, mask3:0x%x, mask4:0x%x, mask5:0x%x\n",
                   group_idx,
                   pdev->vars.attn_groups_output[group_idx].attn_sig_dword[0],
                   pdev->vars.attn_groups_output[group_idx].attn_sig_dword[1],
                   pdev->vars.attn_groups_output[group_idx].attn_sig_dword[2],
                   pdev->vars.attn_groups_output[group_idx].attn_sig_dword[3],
                   pdev->vars.attn_groups_output[group_idx].attn_sig_dword[4]);
    }
    pdev->vars.attn_sig_af_inv_reg_addr[0] =
        PORT_ID(pdev) ? MISC_REG_AEU_AFTER_INVERT_1_FUNC_1 : MISC_REG_AEU_AFTER_INVERT_1_FUNC_0;
    pdev->vars.attn_sig_af_inv_reg_addr[1] =
        PORT_ID(pdev) ? MISC_REG_AEU_AFTER_INVERT_2_FUNC_1 : MISC_REG_AEU_AFTER_INVERT_2_FUNC_0;
    pdev->vars.attn_sig_af_inv_reg_addr[2] =
        PORT_ID(pdev) ? MISC_REG_AEU_AFTER_INVERT_3_FUNC_1 : MISC_REG_AEU_AFTER_INVERT_3_FUNC_0;
    pdev->vars.attn_sig_af_inv_reg_addr[3] =
        PORT_ID(pdev) ? MISC_REG_AEU_AFTER_INVERT_4_FUNC_1 : MISC_REG_AEU_AFTER_INVERT_4_FUNC_0;
    pdev->vars.attn_sig_af_inv_reg_addr[4] =
        PORT_ID(pdev) ? MISC_REG_AEU_AFTER_INVERT_5_FUNC_1 : MISC_REG_AEU_AFTER_INVERT_5_FUNC_0;

    // init the non-default status blocks
    LM_FOREACH_SB_ID(pdev, sb_id)
    {
        lm_init_non_def_status_block(pdev, sb_id, port);
    }

    if (pdev->params.int_coalesing_mode_disabled_by_ndis) {
        lm_set_interrupt_moderation(pdev, FALSE);
    }
    // init the default status block - composed of 5 parts per storm: Attention bits, Ustorm, Cstorm, Xstorm, Tstorm

    //Init the attention bits part of the default status block
    lm_init_sp_status_block(pdev);
}

/* Set interrupt coalescing parameters.
   - These settings are derived from the user-configured interrupt coalescing mode and tx/rx interrupt rates (lm params).
   - These settings are used for status block initialization. */
void lm_set_int_coal_info(struct _lm_device_t *pdev)
{
    lm_int_coalesing_info* ic = &pdev->vars.int_coal;
    u32_t rx_coal_usec[HC_USTORM_SB_NUM_INDICES];
    u32_t tx_coal_usec[HC_CSTORM_SB_NUM_INDICES];
    u32_t i = 0;

    mm_mem_zero(ic, sizeof(lm_int_coalesing_info));

    for (i = 0; i < HC_USTORM_SB_NUM_INDICES; i++) {
        rx_coal_usec[i] = 0;
    }

    for (i = 0; i < HC_CSTORM_SB_NUM_INDICES; i++) {
        tx_coal_usec[i] = 0;
    }

    switch (pdev->params.int_coalesing_mode)
    {
    case LM_INT_COAL_PERIODIC_SYNC: /* static periodic sync */
        for (i = 0; i < HC_USTORM_SB_NUM_INDICES; i++) {
            if (pdev->params.int_per_sec_rx_override)
                pdev->params.int_per_sec_rx[i] = pdev->params.int_per_sec_rx_override;

            DbgMessage(pdev, WARN, "##lm_set_int_coal_info: int_per_sec_rx[%d] = %d\n", i, pdev->params.int_per_sec_rx[i]);
            if (pdev->params.int_per_sec_rx[i])
            {
                rx_coal_usec[i] = 1000000 / pdev->params.int_per_sec_rx[i];
            }
            if (rx_coal_usec[i] > 0x3ff)
            {
                rx_coal_usec[i] = 0x3ff; /* cap period at ~1ms, i.e. at least ~1K int per sec */
            }
        }

        for (i = 0; i < HC_CSTORM_SB_NUM_INDICES; i++) {
            if (pdev->params.int_per_sec_tx_override)
                pdev->params.int_per_sec_tx[i] = pdev->params.int_per_sec_tx_override;

            DbgMessage(pdev, WARN, "##lm_set_int_coal_info: int_per_sec_tx[%d] = %d\n", i, pdev->params.int_per_sec_tx[i]);

            if (pdev->params.int_per_sec_tx[i])
            {
                tx_coal_usec[i] = 1000000 / pdev->params.int_per_sec_tx[i];
            }
            if (tx_coal_usec[i] > 0x3ff)
            {
                tx_coal_usec[i] = 0x3ff; /* cap period at ~1ms, i.e. at least ~1K int per sec */
            }
        }
        break;

    case LM_INT_COAL_NONE: /* this is the default */
    default:
        break;
    }
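
    /*
     * Worked example of the conversion above: int_per_sec_rx[i] == 20000
     * yields a period of 1000000 / 20000 = 50us, while a low rate such as
     * 500 int/sec would yield 2000us and is clamped to the 10-bit maximum
     * of 0x3ff (1023us) - an effective floor of ~977 interrupts per second.
     */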

    /* set hc period for c sb for all indices */
    for (i = 0; i < HC_CSTORM_SB_NUM_INDICES; i++) {
        ic->hc_usec_c_sb[i] = tx_coal_usec[i];
    }
    /* set hc period for u sb for all indices */
    for (i = 0; i < HC_USTORM_SB_NUM_INDICES; i++) {
        ic->hc_usec_u_sb[i] = rx_coal_usec[i];
    }

#if 0
    if (pdev->params.l4_fw_dca_enabled) {
        /* set TOE HC to minimum possible for ustorm */
        ic->hc_usec_u_sb[HC_INDEX_U_TOE_RX_CQ_CONS] = pdev->params.l4_hc_ustorm_thresh; /* 12usec */
    }
#endif

    /* by default set hc period for x/t/c/u default sb to NONE.
       (that was already implicitly done by memset 0 above) */

    /* set dynamic hc params */
    for (i = 0; i < HC_USTORM_SB_NUM_INDICES; i++) {
        ic->eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout0[i] = (u8_t)pdev->params.hc_timeout0[SM_RX_ID][i];
        ic->eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout1[i] = (u8_t)pdev->params.hc_timeout1[SM_RX_ID][i];
        ic->eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout2[i] = (u8_t)pdev->params.hc_timeout2[SM_RX_ID][i];
        ic->eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout3[i] = (u8_t)pdev->params.hc_timeout3[SM_RX_ID][i];
    }
    ic->eth_dynamic_hc_cfg.sm_config[SM_RX_ID].threshold[0] = pdev->params.hc_threshold0[SM_RX_ID];
    ic->eth_dynamic_hc_cfg.sm_config[SM_RX_ID].threshold[1] = pdev->params.hc_threshold1[SM_RX_ID];
    ic->eth_dynamic_hc_cfg.sm_config[SM_RX_ID].threshold[2] = pdev->params.hc_threshold2[SM_RX_ID];

    for (i = 0; i < HC_CSTORM_SB_NUM_INDICES; i++) {
        ic->eth_dynamic_hc_cfg.sm_config[SM_TX_ID].hc_timeout0[i] = (u8_t)pdev->params.hc_timeout0[SM_TX_ID][i];
        ic->eth_dynamic_hc_cfg.sm_config[SM_TX_ID].hc_timeout1[i] = (u8_t)pdev->params.hc_timeout1[SM_TX_ID][i];
        ic->eth_dynamic_hc_cfg.sm_config[SM_TX_ID].hc_timeout2[i] = (u8_t)pdev->params.hc_timeout2[SM_TX_ID][i];
        ic->eth_dynamic_hc_cfg.sm_config[SM_TX_ID].hc_timeout3[i] = (u8_t)pdev->params.hc_timeout3[SM_TX_ID][i];
    }
    ic->eth_dynamic_hc_cfg.sm_config[SM_TX_ID].threshold[0] = pdev->params.hc_threshold0[SM_TX_ID];
    ic->eth_dynamic_hc_cfg.sm_config[SM_TX_ID].threshold[1] = pdev->params.hc_threshold1[SM_TX_ID];
    ic->eth_dynamic_hc_cfg.sm_config[SM_TX_ID].threshold[2] = pdev->params.hc_threshold2[SM_TX_ID];
}


void lm_setup_ndsb_index(struct _lm_device_t *pdev, u8_t sb_id, u8_t idx, u8_t sm_idx, u8_t timeout, u8_t dhc_enable)
{
    struct hc_index_data * hc_index_entry;
    if (CHIP_IS_E1x(pdev)) {
        hc_index_entry = pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e1x_sb_data.index_data + idx;
    } else {
        hc_index_entry = pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.index_data + idx;
    }
    hc_index_entry->timeout = timeout;
    hc_index_entry->flags = (sm_idx << HC_INDEX_DATA_SM_ID_SHIFT) & HC_INDEX_DATA_SM_ID;
    if (timeout) {
        hc_index_entry->flags |= HC_INDEX_DATA_HC_ENABLED;
    }
    if (dhc_enable) {
        hc_index_entry->flags |= HC_INDEX_DATA_DYNAMIC_HC_ENABLED;
    }
}
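
/*
 * Example of the resulting entry (an illustration, not an exhaustive spec):
 * for sm_idx == SM_TX_ID with a non-zero timeout and dhc_enable == FALSE,
 * flags ends up as (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT) |
 * HC_INDEX_DATA_HC_ENABLED. Since callers divide by
 * HC_TIMEOUT_RESOLUTION_IN_US before calling, 'timeout' is expressed in
 * units of that resolution rather than raw microseconds.
 */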

void lm_setup_ndsb_state_machine(struct _lm_device_t *pdev, u8_t sb_id, u8_t sm_id, u8_t igu_sb_id, u8_t igu_seg_id)
{
    struct hc_status_block_sm * hc_state_machine;
    if (CHIP_IS_E1x(pdev)) {
        hc_state_machine = pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e1x_sb_data.common.state_machine + sm_id;
    } else {
        hc_state_machine = pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.common.state_machine + sm_id;
    }

    hc_state_machine->igu_sb_id = igu_sb_id;
    hc_state_machine->igu_seg_id = igu_seg_id;
    hc_state_machine->timer_value = 0xFF;
    hc_state_machine->time_to_expire = 0xFFFFFFFF;
}

void lm_int_hc_ack_sb(lm_device_t *pdev, u8_t rss_id, u8_t storm_id, u16_t sb_index, u8_t int_op, u8_t is_update_idx)
{
    struct igu_ack_register hc_data;

    //this is the result which should be communicated to the driver!
    u32_t result = 0;

    //don't forget this
    hc_data.sb_id_and_flags = 0;
    hc_data.status_block_index = 0;

    DbgMessage(pdev, INFORMi, "lm_int_ack_sb() inside! rss_id:%d, sb_index:%d, func_num:%d is_update:%d\n", rss_id, sb_index, FUNC_ID(pdev), is_update_idx);

    hc_data.sb_id_and_flags |= (0xffffffff & (int_op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
    hc_data.sb_id_and_flags |= (0xffffffff & (rss_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT));
    hc_data.sb_id_and_flags |= (0xffffffff & (storm_id << IGU_ACK_REGISTER_STORM_ID_SHIFT));
    hc_data.sb_id_and_flags |= (0xffffffff & (is_update_idx << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT));
    hc_data.status_block_index = sb_index;

    DbgMessage(pdev, INFORMi, "lm_int_ack_sb() inside! data:0x%x; status_block_index:%d\n", hc_data.sb_id_and_flags, hc_data.status_block_index);

    result = ((u32_t)hc_data.sb_id_and_flags) << 16 | hc_data.status_block_index;

    DbgMessage(pdev, INFORMi, "lm_int_ack_sb() result:0x%x\n", result);

    // interrupt ack
    REG_WR(pdev, HC_REG_COMMAND_REG + PORT_ID(pdev)*32 + COMMAND_REG_INT_ACK, result);
}


void lm_int_igu_ack_sb(lm_device_t *pdev, u8_t igu_sb_id, u8_t segment_access, u16_t sb_index, u8_t int_op, u8_t is_update_idx)
{
    struct igu_regular cmd_data;
    struct igu_ctrl_reg cmd_ctrl;
    u32_t cmd_addr;

    //DbgMessage(pdev, FATAL, "int-igu-ack segment_access=%d\n", segment_access);
    DbgBreakIf(sb_index & ~IGU_REGULAR_SB_INDEX);

    /*
     * We may get here with IGU disabled. In that case, no IGU access is permitted.
     */
    if (!pdev->vars.enable_intr)
    {
        return;
    }

    cmd_data.sb_id_and_flags =
        ((sb_index << IGU_REGULAR_SB_INDEX_SHIFT) |
         (segment_access << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
         (is_update_idx << IGU_REGULAR_BUPDATE_SHIFT) |
         (int_op << IGU_REGULAR_ENABLE_INT_SHIFT));

    cmd_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;

    if (INTR_BLK_ACCESS(pdev) == INTR_BLK_ACCESS_IGUMEM) {
        if (IS_PFDEV(pdev)) {
            REG_WR(pdev, BAR_IGU_INTMEM + cmd_addr*8, cmd_data.sb_id_and_flags);
        } else {
            VF_REG_WR(pdev, VF_BAR0_IGU_OFFSET + cmd_addr*8, cmd_data.sb_id_and_flags);
        }
    } else {
        u8_t igu_func_id = 0;

        /* GRC ACCESS: */
        DbgBreakIf(IS_VFDEV(pdev));
        /* Write the Data, then the control */
        /* [18:12] - FID (if VF - [18] = 0; [17:12] = VF number; if PF - [18] = 1; [17:14] = 0; [13:12] = PF number) */
        igu_func_id = IGU_FUNC_ID(pdev);
        cmd_ctrl.ctrl_data =
            ((cmd_addr << IGU_CTRL_REG_ADDRESS_SHIFT) |
             (igu_func_id << IGU_CTRL_REG_FID_SHIFT) |
             (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT));

        REG_WR(pdev, IGU_REG_COMMAND_REG_32LSB_DATA, cmd_data.sb_id_and_flags);
        REG_WR(pdev, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl.ctrl_data);
    }
}
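
/*
 * Note on the two ack paths above: with IGUMEM access the ack is a single
 * memory-mapped write into BAR0 (each IGU command slot occupies 8 bytes,
 * hence the cmd_addr*8 scaling); with GRC access the same command is issued
 * indirectly by writing the 32-bit data word first and then the control
 * word that carries the command address and FID.
 */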

void lm_int_igu_sb_cleanup(lm_device_t *pdev, u8 igu_sb_id)
{
    struct igu_regular cmd_data = {0};
    struct igu_ctrl_reg cmd_ctrl = {0};
    u32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (igu_sb_id/32)*4;
    u32_t sb_bit = 1 << (igu_sb_id%32);
    u32_t cnt = 100;

#ifdef _VBD_CMD_
    return;
#endif

    /* Not supported in backward compatible mode! */
    if (INTR_BLK_MODE(pdev) == INTR_BLK_MODE_BC)
    {
        return;
    }

    /* Cleanup can be done only via GRC access using the producer update command */
    cmd_data.sb_id_and_flags =
        ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup << IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
         IGU_REGULAR_CLEANUP_SET |
         IGU_REGULAR_BCLEANUP);

    cmd_ctrl.ctrl_data =
        (((IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id) << IGU_CTRL_REG_ADDRESS_SHIFT) |
         (IGU_FUNC_ID(pdev) << IGU_CTRL_REG_FID_SHIFT) |
         (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT));

    REG_WR(pdev, IGU_REG_COMMAND_REG_32LSB_DATA, cmd_data.sb_id_and_flags);
    REG_WR(pdev, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl.ctrl_data);

    /* wait for clean up to finish */
    while (!(REG_RD(pdev, igu_addr_ack) & sb_bit) && --cnt)
    {
        mm_wait(pdev, 10);
    }

    if (!(REG_RD(pdev, igu_addr_ack) & sb_bit))
    {
        DbgMessage(pdev, FATAL, "Unable to finish IGU cleanup - set: igu_sb_id %d offset %d bit %d (cnt %d)\n",
                   igu_sb_id, igu_sb_id/32, igu_sb_id%32, cnt);
    }

    /* Now we clear the cleanup-bit... same command without cleanup_set... */
    cmd_data.sb_id_and_flags =
        ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup << IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
         IGU_REGULAR_BCLEANUP);

    REG_WR(pdev, IGU_REG_COMMAND_REG_32LSB_DATA, cmd_data.sb_id_and_flags);
    REG_WR(pdev, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl.ctrl_data);

    /* wait for clean up to finish */
    while ((REG_RD(pdev, igu_addr_ack) & sb_bit) && --cnt)
    {
        mm_wait(pdev, 10);
    }

    if ((REG_RD(pdev, igu_addr_ack) & sb_bit))
    {
        DbgMessage(pdev, FATAL, "Unable to finish IGU cleanup - clear: igu_sb_id %d offset %d bit %d (cnt %d)\n",
                   igu_sb_id, igu_sb_id/32, igu_sb_id%32, cnt);
    }
}
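
/*
 * Rough wait budget above (assuming mm_wait() sleeps on the order of 10us
 * per call): cnt starts at 100, so each polling loop waits up to ~1ms.
 * Note that cnt is shared between the "set" and "clear" phases, so the
 * second loop only gets whatever budget the first loop left over.
 */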


void lm_int_ack_def_sb_disable(lm_device_t *pdev)
{
    pdev->debug_info.ack_def_dis++;
    if (INTR_BLK_TYPE(pdev) == INTR_BLK_HC) {
        lm_int_hc_ack_sb(pdev, DEF_STATUS_BLOCK_IGU_INDEX, HC_SEG_ACCESS_DEF, DEF_SB_INDEX(pdev), IGU_INT_DISABLE, 0); //DEF_STATUS_BLOCK_INDEX
    } else {
        if (INTR_BLK_MODE(pdev) == INTR_BLK_MODE_BC) {
            lm_int_igu_ack_sb(pdev, IGU_DSB_ID(pdev), HC_SEG_ACCESS_DEF, DEF_SB_INDEX(pdev), IGU_INT_DISABLE, 0);
        } else {
            lm_int_igu_ack_sb(pdev, IGU_DSB_ID(pdev), IGU_SEG_ACCESS_DEF, DEF_SB_INDEX(pdev), IGU_INT_DISABLE, 1);
        }
    }
}

/* Assumptions: Called when acking a status-block and enabling interrupts */
void lm_int_ack_def_sb_enable(lm_device_t *pdev)
{
    pdev->debug_info.ack_def_en++;
    if (INTR_BLK_TYPE(pdev) == INTR_BLK_HC) {
        lm_int_hc_ack_sb(pdev, DEF_STATUS_BLOCK_IGU_INDEX, HC_SEG_ACCESS_ATTN, DEF_SB_INDEX_OF_ATTN(pdev), IGU_INT_NOP, 1); //DEF_STATUS_BLOCK_INDEX
        lm_int_hc_ack_sb(pdev, DEF_STATUS_BLOCK_IGU_INDEX, HC_SEG_ACCESS_DEF, DEF_SB_INDEX(pdev), IGU_INT_ENABLE, 1); //DEF_STATUS_BLOCK_INDEX
    } else {
        if (INTR_BLK_MODE(pdev) == INTR_BLK_MODE_BC) {
            lm_int_igu_ack_sb(pdev, IGU_DSB_ID(pdev), HC_SEG_ACCESS_ATTN, DEF_SB_INDEX_OF_ATTN(pdev), IGU_INT_NOP, 1);
            lm_int_igu_ack_sb(pdev, IGU_DSB_ID(pdev), HC_SEG_ACCESS_DEF, DEF_SB_INDEX(pdev), IGU_INT_ENABLE, 1);
        } else {
            lm_int_igu_ack_sb(pdev, IGU_DSB_ID(pdev), IGU_SEG_ACCESS_ATTN, DEF_SB_INDEX_OF_ATTN(pdev), IGU_INT_NOP, 1);
            lm_int_igu_ack_sb(pdev, IGU_DSB_ID(pdev), IGU_SEG_ACCESS_DEF, DEF_SB_INDEX(pdev), IGU_INT_ENABLE, 1);
        }
    }
}

void lm_int_ack_sb_disable(lm_device_t *pdev, u8_t rss_id)
{
    if (INTR_BLK_TYPE(pdev) == INTR_BLK_HC) {
        lm_int_hc_ack_sb(pdev, rss_id, HC_SEG_ACCESS_NORM, 0, IGU_INT_DISABLE, 0);
        pdev->debug_info.ack_dis[rss_id]++;
    } else {
        if (INTR_BLK_MODE(pdev) == INTR_BLK_MODE_BC) {
            lm_int_igu_ack_sb(pdev, rss_id + IGU_BASE_NDSB(pdev), HC_SEG_ACCESS_NORM, 0, IGU_INT_DISABLE, 0);
            pdev->debug_info.ack_dis[rss_id]++;
        } else {
            if (pdev->debug_info.ack_dis[rss_id] == pdev->debug_info.ack_en[rss_id]) {
                //DbgMessage(pdev, WARN, "********lm_int_ack_sb_disable() during DPC\n");
                // REG_WR(PFDEV(pdev), IGU_REG_ECO_RESERVED, 8);
                // DbgBreak();
            }
            if (IS_PFDEV(pdev))
            {
                lm_int_igu_ack_sb(pdev, rss_id + IGU_BASE_NDSB(pdev), IGU_SEG_ACCESS_NORM, 0, IGU_INT_DISABLE, 0);
            }
            else
            {
                lm_int_igu_ack_sb(pdev, IGU_VF_NDSB(pdev, rss_id), IGU_SEG_ACCESS_NORM, 0, IGU_INT_DISABLE, 0);
            }
            pdev->debug_info.ack_dis[rss_id]++;
        }
    }
}

void lm_int_ack_sb_enable(lm_device_t *pdev, u8_t rss_id)
{
    if (INTR_BLK_TYPE(pdev) == INTR_BLK_HC) {
        lm_int_hc_ack_sb(pdev, rss_id, HC_SEG_ACCESS_NORM, SB_RX_INDEX(pdev, rss_id), IGU_INT_ENABLE, 1);
        pdev->debug_info.ack_en[rss_id]++;
    } else {
        if (INTR_BLK_MODE(pdev) == INTR_BLK_MODE_BC) {
            lm_int_igu_ack_sb(pdev, rss_id + IGU_BASE_NDSB(pdev), HC_SEG_ACCESS_NORM, SB_RX_INDEX(pdev, rss_id), IGU_INT_ENABLE, 1);
            pdev->debug_info.ack_en[rss_id]++;
        } else {
            if (rss_id >= IGU_U_NDSB_OFFSET(pdev)) {
                if (IS_PFDEV(pdev))
                {
                    lm_int_igu_ack_sb(pdev, rss_id + IGU_BASE_NDSB(pdev), IGU_SEG_ACCESS_NORM, SB_RX_INDEX(pdev, rss_id), IGU_INT_ENABLE, 1);
                }
                else
                {
                    lm_int_igu_ack_sb(pdev, IGU_VF_NDSB(pdev, rss_id), IGU_SEG_ACCESS_NORM, SB_RX_INDEX(pdev, rss_id), IGU_INT_ENABLE, 1);
                }
                pdev->debug_info.ack_en[rss_id]++;
            } else {
                lm_int_igu_ack_sb(pdev, rss_id + IGU_BASE_NDSB(pdev), IGU_SEG_ACCESS_NORM, SB_TX_INDEX(pdev, rss_id), IGU_INT_ENABLE, 1);
            }
        }
    }
}

void lm_enable_hc_int(struct _lm_device_t *pdev)
{
    u32_t val;
    u32_t reg_name;

    DbgBreakIf(!pdev);

    reg_name = PORT_ID(pdev) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;

    DbgMessage(pdev, INFORMnv, "### lm_enable_int\n");

    val = 0x1000;

    SET_FLAGS(val, (PORT_ID(pdev) ? HC_CONFIG_1_REG_ATTN_BIT_EN_1 : HC_CONFIG_0_REG_ATTN_BIT_EN_0));

    switch (pdev->params.interrupt_mode) {
    case LM_INT_MODE_INTA:
        SET_FLAGS(val, (HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_SINGLE_ISR_EN_0));

        /* We trust that if we're in INTA the OS will take care of the configuration
         * space, and that will determine whether we are in INTA or MSI-X rather than
         * this configuration; we can't take the MSI-X bit down due to a HW bug. */
        if (CHIP_IS_E1(pdev))
        {
            SET_FLAGS(val, HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0);
        }
        break;
    case LM_INT_MODE_SIMD:
        SET_FLAGS(val, (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0));
        RESET_FLAGS(val, HC_CONFIG_0_REG_INT_LINE_EN_0);
        break;
    case LM_INT_MODE_MIMD:
        SET_FLAGS(val, HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0);
        RESET_FLAGS(val, (HC_CONFIG_0_REG_INT_LINE_EN_0 |
                          HC_CONFIG_0_REG_SINGLE_ISR_EN_0));
        break;
    default:
        DbgBreakMsg("Wrong Interrupt Mode\n");
        return;
    }

    if (CHIP_IS_E1(pdev))
    {
        REG_WR(pdev, HC_REG_INT_MASK + PORT_ID(pdev)*4, 0x1FFFF);
    }

    REG_WR(pdev, reg_name, val);

    if (!CHIP_IS_E1(pdev))
    {
        /* init leading/trailing edge */
        if (IS_MULTI_VNIC(pdev))
        {
            /* in mf mode:
             * - Set only VNIC bit out of the "per vnic group attentions" (bits[4-7]) */
            val = (0xee0f | (1 << (VNIC_ID(pdev) + 4)));
            /* Connect the PMF to NIG attention bit 8 */
            if (IS_PMF(pdev)) {
                val |= 0x1100;
            }
        } else
        {
            val = 0xffff;
        }
        REG_WR(pdev, (PORT_ID(pdev) ? HC_REG_TRAILING_EDGE_1 : HC_REG_TRAILING_EDGE_0), val);
        REG_WR(pdev, (PORT_ID(pdev) ? HC_REG_LEADING_EDGE_1 : HC_REG_LEADING_EDGE_0), val);
    }

    pdev->vars.enable_intr = 1;
}

lm_status_t lm_enable_igu_int(struct _lm_device_t *pdev)
{
    u32_t val = 0;

    if (ERR_IF(!pdev)) {
        return LM_STATUS_INVALID_PARAMETER;
    }

#ifdef VF_INVOLVED
    if (IS_VFDEV(pdev)) {
        lm_status_t lm_status;
        lm_status = lm_vf_enable_igu_int(pdev);
        if (lm_status != LM_STATUS_SUCCESS) {
            DbgMessage(pdev, FATAL, "VF can't enable igu interrupt\n");
            return lm_status;
        }
        pdev->vars.enable_intr = 1;
        return lm_status;
    }
#endif

    DbgMessage(pdev, INFORMnv, "### lm_enable_int\n");

    val = REG_RD(pdev, IGU_REG_PF_CONFIGURATION);

    SET_FLAGS(val, IGU_PF_CONF_FUNC_EN);
    SET_FLAGS(val, IGU_PF_CONF_ATTN_BIT_EN);

    switch (pdev->params.interrupt_mode) {
    case LM_INT_MODE_INTA:
        SET_FLAGS(val, (IGU_PF_CONF_INT_LINE_EN | IGU_PF_CONF_SINGLE_ISR_EN));
        RESET_FLAGS(val, IGU_PF_CONF_MSI_MSIX_EN);
        break;
    case LM_INT_MODE_SIMD:
        SET_FLAGS(val, (IGU_PF_CONF_SINGLE_ISR_EN | IGU_PF_CONF_MSI_MSIX_EN));
        RESET_FLAGS(val, IGU_PF_CONF_INT_LINE_EN);
        break;
    case LM_INT_MODE_MIMD:
        SET_FLAGS(val, IGU_PF_CONF_MSI_MSIX_EN);
        RESET_FLAGS(val, (IGU_PF_CONF_INT_LINE_EN | IGU_PF_CONF_SINGLE_ISR_EN));
        break;
    default:
        DbgBreakMsg("Wrong Interrupt Mode\n");
        return LM_STATUS_FAILURE;
    }

    REG_WR(pdev, IGU_REG_PF_CONFIGURATION, val);

    if (!CHIP_IS_E1(pdev))
    {
        /* init leading/trailing edge */
        if (IS_MULTI_VNIC(pdev))
        {
            /* in mf mode:
             * - Do not set the link attention (bit 11) (will be set by MCP for the PMF)
             * - Set only VNIC bit out of the "per vnic group attentions" (bits[4-7]) */
            val = (0xee0f | (1 << (VNIC_ID(pdev) + 4)));
            /* Connect the PMF to NIG attention bit 8 */
            if (IS_PMF(pdev)) {
                val |= 0x1100;
            }
        } else
        {
            val = 0xffff;
        }
        if (CHIP_IS_E3(pdev)) {
            val &= ~ATTN_SW_TIMER_4_FUNC; // To prevent Timer4 expiration attention
        }
        REG_WR(pdev, IGU_REG_TRAILING_EDGE_LATCH, val);
        REG_WR(pdev, IGU_REG_LEADING_EDGE_LATCH, val);
    }

    pdev->vars.enable_intr = 1;

    return LM_STATUS_SUCCESS;
}
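
/*
 * Worked example of the edge-latch value above: for VNIC_ID == 2 in MF mode,
 * val = 0xee0f | (1 << 6) = 0xee4f, and a PMF additionally ORs in 0x1100 for
 * the NIG/PMF attention bits. In single-VNIC mode all 16 bits are enabled
 * (0xffff).
 */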

void lm_enable_int(struct _lm_device_t *pdev)
{
    if (INTR_BLK_TYPE(pdev) == INTR_BLK_HC) {
        lm_enable_hc_int(pdev);
    } else {
        lm_enable_igu_int(pdev);
    }
}

void lm_disable_hc_int(struct _lm_device_t *pdev)
{
    u32_t val;
    u32_t reg_name;

    DbgBreakIf(!pdev);

    reg_name = PORT_ID(pdev) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;

    DbgMessage(pdev, INFORMnv, "### lm_disable_int\n");

    val = REG_RD(pdev, reg_name);

    /* disable both bits, for INTA, MSI and MSI-X. */
    RESET_FLAGS(val, (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                      HC_CONFIG_0_REG_INT_LINE_EN_0 |
                      HC_CONFIG_0_REG_ATTN_BIT_EN_0 |
                      HC_CONFIG_0_REG_SINGLE_ISR_EN_0));

    if (CHIP_IS_E1(pdev))
    {
        REG_WR(pdev, HC_REG_INT_MASK + PORT_ID(pdev)*4, 0);

        /* E1 errata: can't ever take the msix bit down */
        SET_FLAGS(val, HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0);
    }

    REG_WR(pdev, reg_name, val);

    pdev->vars.enable_intr = 0;
}

void lm_disable_igu_int(struct _lm_device_t *pdev)
{
    u32_t val;

    DbgBreakIf(!pdev);

    DbgMessage(pdev, INFORMnv, "### lm_disable_int\n");

#ifdef VF_INVOLVED
    if (IS_VFDEV(pdev)) {
        lm_vf_disable_igu_int(pdev);
        pdev->vars.enable_intr = 0;
        return;
    }
#endif

    val = REG_RD(pdev, IGU_REG_PF_CONFIGURATION);

    /* disable both bits, for INTA, MSI and MSI-X. */
    RESET_FLAGS(val, (IGU_PF_CONF_MSI_MSIX_EN |
                      IGU_PF_CONF_INT_LINE_EN |
                      IGU_PF_CONF_ATTN_BIT_EN |
                      IGU_PF_CONF_SINGLE_ISR_EN |
                      IGU_PF_CONF_FUNC_EN));

    REG_WR(pdev, IGU_REG_PF_CONFIGURATION, val);

    pdev->vars.enable_intr = 0;
}

void lm_disable_int(struct _lm_device_t *pdev)
{
    if (INTR_BLK_TYPE(pdev) == INTR_BLK_HC) {
        lm_disable_hc_int(pdev);
    } else {
        lm_disable_igu_int(pdev);
    }
}

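/*
 * Non-default status block initialization below proceeds in this order
 * (see the CQ#46240 note in the body):
 *   1. mark the SB as disabled in the CSTORM status-block data,
 *   2. zero the sync-line and the status block itself,
 *   3. build hc_status_block_data (pf/vf ids, state machines, HC indices),
 *   4. write the structure back to CSTORM internal memory.
 */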
void lm_init_non_def_status_block(struct _lm_device_t *pdev,
                                  u8_t sb_id,
                                  u8_t port)
{
    lm_int_coalesing_info *ic = &pdev->vars.int_coal;
    u8_t index = 0;
    const u8_t fw_sb_id = LM_FW_SB_ID(pdev, sb_id);
    const u8_t dhc_qzone_id = LM_FW_DHC_QZONE_ID(pdev, sb_id);
    const u8_t byte_counter_id = CHIP_IS_E1x(pdev) ? fw_sb_id : dhc_qzone_id;
    u8_t igu_sb_id = 0;
    u8_t igu_seg_id = 0;
    u8_t timeout = 0;
    u8_t dhc_enable = FALSE;
    u8_t sm_idx;
    u8_t hc_sb_max_indices;

    DbgBreakIf(!pdev);

    /* CQ#46240: Disable the function in the status-block data before nullifying sync-line + status-block */
    LM_INTMEM_WRITE8(pdev, CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fw_sb_id),
                     SB_DISABLED, BAR_CSTRORM_INTMEM);

    /* nullify the status block */
    DbgBreakIf((CSTORM_STATUS_BLOCK_SIZE % 4) != 0);
    DbgBreakIf((CSTORM_STATUS_BLOCK_DATA_SIZE % 4) != 0);
    DbgBreakIf((CSTORM_SYNC_BLOCK_SIZE % 4) != 0);

    for (index = 0; index < CSTORM_SYNC_BLOCK_SIZE / sizeof(u32_t); index++) {
        LM_INTMEM_WRITE32(PFDEV(pdev), CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id) + 4*index, 0, BAR_CSTRORM_INTMEM);
    }
    for (index = 0; index < CSTORM_STATUS_BLOCK_SIZE / sizeof(u32_t); index++) {
        LM_INTMEM_WRITE32(PFDEV(pdev), CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id) + 4*index, 0, BAR_CSTRORM_INTMEM);
    }

    /* Initialize cstorm_status_block_data structure */
    if (CHIP_IS_E1x(pdev)) {

        pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e1x_sb_data.common.p_func.pf_id = FUNC_ID(pdev);
        pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e1x_sb_data.common.p_func.vf_id = 0xff;
        pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e1x_sb_data.common.p_func.vf_valid = FALSE;
        pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e1x_sb_data.common.p_func.vnic_id = VNIC_ID(pdev);

        if (pdev->params.ndsb_type == LM_DOUBLE_SM_SINGLE_IGU) {
            pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e1x_sb_data.common.same_igu_sb_1b = TRUE;
        } else {
            pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e1x_sb_data.common.same_igu_sb_1b = FALSE;
        }

        pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e1x_sb_data.common.state = SB_ENABLED;
    } else {

        pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.common.p_func.pf_id = FUNC_ID(pdev);
        if (IS_PFDEV(pdev)) {
            pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.common.p_func.vf_id = 0xff;
            pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.common.p_func.vf_valid = FALSE;
        } else {
            pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.common.p_func.vf_id = ABS_VFID(pdev);
            pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.common.p_func.vf_valid = TRUE;
        }
        pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.common.p_func.vnic_id = VNIC_ID(pdev);
        if (pdev->params.ndsb_type == LM_DOUBLE_SM_SINGLE_IGU) {
            pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.common.same_igu_sb_1b = TRUE;
        } else {
            pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.common.same_igu_sb_1b = FALSE;
        }
        pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.common.dhc_qzone_id = dhc_qzone_id;

        pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.common.state = SB_ENABLED;
    }

    if ((INTR_BLK_TYPE(pdev) == INTR_BLK_IGU) && (INTR_BLK_MODE(pdev) == INTR_BLK_MODE_NORM)) {
        igu_sb_id = IGU_BASE_NDSB(pdev) + /*IGU_U_NDSB_OFFSET(pdev)*/ + sb_id;
        igu_seg_id = IGU_SEG_ACCESS_NORM;
    } else {
        igu_sb_id = sb_id;
        igu_seg_id = HC_SEG_ACCESS_NORM;
    }

    lm_setup_ndsb_state_machine(pdev, sb_id, SM_RX_ID, igu_sb_id + IGU_U_NDSB_OFFSET(pdev), igu_seg_id);
    if (pdev->params.ndsb_type != LM_SINGLE_SM) {
        lm_setup_ndsb_state_machine(pdev, sb_id, SM_TX_ID, igu_sb_id, igu_seg_id);
    }

    //init host coalescing params - supported dynamic HC indices
    if (CHIP_IS_E1x(pdev)) {
        hc_sb_max_indices = HC_SB_MAX_INDICES_E1X;
    } else {
        hc_sb_max_indices = HC_SB_MAX_INDICES_E2;
    }
    for (index = 0; index < hc_sb_max_indices; index++) {
        if (index < HC_DHC_SB_NUM_INDICES) {
            dhc_enable = (pdev->params.enable_dynamic_hc[index] != 0);
            REG_WR(PFDEV(pdev), CSEM_REG_FAST_MEMORY + CSTORM_BYTE_COUNTER_OFFSET(byte_counter_id, index), 0);
        } else {
            dhc_enable = FALSE;
        }
        switch (index) {
        case HC_INDEX_TOE_RX_CQ_CONS:
        case HC_INDEX_ETH_RX_CQ_CONS:
        case HC_INDEX_FCOE_EQ_CONS:
            sm_idx = SM_RX_ID;
            if (dhc_enable && ic->hc_usec_u_sb[index]) {
                timeout = (u8_t)pdev->params.hc_timeout0[SM_RX_ID][index];
            } else {
                timeout = (u8_t)(ic->hc_usec_u_sb[index] / HC_TIMEOUT_RESOLUTION_IN_US);
            }
            break;
        case HC_INDEX_TOE_TX_CQ_CONS:
            if (pdev->params.ndsb_type != LM_SINGLE_SM) {
                sm_idx = SM_TX_ID;
            } else {
                sm_idx = SM_RX_ID;
            }
            if (dhc_enable && ic->hc_usec_c_sb[0]) {
                if (pdev->params.ndsb_type != LM_SINGLE_SM) {
                    timeout = (u8_t)pdev->params.hc_timeout0[SM_TX_ID][index];
                } else {
                    timeout = (u8_t)pdev->params.hc_timeout0[SM_RX_ID][index];
                }
            } else {
                timeout = (u8_t)(ic->hc_usec_c_sb[0] / HC_TIMEOUT_RESOLUTION_IN_US);
            }
            break;

        case HC_INDEX_ETH_TX_CQ_CONS_COS0:
        case HC_INDEX_ETH_TX_CQ_CONS_COS1:
        case HC_INDEX_ETH_TX_CQ_CONS_COS2:
            if (pdev->params.ndsb_type != LM_SINGLE_SM) {
                sm_idx = SM_TX_ID;
            } else {
                sm_idx = SM_RX_ID;
            }
            // TODO Shayh: HC_PARAMS_ETH_INDEX (DYNAMIC_HC_ETH_INDEX) should be handled better via the registry
            // (not as part of this submit).
            timeout = (u8_t)(ic->hc_usec_c_sb[1] / HC_TIMEOUT_RESOLUTION_IN_US);
            break;

        case HC_INDEX_ISCSI_EQ_CONS:
            if (pdev->params.ndsb_type != LM_SINGLE_SM) {
                sm_idx = SM_TX_ID;
            } else {
                sm_idx = SM_RX_ID;
            }
            // DYNAMIC_HC_ISCSI_INDEX
            timeout = (u8_t)(ic->hc_usec_c_sb[2] / HC_TIMEOUT_RESOLUTION_IN_US);
            break;

        default:
            if (pdev->params.ndsb_type != LM_SINGLE_SM) {
                sm_idx = SM_TX_ID;
            } else {
                sm_idx = SM_RX_ID;
            }
            timeout = (u8_t)(ic->hc_usec_c_sb[3] / HC_TIMEOUT_RESOLUTION_IN_US);
            dhc_enable = FALSE;
            break;
        }
        lm_setup_ndsb_index(pdev, sb_id, index, sm_idx, timeout, dhc_enable);
    }
    if (CHIP_IS_E1x(pdev)) {
        for (index = 0; index < sizeof(struct hc_status_block_data_e1x)/sizeof(u32_t); index++) {
            LM_INTMEM_WRITE32(PFDEV(pdev), CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + sizeof(u32_t)*index,
                              *((u32_t*)(&pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e1x_sb_data) + index), BAR_CSTRORM_INTMEM);
        }
    } else {
        for (index = 0; index < sizeof(struct hc_status_block_data_e2)/sizeof(u32_t); index++) {
            LM_INTMEM_WRITE32(PFDEV(pdev), CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + sizeof(u32_t)*index,
                              *((u32_t*)(&pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data) + index), BAR_CSTRORM_INTMEM);
        }
    }
}
1509
/**
 * @description
 *    Get the HC_INDEX_ETH_TX_CQ_CONS_COSX index from the chain.
 * @param pdev
 * @param chain
 *
 * @return u8_t
 */
u8_t
lm_eth_tx_hc_cq_cons_cosx_from_chain(IN lm_device_t  *pdev,
                                     IN const u32_t  chain)
{
    u8_t sb_index_number = HC_INDEX_ETH_TX_CQ_CONS_COS0;
    const u8_t cos = lm_mp_cos_from_chain(pdev, chain);

    DbgBreakIf(lm_chain_type_not_cos == lm_mp_get_chain_type(pdev, chain));

    switch (cos)
    {
    case 0:
        sb_index_number = HC_INDEX_ETH_TX_CQ_CONS_COS0;
        break;
    case 1:
        sb_index_number = HC_INDEX_ETH_TX_CQ_CONS_COS1;
        break;
    case 2:
        sb_index_number = HC_INDEX_ETH_TX_CQ_CONS_COS2;
        break;
    default:
        DbgBreakMsg("Invalid cos");
        break;
    }

    return sb_index_number;
}
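
/* A minimal usage sketch (hypothetical caller, not part of this file): the
 * returned value selects the TX CQ consumer entry of the chain's status
 * block for its class of service.
 *
 *   const u8_t hc_index = lm_eth_tx_hc_cq_cons_cosx_from_chain(pdev, chain);
 *   // hc_index is one of HC_INDEX_ETH_TX_CQ_CONS_COS0..COS2 and can be
 *   // passed wherever a TX CQ consumer status-block index is expected,
 *   // e.g. as the 'index' argument of lm_setup_ndsb_index() above.
 */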

#ifdef VF_INVOLVED

lm_status_t lm_pf_init_vf_non_def_sb(struct _lm_device_t *pdev, lm_vf_info_t *vf_info, u8_t sb_idx, u64 sb_addr)
{
    lm_status_t lm_status = LM_STATUS_SUCCESS;

    lm_int_coalesing_info *ic = &pdev->vars.int_coal;
    u8_t index = 0;
    const u8_t fw_sb_id = LM_FW_VF_SB_ID(vf_info, sb_idx);
    const u8_t dhc_qzone_id = LM_FW_VF_DHC_QZONE_ID(vf_info, sb_idx);
    const u8_t byte_counter_id = dhc_qzone_id;
    u8_t igu_sb_id = 0;
    u8_t igu_seg_id = 0;
    lm_address_t sb_phy_address;
    u8_t hc_sb_max_indices;
    u8_t dhc_enable = FALSE;
    u8_t sm_idx;
    u8_t timeout = 0;

    DbgBreakIf(!pdev);

    /* CQ#46240: Disable the function in the status-block data before nullifying sync-line + status-block */
    LM_INTMEM_WRITE8(pdev, CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fw_sb_id),
                     SB_DISABLED, BAR_CSTRORM_INTMEM);

    /* nullify the status block */
    DbgBreakIf((CSTORM_STATUS_BLOCK_SIZE % 4) != 0);
    DbgBreakIf((CSTORM_STATUS_BLOCK_DATA_SIZE % 4) != 0);
    DbgBreakIf((CSTORM_SYNC_BLOCK_SIZE % 4) != 0);
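    /* These checks guarantee each block spans a whole number of dwords, since
     * the nullification loops below clear 32 bits per iteration and would
     * otherwise leave a tail of the block uncleared. */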
    if (IS_PFDEV(pdev)) {
        for (index = 0; index < CSTORM_SYNC_BLOCK_SIZE / sizeof(u32_t); index++) {
            LM_INTMEM_WRITE32(PFDEV(pdev), CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id) + 4*index, 0, BAR_CSTRORM_INTMEM);
        }
        for (index = 0; index < CSTORM_STATUS_BLOCK_SIZE / sizeof(u32_t); index++) {
            LM_INTMEM_WRITE32(PFDEV(pdev), CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id) + 4*index, 0, BAR_CSTRORM_INTMEM);
        }
    } else {
        DbgBreak();
    }

    sb_phy_address.as_u64 = sb_addr;
    pdev->vars.status_blocks_arr[LM_SW_VF_SB_ID(vf_info,sb_idx)].hc_status_block_data.e2_sb_data.common.host_sb_addr.lo = sb_phy_address.as_u32.low;
    pdev->vars.status_blocks_arr[LM_SW_VF_SB_ID(vf_info,sb_idx)].hc_status_block_data.e2_sb_data.common.host_sb_addr.hi = sb_phy_address.as_u32.high;

    /* Initialize cstorm_status_block_data structure */
    pdev->vars.status_blocks_arr[LM_SW_VF_SB_ID(vf_info,sb_idx)].hc_status_block_data.e2_sb_data.common.p_func.pf_id = FUNC_ID(pdev);
    pdev->vars.status_blocks_arr[LM_SW_VF_SB_ID(vf_info,sb_idx)].hc_status_block_data.e2_sb_data.common.p_func.vf_id = vf_info->abs_vf_id;
    pdev->vars.status_blocks_arr[LM_SW_VF_SB_ID(vf_info,sb_idx)].hc_status_block_data.e2_sb_data.common.p_func.vf_valid = TRUE;
    pdev->vars.status_blocks_arr[LM_SW_VF_SB_ID(vf_info,sb_idx)].hc_status_block_data.e2_sb_data.common.p_func.vnic_id = VNIC_ID(pdev);
    if (pdev->params.ndsb_type == LM_DOUBLE_SM_SINGLE_IGU) {
        pdev->vars.status_blocks_arr[LM_SW_VF_SB_ID(vf_info,sb_idx)].hc_status_block_data.e2_sb_data.common.same_igu_sb_1b = TRUE;
    } else {
        pdev->vars.status_blocks_arr[LM_SW_VF_SB_ID(vf_info,sb_idx)].hc_status_block_data.e2_sb_data.common.same_igu_sb_1b = FALSE;
    }
    pdev->vars.status_blocks_arr[LM_SW_VF_SB_ID(vf_info,sb_idx)].hc_status_block_data.e2_sb_data.common.dhc_qzone_id = dhc_qzone_id;
    pdev->vars.status_blocks_arr[LM_SW_VF_SB_ID(vf_info,sb_idx)].hc_status_block_data.e2_sb_data.common.state = SB_ENABLED;

    if ((INTR_BLK_TYPE(pdev) == INTR_BLK_IGU) && (INTR_BLK_MODE(pdev) == INTR_BLK_MODE_NORM)) {
        igu_sb_id = LM_VF_IGU_SB_ID(vf_info,sb_idx);
        igu_seg_id = IGU_SEG_ACCESS_NORM;
    } else {
        DbgBreak();
    }
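    /* VF status blocks are reachable only through the IGU in normal mode;
     * there is no HC or backward-compatible mapping for a VF SB, hence the
     * DbgBreak for any other interrupt-block configuration. */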

    lm_setup_ndsb_state_machine(pdev, LM_SW_VF_SB_ID(vf_info,sb_idx), SM_RX_ID, igu_sb_id + IGU_U_NDSB_OFFSET(pdev), igu_seg_id);
    if (pdev->params.ndsb_type != LM_SINGLE_SM) {
        lm_setup_ndsb_state_machine(pdev, LM_SW_VF_SB_ID(vf_info,sb_idx), SM_TX_ID, igu_sb_id, igu_seg_id);
    }

    // init host coalescing params - supported dynamic HC indices
    if (CHIP_IS_E1x(pdev)) {
        DbgBreak();
    } else {
        hc_sb_max_indices = HC_SB_MAX_INDICES_E2;
    }
    for (index = 0; index < hc_sb_max_indices; index++) {
        if (index < HC_DHC_SB_NUM_INDICES) {
            dhc_enable = (pdev->params.enable_dynamic_hc[index] != 0);
            REG_WR(PFDEV(pdev), CSEM_REG_FAST_MEMORY + CSTORM_BYTE_COUNTER_OFFSET(byte_counter_id, index), 0);
        } else {
            dhc_enable = FALSE;
        }
        switch (index) {
        case HC_INDEX_TOE_RX_CQ_CONS:
        case HC_INDEX_ETH_RX_CQ_CONS:
        case HC_INDEX_FCOE_EQ_CONS:
            sm_idx = SM_RX_ID;
            if (dhc_enable && ic->hc_usec_u_sb[index]) {
                timeout = (u8_t)pdev->params.hc_timeout0[SM_RX_ID][index];
            } else {
                timeout = (u8_t)(ic->hc_usec_u_sb[index] / HC_TIMEOUT_RESOLUTION_IN_US);
            }
            break;
        case HC_INDEX_TOE_TX_CQ_CONS:
            if (pdev->params.ndsb_type != LM_SINGLE_SM) {
                sm_idx = SM_TX_ID;
            } else {
                sm_idx = SM_RX_ID;
            }
            if (dhc_enable && ic->hc_usec_c_sb[0]) {
                if (pdev->params.ndsb_type != LM_SINGLE_SM) {
                    timeout = (u8_t)pdev->params.hc_timeout0[SM_TX_ID][index];
                } else {
                    timeout = (u8_t)pdev->params.hc_timeout0[SM_RX_ID][index];
                }
            } else {
                timeout = (u8_t)(ic->hc_usec_c_sb[0] / HC_TIMEOUT_RESOLUTION_IN_US);
            }
            break;

        case HC_INDEX_ETH_TX_CQ_CONS_COS0:
        case HC_INDEX_ETH_TX_CQ_CONS_COS1:
        case HC_INDEX_ETH_TX_CQ_CONS_COS2:
            if (pdev->params.ndsb_type != LM_SINGLE_SM) {
                sm_idx = SM_TX_ID;
            } else {
                sm_idx = SM_RX_ID;
            }
            // TODO Shayh: HC_PARAMS_ETH_INDEX (DYNAMIC_HC_ETH_INDEX) should be handled better from the registry
            // (not as part of this submit).
            timeout = (u8_t)(ic->hc_usec_c_sb[1] / HC_TIMEOUT_RESOLUTION_IN_US);
            break;

        case HC_INDEX_ISCSI_EQ_CONS:
            if (pdev->params.ndsb_type != LM_SINGLE_SM) {
                sm_idx = SM_TX_ID;
            } else {
                sm_idx = SM_RX_ID;
            }
            // DYNAMIC_HC_ISCSI_INDEX
            timeout = (u8_t)(ic->hc_usec_c_sb[2] / HC_TIMEOUT_RESOLUTION_IN_US);
            break;

        default:
            if (pdev->params.ndsb_type != LM_SINGLE_SM) {
                sm_idx = SM_TX_ID;
            } else {
                sm_idx = SM_RX_ID;
            }
            timeout = (u8_t)(ic->hc_usec_c_sb[3] / HC_TIMEOUT_RESOLUTION_IN_US);
            dhc_enable = FALSE;
            break;
        }
        lm_setup_ndsb_index(pdev, LM_SW_VF_SB_ID(vf_info,sb_idx), index, sm_idx, timeout, dhc_enable);
    }

    if (!CHIP_IS_E1x(pdev)) {
        for (index = 0; index < sizeof(struct hc_status_block_data_e2)/sizeof(u32_t); index++) {
            LM_INTMEM_WRITE32(pdev, CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + sizeof(u32_t)*index,
                              *((u32_t*)(&pdev->vars.status_blocks_arr[LM_SW_VF_SB_ID(vf_info,sb_idx)].hc_status_block_data.e2_sb_data) + index), BAR_CSTRORM_INTMEM);
        }
    } else {
        DbgBreak();
    }

    return lm_status;
}

#endif //VF_INVOLVED

void lm_clear_non_def_status_block(struct _lm_device_t *pdev, u8_t fw_sb_id)
{
    u32_t index = 0;
    u8_t func = 0;

    DbgBreakIf(!pdev);
    DbgMessage(pdev, INFORMi, "clear_status_block: fw_sb_id:%d\n", fw_sb_id);

    func = FUNC_ID(pdev);

    /* nullify the status block */
    DbgBreakIf((CSTORM_STATUS_BLOCK_SIZE % 4) != 0);
    DbgBreakIf((CSTORM_STATUS_BLOCK_DATA_SIZE % 4) != 0);
    DbgBreakIf((CSTORM_SYNC_BLOCK_SIZE % 4) != 0);

    LM_INTMEM_WRITE8(pdev, CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fw_sb_id),
                     SB_DISABLED, BAR_CSTRORM_INTMEM);
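    /* As in the init paths above (CQ#46240), the state is set to SB_DISABLED
     * before the sync line and status block are zeroed, so the chip never
     * observes a partially cleared but still-enabled status block. */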

    for (index = 0; index < CSTORM_SYNC_BLOCK_SIZE / sizeof(u32_t); index++) {
        LM_INTMEM_WRITE32(PFDEV(pdev), CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id) + 4*index, 0, BAR_CSTRORM_INTMEM);
    }
    for (index = 0; index < CSTORM_STATUS_BLOCK_SIZE / sizeof(u32_t); index++) {
        LM_INTMEM_WRITE32(PFDEV(pdev), CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id) + 4*index, 0, BAR_CSTRORM_INTMEM);
    }
}