/*******************************************************************************
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * Copyright 2014 QLogic Corporation
 * The contents of this file are subject to the terms of the
 * QLogic End User License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the License at
 * http://www.qlogic.com/Resources/Documents/DriverDownloadHelp/
 * QLogic_End_User_Software_License.txt
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 *
 * Module Description:
 *      This file contains functions that deal with resource allocation and setup
 *
 ******************************************************************************/
#include "lm5710.h"
#include "bd_chain.h"
#include "command.h"
#include "ecore_common.h"
#include "577xx_int_offsets.h"
#include "bcmtype.h"

// Equivalent to ceil() from math.h, which doesn't support u64_t.
#define _ceil( _x_32, _divisor_32 ) ((_x_32 / _divisor_32) + ( (_x_32%_divisor_32) ? 1 : 0))
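// e.g. _ceil(10, 4) == 3, _ceil(8, 4) == 2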

lm_status_t
lm_clear_chain_sb_cons_idx(
    IN struct _lm_device_t *pdev,
    IN u8_t sb_id,
    IN struct _lm_hc_sb_info_t *hc_sb_info,
    IN volatile u16_t ** hw_con_idx_ptr
    )
{
    u8_t  port       = 0;
    u8_t  func       = 0;
    u16_t rd_val     = 0xFFFF;
    u32_t rd_val_32  = 0xFFFFFFFF;
    u8_t  fw_sb_id   = 0;
    u8_t  sb_lock_id = 0;

    if (CHK_NULL(pdev) || CHK_NULL(hc_sb_info) || CHK_NULL(hw_con_idx_ptr))
    {
        return LM_STATUS_INVALID_PARAMETER;
    }

    if (IS_VFDEV(pdev))
    {
        return LM_STATUS_SUCCESS;
    }

    sb_lock_id = lm_sb_id_from_chain(pdev, sb_id);
    if (sb_lock_id == DEF_STATUS_BLOCK_INDEX)
    {
        sb_lock_id = DEF_STATUS_BLOCK_IGU_INDEX;
    }

    /* make sure that the SB is not being processed while we
     * clear the pointer */
    MM_ACQUIRE_SB_LOCK(pdev, sb_lock_id);

    *hw_con_idx_ptr = NULL;

    MM_RELEASE_SB_LOCK(pdev, sb_lock_id);

    if (lm_reset_is_inprogress(pdev))
    {
        return LM_STATUS_SUCCESS;
    }

    port = PORT_ID(pdev);
    func = FUNC_ID(pdev);
    fw_sb_id = LM_FW_SB_ID(pdev, sb_id);

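    /* Zero both the storm's "sync line" entry for this index and the status
     * block index value itself; each write is read back to verify it actually
     * reached the chip (see the comment at the end of the switch). */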
    switch (hc_sb_info->hc_sb) {
    case STATUS_BLOCK_SP_SL_TYPE:
        LM_INTMEM_WRITE16(pdev, CSTORM_SP_HC_SYNC_LINE_INDEX_OFFSET(hc_sb_info->hc_index_value,func), 0, BAR_CSTRORM_INTMEM);
        LM_INTMEM_READ16(pdev, CSTORM_SP_HC_SYNC_LINE_INDEX_OFFSET(hc_sb_info->hc_index_value,func),  &rd_val, BAR_CSTRORM_INTMEM);
        DbgBreakIfAll(rd_val != 0);

        LM_INTMEM_WRITE16(pdev, (CSTORM_SP_STATUS_BLOCK_OFFSET(func) + OFFSETOF(struct hc_sp_status_block, index_values) + (hc_sb_info->hc_index_value * sizeof(u16_t))), 0, BAR_CSTRORM_INTMEM);
        LM_INTMEM_READ16 (pdev, (CSTORM_SP_STATUS_BLOCK_OFFSET(func) + OFFSETOF(struct hc_sp_status_block, index_values) + (hc_sb_info->hc_index_value * sizeof(u16_t))), &rd_val, BAR_CSTRORM_INTMEM);
        DbgBreakIfAll(rd_val != 0);
        break;
    case STATUS_BLOCK_NORMAL_SL_TYPE:
        if (!LM_SB_ID_VALID(pdev, sb_id))
        {
            return LM_STATUS_INVALID_PARAMETER;
        }
        LM_INTMEM_WRITE32(PFDEV(pdev), CSTORM_HC_SYNC_LINE_DHC_OFFSET(hc_sb_info->hc_index_value, fw_sb_id), 0, BAR_CSTRORM_INTMEM);
        LM_INTMEM_READ32(PFDEV(pdev), CSTORM_HC_SYNC_LINE_DHC_OFFSET(hc_sb_info->hc_index_value, fw_sb_id), &rd_val_32, BAR_CSTRORM_INTMEM);
        DbgBreakIfAll(rd_val_32 != 0);
        /* FALLTHROUGH */
    case STATUS_BLOCK_NORMAL_TYPE:
        if (CHIP_IS_E1x(PFDEV(pdev))) {
            LM_INTMEM_WRITE16(PFDEV(pdev), CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hc_sb_info->hc_index_value, fw_sb_id), 0, BAR_CSTRORM_INTMEM);
            LM_INTMEM_READ16(PFDEV(pdev), CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hc_sb_info->hc_index_value, fw_sb_id), &rd_val, BAR_CSTRORM_INTMEM);
        } else {
            LM_INTMEM_WRITE16(PFDEV(pdev), CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hc_sb_info->hc_index_value, fw_sb_id), 0, BAR_CSTRORM_INTMEM);
            LM_INTMEM_READ16(PFDEV(pdev), CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hc_sb_info->hc_index_value, fw_sb_id), &rd_val, BAR_CSTRORM_INTMEM);
        }
        DbgBreakIfAll(rd_val != 0);
        if (CHIP_IS_E1x(pdev)) {
            LM_INTMEM_WRITE16(PFDEV(pdev), (CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id) + OFFSETOF(struct hc_status_block_e1x, index_values) + (hc_sb_info->hc_index_value * sizeof(u16_t))), 0, BAR_CSTRORM_INTMEM);
            LM_INTMEM_READ16 (PFDEV(pdev), (CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id) + OFFSETOF(struct hc_status_block_e1x, index_values) + (hc_sb_info->hc_index_value * sizeof(u16_t))), &rd_val, BAR_CSTRORM_INTMEM);
        } else {
            LM_INTMEM_WRITE16(PFDEV(pdev), (CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id) + OFFSETOF(struct hc_status_block_e2, index_values) + (hc_sb_info->hc_index_value * sizeof(u16_t))), 0, BAR_CSTRORM_INTMEM);
            LM_INTMEM_READ16 (PFDEV(pdev), (CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id) + OFFSETOF(struct hc_status_block_e2, index_values) + (hc_sb_info->hc_index_value * sizeof(u16_t))), &rd_val, BAR_CSTRORM_INTMEM);
        }
        break;
    default:
        DbgMessage(NULL, FATAL, "Invalid hc_sb value: 0x%x.\n", hc_sb_info->hc_sb);
        DbgBreakIf(1);
    }
    /* We read back from the same memory and verify that it's 0 to make sure
     * the value was actually written to the GRC and was not delayed in the PCI. */
    DbgBreakIfAll(rd_val != 0);

    return LM_STATUS_SUCCESS;
}
/**
 * @Description:
 *   Allocate the given number of coalesce buffers and queue them in the txq
 *   chain. One buffer is allocated for LSO packets, and the rest are
 *   allocated with MTU size.
 * @Return:
 *   lm_status
 */
static lm_status_t
lm_allocate_coalesce_buffers(
    lm_device_t     *pdev,
    lm_tx_chain_t   *txq,
    u32_t           coalesce_buf_cnt,
    u32_t           cid)
{
    lm_coalesce_buffer_t *last_coalesce_buf = NULL;
    lm_coalesce_buffer_t *coalesce_buf      = NULL;
    lm_address_t         mem_phy            = {{0}};
    u8_t *               mem_virt           = NULL;
    u32_t                mem_left           = 0;
    u32_t                mem_size           = 0;
    u32_t                buf_size           = 0;
    u32_t                cnt                = 0;
    u8_t                 mm_cli_idx         = 0;

    /* check arguments */
    if(CHK_NULL(pdev) || CHK_NULL(txq))
    {
        return LM_STATUS_FAILURE;
    }

    DbgMessage(pdev, VERBOSEi | VERBOSEl2sp,
                "#lm_allocate_coalesce_buffers, coalesce_buf_cnt=%d\n",
                coalesce_buf_cnt);

    mm_cli_idx = cid_to_resource(pdev, cid); //!!DP mm_cli_idx_to_um_idx(LM_CHAIN_IDX_CLI(pdev, idx));

    if(coalesce_buf_cnt == 0)
    {
        return LM_STATUS_SUCCESS;
    }

    buf_size = MAX_L2_CLI_BUFFER_SIZE(pdev, cid);

    mem_size = coalesce_buf_cnt * sizeof(lm_coalesce_buffer_t);
    mem_virt = mm_alloc_mem(pdev,mem_size, mm_cli_idx);
    if(ERR_IF(!mem_virt))
    {
        DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
        return LM_STATUS_RESOURCE;
    }
    mm_memset(mem_virt, 0, mem_size);

    /* Create a list of frame buffer descriptors. */
    coalesce_buf = (lm_coalesce_buffer_t *) mem_virt;
    for(cnt = 0; cnt < coalesce_buf_cnt; cnt++)
    {
        coalesce_buf->frags.cnt = 1;
        coalesce_buf->frags.size = 0; /* not in use */
        coalesce_buf->buf_size = buf_size;

        s_list_push_tail(
            &txq->coalesce_buf_list,
            &coalesce_buf->link);

        coalesce_buf++;
    }

    /* Have at least one coalesce buffer large enough to copy
     * an LSO frame. */
    coalesce_buf = (lm_coalesce_buffer_t *) s_list_peek_head(
        &txq->coalesce_buf_list);
    coalesce_buf->buf_size = 0x10000; /* TBD: consider apply change here for GSO */

    /* Determine the total memory for the coalesce buffers. */
    mem_left = 0;

    coalesce_buf = (lm_coalesce_buffer_t *) s_list_peek_head(
        &txq->coalesce_buf_list);
    while(coalesce_buf)
    {
        mem_left += coalesce_buf->buf_size;
        coalesce_buf = (lm_coalesce_buffer_t *) s_list_next_entry(
            &coalesce_buf->link);
    }

    mem_size = 0;

    /* Initialize all the descriptors to point to a buffer. */
    coalesce_buf = (lm_coalesce_buffer_t *) s_list_peek_head(
        &txq->coalesce_buf_list);
    while(coalesce_buf)
    {
        #define MAX_CONTIGUOUS_BLOCK            (64*1024)
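        /* Physical memory is allocated in contiguous blocks of at most
         * MAX_CONTIGUOUS_BLOCK bytes, and each block is parceled out across
         * consecutive descriptors: mem_size tracks the bytes left in the
         * current block, mem_left the bytes still to be allocated overall. */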

        /* Allocate a small block of memory at a time. */
        if(mem_size == 0)
        {
            last_coalesce_buf = coalesce_buf;

            while(coalesce_buf)
            {
                mem_size += coalesce_buf->buf_size;
                if(mem_size >= MAX_CONTIGUOUS_BLOCK) /* TBD: consider apply change here for GSO */
                {
                    break;
                }

                coalesce_buf = (lm_coalesce_buffer_t *) s_list_next_entry(
                    &coalesce_buf->link);
            }

            mem_left -= mem_size;

            mem_virt = mm_alloc_phys_mem( pdev, mem_size, &mem_phy, 0, mm_cli_idx);
            if(ERR_IF(!mem_virt))
            {
                DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
                return LM_STATUS_RESOURCE;
            }
            mm_memset(mem_virt, 0, mem_size);

            coalesce_buf = last_coalesce_buf;
        }

        coalesce_buf->mem_virt = mem_virt;
        coalesce_buf->frags.frag_arr[0].addr = mem_phy;
        coalesce_buf->frags.frag_arr[0].size = 0; /* to be set later according to actual packet size */
        mem_size -= coalesce_buf->buf_size;

        /* Go to the next packet buffer. */
        mem_virt += coalesce_buf->buf_size;
        LM_INC64(&mem_phy, coalesce_buf->buf_size);

        coalesce_buf = (lm_coalesce_buffer_t *) s_list_next_entry(
            &coalesce_buf->link);
    }

    if(mem_left || mem_size)
    {
        DbgBreakMsg("Memory allocation out of sync\n");

        return LM_STATUS_FAILURE;
    }

    return LM_STATUS_SUCCESS;
} /* lm_allocate_coalesce_buffers */

lm_status_t
lm_alloc_txq(
    IN struct _lm_device_t *pdev,
    IN u32_t const          cid, /* chain id */
    IN u16_t const          page_cnt,
    IN u16_t const          coalesce_buf_cnt)
{
    lm_tx_chain_t *tx_chain   = NULL;
    u32_t const    mem_size   = page_cnt * LM_PAGE_SIZE;
    u8_t           mm_cli_idx = 0;

    DbgMessage(pdev, INFORMi | INFORMl2sp, "#lm_alloc_txq, cid=%d, page_cnt=%d\n", cid, page_cnt);

    /* check arguments */
    if(CHK_NULL(pdev) ||
       ERR_IF((ARRSIZE(pdev->tx_info.chain) <= cid) || !page_cnt))
    {
        return LM_STATUS_FAILURE;
    }

    tx_chain = &LM_TXQ(pdev, cid);

    mm_cli_idx = cid_to_resource(pdev, cid);

    /* alloc the chain */

    tx_chain->bd_chain.bd_chain_virt =
        mm_alloc_phys_mem( pdev, mem_size, &tx_chain->bd_chain.bd_chain_phy, 0, mm_cli_idx);
    if(ERR_IF(!tx_chain->bd_chain.bd_chain_virt))
    {
        DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
        return LM_STATUS_RESOURCE;
    }
    mm_mem_zero(tx_chain->bd_chain.bd_chain_virt, mem_size);

    tx_chain->bd_chain.page_cnt = page_cnt;

    s_list_init(&tx_chain->active_descq, NULL, NULL, 0);
    s_list_init(&tx_chain->coalesce_buf_list, NULL, NULL, 0);
    tx_chain->idx              = cid;
    tx_chain->coalesce_buf_cnt = coalesce_buf_cnt;

    return lm_allocate_coalesce_buffers(
        pdev,
        &LM_TXQ(pdev, cid),
        coalesce_buf_cnt,
        cid);

} /* lm_alloc_txq */

lm_status_t
lm_alloc_rxq(
    IN struct _lm_device_t *pdev,
    IN u32_t const          cid,
    IN u16_t const          page_cnt,
    IN u32_t const          desc_cnt)
{
    lm_rx_chain_t*     rxq_chain        = NULL;
    lm_bd_chain_t *    bd_chain         = NULL;
    lm_rxq_chain_idx_t rx_chain_idx_max = LM_RXQ_CHAIN_IDX_MAX;
    lm_rxq_chain_idx_t rx_chain_idx_cur = 0;
    u32_t const        mem_size         = page_cnt * LM_PAGE_SIZE;
    u8_t               mm_cli_idx       = 0;

    /* check arguments */
    if(CHK_NULL(pdev) ||
       ERR_IF((ARRSIZE(pdev->rx_info.rxq_chain) <= cid) || !page_cnt))
    {
        return LM_STATUS_FAILURE;
    }

    rxq_chain = &LM_RXQ(pdev, cid);

    DbgMessage(pdev, INFORMi, "#lm_alloc_rxq, cid=%d, page_cnt=%d, desc_cnt=%d\n",
                cid, page_cnt, desc_cnt);

    mm_cli_idx = cid_to_resource(pdev, cid);//!!DP mm_cli_idx_to_um_idx(LM_CHAIN_IDX_CLI(pdev, idx));

    s_list_init(&rxq_chain->common.free_descq, NULL, NULL, 0);
    s_list_init(&rxq_chain->active_descq, NULL, NULL, 0);
    rxq_chain->idx             = cid;
    rxq_chain->common.desc_cnt = desc_cnt;

    /* alloc the chain(s) */
    rx_chain_idx_max = LM_RXQ_IS_CHAIN_SGE_VALID( pdev, cid ) ? LM_RXQ_CHAIN_IDX_SGE : LM_RXQ_CHAIN_IDX_BD;

    for( rx_chain_idx_cur = 0; rx_chain_idx_cur <= rx_chain_idx_max; rx_chain_idx_cur++ )
    {
        bd_chain = &LM_RXQ_CHAIN( pdev, cid, rx_chain_idx_cur );

        bd_chain->bd_chain_virt =  mm_alloc_phys_mem( pdev, mem_size, &bd_chain->bd_chain_phy, 0, mm_cli_idx);
        if(ERR_IF(!bd_chain->bd_chain_virt))
        {
            DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
            return LM_STATUS_RESOURCE;
        }
        mm_mem_zero(bd_chain->bd_chain_virt, mem_size);

        bd_chain->page_cnt = page_cnt;
    }

    return LM_STATUS_SUCCESS;
} /* lm_alloc_rxq */

lm_status_t
lm_alloc_rcq(
    IN struct _lm_device_t *pdev,
    IN u32_t const          cid,
    IN u16_t const          page_cnt)
{
    lm_rcq_chain_t *rcq_chain = NULL;
    u32_t const mem_size      = page_cnt * LM_PAGE_SIZE;
    u8_t        mm_cli_idx    = 0;

    /* check arguments */
    if(CHK_NULL(pdev) ||
       ERR_IF((ARRSIZE(pdev->rx_info.rcq_chain) <= cid) || !page_cnt))
    {
        return LM_STATUS_FAILURE;
    }

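    /* Each RCQ entry (union eth_rx_cqe) is LM_RX_BD_CQ_SIZE_RATIO times the
     * size of an rx BD, which is why lm_alloc_chain_con_resc() below sizes
     * the RCQ with LM_RX_BD_CQ_SIZE_RATIO pages per rx BD page. */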
    ASSERT_STATIC(sizeof(struct eth_rx_bd)*LM_RX_BD_CQ_SIZE_RATIO == sizeof(union eth_rx_cqe));
    ASSERT_STATIC(sizeof(struct eth_rx_bd) == sizeof(struct eth_rx_sge) );

    rcq_chain = &pdev->rx_info.rcq_chain[cid];

    DbgMessage(pdev, INFORMi | INFORMl2sp,
                "#lm_alloc_rcq, cid=%d, page_cnt=%d\n",
                cid, page_cnt);

    mm_cli_idx = cid_to_resource(pdev, cid);//!!DP mm_cli_idx_to_um_idx(LM_CHAIN_IDX_CLI(pdev, idx));

    /* alloc the chain */
    rcq_chain->bd_chain.bd_chain_virt =
        mm_alloc_phys_mem( pdev, mem_size, &rcq_chain->bd_chain.bd_chain_phy, 0, mm_cli_idx);

    if(ERR_IF(!rcq_chain->bd_chain.bd_chain_virt))
    {
        DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
        return LM_STATUS_RESOURCE;
    }

    mm_mem_zero(rcq_chain->bd_chain.bd_chain_virt, mem_size);
    rcq_chain->bd_chain.page_cnt = page_cnt;

    return LM_STATUS_SUCCESS;
} /* lm_alloc_rcq */

/**
 * @description
 * Allocate the TPA chain.
 * @param pdev
 * @param cid - chain index.
 * @param page_cnt - number of BD pages.
 * @param desc_cnt - number of descriptors.
 * @param bds_per_page - number of BDs per page.
 *
 * @return lm_status_t
 */
lm_status_t
lm_alloc_tpa_chain(
    IN struct _lm_device_t *pdev,
    IN u32_t const          cid,
    IN u16_t const          page_cnt,
    IN u32_t const          desc_cnt,
    IN u32_t const          bds_per_page)
{
    lm_tpa_chain_t*     tpa_chain   = NULL;
    lm_bd_chain_t *     bd_chain    = NULL;
    lm_tpa_sge_chain_t* sge_chain   = NULL;
    u32_t               mem_size    = 0;
    u8_t                mm_cli_idx  = 0;

    /* check arguments */
    if(CHK_NULL(pdev) ||
       ERR_IF((ARRSIZE(pdev->rx_info.rxq_chain) <= cid) || !page_cnt))
    {
        return LM_STATUS_FAILURE;
    }

    tpa_chain = &LM_TPA(pdev, cid);
    bd_chain  = &LM_TPA_CHAIN_BD( pdev, cid );
    sge_chain = &LM_SGE_TPA_CHAIN( pdev, cid );

    DbgMessage(pdev, INFORMi, "#lm_alloc_tpa, cid=%d, page_cnt=%d, desc_cnt=%d\n",
                cid, page_cnt, desc_cnt);

    mm_cli_idx = cid_to_resource(pdev, cid);

    s_list_init(&tpa_chain->common.free_descq, NULL, NULL, 0);
    tpa_chain->common.desc_cnt = desc_cnt;

    /************ Allocate BD chain ******************************************/
    mem_size    = page_cnt * LM_PAGE_SIZE;
    bd_chain->bd_chain_virt =  mm_alloc_phys_mem( pdev, mem_size, &bd_chain->bd_chain_phy, 0, mm_cli_idx);
    if(ERR_IF(!bd_chain->bd_chain_virt))
    {
        DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
        return LM_STATUS_RESOURCE;
    }
    mm_mem_zero(bd_chain->bd_chain_virt, mem_size);
    bd_chain->page_cnt = page_cnt;

    // The number of SGE bd entries
    sge_chain->size = page_cnt * bds_per_page;
    tpa_chain->state = lm_tpa_state_disable;

    /************ Allocate active descriptor array ***************************/
    mem_size = LM_TPA_ACTIVE_DESCQ_ARRAY_ELEM(pdev,cid);
    mem_size *= sizeof(lm_packet_t *);
    sge_chain->active_descq_array = mm_alloc_mem(pdev, mem_size, mm_cli_idx);

    if(CHK_NULL(sge_chain->active_descq_array))
    {
        DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
        return LM_STATUS_RESOURCE;
    }
    mm_mem_zero(sge_chain->active_descq_array, mem_size);

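    /* mask_array is a u64_t bit-vector with one bit per SGE entry, used to
     * track which active-descriptor entries are in use (see, e.g.,
     * LM_TPA_MASK_CLEAR_ACTIVE_BIT in lm_tpa_clear_next_page() below). */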
    /************ Allocate mask_array descriptor array ***********************/
    ASSERT_STATIC(0 != BIT_VEC64_ELEM_SZ); //LM_TPA_MASK_LEN - divide by BIT_VEC64_ELEM_SZ
    mem_size = LM_TPA_MASK_LEN(pdev, cid);
    mem_size = mem_size * sizeof(u64_t);
    sge_chain->mask_array = mm_alloc_mem(pdev, mem_size, mm_cli_idx);

    if(CHK_NULL(sge_chain->mask_array))
    {
        DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
        return LM_STATUS_RESOURCE;
    }
    mm_mem_zero(sge_chain->mask_array, mem_size);

    /************ Allocate TPA ramrod data ***********************************/
    mem_size = sizeof(struct tpa_update_ramrod_data);
    tpa_chain->ramrod_data_virt = mm_alloc_phys_mem(pdev, mem_size, &tpa_chain->ramrod_data_phys, 0, mm_cli_idx);

    if(CHK_NULL(tpa_chain->ramrod_data_virt))
    {
        DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
        return LM_STATUS_RESOURCE ;
    }
    mm_mem_zero(tpa_chain->ramrod_data_virt, mem_size);

    return LM_STATUS_SUCCESS;
} /* lm_alloc_tpa */

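/* Map a chain/connection id to the resource (client) index that memory
 * allocated on its behalf is charged to. */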
lm_resource_idx_t cid_to_resource(lm_device_t *pdev, u32_t cid)
{
    lm_resource_idx_t resource;

    if (lm_chain_type_not_cos != lm_mp_get_chain_type(pdev, cid))
    {
        resource = LM_RESOURCE_NDIS;
    }
    else if (cid == ISCSI_CID(pdev))
    {
        resource = LM_RESOURCE_ISCSI;
    }
    else if (cid == FCOE_CID(pdev))
    {
        resource = LM_RESOURCE_FCOE;
    }
    else if (cid == FWD_CID(pdev))
    {
        resource = LM_RESOURCE_FWD;
    }
    else if (cid == OOO_CID(pdev))
    {
        resource = LM_RESOURCE_OOO;
    }
    else
    {
        resource = LM_RESOURCE_COMMON;
    }

    return resource;
}

lm_status_t
lm_setup_txq(
    IN struct _lm_device_t *pdev,
    IN u32_t                cid)
{
    lm_bd_chain_t *                         bd_chain = NULL;
    volatile struct hc_sp_status_block *    sp_sb = NULL;
    u16_t volatile *                        sb_indexes = NULL;
    u8_t                                    tx_sb_index_number = 0;
    /* check arguments */
    if(CHK_NULL(pdev) ||
       ERR_IF((ARRSIZE(pdev->tx_info.chain) <= cid)))
    {
        return LM_STATUS_FAILURE;
    }
    DbgMessage(pdev, INFORMi|INFORMl2sp, "#lm_setup_txq, cid=%d\n",cid);

    sp_sb = lm_get_default_status_block(pdev);

    LM_TXQ(pdev, cid).prod_bseq = 0;
    LM_TXQ(pdev, cid).pkt_idx = 0;
    LM_TXQ(pdev, cid).coalesce_buf_used = 0;
    LM_TXQ(pdev, cid).lso_split_used = 0;

    bd_chain = &LM_TXQ(pdev, cid).bd_chain;
    lm_bd_chain_setup(pdev, bd_chain, bd_chain->bd_chain_virt, bd_chain->bd_chain_phy, bd_chain->page_cnt, sizeof(struct eth_tx_bd), /* is full? */0, TRUE);

    DbgMessage(pdev, INFORMi, "txq %d, bd_chain %p, bd_left %d\n",
        cid,
        LM_TXQ(pdev, cid).bd_chain.next_bd,
        LM_TXQ(pdev, cid).bd_chain.bd_left);

    DbgMessage(pdev, INFORMi, "   bd_chain_phy 0x%x%08x\n",
        LM_TXQ(pdev, cid).bd_chain.bd_chain_phy.as_u32.high,
        LM_TXQ(pdev, cid).bd_chain.bd_chain_phy.as_u32.low);

    mm_memset(&LM_TXQ(pdev, cid).eth_tx_prods.packets_prod, 0, sizeof(eth_tx_prod_t));

    if (cid == FWD_CID(pdev))
    {
        sp_sb->index_values[HC_SP_INDEX_ETH_FW_TX_CQ_CONS] = 0;
        LM_TXQ(pdev, cid).hw_con_idx_ptr =
            &(sp_sb->index_values[HC_SP_INDEX_ETH_FW_TX_CQ_CONS]);
        LM_TXQ(pdev, cid).hc_sb_info.hc_sb = STATUS_BLOCK_SP_SL_TYPE; // STATUS_BLOCK_SP_TYPE;
        LM_TXQ(pdev, cid).hc_sb_info.hc_index_value = HC_SP_INDEX_ETH_FW_TX_CQ_CONS;
        /* iro_dhc_offset not initialized on purpose --> not expected for the FWD channel */
    }
    else if (cid == ISCSI_CID(pdev))
    {
        sp_sb->index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS] = 0;
        LM_TXQ(pdev, cid).hw_con_idx_ptr = &(sp_sb->index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS]);
        LM_TXQ(pdev, cid).hc_sb_info.hc_sb = STATUS_BLOCK_SP_SL_TYPE; //STATUS_BLOCK_SP_TYPE;
        LM_TXQ(pdev, cid).hc_sb_info.hc_index_value = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
        /* iro_dhc_offset not initialized on purpose --> not expected for the iSCSI channel */
    }
    else if (cid == FCOE_CID(pdev))
    {
        sp_sb->index_values[HC_SP_INDEX_ETH_FCOE_CQ_CONS] = 0;
        LM_TXQ(pdev, cid).hw_con_idx_ptr =
            &(sp_sb->index_values[HC_SP_INDEX_ETH_FCOE_CQ_CONS]);
        LM_TXQ(pdev, cid).hc_sb_info.hc_sb = STATUS_BLOCK_SP_SL_TYPE; //STATUS_BLOCK_SP_TYPE;
        LM_TXQ(pdev, cid).hc_sb_info.hc_index_value = HC_SP_INDEX_ETH_FCOE_CQ_CONS;
        /* iro_dhc_offset not initialized on purpose --> not expected for the FCoE channel */
    }
    else if(cid == OOO_CID(pdev))
    {
        DbgBreakMsg("OOO doesn't have a txq");
        return LM_STATUS_FAILURE;
    }
    else
    {
        u32_t sb_id = RSS_ID_TO_SB_ID(CHAIN_TO_RSS_ID(pdev,cid));
        const u8_t byte_counter_id = CHIP_IS_E1x(pdev)? LM_FW_SB_ID(pdev, sb_id) : LM_FW_DHC_QZONE_ID(pdev, sb_id);

        // Assign the TX chain consumer pointer to the consumer index in the status block. TBD: rename HC_INDEX_C_ETH_TX_CQ_CONS as it's inappropriate.
        if( sb_id >= ARRSIZE(pdev->vars.status_blocks_arr) )
        {
            DbgBreakIf( sb_id >= ARRSIZE(pdev->vars.status_blocks_arr) );
            return LM_STATUS_FAILURE;
        }

        sb_indexes = lm_get_sb_indexes(pdev, (u8_t)sb_id);
        // This isn't really the cid, it is the chain index.
        tx_sb_index_number =
            lm_eth_tx_hc_cq_cons_cosx_from_chain(pdev, cid);

        sb_indexes[tx_sb_index_number] = 0;
        LM_TXQ(pdev, cid).hw_con_idx_ptr = sb_indexes + tx_sb_index_number;
        LM_TXQ(pdev, cid).hc_sb_info.hc_sb = STATUS_BLOCK_NORMAL_TYPE;
        LM_TXQ(pdev, cid).hc_sb_info.hc_index_value = tx_sb_index_number;
        if (IS_PFDEV(pdev))
        {
            LM_TXQ(pdev, cid).hc_sb_info.iro_dhc_offset = CSTORM_BYTE_COUNTER_OFFSET(byte_counter_id, tx_sb_index_number);
        }
        else
        {
            DbgMessage(pdev, FATAL, "Dhc not implemented for VF yet\n");
        }
    }

    return LM_STATUS_SUCCESS;
} /* lm_setup_txq */

lm_status_t lm_setup_rxq( IN struct _lm_device_t *pdev,
                          IN u32_t const          cid)
{
    lm_bd_chain_t *    bd_chain                              = NULL;
    lm_rx_chain_t *    rxq_chain                             = NULL;
    lm_rxq_chain_idx_t rx_chain_idx_max                      = LM_RXQ_CHAIN_IDX_MAX;
    lm_rxq_chain_idx_t rx_chain_idx_cur                      = 0;
    static u8_t const  eth_rx_size_arr[LM_RXQ_CHAIN_IDX_MAX] = {sizeof(struct eth_rx_bd), sizeof(struct eth_rx_sge)};
    u32_t              sb_id                                 = RSS_ID_TO_SB_ID(CHAIN_TO_RSS_ID(pdev,cid));
    const u8_t         byte_counter_id                       = CHIP_IS_E1x(pdev)? LM_FW_SB_ID(pdev, sb_id) : LM_FW_DHC_QZONE_ID(pdev, sb_id);

    /* check arguments */
    if(CHK_NULL(pdev) ||
       ERR_IF((ARRSIZE(pdev->rx_info.rxq_chain) <= cid)))
    {
        return LM_STATUS_FAILURE;
    }

    DbgMessage(pdev, INFORMi|INFORMl2sp, "#lm_setup_rxq, cid=%d\n",cid);

    rxq_chain = &LM_RXQ(pdev, cid);

    rxq_chain->common.prod_bseq            = 0;
    rxq_chain->ret_bytes                   = 0;
    rxq_chain->ret_bytes_last_fw_update    = 0;
    rxq_chain->common.bd_prod_without_next = 0;

    rx_chain_idx_max = LM_RXQ_IS_CHAIN_SGE_VALID( pdev, cid ) ? LM_RXQ_CHAIN_IDX_SGE : LM_RXQ_CHAIN_IDX_BD;

    for( rx_chain_idx_cur = 0; rx_chain_idx_cur <= rx_chain_idx_max; rx_chain_idx_cur++ )
    {
        bd_chain = &LM_RXQ_CHAIN( pdev, cid, rx_chain_idx_cur );

        lm_bd_chain_setup(pdev, bd_chain, bd_chain->bd_chain_virt, bd_chain->bd_chain_phy,bd_chain->page_cnt, eth_rx_size_arr[rx_chain_idx_cur], /* is full? */0, TRUE);

        DbgMessage(pdev, INFORMi, "rxq[%d] bd_chain[%d] %p, bd_left %d\n", cid,
                                                                            rx_chain_idx_cur,
                                                                            bd_chain->next_bd,
                                                                            bd_chain->bd_left);

        DbgMessage(pdev, INFORMi, "   bd_chain_phy[%d] 0x%x%08x\n", rx_chain_idx_cur,
                                                                     bd_chain->bd_chain_phy.as_u32.high,
                                                                     bd_chain->bd_chain_phy.as_u32.low);
    }

    /* We initialize the hc_sb_info here for completeness. The fw updates are actually
     * done by the rcq-chain, but dynamic host coalescing is based on the rx-chain. */
    rxq_chain->hc_sb_info.hc_sb = STATUS_BLOCK_NORMAL_SL_TYPE;
    rxq_chain->hc_sb_info.hc_index_value = HC_INDEX_ETH_RX_CQ_CONS;
    if (IS_PFDEV(pdev))
    {
        rxq_chain->hc_sb_info.iro_dhc_offset = CSTORM_BYTE_COUNTER_OFFSET(byte_counter_id, HC_INDEX_ETH_RX_CQ_CONS);
    }
    else
    {
        rxq_chain->hc_sb_info.iro_dhc_offset = sizeof(struct cstorm_queue_zone_data) * LM_FW_DHC_QZONE_ID(pdev, sb_id)
            + sizeof(u32_t) * HC_INDEX_ETH_RX_CQ_CONS;
        DbgMessage(pdev, WARN, "Dhc offset is 0x%x for VF Q Zone %d\n",rxq_chain->hc_sb_info.iro_dhc_offset,LM_FW_DHC_QZONE_ID(pdev, sb_id));
    }

    return LM_STATUS_SUCCESS;
} /* lm_setup_rxq */


lm_status_t
lm_setup_rcq( IN struct _lm_device_t *pdev,
              IN u32_t  const         cid)
{
    lm_bd_chain_t *                      bd_chain   = NULL;
    lm_rcq_chain_t *                     rcq_chain  = NULL;
    lm_rx_chain_t *                      rxq_chain  = NULL;
    volatile struct hc_sp_status_block * sp_sb      = NULL;
    u16_t volatile *                     sb_indexes = NULL;

    /* check arguments */
    if(CHK_NULL(pdev) ||
       ERR_IF((ARRSIZE(pdev->rx_info.rcq_chain) <= cid)))
    {
        return LM_STATUS_FAILURE;
    }

    rcq_chain = &LM_RCQ(pdev, cid);
    rxq_chain = &LM_RXQ(pdev, cid);

    DbgMessage(pdev, INFORMi|INFORMl2sp, "#lm_setup_rcq, cid=%d\n",cid);

    sp_sb = lm_get_default_status_block(pdev);

    rcq_chain->prod_bseq = 0;
    if (CHIP_IS_E1x(pdev))
    {
        rcq_chain->iro_prod_offset = USTORM_RX_PRODS_E1X_OFFSET(PORT_ID(pdev), LM_FW_CLI_ID(pdev, cid));
    }
    else
    {
        if (IS_VFDEV(pdev))
        {
            rcq_chain->iro_prod_offset = LM_FW_QZONE_ID(pdev, cid)*sizeof(struct ustorm_queue_zone_data);
            DbgMessage(pdev, FATAL, "iro_prod_offset for vf = %x...\n", rcq_chain->iro_prod_offset);
        }
    }

    //if(pdev->params.l2_rx_desc_cnt[0]) /* if removed. was not required */
    bd_chain = &rcq_chain->bd_chain;

    lm_bd_chain_setup(pdev, bd_chain, bd_chain->bd_chain_virt, bd_chain->bd_chain_phy,bd_chain->page_cnt, sizeof(union eth_rx_cqe), /* is full? */0, TRUE);

    // The Rx chain must have more available BDs than its corresponding RCQ.
    DbgBreakIf(lm_bd_chain_avail_bds(&rxq_chain->chain_arr[LM_RXQ_CHAIN_IDX_BD]) <= lm_bd_chain_avail_bds(&rcq_chain->bd_chain));

    if( LM_RXQ_IS_CHAIN_SGE_VALID(pdev, cid ) )
    {
        DbgBreakIf( !lm_bd_chains_are_consistent( &rxq_chain->chain_arr[LM_RXQ_CHAIN_IDX_BD], &rxq_chain->chain_arr[LM_RXQ_CHAIN_IDX_SGE]) );
    }

    DbgMessage(pdev, INFORMi, "rcq %d, bd_chain %p, bd_left %d\n", cid,
                                                                    rcq_chain->bd_chain.next_bd,
                                                                    rcq_chain->bd_chain.bd_left);
    DbgMessage(pdev, INFORMi, "   bd_chain_phy 0x%x%08x\n", rcq_chain->bd_chain.bd_chain_phy.as_u32.high,
                                                             rcq_chain->bd_chain.bd_chain_phy.as_u32.low);

    // Assign the RCQ chain consumer pointer to the consumer index in the status block.
    if (cid == ISCSI_CID(pdev))
    {
        if (CHIP_IS_E2E3(pdev)) {
            u8_t rel_cid = cid - LM_MAX_RSS_CHAINS(pdev);
            rcq_chain->iro_prod_offset = USTORM_RX_PRODS_E2_OFFSET(LM_FW_AUX_QZONE_ID(pdev, rel_cid));
        }
        sp_sb->index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS] = 0;
        rcq_chain->hw_con_idx_ptr                             = &(sp_sb->index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS]);
        rcq_chain->hc_sb_info.hc_sb                           = STATUS_BLOCK_SP_SL_TYPE;
        rcq_chain->hc_sb_info.hc_index_value                  = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
    }
    else if (cid == FCOE_CID(pdev))
    {
        if (CHIP_IS_E2E3(pdev)) {
            u8_t rel_cid = cid - LM_MAX_RSS_CHAINS(pdev);
            rcq_chain->iro_prod_offset = USTORM_RX_PRODS_E2_OFFSET(LM_FW_AUX_QZONE_ID(pdev, rel_cid));
        }
        sp_sb->index_values[HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS] = 0;
        rcq_chain->hw_con_idx_ptr                            = &(sp_sb->index_values[HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS]);
        rcq_chain->hc_sb_info.hc_sb                          = STATUS_BLOCK_SP_SL_TYPE;
        rcq_chain->hc_sb_info.hc_index_value                 = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
    }
    else if (cid == OOO_CID(pdev))
    {
        // All non-RSS chains share the same (default) SB. Ideally the iSCSI OOO
        // chain would work on the same SB that iSCSI uses (which does in fact
        // happen, see above), but we can't simply count on iSCSI for that:
        // 1. There is no promise on the order in which the iSCSI miniport calls
        //    ISCSI_KWQE_OPCODE_INIT1 (lm_sc_init inits pdev->iscsi_info.l5_eq_base_chain_idx), and
        // 2. OOO is generic code that shouldn't depend on a protocol (iSCSI).

        //TODO_OOO Ask Michal regarding E2 if we need LM_FW_SB_ID
        if (CHIP_IS_E2E3(pdev)) {
            u8_t rel_cid = cid - LM_MAX_RSS_CHAINS(pdev);
            rcq_chain->iro_prod_offset = USTORM_RX_PRODS_E2_OFFSET(LM_FW_AUX_QZONE_ID(pdev, rel_cid));
        }
        sp_sb->index_values[HC_SP_INDEX_ISCSI_OOO_RX_CONS]  = 0;
        rcq_chain->hw_con_idx_ptr                           = &(sp_sb->index_values[HC_SP_INDEX_ISCSI_OOO_RX_CONS]);
        rcq_chain->hc_sb_info.hc_sb                         = STATUS_BLOCK_SP_SL_TYPE;
        rcq_chain->hc_sb_info.hc_index_value                = HC_SP_INDEX_ISCSI_OOO_RX_CONS;
    }
    else /* NDIS */
    {
        u32_t sb_id = RSS_ID_TO_SB_ID(CHAIN_TO_RSS_ID(pdev,cid));
        const u8_t byte_counter_id = CHIP_IS_E1x(pdev)? LM_FW_SB_ID(pdev, sb_id) : LM_FW_DHC_QZONE_ID(pdev, sb_id);

        if (IS_PFDEV(pdev) && CHIP_IS_E2E3(pdev)) {
            rcq_chain->iro_prod_offset = USTORM_RX_PRODS_E2_OFFSET(LM_FW_DHC_QZONE_ID(pdev, sb_id));
        }
        if( sb_id >= ARRSIZE(pdev->vars.status_blocks_arr) )
        {
            DbgBreakIf( sb_id >= ARRSIZE(pdev->vars.status_blocks_arr) );
            return LM_STATUS_FAILURE;
        }

        sb_indexes = lm_get_sb_indexes(pdev, (u8_t)sb_id);
        sb_indexes[HC_INDEX_ETH_RX_CQ_CONS] = 0;
        rcq_chain->hw_con_idx_ptr = sb_indexes + HC_INDEX_ETH_RX_CQ_CONS;
        rcq_chain->hc_sb_info.hc_sb = STATUS_BLOCK_NORMAL_SL_TYPE;
        rcq_chain->hc_sb_info.hc_index_value = HC_INDEX_ETH_RX_CQ_CONS;
        if (IS_PFDEV(pdev))
        {
            rcq_chain->hc_sb_info.iro_dhc_offset = CSTORM_BYTE_COUNTER_OFFSET(byte_counter_id, HC_INDEX_ETH_RX_CQ_CONS);
        }
        else
        {
            DbgMessage(pdev, FATAL, "Dhc not implemented for VF yet\n");
        }
    }

    return LM_STATUS_SUCCESS;
} /* lm_setup_rcq */

lm_status_t
lm_setup_client_con_resc(
    IN struct _lm_device_t *pdev,
    IN u32_t cid
    )
{
    lm_status_t lm_status = LM_STATUS_SUCCESS;

    if CHK_NULL(pdev)
    {
        return LM_STATUS_INVALID_PARAMETER;
    }

    if((GET_FLAGS(pdev->params.l2_cli_con_params[cid].attributes,LM_CLIENT_ATTRIBUTES_RX) &&
       (cid >= MAX_RX_CHAIN(pdev))) ||
        (GET_FLAGS(pdev->params.l2_cli_con_params[cid].attributes,LM_CLIENT_ATTRIBUTES_TX) &&
       (cid >= MAX_TX_CHAIN(pdev))))
    {
        DbgBreakMsg(" invalid chain ");
        return LM_STATUS_INVALID_PARAMETER;
    }

    if(GET_FLAGS(pdev->params.l2_cli_con_params[cid].attributes,LM_CLIENT_ATTRIBUTES_TX))
    {
        lm_status = lm_setup_txq(pdev, cid);
        if (lm_status != LM_STATUS_SUCCESS)
        {
            return lm_status;
        }
    }

    if(GET_FLAGS(pdev->params.l2_cli_con_params[cid].attributes,LM_CLIENT_ATTRIBUTES_RX))
    {
        lm_status = lm_setup_rxq(pdev, cid);
        if (lm_status != LM_STATUS_SUCCESS)
        {
            return lm_status;
        }

        lm_status = lm_setup_rcq(pdev, cid);
        if (lm_status != LM_STATUS_SUCCESS)
        {
            return lm_status;
        }
    }

    if(GET_FLAGS(pdev->params.l2_cli_con_params[cid].attributes,LM_CLIENT_ATTRIBUTES_TPA))
    {
        lm_status = lm_setup_tpa_chain(pdev, cid);
        if (lm_status != LM_STATUS_SUCCESS)
        {
            return lm_status;
        }
    }
    pdev->client_info[cid].last_set_rx_mask = 0;

    return LM_STATUS_SUCCESS;
}

/*
 * reset txq, rxq, rcq counters for L2 client connection
 *
 * assumption: the cid equals the chain idx
 */
lm_status_t lm_clear_eth_con_resc( IN struct _lm_device_t *pdev,
                                   IN u8_t const          cid )
{
    u8_t sb_id = lm_sb_id_from_chain(pdev, cid);
    u8_t max_eth_cid;
    if CHK_NULL(pdev)
    {
        return LM_STATUS_INVALID_PARAMETER;
    }
    if (MM_DCB_MP_L2_IS_ENABLE(pdev))
    {
        max_eth_cid = lm_mp_max_cos_chain_used(pdev);
    }
    else
    {
        max_eth_cid = MAX_RX_CHAIN(pdev);
    }
    if (cid >= max_eth_cid)
    {
        return LM_STATUS_INVALID_PARAMETER;
    }

    /* Set hw consumer index pointers to null, so we won't get rx/tx completion
     * for this connection the next time we load it. */

    // Regardless of the attributes, we "clean" the TX status block

    if(GET_FLAGS(pdev->params.l2_cli_con_params[cid].attributes,LM_CLIENT_ATTRIBUTES_TX))
    {
        if (cid >= MAX_TX_CHAIN(pdev))
        {
            DbgBreakMsg(" Invalid TX chain index ");
            return LM_STATUS_INVALID_PARAMETER;
        }
        /* first set the hw consumer index pointers to null, and only then clear the pkt_idx value
         * to avoid a race when servicing an interrupt at the same time */
        lm_clear_chain_sb_cons_idx(pdev, sb_id, &LM_TXQ(pdev, cid).hc_sb_info, &LM_TXQ(pdev, cid).hw_con_idx_ptr);
        LM_TXQ(pdev, cid).pkt_idx = 0;
    }

    if(GET_FLAGS(pdev->params.l2_cli_con_params[cid].attributes,LM_CLIENT_ATTRIBUTES_RX))
    {
        if (cid >= MAX_RX_CHAIN(pdev))
        {
            DbgBreakMsg(" Invalid RX chain index ");
            return LM_STATUS_INVALID_PARAMETER;
        }
        lm_clear_chain_sb_cons_idx(pdev, sb_id, &LM_RCQ(pdev, cid).hc_sb_info, &LM_RCQ(pdev, cid).hw_con_idx_ptr);
    }
    //s_list_init(&LM_RXQ(pdev, cid).active_descq, NULL, NULL, 0);
    //s_list_init(&LM_RXQ(pdev, cid).free_descq, NULL, NULL, 0);

    return LM_STATUS_SUCCESS;
}

lm_status_t
lm_alloc_chain_con_resc(
    IN struct _lm_device_t *pdev,
    IN u32_t        const   cid,
    IN lm_cli_idx_t const   lm_cli_idx
    )
{
    lm_status_t lm_status          = LM_STATUS_SUCCESS;
    u16_t       l2_rx_bd_page_cnt  = 0;
    u16_t       l2_tpa_bd_page_cnt = 0;
    u16_t       bds_per_page       = 0;

    if CHK_NULL(pdev)
    {
        return LM_STATUS_INVALID_PARAMETER;
    }

    if((GET_FLAGS(pdev->params.l2_cli_con_params[cid].attributes,LM_CLIENT_ATTRIBUTES_RX) &&
       (cid >= MAX_RX_CHAIN(pdev))) ||
        (GET_FLAGS(pdev->params.l2_cli_con_params[cid].attributes,LM_CLIENT_ATTRIBUTES_TX) &&
       (cid >= MAX_TX_CHAIN(pdev))))
    {
        DbgBreakMsg(" invalid chain ");
        return LM_STATUS_INVALID_PARAMETER;
    }

    if(GET_FLAGS(pdev->params.l2_cli_con_params[cid].attributes,LM_CLIENT_ATTRIBUTES_TX))
    {
        lm_status = lm_alloc_txq(pdev,
                                 cid,
                                 (u16_t)pdev->params.l2_tx_bd_page_cnt[lm_cli_idx],
                                 (u16_t)pdev->params.l2_tx_coal_buf_cnt[lm_cli_idx]);
        if (lm_status != LM_STATUS_SUCCESS)
        {
            return lm_status;
        }
    }

    if(GET_FLAGS(pdev->params.l2_cli_con_params[cid].attributes,LM_CLIENT_ATTRIBUTES_RX))
    {
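        /* assumption: 500 approximates the usable rx BDs per 4KB page
         * (512 8-byte BDs minus the next-page-pointer entries). */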
        l2_rx_bd_page_cnt = _ceil( pdev->params.l2_cli_con_params[cid].num_rx_desc, 500 );
        lm_status = lm_alloc_rxq(pdev,
                                 cid,
                                 l2_rx_bd_page_cnt,
                                 pdev->params.l2_cli_con_params[cid].num_rx_desc);

        if (lm_status != LM_STATUS_SUCCESS)
        {
            return lm_status;
        }

        lm_status = lm_alloc_rcq(pdev,
                                 cid,
                                 (u16_t)l2_rx_bd_page_cnt * LM_RX_BD_CQ_SIZE_RATIO);
        if (lm_status != LM_STATUS_SUCCESS)
        {
            return lm_status;
        }
    }

    if(GET_FLAGS(pdev->params.l2_cli_con_params[cid].attributes,LM_CLIENT_ATTRIBUTES_TPA))
    {
        bds_per_page = BD_PER_PAGE(LM_TPA_BD_ELEN_SIZE);

        if ((0 == pdev->params.tpa_desc_cnt_per_chain) ||
            (!(POWER_OF_2(bds_per_page))))
        {
            DbgBreakMsg(" Illegal TPA params");
            return LM_STATUS_FAILURE;
        }
        l2_tpa_bd_page_cnt = _ceil( pdev->params.tpa_desc_cnt_per_chain,
                                    USABLE_BDS_PER_PAGE(LM_TPA_BD_ELEN_SIZE, TRUE));
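
        /* Presumably rounds the page count up to the next power of two
         * (the second argument is the bit-width of l2_tpa_bd_page_cnt). */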
        l2_tpa_bd_page_cnt = (u16_t)
            upper_align_power_of_2(l2_tpa_bd_page_cnt,
                                   sizeof(l2_tpa_bd_page_cnt) * BITS_PER_BYTE);

        lm_status = lm_alloc_tpa_chain(pdev,
                                       cid,
                                       l2_tpa_bd_page_cnt,
                                       pdev->params.tpa_desc_cnt_per_chain,
                                       bds_per_page);

        if (lm_status != LM_STATUS_SUCCESS)
        {
            return lm_status;
        }
    }
    return LM_STATUS_SUCCESS;
}

lm_status_t
lm_setup_client_con_params( IN struct _lm_device_t            *pdev,
                            IN u8_t const                      chain_idx,
                            IN struct _lm_client_con_params_t *cli_params )
{
    lm_rx_chain_t* rxq_chain = NULL;

    if (CHK_NULL(pdev) ||
        CHK_NULL(cli_params) ||
        ERR_IF((ARRSIZE(pdev->params.l2_cli_con_params) <= chain_idx) ||
               (CHIP_IS_E1H(pdev) && (chain_idx >= ETH_MAX_RX_CLIENTS_E1H)) || /* TODO E2 add IS_E2*/
               (CHIP_IS_E1(pdev) && (chain_idx >= ETH_MAX_RX_CLIENTS_E1)) ))
    {
        return LM_STATUS_INVALID_PARAMETER;
    }

    mm_memcpy(&pdev->params.l2_cli_con_params[chain_idx], cli_params, sizeof(struct _lm_client_con_params_t));

    if(GET_FLAGS(pdev->params.l2_cli_con_params[chain_idx].attributes,
                 LM_CLIENT_ATTRIBUTES_RX))
    {
        // update the rxq_chain structure
        rxq_chain           = &LM_RXQ(pdev, chain_idx);
        rxq_chain->lah_size = pdev->params.l2_cli_con_params[chain_idx].lah_size;
    }

    return LM_STATUS_SUCCESS;
}

lm_status_t
lm_init_chain_con( IN struct _lm_device_t *pdev,
                   IN u8_t const          chain_idx,
                   IN u8_t const          b_alloc )
{
    lm_status_t  lm_status  = LM_STATUS_SUCCESS;
    u8_t         lm_cli_idx = LM_CHAIN_IDX_CLI(pdev, chain_idx); // FIXME!!!

    if (CHK_NULL(pdev) ||
        (LM_CLI_IDX_MAX <= lm_cli_idx))
    {
        DbgBreakMsg(" lm_init_chain_con: lm_cli_idx has an invalid value");
        return LM_STATUS_INVALID_PARAMETER;
    }

    if (b_alloc)
    {
        lm_status = lm_alloc_chain_con_resc(pdev, chain_idx, lm_cli_idx);

        if (lm_status != LM_STATUS_SUCCESS)
        {
            return lm_status;
        }

        if(GET_FLAGS(pdev->params.l2_cli_con_params[chain_idx].attributes,LM_CLIENT_ATTRIBUTES_RX))
        {
            /* On allocation, init the client's objects... do this only on allocation; on setup, we'll need
             * the info to reconfigure... */
            ecore_init_mac_obj(pdev,
                           &pdev->client_info[chain_idx].mac_obj,
                           LM_FW_CLI_ID(pdev, chain_idx),
                           chain_idx,
                           FUNC_ID(pdev),
                           LM_SLOWPATH(pdev, mac_rdata)[lm_cli_idx],
                           LM_SLOWPATH_PHYS(pdev, mac_rdata)[lm_cli_idx],
                           ECORE_FILTER_MAC_PENDING,
                           (unsigned long *)&pdev->client_info[chain_idx].sp_mac_state,
                           ECORE_OBJ_TYPE_RX_TX,
                           &pdev->slowpath_info.macs_pool);

            if (!CHIP_IS_E1(pdev))
            {
                ecore_init_vlan_mac_obj(pdev,
                                   &pdev->client_info[chain_idx].mac_vlan_obj,
                                   LM_FW_CLI_ID(pdev, chain_idx),
                                   chain_idx,
                                   FUNC_ID(pdev),
                                   LM_SLOWPATH(pdev, mac_rdata)[lm_cli_idx],
                                   LM_SLOWPATH_PHYS(pdev, mac_rdata)[lm_cli_idx],
                                   ECORE_FILTER_VLAN_MAC_PENDING,
                                   (unsigned long *)&pdev->client_info[chain_idx].sp_mac_state,
                                   ECORE_OBJ_TYPE_RX_TX,
                                   &pdev->slowpath_info.macs_pool,
                                   &pdev->slowpath_info.vlans_pool);
            }

            if (!CHIP_IS_E1x(pdev))
            {
                ecore_init_vlan_obj(pdev,
                                    &pdev->client_info[chain_idx].vlan_obj,
                                    LM_FW_CLI_ID(pdev, chain_idx),
                                    chain_idx,
                                    FUNC_ID(pdev),
                                    LM_SLOWPATH(pdev, mac_rdata)[lm_cli_idx],
                                    LM_SLOWPATH_PHYS(pdev, mac_rdata)[lm_cli_idx],
                                    ECORE_FILTER_VLAN_PENDING,
                                    (unsigned long *)&pdev->client_info[chain_idx].sp_mac_state,
                                    ECORE_OBJ_TYPE_RX_TX,
                                    &pdev->slowpath_info.vlans_pool);
            }
        }
    }

    lm_status = lm_setup_client_con_resc(pdev, chain_idx);

    return lm_status;
}

lm_status_t lm_alloc_sq(struct _lm_device_t *pdev)
{
    lm_sq_info_t * sq_info = &pdev->sq_info;

    sq_info->sq_chain.sq_chain_virt = mm_alloc_phys_mem( pdev,
                                                         LM_PAGE_SIZE,
                                                         (lm_address_t*)&(sq_info->sq_chain.bd_chain_phy),
                                                         0,
                                                         LM_CLI_IDX_MAX);

    if CHK_NULL(sq_info->sq_chain.sq_chain_virt)
    {
        DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
        return LM_STATUS_RESOURCE;
    }

    mm_mem_zero(sq_info->sq_chain.sq_chain_virt, LM_PAGE_SIZE);

    return LM_STATUS_SUCCESS;
}

lm_status_t lm_alloc_eq(struct _lm_device_t *pdev)
{
    lm_eq_chain_t *eq_chain = NULL;
    u32_t          mem_size = 0;
    u8_t  const    page_cnt = 1;

    /* check arguments */
    if(CHK_NULL(pdev))
    {
        return LM_STATUS_FAILURE;
    }

    DbgMessage(pdev, INFORMi | INFORMl2sp, "#lm_alloc_eq\n");

    mem_size = page_cnt * LM_PAGE_SIZE;
    eq_chain = &pdev->eq_info.eq_chain;

    /* alloc the chain */
    eq_chain->bd_chain.bd_chain_virt =
        mm_alloc_phys_mem( pdev, mem_size, &eq_chain->bd_chain.bd_chain_phy, 0, LM_CLI_IDX_MAX);

    if(ERR_IF(!eq_chain->bd_chain.bd_chain_virt))
    {
        DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
        return LM_STATUS_RESOURCE;
    }

    mm_mem_zero(eq_chain->bd_chain.bd_chain_virt, mem_size);
    eq_chain->bd_chain.page_cnt = page_cnt;

    return LM_STATUS_SUCCESS;
}

lm_status_t lm_alloc_client_info(struct _lm_device_t *pdev)
{
    client_init_data_t  *client_init_data_virt                  = NULL;
    const u32_t mem_size_init                                   = sizeof(client_init_data_t);
    struct client_update_ramrod_data  *client_update_data_virt  = NULL;
    const u32_t mem_size_update                                 = sizeof(struct client_update_ramrod_data);
    u8_t i                                                      = 0;

    for (i = 0; i < ARRSIZE(pdev->client_info); i++)
    {
        //Init data
        client_init_data_virt = mm_alloc_phys_mem(pdev, mem_size_init, &pdev->client_info[i].client_init_data_phys, 0, LM_RESOURCE_COMMON);
        if CHK_NULL(client_init_data_virt)
        {
            DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
            return LM_STATUS_RESOURCE;
        }

        mm_mem_zero(client_init_data_virt, mem_size_init);

        pdev->client_info[i].client_init_data_virt = client_init_data_virt;

        //Update data
        client_update_data_virt = mm_alloc_phys_mem(pdev, mem_size_update, &pdev->client_info[i].update.data_phys, 0, LM_RESOURCE_COMMON);
        if CHK_NULL(client_update_data_virt)
        {
            DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
            return LM_STATUS_RESOURCE;
        }

        mm_mem_zero(client_update_data_virt, mem_size_update);

        pdev->client_info[i].update.data_virt = client_update_data_virt;
    }

    return LM_STATUS_SUCCESS;
}

lm_status_t lm_setup_client_info(struct _lm_device_t *pdev)
{
    client_init_data_t  *client_init_data_virt                  = NULL;
    const u32_t mem_size_init                                   = sizeof(client_init_data_t);
    struct client_update_ramrod_data  *client_update_data_virt  = NULL;
    const u32_t mem_size_update                                 = sizeof(struct client_update_ramrod_data);
    u8_t i                                                      = 0;

    for (i = 0; i < ARRSIZE(pdev->client_info); i++)
    {
        //Init
        client_init_data_virt = pdev->client_info[i].client_init_data_virt;
        if CHK_NULL(client_init_data_virt)
        {
            DbgMessage(pdev, FATAL, "client-init-data at this point is not expected to be null...\n");
            return LM_STATUS_FAILURE;
        }
        mm_mem_zero(client_init_data_virt, mem_size_init);

        //Update
        client_update_data_virt = pdev->client_info[i].update.data_virt;
        if CHK_NULL(client_update_data_virt)
        {
            DbgMessage(pdev, FATAL, "client-update-data at this point is not expected to be null...\n");
            return LM_STATUS_FAILURE;
        }
        mm_mem_zero(client_update_data_virt, mem_size_update);
    }

    return LM_STATUS_SUCCESS;
}

/**
 * @description
 * The next-page entries are static and won't be used by the active
 * descriptor array or the mask array.
 * @param pdev
 * @param chain_idx
 *
 * @return STATIC void
 */
__inline STATIC void
lm_tpa_clear_next_page( IN        lm_device_t*        pdev,
                        IN const  u32_t               chain_idx)
1334 {
1335     lm_bd_chain_t*      bd_chain        = &LM_TPA_CHAIN_BD(pdev, chain_idx);
1336     u16_t               active_entry    = 0;
1337     u16_t               bd_entry        = 0;
1338     u16_t               i               = 0;
1339     u16_t               j               = 0;
1340 
1341     for(i = 1; i <= lm_bd_chain_page_cnt(bd_chain); i++ )
1342     {
1343         bd_entry = (lm_bd_chain_bds_per_page(bd_chain) * i) - lm_bd_chain_bds_skip_eop(bd_chain);
1344         /* clear page-end entries */
1345         for(j = 0; j < lm_bd_chain_bds_skip_eop(bd_chain); j++ )
1346         {
1347             active_entry = LM_TPA_BD_ENTRY_TO_ACTIVE_ENTRY(pdev, chain_idx, bd_entry);
1348             LM_TPA_MASK_CLEAR_ACTIVE_BIT(pdev, chain_idx, active_entry);
1349             bd_entry++;
1350         }
1351     }
1352 }
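
/* Worked example for lm_tpa_clear_next_page() (illustrative numbers only):
 * assuming a page holds 512 BDs of which the last 2 are next-page pointers
 * (lm_bd_chain_bds_skip_eop() == 2), page 1 gives bd_entry = 512*1 - 2 = 510,
 * so active entries 510 and 511 are cleared from the mask; page 2 clears 1022
 * and 1023, and so on. Those page-end slots never carry a real descriptor. */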
1353 
1354 /**
1355  * @description
1356  * Clear TPA parameters. TPA may be disabled between an NDIS bind and
1357  * unbind, but the RX chain remains in use.
1358  * @param pdev
1359  * @param cid
1360  */
1361 lm_status_t
1362 lm_tpa_chain_reset(IN lm_device_t   *pdev,
1363                    IN const u32_t   cid)
1364 {
1365 
1366     lm_tpa_chain_t *    tpa_chain          = NULL;
1367 
1368     /* check arguments */
1369     if(CHK_NULL(pdev) ||
1370        ERR_IF((ARRSIZE(pdev->rx_info.rxq_chain) <= cid)))
1371     {
1372         return LM_STATUS_FAILURE;
1373     }
1374 
1375     DbgMessage(pdev, INFORMi|INFORMl2sp, "#lm_tpa_chain_reset, cid=%d\n",cid);
1376 
1377     tpa_chain = &LM_TPA(pdev, cid);
1378     /***************** SGE chain setup *************************************/
1379     mm_mem_zero(tpa_chain,sizeof(lm_tpa_chain_t));
1380 
1381     return LM_STATUS_SUCCESS;
1382 }
1383 /**
1384  * @description
1385  * Set up the TPA chain for the given cid.
1386  * @param pdev
1387  * @param cid
1388  *
1389  * @return lm_status_t
1390  */
1391 lm_status_t lm_setup_tpa_chain( IN struct _lm_device_t *pdev,
1392                                 IN u32_t const          cid)
1393 {
1394     lm_bd_chain_t *     bd_chain            = NULL;
1395     lm_tpa_chain_t *    tpa_chain           = NULL;
1396     u16_t                i                  = 0;
1397 
1398     /* check arguments */
1399     if(CHK_NULL(pdev) ||
1400        ERR_IF((ARRSIZE(pdev->rx_info.rxq_chain) <= cid)))
1401     {
1402         return LM_STATUS_FAILURE;
1403     }
1404 
1405     DbgMessage(pdev, INFORMi|INFORMl2sp, "#lm_setup_tpa, cid=%d\n",cid);
1406 
1407     tpa_chain = &LM_TPA(pdev, cid);
1408     bd_chain = &LM_TPA_CHAIN_BD( pdev, cid );
1409 
1410 
1411     /***************** TPA chain setup ************************************/
1412     for(i = 0; i < ARRSIZE(tpa_chain->start_coales_bd) ; i++)
1413     {
1414         tpa_chain->start_coales_bd[i].is_entry_used = FALSE;
1415         tpa_chain->start_coales_bd[i].packet = NULL;
1416     }
1417 
1418     /***************** SGE common setup ************************************/
1419     tpa_chain->common.prod_bseq                 = 0;
1420     tpa_chain->common.bd_prod_without_next      = 0;
1421 
1422     /***************** SGE chain setup *************************************/
1423     lm_bd_chain_setup(pdev, bd_chain, bd_chain->bd_chain_virt, bd_chain->bd_chain_phy,bd_chain->page_cnt, LM_TPA_BD_ELEN_SIZE, /* is full? */0, TRUE);
1424 
1425     DbgMessage(pdev, INFORMi, "rxq[%d] bd_chain[%d] %p, bd_left %d\n", cid,
1426                                                                         bd_chain->next_bd,
1427                                                                         bd_chain->bd_left);
1428 
1429     DbgMessage(pdev, INFORMi, "   bd_chain_phy[%d] 0x%x%08x\n", bd_chain->bd_chain_phy.as_u32.high,
1430                                                                  bd_chain->bd_chain_phy.as_u32.low);
1431     tpa_chain->sge_chain.last_max_con = 0;
1432 
1433     for(i = 0; i < LM_TPA_ACTIVE_DESCQ_ARRAY_ELEM(pdev, cid) ; i++)
1434     {
1435         tpa_chain->sge_chain.active_descq_array[i] = NULL;
1436     }
1437 
1438     /***************** Mask entry prepare *************************************/
1439     ASSERT_STATIC(0 != BIT_VEC64_ELEM_SZ); // LM_TPA_MASK_LEN() divides by BIT_VEC64_ELEM_SZ
1440     for(i = 0; i < LM_TPA_MASK_LEN(pdev, cid) ; i++)
1441     {
1442         tpa_chain->sge_chain.mask_array[i] = BIT_VEC64_ELEM_ONE_MASK;
1443     }
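    /* Sketch of the mask bookkeeping (assumed semantics of mask_array): each
     * u64 element covers BIT_VEC64_ELEM_SZ = 64 active entries, so e.g. a
     * 4096-entry active descriptor array needs LM_TPA_MASK_LEN = 4096/64 = 64
     * elements; setting them all to BIT_VEC64_ELEM_ONE_MASK marks every entry
     * "available" before the page-end bits are cleared just below. */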
1444 
1445     lm_tpa_clear_next_page(pdev,
1446                            cid);
1447 
1448     return LM_STATUS_SUCCESS;
1449 } /* lm_setup_tpa */
1450 
1451 lm_status_t lm_setup_sq(struct _lm_device_t *pdev)
1452 {
1453     lm_sq_info_t * sq_info = &pdev->sq_info;
1454 
1455     mm_mem_zero(sq_info->sq_chain.sq_chain_virt, LM_PAGE_SIZE);
1456 
1457     pdev->sq_info.num_pending_normal = MAX_NORMAL_PRIORITY_SPE;
1458     pdev->sq_info.num_pending_high = MAX_HIGH_PRIORITY_SPE;
1459 
1460     d_list_init(&pdev->sq_info.pending_normal, 0,0,0);
1461     d_list_init(&pdev->sq_info.pending_high, 0,0,0);
1462     d_list_init(&pdev->sq_info.pending_complete, 0,0,0);
1463 
1464 
1465     /* The SPQ doesn't have a next BD */
1466     pdev->sq_info.sq_chain.bd_left =  USABLE_BDS_PER_PAGE(sizeof(struct slow_path_element), TRUE); /* prod == cons means empty chain */
1467     pdev->sq_info.sq_chain.con_idx = 0;
1468 
1469     pdev->sq_info.sq_chain.prod_bd = pdev->sq_info.sq_chain.sq_chain_virt;
1470     pdev->sq_info.sq_chain.last_bd = pdev->sq_info.sq_chain.prod_bd + pdev->sq_info.sq_chain.bd_left ;
1471     pdev->sq_info.sq_chain.prod_idx = 0;
1472 
1473     return LM_STATUS_SUCCESS;
1474 
1475 }
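
/* Usage note for the "prod == cons means empty" convention above (element
 * count illustrative, not taken from the headers): if a page fits N
 * slow_path_element entries, USABLE_BDS_PER_PAGE(..., TRUE) reserves one so
 * the SPQ holds at most N-1 pending slow-path entries; prod_idx == con_idx
 * then unambiguously means "empty" rather than "full". */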
1476 
1477 lm_status_t lm_setup_eq(struct _lm_device_t *pdev)
1478 {
1479     lm_bd_chain_t * bd_chain = NULL;
1480     lm_eq_chain_t * eq_chain = NULL;
1481     volatile struct hc_sp_status_block * sp_sb = NULL;
1482 
1483 
1484     /* check arguments */
1485     if(CHK_NULL(pdev))
1486     {
1487         return LM_STATUS_FAILURE;
1488     }
1489 
1490     DbgMessage(pdev, INFORMeq, "#lm_setup_eq\n");
1491 
1492     eq_chain = &pdev->eq_info.eq_chain;
1493     bd_chain = &eq_chain->bd_chain;
1494 
1495     lm_bd_chain_setup(pdev, bd_chain, bd_chain->bd_chain_virt, bd_chain->bd_chain_phy, bd_chain->page_cnt, sizeof(union event_ring_elem), /* is full? */TRUE, TRUE);
1496 
1497     sp_sb = lm_get_default_status_block(pdev);
1498 
1499     sp_sb->index_values[HC_SP_INDEX_EQ_CONS] = 0;
1500 
1501     eq_chain->hw_con_idx_ptr = &sp_sb->index_values[HC_SP_INDEX_EQ_CONS];
1502     eq_chain->hc_sb_info.hc_sb = STATUS_BLOCK_NORMAL_SL_TYPE;
1503     eq_chain->hc_sb_info.hc_index_value = HC_SP_INDEX_EQ_CONS;
1504     eq_chain->iro_prod_offset = CSTORM_EVENT_RING_PROD_OFFSET(FUNC_ID(pdev));
1505 
1506     return LM_STATUS_SUCCESS;
1507 }
1508 
1509 lm_status_t lm_init_sp_objs(struct _lm_device_t *pdev)
1510 {
1511     u32_t lm_cli_idx = LM_CLI_IDX_MAX;
1512 
1513     ecore_init_mac_credit_pool(pdev, &pdev->slowpath_info.macs_pool, FUNC_ID(pdev), CHIP_IS_E1x(pdev)? VNICS_PER_PORT(pdev) : VNICS_PER_PATH(pdev));
1514     ecore_init_vlan_credit_pool(pdev, &pdev->slowpath_info.vlans_pool, FUNC_ID(pdev), CHIP_IS_E1x(pdev)? VNICS_PER_PORT(pdev) : VNICS_PER_PATH(pdev));
1515     ecore_init_rx_mode_obj(pdev, &pdev->slowpath_info.rx_mode_obj);
1516 
1517     for (lm_cli_idx=0; lm_cli_idx < ARRSIZE(pdev->slowpath_info.mcast_obj); lm_cli_idx++)
1518     {
1519         ecore_init_mcast_obj(pdev,
1520                              &pdev->slowpath_info.mcast_obj[lm_cli_idx],
1521                              LM_FW_CLI_ID(pdev, pdev->params.map_client_to_cid[lm_cli_idx]),
1522                              pdev->params.map_client_to_cid[lm_cli_idx],
1523                              FUNC_ID(pdev),
1524                              FUNC_ID(pdev),
1525                              LM_SLOWPATH(pdev, mcast_rdata)[lm_cli_idx],
1526                              LM_SLOWPATH_PHYS(pdev, mcast_rdata)[lm_cli_idx],
1527                              ECORE_FILTER_MCAST_PENDING,
1528                              (unsigned long *)&pdev->slowpath_info.sp_mcast_state[lm_cli_idx],
1529                              ECORE_OBJ_TYPE_RX_TX);
1530     }
1531 
1532     ecore_init_rss_config_obj(pdev,
1533                               &pdev->slowpath_info.rss_conf_obj,
1534                               LM_FW_CLI_ID(pdev, LM_SW_LEADING_RSS_CID(pdev)),
1535                               LM_SW_LEADING_RSS_CID(pdev),
1536                               FUNC_ID(pdev),
1537                               FUNC_ID(pdev),
1538                               LM_SLOWPATH(pdev, rss_rdata),
1539                               LM_SLOWPATH_PHYS(pdev, rss_rdata),
1540                               ECORE_FILTER_RSS_CONF_PENDING,
1541                               (unsigned long *)&pdev->slowpath_info.sp_rss_state,
1542                               ECORE_OBJ_TYPE_RX);
1543 
1544     return LM_STATUS_SUCCESS;
1545 }
1546 
1547 /**
1548  * Description:
1549  *   allocate slowpath resources
1550  */
1551 static lm_status_t
1552 lm_alloc_setup_slowpath_resc(struct _lm_device_t *pdev , u8_t b_alloc)
1553 {
1554     lm_slowpath_data_t *slowpath_data = &pdev->slowpath_info.slowpath_data;
1555     u8_t                i             = 0;
1556 
1557     ASSERT_STATIC(ARRSIZE(slowpath_data->mac_rdata) == ARRSIZE(slowpath_data->rx_mode_rdata));
1558     ASSERT_STATIC(ARRSIZE(slowpath_data->mac_rdata) == ARRSIZE(slowpath_data->mcast_rdata));
1559 
1560     for (i = 0; i < ARRSIZE(slowpath_data->mac_rdata); i++ )
1561     {
1562         if (b_alloc)
1563         {
1564             slowpath_data->mac_rdata[i] =
1565                 mm_alloc_phys_mem(pdev,
1566                                   sizeof(*slowpath_data->mac_rdata[i]),
1567                                   &slowpath_data->mac_rdata_phys[i],
1568                                   0,
1569                                   LM_RESOURCE_COMMON);
1570 
1571             slowpath_data->rx_mode_rdata[i] =
1572                 mm_alloc_phys_mem(pdev,
1573                                   sizeof(*slowpath_data->rx_mode_rdata[i]),
1574                                   &slowpath_data->rx_mode_rdata_phys[i],
1575                                   0,
1576                                   LM_RESOURCE_COMMON);
1577 
1578             slowpath_data->mcast_rdata[i] =
1579                 mm_alloc_phys_mem(pdev,
1580                                   sizeof(*slowpath_data->mcast_rdata[i]),
1581                                   &slowpath_data->mcast_rdata_phys[i],
1582                                   0,
1583                                   LM_RESOURCE_COMMON);
1584 
1585 
1586         }
1587 
1588         if (CHK_NULL(slowpath_data->mac_rdata[i]) ||
1589             CHK_NULL(slowpath_data->rx_mode_rdata[i]) ||
1590             CHK_NULL(slowpath_data->mcast_rdata[i]))
1592         {
1593             DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
1594             return LM_STATUS_RESOURCE ;
1595         }
1596 
1597         mm_mem_zero(slowpath_data->mac_rdata[i], sizeof(*slowpath_data->mac_rdata[i]));
1598         mm_mem_zero(slowpath_data->rx_mode_rdata[i], sizeof(*slowpath_data->rx_mode_rdata[i]));
1599         mm_mem_zero(slowpath_data->mcast_rdata[i], sizeof(*slowpath_data->mcast_rdata[i]));
1600     }
1601 
1602     if (b_alloc)
1603     {
1604         slowpath_data->rss_rdata  = mm_alloc_phys_mem(pdev, sizeof(*slowpath_data->rss_rdata), &slowpath_data->rss_rdata_phys, 0, LM_RESOURCE_COMMON);
1605     }
1606 
1607     if CHK_NULL(slowpath_data->rss_rdata)
1608     {
1609         DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
1610         return LM_STATUS_RESOURCE ;
1611     }
1612 
1613     mm_mem_zero(slowpath_data->rss_rdata, sizeof(*slowpath_data->rss_rdata));
1614 
1615     if (b_alloc)
1616     {
1617         slowpath_data->func_start_data  = mm_alloc_phys_mem(pdev, sizeof(*slowpath_data->func_start_data), &slowpath_data->func_start_data_phys, 0, LM_RESOURCE_COMMON);
1618     }
1619 
1620     if CHK_NULL(slowpath_data->func_start_data)
1621     {
1622         DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
1623         return LM_STATUS_RESOURCE ;
1624     }
1625 
1626     mm_mem_zero(slowpath_data->func_start_data, sizeof(*slowpath_data->func_start_data));
1627 
1628     if (b_alloc)
1629     {
1630         slowpath_data->niv_function_update_data = mm_alloc_phys_mem(pdev, sizeof(*slowpath_data->niv_function_update_data), &slowpath_data->niv_function_update_data_phys, 0, LM_RESOURCE_COMMON);
1631     }
1632     if CHK_NULL(slowpath_data->niv_function_update_data)
1633     {
1634         DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
1635         return LM_STATUS_RESOURCE ;
1636     }
1637     mm_mem_zero(slowpath_data->niv_function_update_data, sizeof(*slowpath_data->niv_function_update_data));
1638 
1639     if (b_alloc)
1640     {
1641         slowpath_data->l2mp_func_update_data = mm_alloc_phys_mem(pdev, sizeof(*slowpath_data->l2mp_func_update_data), &slowpath_data->l2mp_func_update_data_phys, 0, LM_RESOURCE_COMMON);
1642     }
1643     if CHK_NULL(slowpath_data->l2mp_func_update_data)
1644     {
1645         DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
1646         return LM_STATUS_RESOURCE ;
1647     }
1648     mm_mem_zero(slowpath_data->l2mp_func_update_data, sizeof(*slowpath_data->l2mp_func_update_data));
1649 
1650     if (b_alloc)
1651     {
1652         slowpath_data->encap_function_update_data = mm_alloc_phys_mem(pdev, sizeof(*slowpath_data->encap_function_update_data), &slowpath_data->encap_function_update_data_phys, 0, LM_RESOURCE_COMMON);
1653     }
1654     if CHK_NULL(slowpath_data->encap_function_update_data)
1655     {
1656         DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
1657         return LM_STATUS_RESOURCE ;
1658     }
1659     mm_mem_zero(slowpath_data->encap_function_update_data, sizeof(*slowpath_data->encap_function_update_data));
1660 
1661     if (b_alloc)
1662     {
1663         slowpath_data->ufp_function_update_data = mm_alloc_phys_mem(pdev, sizeof(*slowpath_data->ufp_function_update_data), &slowpath_data->ufp_function_update_data_phys, 0, LM_RESOURCE_COMMON);
1664     }
1665     if CHK_NULL(slowpath_data->ufp_function_update_data)
1666     {
1667         DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
1668         return LM_STATUS_RESOURCE ;
1669     }
1670     mm_mem_zero(slowpath_data->ufp_function_update_data, sizeof(*slowpath_data->ufp_function_update_data));
1671 
1672     pdev->slowpath_info.niv_ramrod_state                              = NIV_RAMROD_NOT_POSTED;
1673     pdev->slowpath_info.l2mp_func_update_ramrod_state                 = L2MP_FUNC_UPDATE_RAMROD_NOT_POSTED;
1674     pdev->slowpath_info.ufp_func_ramrod_state                         = UFP_RAMROD_NOT_POSTED;
1675 
1676     return LM_STATUS_SUCCESS ;
1677 }
1678 
1679 
1680 static void * lm_setup_allocate_ilt_client_page( struct _lm_device_t *pdev,
1681                                                  lm_address_t        *phys_mem,
1682                                                  u8_t const          cli_idx )
1683 {
1684     void* ilt_client_page_virt_address = NULL;
1685 
1686     if (!CHIP_IS_E1(pdev))
1687     {
1688         ilt_client_page_virt_address = mm_alloc_phys_mem_align( pdev,
1689                                                                    pdev->params.ilt_client_page_size,
1690                                                                    phys_mem,
1691                                                                    LM_ILT_ALIGNMENT,
1692                                                                    0,
1693                                                                    cli_idx);
1694     }
1695     else
1696     {
1697         ilt_client_page_virt_address = mm_alloc_phys_mem_align(pdev,
1698                                                                    pdev->params.ilt_client_page_size,
1699                                                                    phys_mem,
1700                                                                    pdev->params.ilt_client_page_size,
1701                                                                    0,
1702                                                                    cli_idx);
1703     }
1704 
1705     return ilt_client_page_virt_address;
1706 }
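
/* Design note (inferred from the branch above): on E1 each ILT client page
 * must be naturally aligned, i.e. aligned to its own size, while later chips
 * only need the generic LM_ILT_ALIGNMENT. A caller sketch:
 *
 *     lm_address_t phys;
 *     void *virt = lm_setup_allocate_ilt_client_page(pdev, &phys, cli_idx);
 *     if CHK_NULL(virt)
 *     {
 *         return LM_STATUS_RESOURCE ;
 *     }
 */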
1707 
1708 /* Description:
*    This routine contains common code for alloc and setup, distinguished by flag
1710 */
1711 lm_status_t lm_common_setup_alloc_resc(struct _lm_device_t *pdev, u8_t const b_is_alloc )
1712 {
1713     lm_params_t*    params     = NULL ;
1714     lm_variables_t* vars       = NULL ;
1715 //    lm_sq_info_t*   sq_info    = NULL ;
1716     lm_status_t     lm_status;
1717     u32_t           alloc_size = 0 ;
1718     u32_t           alloc_num  = 0 ;
1719     u32_t           i          = 0 ;
1720     u32_t           mem_size   = 0 ;
1721     u8_t            sb_id      = 0 ;
1722     u8_t            mm_cli_idx = 0 ;
1723     lm_address_t    sb_phy_address;
1724 
1725     if CHK_NULL( pdev )
1726     {
1727         return LM_STATUS_INVALID_PARAMETER ;
1728     }
1729 
1730     DbgMessage(pdev, INFORMi , "### lm_common_setup_alloc_resc b_is_alloc=%s\n", b_is_alloc ? "TRUE" : "FALSE" );
1731 
1732     params     = &pdev->params ;
1733     vars       = &(pdev->vars) ;
1734 
1735     //       Status block allocation. We allocate memory for both the default and non-default status blocks;
1736     //       there is 1 default SB and 16 non-default SBs per port.
1737     //       non-default sb: index 0-15, default sb: index 16.
1738     if (CHIP_IS_E1x(pdev))
1739     {
1740         mem_size = E1X_STATUS_BLOCK_BUFFER_SIZE;
1741     }
1742     else
1743     {
1744         mem_size = E2_STATUS_BLOCK_BUFFER_SIZE;
1745     }
1746 
1747     mm_cli_idx = LM_RESOURCE_COMMON;//!!DP mm_cli_idx_to_um_idx(LM_CLI_IDX_MAX);
1748 
1749     LM_FOREACH_SB_ID(pdev, sb_id)
1750     {
1751         if( b_is_alloc )
1752         {
1753             vars->status_blocks_arr[sb_id].host_hc_status_block.e1x_sb = mm_alloc_phys_mem(pdev, mem_size, &sb_phy_address, 0, mm_cli_idx);
1754             if (CHIP_IS_E1x(pdev))
1755             {
1756                 vars->status_blocks_arr[sb_id].hc_status_block_data.e1x_sb_data.common.host_sb_addr.lo = sb_phy_address.as_u32.low;
1757                 vars->status_blocks_arr[sb_id].hc_status_block_data.e1x_sb_data.common.host_sb_addr.hi = sb_phy_address.as_u32.high;
1758             }
1759             else
1760             {
1761                 vars->status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.common.host_sb_addr.lo = sb_phy_address.as_u32.low;
1762                 vars->status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.common.host_sb_addr.hi = sb_phy_address.as_u32.high;
1763             }
1764         }
1765         if CHK_NULL(vars->status_blocks_arr[sb_id].host_hc_status_block.e1x_sb)
1766         {
1767             DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
1768             return LM_STATUS_RESOURCE ;
1769         }
1770         mm_mem_zero((void *)(vars->status_blocks_arr[sb_id].host_hc_status_block.e1x_sb), mem_size);
1771     }
1772 
1773     mem_size = DEF_STATUS_BLOCK_BUFFER_SIZE;
1774 
1775 
1776     if( b_is_alloc )
1777     {
1778         vars->gen_sp_status_block.hc_sp_status_blk = mm_alloc_phys_mem(pdev,
1779                                                     mem_size,
1780                                                     &(vars->gen_sp_status_block.blk_phy_address),
1781                                                     0,
1782                                                     mm_cli_idx);
1783     }
1784 
1785     if CHK_NULL(vars->gen_sp_status_block.hc_sp_status_blk)
1786     {
1787         DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
1788         return LM_STATUS_RESOURCE ;
1789     }
1790 
1791     mm_mem_zero((void *)(vars->gen_sp_status_block.hc_sp_status_blk), mem_size);
1792 
1793     /* Now reset the status-block ack values back to zero. */
1794     lm_reset_sb_ack_values(pdev);
1795 
1796     mm_mem_zero(pdev->debug_info.ack_dis,     sizeof(pdev->debug_info.ack_dis));
1797     mm_mem_zero(pdev->debug_info.ack_en,      sizeof(pdev->debug_info.ack_en));
1798     pdev->debug_info.ack_def_dis = pdev->debug_info.ack_def_en = 0;
1799     mm_mem_zero(pdev->debug_info.rx_only_int, sizeof(pdev->debug_info.rx_only_int));
1800     mm_mem_zero(pdev->debug_info.tx_only_int, sizeof(pdev->debug_info.tx_only_int));
1801     mm_mem_zero(pdev->debug_info.both_int,    sizeof(pdev->debug_info.both_int));
1802     mm_mem_zero(pdev->debug_info.empty_int,   sizeof(pdev->debug_info.empty_int));
1803     mm_mem_zero(pdev->debug_info.false_int,   sizeof(pdev->debug_info.false_int));
1804 
1805     /* Register common and ethernet connection types completion callback. */
1806     lm_sq_comp_cb_register(pdev, ETH_CONNECTION_TYPE, lm_eth_comp_cb);
1807     lm_sq_comp_cb_register(pdev, NONE_CONNECTION_TYPE, lm_eq_comp_cb);
1808 
1809     /* SlowPath Info */
1810     lm_status = lm_alloc_setup_slowpath_resc(pdev, b_is_alloc);
1811     if (lm_status != LM_STATUS_SUCCESS)
1812     {
1813         DbgMessage(pdev, FATAL, "lm_alloc_client_info failed lm-status = %d\n", lm_status);
1814         return lm_status;
1815     }
1816 
1817 
1818     /* Client Info */
1819     if( b_is_alloc )
1820     {
1821         lm_status = lm_alloc_client_info(pdev);
1822         if (lm_status != LM_STATUS_SUCCESS)
1823         {
1824             DbgMessage(pdev, FATAL, "lm_alloc_client_info failed lm-status = %d\n", lm_status);
1825             return lm_status;
1826         }
1827     }
1828 
1829     lm_status = lm_setup_client_info(pdev);
1830     if (lm_status != LM_STATUS_SUCCESS)
1831     {
1832         DbgMessage(pdev, FATAL, "lm_setup_client_info failed lm-status = %d\n", lm_status);
1833         return lm_status;
1834     }
1835 
1836     //  Context (roundup(MAX_CONN / CONN_PER_PAGE)). We may configure the CDU to have more than max_func_connections; specifically, we will
1837     // configure the CDU to have max_port_connections since it is a per-port register and not per-function, but it is OK to allocate
1838     // less for the CDU and allocate only what will be used in practice - which is what is configured in max_func_connections.
1839     alloc_num = vars->context_cdu_num_pages = (params->max_func_connections / params->num_context_in_page) +
1840         ((params->max_func_connections % params->num_context_in_page)? 1:0);
1841 
1842     //TODO: optimize the roundup
1843     //TODO: assert that we did not go over the limit
1844 
1845     // allocate buffer pointers
1846     if( b_is_alloc )
1847     {
1848         mem_size = alloc_num * sizeof(void *) ;
1849         vars->context_cdu_virt_addr_table = (void **) mm_alloc_mem( pdev, mem_size, mm_cli_idx );
1850     }
1851     if CHK_NULL( vars->context_cdu_virt_addr_table )
1852     {
1853         DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
1854         return LM_STATUS_RESOURCE ;
1855     }
1856     else if ( b_is_alloc )
1857     {
1858         mm_mem_zero( vars->context_cdu_virt_addr_table, mem_size ) ;
1859     }
1860 
1861     if( b_is_alloc )
1862     {
1863         mem_size = alloc_num * sizeof(lm_address_t) ;
1864         vars->context_cdu_phys_addr_table = mm_alloc_mem(pdev, mem_size, mm_cli_idx );
1865     }
1866 
1867     if CHK_NULL( vars->context_cdu_phys_addr_table )
1868     {
1869         DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
1870         return LM_STATUS_RESOURCE ;
1871     }
1872     else if ( b_is_alloc )
1873     {
1874         mm_mem_zero(vars->context_cdu_phys_addr_table, mem_size );
1875     }
1876 
1877     /* TBD: for starters, we'll just allocate each page separately; to save space in the future, we may want */
1878     for( i = 0  ;i < alloc_num; i++)
1879     {
1880         if( b_is_alloc )
1881         {
1882             vars->context_cdu_virt_addr_table[i] = lm_setup_allocate_ilt_client_page(pdev,
1883                                                                                      (lm_address_t*)&vars->context_cdu_phys_addr_table[i],
1884                                                                                      mm_cli_idx);
1885         }
1886         if CHK_NULL( vars->context_cdu_virt_addr_table[i] )
1887         {
1888             DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
1889             return LM_STATUS_RESOURCE ;
1890         }
1891         mm_mem_zero( vars->context_cdu_virt_addr_table[i], params->ilt_client_page_size ) ;
1892     }
1893 
1894 
1895     //  Searcher T1  (roundup to log2 of 64*MAX_CONN), T2 is 1/4 of T1. The searcher has a 'per-function' register we configure
1896     // with the number of max connections, therefore, we use the max_func_connections. It can be different per function and independent
1897     // from what we configure for qm/timers/cdu.
1898     alloc_size = (log2_align(max(params->max_func_connections,(u32_t)1000))*64);
1899     alloc_num = vars->searcher_t1_num_pages = max((alloc_size / params->ilt_client_page_size),(u32_t)1);
1900     mem_size = alloc_num * sizeof(void *) ;
1901 
1902     if( b_is_alloc )
1903     {
1904         vars->searcher_t1_virt_addr_table = (void **) mm_alloc_mem(pdev, mem_size, mm_cli_idx);
1905     }
1906     if CHK_NULL(vars->searcher_t1_virt_addr_table)
1907     {
1908         DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
1909         return LM_STATUS_RESOURCE ;
1910     }
1911     else if ( b_is_alloc )
1912     {
1913         mm_mem_zero( vars->searcher_t1_virt_addr_table, mem_size ) ;
1914     }
1915 
1916     mem_size = alloc_num * sizeof(lm_address_t) ;
1917 
1918     if( b_is_alloc )
1919     {
1920         vars->searcher_t1_phys_addr_table = mm_alloc_mem(pdev, mem_size, mm_cli_idx );
1921     }
1922     if CHK_NULL(vars->searcher_t1_phys_addr_table)
1923     {
1924         DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
1925         return LM_STATUS_RESOURCE ;
1926     }
1927     else if ( b_is_alloc )
1928     {
1929         mm_mem_zero( vars->searcher_t1_phys_addr_table, mem_size ) ;
1930     }
1931 
1932     for( i = 0  ; i < alloc_num; i++ )
1933     {
1934         if( b_is_alloc )
1935         {
1936             vars->searcher_t1_virt_addr_table[i] = lm_setup_allocate_ilt_client_page(pdev,
1937                                                          (lm_address_t*)&(vars->searcher_t1_phys_addr_table[i]),
1938                                                          mm_cli_idx);
1939         }
1940         if CHK_NULL( vars->searcher_t1_virt_addr_table[i] )
1941         {
1942             DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
1943             return LM_STATUS_RESOURCE ;
1944         }
1945         mm_mem_zero( vars->searcher_t1_virt_addr_table[i], params->ilt_client_page_size ) ;
1946     }
1947 
1948     // allocate searcher T2 table
1949     // T2 is not entered into the ILT
1950     alloc_size = (params->max_func_connections + 4)*64;
1951     alloc_num = vars->searcher_t2_num_pages = alloc_size / params->ilt_client_page_size +
1952         ((alloc_size % params->ilt_client_page_size)? 1:0) ;
1953     mem_size = alloc_num * sizeof(void *) ;
1954 
1955     if ( b_is_alloc )
1956     {
1957         vars->searcher_t2_virt_addr_table = (void **) mm_alloc_mem(pdev, mem_size, mm_cli_idx) ;
1958     }
1959     if CHK_NULL(vars->searcher_t2_virt_addr_table)
1960     {
1961         DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
1962         return LM_STATUS_RESOURCE ;
1963     }
1964     else if (b_is_alloc)
1965     {
1966         mm_mem_zero( vars->searcher_t2_virt_addr_table, mem_size ) ;
1967     }
1968 
1969     mem_size = alloc_num * sizeof(lm_address_t) ;
1970     if (b_is_alloc)
1971     {
1972         vars->searcher_t2_phys_addr_table = mm_alloc_mem(pdev, mem_size, mm_cli_idx );
1973     }
1974     if CHK_NULL(vars->searcher_t2_phys_addr_table)
1975     {
1976         DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
1977         return LM_STATUS_RESOURCE ;
1978     }
1979 
1980     for( i = 0  ; i < alloc_num; i++)
1981     {
1982         if (b_is_alloc )
1983         {
1984             vars->searcher_t2_virt_addr_table[i] = lm_setup_allocate_ilt_client_page(pdev,
1985                                                          (lm_address_t*)&(vars->searcher_t2_phys_addr_table[i]),
1986                                                          mm_cli_idx);
1987         }
1988         if CHK_NULL(vars->searcher_t2_virt_addr_table[i])
1989         {
1990             DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
1991             return LM_STATUS_RESOURCE ;
1992         }
1993         mm_mem_zero( vars->searcher_t2_virt_addr_table[i],params->ilt_client_page_size ) ;
1994     }
1995 
1996     //  Timer block array (MAX_CONN*8) phys uncached. The timer block has a per-port register that defines its size, and the amount of
1997     // memory we allocate MUST match this number, therefore we have to allocate the amount of max_port_connections.
1998     alloc_size = ( 8 * pdev->hw_info.max_port_conns);
1999     alloc_num = vars->timers_linear_num_pages = alloc_size / params->ilt_client_page_size +
2000         ((alloc_size % params->ilt_client_page_size)? 1:0) ;
2001     mem_size = alloc_num * sizeof(void *) ;
2002 
2003     if( b_is_alloc )
2004     {
2005         vars->timers_linear_virt_addr_table = (void **) mm_alloc_mem(pdev, mem_size, mm_cli_idx );
2006     }
2007     if CHK_NULL(vars->timers_linear_virt_addr_table)
2008     {
2009         DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
2010         return LM_STATUS_RESOURCE ;
2011     }
2012     else if ( b_is_alloc )
2013     {
2014         mm_mem_zero( vars->timers_linear_virt_addr_table, mem_size ) ;
2015     }
2016 
2017     mem_size = alloc_num * sizeof(lm_address_t) ;
2018 
2019     if ( b_is_alloc )
2020     {
2021         vars->timers_linear_phys_addr_table = mm_alloc_mem(pdev, mem_size, mm_cli_idx );
2022     }
2023     if CHK_NULL(vars->timers_linear_phys_addr_table)
2024     {
2025         DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
2026         return LM_STATUS_RESOURCE ;
2027     }
2028     else if ( b_is_alloc )
2029     {
2030         mm_mem_zero( vars->timers_linear_phys_addr_table, mem_size ) ;
2031     }
2032 
2033     for( i = 0  ;i < alloc_num; i++)
2034     {
2035         if( b_is_alloc )
2036         {
2037             vars->timers_linear_virt_addr_table[i] = lm_setup_allocate_ilt_client_page(pdev,
2038                                                            (lm_address_t*)&(vars->timers_linear_phys_addr_table[i]),
2039                                                            mm_cli_idx);
2040         }
2041         if CHK_NULL(vars->timers_linear_virt_addr_table[i])
2042         {
2043             DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
2044             return LM_STATUS_RESOURCE ;
2045         }
2046         mm_mem_zero( vars->timers_linear_virt_addr_table[i], params->ilt_client_page_size ) ;
2047     }
2048 
2049     //  QM queues (128*MAX_CONN). The QM has a per-port register that defines its size, and the amount of
2050     // memory we allocate MUST match this number, therefore we have to allocate the amount of max_port_connections.
2051     alloc_size = ( 128 * pdev->hw_info.max_common_conns);
2052     alloc_num = vars->qm_queues_num_pages = alloc_size / params->ilt_client_page_size +
2053         ((alloc_size % params->ilt_client_page_size)? 1:0) ;
2054     mem_size = alloc_num * sizeof(void *) ;
2055 
2056     if( b_is_alloc )
2057     {
2058         vars->qm_queues_virt_addr_table = (void **) mm_alloc_mem(pdev, mem_size, mm_cli_idx );
2059     }
2060     if CHK_NULL(vars->qm_queues_virt_addr_table)
2061     {
2062         DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
2063         return LM_STATUS_RESOURCE ;
2064     }
2065     else if (b_is_alloc)
2066     {
2067         mm_mem_zero( vars->qm_queues_virt_addr_table, mem_size ) ;
2068     }
2069 
2070     mem_size = alloc_num * sizeof(lm_address_t) ;
2071 
2072     if( b_is_alloc )
2073     {
2074         vars->qm_queues_phys_addr_table = mm_alloc_mem(pdev, mem_size, mm_cli_idx );
2075     }
2076     if CHK_NULL(vars->qm_queues_phys_addr_table)
2077     {
2078         DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
2079         return LM_STATUS_RESOURCE ;
2080     }
2081     else if (b_is_alloc)
2082     {
2083         mm_mem_zero( vars->qm_queues_phys_addr_table, mem_size ) ;
2084     }
2085 
2086     for( i=0  ;i < alloc_num; i++)
2087     {
2088         if (b_is_alloc)
2089         {
2090             vars->qm_queues_virt_addr_table[i] = lm_setup_allocate_ilt_client_page(pdev,
2091                                                        (lm_address_t*)&(vars->qm_queues_phys_addr_table[i]),
2092                                                        mm_cli_idx);
2093         }
2094         if CHK_NULL( vars->qm_queues_virt_addr_table[i] )
2095         {
2096             DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
2097             return LM_STATUS_RESOURCE ;
2098         }
2099         mm_mem_zero( vars->qm_queues_virt_addr_table[i],params->ilt_client_page_size ) ;
2100     }
2101 
2102     // common scratchpad buffer for dmae copies of less than 4 bytes
2103     if( b_is_alloc )
2104     {
2105         void *virt = mm_alloc_phys_mem(pdev,
2106                           8,
2107                           &params->dmae_copy_scratchpad_phys,
2108                           0,
2109                           mm_cli_idx);
2110         if CHK_NULL(virt)
2111         {
2112             DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
2113             return LM_STATUS_RESOURCE ;
2114         }
2115     }
2116 
2117     return LM_STATUS_SUCCESS ;
2118 }
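
/* Two-phase usage sketch for lm_common_setup_alloc_resc(), as wired up by
 * lm_alloc_resc() and lm_setup_resc() below:
 *
 *     lm_common_setup_alloc_resc(pdev, TRUE);   // first init: allocate + zero
 *     ...
 *     lm_common_setup_alloc_resc(pdev, FALSE);  // later setup: re-zero only
 *
 * Physical buffers and address tables survive the FALSE pass; only their
 * contents are reset, which is why every CHK_NULL check runs in both. */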
2119 
2120 lm_status_t ecore_resc_alloc(struct _lm_device_t * pdev)
2121 {
2122     pdev->ecore_info.gunzip_buf = mm_alloc_phys_mem(pdev, FW_BUF_SIZE, &pdev->ecore_info.gunzip_phys, PHYS_MEM_TYPE_NONCACHED, LM_RESOURCE_COMMON);
2123     if CHK_NULL(pdev->ecore_info.gunzip_buf)
2124     {
2125         return LM_STATUS_RESOURCE ;
2126     }
2127     return LM_STATUS_SUCCESS;
2128 }
2129 
2130 /**lm_dmae_alloc_resc
2131  * Allocate and initialize the TOE and default DMAE contexts.
2132  * The statistics DMAE context is set-up in lm_stats_alloc_resc.
2133  *
2134  * @param pdev the device to use.
2135  *
2136  * @return lm_status_t LM_STATUS_SUCCESS on success, some other
2137  *         failure code on failure.
2138  */
2139 static lm_status_t lm_dmae_alloc_resc(struct _lm_device_t * pdev)
2140 {
2141     lm_status_t lm_status = LM_STATUS_FAILURE;
2142     lm_dmae_context_info_t* default_dmae_info = lm_dmae_get(pdev, LM_DMAE_DEFAULT);
2143     lm_dmae_context_info_t* toe_dmae_info = lm_dmae_get(pdev, LM_DMAE_TOE);
2144 
2145     //allocate and initialize the default DMAE context (used for init, WB access etc...)
2146     lm_status = lm_dmae_locking_policy_create(  pdev,
2147                                                 LM_PROTECTED_RESOURCE_DMAE_DEFAULT,
2148                                                 LM_DMAE_LOCKING_POLICY_TYPE_PER_PF,
2149                                                 &default_dmae_info->locking_policy);
2150     if( LM_STATUS_SUCCESS != lm_status )
2151     {
2152         return lm_status ;
2153     }
2154 
2155     default_dmae_info->context = lm_dmae_context_create(pdev,
2156                                                         DMAE_WB_ACCESS_FUNCTION_CMD(FUNC_ID(pdev)),
2157                                                         &default_dmae_info->locking_policy,
2158                                                         CHANGE_ENDIANITY);
2159     if( NULL == default_dmae_info->context )
2160     {
2161         return LM_STATUS_FAILURE;
2162     }
2163 
2164     //allocate and initialize the TOE DMAE context
2165     lm_status = lm_dmae_locking_policy_create(  pdev,
2166                                                 LM_PROTECTED_RESOURCE_DMAE_TOE,
2167                                                 LM_DMAE_LOCKING_POLICY_TYPE_INTER_PF,
2168                                                 &toe_dmae_info->locking_policy);
2169     if( LM_STATUS_SUCCESS != lm_status )
2170     {
2171         return lm_status ;
2172     }
2173 
2174     toe_dmae_info->context = lm_dmae_context_create(pdev,
2175                                                     DMAE_COPY_PCI_PCI_PORT_0_CMD + PORT_ID(pdev),
2176                                                     &toe_dmae_info->locking_policy,
2177                                                     TRUE);
2178     if( NULL == toe_dmae_info->context )
2179     {
2180         return LM_STATUS_FAILURE;
2181     }
2182 
2183     return lm_status;
2184 }
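
/* Design note (inferred from the two contexts above): the default DMAE
 * context wraps DMAE_WB_ACCESS_FUNCTION_CMD, a per-function command, so a
 * per-PF locking policy suffices; the TOE context uses
 * DMAE_COPY_PCI_PCI_PORT_0_CMD + PORT_ID(pdev), a per-port resource that
 * several PFs may share - hence the stricter INTER_PF locking policy. */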
2185 
2186 /* Description:
2187 *    This routine is called during driver initialization.  It is responsible
2188 *    for allocating memory resources needed by the driver for common init.
2189 *    This routine calls the following mm routines:
2190 *    mm_alloc_mem, mm_alloc_phys_mem, and mm_init_packet_desc. */
2191 lm_status_t lm_alloc_resc(struct _lm_device_t *pdev)
2192 {
2193     lm_params_t*    params     = NULL ;
2194     lm_variables_t* vars       = NULL ;
2195     lm_status_t     lm_status  = LM_STATUS_SUCCESS ;
2196     u8_t            mm_cli_idx = 0;
2197     if CHK_NULL( pdev )
2198     {
2199         return LM_STATUS_INVALID_PARAMETER ;
2200     }
2201     DbgMessage(pdev, INFORMi , "### lm_alloc_resc\n");
2202 
2203 #ifdef VF_INVOLVED
2204     if (IS_VFDEV(pdev)) {
2205         lm_status = lm_vf_init_dev_info(pdev);
2206         if (LM_STATUS_SUCCESS != lm_status)
2207             return lm_status;
2208     }
2209 #endif
2210 
2211     params     = &pdev->params ;
2212     vars       = &(pdev->vars) ;
2213 
2214     mm_cli_idx = LM_CLI_IDX_MAX;//!!DP mm_cli_idx_to_um_idx(LM_CLI_IDX_MAX);
2215 
2216     // Cleaning after driver unload
2217     pdev->context_info = NULL;
2218     mm_mem_zero(&pdev->cid_recycled_callbacks, sizeof(pdev->cid_recycled_callbacks));
2219     mm_mem_zero(&pdev->toe_info, sizeof(pdev->toe_info));
2220 
2221     lm_status = lm_alloc_sq(pdev);
2222     if(LM_STATUS_SUCCESS != lm_status)
2223     {
2224         return lm_status;
2225     }
2226 
2227     /* alloc forward chain */
2228     pdev->tx_info.catchup_chain_idx = FWD_CID(pdev);
2229     if (IS_PFDEV(pdev))
2230     {
2231         /* Allocate Event-Queue: only the pf has an event queue */
2232         lm_status = lm_alloc_eq(pdev);
2233         if(LM_STATUS_SUCCESS != lm_status)
2234         {
2235             return lm_status;
2236         }
2237 
2238         pdev->tx_info.catchup_chain_idx = FWD_CID(pdev);
2239 
2240         lm_status = lm_alloc_txq(pdev, pdev->tx_info.catchup_chain_idx,
2241                                  (u16_t)params->l2_tx_bd_page_cnt[LM_CLI_IDX_FWD],
2242                                  (u16_t)params->l2_tx_coal_buf_cnt[LM_CLI_IDX_FWD]);
2243         if(LM_STATUS_SUCCESS != lm_status)
2244         {
2245             return lm_status;
2246         }
2247     }
2248 
2249     if (IS_PFDEV(pdev))
2250     {
2251         lm_status = lm_common_setup_alloc_resc(pdev, TRUE ) ;
2252     }
2253 #ifdef VF_INVOLVED
2254     else
2255     {
2256         lm_status = lm_vf_setup_alloc_resc(pdev, TRUE);
2257     }
2258 #endif
2259 
2260     if(LM_STATUS_SUCCESS != lm_status)
2261     {
2262         return lm_status;
2263     }
2264 
2265     if (IS_PFDEV(pdev)) {
2266         lm_status = lm_stats_alloc_resc( pdev ) ;
2267         if( LM_STATUS_SUCCESS != lm_status )
2268         {
2269             return lm_status ;
2270         }
2271 
2272         lm_status = lm_dmae_alloc_resc(pdev);
2273         if( LM_STATUS_SUCCESS != lm_status )
2274         {
2275             DbgBreakIf( LM_STATUS_SUCCESS != lm_status ) ;
2276             return lm_status ;
2277         }
2278 
2279         // Init context allocation system
2280         lm_status = lm_alloc_context_pool(pdev);
2281         if( LM_STATUS_SUCCESS != lm_status )
2282         {
2283             DbgBreakIf( LM_STATUS_SUCCESS != lm_status ) ;
2284             return lm_status ;
2285         }
2286         //  CAM mirror?
2287 
2288         /* alloc for ecore */
2289         lm_status = ecore_resc_alloc(pdev);
2290         if( LM_STATUS_SUCCESS != lm_status )
2291         {
2292             DbgBreakIf( LM_STATUS_SUCCESS != lm_status ) ;
2293             return lm_status ;
2294         }
2295     }
2296     else if (IS_CHANNEL_VFDEV(pdev))
2297     {
2298         // Init context allocation system
2299         lm_status = lm_alloc_context_pool(pdev);
2300         if( LM_STATUS_SUCCESS != lm_status )
2301         {
2302             DbgBreakIf( LM_STATUS_SUCCESS != lm_status ) ;
2303             return lm_status ;
2304         }
2305 
2306         lm_status = lm_stats_alloc_fw_resc(pdev);
2307         if( LM_STATUS_SUCCESS != lm_status )
2308         {
2309             DbgBreakIf( LM_STATUS_SUCCESS != lm_status ) ;
2310             return lm_status ;
2311         }
2312     }
2313     DbgMessage(pdev, INFORMi , "### exit lm_alloc_resc\n");
2314 
2315     /* FIXME: (MichalS : should be called by um, but this requires lm-um api, so should rethink...) */
2316     lm_status = lm_init_sp_objs(pdev);
2317     if( LM_STATUS_SUCCESS != lm_status )
2318     {
2319         DbgBreakIf( LM_STATUS_SUCCESS != lm_status ) ;
2320         return lm_status ;
2321     }
2322 
2323     return lm_setup_resc(pdev);
2324 }
2325 
2326 /* Description:
2327 *    This routine is called during driver initialization.  It is responsible
2328 *    for initializing memory resources needed by the driver for common init.
2329 *    This routine calls the following mm routines:
2330 *    mm_alloc_mem, mm_alloc_phys_mem, and mm_init_packet_desc. */
2331 lm_status_t lm_setup_resc(struct _lm_device_t *pdev)
2332 {
2333     volatile struct hc_sp_status_block * sp_sb = NULL;
2334     lm_params_t *    params     = NULL ;
2335     lm_variables_t*  vars       = NULL ;
2336     lm_tx_info_t *   tx_info    = NULL ;
2337     lm_rx_info_t *   rx_info    = NULL ;
2338     u32_t            i          = 0 ;
2339     u32_t            j          = 0 ;
2340     lm_status_t      lm_status  = LM_STATUS_SUCCESS ;
2341 
2342     if CHK_NULL( pdev )
2343     {
2344         return LM_STATUS_INVALID_PARAMETER ;
2345     }
2346 
2347     params    = &pdev->params;
2348     vars      = &(pdev->vars);
2349     tx_info   = &pdev->tx_info;
2350     rx_info   = &pdev->rx_info;
2351     sp_sb     = lm_get_default_status_block(pdev);
2352 
2353     mm_mem_zero(&pdev->cid_recycled_callbacks, sizeof(pdev->cid_recycled_callbacks));
2354     mm_mem_zero(rx_info->appr_mc.mcast_add_hash_bit_array, sizeof(rx_info->appr_mc.mcast_add_hash_bit_array));
2355 
2356     mm_mem_zero(&pdev->vars.nig_mirror, sizeof(lm_nig_mirror_t));
2357 
2358     pdev->vars.b_is_dmae_ready = FALSE ;
2359 
2360     if (IS_PFDEV(pdev)) {
2361         // adjust the FWD Tx ring consumer - default sb
2362         lm_status = lm_setup_txq(pdev, pdev->tx_info.catchup_chain_idx);
2363         if(LM_STATUS_SUCCESS != lm_status)
2364         {
2365             return lm_status;
2366         }
2367     }
2368 
2369     if (IS_PFDEV(pdev)) {
2370         /* setup mac filtering to drop all for all clients */
2371         // lm_status = lm_setup_tstorm_mac_filter(pdev); FIXME - necessary??
2372         if(LM_STATUS_SUCCESS != lm_status)
2373         {
2374             return lm_status;
2375         }
2376     }
2377 
2378     if (IS_PFDEV(pdev)) {
2379         lm_status = lm_common_setup_alloc_resc(pdev, FALSE ) ;
2380     }
2381 #ifdef VF_INVOLVED
2382     else {
2383         lm_status = lm_vf_setup_alloc_resc(pdev, FALSE);
2384     }
2385 #endif
2386     if(LM_STATUS_SUCCESS != lm_status)
2387     {
2388         return lm_status;
2389     }
2390 
2391     lm_status = lm_setup_sq(pdev);
2392     if(LM_STATUS_SUCCESS != lm_status)
2393     {
2394         return lm_status;
2395     }
2396 
2397     /* Only pfdev has an event-queue */
2398     if (IS_PFDEV(pdev))
2399     {
2400         lm_status = lm_setup_eq(pdev);
2401         if(LM_STATUS_SUCCESS != lm_status)
2402         {
2403             return lm_status;
2404         }
2405     }
2406 
2407     // Initialize T1
2408     if (IS_PFDEV(pdev)) {
2409         for( i = 0 ; i < vars->searcher_t1_num_pages ; i ++)
2410         {
2411             mm_mem_zero( vars->searcher_t1_virt_addr_table[i], params->ilt_client_page_size ) ;
2412         }
2413 
2414         // Initialize T2: first make each element's "next" field point to the element after it, then fix up the edges
2415         for(i=0 ; i < vars->searcher_t2_num_pages ; i ++)
2416         {
2417             for (j=0; j < params->ilt_client_page_size; j+=64)
2418             {
2419                 *(u64_t*)((char*)vars->searcher_t2_virt_addr_table[i]+j+56) = vars->searcher_t2_phys_addr_table[i].as_u64+j+64; //64bit pointer
2420             }
2421             // now fix up the last line in the block to point to the next block
2422             j = params->ilt_client_page_size - 8;
2423 
2424             if (i < vars->searcher_t2_num_pages -1)
2425             {
2426                 // this is not the last block
2427                 *(u64_t*)((char*)vars->searcher_t2_virt_addr_table[i]+j) = vars->searcher_t2_phys_addr_table[i+1].as_u64; //64bit pointer
2428             }
2429         }
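        /* Resulting T2 layout (sketch): every 64-byte element carries an
         * 8-byte physical "next" pointer at offset 56, so element k points at
         * element k+1, and the last element of page i is patched to point at
         * the first element of page i+1 - one contiguous, hardware-walkable
         * singly linked list across all T2 pages. */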
2430 
2431         for( i=0  ;i < vars->timers_linear_num_pages; i++)
2432         {
2433             mm_mem_zero(vars->timers_linear_virt_addr_table[i],params->ilt_client_page_size);
2434         }
2435 
2436 #if defined(EMULATION_DOORBELL_FULL_WORKAROUND)
2437         mm_atomic_set(&vars->doorbells_cnt, DOORBELL_CHECK_FREQUENCY);
2438 #endif
2439 
2440         lm_status = lm_stats_hw_setup(pdev);
2441         if(lm_status != LM_STATUS_SUCCESS)
2442         {
2443             DbgMessage(pdev, WARN, "lm_stats_hw_setup failed.\n");
2444             return lm_status;
2445         }
2446 
2447         lm_stats_fw_setup(pdev);
2448 
2449         // init_context
2450         lm_status = lm_setup_context_pool(pdev) ;
2451         if(lm_status != LM_STATUS_SUCCESS)
2452         {
2453             DbgMessage(pdev, WARN, "lm_setup_context_pool failed.\n");
2454             return lm_status;
2455         }
2456     }
2457     else if (IS_CHANNEL_VFDEV(pdev))
2458     {
2459         lm_status = lm_setup_context_pool(pdev) ;
2460         if(lm_status != LM_STATUS_SUCCESS)
2461         {
2462             DbgMessage(pdev, WARN, "lm_setup_context_pool failed.\n");
2463             return lm_status;
2464         }
2465     }
2466 
2467 
2468     pdev->vars.mac_type = MAC_TYPE_NONE;
2469     pdev->vars.is_pmf = NOT_PMF;
2470 
2471     lm_set_int_coal_info(pdev);
2472 
2473     mm_mem_zero(&pdev->vars.nig_mirror, sizeof(pdev->vars.nig_mirror));
2474 
2475     return lm_status;
2476 }
2477 
2478 /**
2479  * @description
2480  * Indicate packets from the free descriptor list and the given list
2481  * @param pdev
2482  * @param rx_common         - The RSC/RX chain to free.
2483  * @param packet_list       - A list of packets to indicate.
2484  * @param idx               - Chain index.
2485  * @param is_stat_handle    - Whether statistics need to be updated.
2486  */
2487 STATIC void
2488 lm_abort_indicate_free_list( IN OUT   lm_device_t*          pdev,
2489                              IN       lm_rx_chain_common_t* rx_common,
2490                              IN       s_list_t*             packet_list,
2491                              IN const u32_t                 idx,
2492                              IN const u8_t                  is_stat_handle)
2493 {
2494     lm_packet_t*            pkt          = NULL;
2495     for(; ;)
2496     {
2497         // Walk the entire free list
2498         pkt = (lm_packet_t *) s_list_pop_head(&rx_common->free_descq);
2499         if (pkt == NULL)
2500         {
2501             break;
2502         }
2503         pkt->status = LM_STATUS_ABORTED;
2504         if(is_stat_handle)
2505         {
2506             LM_COMMON_DRV_STATS_ATOMIC_INC_ETH(pdev, rx_aborted);
2507         }
2508 #if (!defined(LINUX) && !defined(__SunOS) && !defined(UEFI) && !defined(DOS))
2509         s_list_push_tail(packet_list, (s_list_entry_t *)pkt);
2510 #endif
2511     }
2512 
2513     if (!s_list_is_empty(packet_list))
2514     {
2515 #if (!defined(LINUX) && !defined(__SunOS) && !defined(UEFI) && !defined(DOS))
2516         mm_indicate_rx(pdev, idx, packet_list, LM_STATUS_ABORTED);
2517 #endif
2518     }
2519 }
2520 
2521 /*******************************************************************************
2522  * Description:
2523  *
2524  * Return:
2525  ******************************************************************************/
2526 void lm_abort( lm_device_t        *pdev,
2527                const lm_abort_op_t abort_op,
2528                const u32_t         idx)
2529 {
2530     lm_packet_t          *pkt          = NULL;
2531     lm_rx_chain_t        *rxq_chain    = NULL;
2532     lm_rx_chain_common_t *rx_common    = NULL;
2533     lm_tpa_chain_t       *tpa_chain    = NULL;
2534     lm_bd_chain_t        *rx_chain_bd  = NULL;
2535     lm_bd_chain_t        *rx_chain_sge = NULL;
2536     lm_tx_chain_t        *tx_chain     = NULL;
2537     s_list_t              packet_list  = {0};
2538     u16_t                 i            = 0;
2539     u16_t                 active_entry = 0;
2540 
2541     DbgMessage(pdev, INFORM, "### lm_abort   abort_op=%d idx=%d\n", abort_op, idx);
2542     switch(abort_op)
2543     {
2544         case ABORT_OP_RX_CHAIN:
2545         case ABORT_OP_INDICATE_RX_CHAIN:
2546         {
2547             rxq_chain    = &LM_RXQ(pdev, idx);
2548             rx_common    = &LM_RXQ_COMMON(pdev, idx);
2549             rx_chain_bd  = &LM_RXQ_CHAIN_BD(pdev, idx);
2550             rx_chain_sge = LM_RXQ_SGE_PTR_IF_VALID(pdev, idx);
2551             // Verify that the BD chains are consistent
2552             DbgBreakIfFastPath( rx_chain_sge && !lm_bd_chains_are_consistent( rx_chain_sge, rx_chain_bd ) );
2553             /* indicate packets from the active descriptor list */
2554             for(; ;)
2555             {
2556                 pkt = (lm_packet_t *) s_list_pop_head(&rxq_chain->active_descq);
2557                 if(pkt == NULL)
2558                 {
2559                     break;
2560                 }
2561                 lm_bd_chain_bds_consumed(rx_chain_bd, 1);
2562                 if( rx_chain_sge )
2563                 {
2564                     lm_bd_chain_bds_consumed(rx_chain_sge, 1);
2565                 }
2566                 LM_COMMON_DRV_STATS_ATOMIC_INC_ETH(pdev, rx_aborted);
2567                 // shutdown flow (indicate to the OS) or D3 flow (recycle to the free list)?
2568                 if (abort_op == ABORT_OP_INDICATE_RX_CHAIN)
2569                 {
2570 #if (!defined(LINUX) && !defined(__SunOS) && !defined(UEFI) && !defined(DOS))
2571                     s_list_push_tail(&packet_list, (s_list_entry_t *)pkt);
2572 #endif
2573                 }
2574                 else
2575                 {
2576                     s_list_push_tail(&rx_common->free_descq, &pkt->link);
2577                 }
2578             }
2579             if ( ABORT_OP_INDICATE_RX_CHAIN == abort_op )
2580             {
2581                 /* indicate packets from the free descriptor list */
2582                 lm_abort_indicate_free_list( pdev,
2583                                              rx_common,
2584                                              &packet_list,
2585                                              idx,
2586                                              TRUE);
2587             }
2588         } // ABORT_OP_INDICATE_RX_CHAIN
2589         // Fall Through
2590         case ABORT_OP_TPA_CHAIN:
2591         case ABORT_OP_INDICATE_TPA_CHAIN:
2592         {
2593             tpa_chain    = &LM_TPA(pdev, idx);
2594             rx_chain_bd  = &LM_TPA_CHAIN_BD(pdev, idx);
2595             rx_common    = &LM_TPA_COMMON(pdev, idx);
2596 
2597             DbgBreakIf(!(s_list_is_empty(&packet_list)));
2598 
2599             /* indicate packets from the active descriptor list */
2600             for(i = lm_bd_chain_cons_idx(rx_chain_bd); i != lm_bd_chain_prod_idx(rx_chain_bd); i++ )
2601             {
2602                 // Walk all the valid active descriptors.
2603                 // Valid active descriptors can only be between the consumer and the producer.
2604                 active_entry = LM_TPA_BD_ENTRY_TO_ACTIVE_ENTRY(pdev,idx,i);
2605 
2606                 LM_TPA_ACTIVE_ENTRY_BOUNDARIES_VERIFY(pdev, idx, active_entry);
2607                 if(LM_TPA_MASK_TEST_ACTIVE_BIT(pdev, idx, active_entry))
2608                 {
2609                     LM_TPA_MASK_CLEAR_ACTIVE_BIT(pdev, idx, active_entry);
2610                     pkt = tpa_chain->sge_chain.active_descq_array[active_entry];
2611 
2612                     if(NULL == pkt)
2613                     {
2614                         DbgBreakMsg(" Packet is null suppose to be null");
2615                         continue;
2616                     }
2617 
2618                     lm_bd_chain_bds_consumed(rx_chain_bd, 1);
2619                     // shutdown flow (indicate to the OS) or D3 flow (recycle to the free list)?
2620                     if ((abort_op == ABORT_OP_INDICATE_TPA_CHAIN) ||
2621                         (abort_op == ABORT_OP_INDICATE_RX_CHAIN))
2622                     {
2623 #if (DBG)
2624                         /************start TPA debug code*******************************/
2625                         tpa_chain->dbg_params.pck_ret_abort_active++;
2626                         /************end TPA debug code*********************************/
2627 #endif //DBG
2628 #if (!defined(LINUX) && !defined(__SunOS) && !defined(UEFI) && !defined(DOS))
2629                     s_list_push_tail(&packet_list, (s_list_entry_t *)pkt);
2630 #endif
2631                 }
2632                     else
2633                 {
2634                         s_list_push_tail(&rx_common->free_descq, &pkt->link);
2635                     }
2636                 }
2637                 }
2638             if ((abort_op == ABORT_OP_INDICATE_TPA_CHAIN) ||
2639                 (abort_op == ABORT_OP_INDICATE_RX_CHAIN))
2640             {
2641 #if (DBG)
2642                 /************start TPA debug code*******************************/
2643                 // Total packets aborted
2644                 tpa_chain->dbg_params.pck_ret_abort += s_list_entry_cnt(&packet_list) + s_list_entry_cnt(&rx_common->free_descq);
2645 
2646                 if((tpa_chain->dbg_params.pck_ret_abort + tpa_chain->dbg_params.pck_ret_from_chip) !=
2647                    (tpa_chain->dbg_params.pck_received + tpa_chain->dbg_params.pck_received_ind) )
2648                 {
2649                     DbgBreakMsg("VBD didn't return all packets for this chain");
2650                 }
2651                 /************end TPA debug code*********************************/
2652 #endif //DBG
2653                 /* indicate packets from the free descriptor list */
2654                 lm_abort_indicate_free_list( pdev,
2655                                              rx_common,
2656                                              &packet_list,
2657                                              idx,
2658                                              FALSE);
2659 
2660 #if (DBG)
2661                 /************start TPA debug code*******************************/
2662                 // make sure all packets were aborted
2663                 if(0 != (s_list_entry_cnt(&packet_list) + s_list_entry_cnt(&rx_common->free_descq)))
2664                 {
2665                     DbgBreakMsg("VBD didn't return all packets for this chain");
2666                 }
2667                 /************end TPA debug code*********************************/
2668 #endif //DBG
2669             }
2670         break;
2671         } // ABORT_OP_INDICATE_TPA_CHAIN
2672         case ABORT_OP_INDICATE_TX_CHAIN:
2673         {
2674             tx_chain = &LM_TXQ(pdev, idx);
2675             for(; ;)
2676             {
2677                 pkt = (lm_packet_t *) s_list_pop_head(&tx_chain->active_descq);
2678                 if(pkt == NULL)
2679                 {
2680                     break;
2681                 }
2682                 pkt->status = LM_STATUS_ABORTED;
2683                 LM_COMMON_DRV_STATS_ATOMIC_INC_ETH(pdev, tx_aborted);
2684                 lm_bd_chain_bds_consumed(&tx_chain->bd_chain, pkt->u1.tx.bd_used);
2685                 if (pkt->u1.tx.coalesce_buf) {
2686                     /* return coalesce buffer to the chain's pool */
2687                     lm_put_coalesce_buffer(pdev, tx_chain, pkt->u1.tx.coalesce_buf);
2688                     pkt->u1.tx.coalesce_buf = NULL;
2689                 }
2690                 s_list_push_tail(&packet_list, (s_list_entry_t *)pkt);
2691             }
2692             if (!s_list_is_empty(&packet_list))
2693             {
2694                 mm_indicate_tx(pdev, idx, &packet_list);
2695             }
2696 
2697             // changed from pdev->params.l2_tx_bd_page_cnt[idx] to pdev->params.l2_tx_bd_page_cnt[0]
2698             DbgBreakIf(!lm_bd_chain_is_full(&tx_chain->bd_chain));
2699             DbgBreakIf(s_list_entry_cnt(&tx_chain->coalesce_buf_list) != tx_chain->coalesce_buf_cnt);
2700             break;
2701         } // ABORT_OP_INDICATE_TX_CHAIN
2702         default:
2703         {
2704             DbgBreakMsg("unknown abort operation.\n");
2705             break;
2706         }
2707     } //switch
2708 } /* lm_abort */
2709 
2710 #include "57710_int_offsets.h"
2711 #include "57711_int_offsets.h"
2712 #include "57712_int_offsets.h"
2713 void ecore_init_e1_firmware(struct _lm_device_t *pdev);
2714 void ecore_init_e1h_firmware(struct _lm_device_t *pdev);
2715 void ecore_init_e2_firmware(struct _lm_device_t *pdev);
2716 
2717 int lm_set_init_arrs(lm_device_t *pdev)
2718 {
2719     u32_t const chip_num = CHIP_NUM(pdev);
2720     switch(chip_num)
2721     {
2722     case CHIP_NUM_5710:
2723         DbgBreakIf( !CHIP_IS_E1(pdev) );
2724         ecore_init_e1_firmware(pdev);
2725         INIT_IRO_ARRAY(pdev) = e1_iro_arr;
2726         break;
2727     case CHIP_NUM_5711:
2728     case CHIP_NUM_5711E:
2729         DbgBreakIf( !CHIP_IS_E1H(pdev) );
2730         ecore_init_e1h_firmware(pdev);
2731         INIT_IRO_ARRAY(pdev) = e1h_iro_arr;
2732         break;
2733     case CHIP_NUM_5712:
2734     case CHIP_NUM_5713:
2735     case CHIP_NUM_5712E:
2736     case CHIP_NUM_5713E:
2737         DbgBreakIf( !CHIP_IS_E2(pdev) );
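        /* FALLTHROUGH - the 5712x E2 family shares ecore_init_e2_firmware()
         * and e2_iro_arr with the chips below. */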
2738     case CHIP_NUM_57800:
2739     case CHIP_NUM_57810:
2740     case CHIP_NUM_57840_4_10:
2741     case CHIP_NUM_57840_2_20:
2742     case CHIP_NUM_57840_OBSOLETE:
2743     case CHIP_NUM_57811:
2744         DbgBreakIf( !CHIP_IS_E2(pdev) && !CHIP_IS_E3(pdev) );
2745         ecore_init_e2_firmware(pdev);
2746         INIT_IRO_ARRAY(pdev) = e2_iro_arr;
2747         break;
2748     default:
2749         DbgMessage(pdev, FATAL, "chip-id=%x NOT SUPPORTED\n", CHIP_NUM(pdev));
2750         return -1; // for now not supported, can't have all three...
2751     }
2752     return 0;
2753 }
2754 
2755