xref: /illumos-gate/usr/src/uts/common/io/bnxe/577xx/drivers/common/lm/l5/lm_l5.c (revision 6680ee99638d23c9c2561c782eb1df2176e04698)
#include "lm5710.h"
#include "everest_iscsi_constants.h"
#include "everest_l5cm_constants.h"
#include "577xx_int_offsets.h"
#include "bd_chain.h"
#include "command.h"
#include "lm_sp_req_mgr.h"
#include "lm_l4sp.h"
#include "lm_l4if.h"
#include "lm_l5if.h"
#include "mm_l5if.h"
#include "mm_l4if.h"
#include "mm.h"


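/** Description
 *  Returns the number of PBL (page base list) entries needed to map a
 *  buffer of the given size, i.e. the buffer size rounded up to whole pages.
 */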
u32_t lm_get_pbl_entries(
    IN  u32_t bufferSize
    )
{
    return CEIL_DIV(bufferSize, LM_PAGE_SIZE);
}


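/** Description
 *  Allocates the physical PBL table (an array of lm_address_t page
 *  addresses) and a parallel table of virtual addresses. 'rt_mem'
 *  selects between the run-time (mm_rt_*) and init-time allocators.
 */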
lm_status_t lm_alloc_pbl_mem(
    IN  struct _lm_device_t *pdev,
    IN  u32_t pbl_entries,
    OUT lm_address_t** pbl_virt,
    OUT lm_address_t *pbl_phy,
    OUT void** pbl_virt_table,
    IN  u8_t rt_mem,
    OUT u32_t *pbl_size,
    IN  u8_t mm_cli_idx
    )
{

    if (CHK_NULL(pdev) || (pbl_entries == 0) ||
        CHK_NULL(pbl_virt) || CHK_NULL(pbl_phy) ||
        CHK_NULL(pbl_size))
    {
        /* allocPblMem - illegal pblSize */
        return LM_STATUS_INVALID_PARAMETER;
    }

    *pbl_size = pbl_entries * sizeof(lm_address_t);

    if (rt_mem)
    {
        *pbl_virt = (lm_address_t *)mm_rt_alloc_phys_mem(pdev,
                                                        *pbl_size,
                                                        pbl_phy,
                                                        0,
                                                        mm_cli_idx);
        if CHK_NULL(*pbl_virt)
        {
            *pbl_size = 0;

            return LM_STATUS_RESOURCE;
        }

        *pbl_virt_table = (void *)mm_rt_alloc_mem(pdev,
                                                   pbl_entries * sizeof(void *),
                                                   mm_cli_idx);

        if CHK_NULL(*pbl_virt_table)
        {
            /* free with the original size, before it is cleared */
            mm_rt_free_phys_mem(pdev, *pbl_size, *pbl_virt, *pbl_phy, mm_cli_idx);
            *pbl_size = 0;
            *pbl_virt = NULL;

            return LM_STATUS_RESOURCE;
        }
    }
    else
    {
        *pbl_virt = (lm_address_t *)mm_alloc_phys_mem_align(pdev,
                                                        *pbl_size,
                                                        pbl_phy,
                                                        LM_PAGE_SIZE,
                                                        0,
                                                        mm_cli_idx);
        if CHK_NULL(*pbl_virt)
        {
            *pbl_size = 0;

            return LM_STATUS_RESOURCE;
        }

        *pbl_virt_table = (void *)mm_alloc_mem(pdev,
                                                pbl_entries * sizeof(void *),
                                                mm_cli_idx);

        if CHK_NULL(*pbl_virt_table)
        {
            *pbl_size = 0;
            *pbl_virt = NULL;

            return LM_STATUS_RESOURCE;
        }
    }

    return LM_STATUS_SUCCESS;
}


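/** Description
 *  Creates a PBL describing 'buffer_size' bytes at buf_base_virt/phy:
 *  computes the entry count, allocates the PBL tables and fills them
 *  with the buffer's page addresses via lm_bd_chain_pbl_set_ptrs().
 */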
lm_status_t lm_create_pbl(
    IN  struct _lm_device_t *pdev,
    IN  void* buf_base_virt,
    IN  lm_address_t* buf_base_phy,
    IN  u32_t buffer_size,
    OUT lm_address_t** pbl_virt,
    OUT lm_address_t* pbl_phy,
    OUT void** pbl_virt_table,
    OUT u32_t *pbl_entries,
    OUT u32_t *pbl_size,
    IN  u8_t rt_mem,
    IN  u8_t mm_cli_idx)
{
    lm_status_t lm_status;

    if (CHK_NULL(pdev) || CHK_NULL(buf_base_virt) ||
        CHK_NULL(buf_base_phy) || CHK_NULL(pbl_virt) ||
        CHK_NULL(pbl_phy) || CHK_NULL(pbl_virt_table) ||
        CHK_NULL(pbl_entries) || CHK_NULL(pbl_size))
    {
        return LM_STATUS_INVALID_PARAMETER;
    }

    *pbl_entries = lm_get_pbl_entries(buffer_size);

    lm_status = lm_alloc_pbl_mem(pdev, *pbl_entries, pbl_virt, pbl_phy, pbl_virt_table, rt_mem, pbl_size, mm_cli_idx);
    if (lm_status != LM_STATUS_SUCCESS)
    {
        *pbl_entries = 0;

        return lm_status;
    }

    lm_status = lm_bd_chain_pbl_set_ptrs(buf_base_virt, *buf_base_phy, *pbl_virt, *pbl_virt_table, *pbl_entries);
    if (lm_status != LM_STATUS_SUCCESS)
    {
        if (rt_mem)
        {
            mm_rt_free_phys_mem(pdev, *pbl_size, *pbl_virt, *pbl_phy, mm_cli_idx);
            mm_rt_free_mem(pdev, *pbl_virt_table, *pbl_entries * sizeof(void *), mm_cli_idx);
        }

        *pbl_entries = 0;
        *pbl_size = 0;

        return lm_status;
    }

    return LM_STATUS_SUCCESS;
}


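/** Description
 *  Allocates the page chain for an L5 event queue, or reuses the
 *  addresses saved in 'eq_addr_save' from a previous allocation (e.g.
 *  across a D3 power transition); in both cases the chain memory is
 *  re-zeroed.
 */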
lm_status_t
lm_l5_alloc_eq(
    IN      struct _lm_device_t  *pdev,
    IN      lm_eq_chain_t        *eq_chain,
    IN      lm_eq_addr_t         *eq_addr_save,
    IN      u16_t                page_cnt,
    IN      u8_t                 cli_idx)
{
    u32_t                mem_size     = 0;

    /* check arguments */
    if (CHK_NULL(pdev) || CHK_NULL(eq_chain) || !page_cnt ||
        ((LM_CLI_IDX_FCOE != cli_idx) && (LM_CLI_IDX_ISCSI != cli_idx)))
    {
        return LM_STATUS_FAILURE;
    }

    DbgMessage(pdev, INFORMi | INFORMl5sp, "#lm_l5_alloc_eq, eq_chain=%p, page_cnt=%d\n", eq_chain, page_cnt);

    /* alloc the chain */
    mem_size = page_cnt * LM_PAGE_SIZE;

    if(!eq_addr_save->b_allocated)
    {
        eq_chain->bd_chain.bd_chain_virt = mm_alloc_phys_mem(pdev,
                                                             mem_size,
                                                             &eq_chain->bd_chain.bd_chain_phy,
                                                             0,
                                                             cli_idx);

        if (ERR_IF(!eq_chain->bd_chain.bd_chain_virt))
        {
            DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
            return LM_STATUS_RESOURCE;
        }

        eq_addr_save->bd_chain_virt = eq_chain->bd_chain.bd_chain_virt;
        eq_addr_save->bd_chain_phy.as_u64 = eq_chain->bd_chain.bd_chain_phy.as_u64;
        eq_addr_save->b_allocated = TRUE;
        // For debugging
        eq_addr_save->prev_mem_size = mem_size;
    }
    else
    {
        DbgBreakIf(mem_size != eq_addr_save->prev_mem_size);
        eq_chain->bd_chain.bd_chain_virt = eq_addr_save->bd_chain_virt;
        eq_chain->bd_chain.bd_chain_phy.as_u64 = eq_addr_save->bd_chain_phy.as_u64;
    }
    mm_memset(eq_chain->bd_chain.bd_chain_virt, 0, mem_size);

    eq_chain->bd_chain.page_cnt = page_cnt;


    return LM_STATUS_SUCCESS;
} /* lm_l5_alloc_eq */


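/** Description
 *  Initializes the iSCSI EQ BD chain for the given index and points the
 *  chain's hardware consumer at HC_INDEX_ISCSI_EQ_CONS in the status block.
 */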
lm_status_t
lm_sc_setup_eq(
    IN struct _lm_device_t *pdev,
    IN u32_t                idx,
    IN const u8_t           is_chain_mode)
{
    lm_bd_chain_t * bd_chain;
    u16_t volatile * sb_indexes;

    /* check arguments */
    if(CHK_NULL(pdev) || ERR_IF((ARRSIZE(pdev->iscsi_info.run_time.eq_chain) <= idx)))
    {
        return LM_STATUS_INVALID_PARAMETER;
    }

    DbgMessage(pdev, INFORMi|INFORMl5sp, "#lm_sc_setup_eq, idx=%d\n",idx);

    bd_chain = &LM_SC_EQ(pdev, idx).bd_chain;
    lm_bd_chain_setup(pdev, bd_chain, bd_chain->bd_chain_virt,
                      bd_chain->bd_chain_phy, (u16_t)bd_chain->page_cnt, sizeof(struct iscsi_kcqe), 1/*0*/, is_chain_mode);

    /* verify that EQ size is not too large */
    if(bd_chain->capacity > MAX_EQ_SIZE_ISCSI(is_chain_mode))
    {
        DbgBreakIf(bd_chain->capacity > MAX_EQ_SIZE_ISCSI(is_chain_mode));
        return LM_STATUS_FAILURE;
    }

    DbgMessage(pdev, INFORMi, "is eq %d, bd_chain %p, bd_left %d\n",
        idx,
        bd_chain->next_bd,
        bd_chain->bd_left);
    DbgMessage(pdev, INFORMi, "   bd_chain_phy 0x%x%08x\n",
        bd_chain->bd_chain_phy.as_u32.high,
        bd_chain->bd_chain_phy.as_u32.low);

    // Assign the EQ chain consumer pointer to the consumer index in the status block.
    if( idx >= ARRSIZE(pdev->vars.status_blocks_arr) )
    {
        DbgBreakIf( idx >= ARRSIZE(pdev->vars.status_blocks_arr) );
        return LM_STATUS_FAILURE;
    }

    sb_indexes = lm_get_sb_indexes(pdev, (u8_t)idx);
    sb_indexes[HC_INDEX_ISCSI_EQ_CONS] = 0;
    LM_SC_EQ(pdev, idx).hw_con_idx_ptr = sb_indexes + HC_INDEX_ISCSI_EQ_CONS;
/*
    if (IS_E2(pdev)) {
        pdev->vars.status_blocks_arr[idx].host_hc_status_block.e2_sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS] = 0;
        LM_SC_EQ(pdev, idx).hw_con_idx_ptr =
            &(pdev->vars.status_blocks_arr[idx].host_hc_status_block.e2_sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS]);
    } else {
        pdev->vars.status_blocks_arr[idx].host_hc_status_block.e1x_sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS] = 0;
        LM_SC_EQ(pdev, idx).hw_con_idx_ptr =
            &(pdev->vars.status_blocks_arr[idx].host_hc_status_block.e1x_sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS]);
    }
 */
    LM_SC_EQ(pdev, idx).hc_sb_info.hc_sb = STATUS_BLOCK_NORMAL_TYPE; //STATUS_BLOCK_CSTORM_TYPE;
    LM_SC_EQ(pdev, idx).hc_sb_info.hc_index_value = HC_INDEX_ISCSI_EQ_CONS;

    return LM_STATUS_SUCCESS;
} /* lm_sc_setup_eq */

/**
 *
 * @description
 * Allocate the EQ PBL that is passed to the FW in the init ramrod.
 * @param pdev
 * @param eq_chain
 * @param pbl
 * @param eq_addr_save
 *
 * @return lm_status_t
 */
lm_status_t
lm_fc_alloc_eq_pbl(
    IN struct   _lm_device_t  *pdev,
    IN          lm_eq_chain_t *eq_chain,
    IN          lm_fcoe_pbl_t *pbl,
    IN          lm_eq_addr_t  *eq_addr_save)
{
    lm_status_t     lm_status = LM_STATUS_SUCCESS;

    /* check arguments */
    if(CHK_NULL(pdev))
    {
        return LM_STATUS_INVALID_PARAMETER;
    }

    DbgMessage(pdev, INFORMi|INFORMl5sp, "#lm_fc_alloc_eq_pbl\n");

    // For D3 case
    if(FALSE == pbl->allocated)
    {
        lm_status = lm_create_pbl(pdev,
                                  eq_chain->bd_chain.bd_chain_virt,
                                  &(eq_chain->bd_chain.bd_chain_phy),
                                  eq_addr_save->prev_mem_size,
                                  &pbl->pbl_phys_table_virt,
                                  &pbl->pbl_phys_table_phys,
                                  &pbl->pbl_virt_table,
                                  &pbl->pbl_entries,
                                  &pbl->pbl_size,
                                  FALSE,
                                  LM_CLI_IDX_FCOE);

        if (lm_status != LM_STATUS_SUCCESS)
        {
            /* zero the PBL struct itself, not the pointer to it */
            mm_mem_zero(pbl, sizeof(lm_fcoe_pbl_t));
            return LM_STATUS_FAILURE;
        }
        pbl->allocated = TRUE;
    }
    return lm_status;
}
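/** Description
 *  Initializes the FCoE EQ BD chain (PBL mode) for the given index and
 *  points the chain's hardware consumer at HC_INDEX_FCOE_EQ_CONS in the
 *  status block.
 */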
lm_status_t
lm_fc_setup_eq(
    IN struct   _lm_device_t  *pdev,
    IN          u32_t         idx,
    IN const    u8_t          is_chain_mode)
{
    lm_bd_chain_t   * bd_chain;
    lm_fcoe_pbl_t   * pbl;
    u16_t volatile  * sb_indexes;

    /* check arguments */
    if(CHK_NULL(pdev) || ERR_IF((ARRSIZE(pdev->fcoe_info.run_time.eq_chain) <= idx)))
    {
        return LM_STATUS_INVALID_PARAMETER;
    }

    DbgMessage(pdev, INFORMi|INFORMl5sp, "#lm_fc_setup_eq, idx=%d\n",idx);

    bd_chain = &LM_FC_EQ(pdev, idx).bd_chain;
    pbl = &LM_FC_PBL(pdev, idx);
    lm_bd_chain_pbl_setup(pdev, bd_chain, bd_chain->bd_chain_virt,
                      bd_chain->bd_chain_phy, pbl->pbl_virt_table, pbl->pbl_phys_table_virt,
                      (u16_t)bd_chain->page_cnt, sizeof(struct fcoe_kcqe),
                      1/*0*/); /* EQ is considered full of blank entries */

    /* verify that EQ size is not too large */
    if (bd_chain->capacity > MAX_EQ_SIZE_FCOE(is_chain_mode))
    {
        DbgBreakIf(bd_chain->capacity > MAX_EQ_SIZE_FCOE(is_chain_mode));
        return LM_STATUS_FAILURE;
    }

    DbgMessage(pdev, INFORMi, "fc eq %d, bd_chain %p, bd_left %d\n",
        idx,
        bd_chain->next_bd,
        bd_chain->bd_left);
    DbgMessage(pdev, INFORMi, "   bd_chain_phy 0x%x%08x\n",
        bd_chain->bd_chain_phy.as_u32.high,
        bd_chain->bd_chain_phy.as_u32.low);

    // Assign the EQ chain consumer pointer to the consumer index in the status block.
    if (idx >= ARRSIZE(pdev->vars.status_blocks_arr))
    {
        DbgBreakIf( idx >= ARRSIZE(pdev->vars.status_blocks_arr) );
        return LM_STATUS_FAILURE;
    }

    sb_indexes = lm_get_sb_indexes(pdev, (u8_t)idx);
    sb_indexes[HC_INDEX_FCOE_EQ_CONS] = 0;
    LM_FC_EQ(pdev, idx).hw_con_idx_ptr = sb_indexes + HC_INDEX_FCOE_EQ_CONS;
/*
    if (IS_E2(pdev)) {
        pdev->vars.status_blocks_arr[idx].host_hc_status_block.e2_sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS] = 0;
        LM_FC_EQ(pdev, idx).hw_con_idx_ptr =
            &(pdev->vars.status_blocks_arr[idx].host_hc_status_block.e2_sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS]);
    } else {
        pdev->vars.status_blocks_arr[idx].host_hc_status_block.e1x_sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS] = 0;
        LM_FC_EQ(pdev, idx).hw_con_idx_ptr =
            &(pdev->vars.status_blocks_arr[idx].host_hc_status_block.e1x_sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS]);
    }
*/
    LM_FC_EQ(pdev, idx).hc_sb_info.hc_sb = STATUS_BLOCK_NORMAL_SL_TYPE; //STATUS_BLOCK_USTORM_TYPE;
    LM_FC_EQ(pdev, idx).hc_sb_info.hc_index_value = HC_INDEX_FCOE_EQ_CONS;

    return LM_STATUS_SUCCESS;
} /* lm_fc_setup_eq */


/** Description
 *  Callback function for cids being recycled
 */
void lm_sc_recycle_cid_cb(
    struct _lm_device_t *pdev,
    void *cookie,
    s32_t cid)
{
    lm_status_t lm_status;
    lm_sp_req_common_t * sp_req = NULL;
    lm_iscsi_state_t * iscsi = (lm_iscsi_state_t *)cookie;

    if (CHK_NULL(pdev) || CHK_NULL(iscsi))
    {
        DbgBreakIf(1);
        return;
    }

    MM_ACQUIRE_TOE_LOCK(pdev);

    /* un-block the manager... */
    lm_set_cid_state(pdev, iscsi->cid, LM_CID_STATE_VALID);

    if (iscsi->hdr.status == STATE_STATUS_INIT_CONTEXT)
    {
        lm_status = lm_sc_init_iscsi_context(pdev,
                                             iscsi,
                                             &iscsi->pending_ofld1,
                                             &iscsi->pending_ofld2,
                                             &iscsi->pending_ofld3);

        mm_sc_complete_offload_request(pdev, iscsi, lm_status);
    }

    /* we can now unblock any pending slow-paths */
    lm_sp_req_manager_unblock(pdev, cid, &sp_req);

    MM_RELEASE_TOE_LOCK(pdev);
}
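/** Description
 *  SQ completion callback for iSCSI commands that are completed
 *  internally by the driver: builds a success KCQE for the pending
 *  command and runs it through the regular slow-path completion flow.
 */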
void lm_sc_comp_cb(struct _lm_device_t *pdev, struct sq_pending_command *pending)
{
    struct iscsi_kcqe kcqe  = {0};
    lm_iscsi_state_t *iscsi = NULL;
    u32_t            cid;
    u8_t             cmd;


    if (CHK_NULL(pdev) || CHK_NULL(pending))
    {
        return;
    }

    cmd = pending->cmd;
    cid = pending->cid;

    iscsi = lm_cid_cookie(pdev, ISCSI_CONNECTION_TYPE, cid);

    if (iscsi)
    {
        kcqe.iscsi_conn_id         = iscsi->iscsi_conn_id;
        kcqe.iscsi_conn_context_id = HW_CID(pdev, cid);
    }

    kcqe.completion_status = LM_STATUS_SUCCESS; /* TODO_ER: Fixme: do we want this?? maybe ok since l5 is aware of er... */

    kcqe.op_code = cmd; /* In iSCSI they are the same */

    kcqe.flags |= (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);

    lm_sc_complete_slow_path_request(pdev, &kcqe);
}
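/** Description
 *  Allocates iSCSI resources: the global buffer, plus registration of
 *  the CID-recycle and SQ-completion callbacks.
 */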
lm_status_t
lm_sc_alloc_resc(
    IN struct _lm_device_t *pdev
    )
{
    u8_t        mm_cli_idx  = LM_RESOURCE_ISCSI;
    u8_t        *chk_buf    = NULL;
    u16_t       i           = 0;

    if CHK_NULL(pdev)
    {
        return LM_STATUS_INVALID_PARAMETER;
    }

    mm_mem_zero(&pdev->iscsi_info, sizeof(lm_iscsi_info_t));

    /* Allocate global buffer */
    pdev->iscsi_info.bind.global_buff_base_virt = (u8_t*)mm_alloc_phys_mem(pdev,
                                                                      ISCSI_GLOBAL_BUF_SIZE,
                                                                      &pdev->iscsi_info.bind.global_buff_base_phy,
                                                                      0,
                                                                      mm_cli_idx);
    if CHK_NULL(pdev->iscsi_info.bind.global_buff_base_virt)
    {
        return LM_STATUS_RESOURCE;
    }

    /* cid recycled cb registration */
    lm_cid_recycled_cb_register(pdev, ISCSI_CONNECTION_TYPE, lm_sc_recycle_cid_cb);

    /* SQ-completion cb registration (SQ commands that are completed internally in the driver) */
    lm_sq_comp_cb_register(pdev, ISCSI_CONNECTION_TYPE, lm_sc_comp_cb);

    chk_buf = (u8_t *)(&(pdev->iscsi_info.eq_addr_save));
    // Except for the global buffer, all other fields of pdev->iscsi_info should still be zero
    for (i = 0; i < sizeof(pdev->iscsi_info.eq_addr_save); i++)
    {
        DbgBreakIf(0 != chk_buf[i]);
    }

    chk_buf = (u8_t *)(&(pdev->iscsi_info.run_time));
    // Except for the global buffer, all other fields of pdev->iscsi_info should still be zero
    for (i = 0; i < sizeof(pdev->iscsi_info.run_time); i++)
    {
        DbgBreakIf(0 != chk_buf[i]);
    }
    return LM_STATUS_SUCCESS;
} /* lm_sc_alloc_resc */

/*******************************************************************************
 * Description:
 *     Computes the number of EQ pages needed to hold the given number of
 *     connections plus the reserved elements, capped at max_eq_pages.
 * Return:
 *     EQ page count.
 ******************************************************************************/
u16_t
lm_l5_eq_page_cnt(
    IN struct       _lm_device_t *pdev,
    const u32_t     max_func_cons,
    const u16_t     reserved_eq_elements,
    const u16_t     eqes_per_page,
    const u16_t     max_eq_pages
    )
{
    u16_t eq_page_cnt = 0;
    u16_t min_eq_size = 0;

    /* Init EQs - create page chains */
    min_eq_size = (u16_t)(max_func_cons + reserved_eq_elements);
    eq_page_cnt = CEIL_DIV(min_eq_size, (eqes_per_page));
    eq_page_cnt = min(eq_page_cnt, max_eq_pages);

    return eq_page_cnt;
}

/*******************************************************************************
 * Description:
 *     Resets the FCoE run-time state.
 * Return:
 ******************************************************************************/
lm_status_t
lm_fc_free_init_resc(
    IN struct _lm_device_t *pdev
    )
{
    lm_status_t         lm_status   = LM_STATUS_SUCCESS;
    u16_t               eq_sb_idx   = 0;
    u16_t               eq_page_cnt = 0;

    if (CHK_NULL(pdev))
    {
        DbgBreakMsg("lm_fc_free_init_resc failed");
        return LM_STATUS_INVALID_PARAMETER;
    }

    mm_memset(&(pdev->fcoe_info.run_time), 0, sizeof(pdev->fcoe_info.run_time));
    return lm_status;
}


lm_status_t
lm_fc_clear_d0_resc(
    IN struct _lm_device_t *pdev,
    const u8_t cid
    )
{
    lm_status_t lm_status = LM_STATUS_SUCCESS;
    u8_t eq_idx = 0;

    if CHK_NULL(pdev)
    {
        return LM_STATUS_INVALID_PARAMETER;
    }

    LM_FC_FOREACH_EQ_IDX(pdev, eq_idx)
    {
        lm_clear_chain_sb_cons_idx(pdev, eq_idx, &LM_FC_EQ(pdev, eq_idx).hc_sb_info, &LM_FC_EQ(pdev, eq_idx).hw_con_idx_ptr);
    }

    lm_status = lm_fc_free_init_resc(pdev);

    return lm_status;
} /* lm_fc_clear_d0_resc */

lm_status_t
lm_fc_clear_resc(
    IN struct _lm_device_t *pdev
    )
{
    lm_status_t lm_status = LM_STATUS_SUCCESS;
    const u8_t cid  = FCOE_CID(pdev);

    if CHK_NULL(pdev)
    {
        return LM_STATUS_INVALID_PARAMETER;
    }

    lm_fc_clear_d0_resc(
        pdev,
        cid);
    s_list_init(&LM_RXQ(pdev, cid).active_descq, NULL, NULL, 0);
    s_list_init(&LM_RXQ(pdev, cid).common.free_descq, NULL, NULL, 0);

    return lm_status;
} /* lm_fc_clear_resc */


/*******************************************************************************
 * Description:
 *     Resets the iSCSI run-time state.
 * Return:
 ******************************************************************************/
lm_status_t
lm_sc_free_init_resc(
    IN struct _lm_device_t *pdev
    )
{
    lm_status_t         lm_status   = LM_STATUS_SUCCESS;
    u16_t               eq_sb_idx   = 0;
    u16_t               eq_page_cnt = 0;

    if (CHK_NULL(pdev))
    {
        DbgBreakMsg("lm_sc_free_init_resc failed");
        return LM_STATUS_INVALID_PARAMETER;
    }

    mm_memset(&(pdev->iscsi_info.run_time), 0, sizeof(pdev->iscsi_info.run_time));
    return lm_status;
}


lm_status_t
lm_sc_clear_d0_resc(
    IN struct _lm_device_t *pdev,
    const u8_t cid
    )
{
    lm_status_t lm_status = LM_STATUS_SUCCESS;
    u8_t eq_idx = 0;

    if CHK_NULL(pdev)
    {
        return LM_STATUS_INVALID_PARAMETER;
    }

    LM_SC_FOREACH_EQ_IDX(pdev, eq_idx)
    {
        lm_clear_chain_sb_cons_idx(pdev, eq_idx, &LM_SC_EQ(pdev, eq_idx).hc_sb_info, &LM_SC_EQ(pdev, eq_idx).hw_con_idx_ptr);
    }

    lm_status = lm_sc_free_init_resc(pdev);

    return lm_status;
} /* lm_sc_clear_d0_resc */

lm_status_t
lm_sc_clear_resc(
    IN struct _lm_device_t *pdev
    )
{
    lm_status_t lm_status = LM_STATUS_SUCCESS;
    const u8_t cid  = ISCSI_CID(pdev);

    if CHK_NULL(pdev)
    {
        return LM_STATUS_INVALID_PARAMETER;
    }

    lm_sc_clear_d0_resc(
        pdev,
        cid);
    s_list_init(&LM_RXQ(pdev, cid).active_descq, NULL, NULL, 0);
    s_list_init(&LM_RXQ(pdev, cid).common.free_descq, NULL, NULL, 0);

    return lm_status;
} /* lm_sc_clear_resc */


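/** Description
 *  Programs the Tstorm internal RAM with the iSCSI OOO (out-of-order)
 *  L2 chain information: the consumer index, the HW CID and the FW
 *  client id of the OOO connection.
 */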
lm_status_t
lm_sc_ooo_chain_establish(
    IN struct _lm_device_t *pdev)
{
    lm_status_t             lm_status = LM_STATUS_SUCCESS;
    const u32_t             func      = FUNC_ID(pdev);

    if CHK_NULL(pdev)
    {
        lm_status = LM_STATUS_INVALID_PARAMETER;
        return lm_status;
    }
    LM_INTMEM_WRITE32(pdev,
                      TSTORM_ISCSI_L2_ISCSI_OOO_CONS_OFFSET(func),
                      0,
                      BAR_TSTRORM_INTMEM);

    LM_INTMEM_WRITE32(pdev,
                      TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(func),
                      HW_CID(pdev, OOO_CID(pdev)),
                      BAR_TSTRORM_INTMEM);

    LM_INTMEM_WRITE32(pdev,
                      TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(func),
                      LM_FW_CLI_ID(pdev, OOO_CID(pdev)),
                      BAR_TSTRORM_INTMEM);


    return lm_status;
}


/*******************************************************************************
 * Description:
 *     Handles the iSCSI init ramrod KWQEs: validates the parameters,
 *     allocates and sets up the event queues, and programs the per-function
 *     iSCSI parameters into the storms' internal RAM.
 * Return:
 ******************************************************************************/
lm_status_t
lm_sc_init(
    IN struct _lm_device_t *pdev,
    IN struct iscsi_kwqe_init1  *req1,
    IN struct iscsi_kwqe_init2  *req2
    )
{
    lm_status_t                  lm_status;
    u16_t                        eq_page_cnt;
    u32_t                        hq_size_in_bytes;
    u32_t                        hq_pbl_entries;
    u32_t                        eq_idx;
    u16_t                        eq_sb_idx;
    u32_t                        page_size_bits;
    u8_t                         delayed_ack_en              = 0;
    const u8_t                   is_chain_mode               = TRUE;
    const u32_t                  func                        = FUNC_ID(pdev);
    struct tstorm_l5cm_tcp_flags tstorm_l5cm_tcp_flags_param = {0};

    if (CHK_NULL(req1) || CHK_NULL(req2))
    {
        return LM_STATUS_FAILURE;
    }

    DbgMessage(pdev, INFORM, "### lm_sc_init\n");

    page_size_bits = GET_FIELD(req1->flags, ISCSI_KWQE_INIT1_PAGE_SIZE);
    if (LM_PAGE_BITS - ISCSI_PAGE_BITS_SHIFT != page_size_bits)
    {
        DbgMessage(pdev, INFORM, "lm_sc_init: Illegal page size.\n");
        return LM_STATUS_FAILURE;
    }

    if(ISCSI_HSI_VERSION != req1->hsi_version)
    {
        return LM_STATUS_INVALID_PARAMETER;
    }

    delayed_ack_en = GET_FIELD(req1->flags, ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE);

    pdev->iscsi_info.run_time.num_of_tasks = req1->num_tasks_per_conn;
    pdev->iscsi_info.run_time.cq_size      = req1->cq_num_wqes;
    pdev->iscsi_info.run_time.num_of_cqs   = req1->num_cqs;

    /* the number of cqs is used to determine the number of eqs */
    if (pdev->iscsi_info.run_time.num_of_cqs > MAX_EQ_CHAIN)
    {
        DbgBreakIf(pdev->iscsi_info.run_time.num_of_cqs > MAX_EQ_CHAIN);
        pdev->iscsi_info.run_time.num_of_cqs = MAX_EQ_CHAIN;
    }
    pdev->iscsi_info.run_time.l5_eq_chain_cnt     = pdev->iscsi_info.run_time.num_of_cqs;
    pdev->iscsi_info.run_time.l5_eq_max_chain_cnt = MAX_EQ_CHAIN;
    // Only one EQ chain is supported.

    if ((pdev->iscsi_info.run_time.l5_eq_chain_cnt > 1) ||
        (pdev->params.sb_cnt < pdev->iscsi_info.run_time.l5_eq_chain_cnt))
    {
        DbgMessage(pdev, INFORM, "lm_sc_init: l5_eq_chain_cnt=%d\n", pdev->iscsi_info.run_time.l5_eq_chain_cnt);
        DbgBreakMsg("lm_sc_init: pdev->iscsi_info.l5_eq_chain_cnt is bigger than 1.\n");
        return LM_STATUS_FAILURE;
    }
    DbgBreakIf(pdev->iscsi_info.run_time.l5_eq_chain_cnt > 1);
    DbgBreakIf(pdev->params.sb_cnt < pdev->iscsi_info.run_time.l5_eq_chain_cnt);
    /* As in TOE, when RSS is disabled, ISCSI and FCOE will use the same NDSB. */
    pdev->iscsi_info.run_time.l5_eq_base_chain_idx = LM_NON_RSS_SB(pdev);

//    if (!pdev->params.l4_enable_rss) {
//        RESET_FLAGS(pdev->params.sb_cpu_affinity, 1 << LM_TOE_RSS_BASE_CHAIN_INDEX(&pdev->lmdev));
//    }


    /* round up HQ size to fill an entire page */
    hq_size_in_bytes = req1->num_ccells_per_conn * sizeof(struct iscsi_hq_bd);
    hq_pbl_entries = lm_get_pbl_entries(hq_size_in_bytes);
    pdev->iscsi_info.run_time.hq_size = (u16_t)(hq_pbl_entries * (LM_PAGE_SIZE / sizeof(struct iscsi_hq_bd)));

    /* Init EQs - create page chains */
    // The size of the EQ in iSCSI is <num iscsi connections> * 2 + slowpath,
    // i.e. for each connection there should be room for 1 fastpath completion and 1 error notification.
    eq_page_cnt = lm_l5_eq_page_cnt(pdev,
                                    (u16_t)(pdev->params.max_func_iscsi_cons * 2),
                                    RESERVED_ISCSI_EQ_ELEMENTS,
                                    (ISCSI_EQES_PER_PAGE(is_chain_mode)),
                                    MAX_EQ_PAGES); // the per-page EQE count already subtracts the next-BD page entry

    LM_SC_FOREACH_EQ_IDX(pdev, eq_sb_idx)
    {
        lm_status = lm_l5_alloc_eq(pdev, &LM_SC_EQ(pdev, eq_sb_idx), &LM_EQ_ADDR_SAVE_SC(pdev, eq_sb_idx), eq_page_cnt, LM_CLI_IDX_ISCSI);
        if (lm_status != LM_STATUS_SUCCESS)
        {
            return lm_status;
        }

        lm_status = lm_sc_setup_eq(pdev, eq_sb_idx, is_chain_mode);
        if (lm_status != LM_STATUS_SUCCESS)
        {
            return lm_status;
        }
    }

    SET_FLAGS( tstorm_l5cm_tcp_flags_param.flags, delayed_ack_en << TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN_SHIFT);

    // in case the size changes, we need to change the LM_INTMEM_WRITExx macro etc...
    ASSERT_STATIC( sizeof(tstorm_l5cm_tcp_flags_param) == sizeof(u16_t) );

    /* Init internal RAM */
    ASSERT_STATIC(sizeof(struct regpair_t) == sizeof(lm_address_t));

    /* init Tstorm RAM */
    LM_INTMEM_WRITE16(pdev, TSTORM_ISCSI_RQ_SIZE_OFFSET(func),           req1->rq_num_wqes, BAR_TSTRORM_INTMEM);
    LM_INTMEM_WRITE16(pdev, TSTORM_ISCSI_PAGE_SIZE_OFFSET(func),         LM_PAGE_SIZE, BAR_TSTRORM_INTMEM);
    LM_INTMEM_WRITE8 (pdev, TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func),     LM_PAGE_BITS, BAR_TSTRORM_INTMEM);
    LM_INTMEM_WRITE32(pdev, TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(func), 0x100000, BAR_TSTRORM_INTMEM);
    LM_INTMEM_WRITE16(pdev, TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func),      req1->num_tasks_per_conn, BAR_TSTRORM_INTMEM);
    LM_INTMEM_WRITE64(pdev, TSTORM_ISCSI_ERROR_BITMAP_OFFSET(func),      *((u64_t *)&req2->error_bit_map), BAR_TSTRORM_INTMEM);
    LM_INTMEM_WRITE16(pdev, TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(func),    tstorm_l5cm_tcp_flags_param.flags, BAR_TSTRORM_INTMEM);

    /* init Ustorm RAM */
    LM_INTMEM_WRITE16(pdev, USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(func), req1->rq_buffer_size, BAR_USTRORM_INTMEM);
    LM_INTMEM_WRITE16(pdev, USTORM_ISCSI_PAGE_SIZE_OFFSET(func), LM_PAGE_SIZE, BAR_USTRORM_INTMEM);
    LM_INTMEM_WRITE8 (pdev, USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), LM_PAGE_BITS, BAR_USTRORM_INTMEM);
    LM_INTMEM_WRITE16(pdev, USTORM_ISCSI_NUM_OF_TASKS_OFFSET(func), req1->num_tasks_per_conn, BAR_USTRORM_INTMEM);
    LM_INTMEM_WRITE16(pdev, USTORM_ISCSI_RQ_SIZE_OFFSET(func), req1->rq_num_wqes, BAR_USTRORM_INTMEM);
    LM_INTMEM_WRITE16(pdev, USTORM_ISCSI_CQ_SIZE_OFFSET(func), req1->cq_num_wqes, BAR_USTRORM_INTMEM);
    LM_INTMEM_WRITE16(pdev, USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(func), req2->max_cq_sqn, BAR_USTRORM_INTMEM);
    LM_INTMEM_WRITE16(pdev, USTORM_ISCSI_R2TQ_SIZE_OFFSET(func), (u16_t)pdev->iscsi_info.run_time.num_of_tasks * ISCSI_MAX_NUM_OF_PENDING_R2TS, BAR_USTRORM_INTMEM);
    LM_INTMEM_WRITE64(pdev, USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(func), pdev->iscsi_info.bind.global_buff_base_phy.as_u64, BAR_USTRORM_INTMEM);
    LM_INTMEM_WRITE64(pdev, USTORM_ISCSI_ERROR_BITMAP_OFFSET(func), *((u64_t *)&req2->error_bit_map), BAR_USTRORM_INTMEM);

    /* init Xstorm RAM */
    LM_INTMEM_WRITE16(pdev, XSTORM_ISCSI_PAGE_SIZE_OFFSET(func), LM_PAGE_SIZE, BAR_XSTRORM_INTMEM);
    LM_INTMEM_WRITE8 (pdev, XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), LM_PAGE_BITS, BAR_XSTRORM_INTMEM);
    LM_INTMEM_WRITE16(pdev, XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func), req1->num_tasks_per_conn, BAR_XSTRORM_INTMEM);
    LM_INTMEM_WRITE16(pdev, XSTORM_ISCSI_HQ_SIZE_OFFSET(func), pdev->iscsi_info.run_time.hq_size, BAR_XSTRORM_INTMEM);
    LM_INTMEM_WRITE16(pdev, XSTORM_ISCSI_SQ_SIZE_OFFSET(func), req1->num_tasks_per_conn, BAR_XSTRORM_INTMEM);
    LM_INTMEM_WRITE16(pdev, XSTORM_ISCSI_R2TQ_SIZE_OFFSET(func), req1->num_tasks_per_conn * ISCSI_MAX_NUM_OF_PENDING_R2TS, BAR_XSTRORM_INTMEM);

    /* init Cstorm RAM */
    LM_INTMEM_WRITE16(pdev, CSTORM_ISCSI_PAGE_SIZE_OFFSET(func), LM_PAGE_SIZE, BAR_CSTRORM_INTMEM);
    LM_INTMEM_WRITE8 (pdev, CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), LM_PAGE_BITS, BAR_CSTRORM_INTMEM);
    LM_INTMEM_WRITE16(pdev, CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func), req1->num_tasks_per_conn, BAR_CSTRORM_INTMEM);
    LM_SC_FOREACH_EQ_IDX(pdev, eq_sb_idx)
    {
        eq_idx = eq_sb_idx - pdev->iscsi_info.run_time.l5_eq_base_chain_idx;
        LM_INTMEM_WRITE16(pdev, CSTORM_ISCSI_EQ_PROD_OFFSET(func, eq_idx), lm_bd_chain_prod_idx(&LM_SC_EQ(pdev, eq_sb_idx).bd_chain), BAR_CSTRORM_INTMEM);
        LM_INTMEM_WRITE16(pdev, CSTORM_ISCSI_EQ_CONS_OFFSET(func, eq_idx), 0, BAR_CSTRORM_INTMEM);
        LM_INTMEM_WRITE32(pdev, CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, eq_idx), lm_bd_chain_phys_addr(&LM_SC_EQ(pdev, eq_sb_idx).bd_chain, 1).as_u32.low, BAR_CSTRORM_INTMEM);
        LM_INTMEM_WRITE32(pdev, 4 + CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, eq_idx), lm_bd_chain_phys_addr(&LM_SC_EQ(pdev, eq_sb_idx).bd_chain, 1).as_u32.high, BAR_CSTRORM_INTMEM);
        LM_INTMEM_WRITE32(pdev, CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, eq_idx), lm_bd_chain_phys_addr(&LM_SC_EQ(pdev, eq_sb_idx).bd_chain, 0).as_u32.low, BAR_CSTRORM_INTMEM);
        LM_INTMEM_WRITE32(pdev, 4 + CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, eq_idx), lm_bd_chain_phys_addr(&LM_SC_EQ(pdev, eq_sb_idx).bd_chain, 0).as_u32.high, BAR_CSTRORM_INTMEM);
        LM_INTMEM_WRITE8 (pdev, CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(func, eq_idx), 1, BAR_CSTRORM_INTMEM); // maybe move to init tool
        LM_INTMEM_WRITE16(pdev, CSTORM_ISCSI_EQ_SB_NUM_OFFSET(func, eq_idx), LM_FW_SB_ID(pdev, eq_sb_idx), BAR_CSTRORM_INTMEM);
        LM_INTMEM_WRITE8 (pdev, CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(func, eq_idx), HC_INDEX_ISCSI_EQ_CONS, BAR_CSTRORM_INTMEM);
    }
    LM_INTMEM_WRITE16(pdev, CSTORM_ISCSI_HQ_SIZE_OFFSET(func), pdev->iscsi_info.run_time.hq_size, BAR_CSTRORM_INTMEM);
    LM_INTMEM_WRITE16(pdev, CSTORM_ISCSI_CQ_SIZE_OFFSET(func), req1->cq_num_wqes, BAR_CSTRORM_INTMEM);
    LM_INTMEM_WRITE16(pdev, CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(func), req2->max_cq_sqn, BAR_CSTRORM_INTMEM);

    return LM_STATUS_SUCCESS;
} /* lm_sc_init */



/* Get dma memory for init ramrod */
STATIC lm_status_t
lm_fc_get_ramrod_phys_mem(
    IN struct _lm_device_t *pdev)
{

    if CHK_NULL(pdev->fcoe_info.bind.ramrod_mem_virt)
    {
        pdev->fcoe_info.bind.ramrod_mem_virt =
        mm_alloc_phys_mem(pdev,
                          sizeof(lm_fcoe_slow_path_phys_data_t),
                          &pdev->fcoe_info.bind.ramrod_mem_phys,
                          0,
                          LM_CLI_IDX_FCOE);

        if CHK_NULL(pdev->fcoe_info.bind.ramrod_mem_virt)
        {
            return LM_STATUS_RESOURCE;
        }
    }
    return LM_STATUS_SUCCESS;
}


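/** Description
 *  Handles the FCoE init KWQEs: allocates and sets up the event queue
 *  and its PBL, copies the init KWQEs into the ramrod buffer and posts
 *  the FCOE_RAMROD_CMD_ID_INIT_FUNC ramrod. Completion is asynchronous.
 */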
lm_status_t
lm_fc_init(
    IN struct _lm_device_t          *pdev,
    IN struct fcoe_kwqe_init1       *init1,
    IN struct fcoe_kwqe_init2       *init2,
    IN struct fcoe_kwqe_init3       *init3)
{
    lm_status_t                     lm_status;
    lm_fcoe_slow_path_phys_data_t   *ramrod_params;
    u16_t                           eq_page_cnt;
    u16_t                           eq_sb_idx;
    u32_t                           func;
    u32_t                           port;
    const u8_t                      is_chain_mode = FALSE;

    if (CHK_NULL(pdev) || CHK_NULL(init1) || CHK_NULL(init2) || CHK_NULL(init3))
    {
        return LM_STATUS_INVALID_PARAMETER;
    }

    func = FUNC_ID(pdev);
    port = PORT_ID(pdev);

    DbgMessage(pdev, INFORM, "### lm_fc_init\n");

    pdev->fcoe_info.run_time.num_of_cqs = 1;                 // one EQ

    // Only one EQ chain is supported.
    if ((pdev->fcoe_info.run_time.num_of_cqs > 1) ||
        (pdev->params.sb_cnt < pdev->fcoe_info.run_time.num_of_cqs))
    {
        DbgMessage(pdev, INFORM, "lm_fc_init: num_of_cqs=%d\n", pdev->fcoe_info.run_time.num_of_cqs);
        DbgBreakMsg("lm_fc_init: pdev->fcoe_info.run_time.num_of_cqs is bigger than 1.\n");
        return LM_STATUS_INVALID_PARAMETER;
    }
    DbgBreakIf(pdev->fcoe_info.run_time.num_of_cqs > 1);
    DbgBreakIf(pdev->params.sb_cnt < pdev->fcoe_info.run_time.num_of_cqs);
    /* As in TOE, when RSS is disabled, ISCSI and FCOE will use the same NDSB. */
    pdev->fcoe_info.run_time.fc_eq_base_chain_idx = LM_NON_RSS_SB(pdev);

    if(CHK_NULL(pdev->fcoe_info.bind.ramrod_mem_virt))
    {
        return LM_STATUS_RESOURCE;
    }
    ramrod_params = (lm_fcoe_slow_path_phys_data_t*)pdev->fcoe_info.bind.ramrod_mem_virt;

    // Init EQs - create page chains
    eq_page_cnt = lm_l5_eq_page_cnt(pdev,
                                    (u16_t)pdev->params.max_func_fcoe_cons,
                                    RESERVED_FCOE_EQ_ELEMENTS,
                                    FCOE_EQES_PER_PAGE(is_chain_mode),
                                    FCOE_MAX_EQ_PAGES_PER_FUNC);


    LM_FC_FOREACH_EQ_IDX(pdev, eq_sb_idx)
    {
        lm_status = lm_l5_alloc_eq(pdev, &LM_FC_EQ(pdev, eq_sb_idx), &LM_EQ_ADDR_SAVE_FC(pdev, eq_sb_idx), eq_page_cnt, LM_CLI_IDX_FCOE);
        if (lm_status != LM_STATUS_SUCCESS)
        {
            return lm_status;
        }

        lm_status = lm_fc_alloc_eq_pbl(pdev, &LM_FC_EQ(pdev, eq_sb_idx), &LM_FC_PBL(pdev, eq_sb_idx),
                                       &LM_EQ_ADDR_SAVE_FC(pdev, eq_sb_idx));
        if (lm_status != LM_STATUS_SUCCESS)
        {
            return lm_status;
        }

        lm_status = lm_fc_setup_eq(pdev, eq_sb_idx, is_chain_mode);
        if (lm_status != LM_STATUS_SUCCESS)
        {
            return lm_status;
        }
    }

    /* Set up the ramrod params */
    mm_memset(ramrod_params, 0, sizeof(lm_fcoe_slow_path_phys_data_t));

    memcpy(&ramrod_params->fcoe_init.init_kwqe1, init1, sizeof(struct fcoe_kwqe_init1));
    memcpy(&ramrod_params->fcoe_init.init_kwqe2, init2, sizeof(struct fcoe_kwqe_init2));
    memcpy(&ramrod_params->fcoe_init.init_kwqe3, init3, sizeof(struct fcoe_kwqe_init3));


    /* waiting for new HSI */
    ramrod_params->fcoe_init.eq_pbl_base.lo = mm_cpu_to_le32(LM_FC_PBL(pdev, pdev->fcoe_info.run_time.fc_eq_base_chain_idx).pbl_phys_table_phys.as_u32.low);
    ramrod_params->fcoe_init.eq_pbl_base.hi = mm_cpu_to_le32(LM_FC_PBL(pdev, pdev->fcoe_info.run_time.fc_eq_base_chain_idx).pbl_phys_table_phys.as_u32.high);
    ramrod_params->fcoe_init.eq_pbl_size = mm_cpu_to_le32(LM_FC_PBL(pdev, pdev->fcoe_info.run_time.fc_eq_base_chain_idx).pbl_entries);
    ramrod_params->fcoe_init.eq_prod = mm_cpu_to_le16(lm_bd_chain_prod_idx(&LM_FC_EQ(pdev, pdev->fcoe_info.run_time.fc_eq_base_chain_idx).bd_chain));
    ramrod_params->fcoe_init.sb_num = mm_cpu_to_le16(LM_FW_SB_ID(pdev, pdev->fcoe_info.run_time.fc_eq_base_chain_idx));
    ramrod_params->fcoe_init.sb_id = HC_INDEX_FCOE_EQ_CONS;

    if (IS_SD_UFP_MODE(pdev))
    {
        ramrod_params->fcoe_init.init_kwqe1.flags |= FCOE_KWQE_INIT1_CLASSIFY_FAILED_ALLOWED;
    }

    lm_status = lm_command_post(pdev,
                                LM_CLI_CID(pdev, LM_CLI_IDX_FCOE),      /* cid */
                                FCOE_RAMROD_CMD_ID_INIT_FUNC,
                                CMD_PRIORITY_NORMAL,
                                FCOE_CONNECTION_TYPE,
                                pdev->fcoe_info.bind.ramrod_mem_phys.as_u64);

    if (lm_status != LM_STATUS_SUCCESS)
    {
        /* only one we know of... */
        DbgBreakIf(lm_status != LM_STATUS_REQUEST_NOT_ACCEPTED);
        /* Command wasn't posted, so we need to complete it from here. */
    }

    // completion is asynchronous

    return LM_STATUS_SUCCESS;
} /* lm_fc_init */


/** Description
 *  Callback function for cids being recycled
 */
void
lm_fc_recycle_cid_cb(
    struct _lm_device_t             *pdev,
    void                            *cookie,
    s32_t                           cid)
{
    lm_status_t         lm_status;
    lm_sp_req_common_t  *sp_req = NULL;
    lm_fcoe_state_t     *fcoe = (lm_fcoe_state_t *)cookie;

    if (CHK_NULL(pdev) || CHK_NULL(fcoe))
    {
        DbgBreakIf(1);
        return;
    }

    MM_ACQUIRE_TOE_LOCK(pdev);

    /* un-block the manager... */
    lm_set_cid_state(pdev, fcoe->cid, LM_CID_STATE_VALID);

    lm_status = lm_fc_init_fcoe_context(pdev, fcoe);

    lm_status = lm_fc_post_offload_ramrod(pdev, fcoe);

    /* we can now unblock any pending slow-paths */
    lm_sp_req_manager_unblock(pdev, cid, &sp_req);

    MM_RELEASE_TOE_LOCK(pdev);
}
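/** Description
 *  SQ completion callback for FCoE commands that are completed
 *  internally by the driver: maps the pending ramrod command to its
 *  KCQE opcode and runs it through the slow-path completion flow.
 */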
void lm_fc_comp_cb(struct _lm_device_t *pdev, struct sq_pending_command *pending)
{
    struct fcoe_kcqe kcqe = {0};
    lm_fcoe_state_t *fcoe = NULL;
    u32_t            cid;
    u8_t             cmd;


    if (CHK_NULL(pdev) || CHK_NULL(pending))
    {
        return;
    }

    cmd = pending->cmd;
    cid = pending->cid;

    fcoe = lm_cid_cookie(pdev, FCOE_CONNECTION_TYPE, cid);

    if (fcoe)
    {
        kcqe.fcoe_conn_id         = fcoe->fcoe_conn_id;
        kcqe.fcoe_conn_context_id = HW_CID(pdev, cid);
    }

    kcqe.completion_status = LM_STATUS_SUCCESS; /* Fixme: do we want this?? maybe ok since l5 is aware of er... */

    switch (cmd)
    {
    case FCOE_RAMROD_CMD_ID_INIT_FUNC:
        kcqe.op_code = FCOE_KCQE_OPCODE_INIT_FUNC;
        break;

    case FCOE_RAMROD_CMD_ID_DESTROY_FUNC:
        kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_FUNC;
        break;

    case FCOE_RAMROD_CMD_ID_STAT_FUNC:
        kcqe.op_code = FCOE_KCQE_OPCODE_STAT_FUNC;
        break;

    case FCOE_RAMROD_CMD_ID_OFFLOAD_CONN:
        kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
        break;

    case FCOE_RAMROD_CMD_ID_ENABLE_CONN:
        kcqe.op_code = FCOE_KCQE_OPCODE_ENABLE_CONN;
        break;

    case FCOE_RAMROD_CMD_ID_DISABLE_CONN:
        kcqe.op_code = FCOE_KCQE_OPCODE_DISABLE_CONN;
        break;

    case FCOE_RAMROD_CMD_ID_TERMINATE_CONN:
        /* terminate is completed internally; lm_fc_complete_slow_path_request()
         * matches on the ramrod command id rather than a KCQE opcode */
        kcqe.op_code = FCOE_RAMROD_CMD_ID_TERMINATE_CONN;
        break;
    }

    lm_fc_complete_slow_path_request(pdev, &kcqe);
}

/**
 * @description
 * Returns the max number of FCoE tasks supported.
 * To know the max number of tasks actually enabled, refer to
 * pdev->params.max_fcoe_task.
 * @param pdev
 *
 * @return u32_t
 */
u32_t
lm_fc_max_fcoe_task_sup(
    IN struct _lm_device_t          *pdev)
{
    u32_t max_fcoe_task = MAX_NUM_FCOE_TASKS_PER_ENGINE;

    /* FCOE supports a maximum of MAX_FCOE_FUNCS_PER_ENGINE per engine.
     * In case of mf / 4-port mode it means we can have more than one fcoe function
     * on an engine - in which case we'll need to divide the number of tasks between them.
     * However, in single function mode, on a 2-port chip (i.e. one function on the engine)
     * the fcoe function will have all the tasks allocated to it.
     */
    if (IS_MULTI_VNIC(pdev) || (CHIP_PORT_MODE(pdev) == LM_CHIP_PORT_MODE_4))
    {
        max_fcoe_task = max_fcoe_task / MAX_FCOE_FUNCS_PER_ENGINE;
    }

    return max_fcoe_task;
}

/**
 * @description
 * Resets the FCoE info structure to a clean (zeroed) state.
 * @param pdev
 *
 * @return STATIC void
 */
STATIC void
lm_fc_init_vars(
    IN struct _lm_device_t          *pdev)
{

    if CHK_NULL(pdev)
    {
        return;
    }

    mm_mem_zero(&pdev->fcoe_info, sizeof(lm_fcoe_info_t));
}

/**
 * @description
 * Allocates FCoE resources: registers the CID-recycle and SQ-completion
 * callbacks and obtains DMA memory for ramrod commands.
 * @param pdev
 *
 * @return lm_status_t
 */
lm_status_t
lm_fc_alloc_resc(
    IN struct _lm_device_t          *pdev)
{
    lm_status_t lm_status = LM_STATUS_SUCCESS;

    if CHK_NULL(pdev)
    {
        return LM_STATUS_INVALID_PARAMETER;
    }
    lm_fc_init_vars(pdev);

    /* cid recycled cb registration */
    lm_cid_recycled_cb_register(pdev, FCOE_CONNECTION_TYPE, lm_fc_recycle_cid_cb);

    /* SQ-completion cb registration (SQ commands that are completed internally in the driver) */
    lm_sq_comp_cb_register(pdev, FCOE_CONNECTION_TYPE, lm_fc_comp_cb);

    /* Get physical memory for RAMROD commands */
    lm_status = lm_fc_get_ramrod_phys_mem(pdev);

    if (lm_status != LM_STATUS_SUCCESS)
    {
        return lm_status;
    }
    return LM_STATUS_SUCCESS;
} /* lm_fc_alloc_resc */


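/** Description
 *  Completes an L4 offload request for an iSCSI connection: translates
 *  the KCQE completion status and hands it to the TOE offload
 *  completion handler.
 */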
lm_status_t lm_sc_complete_l4_ofld_request(lm_device_t *pdev, struct iscsi_kcqe *kcqe)
{
    u32_t comp_status = 0;
    lm_tcp_state_t *tcp;
    u32_t cid;

    if (CHK_NULL(pdev) || CHK_NULL(kcqe))
    {
        return LM_STATUS_INVALID_PARAMETER;
    }

    cid = SW_CID(kcqe->iscsi_conn_context_id);
    tcp = lm_cid_cookie(pdev, TOE_CONNECTION_TYPE, cid);
    DbgBreakIf(!tcp);

    if (kcqe->completion_status & ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE)
    {
        /* currently there is no specific completion status handling, only success / fail */
        /* but originally the flags are those of toe_initiate_offload_ramrod_data */
        comp_status = 1;
    }

    /* toe lock is taken inside */
    lm_tcp_comp_initiate_offload_request(pdev, tcp, comp_status);

    return LM_STATUS_SUCCESS;
}
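/** Description
 *  Completes an L4 upload-path ramrod (searcher delete / terminate /
 *  query) for the TCP state associated with 'cid'.
 */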
lm_status_t lm_sc_complete_l4_upload_request(lm_device_t *pdev, u8_t op_code, u32_t cid)
{
    lm_status_t      lm_status = LM_STATUS_SUCCESS;
    lm_tcp_state_t * tcp       = NULL;

    tcp = lm_cid_cookie(pdev, TOE_CONNECTION_TYPE, cid);
    if (NULL == tcp)
    {
        return LM_STATUS_FAILURE;
    }

    switch (op_code)
    {
    case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
        if (mm_sc_is_omgr_enabled(pdev))
        {
            lm_empty_ramrod_eth(pdev, OOO_CID(pdev), cid, NULL, 0 /*d/c*/);
        }
        else
        {
            lm_tcp_searcher_ramrod_complete(pdev, tcp);
        }
        break;
    case RAMROD_CMD_ID_ETH_EMPTY:
        lm_tcp_searcher_ramrod_complete(pdev, tcp);
        break;
    case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
        lm_tcp_terminate_ramrod_complete(pdev, tcp);
        break;
    case L5CM_RAMROD_CMD_ID_QUERY:
        lm_tcp_query_ramrod_complete(pdev, tcp);
        break;
    default:
        DbgMessage(pdev, WARN, "lm_sc_complete_l4_upload_request: Invalid op_code 0x%x.\n", op_code);
        return LM_STATUS_INVALID_PARAMETER;
    }

    return LM_STATUS_SUCCESS;
}


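/** Description
 *  Dispatches an iSCSI slow-path KCQE to the matching completion
 *  handler and then completes the command on the SQ.
 */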
lm_status_t lm_sc_complete_slow_path_request(lm_device_t *pdev, struct iscsi_kcqe *kcqe)
{
    lm_status_t lm_status = LM_STATUS_FAILURE;
    u8_t        op_code   = 0;

    if (CHK_NULL(pdev) || CHK_NULL(kcqe))
    {
        return LM_STATUS_INVALID_PARAMETER;
    }

    /* Store the opcode; the handlers below may modify it (internal searcher),
     * and it is needed for lm_sq_complete() later on. */
    op_code = kcqe->op_code;

    switch (kcqe->op_code)
    {
/*  case ISCSI_KCQE_OPCODE_INIT:
        lm_status = mm_sc_complete_init_request(pdev, kcqe);
        if (lm_status != LM_STATUS_SUCCESS)
        {
            DbgMessage(pdev, WARN, "lm_sc_complete_slow_path_request: lm_sc_complete_init_request failed.\n");
        }
        break;
*/
    case L5CM_RAMROD_CMD_ID_ADD_NEW_CONNECTION:
        lm_status = lm_sc_complete_l4_ofld_request(pdev, kcqe);
        if (lm_status != LM_STATUS_SUCCESS)
        {
            DbgMessage(pdev, WARN, "lm_sc_complete_slow_path_request: lm_sc_complete_l4_ofld_request failed.\n");
        }
        break;
    case ISCSI_KCQE_OPCODE_UPDATE_CONN:
        lm_status = mm_sc_complete_update_request(pdev, kcqe);
        if (lm_status != LM_STATUS_SUCCESS)
        {
            DbgMessage(pdev, WARN, "lm_sc_complete_slow_path_request: mm_sc_complete_update_request failed.\n");
        }
        break;
    case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
    case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
    case L5CM_RAMROD_CMD_ID_QUERY:
        lm_status = lm_sc_complete_l4_upload_request(pdev, kcqe->op_code, SW_CID(kcqe->iscsi_conn_context_id));
        break;
    default:
        DbgMessage(pdev, WARN, "lm_sc_complete_slow_path_request: Invalid op_code 0x%x.\n", kcqe->op_code);
    }

    lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, op_code,
                   ISCSI_CONNECTION_TYPE, SW_CID(kcqe->iscsi_conn_context_id));

    return lm_status;
}
1338 
1339 
1340 /* Handle FC related ramrod completions */
1341 lm_status_t
lm_fc_complete_slow_path_request(IN struct _lm_device_t * pdev,IN struct fcoe_kcqe * kcqe)1342 lm_fc_complete_slow_path_request(
1343     IN struct _lm_device_t          *pdev,
1344     IN struct fcoe_kcqe             *kcqe)
1345 {
1346     lm_status_t                     lm_status    = LM_STATUS_FAILURE;
1347     lm_fcoe_state_t                 *fcoe        = NULL;
1348     const u8_t                      priority     = CMD_PRIORITY_NORMAL;
1349     const enum connection_type      con_type     = FCOE_CONNECTION_TYPE;
1350     u32_t                           cid          = 0;
1351     u32_t                           sw_cid       = 0;
1352     u8_t                            fcoe_commnad = 0;
1353     u8_t                            b_valid      = TRUE;
1354 
1355     if (CHK_NULL(pdev) || CHK_NULL(kcqe))
1356     {
1357         return LM_STATUS_INVALID_PARAMETER;
1358     }
1359 
1360     switch (kcqe->op_code)
1361     {
1362         case FCOE_KCQE_OPCODE_INIT_FUNC:
1363         {
1364             fcoe_commnad = FCOE_RAMROD_CMD_ID_INIT_FUNC;
1365             lm_status    = mm_fc_complete_init_request(pdev, kcqe);
1366             cid          = LM_CLI_CID(pdev, LM_CLI_IDX_FCOE);
1367             break;
1368         }
1369         case FCOE_KCQE_OPCODE_OFFLOAD_CONN:
1370         {
1371             fcoe_commnad = FCOE_RAMROD_CMD_ID_OFFLOAD_CONN;
1372 
1373             DbgBreakIf(0 != mm_le32_to_cpu(kcqe->completion_status)); /* offload should never fail */
1374 
1375             sw_cid = SW_CID(mm_le32_to_cpu(kcqe->fcoe_conn_context_id));
1376             fcoe   = lm_cid_cookie(pdev, con_type, sw_cid);
1377 
1378             if(!fcoe)
1379             {
1380                 lm_status = LM_STATUS_RESOURCE;
1381                 DbgBreakIf(!fcoe);
1382                 break;
1383             }
1384 
1385             cid       = fcoe->cid;
1386             lm_status = mm_fc_complete_ofld_request(pdev, fcoe, kcqe);
1387             break;
1388         }
1389         case FCOE_KCQE_OPCODE_ENABLE_CONN:
1390         {
1391             fcoe_commnad = FCOE_RAMROD_CMD_ID_ENABLE_CONN;
1392 
1393             DbgBreakIf(0 != mm_le32_to_cpu(kcqe->completion_status)); /* enable should never fail */
1394 
1395             sw_cid = SW_CID(mm_le32_to_cpu(kcqe->fcoe_conn_context_id));
1396             fcoe   = lm_cid_cookie(pdev, con_type, sw_cid);
1397 
1398             if(!fcoe)
1399             {
1400                 lm_status = LM_STATUS_RESOURCE;
1401                 DbgBreakIf(!fcoe);
1402                 break;
1403             }
1404             cid    = fcoe->cid;
1405 
1406             lm_status = mm_fc_complete_enable_request(pdev, fcoe, kcqe);
1407             break;
1408         }
1409         case FCOE_KCQE_OPCODE_DISABLE_CONN:
1410         {
1411             fcoe_commnad = FCOE_RAMROD_CMD_ID_DISABLE_CONN;
1412 
1413             /* Disable is complete, now we need to send the terminate ramrod */
1414             DbgBreakIf(0 != mm_le32_to_cpu(kcqe->completion_status)); /* disable should never fail */
1415 
1416             sw_cid = SW_CID(mm_le32_to_cpu(kcqe->fcoe_conn_context_id));
1417             fcoe   = lm_cid_cookie(pdev, con_type, sw_cid);
1418 
1419             if(!fcoe)
1420             {
1421                 lm_status = LM_STATUS_RESOURCE;
1422                 DbgBreakIf(!fcoe);
1423                 break;
1424             }
1425 
1426             cid          = fcoe->cid;
1427             lm_status    = mm_fc_complete_disable_request(pdev, fcoe, kcqe);
1428             break;
1429         }
1430         case FCOE_KCQE_OPCODE_DESTROY_FUNC:
1431         {
1432             fcoe_commnad = FCOE_RAMROD_CMD_ID_DESTROY_FUNC;
1433             lm_status    = mm_fc_complete_destroy_request(pdev, kcqe);
1434             cid          = LM_CLI_CID(pdev, LM_CLI_IDX_FCOE);
1435             break;
1436         }
1437         case FCOE_KCQE_OPCODE_STAT_FUNC:
1438         {
1439             fcoe_command = FCOE_RAMROD_CMD_ID_STAT_FUNC;
1440             lm_status    = mm_fc_complete_stat_request(pdev, kcqe);
1441             cid          = LM_CLI_CID(pdev, LM_CLI_IDX_FCOE);
1442             break;
1443         }
1444         case FCOE_RAMROD_CMD_ID_TERMINATE_CONN: /* Internal VBD not passed up... */
1445         {
1446             fcoe_command = FCOE_RAMROD_CMD_ID_TERMINATE_CONN;
1447 
1448             /* Terminate is complete, now we need to send the CFC delete ramrod */
1449             DbgBreakIf(0 != mm_le32_to_cpu(kcqe->completion_status)); /* terminate should never fail */
1450 
1451             sw_cid = SW_CID(mm_le32_to_cpu(kcqe->fcoe_conn_context_id));
1452 
1453             fcoe = lm_cid_cookie(pdev, con_type, sw_cid);
1454 
1455             if(!fcoe)
1456             {
1457                 lm_status = LM_STATUS_RESOURCE;
1458                 DbgBreakIf(!fcoe);
1459                 break;
1460             }
1461 
1462             cid = fcoe->cid;
1463 
1464             lm_status = mm_fc_complete_terminate_request(pdev, fcoe, kcqe);
1465             break;
1466         }
1467         default:
1468         {
1469             DbgMessage(pdev, WARN, "lm_fc_complete_slow_path_request: Invalid op_code 0x%x.\n", kcqe->op_code);
1470             b_valid = FALSE;
1471             break;
1472         }
1473     }
1474 
1475     if( b_valid )
1476     {
1477         lm_sq_complete(pdev, priority, fcoe_command, con_type, cid);
1478     }
1479 
1480     return lm_status;
1481 }
1482 
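/*******************************************************************************
 * Description: check whether the iSCSI EQ attached to status block sb_idx has
 *              new completions, i.e. the HW consumer index differs from the
 *              EQ chain's consumer index.
 * Return: TRUE when there are EQ completions to service, FALSE otherwise.
 ******************************************************************************/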
1483 u8_t lm_sc_is_eq_completion(lm_device_t *pdev, u8_t sb_idx)
1484 {
1485     u8_t result = FALSE;
1486     lm_eq_chain_t *eq = NULL;
1487 
1488     DbgBreakIf(!(pdev && ARRSIZE(pdev->iscsi_info.run_time.eq_chain) > sb_idx));
1489 
1490     eq = &LM_SC_EQ(pdev, sb_idx);
1491 
1492     if (eq->hw_con_idx_ptr &&
1493         mm_le16_to_cpu(*eq->hw_con_idx_ptr) != lm_bd_chain_cons_idx(&eq->bd_chain) )
1494     {
1495         result = TRUE;
1496     }
1497     DbgMessage(pdev, INFORMl5, "lm_sc_is_rx_completion(): result is:%s\n", result? "TRUE" : "FALSE");
1498 
1499     return result;
1500 }
1501 
1502 
1503 
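/*******************************************************************************
 * Description: FCoE counterpart of lm_sc_is_eq_completion() - check whether
 *              the FCoE EQ attached to status block sb_idx has new
 *              completions.
 * Return: TRUE when there are EQ completions to service, FALSE otherwise.
 ******************************************************************************/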
1504 u8_t
1505 lm_fc_is_eq_completion(lm_device_t *pdev, u8_t sb_idx)
1506 {
1507     u8_t result = FALSE;
1508     lm_eq_chain_t *eq = NULL;
1509 
1510     DbgBreakIf(!(pdev && ARRSIZE(pdev->fcoe_info.run_time.eq_chain) > sb_idx));
1511 
1512     eq = &LM_FC_EQ(pdev, sb_idx);
1513 
1514     if (eq->hw_con_idx_ptr &&
1515         mm_le16_to_cpu(*eq->hw_con_idx_ptr) != lm_bd_chain_cons_idx(&eq->bd_chain))
1516     {
1517         result = TRUE;
1518     }
1519 
1520     DbgMessage(pdev, INFORMl5, "lm_fc_is_rx_completion(): result is:%s\n", result? "TRUE" : "FALSE");
1521 
1522     return result;
1523 }
1524 
1525 
1526 
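/*******************************************************************************
 * Description: record a TCP-level event on the TOE state of an offloaded
 *              iSCSI connection: the FIN reception time for TCP_FIN, the
 *              connection-reset flag for TCP_RESET.
 * Return: LM_STATUS_SUCCESS or LM_STATUS_INVALID_PARAMETER.
 ******************************************************************************/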
1527 lm_status_t
1528 lm_sc_handle_tcp_event(
1529     IN    lm_device_t *pdev,
1530     IN    u32_t cid,
1531     IN    u32_t op_code
1532     )
1533 {
1534     lm_tcp_state_t *tcp = NULL;
1535 
1536     if CHK_NULL(pdev)
1537     {
1538         return LM_STATUS_INVALID_PARAMETER;
1539     }
1540 
1541     tcp = lm_cid_cookie(pdev, TOE_CONNECTION_TYPE, cid);
1542     if CHK_NULL(tcp)
1543     {
1544         return LM_STATUS_INVALID_PARAMETER;
1545     }
1546 
1547     switch (op_code)
1548     {
1549     case ISCSI_KCQE_OPCODE_TCP_FIN:
1550         tcp->tcp_state_calc.fin_reception_time = mm_get_current_time(pdev);
1551         break;
1552     case ISCSI_KCQE_OPCODE_TCP_RESET:
1553         tcp->tcp_state_calc.con_rst_flag = TRUE;
1554         break;
1555     default:
1556         DbgMessage(pdev, WARN, "lm_sc_handle_tcp_event: Invalid op_code 0x%x\n", op_code);
1557         return LM_STATUS_INVALID_PARAMETER;
1558     }
1559 
1560     return LM_STATUS_SUCCESS;
1561 }
1562 
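/*******************************************************************************
 * Description: indicate a batch of accumulated fast path / error iSCSI KCQEs
 *              to the upper layer and return their BDs to the EQ chain.
 *              *l5_kcqe_start and *l5_kcqe_num are reset for the next batch.
 * Return: the status of mm_sc_comp_l5_request(), or
 *         LM_STATUS_INVALID_PARAMETER.
 ******************************************************************************/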
1563 lm_status_t
1564 lm_sc_comp_l5_request(
1565     IN    lm_device_t *pdev,
1566     IN    lm_eq_chain_t *eq_chain,
1567     INOUT struct iscsi_kcqe **l5_kcqe_start,
1568     INOUT u16_t *l5_kcqe_num)
1569 {
1570     lm_status_t lm_status;
1571 
1572     if (CHK_NULL(pdev) || CHK_NULL(eq_chain) || CHK_NULL(l5_kcqe_start) || CHK_NULL(l5_kcqe_num))
1573     {
1574         return LM_STATUS_INVALID_PARAMETER;
1575     }
1576 
1577     lm_status = mm_sc_comp_l5_request(pdev, *l5_kcqe_start, *l5_kcqe_num);
1578     if (lm_status != LM_STATUS_SUCCESS)
1579     {
1580         DbgMessage(pdev, WARN, "lm_sc_service_eq_intr: mm_sc_comp_l5_request failed.\n");
1581     }
1582 
1583     lm_bd_chain_bds_produced(&eq_chain->bd_chain, *l5_kcqe_num);
1584     *l5_kcqe_num = 0;
1585     *l5_kcqe_start = NULL;
1586 
1587     return lm_status;
1588 }
1589 
1590 
1591 
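/*******************************************************************************
 * Description: FCoE counterpart of lm_sc_comp_l5_request() - indicate a batch
 *              of accumulated fast path / error FCoE KCQEs to the upper layer
 *              and return their BDs to the EQ chain.
 * Return: the status of mm_fc_comp_request(), or
 *         LM_STATUS_INVALID_PARAMETER.
 ******************************************************************************/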
1592 lm_status_t
1593 lm_fc_comp_request(
1594     IN    lm_device_t       *pdev,
1595     IN    lm_eq_chain_t     *eq_chain,
1596     INOUT struct fcoe_kcqe  **fcoe_kcqe_start,
1597     INOUT u16_t             *fcoe_kcqe_num)
1598 {
1599     lm_status_t lm_status;
1600 
1601     if (CHK_NULL(pdev) || CHK_NULL(eq_chain) || CHK_NULL(fcoe_kcqe_start) || CHK_NULL(fcoe_kcqe_num))
1602     {
1603         return LM_STATUS_INVALID_PARAMETER;
1604     }
1605 
1606     lm_status = mm_fc_comp_request(pdev, *fcoe_kcqe_start, *fcoe_kcqe_num);
1607     if (lm_status != LM_STATUS_SUCCESS)
1608     {
1609         DbgMessage(pdev, WARN, "lm_fc_service_eq_intr: lm_fc_comp_request failed.\n");
1610     }
1611 
1612     lm_bd_chain_bds_produced(&eq_chain->bd_chain, *fcoe_kcqe_num);
1613     *fcoe_kcqe_num = 0;
1614     *fcoe_kcqe_start = NULL;
1615 
1616     return lm_status;
1617 }
1618 
1619 
1620 
1621 
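/*******************************************************************************
 * Description: iSCSI EQ interrupt service routine. Consumes KCQEs until the
 *              chain's consumer index catches up with the HW producer: slow
 *              path (ramrod) completions are handled one at a time, fast path
 *              and error KCQEs are batched and indicated through
 *              lm_sc_comp_l5_request(). Finally the EQ producer is updated in
 *              CSTORM internal RAM.
 * Return: None.
 ******************************************************************************/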
1622 void
1623 lm_sc_service_eq_intr(
1624     IN struct _lm_device_t          *pdev,
1625     IN u8_t                         sb_idx)
1626 {
1627     lm_status_t         lm_status;
1628     lm_eq_chain_t       *eq_chain       = NULL;
1629     struct iscsi_kcqe   *kcqe           = NULL;
1630     struct iscsi_kcqe   *l5_kcqe_start  = NULL;
1631     u16_t               l5_kcqe_num     = 0;
1632     u16_t               eq_new_idx      = 0;
1633     u16_t               eq_old_idx      = 0;
1634     u32_t               eq_num          = 0;
1635     u32_t               cid             = 0;
1636 
1637 
1638     if (CHK_NULL(pdev) || (ARRSIZE(pdev->iscsi_info.run_time.eq_chain) <= sb_idx))
1639     {
1640         DbgBreakIf(ARRSIZE(pdev->iscsi_info.run_time.eq_chain) <= sb_idx);
1641         DbgBreakIf(!pdev);
1642         return;
1643     }
1644 
1645     eq_chain = &LM_SC_EQ(pdev, sb_idx);
1646 
1647     eq_new_idx = mm_le16_to_cpu(*(eq_chain->hw_con_idx_ptr));
1648     eq_old_idx = lm_bd_chain_cons_idx(&eq_chain->bd_chain);
1649     DbgBreakIf(S16_SUB(eq_new_idx, eq_old_idx) < 0);
1650 
1651     while (eq_old_idx != eq_new_idx)
1652     {
1653         DbgBreakIf(S16_SUB(eq_new_idx, eq_old_idx) <= 0);
1654 
1655         /* get next consumed kcqe */
1656         kcqe = (struct iscsi_kcqe *)lm_bd_chain_consume_bd_contiguous(&eq_chain->bd_chain);
1657 
1658         /* we got to the end of the page; if we have some kcqes that we need to indicate, */
1659         /* do it now, because we can't assume that the memory of the pages is contiguous */
1660         if (kcqe == NULL)
1661         {
1662             if (l5_kcqe_num != 0)
1663             {
1664                 lm_status = lm_sc_comp_l5_request(pdev, eq_chain, &l5_kcqe_start, &l5_kcqe_num);
1665             }
1666 
1667             /* check cons index again */
1668             eq_old_idx = lm_bd_chain_cons_idx(&eq_chain->bd_chain);
1669 
1670             if (eq_old_idx != eq_new_idx)
1671             {
1672                 /* get next consumed cqe */
1673                 kcqe = (struct iscsi_kcqe *)lm_bd_chain_consume_bd_contiguous(&eq_chain->bd_chain);
1674 
1675                 if (CHK_NULL(kcqe))
1676                 {
1677                     /* shouldn't have happened, got second null from the bd */
1678                     DbgBreakIf(!kcqe);
1679                     break;
1680                 }
1681             }
1682             else
1683             {
1684                 /* the new kcqe was the last one we got, break */
1685                 break;
1686             }
1687         }
1688 
1689         switch (kcqe->op_code)
1690         {
1691         case ISCSI_RAMROD_CMD_ID_INIT:
1692         case L5CM_RAMROD_CMD_ID_ADD_NEW_CONNECTION:
1693         case ISCSI_RAMROD_CMD_ID_UPDATE_CONN:
1694         case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
1695         case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
1696         case L5CM_RAMROD_CMD_ID_QUERY:
1697 
1698             /* first, complete fast path and error indication, if any */
1699             if (l5_kcqe_num != 0)
1700             {
1701                 lm_status = lm_sc_comp_l5_request(pdev, eq_chain, &l5_kcqe_start, &l5_kcqe_num);
1702             }
1703 
1704             lm_status = lm_sc_complete_slow_path_request(pdev, kcqe);
1705             if (lm_status != LM_STATUS_SUCCESS)
1706             {
1707                 DbgMessage(pdev, WARN, "lm_sc_service_eq_intr: mm_sc_comp_l5_request failed.\n");
1708             }
1709 
1710             lm_bd_chain_bds_produced(&eq_chain->bd_chain, 1);
1711             break;
1712 
1713         case ISCSI_KCQE_OPCODE_TCP_FIN:
1714         case ISCSI_KCQE_OPCODE_TCP_RESET:
1715             cid = SW_CID(kcqe->iscsi_conn_context_id);
1716 
1717             lm_sc_handle_tcp_event(pdev, cid, kcqe->op_code);
1718             /* FALLTHROUGH */
1719         default:
1720             if (l5_kcqe_start == NULL)
1721             {
1722                 l5_kcqe_start = kcqe;
1723             }
1724 
1725             l5_kcqe_num++;
1726             break;
1727         }
1728 
1729         eq_old_idx = lm_bd_chain_cons_idx(&eq_chain->bd_chain);
1730     }
1731 
1732     /* complete left fast path events */
1733     if (l5_kcqe_num != 0)
1734     {
1735         lm_status = lm_sc_comp_l5_request(pdev, eq_chain, &l5_kcqe_start, &l5_kcqe_num);
1736     }
1737 
1738     /* update EQ prod in RAM */
1739     eq_num = sb_idx - pdev->iscsi_info.run_time.l5_eq_base_chain_idx;
1740     LM_INTMEM_WRITE16(pdev, CSTORM_ISCSI_EQ_PROD_OFFSET(FUNC_ID(pdev), eq_num), lm_bd_chain_prod_idx(&eq_chain->bd_chain), BAR_CSTRORM_INTMEM);
1741 }
1742 
1743 
1744 
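/*******************************************************************************
 * Description: FCoE EQ interrupt service routine; same structure as
 *              lm_sc_service_eq_intr(), with fast path KCQEs batched through
 *              lm_fc_comp_request() and the EQ producer updated in USTORM
 *              internal RAM.
 * Return: None.
 ******************************************************************************/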
1745 void
1746 lm_fc_service_eq_intr(lm_device_t *pdev, u8_t sb_idx)
1747 {
1748     lm_status_t         lm_status;
1749     lm_eq_chain_t       *eq_chain       = NULL;
1750     struct fcoe_kcqe    *kcqe           = NULL;
1751     struct fcoe_kcqe    *fcoe_kcqe_start= NULL;
1752     u16_t               fcoe_kcqe_num   = 0;
1753     u16_t               eq_new_idx      = 0;
1754     u16_t               eq_old_idx      = 0;
1755 
1756     if (CHK_NULL(pdev) || (ARRSIZE(pdev->fcoe_info.run_time.eq_chain) <= sb_idx))
1757     {
1758         DbgBreakIf(ARRSIZE(pdev->fcoe_info.run_time.eq_chain) <= sb_idx);
1759         DbgBreakIf(!pdev);
1760         return;
1761     }
1762 
1763     eq_chain = &LM_FC_EQ(pdev, sb_idx);
1764 
1765     eq_new_idx = mm_le16_to_cpu(*(eq_chain->hw_con_idx_ptr));
1766     eq_old_idx = lm_bd_chain_cons_idx(&eq_chain->bd_chain);
1767     DbgBreakIf(S16_SUB(eq_new_idx, eq_old_idx) < 0);
1768 
1769     while (eq_old_idx != eq_new_idx)
1770     {
1771         DbgBreakIf(S16_SUB(eq_new_idx, eq_old_idx) <= 0);
1772 
1773         /* get next consumed kcqe */
1774         kcqe = (struct fcoe_kcqe *)lm_bd_chain_consume_bd_contiguous(&eq_chain->bd_chain);
1775 
1776         /* we got to the end of the page; if we have some kcqes that we need to indicate, */
1777         /* do it now, because we can't assume that the memory of the pages is contiguous */
1778         if (kcqe == NULL)
1779         {
1780             if (fcoe_kcqe_num != 0)
1781             {
1782                 lm_status = lm_fc_comp_request(pdev,
1783                                                eq_chain,
1784                                                &fcoe_kcqe_start,
1785                                                &fcoe_kcqe_num);
1786             }
1787 
1788             /* check cons index again */
1789             eq_old_idx = lm_bd_chain_cons_idx(&eq_chain->bd_chain);
1790 
1791             if (eq_old_idx != eq_new_idx)
1792             {
1793                 /* get next consumed cqe */
1794                 kcqe = (struct fcoe_kcqe *)lm_bd_chain_consume_bd(&eq_chain->bd_chain);
1795 
1796                 if (CHK_NULL(kcqe))
1797                 {
1798                     /* shouldn't have happened, got second null from the bd */
1799                     DbgBreakIf(!kcqe);
1800                     break;
1801                 }
1802             }
1803             else
1804             {
1805                 /* the new kcqe was the last one we got, break */
1806                 break;
1807             }
1808         }
1809 
1810         /* first, complete fast path completion notification and error indication, if any */
1811         if (fcoe_kcqe_num != 0)
1812         {
1813             lm_status = lm_fc_comp_request(pdev,
1814                                            eq_chain,
1815                                            &fcoe_kcqe_start,
1816                                            &fcoe_kcqe_num);
1817         }
1818 
1819         switch (kcqe->op_code)
1820         {
1821             case FCOE_KCQE_OPCODE_INIT_FUNC:
1822             case FCOE_KCQE_OPCODE_OFFLOAD_CONN:
1823             case FCOE_KCQE_OPCODE_ENABLE_CONN:
1824             case FCOE_KCQE_OPCODE_DISABLE_CONN:
1825             case FCOE_KCQE_OPCODE_DESTROY_FUNC:
1826             case FCOE_KCQE_OPCODE_STAT_FUNC:
1827             case FCOE_RAMROD_CMD_ID_TERMINATE_CONN:
1828             {
1829                 lm_status = lm_fc_complete_slow_path_request(pdev, kcqe);
1830                 if (lm_status != LM_STATUS_SUCCESS)
1831                 {
1832                     DbgMessage(pdev, WARN, "lm_fc_service_eq_intr: lm_fc_complete_slow_path_request failed.\n");
1833                 }
1834 
1835                 lm_bd_chain_bds_produced(&eq_chain->bd_chain, 1);
1836                 break;
1837             }
1838 
1839             default:
1840             {
1841                 if (fcoe_kcqe_start == NULL)
1842                 {
1843                     fcoe_kcqe_start = kcqe;
1844                 }
1845 
1846                 fcoe_kcqe_num++;
1847                 break;
1848             }
1849         }
1850 
1851         eq_old_idx = lm_bd_chain_cons_idx(&eq_chain->bd_chain);
1852     }
1853 
1854     /* complete left fast path events */
1855     if (fcoe_kcqe_num != 0)
1856     {
1857         lm_status = lm_fc_comp_request(pdev, eq_chain, &fcoe_kcqe_start, &fcoe_kcqe_num);
1858     }
1859 
1860     /* update EQ prod in RAM */
1861     LM_INTMEM_WRITE16(pdev, USTORM_FCOE_EQ_PROD_OFFSET(FUNC_ID(pdev)), lm_bd_chain_prod_idx(&eq_chain->bd_chain), BAR_USTRORM_INTMEM);
1862 }
1863 
1864 
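/*******************************************************************************
 * Description: allocate the run-time physical memory of an iSCSI connection:
 *              slow path request data, task array, R2TQ and HQ, plus a PBL
 *              for each of the three rings. On failure the caller cleans up
 *              through lm_sc_free_con_resc().
 * Return: LM_STATUS_SUCCESS or LM_STATUS_RESOURCE.
 ******************************************************************************/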
1865 lm_status_t
1866 lm_sc_alloc_con_phys_mem(
1867     IN struct _lm_device_t          *pdev,
1868     IN lm_iscsi_state_t             *iscsi)
1869 {
1870     lm_status_t lm_status  = LM_STATUS_SUCCESS;
1871     u32_t       mem_size   = sizeof(*iscsi->sp_req_data.virt_addr);
1872     u8_t        mm_cli_idx = LM_RESOURCE_ISCSI;
1873 
1874 
1875     /* Allocate slowpath request data */
1876     iscsi->sp_req_data.virt_addr = mm_rt_alloc_phys_mem(pdev,
1877                                                         mem_size,
1878                                                         &iscsi->sp_req_data.phys_addr,
1879                                                         0,
1880                                                         mm_cli_idx);
1881     if CHK_NULL(iscsi->sp_req_data.virt_addr)
1882     {   /* can't allocate slowpath request data */
1883         return LM_STATUS_RESOURCE;
1884     }
1885 
1886     mm_memset(iscsi->sp_req_data.virt_addr, 0, mem_size);
1887 
1888     /* Allocate task array */
1889     iscsi->task_array.base_size = pdev->iscsi_info.run_time.num_of_tasks * sizeof(struct iscsi_task_context_entry);
1890     iscsi->task_array.base_virt = mm_rt_alloc_phys_mem(pdev,
1891                                                 iscsi->task_array.base_size,
1892                                                 &iscsi->task_array.base_phy,
1893                                                 0,
1894                                                 mm_cli_idx);
1895     if CHK_NULL(iscsi->task_array.base_virt)
1896     {   /* can't allocate task array */
1897         return LM_STATUS_RESOURCE;
1898     }
1899 
1900     mm_memset(iscsi->task_array.base_virt, 0, iscsi->task_array.base_size);
1901 
1902     lm_status = lm_create_pbl(pdev,
1903                               iscsi->task_array.base_virt,
1904                               &iscsi->task_array.base_phy,
1905                               iscsi->task_array.base_size,
1906                               &iscsi->task_array.pbl_phys_table_virt,
1907                               &iscsi->task_array.pbl_phys_table_phys,
1908                               &iscsi->task_array.pbl_virt_table,
1909                               &iscsi->task_array.pbl_entries,
1910                               &iscsi->task_array.pbl_size,
1911                               TRUE,
1912                               LM_RESOURCE_ISCSI);
1913     if (lm_status != LM_STATUS_SUCCESS)
1914     {
1915         return lm_status;
1916     }
1917 
1918     /* Allocate R2TQ */
1919     iscsi->r2tq.base_size = pdev->iscsi_info.run_time.num_of_tasks * ISCSI_MAX_NUM_OF_PENDING_R2TS * ISCSI_R2TQE_SIZE;
1920     iscsi->r2tq.base_virt = mm_rt_alloc_phys_mem(pdev,
1921                                                 iscsi->r2tq.base_size,
1922                                                 &iscsi->r2tq.base_phy,
1923                                                 0,
1924                                                 mm_cli_idx);
1925     if CHK_NULL(iscsi->r2tq.base_virt)
1926     {   /* can't allocate R2TQ */
1927         return LM_STATUS_RESOURCE;
1928     }
1929 
1930     mm_memset(iscsi->r2tq.base_virt, 0, iscsi->r2tq.base_size);
1931 
1932     lm_status = lm_create_pbl(pdev,
1933                               iscsi->r2tq.base_virt,
1934                               &iscsi->r2tq.base_phy,
1935                               iscsi->r2tq.base_size,
1936                               &iscsi->r2tq.pbl_phys_table_virt,
1937                               &iscsi->r2tq.pbl_phys_table_phys,
1938                               &iscsi->r2tq.pbl_virt_table,
1939                               &iscsi->r2tq.pbl_entries,
1940                               &iscsi->r2tq.pbl_size,
1941                               TRUE,
1942                               LM_RESOURCE_ISCSI);
1943     if (lm_status != LM_STATUS_SUCCESS)
1944     {
1945         return lm_status;
1946     }
1947 
1948     /* Allocate HQ */
1949     iscsi->hq.base_size = pdev->iscsi_info.run_time.hq_size * sizeof(struct iscsi_hq_bd);
1950     iscsi->hq.base_virt = mm_rt_alloc_phys_mem(pdev,
1951                                                 iscsi->hq.base_size,
1952                                                 &iscsi->hq.base_phy,
1953                                                 0,
1954                                                 mm_cli_idx);
1955     if CHK_NULL(iscsi->hq.base_virt)
1956     {   /* can't allocate HQ */
1957 
1958         return LM_STATUS_RESOURCE;
1959     }
1960 
1961     mm_memset(iscsi->hq.base_virt, 0, iscsi->hq.base_size);
1962 
1963     lm_status = lm_create_pbl(pdev,
1964                               iscsi->hq.base_virt,
1965                               &iscsi->hq.base_phy,
1966                               iscsi->hq.base_size,
1967                               &iscsi->hq.pbl_phys_table_virt,
1968                               &iscsi->hq.pbl_phys_table_phys,
1969                               &iscsi->hq.pbl_virt_table,
1970                               &iscsi->hq.pbl_entries,
1971                               &iscsi->hq.pbl_size,
1972                               TRUE,
1973                               LM_RESOURCE_ISCSI);
1974     if (lm_status != LM_STATUS_SUCCESS)
1975     {
1976         return lm_status;
1977     }
1978 
1979     return lm_status;
1980 
1981 }
1982 /*******************************************************************************
1983  * Description: allocate the per-connection resources of an iSCSI connection
1984  *              (physical memory, CID and slow path request data).
1985  * Return: LM_STATUS_SUCCESS, LM_STATUS_PENDING or a failure status.
1986  ******************************************************************************/
1987 lm_status_t
1988 lm_sc_alloc_con_resc(
1989     IN struct _lm_device_t          *pdev,
1990     IN lm_iscsi_state_t             *iscsi,
1991     IN struct iscsi_kwqe_conn_offload1   *req1,
1992     IN struct iscsi_kwqe_conn_offload2   *req2,
1993     IN struct iscsi_kwqe_conn_offload3   *req3
1994     )
1995 {
1996     lm_status_t lm_status;
1997     s32_t cid;
1998 
1999     if (CHK_NULL(pdev) || CHK_NULL(iscsi) || CHK_NULL(req1) || CHK_NULL(req2) || CHK_NULL(req3))
2000     {
2001         return LM_STATUS_INVALID_PARAMETER;
2002     }
2003 
2004     DbgMessage(pdev, INFORM, "### lm_sc_alloc_con_resc\n");
2005 
2006     /* save the miniport's conn id */
2007     iscsi->iscsi_conn_id = req1->iscsi_conn_id;
2008 
2009     /* Boot connections' physical resources are allocated during bind, not during offload... */
2010     if (!iscsi->b_resources_allocated)
2011     {
2012         lm_status = lm_sc_alloc_con_phys_mem(pdev, iscsi);
2013         if (lm_status != LM_STATUS_SUCCESS)
2014         {
2015             lm_sc_free_con_resc(pdev, iscsi);
2016             return lm_status;
2017         }
2018         iscsi->b_resources_allocated = TRUE;
2019     }
2020 
2021 
2022     /* Allocate CID */
2023     lm_status = lm_allocate_cid(pdev, ISCSI_CONNECTION_TYPE, (void *)iscsi, &cid);
2024     if (lm_status == LM_STATUS_PENDING)
2025     {
2026         lm_sp_req_manager_block(pdev, (u32_t)cid);
2027     }
2028     else if (lm_status != LM_STATUS_SUCCESS)
2029     {
2030         /* failed to allocate CID */
2031         lm_sc_free_con_resc(pdev, iscsi);
2032 
2033         return lm_status;
2034     }
2035 
2036     /* save the returned cid */
2037     iscsi->cid = (u32_t)cid;
2038 
2039     /* the allocated slow path request phys data for iscsi will be used in the tcp_state.sp_data, for the query request */
2040     lm_status = lm_sp_req_manager_set_sp_data(pdev, iscsi->cid, iscsi->sp_req_data.virt_addr, iscsi->sp_req_data.phys_addr);
2041     if (lm_status != LM_STATUS_SUCCESS)
2042     {
2043         lm_sc_free_con_resc(pdev, iscsi);
2044 
2045         return lm_status;
2046     }
2047 
2048     if (lm_cid_state(pdev, iscsi->cid) == LM_CID_STATE_PENDING) {
2049         return LM_STATUS_PENDING; /* Too soon to initialize context */
2050     }
2051 
2052     return LM_STATUS_SUCCESS;
2053 } /* lm_sc_alloc_con_resc */
2054 
2055 
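/*******************************************************************************
 * Description: release the physical memory allocated by
 *              lm_sc_alloc_con_phys_mem(). Each block is freed only if it was
 *              actually allocated, so a partially offloaded connection can be
 *              cleaned up safely.
 * Return: None.
 ******************************************************************************/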
2056 void lm_sc_free_con_phys_mem(
2057     IN struct _lm_device_t *pdev,
2058     IN lm_iscsi_state_t *iscsi
2059     )
2060 {
2061     u8_t mm_cli_idx = LM_RESOURCE_ISCSI;
2062 
2063     if (iscsi->sp_req_data.virt_addr)
2064     {
2065         mm_rt_free_phys_mem(pdev, sizeof(*iscsi->sp_req_data.virt_addr), iscsi->sp_req_data.virt_addr, iscsi->sp_req_data.phys_addr, mm_cli_idx);
2066         iscsi->sp_req_data.virt_addr = NULL;
2067     }
2068     if (iscsi->task_array.base_virt) {
2069         mm_rt_free_phys_mem(pdev, iscsi->task_array.base_size, iscsi->task_array.base_virt, iscsi->task_array.base_phy, mm_cli_idx);
2070         iscsi->task_array.base_virt = NULL;
2071     }
2072     if (iscsi->task_array.pbl_phys_table_virt) {
2073         mm_rt_free_phys_mem(pdev, iscsi->task_array.pbl_size, iscsi->task_array.pbl_phys_table_virt, iscsi->task_array.pbl_phys_table_phys, mm_cli_idx);
2074         iscsi->task_array.pbl_phys_table_virt = NULL;
2075     }
2076     if (iscsi->task_array.pbl_virt_table) {
2077         mm_rt_free_mem(pdev, iscsi->task_array.pbl_virt_table, iscsi->task_array.pbl_entries * sizeof(void *), mm_cli_idx);
2078         iscsi->task_array.pbl_virt_table = NULL;
2079     }
2080     if (iscsi->r2tq.base_virt) {
2081         mm_rt_free_phys_mem(pdev, iscsi->r2tq.base_size, iscsi->r2tq.base_virt, iscsi->r2tq.base_phy, mm_cli_idx);
2082         iscsi->r2tq.base_virt = NULL;
2083     }
2084     if (iscsi->r2tq.pbl_phys_table_virt) {
2085         mm_rt_free_phys_mem(pdev, iscsi->r2tq.pbl_size, iscsi->r2tq.pbl_phys_table_virt, iscsi->r2tq.pbl_phys_table_phys, mm_cli_idx);
2086         iscsi->r2tq.pbl_phys_table_virt = NULL;
2087     }
2088     if (iscsi->r2tq.pbl_virt_table) {
2089         mm_rt_free_mem(pdev, iscsi->r2tq.pbl_virt_table, iscsi->r2tq.pbl_entries * sizeof(void *), mm_cli_idx);
2090         iscsi->r2tq.pbl_virt_table = NULL;
2091     }
2092     if (iscsi->hq.base_virt) {
2093         mm_rt_free_phys_mem(pdev, iscsi->hq.base_size, iscsi->hq.base_virt, iscsi->hq.base_phy, mm_cli_idx);
2094         iscsi->hq.base_virt = NULL;
2095     }
2096     if (iscsi->hq.pbl_phys_table_virt) {
2097         mm_rt_free_phys_mem(pdev, iscsi->hq.pbl_size, iscsi->hq.pbl_phys_table_virt, iscsi->hq.pbl_phys_table_phys, mm_cli_idx);
2098         iscsi->hq.pbl_phys_table_virt = NULL;
2099     }
2100     if (iscsi->hq.pbl_virt_table) {
2101         mm_rt_free_mem(pdev, iscsi->hq.pbl_virt_table, iscsi->hq.pbl_entries * sizeof(void *), mm_cli_idx);
2102         iscsi->hq.pbl_virt_table = NULL;
2103     }
2104 
2105 }
2106 /*******************************************************************************
2107  * Description: free the CID and, unless the connection keeps its resources,
2108  *              the physical memory of an iSCSI connection.
2109  * Return: LM_STATUS_SUCCESS or LM_STATUS_INVALID_PARAMETER.
2110  ******************************************************************************/
2111 lm_status_t lm_sc_free_con_resc(
2112     IN struct _lm_device_t *pdev,
2113     IN lm_iscsi_state_t *iscsi
2114     )
2115 {
2116     u8_t notify_fw = 1;
2117 
2118     if (CHK_NULL(pdev) || CHK_NULL(iscsi))
2119     {
2120         return LM_STATUS_INVALID_PARAMETER;
2121     }
2122 
2123     if (iscsi->cid != 0) {
2124         if (iscsi->hdr.status == STATE_STATUS_INIT_OFFLOAD_ERR) {
2125             notify_fw = 0;
2126         }
2127         lm_free_cid_resc(pdev, ISCSI_CONNECTION_TYPE, iscsi->cid, notify_fw);
2128         iscsi->cid = 0;
2129     }
2130 
2131     if (!iscsi->b_keep_resources)
2132     {
2133         lm_sc_free_con_phys_mem(pdev, iscsi);
2134     }
2135 
2136     return LM_STATUS_SUCCESS;
2137 }
2138 
2139 
2140 /* Free the ramrod memory and the CID */
2141 lm_status_t
2142 lm_fc_free_con_resc(
2143     IN struct _lm_device_t          *pdev,
2144     IN lm_fcoe_state_t              *fcoe)
2145 {
2146     u8_t                            notify_fw = 1;
2147 
2148     if (CHK_NULL(pdev) || CHK_NULL(fcoe))
2149     {
2150         return LM_STATUS_INVALID_PARAMETER;
2151     }
2152 
2153     if (fcoe->cid != 0)
2154     {
2155         if (fcoe->hdr.status == STATE_STATUS_INIT_OFFLOAD_ERR)
2156         {
2157             notify_fw = 0;
2158         }
2159 
2160         lm_free_cid_resc(pdev, FCOE_CONNECTION_TYPE, fcoe->cid, notify_fw);
2161 
2162         fcoe->hdr.state_blk = NULL;
2163         fcoe->cid = 0;
2164         fcoe->ctx_virt = NULL;
2165         fcoe->ctx_phys.as_u64 = 0;
2166     }
2167 
2168     return LM_STATUS_SUCCESS;
2169 }
2170 
2171 
2172 
2173 /*******************************************************************************
2174  * Description: initialize the iSCSI context of an offloaded connection in
2175  *              the x/t/u/c storm sections and enable its timers block.
2176  * Return: LM_STATUS_SUCCESS or LM_STATUS_INVALID_PARAMETER.
2177  ******************************************************************************/
2178 lm_status_t lm_sc_init_iscsi_context(
2179     IN struct _lm_device_t      *pdev,
2180     IN lm_iscsi_state_t         *iscsi,
2181     struct iscsi_kwqe_conn_offload1  *req1,
2182     struct iscsi_kwqe_conn_offload2  *req2,
2183     struct iscsi_kwqe_conn_offload3  *req3
2184     )
2185 {
2186     struct iscsi_context *ctx;
2187     u32_t cid;
2188     u32_t cq_size_in_bytes;
2189     u32_t single_cq_pbl_entries;
2190     u32_t i;
2191     u16_t conn_id;
2192     lm_address_t pbl_base;
2193 
2194     if (CHK_NULL(pdev) || CHK_NULL(iscsi) || CHK_NULL(req1) || CHK_NULL(req2) || CHK_NULL(req3))
2195     {
2196         return LM_STATUS_INVALID_PARAMETER;
2197     }
2198 
2199 
2200     conn_id = req1->iscsi_conn_id;
2201     cid = iscsi->cid;
2202 
2203     DbgMessage(pdev, INFORM, "### lm_sc_init_iscsi_context\n");
2204 
2205     if (req2->num_additional_wqes != 1)
2206     {
2207         return LM_STATUS_INVALID_PARAMETER;
2208     }
2209 
2210     /* get context */
2211     iscsi->ctx_virt = (struct iscsi_context *)lm_get_context(pdev, iscsi->cid);
2212     DbgBreakIf(!iscsi->ctx_virt);
2213     iscsi->ctx_phys.as_u64 = lm_get_context_phys(pdev, iscsi->cid);
2214     DbgBreakIf(!iscsi->ctx_phys.as_u64);
2215     DbgMessage(pdev, VERBOSEl5sp,
2216                 "iscsi->ctx_virt=%p, iscsi->ctx_phys_high=%x, iscsi->ctx_phys_low=%x\n",
2217                 iscsi->ctx_virt, iscsi->ctx_phys.as_u32.high, iscsi->ctx_phys.as_u32.low);
2218 
2219     ctx = iscsi->ctx_virt;
2220 
2221     mm_memset(ctx, 0, sizeof(struct iscsi_context));
2222 
2223     // init xstorm aggregative context
2224     ctx->xstorm_ag_context.hq_prod = 1; //this value represents actual hq_prod + 1
2225 
2226     // init xstorm storm context
2227     //iscsi context
2228     ctx->xstorm_st_context.iscsi.first_burst_length = ISCSI_DEFAULT_FIRST_BURST_LENGTH;
2229     ctx->xstorm_st_context.iscsi.max_send_pdu_length = ISCSI_DEFAULT_MAX_PDU_LENGTH;
2230 
2231     /* advance the SQ pbl_base because it's pointing at the SQ_DB */
2232     pbl_base.as_u32.low = req1->sq_page_table_addr_lo;
2233     pbl_base.as_u32.high = req1->sq_page_table_addr_hi;
2234     LM_INC64(&pbl_base, ISCSI_SQ_DB_SIZE);
2235     ctx->xstorm_st_context.iscsi.sq_pbl_base.lo = pbl_base.as_u32.low;
2236     ctx->xstorm_st_context.iscsi.sq_pbl_base.hi = pbl_base.as_u32.high;
2237 
2238     //!!DP
2239     ctx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.lo;
2240     ctx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.hi;
2241 
2242     ctx->xstorm_st_context.iscsi.hq_pbl_base.lo = iscsi->hq.pbl_phys_table_phys.as_u32.low;
2243     ctx->xstorm_st_context.iscsi.hq_pbl_base.hi = iscsi->hq.pbl_phys_table_phys.as_u32.high;
2244     ctx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo = iscsi->hq.pbl_phys_table_virt[0].as_u32.low;
2245     ctx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi = iscsi->hq.pbl_phys_table_virt[0].as_u32.high;
2246 
2247     ctx->xstorm_st_context.iscsi.r2tq_pbl_base.lo = iscsi->r2tq.pbl_phys_table_phys.as_u32.low;
2248     ctx->xstorm_st_context.iscsi.r2tq_pbl_base.hi = iscsi->r2tq.pbl_phys_table_phys.as_u32.high;
2249     ctx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo = iscsi->r2tq.pbl_phys_table_virt[0].as_u32.low;
2250     ctx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi = iscsi->r2tq.pbl_phys_table_virt[0].as_u32.high;
2251 
2252     ctx->xstorm_st_context.iscsi.task_pbl_base.lo = iscsi->task_array.pbl_phys_table_phys.as_u32.low;
2253     ctx->xstorm_st_context.iscsi.task_pbl_base.hi = iscsi->task_array.pbl_phys_table_phys.as_u32.high;
2254     ctx->xstorm_st_context.iscsi.task_pbl_cache_idx = ISCSI_PBL_NOT_CACHED;
2255     //ctx->xstorm_st_context.iscsi.max_outstanding_r2ts = ISCSI_DEFAULT_MAX_OUTSTANDING_R2T;
2256     SET_FIELD(ctx->xstorm_st_context.iscsi.flags.flags, XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA, ISCSI_DEFAULT_IMMEDIATE_DATA);
2257     SET_FIELD(ctx->xstorm_st_context.iscsi.flags.flags, XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T, ISCSI_DEFAULT_INITIAL_R2T);
2258     SET_FIELD(ctx->xstorm_st_context.iscsi.flags.flags, XSTORM_ISCSI_CONTEXT_FLAGS_B_EN_HEADER_DIGEST, ISCSI_DEFAULT_HEADER_DIGEST);
2259     SET_FIELD(ctx->xstorm_st_context.iscsi.flags.flags, XSTORM_ISCSI_CONTEXT_FLAGS_B_EN_DATA_DIGEST, ISCSI_DEFAULT_DATA_DIGEST);
2260 
2261     // init tstorm storm context
2262     ctx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE + (ISCSI_DEFAULT_HEADER_DIGEST ? ISCSI_DIGEST_SIZE : 0);
2263     SET_FIELD(ctx->tstorm_st_context.iscsi.flags, TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN, ISCSI_DEFAULT_HEADER_DIGEST);
2264     SET_FIELD(ctx->tstorm_st_context.iscsi.flags, TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN, ISCSI_DEFAULT_DATA_DIGEST);
2265     ctx->tstorm_st_context.iscsi.rq_db_phy_addr.lo = req2->rq_page_table_addr_lo;
2266     ctx->tstorm_st_context.iscsi.rq_db_phy_addr.hi = req2->rq_page_table_addr_hi;
2267     ctx->tstorm_st_context.iscsi.iscsi_conn_id = conn_id;
2268 
2269     //To enable the timer block.
2270     SET_FIELD(ctx->timers_context.flags, TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG, 1);
2271 
2272     // init ustorm storm context
2273     cq_size_in_bytes = pdev->iscsi_info.run_time.cq_size * ISCSI_CQE_SIZE;
2274     single_cq_pbl_entries = lm_get_pbl_entries(cq_size_in_bytes);
2275 
2276     ctx->ustorm_st_context.task_pbe_cache_index = ISCSI_PBL_NOT_CACHED;
2277     ctx->ustorm_st_context.task_pdu_cache_index = ISCSI_PDU_HEADER_NOT_CACHED;
2278 
2279     /* advance the RQ pbl_base because it's pointing at the RQ_DB */
2280     pbl_base.as_u32.low = req2->rq_page_table_addr_lo;
2281     pbl_base.as_u32.high = req2->rq_page_table_addr_hi;
2282     LM_INC64(&pbl_base, ISCSI_RQ_DB_SIZE);
2283     ctx->ustorm_st_context.ring.rq.pbl_base.lo = pbl_base.as_u32.low;
2284     ctx->ustorm_st_context.ring.rq.pbl_base.hi = pbl_base.as_u32.high;
2285 
2286     //!!DP
2287     /* qp_first_pte[0] will contain the first PTE of the RQ */
2288     ctx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].lo;
2289     ctx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].hi;
2290 
2291     ctx->ustorm_st_context.ring.r2tq.pbl_base.lo = iscsi->r2tq.pbl_phys_table_phys.as_u32.low;
2292     ctx->ustorm_st_context.ring.r2tq.pbl_base.hi = iscsi->r2tq.pbl_phys_table_phys.as_u32.high;
2293     ctx->ustorm_st_context.ring.r2tq.curr_pbe.lo = iscsi->r2tq.pbl_phys_table_virt[0].as_u32.low;
2294     ctx->ustorm_st_context.ring.r2tq.curr_pbe.hi = iscsi->r2tq.pbl_phys_table_virt[0].as_u32.high;
2295 
2296     /* Set up the first CQ, the first PTE info is contained in req2 */
2297     pbl_base.as_u32.low = req1->cq_page_table_addr_lo;
2298     pbl_base.as_u32.high = req1->cq_page_table_addr_hi;
2299     LM_INC64(&pbl_base, ISCSI_CQ_DB_SIZE);
2300     ctx->ustorm_st_context.ring.cq_pbl_base.lo = pbl_base.as_u32.low;
2301     ctx->ustorm_st_context.ring.cq_pbl_base.hi = pbl_base.as_u32.high;
2302     ctx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
2303     ctx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.lo;
2304     ctx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.hi;
2305 
2306     if (1 != pdev->iscsi_info.run_time.num_of_cqs)
2307     {
2308         /* For now we only support a single CQ */
2309         return LM_STATUS_INVALID_PARAMETER;
2310 
2311 #if 0
2312         /* Set up additional CQs */
2313         for (i = 1; i < pdev->iscsi_info.run_time.num_of_cqs; i++)   // 8 x CQ curr_pbe
2314         {
2315             ctx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
2316 
2317             curr_pbl_base.as_u32.low = pbl_base.as_u32.low;
2318             curr_pbl_base.as_u32.high = pbl_base.as_u32.high;
2319 
2320             LM_INC64(&curr_pbl_base, i * single_cq_pbl_entries * sizeof(lm_address_t));
2321 #if 0
2322             fix this if we ever want to use > 1 CQ
2323 
2324             curr_pbe = (lm_address_t *)mm_map_io_space(curr_pbl_base, sizeof(lm_address_t));
2325             if CHK_NULL(curr_pbe)
2326             {
2327                 return LM_STATUS_INVALID_PARAMETER;
2328             }
2329             ctx->ustorm_st_context.ring.cq[i].curr_pbe.lo = curr_pbe->as_u32.low;
2330             ctx->ustorm_st_context.ring.cq[i].curr_pbe.hi = curr_pbe->as_u32.high;
2331             mm_unmap_io_space(curr_pbe, sizeof(lm_address_t));
2332 
2333 #endif
2334         }
2335 #endif
2336     }
2337 
2338     ctx->ustorm_st_context.task_pbl_base.lo = iscsi->task_array.pbl_phys_table_phys.as_u32.low;
2339     ctx->ustorm_st_context.task_pbl_base.hi = iscsi->task_array.pbl_phys_table_phys.as_u32.high;
2340     ctx->ustorm_st_context.tce_phy_addr.lo = iscsi->task_array.pbl_phys_table_virt[0].as_u32.low;
2341     ctx->ustorm_st_context.tce_phy_addr.hi = iscsi->task_array.pbl_phys_table_virt[0].as_u32.high;
2342     ctx->ustorm_st_context.iscsi_conn_id = conn_id;
2343     SET_FIELD(ctx->ustorm_st_context.negotiated_rx, USTORM_ISCSI_ST_CONTEXT_MAX_RECV_PDU_LENGTH, ISCSI_DEFAULT_MAX_PDU_LENGTH);
2344     SET_FIELD(ctx->ustorm_st_context.negotiated_rx_and_flags, USTORM_ISCSI_ST_CONTEXT_MAX_BURST_LENGTH, ISCSI_DEFAULT_MAX_BURST_LENGTH);
2345     SET_FIELD(ctx->ustorm_st_context.negotiated_rx, USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS, ISCSI_DEFAULT_MAX_OUTSTANDING_R2T);
2346     SET_FIELD(ctx->ustorm_st_context.negotiated_rx_and_flags, USTORM_ISCSI_ST_CONTEXT_B_HDR_DIGEST_EN, ISCSI_DEFAULT_HEADER_DIGEST);
2347     SET_FIELD(ctx->ustorm_st_context.negotiated_rx_and_flags, USTORM_ISCSI_ST_CONTEXT_B_DATA_DIGEST_EN, ISCSI_DEFAULT_DATA_DIGEST);
2348     ctx->ustorm_st_context.num_cqs = pdev->iscsi_info.run_time.num_of_cqs;
2349 
2350     // init cstorm storm context
2351     ctx->cstorm_st_context.hq_pbl_base.lo = iscsi->hq.pbl_phys_table_phys.as_u32.low;
2352     ctx->cstorm_st_context.hq_pbl_base.hi = iscsi->hq.pbl_phys_table_phys.as_u32.high;
2353     ctx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq.pbl_phys_table_virt[0].as_u32.low;
2354     ctx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq.pbl_phys_table_virt[0].as_u32.high;
2355 
2356     ctx->cstorm_st_context.task_pbl_base.lo = iscsi->task_array.pbl_phys_table_phys.as_u32.low;
2357     ctx->cstorm_st_context.task_pbl_base.hi = iscsi->task_array.pbl_phys_table_phys.as_u32.high;
2358     ctx->cstorm_st_context.cq_db_base.lo = req1->cq_page_table_addr_lo;
2359     ctx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
2360     ctx->cstorm_st_context.iscsi_conn_id = conn_id;
2361     ctx->cstorm_st_context.cq_proc_en_bit_map = (1 << pdev->iscsi_info.run_time.num_of_cqs) - 1;
2362     SET_FIELD(ctx->cstorm_st_context.flags, CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN, ISCSI_DEFAULT_HEADER_DIGEST);
2363     SET_FIELD(ctx->cstorm_st_context.flags, CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN, ISCSI_DEFAULT_DATA_DIGEST);
2364     for (i = 0; i < pdev->iscsi_info.run_time.num_of_cqs; i++)
2365     {
2366         ctx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] = ISCSI_INITIAL_SN;
2367         ctx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] = ISCSI_INITIAL_SN;
2368     }
2369 
2370     /* now we need to configure the cdu-validation data */
2371     lm_set_cdu_validation_data(pdev, iscsi->cid, FALSE /* don't invalidate */);
2372 
2373     return LM_STATUS_SUCCESS;
2374 }
2375 
2376 
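/*******************************************************************************
 * Description: initialize the FCoE context of an offloaded connection. Unlike
 *              iSCSI, the context is simply zeroed before the CDU validation
 *              data is configured for the CID.
 * Return: LM_STATUS_SUCCESS or LM_STATUS_INVALID_PARAMETER.
 ******************************************************************************/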
2377 lm_status_t
2378 lm_fc_init_fcoe_context(
2379     IN struct _lm_device_t          *pdev,
2380     IN lm_fcoe_state_t              *fcoe)
2381 {
2382     struct fcoe_context *ctx;
2383     u32_t cid;
2384     u16_t conn_id;
2385 
2386     if (CHK_NULL(pdev) || CHK_NULL(fcoe))
2387     {
2388         return LM_STATUS_INVALID_PARAMETER;
2389     }
2390 
2391     conn_id = fcoe->ofld1.fcoe_conn_id;
2392     cid = fcoe->cid;
2393 
2394     DbgMessage(pdev, INFORM, "### lm_fc_init_fcoe_context\n");
2395 
2396     /* get context */
2397     fcoe->ctx_virt = (struct fcoe_context *)lm_get_context(pdev, fcoe->cid);
2398     DbgBreakIf(!fcoe->ctx_virt);
2399     fcoe->ctx_phys.as_u64 = lm_get_context_phys(pdev, fcoe->cid);
2400     DbgBreakIf(!fcoe->ctx_phys.as_u64);
2401     DbgMessage(pdev, VERBOSEl5sp,
2402                 "fcoe->ctx_virt=%p, fcoe->ctx_phys_high=%x, fcoe->ctx_phys_low=%x\n",
2403                 fcoe->ctx_virt, fcoe->ctx_phys.as_u32.high, fcoe->ctx_phys.as_u32.low);
2404 
2405     ctx = fcoe->ctx_virt;
2406 
2407     mm_memset(ctx, 0, sizeof(struct fcoe_context));
2408 
2409     /* now we need to configure the cdu-validation data */
2410     lm_set_cdu_validation_data(pdev, fcoe->cid, FALSE /* don't invalidate */);
2411 
2412     return LM_STATUS_SUCCESS;
2413 }
2414 
2415 
2416 
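/*******************************************************************************
 * Description: allocate the per-connection resources of an FCoE connection.
 *              Only a CID is allocated here; the ramrod buffer is shared and
 *              lives in pdev->fcoe_info.bind.
 * Return: LM_STATUS_SUCCESS, LM_STATUS_PENDING if the context is not yet
 *         available, or a failure status.
 ******************************************************************************/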
2417 lm_status_t
2418 lm_fc_alloc_con_resc(
2419     IN struct _lm_device_t          *pdev,
2420     IN lm_fcoe_state_t              *fcoe)
2421 {
2422     lm_status_t                     lm_status = LM_STATUS_SUCCESS;
2423     s32_t                           cid       = 0;
2424 
2425     if (CHK_NULL(pdev) || CHK_NULL(fcoe))
2426     {
2427         return LM_STATUS_INVALID_PARAMETER;
2428     }
2429 
2430     DbgMessage(pdev, INFORM, "### lm_fc_alloc_con_resc\n");
2431 
2432     /* save the miniport's conn id */
2433     fcoe->fcoe_conn_id = fcoe->ofld1.fcoe_conn_id;
2434 
2435     /* Allocate CID */
2436     lm_status = lm_allocate_cid(pdev, FCOE_CONNECTION_TYPE, (void *)fcoe, &cid);
2437     if (lm_status == LM_STATUS_PENDING)
2438     {
2439         lm_sp_req_manager_block(pdev, (u32_t)cid);
2440     }
2441     else if (lm_status != LM_STATUS_SUCCESS)
2442     {
2443         /* failed to allocate CID */
2444         lm_fc_free_con_resc(pdev, fcoe);
2445         return lm_status;
2446     }
2447 
2448     /* save the returned cid */
2449     fcoe->cid = (u32_t)cid;
2450 
2451     if (lm_cid_state(pdev, fcoe->cid) == LM_CID_STATE_PENDING)
2452     {
2453         return LM_STATUS_PENDING; /* Too soon to initialize context */
2454     }
2455 
2456     return LM_STATUS_SUCCESS;
2457 } /* lm_fc_alloc_con_resc */
2458 
2459 
2460 
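/*******************************************************************************
 * Description: copy the four OFFLOAD_CONN KWQEs into the shared ramrod buffer
 *              and post the FCOE_RAMROD_CMD_ID_OFFLOAD_CONN ramrod on the
 *              connection's CID.
 * Return: the status of lm_command_post().
 ******************************************************************************/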
2461 lm_status_t
2462 lm_fc_post_offload_ramrod(
2463     struct _lm_device_t             *pdev,
2464     lm_fcoe_state_t                 *fcoe)
2465 {
2466     lm_fcoe_slow_path_phys_data_t   *ramrod_params;
2467     lm_status_t                     lm_status;
2468 
2469     ramrod_params = (lm_fcoe_slow_path_phys_data_t*)pdev->fcoe_info.bind.ramrod_mem_virt;
2470 
2471     mm_memset(ramrod_params, 0, sizeof(lm_fcoe_slow_path_phys_data_t));
2472 
2473     memcpy(&ramrod_params->fcoe_ofld.offload_kwqe1, &fcoe->ofld1, sizeof(struct fcoe_kwqe_conn_offload1));
2474     memcpy(&ramrod_params->fcoe_ofld.offload_kwqe2, &fcoe->ofld2, sizeof(struct fcoe_kwqe_conn_offload2));
2475     memcpy(&ramrod_params->fcoe_ofld.offload_kwqe3, &fcoe->ofld3, sizeof(struct fcoe_kwqe_conn_offload3));
2476     memcpy(&ramrod_params->fcoe_ofld.offload_kwqe4, &fcoe->ofld4, sizeof(struct fcoe_kwqe_conn_offload4));
2477 
2478     lm_status = lm_command_post(pdev,
2479                                 fcoe->cid,
2480                                 FCOE_RAMROD_CMD_ID_OFFLOAD_CONN,
2481                                 CMD_PRIORITY_NORMAL,
2482                                 FCOE_CONNECTION_TYPE,
2483                                 pdev->fcoe_info.bind.ramrod_mem_phys.as_u64);
2484 
2485     return lm_status;
2486 }
2487 
2488 
2489 
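/*******************************************************************************
 * Description: copy the enable KWQE into the shared ramrod buffer and post
 *              the FCOE_RAMROD_CMD_ID_ENABLE_CONN ramrod.
 * Return: the status of lm_command_post().
 ******************************************************************************/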
2490 lm_status_t
2491 lm_fc_post_enable_ramrod(
2492     struct _lm_device_t                     *pdev,
2493     lm_fcoe_state_t                         *fcoe,
2494     struct fcoe_kwqe_conn_enable_disable    *enable)
2495 {
2496     lm_fcoe_slow_path_phys_data_t   *ramrod_params;
2497     lm_status_t                     lm_status;
2498 
2499     ramrod_params = (lm_fcoe_slow_path_phys_data_t*)pdev->fcoe_info.bind.ramrod_mem_virt;
2500 
2501     mm_memset(ramrod_params, 0, sizeof(lm_fcoe_slow_path_phys_data_t));
2502 
2503     memcpy(&ramrod_params->fcoe_enable.enable_disable_kwqe, enable, sizeof(struct fcoe_kwqe_conn_enable_disable));
2504 
2505     lm_status = lm_command_post(pdev,
2506                                 fcoe->cid,
2507                                 FCOE_RAMROD_CMD_ID_ENABLE_CONN,
2508                                 CMD_PRIORITY_NORMAL,
2509                                 FCOE_CONNECTION_TYPE,
2510                                 pdev->fcoe_info.bind.ramrod_mem_phys.as_u64);
2511 
2512     return lm_status;
2513 }
2514 
2515 
2516 
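/*******************************************************************************
 * Description: copy the disable KWQE into the shared ramrod buffer and post
 *              the FCOE_RAMROD_CMD_ID_DISABLE_CONN ramrod; its completion is
 *              followed by a terminate ramrod (see
 *              lm_fc_complete_slow_path_request()).
 * Return: the status of lm_command_post().
 ******************************************************************************/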
2517 lm_status_t
2518 lm_fc_post_disable_ramrod(
2519     struct _lm_device_t                    *pdev,
2520     lm_fcoe_state_t                        *fcoe,
2521     struct fcoe_kwqe_conn_enable_disable   *disable)
2522 {
2523     lm_fcoe_slow_path_phys_data_t   *ramrod_params;
2524     lm_status_t                     lm_status;
2525 
2526     ramrod_params = (lm_fcoe_slow_path_phys_data_t*)pdev->fcoe_info.bind.ramrod_mem_virt;
2527 
2528     mm_memset(ramrod_params, 0, sizeof(lm_fcoe_slow_path_phys_data_t));
2529 
2530     memcpy(&ramrod_params->fcoe_enable.enable_disable_kwqe, disable, sizeof *disable);
2531 
2532     lm_status = lm_command_post(pdev,
2533                                 fcoe->cid,
2534                                 FCOE_RAMROD_CMD_ID_DISABLE_CONN,
2535                                 CMD_PRIORITY_NORMAL,
2536                                 FCOE_CONNECTION_TYPE,
2537                                 pdev->fcoe_info.bind.ramrod_mem_phys.as_u64);
2538 
2539     return lm_status;
2540 }
2541 
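/*******************************************************************************
 * Description: post the FCOE_RAMROD_CMD_ID_DESTROY_FUNC ramrod on the FCoE
 *              client CID; no ramrod data is required.
 * Return: the status of lm_command_post().
 ******************************************************************************/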
2542 lm_status_t
2543 lm_fc_post_destroy_ramrod(
2544     struct _lm_device_t             *pdev)
2545 {
2546     lm_status_t                     lm_status;
2547 
2548     lm_status = lm_command_post(pdev,
2549                                 LM_CLI_CID(pdev, LM_CLI_IDX_FCOE),      /* cid */
2550                                 FCOE_RAMROD_CMD_ID_DESTROY_FUNC,
2551                                 CMD_PRIORITY_NORMAL,
2552                                 FCOE_CONNECTION_TYPE,
2553                                 0);
2554 
2555     return lm_status;
2556 }
2557 
2558 
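/*******************************************************************************
 * Description: copy the statistics KWQE into the shared ramrod buffer and
 *              post the FCOE_RAMROD_CMD_ID_STAT_FUNC ramrod on the FCoE
 *              client CID.
 * Return: the status of lm_command_post(), or LM_STATUS_RESOURCE if the
 *         ramrod buffer was never allocated.
 ******************************************************************************/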
2559 lm_status_t
2560 lm_fc_post_stat_ramrod(
2561     struct _lm_device_t         *pdev,
2562     struct fcoe_kwqe_stat       *stat)
2563 {
2564     lm_status_t                     lm_status = LM_STATUS_SUCCESS;
2565 
2566     lm_fcoe_slow_path_phys_data_t   *ramrod_params;
2567 
2568     if(CHK_NULL(pdev->fcoe_info.bind.ramrod_mem_virt))
2569     {
2570         return LM_STATUS_RESOURCE;
2571     }
2572     ramrod_params = (lm_fcoe_slow_path_phys_data_t*)pdev->fcoe_info.bind.ramrod_mem_virt;
2573 
2574     mm_memset(ramrod_params, 0, sizeof(lm_fcoe_slow_path_phys_data_t));
2575 
2576     memcpy(&ramrod_params->fcoe_stat.stat_kwqe, stat, sizeof(struct fcoe_kwqe_stat));
2577 
2578     lm_status = lm_command_post(pdev,
2579                                 LM_CLI_CID(pdev, LM_CLI_IDX_FCOE),      /* cid */
2580                                 FCOE_RAMROD_CMD_ID_STAT_FUNC,
2581                                 CMD_PRIORITY_NORMAL,
2582                                 FCOE_CONNECTION_TYPE,
2583                                 pdev->fcoe_info.bind.ramrod_mem_phys.as_u64);
2584 
2585     return lm_status;
2586 }
2587 
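/*******************************************************************************
 * Description: post the FCOE_RAMROD_CMD_ID_TERMINATE_CONN ramrod on the
 *              connection's CID; its completion is followed by a CFC delete
 *              (see lm_fc_complete_slow_path_request()).
 * Return: the status of lm_command_post().
 ******************************************************************************/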
2588 lm_status_t
2589 lm_fc_post_terminate_ramrod(
2590     struct _lm_device_t             *pdev,
2591     lm_fcoe_state_t                 *fcoe)
2592 {
2593     lm_status_t                     lm_status;
2594 
2595     lm_status = lm_command_post(pdev,
2596                                 fcoe->cid,
2597                                 FCOE_RAMROD_CMD_ID_TERMINATE_CONN,
2598                                 CMD_PRIORITY_NORMAL,
2599                                 FCOE_CONNECTION_TYPE,
2600                                 0);
2601 
2602     return lm_status;
2603 }
2604