xref: /titanic_44/usr/src/uts/common/io/bnxe/577xx/drivers/common/lm/device/lm_sp.c (revision d14abf155341d55053c76eeec58b787a456b753b)
1 /*******************************************************************************
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  *
21  * Copyright 2014 QLogic Corporation
22  * The contents of this file are subject to the terms of the
23  * QLogic End User License (the "License").
24  * You may not use this file except in compliance with the License.
25  *
26  * You can obtain a copy of the License at
27  * http://www.qlogic.com/Resources/Documents/DriverDownloadHelp/
28  * QLogic_End_User_Software_License.txt
29  * See the License for the specific language governing permissions
30  * and limitations under the License.
31  *
32  *
33  * Module Description:
34  *      This file contains the implementation of slow-path operations
35  *      for L2 + Common. It uses ecore_sp_verbs in most cases.
36  *
37  ******************************************************************************/
38 
39 #include "lm5710.h"
40 
41 #if !defined(__LINUX) && !defined(__SunOS)
42 // disable warning C4127 (conditional expression is constant)
43 // for this file (relevant when compiling with W4 warning level)
44 #pragma warning( disable : 4127 )
45 #endif /* __LINUX */
46 
47 #if !defined(__LINUX) && !defined(__SunOS)
48 #pragma warning( default : 4127 )
49 #endif
50 
51 #include "mm.h"
52 #include "context.h"
53 #include "command.h"
54 #include "bd_chain.h"
55 #include "ecore_common.h"
56 #include "ecore_sp_verbs.h"
57 #include "debug.h"
58 
59 typedef enum _ecore_status_t ecore_status_t;
60 
61 
62 
63 lm_status_t
lm_empty_ramrod_eth(IN struct _lm_device_t * pdev,IN const u32_t cid,IN u32_t data_cid,IN volatile u32_t * curr_state,IN u32_t new_state)64 lm_empty_ramrod_eth(IN struct _lm_device_t *pdev,
65                     IN const u32_t          cid,
66                     IN u32_t                data_cid,
67                     IN volatile u32_t       *curr_state,
68                     IN u32_t                new_state)
69 {
70     union eth_specific_data ramrod_data = {{0}};
71     lm_status_t             lm_status   = LM_STATUS_SUCCESS;
72 
73     DbgMessage(pdev, INFORMi|INFORMl2sp, "#lm_empty_ramrod_eth_conn, curr_state=%d\n",curr_state);
74 
75     ASSERT_STATIC(sizeof(ramrod_data) == sizeof(u64_t));
76 
77     //Prepare ramrod data
78     ramrod_data.update_data_addr.lo = data_cid;
79     ramrod_data.update_data_addr.hi = 0 ;
80 
81     // Send Empty ramrod.
82     lm_status = lm_sq_post(pdev,
83                            cid,
84                            RAMROD_CMD_ID_ETH_EMPTY,
85                            CMD_PRIORITY_MEDIUM,
86                            ETH_CONNECTION_TYPE,
87                            *(u64_t *)&ramrod_data );
88 
89     if (lm_status != LM_STATUS_SUCCESS)
90     {
91         return lm_status;
92     }
93 
94     /* curr_state may be NULL incase wait isn't required */
95     if (curr_state != NULL)
96     {
97         lm_status = lm_wait_state_change(pdev,
98                                          curr_state,
99                                          new_state);
100 
101         if ((lm_status != LM_STATUS_SUCCESS) && (lm_status != LM_STATUS_ABORTED))
102         {
103             DbgBreakMsg("lm_empty_ramrod_eth: lm_wait_state_change failed");
104         }
105     }
106 
107 
108 
109     return lm_status;
110 } /* lm_empty_ramrod_eth */
111 
112 
113 
lm_ecore_status_to_lm_status(const ecore_status_t ecore_status)114 static lm_status_t lm_ecore_status_to_lm_status( const ecore_status_t ecore_status )
115 {
116     lm_status_t lm_status = LM_STATUS_FAILURE;
117 
118     switch (ecore_status)
119     {
120     case ECORE_SUCCESS:
121         lm_status = LM_STATUS_SUCCESS;
122         break;
123 
124     case ECORE_TIMEOUT:
125         lm_status = LM_STATUS_TIMEOUT;
126         break;
127 
128     case ECORE_INVAL:
129        lm_status = LM_STATUS_INVALID_PARAMETER;
130        break;
131 
132     case ECORE_BUSY:
133         lm_status = LM_STATUS_BUSY;
134         break;
135 
136     case ECORE_NOMEM:
137         lm_status = LM_STATUS_RESOURCE;
138         break;
139 
140     case ECORE_PENDING:
141         lm_status = LM_STATUS_PENDING;
142         break;
143 
144     case ECORE_EXISTS:
145         lm_status = LM_STATUS_EXISTING_OBJECT;
146         break;
147 
148     case ECORE_IO:
149         lm_status = LM_STATUS_FAILURE;
150         break;
151 
152     default:
153         DbgBreakMsg("Unknwon ecore_status_t");
154         break;
155     }
156 
157     return lm_status;
158 }
159 
lm_is_eq_completion(lm_device_t * pdev)160 u8_t lm_is_eq_completion(lm_device_t *pdev)
161 {
162     lm_eq_chain_t * eq_chain = NULL;
163     u8_t            result   = FALSE;
164 
165     DbgBreakIf(!pdev);
166     if (!pdev || IS_VFDEV(pdev))
167     {
168         return FALSE;
169     }
170 
171     eq_chain = &pdev->eq_info.eq_chain;
172     if ( eq_chain->hw_con_idx_ptr && (mm_le16_to_cpu(*eq_chain->hw_con_idx_ptr) != lm_bd_chain_cons_idx(&eq_chain->bd_chain)))
173     {
174         result = TRUE;
175     }
176 
177     DbgMessage(pdev, INFORMeq, "lm_is_eq_completion: result is:%s\n", result? "TRUE" : "FALSE");
178 
179     return result;
180 }
181 
182 STATIC lm_status_t
lm_eth_init_client_init_general_data(IN lm_device_t * pdev,OUT struct client_init_general_data * general,IN const u8_t cid)183 lm_eth_init_client_init_general_data(IN         lm_device_t                     *pdev,
184                                      OUT        struct client_init_general_data *general,
185                                      IN const   u8_t                            cid)
186 {
187     const u8_t  stats_cnt_id  = LM_STATS_CNT_ID(pdev);
188     const u8_t  is_pfdev      = IS_PFDEV(pdev);
189     const u8_t  reg_cid        = (u8_t)lm_mp_get_reg_chain_from_chain(pdev,cid);
190     const u8_t  cos            = lm_mp_cos_from_chain(pdev, cid);
191     const u8_t  traffic_type  = LM_CHAIN_IDX_TRAFFIC_TYPE(pdev, cid);
192     lm_status_t lm_status     = LM_STATUS_SUCCESS;
193 
194     if( LLFC_DRIVER_TRAFFIC_TYPE_MAX == traffic_type)
195     {
196         DbgBreakMsg("lm_eth_init_client_init_general_data failed ");
197         return LM_STATUS_FAILURE;
198     }
199 
200     /* General Structure */
201     general->activate_flg          = 1;
202     general->client_id             = LM_FW_CLI_ID(pdev, reg_cid);
203     general->is_fcoe_flg           = (cid == FCOE_CID(pdev))? TRUE : FALSE;
204     general->statistics_en_flg     = (is_pfdev || (stats_cnt_id != 0xFF))? TRUE : FALSE;
205     general->statistics_counter_id = (general->statistics_en_flg)? stats_cnt_id : DISABLE_STATISTIC_COUNTER_ID_VALUE;
206     general->sp_client_id          = LM_FW_CLI_ID(pdev, reg_cid);
207     general->mtu                   = mm_cpu_to_le16((u16_t)pdev->params.l2_cli_con_params[cid].mtu);
208     general->func_id               = FUNC_ID(pdev); /* FIXME: VFID needs to be given here for VFs... */
209     // Don't care data for Non cos clients
210     if(lm_chain_type_not_cos == lm_mp_get_chain_type(pdev,cid))
211     {
212         // FW requires a valid COS number
213         general->cos                   = 0;
214     }
215     else
216     {
217         general->cos                   = cos;//The connection cos, if applicable only if STATIC_COS is set
218     }
219     general->traffic_type          = traffic_type;
220 
221     /* TODO: using path_has_ovlan for finding if it is UFP/BD mode or not is correct?
222      * does this needs to be done even in lm_vf.c lm_vf_pf_acquire_msg
223      * function? Also how do we handle the check in lm_pf_vf_check_compatibility
224      */
225     if(IS_MF_SD_MODE(pdev) && (IS_SD_UFP_MODE(pdev) || IS_SD_BD_MODE(pdev)) && general->is_fcoe_flg)
226         general->fp_hsi_ver            = ETH_FP_HSI_VER_2;
227     else
228         general->fp_hsi_ver            = ETH_FP_HSI_VER_1; // default is v1 since only when conditions above are true HSI is v2
229 
230     return lm_status;
231 }
232 
233 STATIC void
lm_eth_init_client_init_rx_data(IN lm_device_t * pdev,OUT struct client_init_rx_data * rx,IN const u8_t cid,IN const u8_t sb_id)234 lm_eth_init_client_init_rx_data(IN          lm_device_t                 *pdev,
235                                 OUT         struct client_init_rx_data  *rx,
236                                 IN const    u8_t                        cid,
237                                 IN const    u8_t                        sb_id)
238 {
239     lm_bd_chain_t * rx_chain_sge  = NULL;
240     lm_bd_chain_t * rx_chain_bd   = NULL;
241     u8_t            rel_cid       = 0;
242 
243     DbgBreakIf(cid == FWD_CID(pdev));
244 
245     rx_chain_sge = LM_RXQ_SGE_PTR_IF_VALID(pdev, cid);
246     rx_chain_bd  = &LM_RXQ_CHAIN_BD(pdev, cid);
247 
248     rx->status_block_id               = LM_FW_SB_ID(pdev, sb_id);
249     // TPA is enabled in run time.(TPA is disabled in init time)
250     rx->tpa_en                        = 0;
251     rx->max_agg_size                  = mm_cpu_to_le16(0); /* TPA related only  */;
252     rx->max_tpa_queues                = 0;
253 
254     rx->extra_data_over_sgl_en_flg    = (cid == OOO_CID(pdev))? TRUE : FALSE;
255     rx->cache_line_alignment_log_size = (u8_t)LOG2(CACHE_LINE_SIZE/* TODO mm_get_cache_line_alignment()*/);
256     rx->enable_dynamic_hc             = (u8_t)pdev->params.enable_dynamic_hc[HC_INDEX_ETH_RX_CQ_CONS];
257 
258     rx->outer_vlan_removal_enable_flg = IS_MULTI_VNIC(pdev)? TRUE: FALSE;
259     if(OOO_CID(pdev) == cid)
260     {
261         rx->inner_vlan_removal_enable_flg = 0;
262     }
263     else
264     {
265         rx->inner_vlan_removal_enable_flg = !pdev->params.keep_vlan_tag;
266 
267         if(IS_MF_AFEX_MODE(pdev))
268         {
269             // In NIV we must remove default VLAN.
270             rx->silent_vlan_removal_flg         = 1;
271             rx->silent_vlan_value               = mm_cpu_to_le16(NIV_DEFAULT_VLAN(pdev));
272             rx->silent_vlan_mask                = mm_cpu_to_le16(ETHERNET_VLAN_ID_MASK);
273         }
274 
275     }
276 
277     rx->bd_page_base.lo= mm_cpu_to_le32(lm_bd_chain_phys_addr(rx_chain_bd, 0).as_u32.low);
278     rx->bd_page_base.hi= mm_cpu_to_le32(lm_bd_chain_phys_addr(rx_chain_bd, 0).as_u32.high);
279 
280     rx->cqe_page_base.lo = mm_cpu_to_le32(lm_bd_chain_phys_addr(&pdev->rx_info.rcq_chain[cid].bd_chain, 0).as_u32.low);
281     rx->cqe_page_base.hi = mm_cpu_to_le32(lm_bd_chain_phys_addr(&pdev->rx_info.rcq_chain[cid].bd_chain, 0).as_u32.high);
282 
283 
284     if (cid == LM_SW_LEADING_RSS_CID(pdev))
285     {
286         /* TODO: for now... doesn't have to be leading cid, anyone can get the approx mcast... */
287         rx->is_leading_rss = TRUE;
288         rx->is_approx_mcast = TRUE;
289     }
290 
291     rx->approx_mcast_engine_id = FUNC_ID(pdev); /* FIMXE (MichalS) */
292     rx->rss_engine_id          = FUNC_ID(pdev); /* FIMXE (MichalS) */
293 
294     if(rx_chain_sge)
295     {
296         /* override bd_buff_size if we are in LAH enabled mode */
297         rx->max_bytes_on_bd     = mm_cpu_to_le16((u16_t)pdev->params.l2_cli_con_params[cid].lah_size);
298         rx->vmqueue_mode_en_flg = TRUE;
299         rx->max_sges_for_packet = LM_MAX_SGES_FOR_PACKET;
300         rx->sge_buff_size       = mm_cpu_to_le16(MAX_L2_CLI_BUFFER_SIZE(pdev, cid) - (u16_t)pdev->params.l2_cli_con_params[cid].lah_size - (u16_t)pdev->params.rcv_buffer_offset - CACHE_LINE_SIZE);
301 
302         rx->sge_page_base.hi    = mm_cpu_to_le32(lm_bd_chain_phys_addr(rx_chain_sge, 0).as_u32.high);
303         rx->sge_page_base.lo    = mm_cpu_to_le32(lm_bd_chain_phys_addr(rx_chain_sge, 0).as_u32.low);
304     }
305     else
306     {
307         rx->max_bytes_on_bd     = mm_cpu_to_le16(MAX_L2_CLI_BUFFER_SIZE(pdev, cid) - (u16_t)pdev->params.rcv_buffer_offset - CACHE_LINE_SIZE);
308         rx->vmqueue_mode_en_flg = FALSE;
309         rx->max_sges_for_packet = 0;
310         rx->sge_buff_size       = 0;
311 
312         rx->sge_page_base.hi    = 0;
313         rx->sge_page_base.lo    = 0;
314     }
315 
316     if (cid == OOO_CID(pdev))
317     {
318         rel_cid = cid - LM_MAX_RSS_CHAINS(pdev);
319         rx->client_qzone_id = LM_FW_AUX_QZONE_ID(pdev, rel_cid);
320         rx->rx_sb_index_number = HC_SP_INDEX_ISCSI_OOO_RX_CONS;
321     }
322     else if (cid == ISCSI_CID(pdev))
323     {
324         rel_cid = cid - LM_MAX_RSS_CHAINS(pdev);
325         rx->client_qzone_id = LM_FW_AUX_QZONE_ID(pdev, rel_cid);
326         rx->rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
327     }
328     else if (cid == FCOE_CID(pdev))
329     {
330         rel_cid = cid - LM_MAX_RSS_CHAINS(pdev);
331         rx->client_qzone_id = LM_FW_AUX_QZONE_ID(pdev, rel_cid);
332         rx->rx_sb_index_number = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
333     }
334     else if (cid < MAX_RX_CHAIN(pdev))
335     {
336         rx->client_qzone_id = LM_FW_DHC_QZONE_ID(pdev, sb_id);
337         rx->rx_sb_index_number = HC_INDEX_ETH_RX_CQ_CONS;
338     }
339     else
340     {
341         DbgMessage(NULL, FATAL, "Invalid cid 0x%x.\n", cid);
342         DbgBreakIf(1);
343     }
344 
345     // Avoiding rings thresholds verification is aimed for eVBD
346     // which receives its buffers and SGEs only after client init
347     // is completed.(eVBD receives the buffers and SGEs only after
348     // client setup is completed.)
349     rx->dont_verify_rings_pause_thr_flg = 1;
350 
351     /* FC */
352     if (pdev->params.l2_fw_flow_ctrl)
353     {
354         u16_t desired_cqe_bd_low_thresh;
355         u16_t desired_cqe_bd_high_thresh;
356         u16_t low_thresh;
357         u16_t high_thresh;
358         u16_t next_page_bds;
359 
360         next_page_bds = LM_RXQ_CHAIN_BD(pdev, cid).bds_skip_eop * LM_RXQ_CHAIN_BD(pdev, cid).page_cnt;
361         desired_cqe_bd_low_thresh = BRB_SIZE(pdev) + next_page_bds + FW_DROP_LEVEL(pdev);
362         desired_cqe_bd_high_thresh = desired_cqe_bd_low_thresh + DROPLESS_FC_HEADROOM;
363 
364         low_thresh  = mm_cpu_to_le16(min(desired_cqe_bd_low_thresh,  (u16_t)((LM_RXQ(pdev, cid).common.desc_cnt)/4)));
365         high_thresh = mm_cpu_to_le16(min(desired_cqe_bd_high_thresh, (u16_t)((LM_RXQ(pdev, cid).common.desc_cnt)/2)));
366 
367         rx->cqe_pause_thr_low  = low_thresh;
368         rx->bd_pause_thr_low   = low_thresh;
369         rx->sge_pause_thr_low  = 0;
370         rx->rx_cos_mask        = 1;
371         rx->cqe_pause_thr_high = high_thresh;
372         rx->bd_pause_thr_high  = high_thresh;
373         rx->sge_pause_thr_high = 0;
374     }
375 }
376 
377 STATIC void
lm_eth_init_client_init_tx_data(IN lm_device_t * pdev,OUT struct client_init_tx_data * tx,IN const u8_t cid,IN const u8_t sb_id)378 lm_eth_init_client_init_tx_data(IN          lm_device_t                 *pdev,
379                                 OUT         struct client_init_tx_data  *tx,
380                                 IN const    u8_t                        cid,
381                                 IN const    u8_t                        sb_id)
382 {
383 
384     /* Status block index init we do for Rx + Tx together so that we ask which cid we are only once */
385     if (cid == FWD_CID(pdev))
386     {
387         tx->tx_sb_index_number = HC_SP_INDEX_ETH_FW_TX_CQ_CONS;
388     }
389     else if (cid == OOO_CID(pdev))
390     {
391         // OOO CID doesn't really has a TX client this is don't
392         // care data for FW.
393         tx->tx_sb_index_number = HC_SP_INDEX_NOT_USED; /* D/C */
394     }
395     else if (cid == ISCSI_CID(pdev))
396     {
397         tx->tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
398     }
399     else if (cid == FCOE_CID(pdev))
400     {
401         tx->tx_sb_index_number = HC_SP_INDEX_ETH_FCOE_CQ_CONS;
402 
403         if (IS_MF_AFEX_MODE(pdev))
404         {
405             tx->force_default_pri_flg = TRUE;
406         }
407     }
408     else if (lm_chain_type_not_cos != lm_mp_get_chain_type(pdev, cid))
409     {
410         // This isn't realy cid it is the chain index
411         tx->tx_sb_index_number = lm_eth_tx_hc_cq_cons_cosx_from_chain(pdev, cid);
412     }
413     else
414     {
415         DbgMessage(NULL, FATAL, "Invalid cid 0x%x.\n", cid);
416         DbgBreakIf(1);
417     }
418 
419     /* TX Data (remaining , sb index above...)  */
420     /* ooo cid doesn't have a tx chain... */
421     if (cid != OOO_CID(pdev))
422     {
423         tx->tx_bd_page_base.hi = mm_cpu_to_le32(lm_bd_chain_phys_addr(&pdev->tx_info.chain[cid].bd_chain, 0).as_u32.high);
424         tx->tx_bd_page_base.lo = mm_cpu_to_le32(lm_bd_chain_phys_addr(&pdev->tx_info.chain[cid].bd_chain, 0).as_u32.low);
425     }
426     tx->tx_status_block_id = LM_FW_SB_ID(pdev, sb_id);
427     tx->enforce_security_flg = FALSE; /* TBD: turn on for KVM VF? */
428 
429     /* Tx Switching... */
430     if (IS_MF_SI_MODE(pdev) && pdev->params.npar_vm_switching_enable &&
431         (cid != FWD_CID(pdev)) && (cid != FCOE_CID(pdev)) && (cid != ISCSI_CID(pdev)))
432     {
433         tx->tx_switching_flg = TRUE;
434     }
435     else
436     {
437         tx->tx_switching_flg = FALSE;
438     }
439 
440     tx->tss_leading_client_id = LM_FW_CLI_ID(pdev, LM_SW_LEADING_RSS_CID(pdev));
441 
442     tx->refuse_outband_vlan_flg = 0;
443 
444 
445     // for encapsulated packets
446     // the hw ip header will be the inner ip header, the hw will incremnet the inner ip id.
447     // the fw ip header will be the outer ip header, this means that if the outer ip header is ipv4, its ip id will not be incremented.
448     tx->tunnel_lso_inc_ip_id = INT_HEADER;
449     // In case of non-Lso encapsulated packets with L4 checksum offload, the pseudo checksum location - on BD
450     tx->tunnel_non_lso_pcsum_location = CSUM_ON_BD;
451     // In case of non-Lso encapsulated packets with outer L3 ip checksum offload, the pseudo checksum location - on BD
452     tx->tunnel_non_lso_outer_ip_csum_location = CSUM_ON_BD;
453 }
454 
lm_get_sw_client_idx_from_cid(lm_device_t * pdev,u32_t cid)455 u32_t lm_get_sw_client_idx_from_cid(lm_device_t * pdev,
456                                     u32_t         cid)
457 {
458 
459     u32_t client_info_idx;
460 
461     /* If MP is enabled, we need to take care of tx-only connections, which use the
462      * regular connection client-idx... the rest are split into regular eth
463      * and vfs... */
464     if (MM_DCB_MP_L2_IS_ENABLE(pdev))
465     {
466         if (lm_chain_type_cos_tx_only == lm_mp_get_chain_type(pdev, cid))
467         {
468             client_info_idx = lm_mp_get_reg_chain_from_chain(pdev,cid);
469             return client_info_idx;
470         }
471     }
472 
473 #ifdef VF_INVOLVED
474     if (IS_CHANNEL_VIRT_MODE_MASTER_PFDEV(pdev))
475     {
476         client_info_idx = lm_pf_get_sw_client_idx_from_cid(pdev, cid);
477     }
478     else
479 #endif
480     {
481         client_info_idx = cid;
482     }
483 
484     return client_info_idx;
485 }
486 
lm_get_fw_client_idx_from_cid(lm_device_t * pdev,u32_t cid)487 u32_t lm_get_fw_client_idx_from_cid(lm_device_t * pdev,
488                                     u32_t         cid)
489 {
490     u32_t client_info_idx;
491     u32_t fw_client_id;
492 
493     /* If MP is enabled, we need to take care of tx-only connections, which use the
494      * regular connection client-idx... the rest are split into regular eth
495      * and vfs... */
496     if (MM_DCB_MP_L2_IS_ENABLE(pdev))
497     {
498         if (lm_chain_type_cos_tx_only == lm_mp_get_chain_type(pdev, cid))
499         {
500             client_info_idx = lm_mp_get_reg_chain_from_chain(pdev,cid);
501             return LM_FW_CLI_ID(pdev, client_info_idx);
502         }
503     }
504 
505 #ifdef VF_INVOLVED
506     if (IS_CHANNEL_VIRT_MODE_MASTER_PFDEV(pdev))
507     {
508         fw_client_id = lm_pf_get_fw_client_idx_from_cid(pdev, cid);
509     }
510     else
511 #endif
512     {
513         fw_client_id = LM_FW_CLI_ID(pdev, cid);
514     }
515 
516     return fw_client_id;
517 }
518 
519 STATIC lm_status_t
lm_eth_init_tx_queue_data(IN lm_device_t * pdev,IN const u8_t chain_id,IN const u8_t sb_id)520 lm_eth_init_tx_queue_data(IN       lm_device_t * pdev,
521                           IN const u8_t          chain_id,
522                           IN const u8_t          sb_id)
523 {
524     struct tx_queue_init_ramrod_data * tx_queue_init_data_virt = NULL;
525     u32_t                              client_info_idx         = 0;
526     lm_status_t                        lm_status               = LM_STATUS_SUCCESS;
527     u8_t                               cid                     = 0;
528 
529     if((lm_chain_type_cos_tx_only != lm_mp_get_chain_type(pdev,chain_id)) &&
530        (chain_id != FWD_CID(pdev)))
531     {
532         DbgBreakMsg("lm_eth_init_tx_queue_data: the chain isn't TX only " );
533         return LM_STATUS_FAILURE;
534     }
535 
536     /* a bit redundant, but just so we're clear on terminology... */
537     cid = chain_id;
538 
539     /* Since ramrods are sent sequentially for tx only clients, and then regular client, and
540      * we won't have a case of these being sent in parallel, we can safely use the client_init_data_virt
541      * of the regular eth connection for the tx only connection.
542      * This way, we don't need to allocate client_info for tx only connections.
543      */
544     client_info_idx = lm_get_sw_client_idx_from_cid(pdev, cid);
545 
546     tx_queue_init_data_virt = &(pdev->client_info[client_info_idx].client_init_data_virt->tx_queue);
547 
548     if CHK_NULL(tx_queue_init_data_virt)
549     {
550         return LM_STATUS_FAILURE;
551     }
552 
553     mm_mem_zero(tx_queue_init_data_virt , sizeof(struct tx_queue_init_ramrod_data));
554 
555     /* General Structure */
556     lm_status = lm_eth_init_client_init_general_data(pdev,
557                                                      &(tx_queue_init_data_virt->general),
558                                                      chain_id);
559 
560     if(LM_STATUS_SUCCESS != lm_status)
561     {
562         return lm_status;
563     }
564 
565     /* Tx Data */
566     lm_eth_init_client_init_tx_data(pdev,
567                                     &(tx_queue_init_data_virt->tx),
568                                     chain_id,
569                                     sb_id);
570 
571     return LM_STATUS_SUCCESS;
572 }
573 
lm_eth_init_client_init_data(lm_device_t * pdev,u8_t cid,u8_t sb_id)574 lm_status_t lm_eth_init_client_init_data(lm_device_t *pdev, u8_t cid, u8_t sb_id)
575 {
576     struct client_init_ramrod_data * client_init_data_virt = NULL;
577     lm_status_t                      lm_status             = LM_STATUS_SUCCESS;
578     const u32_t                      client_info_idx       = lm_get_sw_client_idx_from_cid(pdev, cid);
579 
580 
581     if (client_info_idx >= ARRSIZE(pdev->client_info))
582     {
583         DbgBreakIf(client_info_idx >= ARRSIZE(pdev->client_info));
584         return LM_STATUS_FAILURE;
585     }
586 
587     client_init_data_virt = &(pdev->client_info[client_info_idx].client_init_data_virt->init_data);
588 
589     if CHK_NULL(client_init_data_virt)
590     {
591         return LM_STATUS_FAILURE;
592     }
593 
594     mm_mem_zero(client_init_data_virt , sizeof(struct client_init_ramrod_data));
595 
596     /* General Structure */
597     lm_status = lm_eth_init_client_init_general_data(pdev,
598                                          &(client_init_data_virt->general),
599                                          cid);
600     if(LM_STATUS_SUCCESS != lm_status)
601     {
602         return lm_status;
603     }
604 
605     /* Rx Data */
606     lm_eth_init_client_init_rx_data(pdev,
607                                     &(client_init_data_virt->rx),
608                                     cid,
609                                     sb_id);
610 
611     /* Tx Data */
612     lm_eth_init_client_init_tx_data(pdev,
613                                     &(client_init_data_virt->tx),
614                                     cid,
615                                     sb_id);
616 
617     return LM_STATUS_SUCCESS;
618 }
619 
620 /**
621 
622  * @assumptions: STRONG ASSUMPTION: This function is not
623  *             called for SRIOV / MP connections...
624  */
lm_update_eth_client(IN struct _lm_device_t * pdev,IN const u8_t client_idx,IN const u16_t silent_vlan_value,IN const u16_t silent_vlan_mask,IN const u8_t silent_vlan_removal_flg,IN const u8_t silent_vlan_change_flg)625 lm_status_t lm_update_eth_client(IN struct _lm_device_t    *pdev,
626                                  IN const u8_t             client_idx,
627                                  IN const u16_t            silent_vlan_value,
628                                  IN const u16_t            silent_vlan_mask,
629                                  IN const u8_t             silent_vlan_removal_flg,
630                                  IN const u8_t             silent_vlan_change_flg
631                                  )
632 {
633     struct client_update_ramrod_data * client_update_data_virt = pdev->client_info[client_idx].update.data_virt;
634     lm_status_t                        lm_status               = LM_STATUS_FAILURE;
635     u32_t                              con_state               = 0;
636     const u32_t                        cid                     = client_idx; //lm_get_cid_from_sw_client_idx(pdev);
637 
638     if CHK_NULL(client_update_data_virt)
639     {
640         return LM_STATUS_FAILURE;
641     }
642 
643     mm_mem_zero(client_update_data_virt , sizeof(struct client_update_ramrod_data));
644 
645     MM_ACQUIRE_ETH_CON_LOCK(pdev);
646 
647     // We will send a client update ramrod in any case we can we don't optimize this flow.
648     // Client setup may already took the correct NIV value but the ramrod will be sent anyway
649     con_state = lm_get_con_state(pdev, cid);
650     if((LM_CON_STATE_OPEN != con_state) &&
651         (LM_CON_STATE_OPEN_SENT != con_state))
652     {
653         // Clinet is not in a state that it can recieve the ramrod
654         MM_RELEASE_ETH_CON_LOCK(pdev);
655         return LM_STATUS_ABORTED;
656     }
657 
658     /* We don't expect this function to be called for non eth regular connections.
659      * If we hit this assert it means we need support for SRIOV +  AFEX
660      */
661     if (cid >= MAX_RX_CHAIN(pdev))
662     {
663         DbgBreakIf(cid >= MAX_RX_CHAIN(pdev));
664         MM_RELEASE_ETH_CON_LOCK(pdev);
665         return LM_STATUS_FAILURE;
666     }
667 
668     DbgBreakIf( LM_CLI_UPDATE_NOT_USED != pdev->client_info[client_idx].update.state);
669 
670     pdev->client_info[client_idx].update.state = LM_CLI_UPDATE_USED;
671 
672     client_update_data_virt->client_id  = LM_FW_CLI_ID(pdev, client_idx);
673     client_update_data_virt->func_id    = FUNC_ID(pdev); /* FIXME: VFID needs to be given here for VFs... */
674 
675     client_update_data_virt->silent_vlan_value          = mm_cpu_to_le16(silent_vlan_value);
676     client_update_data_virt->silent_vlan_mask           = mm_cpu_to_le16(silent_vlan_mask);
677     client_update_data_virt->silent_vlan_removal_flg    = silent_vlan_removal_flg;
678     client_update_data_virt->silent_vlan_change_flg     = silent_vlan_change_flg;
679 
680     client_update_data_virt->refuse_outband_vlan_flg        = 0;
681     client_update_data_virt->refuse_outband_vlan_change_flg = 0;
682 
683     lm_status = lm_sq_post(pdev,
684                            cid,
685                            RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
686                            CMD_PRIORITY_MEDIUM,
687                            ETH_CONNECTION_TYPE,
688                            pdev->client_info[client_idx].update.data_phys.as_u64);
689 
690     MM_RELEASE_ETH_CON_LOCK(pdev);
691 
692 
693     if (lm_status != LM_STATUS_SUCCESS)
694     {
695         return lm_status;
696     }
697 
698     lm_status = lm_wait_state_change(pdev, &pdev->client_info[client_idx].update.state, LM_CLI_UPDATE_RECV);
699 
700     pdev->client_info[client_idx].update.state = LM_CLI_UPDATE_NOT_USED;
701 
702     return lm_status;
703 }
704 
lm_establish_eth_con(struct _lm_device_t * pdev,u8_t const chain_idx,u8_t sb_id,u8_t attributes_bitmap)705 lm_status_t lm_establish_eth_con(struct _lm_device_t *pdev, u8_t const chain_idx, u8_t sb_id, u8_t attributes_bitmap)
706 {
707     lm_status_t     lm_status       = LM_STATUS_SUCCESS;
708     u8_t            cmd_id          = 0;
709     u8_t            type            = 0;
710     lm_rcq_chain_t* rcq_chain       = NULL;
711     const u8_t      cid             = chain_idx; /* redundant, but here for terminology sake... */
712     u32_t           client_info_idx = 0;
713 
714     DbgMessage(pdev, INFORMi|INFORMl2sp, "#lm_establish_eth_con, cid=%d\n",cid);
715 
716     if (IS_PFDEV(pdev))
717     {
718         MM_ACQUIRE_ETH_CON_LOCK(pdev);
719     }
720 
721     lm_set_con_state(pdev, cid, LM_CON_STATE_CLOSE);
722     if (IS_PFDEV(pdev))
723     {
724         /* TODO: VF??? */
725         if( LM_CLIENT_ATTRIBUTES_REG_CLI == GET_FLAGS(attributes_bitmap,LM_CLIENT_ATTRIBUTES_REG_CLI ))
726         {
727             // Regular client or OOO CID
728             DbgBreakIf( LM_CLIENT_ATTRIBUTES_RX != GET_FLAGS(attributes_bitmap,LM_CLIENT_ATTRIBUTES_RX ));
729             lm_status = lm_eth_init_client_init_data(pdev, cid, sb_id);
730         }
731         else
732         {
733             // TX only client or FWD
734             DbgBreakIf( LM_CLIENT_ATTRIBUTES_RX == GET_FLAGS(attributes_bitmap,LM_CLIENT_ATTRIBUTES_RX ));
735             lm_status = lm_eth_init_tx_queue_data(pdev, cid, sb_id);
736         }
737 
738         if(LM_STATUS_SUCCESS != lm_status)
739         {
740             DbgBreakMsg("lm_establish_eth_con: lm_eth_init_client_init_data or lm_eth_init_tx_queue_data failed \n ");
741             if (IS_PFDEV(pdev))
742             {
743                 MM_RELEASE_ETH_CON_LOCK(pdev);
744             }
745             return lm_status;
746         }
747 
748         lm_init_connection_context(pdev, cid, sb_id);
749     }
750 
751     /* When we setup the RCQ ring we should advance the CQ cons by MAX_NUM_RAMRODS - the FWD CID is the only connection without an RCQ
752      * therefore we skip this operation for forward */
753     if( LM_CLIENT_ATTRIBUTES_REG_CLI == GET_FLAGS(attributes_bitmap,LM_CLIENT_ATTRIBUTES_REG_CLI ))
754     {
755         DbgBreakIf( LM_CLIENT_ATTRIBUTES_RX != GET_FLAGS(attributes_bitmap,LM_CLIENT_ATTRIBUTES_RX ));
756         cmd_id = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
757         rcq_chain = &LM_RCQ(pdev, cid);
758         if (IS_PFDEV(pdev))
759         {
760             lm_bd_chain_bds_produced(&rcq_chain->bd_chain, ETH_MIN_RX_CQES_WITH_TPA_E1H_E2);
761         }
762     }
763     else
764     {
765         DbgBreakIf( LM_CLIENT_ATTRIBUTES_RX == GET_FLAGS(attributes_bitmap,LM_CLIENT_ATTRIBUTES_RX ));
766         if (cid == FWD_CID(pdev))
767         {
768             cmd_id = RAMROD_CMD_ID_ETH_FORWARD_SETUP;
769         }
770         else if(lm_chain_type_cos_tx_only == lm_mp_get_chain_type(pdev,cid))
771         {
772             cmd_id = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
773         }
774         else
775         {
776             DbgBreakMsg(" lm_establish_eth_con: cmd_id not set ");
777             if (IS_PFDEV(pdev))
778             {
779                 MM_RELEASE_ETH_CON_LOCK(pdev);
780             }
781             return LM_STATUS_FAILURE;
782         }
783     }
784 
785     // Move to state ramrod sent must be done before ramrod is realy sent
786     lm_set_con_state(pdev, cid, LM_CON_STATE_OPEN_SENT);
787 
788     client_info_idx = lm_get_sw_client_idx_from_cid(pdev, cid);
789 
790     if (IS_PFDEV(pdev))
791     {
792         lm_status = lm_sq_post(pdev,
793                                cid,
794                                cmd_id,
795                                CMD_PRIORITY_MEDIUM,
796                                type,
797                                pdev->client_info[client_info_idx].client_init_data_phys.as_u64);
798     }
799 #ifdef VF_INVOLVED
800     else
801     {
802         lm_status = lm_vf_queue_init(pdev, cid);
803     }
804 #endif
805 
806     if (lm_status != LM_STATUS_SUCCESS)
807     {
808         lm_set_con_state(pdev, cid, LM_CON_STATE_CLOSE);
809         if (IS_PFDEV(pdev))
810         {
811             MM_RELEASE_ETH_CON_LOCK(pdev);
812         }
813         return lm_status;
814     }
815 
816     if (IS_PFDEV(pdev))
817     {
818         MM_RELEASE_ETH_CON_LOCK(pdev);
819     }
820 
821     lm_status = lm_eth_wait_state_change(pdev, LM_CON_STATE_OPEN, cid);
822 
823 
824     return lm_status;
825 } /* lm_establish_eth_con */
826 
827 
828 /**
829  * @description
830  * Send all the ramrods and wait for there return.
831  * @param pdev
832  * @param chain_idx_base
833  *
834  * @return lm_status_t
835  * status success is returned if all the ramrods where received.
836  * Status failure is returned if not all the ramrods were
837  * received.
838  */
839 lm_status_t
lm_tpa_send_ramrods_wait(IN lm_device_t * pdev,IN const u8_t chain_idx_base)840 lm_tpa_send_ramrods_wait(IN lm_device_t  *pdev,
841                          IN const u8_t   chain_idx_base)
842 {
843     lm_tpa_info_t   *tpa_info   = &LM_TPA_INFO(pdev);
844     lm_status_t     lm_status   = LM_STATUS_SUCCESS;
845 
846     DbgBreakIf(NULL != tpa_info->update_cookie);
847     DbgBreakIf(0 != tpa_info->ramrod_recv_cnt);
848 
849     lm_status = lm_tpa_send_ramrods(pdev,
850                                     chain_idx_base);
851 
852     if(LM_STATUS_SUCCESS != lm_status)
853     {
854         DbgBreakMsg(" Ramrod send failed ");
855         return lm_status;
856     }
857 
858     lm_status = lm_wait_state_change(pdev, &tpa_info->state, TPA_STATE_NONE);
859 
860     return lm_status;
861 }
862 
863 /**
864  * @description
865  * Update the ramrod IPVX according to the current and required
866  * state.
867  * @param pdev
868  * @param chain_idx
869  * @param vbd_rsc_ipvx_bit - The VBD TPA ipvX bit.
870  *
871  * @return STATIC u8_t - The HSI IPVX eth_tpa_update_command
872  */
873 u8_t
lm_tpa_ramrod_update_ipvx(IN lm_device_t * pdev,IN const u8_t chain_idx,IN const u8_t vbd_tpa_ipvx_bit)874 lm_tpa_ramrod_update_ipvx(IN lm_device_t   *pdev,
875                           IN const u8_t          chain_idx,
876                           IN const u8_t          vbd_tpa_ipvx_bit)
877 {
878     // Add ramrod send code
879     const lm_tpa_info_t*    tpa_info    = &LM_TPA_INFO(pdev);
880     u8_t                    ramrod_ipvx = 0;
881 
882     if(GET_FLAGS(tpa_info->ipvx_enabled_required, vbd_tpa_ipvx_bit) ==
883        GET_FLAGS(tpa_info->ipvx_enabled_current, vbd_tpa_ipvx_bit))
884     {
885         ramrod_ipvx = TPA_UPDATE_NONE_COMMAND;
886     }
887     else if(GET_FLAGS(tpa_info->ipvx_enabled_required, vbd_tpa_ipvx_bit))
888     {
889         ramrod_ipvx = TPA_UPDATE_ENABLE_COMMAND;
890     }
891     else
892     {
893         ramrod_ipvx = TPA_UPDATE_DISABLE_COMMAND;
894     }
895     return ramrod_ipvx;
896 }
897 
898 /**
899  * @description
900  * Fill and send TPA ramrod.
901  * @param pdev
902  * @param chain_idx
903  */
904 STATIC lm_status_t
lm_tpa_send_ramrod(IN lm_device_t * pdev,IN const u8_t chain_idx)905 lm_tpa_send_ramrod(IN lm_device_t   *pdev,
906                    IN const u8_t    chain_idx)
907 {
908     // Add ramrod send code
909     const lm_tpa_chain_t*   tpa_chain       = &LM_TPA( pdev, chain_idx );
910     const lm_bd_chain_t*    tpa_chain_bd    = &LM_TPA_CHAIN_BD(pdev, chain_idx);
911     lm_status_t             lm_status       = LM_STATUS_SUCCESS;
912 
913     if((CHK_NULL(tpa_chain->ramrod_data_virt)) ||
914        (lm_tpa_state_enable != tpa_chain->state)||
915        pdev->params.rss_chain_cnt <= chain_idx)
916     {
917         DbgBreakMsg("lm_tpa_send_ramrod : invalid paramters");
918         return LM_STATUS_FAILURE;
919     }
920 
921     tpa_chain->ramrod_data_virt->update_ipv4   =  lm_tpa_ramrod_update_ipvx(pdev,
922                                                       chain_idx,
923                                                       TPA_IPV4_ENABLED);
924 
925     tpa_chain->ramrod_data_virt->update_ipv6   =  lm_tpa_ramrod_update_ipvx(pdev,
926                                                       chain_idx,
927                                                       TPA_IPV6_ENABLED);
928 
929     /* TPA mode to use (LRO or GRO) */
930     tpa_chain->ramrod_data_virt->tpa_mode       = TPA_LRO;
931 
932     tpa_chain->ramrod_data_virt->client_id     = LM_FW_CLI_ID(pdev, chain_idx);
933     /* maximal TPA queues allowed for this client */
934     tpa_chain->ramrod_data_virt->max_tpa_queues        = LM_TPA_MAX_AGGS;
935     /* The maximal number of SGEs that can be used for one packet. depends on MTU and SGE size. must be 0 if SGEs are disabled */
936     tpa_chain->ramrod_data_virt->max_sges_for_packet   = DIV_ROUND_UP_BITS(pdev->params.l2_cli_con_params[chain_idx].mtu, LM_TPA_PAGE_BITS);
937     // Avoiding rings thresholds verification is aimed for eVBD
938     // which receives its buffers and SGEs only after client init
939     // is completed.(eVBD receives the buffers and SGEs only after
940     // client setup is completed.)
941     tpa_chain->ramrod_data_virt->dont_verify_rings_pause_thr_flg = 1;
942     /* Size of the buffers pointed by SGEs */
943     ASSERT_STATIC(LM_TPA_PAGE_SIZE < MAX_VARIABLE_VALUE(tpa_chain->ramrod_data_virt->sge_buff_size));
944     tpa_chain->ramrod_data_virt->sge_buff_size         = mm_cpu_to_le16(LM_TPA_PAGE_SIZE);
945     /* maximal size for the aggregated TPA packets, reprted by the host */
946     ASSERT_STATIC((LM_TPA_MAX_AGG_SIZE * LM_TPA_PAGE_SIZE) < MAX_VARIABLE_VALUE(tpa_chain->ramrod_data_virt->max_agg_size));
947     tpa_chain->ramrod_data_virt->max_agg_size          = mm_cpu_to_le16(LM_TPA_MAX_AGG_SIZE * LM_TPA_PAGE_SIZE);
948     //u32_t sge_page_base_lo /* The address to fetch the next sges from (low) */;
949     tpa_chain->ramrod_data_virt->sge_page_base_lo      = mm_cpu_to_le32(tpa_chain_bd->bd_chain_phy.as_u32.low);
950     //u32_t sge_page_base_hi /* The address to fetch the next sges from (high) */;
951     tpa_chain->ramrod_data_virt->sge_page_base_hi      = mm_cpu_to_le32(tpa_chain_bd->bd_chain_phy.as_u32.high);
952     //u16_t sge_pause_thr_low /* number of remaining sges under which, we send pause message */;
953     tpa_chain->ramrod_data_virt->sge_pause_thr_low     = mm_cpu_to_le16(LM_TPA_SGE_PAUSE_THR_LOW);
954     //u16_t sge_pause_thr_high /* number of remaining sges above which, we send un-pause message */;
955     tpa_chain->ramrod_data_virt->sge_pause_thr_high    = mm_cpu_to_le16(LM_TPA_SGE_PAUSE_THR_HIGH);
956 
957     lm_sq_post(pdev,
958                chain_idx,
959                RAMROD_CMD_ID_ETH_TPA_UPDATE,
960                CMD_PRIORITY_MEDIUM,
961                ETH_CONNECTION_TYPE,
962                *(u64_t *)&(tpa_chain->ramrod_data_phys));
963 
964     DbgBreakIf(lm_status != LM_STATUS_SUCCESS);
965 
966     return lm_status;
967 }
968 
969 
970 /**
971  * @description
972  * Run on all RSS chains and send the ramrod on each one.
973  * @param pdev
974  * @param chain_idx_base
975  */
976 lm_status_t
lm_tpa_send_ramrods(IN lm_device_t * pdev,IN const u8_t chain_idx_base)977 lm_tpa_send_ramrods(IN lm_device_t  *pdev,
978                     IN const u8_t   chain_idx_base)
979 {
980     lm_tpa_info_t*  tpa_info    = &LM_TPA_INFO(pdev);
981     lm_status_t     lm_status   = LM_STATUS_SUCCESS;
982     u8_t            chain_idx   = 0;
983     u8_t            rss_idx     = 0;
984 
985     // Number of ramrods expected in receive
986     tpa_info->ramrod_recv_cnt = pdev->params.rss_chain_cnt;
987     tpa_info->state = TPA_STATE_RAMROD_SENT;
988 #ifdef VF_INVOLVED
989     if (IS_VFDEV(pdev))
990     {
991         tpa_info->ramrod_recv_cnt++;
992         lm_status = lm_vf_pf_update_rsc(pdev);
993         if (lm_status == LM_STATUS_SUCCESS) {
994             lm_status = lm_vf_pf_wait_no_messages_pending(pdev);
995             if ((lm_status == LM_STATUS_SUCCESS) && (0 == mm_atomic_dec((u32_t*)(&tpa_info->ramrod_recv_cnt))))
996             {
997                 tpa_info->ipvx_enabled_current = tpa_info->ipvx_enabled_required;
998                 if (tpa_info->update_cookie)
999                 {
1000                     void* cookie = (void *)tpa_info->update_cookie;
1001                     tpa_info->update_cookie = NULL;
1002                     mm_set_done(pdev, 0, cookie);
1003                 }
1004             }
1005         }
1006 
1007     }
1008     else
1009 #endif
1010     {
1011         LM_FOREACH_RSS_IDX(pdev, rss_idx)
1012         {
1013             chain_idx = chain_idx_base + RSS_ID_TO_CID(rss_idx);
1014             lm_status = lm_tpa_send_ramrod(pdev,
1015                                            chain_idx);
1016 
1017             if(LM_STATUS_SUCCESS != lm_status)
1018             {
1019                 DbgBreakMsg(" Ramrod send failed ");
1020                 break;
1021             }
1022         }
1023     }
1024 
1025     return lm_status;
1026 }
1027 
1028 /**
1029  * @description
1030  * Fill and send function_update_data ramrod.
1031  * @param pdev
1032  */
1033 lm_status_t
lm_encap_send_ramrod(IN lm_device_t * pdev,u8_t new_encap_offload_state,void * cookie)1034 lm_encap_send_ramrod(IN lm_device_t *pdev, u8_t new_encap_offload_state, void* cookie)
1035 {
1036     lm_encap_info_t* encaps_info = &(pdev->encap_info);
1037     struct function_update_data*    data        = LM_SLOWPATH(pdev, encap_function_update_data);
1038     const lm_address_t              data_phys   = LM_SLOWPATH_PHYS(pdev, encap_function_update_data);
1039     lm_status_t                     lm_status   = LM_STATUS_SUCCESS;
1040 
1041     // check that we are not in the middle of handling another encapsulated packets offload set request (1 pending)
1042     DbgBreakIf(encaps_info->new_encap_offload_state != encaps_info->current_encap_offload_state);
1043     DbgBreakIf(encaps_info->update_cookie);
1044 
1045     encaps_info->new_encap_offload_state = new_encap_offload_state;
1046 
1047     if (encaps_info->new_encap_offload_state == encaps_info->current_encap_offload_state)
1048     {
1049         DbgMessage(pdev, VERBOSEencap, "no change in encapsulated packets offload state\n");
1050         return lm_status;
1051     }
1052 
1053     // remember this for mm_set_done call (called on completion of the ramrod)
1054     // mm_set_done will free memory of query_set_info
1055     encaps_info->update_cookie = cookie;
1056 
1057     // GRE config for the function will be updated according to the gre_tunnel_rss and nvgre_clss_en fields
1058     data->update_tunn_cfg_flg = TRUE;
1059 
1060     if (ENCAP_OFFLOAD_DISABLED == pdev->encap_info.new_encap_offload_state)
1061     {
1062         data->tunn_clss_en  = 0;
1063         data->tunnel_mode = TUNN_MODE_NONE;
1064     }
1065     else
1066     {
1067         data->tunn_clss_en  = 1;
1068         data->tunnel_mode = TUNN_MODE_GRE;
1069         data->gre_tunnel_type = NVGRE_TUNNEL;
1070     }
1071 
1072     data->echo = FUNC_UPDATE_RAMROD_SOURCE_ENCAP;
1073 
1074     lm_status = lm_sq_post(pdev,
1075                            0, //Don't care
1076                            RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE,
1077                            CMD_PRIORITY_NORMAL,
1078                            NONE_CONNECTION_TYPE,
1079                            data_phys.as_u64);
1080 
1081     if (lm_status != LM_STATUS_SUCCESS)
1082     {
1083         return lm_status;
1084     }
1085     return LM_STATUS_PENDING;
1086 }
1087 /**
1088  * This function is a general eq ramrod fuanction that waits
1089  * synchroniously for it's completion.
1090  *
1091  * @param pdev
1092  * cmd_id -The ramrod command ID
1093  * data -ramrod data
1094  * curr_state - what to poll on
1095  * curr_state Current state.
1096  * new_state - what we're waiting for.
1097  * @return lm_status_t SUCCESS / TIMEOUT on waiting for
1098  *         completion
1099  */
1100 lm_status_t
lm_eq_ramrod_post_sync(IN struct _lm_device_t * pdev,IN u8_t cmd_id,IN u64_t data,IN u8_t ramrod_priority,IN volatile u32_t * p_curr_state,IN u32_t curr_state,IN u32_t new_state)1101 lm_eq_ramrod_post_sync( IN struct _lm_device_t  *pdev,
1102                         IN u8_t                 cmd_id,
1103                         IN u64_t                data,
1104                         IN u8_t                 ramrod_priority,
1105                         IN volatile u32_t       *p_curr_state,
1106                         IN u32_t                curr_state,
1107                         IN u32_t                new_state)
1108 {
1109 
1110     lm_status_t lm_status = LM_STATUS_SUCCESS;
1111 
1112     DbgMessage(pdev, INFORMeq|INFORMl2sp, "#lm_eq_ramrod\n");
1113 
1114     *p_curr_state = curr_state;
1115 
1116     lm_status = lm_sq_post(pdev,
1117                            0, //Don't care
1118                            cmd_id,
1119                            ramrod_priority,
1120                            NONE_CONNECTION_TYPE,
1121                            data );
1122 
1123     if (lm_status != LM_STATUS_SUCCESS)
1124     {
1125         return lm_status;
1126     }
1127 
1128     lm_status = lm_wait_state_change(pdev,
1129                                      p_curr_state,
1130                                      new_state);
1131 
1132     return lm_status;
1133 } /* lm_eq_ramrod_post_sync */
1134 
1135 static lm_status_t
lm_halt_eth_con(struct _lm_device_t * pdev,u32_t cid,const u8_t send_ramrod)1136 lm_halt_eth_con(struct _lm_device_t *pdev, u32_t cid,
1137                 const u8_t send_ramrod)
1138 {
1139     union eth_specific_data ramrod_data     = {{0}};
1140     lm_address_t            data_mapping    = {{0}};
1141     lm_status_t             lm_status       = LM_STATUS_SUCCESS  ;
1142     u32_t                   fw_client_idx   = 0xFFFFFFFF;
1143     u32_t                   con_state       = 0;
1144 
1145     fw_client_idx = lm_get_fw_client_idx_from_cid(pdev, cid);
1146 
1147     ASSERT_STATIC(sizeof(ramrod_data) == sizeof(u64_t));
1148 
1149 
1150     con_state = lm_get_con_state(pdev, cid);
1151     DbgMessage(pdev, WARN/*INFORMi|INFORMl2sp*/, "#lm_halt_eth_con cid=%d fw_client_idx=%d client_info=%d(%d)\n",cid, fw_client_idx,
1152                 cid,con_state);
1153 
1154 
1155     if (ERR_IF(con_state != LM_CON_STATE_OPEN))
1156     {
1157         DbgBreakIf(!DBG_BREAK_ON(UNDER_TEST));
1158         return LM_STATUS_FAILURE;
1159     }
1160     if (IS_PFDEV(pdev))
1161     {
1162         MM_ACQUIRE_ETH_CON_LOCK(pdev);
1163     }
1164 
1165     if(FALSE == send_ramrod)
1166     {
1167         lm_set_con_state(pdev, cid, LM_CON_STATE_HALT);
1168         DbgMessage(pdev, WARNl2sp, "lm_close_eth_con:The HALT ramrod isn't sent \n");
1169         if (IS_PFDEV(pdev))
1170         {
1171             MM_RELEASE_ETH_CON_LOCK(pdev);
1172         }
1173         return LM_STATUS_SUCCESS;
1174     }
1175     // Send ramrod
1176     lm_set_con_state(pdev, cid, LM_CON_STATE_HALT_SENT);
1177     ramrod_data.halt_ramrod_data.client_id = fw_client_idx; //LM_FW_CLI_ID(pdev, client_info_idx);
1178 
1179     /* convert halt_ramrod_data to a big-endian friendly format */
1180     data_mapping.as_u32.low = ramrod_data.halt_ramrod_data.client_id;
1181 
1182     lm_status = lm_sq_post(pdev,
1183                            cid,
1184                            RAMROD_CMD_ID_ETH_HALT,
1185                            CMD_PRIORITY_MEDIUM,
1186                            ETH_CONNECTION_TYPE,
1187                            data_mapping.as_u64);
1188 
1189     if (IS_PFDEV(pdev))
1190     {
1191         MM_RELEASE_ETH_CON_LOCK(pdev);
1192     }
1193 
1194     if (lm_status != LM_STATUS_SUCCESS)
1195     {
1196         return lm_status;
1197     }
1198 
1199     lm_status = lm_eth_wait_state_change(pdev, LM_CON_STATE_HALT, cid);
1200 
1201     return lm_status;
1202 } /* lm_halt_eth_con */
1203 
lm_terminate_eth_con(struct _lm_device_t * pdev,u32_t const cid)1204 lm_status_t lm_terminate_eth_con(struct _lm_device_t *pdev,
1205                                  u32_t const          cid)
1206 {
1207     lm_status_t lm_status  = LM_STATUS_SUCCESS;
1208 
1209     DbgMessage(pdev, INFORMi|INFORMl2sp, "#lm_terminate_eth_con, cid=%d \n",cid);
1210 
1211     if (ERR_IF(lm_get_con_state(pdev, cid) != LM_CON_STATE_HALT))
1212     {
1213         DbgBreak();
1214         return LM_STATUS_FAILURE;
1215     }
1216 
1217     if (IS_VFDEV(pdev))
1218     {
1219         lm_set_con_state(pdev, cid, LM_CON_STATE_TERMINATE);
1220         return LM_STATUS_SUCCESS; /* Not supported for VFs */
1221     }
1222 
1223     lm_status = lm_sq_post(pdev,
1224                            cid,
1225                            RAMROD_CMD_ID_ETH_TERMINATE,
1226                            CMD_PRIORITY_MEDIUM,
1227                            ETH_CONNECTION_TYPE,
1228                            0);
1229 
1230     if (lm_status != LM_STATUS_SUCCESS)
1231     {
1232         return lm_status;
1233     }
1234 
1235     lm_status = lm_eth_wait_state_change(pdev, LM_CON_STATE_TERMINATE, cid);
1236 
1237     return lm_status;
1238 }
1239 
lm_cfc_del_eth_con(struct _lm_device_t * pdev,u32_t const cid)1240 static lm_status_t lm_cfc_del_eth_con(struct _lm_device_t *pdev,
1241                                       u32_t const          cid)
1242 {
1243 /* VIA PF!!!!!!*/
1244     lm_status_t lm_status       = LM_STATUS_SUCCESS;
1245 
1246     DbgMessage(pdev, INFORMi|INFORMl2sp, "#lm_cfc_del_eth_con, cid=%d\n",cid);
1247 
1248     if (ERR_IF(lm_get_con_state(pdev, cid) != LM_CON_STATE_TERMINATE))
1249     {
1250         DbgBreak();
1251         return LM_STATUS_FAILURE;
1252     }
1253 
1254     lm_status = lm_sq_post(pdev,
1255                            cid,
1256                            RAMROD_CMD_ID_COMMON_CFC_DEL,
1257                            CMD_PRIORITY_MEDIUM,
1258                            NONE_CONNECTION_TYPE,
1259                            0);
1260 
1261     if (lm_status != LM_STATUS_SUCCESS)
1262     {
1263         return lm_status;
1264     }
1265 
1266     lm_status = lm_eth_wait_state_change(pdev, LM_CON_STATE_CLOSE, cid);
1267 
1268     return lm_status;
1269 } /* lm_cfc_del_eth_con */
1270 
1271 
1272 
lm_establish_forward_con(struct _lm_device_t * pdev)1273 lm_status_t lm_establish_forward_con(struct _lm_device_t *pdev)
1274 {
1275     lm_status_t lm_status = LM_STATUS_SUCCESS;
1276     u8_t const  fwd_cid   = FWD_CID(pdev);
1277 
1278     DbgMessage(pdev, INFORMi | INFORMl2sp, "lm_establish_forward_con\n");
1279     lm_status = lm_establish_eth_con(pdev, fwd_cid, DEF_STATUS_BLOCK_INDEX , LM_CLIENT_ATTRIBUTES_TX);
1280     if (lm_status != LM_STATUS_SUCCESS) {
1281         DbgMessage(pdev, FATAL, "lm_establish_forward_con failed\n");
1282         return lm_status;
1283     }
1284 
1285     DbgMessage(pdev,INFORMi | INFORMl2sp, "Establish forward connection ramrod completed\n");
1286 
1287     return LM_STATUS_SUCCESS;
1288 }
1289 
lm_close_forward_con(struct _lm_device_t * pdev)1290 lm_status_t lm_close_forward_con(struct _lm_device_t *pdev)
1291 {
1292     lm_status_t lm_status = LM_STATUS_SUCCESS;
1293     u8_t const  fwd_cid   = FWD_CID(pdev);
1294 
1295     /* halt and terminate ramrods (lm_{halt,terminate}_eth_con) are not sent for the forward channel connection.
1296        therefore we just change the state from OPEN to TERMINATE, and send the cfc del ramrod */
1297     DbgBreakIf(lm_get_con_state(pdev, fwd_cid) != LM_CON_STATE_OPEN);
1298     lm_set_con_state(pdev, fwd_cid, LM_CON_STATE_TERMINATE);
1299 
1300     lm_status = lm_cfc_del_eth_con(pdev,fwd_cid);
1301     if (lm_status != LM_STATUS_SUCCESS) {
1302         return lm_status;
1303     }
1304 
1305     DbgMessage(pdev,INFORMi | INFORMl2sp, "lm_close_forward_con completed\n");
1306 
1307     return LM_STATUS_SUCCESS;
1308 }
1309 
lm_close_eth_con(struct _lm_device_t * pdev,u32_t const cid,const u8_t send_halt_ramrod)1310 lm_status_t lm_close_eth_con(struct _lm_device_t *pdev,
1311                              u32_t    const cid,
1312                              const  u8_t   send_halt_ramrod)
1313 {
1314     lm_status_t lm_status;
1315     u8_t max_eth_cid;
1316 
1317     if (lm_fl_reset_is_inprogress(pdev)) {
1318         lm_set_con_state(pdev, cid, LM_CON_STATE_CLOSE);
1319         DbgMessage(pdev, FATAL, "lm_chip_stop: Under FLR: \"close\" cid=%d.\n", cid);
1320         return LM_STATUS_SUCCESS;
1321     }
1322 
1323 #ifdef VF_INVOLVED
1324     if (IS_CHANNEL_VFDEV(pdev)) {
1325         lm_status = lm_vf_queue_close(pdev, (u8_t)cid);
1326         return lm_status;
1327     }
1328 #endif
1329 
1330 
1331     lm_status = lm_halt_eth_con(pdev,cid, send_halt_ramrod);
1332 
1333     if (lm_status != LM_STATUS_SUCCESS)
1334     {
1335         return lm_status;
1336     }
1337 
1338     lm_status = lm_terminate_eth_con(pdev,cid);
1339     if (lm_status != LM_STATUS_SUCCESS)
1340     {
1341         return lm_status;
1342     }
1343 
1344     lm_status = lm_cfc_del_eth_con(pdev,cid);
1345     if (lm_status != LM_STATUS_SUCCESS)
1346     {
1347         return lm_status;
1348     }
1349 
1350     if (MM_DCB_MP_L2_IS_ENABLE(pdev))
1351     {
1352         max_eth_cid = lm_mp_max_cos_chain_used(pdev);
1353     }
1354     else
1355     {
1356         max_eth_cid = MAX_RX_CHAIN(pdev);
1357     }
1358     if (cid < max_eth_cid) {
1359         lm_status = lm_clear_eth_con_resc(pdev,(u8_t)cid);
1360     }
1361 
1362     if (lm_status != LM_STATUS_SUCCESS)
1363     {
1364         return lm_status;
1365     }
1366 
1367     DbgMessage(pdev,INFORMi | INFORMl2sp, "lm_close_eth_con completed for cid=%d\n", cid);
1368 
1369     return LM_STATUS_SUCCESS;
1370 }
lm_eth_wait_state_change(struct _lm_device_t * pdev,u32_t new_state,u32_t cid)1371 lm_status_t lm_eth_wait_state_change(struct _lm_device_t *pdev, u32_t new_state, u32_t cid)
1372 {
1373     lm_cid_resc_t * cid_resc = lm_cid_resc(pdev, cid);
1374 
1375     if (CHK_NULL(cid_resc))
1376     {
1377         return LM_STATUS_INVALID_PARAMETER;
1378     }
1379 
1380     return lm_wait_state_change(pdev, &cid_resc->con_state, new_state);
1381 
1382 } /* lm_eth_wait_state_change */
1383 
1384 /**lm_func_update_post_command Post a func_update ramrod and
1385  * wait for its completion.
1386  * Must be called from a work item.
1387  *
1388  * @param pdev the device
1389  * @param command the ramrod cmd_id (NONE_CONNECTION_TYPE is
1390  *                assumed)
1391  * @param data the ramrod data
1392  *
1393  * @return lm_status_t LM_STATUS_SUCCESS on success, some other
1394  *         failure code on failure.
1395  */
1396 lm_status_t
lm_l2mp_func_update_command(IN lm_device_t * pdev,IN const struct function_update_data * func_data)1397 lm_l2mp_func_update_command( IN lm_device_t                         *pdev,
1398                              IN const struct function_update_data   *func_data)
1399 {
1400     lm_status_t                     lm_status   = LM_STATUS_FAILURE;
1401     struct function_update_data*    data        = LM_SLOWPATH(pdev, l2mp_func_update_data);
1402     lm_address_t                    data_phys   = LM_SLOWPATH_PHYS(pdev, l2mp_func_update_data);
1403 
1404     DbgBreakIf(pdev->slowpath_info.l2mp_func_update_ramrod_state != L2MP_FUNC_UPDATE_RAMROD_NOT_POSTED);
1405 
1406     mm_memcpy(data, func_data, sizeof(struct function_update_data));
1407 
1408     data->echo = FUNC_UPDATE_RAMROD_SOURCE_L2MP;
1409 
1410     lm_status = lm_eq_ramrod_post_sync(pdev,RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, data_phys.as_u64,CMD_PRIORITY_NORMAL,&pdev->slowpath_info.l2mp_func_update_ramrod_state, L2MP_FUNC_UPDATE_RAMROD_POSTED, L2MP_FUNC_UPDATE_RAMROD_COMPLETED);
1411     if (LM_STATUS_SUCCESS != lm_status)
1412     {
1413         DbgBreakIf(LM_STATUS_SUCCESS != lm_status);
1414         goto _exit;
1415     }
1416 
1417 _exit:
1418     pdev->slowpath_info.l2mp_func_update_ramrod_state = L2MP_FUNC_UPDATE_RAMROD_NOT_POSTED;
1419     return lm_status;
1420 }
1421 
1422 /*********************** NIV **************************************/
1423 
lm_niv_post_command(struct _lm_device_t * pdev,IN const u8_t command,IN const u64_t data,IN const u32_t curr_state)1424 lm_status_t lm_niv_post_command(struct _lm_device_t         *pdev,
1425                                 IN const u8_t               command,
1426                                 IN const u64_t              data,
1427                                 IN const u32_t              curr_state)
1428 {
1429     lm_status_t              lm_status        = LM_STATUS_SUCCESS;
1430     const niv_ramrod_state_t niv_ramrod_state = curr_state;
1431 
1432     DbgBreakIf((NIV_RAMROD_COMPLETED  == curr_state)||
1433                (NIV_RAMROD_NOT_POSTED == curr_state));
1434 
1435     DbgBreakIf(pdev->slowpath_info.niv_ramrod_state != NIV_RAMROD_NOT_POSTED);
1436 
1437     lm_status = lm_eq_ramrod_post_sync(pdev,command,data,CMD_PRIORITY_NORMAL,&pdev->slowpath_info.niv_ramrod_state, niv_ramrod_state, NIV_RAMROD_COMPLETED);
1438     if (LM_STATUS_SUCCESS != lm_status)
1439     {
1440         DbgBreakIf(LM_STATUS_SUCCESS != lm_status);
1441         goto _exit;
1442     }
1443 
1444 _exit:
1445     pdev->slowpath_info.niv_ramrod_state = NIV_RAMROD_NOT_POSTED;
1446     return lm_status;
1447 }
1448 
lm_niv_vif_update(struct _lm_device_t * pdev,IN const u16_t vif_id,IN const u16_t default_vlan,IN const u8_t allowed_priorities)1449 lm_status_t lm_niv_vif_update(struct _lm_device_t *pdev,
1450                               IN const u16_t       vif_id,
1451                               IN const u16_t       default_vlan,
1452                               IN const u8_t        allowed_priorities)
1453 {
1454     lm_status_t                         lm_status   = LM_STATUS_FAILURE;
1455     struct function_update_data*        data        = LM_SLOWPATH(pdev, niv_function_update_data);
1456     lm_address_t                        data_phys   = LM_SLOWPATH_PHYS(pdev, niv_function_update_data);
1457 
1458     data->vif_id_change_flg              = TRUE;
1459     data->vif_id                         = mm_cpu_to_le16(vif_id);
1460     data->afex_default_vlan_change_flg   = TRUE;
1461     data->afex_default_vlan              = mm_cpu_to_le16(default_vlan);
1462     data->allowed_priorities_change_flg  = TRUE;
1463     data->allowed_priorities             = allowed_priorities;
1464 
1465     data->network_cos_mode_change_flg    = FALSE;
1466     data->lb_mode_en                     = FALSE; //if a VIF update was received it means we're connected to a switch, so we're not in LB mode.
1467     data->lb_mode_en_change_flg          = 1;
1468     data->echo                           = FUNC_UPDATE_RAMROD_SOURCE_NIV;
1469 
1470     lm_status = lm_niv_post_command(pdev,RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, data_phys.as_u64, NIV_RAMROD_VIF_UPDATE_POSTED);
1471 
1472     return lm_status;
1473 }
1474 
lm_niv_vif_list_update(struct _lm_device_t * pdev,IN const enum vif_list_rule_kind command,IN const u16_t list_index,IN const u8_t func_bit_map,IN const u8_t func_to_clear)1475 lm_status_t lm_niv_vif_list_update(struct _lm_device_t *pdev,
1476                                    IN const enum vif_list_rule_kind command,
1477                                    IN const u16_t                   list_index,
1478                                    IN const u8_t                    func_bit_map,
1479                                    IN const u8_t                    func_to_clear)
1480 {
1481     struct afex_vif_list_ramrod_data data      = {0};
1482     lm_status_t                      lm_status = LM_STATUS_FAILURE;
1483 
1484     data.func_bit_map          = func_bit_map;
1485     data.func_to_clear         = func_to_clear;
1486     data.afex_vif_list_command = command;
1487     data.vif_list_index        = list_index;
1488     data.echo                  = command;
1489 
1490     lm_status = lm_niv_post_command(pdev,RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, *((u64_t*)(&data)), NIV_RAMROD_VIF_LISTS_POSTED);
1491 
1492     return lm_status;
1493 }
1494 
1495 
1496 
1497 /****************** CLASSIFICATION ********************************/
1498 /**
1499  * Set/Unset a mac-address or mac-vlan pair on a given chain.
1500  *
1501  * @param pdev
1502  * @param mac_addr  - array of size ETHERNET_ADDRESS_SIZE
1503  *                    containing a valid mac addresses
1504  * @param vlan_tag  - vlan tag to be set with mac address
1505  * @param chain_idx - which chain to set the mac on. Chain_idx
1506  *                    will be transformed to a l2 client-id
1507  * @param cookie    - will be returned to MM layer on completion
1508  * @param set       - set or remove mac address
1509  * @param is_encap_inner_mac_filter - set if we filter according
1510  *                                  to inner mac (VMQ offload of
1511  *                                  encapsulated packets)
1512  *
1513  * @return lm_status_t SUCCESS on syncrounous success, PENDING
1514  *         if completion will be called later, FAILURE o/w
1515  */
lm_set_mac_addr(struct _lm_device_t * pdev,u8_t * mac_addr,u16_t vlan_tag,u8_t chain_idx,void * cookie,const u8_t b_set,u8_t is_encap_inner_mac_filter)1516 lm_status_t lm_set_mac_addr(struct _lm_device_t *pdev,
1517                             u8_t                *mac_addr,
1518                             u16_t               vlan_tag,
1519                             u8_t                chain_idx,
1520                             void*               cookie,
1521                             const u8_t          b_set,
1522                             u8_t                is_encap_inner_mac_filter)
1523 {
1524     struct ecore_vlan_mac_ramrod_params ramrod_param  = { 0 };
1525     lm_status_t                         lm_status     = LM_STATUS_FAILURE;
1526     ecore_status_t                      ecore_status  = ECORE_SUCCESS;
1527     lm_cli_idx_t                        lm_cli_idx    = LM_CLI_IDX_MAX;
1528     u8_t                                cid           = chain_idx; // FIXME!!!
1529 
1530     if ERR_IF(!mac_addr)
1531     {
1532         DbgBreakMsg("lm_set_mac_addr: invalid params\n");
1533         return LM_STATUS_INVALID_PARAMETER;
1534     }
1535 
1536     if (lm_reset_is_inprogress(pdev))
1537     {
1538         DbgMessage(pdev, FATAL, "lm_set_mac_addr: Under FLR!!!\n");
1539         return  LM_STATUS_SUCCESS;
1540     }
1541 
1542 #ifdef VF_INVOLVED
1543     if (IS_CHANNEL_VFDEV(pdev))
1544     {
1545         lm_status = lm_vf_pf_set_q_filters(pdev, LM_CLI_IDX_NDIS, cookie, Q_FILTER_MAC, mac_addr, ETHERNET_ADDRESS_SIZE,vlan_tag, b_set);
1546         return lm_status;
1547     }
1548 #endif
1549 
1550     DbgMessage(pdev, WARN/*INFORMl2sp*/, "lm_set_mac_addr: b_set=%d chain_idx=%d!!!\n", b_set, chain_idx);
1551     DbgMessage(pdev, INFORMl2sp, "lm_set_mac_addr: [%02x]:[%02x]:[%02x]:[%02x]:[%02x]:[%02x]!!!\n",
1552                                    mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3], mac_addr[4], mac_addr[5]);
1553 
1554     /* Prepare ramrod params to be sent to ecore layer... */
1555     if (vlan_tag != LM_SET_CAM_NO_VLAN_FILTER)
1556     {
1557         DbgBreakIf(CHIP_IS_E1(pdev));
1558 
1559         ASSERT_STATIC( ETHERNET_ADDRESS_SIZE == sizeof(ramrod_param.user_req.u.vlan_mac.mac) );
1560 
1561         mm_memcpy( ramrod_param.user_req.u.vlan_mac.mac, mac_addr, sizeof(ramrod_param.user_req.u.vlan_mac.mac));
1562         ramrod_param.user_req.u.vlan_mac.vlan = vlan_tag;
1563         ramrod_param.user_req.u.vlan_mac.is_inner_mac = is_encap_inner_mac_filter;
1564 
1565         ramrod_param.vlan_mac_obj = &pdev->client_info[chain_idx].mac_vlan_obj;
1566     }
1567     else
1568     {
1569         ASSERT_STATIC( ETHERNET_ADDRESS_SIZE == sizeof(ramrod_param.user_req.u.mac.mac) );
1570 
1571         mm_memcpy( ramrod_param.user_req.u.mac.mac, mac_addr, sizeof(ramrod_param.user_req.u.mac.mac) );
1572         ramrod_param.user_req.u.mac.is_inner_mac = is_encap_inner_mac_filter;
1573 
1574         ramrod_param.vlan_mac_obj = &pdev->client_info[chain_idx].mac_obj;
1575     }
1576     /* Set the cookie BEFORE sending the ramrod!!!! ramrod may complete in the mean time... */
1577     DbgBreakIf(pdev->client_info[cid].set_mac_cookie != NULL);
1578     pdev->client_info[cid].set_mac_cookie = cookie;
1579 
1580     ramrod_param.user_req.cmd = b_set ? ECORE_VLAN_MAC_ADD : ECORE_VLAN_MAC_DEL;
1581 
1582     lm_cli_idx = LM_CHAIN_IDX_CLI(pdev, chain_idx);
1583 
1584     SET_BIT( ramrod_param.ramrod_flags, RAMROD_EXEC );
1585 
1586     switch (lm_cli_idx)
1587     {
1588     case LM_CLI_IDX_NDIS:
1589         SET_BIT (ramrod_param.user_req.vlan_mac_flags, ECORE_ETH_MAC);
1590         break;
1591 
1592     case LM_CLI_IDX_ISCSI:
1593         SET_BIT (ramrod_param.user_req.vlan_mac_flags, ECORE_ISCSI_ETH_MAC);
1594         break;
1595 
1596     default:
1597         /* Nothing... */
1598         break;
1599     }
1600 
1601     ecore_status = ecore_config_vlan_mac(pdev, &ramrod_param );
1602     lm_status    = lm_ecore_status_to_lm_status(ecore_status);
1603 
1604     if( LM_STATUS_PENDING != lm_status )
1605     {
1606         pdev->client_info[cid].set_mac_cookie = NULL; // rollback
1607     }
1608     return lm_status;
1609 }
1610 
1611 /**
1612  * Set/Unset a vlan on a given chain.
1613  *      Setting/unsetting a vlan is a bit more complex than
1614  *      setting a mac address and is therefore implemented in a
1615  *      separate function. It require deleting a previous vlan
1616  *      tag if one was set, and changing rx-filtering rules. The
1617  *      change in rx-filtering rules has to do with "any-vlan".
1618  *      If no vlan is set we want "any-vlan" otherwise we want
1619  *      to remove the any-vlan, this requires another ramrod.
1620  *      The way this is implemented is as follows:
1621  *          1. prepare vlan add/remove commands without
1622  *          executing them (sp-verbs feature don't send EXEC)
1623  *          2. If need to set rx-mask, turn on a flag that will
1624  *          be checked on completion of rx-mask, in
1625  *          lm_eq_handle_rx_filter.., we look at this flag and
1626  *          if it's on execute the vlan pending command
1627  *          (sp-verbs CONT feature).
1628  *
1629  * @param pdev
1630  * @param vlan_tag  - vlan tag to be set
1631  * @param chain_idx - which chain to set the vlan on. Chain_idx
1632  *                    will be transformed to a l2 client-id
1633  * @param cookie    - will be returned to MM layer on completion
1634  * @param set       - set or remove vlan
1635  *
1636  * @return lm_status_t SUCCESS on syncrounous success, PENDING
1637  *         if completion will be called later, FAILURE o/w
1638  */
lm_set_vlan_only(struct _lm_device_t * pdev,u16_t vlan_tag,u8_t chain_idx,void * cookie,const u8_t b_set)1639 lm_status_t lm_set_vlan_only(struct _lm_device_t *pdev,
1640                              u16_t               vlan_tag,
1641                              u8_t                chain_idx,
1642                              void*               cookie,
1643                              const u8_t          b_set )
1644 {
1645     struct ecore_vlan_mac_ramrod_params ramrod_param       = { 0 };
1646     lm_status_t                         lm_status          = LM_STATUS_FAILURE;
1647     ecore_status_t                      ecore_status       = ECORE_SUCCESS;
1648     lm_cli_idx_t                        lm_cli_idx         = LM_CLI_IDX_MAX;
1649     u8_t                                cid                = chain_idx; // FIXME!!!
1650     u8_t                                b_set_rx_mask      = FALSE;
1651 
1652     if (lm_reset_is_inprogress(pdev))
1653     {
1654         DbgMessage(pdev, FATAL, "lm_set_mac_addr: Under FLR!!!\n");
1655         return  LM_STATUS_SUCCESS;
1656     }
1657 
1658 #ifdef VF_INVOLVED
1659     if (IS_CHANNEL_VFDEV(pdev))
1660     {
1661         /* 9/22/11 Michals: should we support this for VFs??? */
1662         return LM_STATUS_FAILURE;
1663     }
1664 #endif
1665 
1666     DbgMessage(pdev, INFORMl2sp, "lm_set_vlan_only: b_set=%d chain_idx=%d!!!\n", b_set, chain_idx);
1667 
1668     /* Prepare ramrod params to be sent to ecore layer... */
1669     if (CHIP_IS_E1x(pdev))
1670     {
1671         DbgMessage(pdev, WARN/*INFORMl2sp*/, "lm_set_vlan_only: not supported for E1x!!!\n");
1672         return LM_STATUS_FAILURE;
1673     }
1674 
1675     ramrod_param.vlan_mac_obj = &pdev->client_info[chain_idx].vlan_obj;
1676     if (pdev->client_info[chain_idx].current_set_vlan == vlan_tag)
1677     {
1678         return LM_STATUS_EXISTING_OBJECT;
1679     }
1680 
1681     /* Set the cookie BEFORE sending the ramrod!!!! ramrod may complete in the mean time... */
1682     DbgBreakIf(pdev->client_info[cid].set_mac_cookie != NULL);
1683     pdev->client_info[cid].set_mac_cookie = cookie;
1684 
1685 
1686     if (b_set)
1687     {
1688         /* If we're just setting vlan, check if we need to delete the old one first... */
1689         if (pdev->client_info[chain_idx].current_set_vlan != 0)
1690         {
1691             ramrod_param.user_req.u.vlan.vlan = pdev->client_info[chain_idx].current_set_vlan;
1692             ramrod_param.user_req.cmd = ECORE_VLAN_MAC_DEL;
1693 
1694             ecore_status = ecore_config_vlan_mac(pdev, &ramrod_param );
1695             /* don't really care about the status... */
1696         }
1697 
1698         /* Prepare for the setting... */
1699         ramrod_param.user_req.u.vlan.vlan = vlan_tag;
1700     }
1701     else
1702     {
1703         ramrod_param.user_req.u.vlan.vlan = pdev->client_info[chain_idx].current_set_vlan;
1704     }
1705 
1706     pdev->client_info[chain_idx].current_set_vlan = vlan_tag;
1707 
1708     ramrod_param.user_req.cmd = b_set ? ECORE_VLAN_MAC_ADD : ECORE_VLAN_MAC_DEL;
1709 
1710     lm_cli_idx = LM_CHAIN_IDX_CLI(pdev, chain_idx);
1711 
1712     /* Determine if rx-mask needs to be changed as a result of this update. */
1713     b_set_rx_mask = (( b_set &&  pdev->client_info[cid].b_any_vlan_on) ||
1714                      (!b_set && !pdev->client_info[cid].b_any_vlan_on) );
1715 
1716     /* If we don't need to change the mask we need to execute commands now, otherwise they'll
1717        be executed from rx filter completion */
1718     if (!b_set_rx_mask )
1719     {
1720         SET_BIT( ramrod_param.ramrod_flags, RAMROD_EXEC );
1721     }
1722 
1723     ecore_status = ecore_config_vlan_mac(pdev, &ramrod_param );
1724     lm_status    = lm_ecore_status_to_lm_status(ecore_status);
1725 
1726     if( (LM_STATUS_PENDING != lm_status) )
1727     {
1728         pdev->client_info[cid].set_mac_cookie = NULL; /* rollback */
1729         return lm_status;
1730     }
1731 
1732     /* see function description to understand this better... */
1733     if (b_set_rx_mask)
1734     {
1735         pdev->client_info[chain_idx].b_vlan_only_in_process = TRUE;
1736         lm_status = lm_set_rx_mask(pdev, cid, pdev->client_info[cid].last_set_rx_mask, NULL);
1737     }
1738 
1739     return lm_status;
1740 }
1741 /**
1742  *  Move a filter from one chain idx to another atomically
1743  *
1744  * @param pdev
1745  *
1746  * @param mac_addr       - array of size ETHERNET_ADDRESS_SIZE
1747  *                         containing a valid mac addresses
1748  * @param vlan_tag       - vlan tag to be set with mac address
1749  * @param src_chain_idx  - which chain to remove the mac from
1750  * @param dest_chain_idx - which chain to set the mac on
1751  * @param cookie         - will be returned to MM layer on completion
1752  *
1753  * @return lm_status_t
1754  */
lm_move_mac_addr(struct _lm_device_t * pdev,u8_t * mac_addr,u16_t vlan_tag,u8_t src_chain_idx,u8_t dest_chain_idx,void * cookie,u8_t is_encap_inner_mac_filter)1755 lm_status_t lm_move_mac_addr(struct _lm_device_t *pdev, u8_t *mac_addr, u16_t vlan_tag,
1756                              u8_t src_chain_idx,  u8_t dest_chain_idx, void * cookie, u8_t is_encap_inner_mac_filter)
1757 {
1758     struct ecore_vlan_mac_ramrod_params ramrod_param = { 0 };
1759     struct ecore_vlan_mac_obj          *dest_obj     = NULL;
1760     lm_status_t                         lm_status    = LM_STATUS_FAILURE;
1761     ecore_status_t                      ecore_status = ECORE_SUCCESS;
1762     u8_t                                sw_client_id       = src_chain_idx;
1763 
1764     if ERR_IF(!pdev || !mac_addr)
1765     {
1766         DbgBreakMsg("lm_move_mac_addr: invalid params\n");
1767         return LM_STATUS_INVALID_PARAMETER;
1768     }
1769 
1770     if (lm_reset_is_inprogress(pdev))
1771     {
1772         DbgMessage(pdev, FATAL, "lm_move_mac_addr: Under FLR!!!\n");
1773         return  LM_STATUS_SUCCESS;
1774     }
1775 
1776 #ifdef VF_INVOLVED
1777     if (IS_CHANNEL_VFDEV(pdev))
1778     {
1779         DbgBreakMsg("lm_move_mac_addr: Move not expected on VF\n");
1780         return lm_status;
1781     }
1782 #endif
1783 
1784     DbgMessage(pdev, INFORMl2sp, "lm_move_mac_addr: src_chain_idx=%d dest_chain_idx=%d!!!\n",
1785                src_chain_idx, dest_chain_idx);
1786     DbgMessage(pdev, INFORMl2sp, "lm_move_mac_addr: [%d]:[%d]:[%d]:[%d]:[%d]:[%d] set=%d chain_idx=%d!!!\n",
1787                mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3], mac_addr[4], mac_addr[5], mac_addr[6]);
1788 
1789     /* Prepare ramrod params to be sent to ecore layer... */
1790     if (vlan_tag != LM_SET_CAM_NO_VLAN_FILTER)
1791     {
1792         mm_memcpy( ramrod_param.user_req.u.vlan_mac.mac, mac_addr, sizeof(ramrod_param.user_req.u.vlan_mac.mac));
1793         ramrod_param.user_req.u.vlan_mac.vlan = vlan_tag;
1794 	ramrod_param.user_req.u.vlan_mac.is_inner_mac = is_encap_inner_mac_filter;
1795         ramrod_param.vlan_mac_obj = &pdev->client_info[src_chain_idx].mac_vlan_obj;
1796         dest_obj = &pdev->client_info[dest_chain_idx].mac_vlan_obj;
1797     }
1798     else
1799     {
1800         mm_memcpy( ramrod_param.user_req.u.mac.mac, mac_addr, sizeof(ramrod_param.user_req.u.mac.mac) );
1801 	ramrod_param.user_req.u.mac.is_inner_mac = is_encap_inner_mac_filter;
1802 
1803         ramrod_param.vlan_mac_obj = &pdev->client_info[src_chain_idx].mac_obj;
1804         dest_obj = &pdev->client_info[dest_chain_idx].mac_obj;
1805     }
1806 
1807 
1808     /* Set the cookie BEFORE sending the ramrod!!!! ramrod may complete in the mean time... */
1809     DbgBreakIf(pdev->client_info[sw_client_id].set_mac_cookie != NULL);
1810     pdev->client_info[sw_client_id].set_mac_cookie = cookie;
1811 
1812     ramrod_param.user_req.cmd = ECORE_VLAN_MAC_MOVE;
1813 
1814     ramrod_param.user_req.target_obj = dest_obj;
1815 
1816     SET_BIT( ramrod_param.ramrod_flags, RAMROD_EXEC );
1817 
1818     ecore_status = ecore_config_vlan_mac(pdev, &ramrod_param );
1819 
1820     lm_status    = lm_ecore_status_to_lm_status(ecore_status);
1821 
1822     if ( LM_STATUS_PENDING == lm_status )
1823     {
1824         /* FIXME: VF MACS in NIG stay??*/
1825     }
1826     else
1827     {
1828         pdev->client_info[sw_client_id].set_mac_cookie = NULL; // rollback
1829     }
1830     return lm_status;
1831 }
1832 
1833 /**
1834  * @Description
1835  *      Waits for the last set-mac called to complete, could be
1836  *      set-mac or set-mac-vlan...
1837  * @param pdev
1838  * @param chain_idx - the same chain-idx that the set-mac was
1839  *                  called on
1840  *
1841  * @return lm_status_t SUCCESS or TIMEOUT
1842  */
lm_wait_set_mac_done(struct _lm_device_t * pdev,u8_t chain_idx)1843 lm_status_t lm_wait_set_mac_done(struct _lm_device_t *pdev, u8_t chain_idx)
1844 {
1845     struct ecore_vlan_mac_obj *mac_obj      = &pdev->client_info[chain_idx].mac_obj;
1846     struct ecore_vlan_mac_obj *mac_vlan_obj = &pdev->client_info[chain_idx].mac_vlan_obj;
1847     ecore_status_t            ecore_status  = mac_obj->wait(pdev, mac_obj);
1848     lm_status_t               lm_status     = lm_ecore_status_to_lm_status(ecore_status);
1849 
1850     if (lm_status != LM_STATUS_SUCCESS)
1851     {
1852         DbgBreakIf(lm_status != LM_STATUS_SUCCESS);
1853         return lm_status;
1854     }
1855 
1856     if (!CHIP_IS_E1(pdev))
1857     {
1858         ecore_status = mac_vlan_obj->wait(pdev, mac_vlan_obj);
1859         lm_status    = lm_ecore_status_to_lm_status(ecore_status);
1860 
1861         DbgBreakIf(lm_status != LM_STATUS_SUCCESS);
1862     }
1863 
1864     return lm_status;
1865 }
1866 
1867 /**
1868  * @Description
1869  *      Waits for the last set-vlan called to complete
1870  * @param pdev
1871  * @param chain_idx - the same chain-idx that the set-vlan was
1872  *                  called on
1873  *
1874  * @return lm_status_t SUCCESS or TIMEOUT
1875  */
lm_wait_set_vlan_done(struct _lm_device_t * pdev,u8_t chain_idx)1876 lm_status_t lm_wait_set_vlan_done(struct _lm_device_t *pdev, u8_t chain_idx)
1877 {
1878     struct ecore_vlan_mac_obj *vlan_obj     = &pdev->client_info[chain_idx].vlan_obj;
1879     lm_status_t               lm_status     = LM_STATUS_SUCCESS;
1880     ecore_status_t            ecore_status;
1881 
1882     if (!CHIP_IS_E1x(pdev))
1883     {
1884         ecore_status = vlan_obj->wait(pdev, vlan_obj);
1885         lm_status    = lm_ecore_status_to_lm_status(ecore_status);
1886 
1887         DbgBreakIf(lm_status != LM_STATUS_SUCCESS);
1888     }
1889 
1890     return lm_status;
1891 }
1892 
1893 
1894 /**
1895  * Description
1896  *      Clears all the mac address that are set on a certain cid...
1897  * @param pdev
1898  * @param chain_idx - which chain_idx to clear macs on...
1899  *
1900  * @assumptions: Called in PASSIVE_LEVEL!! function sleeps...
1901  * @return lm_status_t
1902  */
lm_clear_all_mac_addr(struct _lm_device_t * pdev,const u8_t chain_idx)1903 lm_status_t lm_clear_all_mac_addr(struct _lm_device_t *pdev, const u8_t chain_idx)
1904 {
1905 #define THE_REST_OF_ETH_MAC 0xffff
1906 
1907     struct ecore_vlan_mac_ramrod_params   ramrod_params    = {0};
1908     struct ecore_vlan_mac_obj           * vlan_mac_obj     = NULL;
1909     lm_status_t                           lm_status        = LM_STATUS_FAILURE;
1910     ecore_status_t                        ecore_status     = ECORE_SUCCESS;
1911     u32_t                                 mac_types[]      = {ECORE_ETH_MAC, ECORE_ISCSI_ETH_MAC, THE_REST_OF_ETH_MAC};
1912     struct ecore_vlan_mac_obj           * vlan_mac_objs[2] = {NULL, NULL};
1913     u8_t                                  idx              = 0;
1914     u8_t                                  obj_idx          = 0;
1915 
1916     DbgMessage(pdev, INFORMl2sp, "lm_clear_all_mac_addr chain_idx=%d\n", chain_idx);
1917 
1918     vlan_mac_objs[0] = &pdev->client_info[chain_idx].mac_obj;
1919     vlan_mac_objs[1] = &pdev->client_info[chain_idx].mac_vlan_obj;
1920 
1921     for (obj_idx = 0; obj_idx < ARRSIZE(vlan_mac_objs); obj_idx++)
1922     {
1923         vlan_mac_obj = vlan_mac_objs[obj_idx];
1924         ramrod_params.vlan_mac_obj = vlan_mac_obj;
1925 
1926         /* mac_vlan_obj only relevant for chips that are not E1... */
1927         if ((vlan_mac_obj == &pdev->client_info[chain_idx].mac_vlan_obj) &&
1928             CHIP_IS_E1(pdev))
1929         {
1930             break;
1931         }
1932 
1933         for (idx = 0; idx < ARRSIZE(mac_types); idx++)
1934         {
1935             SET_BIT( ramrod_params.ramrod_flags, RAMROD_COMP_WAIT);
1936             ramrod_params.user_req.vlan_mac_flags = 0;
1937             if (mac_types[idx] != THE_REST_OF_ETH_MAC)
1938             {
1939                 SET_BIT( ramrod_params.user_req.vlan_mac_flags, mac_types[idx]);
1940             }
1941 
1942             ecore_status = vlan_mac_obj->delete_all( pdev, ramrod_params.vlan_mac_obj, &ramrod_params.user_req.vlan_mac_flags, &ramrod_params.ramrod_flags );
1943             lm_status    = lm_ecore_status_to_lm_status(ecore_status);
1944 
1945             if (lm_status != LM_STATUS_SUCCESS)
1946             {
1947                 DbgBreakIf(lm_status != LM_STATUS_SUCCESS);
1948                 return lm_status;
1949             }
1950 
1951         }
1952     }
1953 
1954     DbgBreakIf(lm_status != LM_STATUS_SUCCESS);
1955     return lm_status;
1956 }
1957 
1958 
1959 
1960 /**
1961  * Description
1962  *      Restores all the mac address that are set on a certain
1963  *      cid (after sleep / hibernate...)
1964  * @param pdev
1965  * @param chain_idx - which chain_idx to clear macs on...
1966  *
1967  * @assumptions: Called in PASSIVE_LEVEL!! function sleeps...
1968  * @return lm_status_t
1969  */
lm_restore_all_mac_addr(struct _lm_device_t * pdev,u8_t chain_idx)1970 lm_status_t lm_restore_all_mac_addr(struct _lm_device_t *pdev, u8_t chain_idx)
1971 {
1972     struct ecore_vlan_mac_ramrod_params       ramrod_params = {0};
1973     struct ecore_vlan_mac_obj *               vlan_mac_obj  = &pdev->client_info[chain_idx].mac_obj;
1974     lm_status_t                               lm_status     = LM_STATUS_FAILURE;
1975     ecore_status_t                            ecore_status  = ECORE_SUCCESS;
1976     struct ecore_vlan_mac_registry_elem*      pos           = NULL;
1977 
1978     DbgMessage(pdev, INFORMl2sp, "lm_clear_all_mac_addr chain_idx=%d\n", chain_idx);
1979 
1980     ramrod_params.vlan_mac_obj = vlan_mac_obj;
1981 
1982     ECORE_SET_BIT(RAMROD_COMP_WAIT, &ramrod_params.ramrod_flags);
1983 
1984     do
1985     {
1986         ecore_status = vlan_mac_obj->restore(pdev, &ramrod_params, &pos);
1987         lm_status    = lm_ecore_status_to_lm_status(ecore_status);
1988         if (lm_status != LM_STATUS_SUCCESS)
1989         {
1990             DbgBreakIf(lm_status != LM_STATUS_SUCCESS);
1991             return lm_status;
1992         }
1993     }
1994     while (pos != NULL);
1995 
1996     /* Take care of the pairs and vlans as well... */
1997     if (!CHIP_IS_E1(pdev))
1998     {
1999         vlan_mac_obj = &pdev->client_info[chain_idx].mac_vlan_obj;
2000         ramrod_params.vlan_mac_obj = vlan_mac_obj;
2001         ECORE_SET_BIT(RAMROD_COMP_WAIT, &ramrod_params.ramrod_flags);
2002 
2003         pos = NULL;
2004         do
2005         {
2006             ecore_status = vlan_mac_obj->restore(pdev, &ramrod_params, &pos);
2007             lm_status    = lm_ecore_status_to_lm_status(ecore_status);
2008             if (lm_status != LM_STATUS_SUCCESS)
2009             {
2010                 DbgBreakIf(lm_status != LM_STATUS_SUCCESS);
2011                 return lm_status;
2012             }
2013         } while (pos != NULL);
2014     }
2015 
2016     if (!CHIP_IS_E1x(pdev))
2017     {
2018         vlan_mac_obj = &pdev->client_info[chain_idx].vlan_obj;
2019         ramrod_params.vlan_mac_obj = vlan_mac_obj;
2020         ECORE_SET_BIT(RAMROD_COMP_WAIT, &ramrod_params.ramrod_flags);
2021 
2022         pos = NULL;
2023         do
2024         {
2025             ecore_status = vlan_mac_obj->restore(pdev, &ramrod_params, &pos);
2026             lm_status    = lm_ecore_status_to_lm_status(ecore_status);
2027             if (lm_status != LM_STATUS_SUCCESS)
2028             {
2029                 DbgBreakIf(lm_status != LM_STATUS_SUCCESS);
2030                 return lm_status;
2031     }
2032         } while (pos != NULL);
2033     }
2034 
2035     DbgBreakIf(lm_status != LM_STATUS_SUCCESS);
2036     return lm_status;
2037 }
2038 
2039 /************************ RX FILTERING ***************************************/
2040 
2041 /**
2042  * @Description
2043  *  - set/unset rx filtering for a client. The setting is done
2044  *    for RX + TX, since tx switching is enabled FW needs to
2045  *    know the configuration for tx filtering as well. The
2046  *    configuration is almost semmetric for rx / tx except for
2047  *    the case of promiscuous in which case rx is in
2048  *    accept_unmatched and Tx is in accept_all (meaning all
2049  *    traffic is sent to loopback channel)
2050  *
2051  * @Assumptions
2052  *  - An inter client lock is taken by the caller
2053  * @Return
2054  *  - Success / Pending or Failure
2055  */
2056 lm_status_t
lm_set_rx_mask(lm_device_t * pdev,u8_t chain_idx,lm_rx_mask_t rx_mask,void * cookie)2057 lm_set_rx_mask(lm_device_t *pdev, u8_t chain_idx, lm_rx_mask_t rx_mask,  void * cookie)
2058 {
2059     struct ecore_rx_mode_ramrod_params ramrod_param    = {0};
2060     lm_cli_idx_t                       lm_cli_idx      = LM_CLI_IDX_MAX;
2061     unsigned long                      rx_accept_flags = 0;
2062     unsigned long                      tx_accept_flags = 0;
2063     lm_status_t                        lm_status       = LM_STATUS_SUCCESS;
2064     ecore_status_t                     ecore_status    = ECORE_SUCCESS;
2065 
2066     DbgMessage(pdev, INFORMl2sp, "lm_set_rx_mask chain_idx=%d rx_mask=%d\n", chain_idx, rx_mask);
2067 
2068     if (lm_reset_is_inprogress(pdev))
2069     {
2070         DbgMessage(pdev, FATAL, "lm_set_rx_mask: Under FLR!!!\n");
2071         return  LM_STATUS_SUCCESS;
2072     }
2073     #ifdef VF_INVOLVED
2074     if (IS_CHANNEL_VFDEV(pdev))
2075     {
2076         return lm_vf_pf_set_q_filters(pdev, chain_idx, FALSE, Q_FILTER_RX_MASK, (u8_t*)&rx_mask, sizeof(lm_rx_mask_t), LM_SET_CAM_NO_VLAN_FILTER, FALSE);
2077     }
2078     #endif
2079 
2080     if (!pdev->client_info[chain_idx].b_vlan_only_in_process &&
2081          pdev->client_info[chain_idx].last_set_rx_mask == rx_mask)
2082     {
2083         /* No need to send a filter that has already been set...
2084            return immediately */
2085         DbgMessage(pdev, INFORMl2sp, "lm_set_rx_mask returning immediately: mask didn't change!\n");
2086         return LM_STATUS_SUCCESS;
2087     }
2088 
2089     /* initialize accept flags in ECORE language */
2090     if (pdev->client_info[chain_idx].current_set_vlan == 0)
2091     {
2092         ECORE_SET_BIT_NA(ECORE_ACCEPT_ANY_VLAN, &rx_accept_flags);
2093         ECORE_SET_BIT_NA(ECORE_ACCEPT_ANY_VLAN, &tx_accept_flags);
2094         pdev->client_info[chain_idx].b_any_vlan_on = TRUE;
2095     }
2096     else
2097     {
2098         pdev->client_info[chain_idx].b_any_vlan_on = FALSE;
2099     }
2100 
2101     /* find the desired filtering configuration */
2102     if GET_FLAGS(rx_mask ,LM_RX_MASK_PROMISCUOUS_MODE)
2103     {
2104         ECORE_SET_BIT_NA(ECORE_ACCEPT_UNICAST, &rx_accept_flags);
2105         ECORE_SET_BIT_NA(ECORE_ACCEPT_UNMATCHED, &rx_accept_flags);
2106         ECORE_SET_BIT_NA(ECORE_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
2107         ECORE_SET_BIT_NA(ECORE_ACCEPT_BROADCAST, &rx_accept_flags);
2108 
2109         ECORE_SET_BIT_NA(ECORE_ACCEPT_UNICAST, &tx_accept_flags);
2110         ECORE_SET_BIT_NA(ECORE_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
2111         ECORE_SET_BIT_NA(ECORE_ACCEPT_BROADCAST, &tx_accept_flags);
2112 
2113         /* In NPAR + vm_switch_enable mode, we need to turn on the ACCEPT_ALL_UNICAST for TX to make
2114          * sure all traffic passes on the loopback channel to enable non-enlighted vms to communicate (vms that we don't
2115          * have their MAC set) .
2116          * We turn it on once we're in promiscuous, which signals that there is probablly vms up that need
2117          * this feature. */
2118         if (IS_MF_SI_MODE(pdev) && pdev->params.npar_vm_switching_enable)
2119         {
2120             ECORE_SET_BIT_NA(ECORE_ACCEPT_ALL_UNICAST, &tx_accept_flags);
2121         }
2122 
2123     }
2124 
2125     if GET_FLAGS(rx_mask ,LM_RX_MASK_ACCEPT_UNICAST)
2126     {
2127         /* accept matched ucast */
2128         ECORE_SET_BIT_NA(ECORE_ACCEPT_UNICAST, &rx_accept_flags);
2129         ECORE_SET_BIT_NA(ECORE_ACCEPT_UNICAST, &tx_accept_flags);
2130     }
2131 
2132     if GET_FLAGS(rx_mask ,LM_RX_MASK_ACCEPT_MULTICAST)
2133     {
2134         /* accept matched mcast */
2135         ECORE_SET_BIT_NA(ECORE_ACCEPT_MULTICAST, &rx_accept_flags);
2136         ECORE_SET_BIT_NA(ECORE_ACCEPT_MULTICAST, &tx_accept_flags);
2137     }
2138 
2139     if GET_FLAGS(rx_mask ,LM_RX_MASK_ACCEPT_ALL_MULTICAST)
2140     {
2141         /* accept all mcast */
2142         ECORE_SET_BIT_NA(ECORE_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
2143         ECORE_SET_BIT_NA(ECORE_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
2144     }
2145 
2146     if GET_FLAGS(rx_mask ,LM_RX_MASK_ACCEPT_BROADCAST)
2147     {
2148         /* accept matched bcast */
2149         ECORE_SET_BIT_NA(ECORE_ACCEPT_BROADCAST, &rx_accept_flags);
2150         ECORE_SET_BIT_NA(ECORE_ACCEPT_BROADCAST, &tx_accept_flags);
2151     }
2152 
2153     if GET_FLAGS(rx_mask ,LM_RX_MASK_ACCEPT_ERROR_PACKET)
2154     {
2155         /* TBD: there is no usage in Miniport for this flag */
2156     }
2157 
2158     /* Prepare ramrod parameters */
2159     ramrod_param.cid         = chain_idx; // echo..
2160     ramrod_param.cl_id       = LM_FW_CLI_ID(pdev, chain_idx);
2161     ramrod_param.rx_mode_obj = &pdev->slowpath_info.rx_mode_obj;
2162     ramrod_param.func_id     = FUNC_ID(pdev);
2163 
2164     ramrod_param.pstate      = (unsigned long *)&pdev->client_info[chain_idx].sp_rxmode_state;
2165     ramrod_param.state       = ECORE_FILTER_RX_MODE_PENDING;
2166 
2167     // We set in lm_cli_idx always 0 (LM_CLI_IDX_NDIS) for E1x and lm_cli_idx for e2.
2168     // LM_CLI_IDX_NDIS is an occasional choice and could be any of the LM_CLI_IDX
2169     //
2170     // * rx_mode_rdata PER INDEX is problematic because:
2171     //      the rx filtering is same place in internal ram of e1.5/e1.0 and when we work with an array
2172     //      each client run over the bits of the previous client
2173     //
2174     // * rx_mode_rdata NOT PER INDEX is problematic because:
2175     //      in e2.0 when we send a ramrod, the rdata is same memory for all
2176     //      clients and therefore in case of parallel run of rx_mask of clients
2177     //      one of the ramrods actually won't be sent with the correct data
2178     //
2179     // * Conclusion: we have here a problem which make a conflict that both E1.0/E1.5 and E2 work without issues.
2180     //               This issue should be resolved in a proper way which should be discussed.
2181     //
2182     // This note is related to the following two CQ's:
2183     // CQ53609 - eVBD:57712: evbda!lm_sq_complete+7ca; Assert is seen while running ACPI S1 S3 sleep stress test
2184     // CQ53444 - OIS Certs: iSCSI Ping Test Fails
2185 
2186     lm_cli_idx = CHIP_IS_E1x(pdev) ? LM_CLI_IDX_NDIS : LM_CHAIN_IDX_CLI(pdev, chain_idx);
2187 
2188     if(LM_CLI_IDX_MAX <= lm_cli_idx)
2189     {
2190         DbgBreakMsg(" lm_cli_idx has an invalid value");
2191         return LM_STATUS_FAILURE;
2192     }
2193 
2194     ramrod_param.rdata = LM_SLOWPATH(pdev, rx_mode_rdata)[lm_cli_idx];
2195     ramrod_param.rdata_mapping = LM_SLOWPATH_PHYS(pdev, rx_mode_rdata)[lm_cli_idx];
2196 
2197     ECORE_SET_BIT(ECORE_FILTER_RX_MODE_PENDING, &pdev->client_info[chain_idx].sp_rxmode_state);
2198     ECORE_SET_BIT(RAMROD_RX, &ramrod_param.ramrod_flags);
2199     ECORE_SET_BIT(RAMROD_TX, &ramrod_param.ramrod_flags);
2200 
2201     ramrod_param.rx_mode_flags = 0; // FIXME ...
2202     ramrod_param.rx_accept_flags = rx_accept_flags;
2203     ramrod_param.tx_accept_flags = tx_accept_flags;
2204 
2205     /* Must be set before the ramrod... */
2206     DbgBreakIf(pdev->client_info[chain_idx].set_rx_mode_cookie != NULL);
2207     pdev->client_info[chain_idx].last_set_rx_mask = rx_mask;
2208     pdev->client_info[chain_idx].set_rx_mode_cookie = cookie;
2209 
2210     ecore_status = ecore_config_rx_mode(pdev, &ramrod_param);
2211     lm_status    = lm_ecore_status_to_lm_status(ecore_status);
2212     DbgMessage(pdev, INFORMl2sp, "Status returned from ecore_config_rx_mode: %d\n", lm_status);
2213     if (lm_status == LM_STATUS_SUCCESS)
2214     {
2215         pdev->client_info[chain_idx].set_rx_mode_cookie = NULL;
2216     }
2217     else if (lm_status == LM_STATUS_REQUEST_NOT_ACCEPTED)
2218     {
2219         /* Sq is blocked... meaning we're in error recovery, this is our one outstanding oid.
2220          * mark ecore as done, return PENDING to UM, don't clear cookie. This means miniport
2221          * will eventually get a completion as part of the re-initialization of the chip... */
2222         ECORE_CLEAR_BIT(ECORE_FILTER_RX_MODE_PENDING, &pdev->client_info[chain_idx].sp_rxmode_state);
2223     }
2224 
2225     return lm_status;
2226 } /* lm_set_rx_mask */
2227 
2228 /* Waits for the set=-rx-mode to complete*/
lm_wait_set_rx_mask_done(struct _lm_device_t * pdev,u8_t chain_idx)2229 lm_status_t lm_wait_set_rx_mask_done(struct _lm_device_t *pdev, u8_t chain_idx)
2230 {
2231     struct ecore_rx_mode_ramrod_params params = {0};
2232     lm_status_t lm_status;
2233 
2234     params.pstate = (unsigned long *)&pdev->client_info[chain_idx].sp_rxmode_state;
2235     params.state = ECORE_FILTER_RX_MODE_PENDING;
2236 
2237     lm_status = pdev->slowpath_info.rx_mode_obj.wait_comp(pdev, &params);
2238     DbgBreakIf(lm_status != LM_STATUS_SUCCESS);
2239 
2240     return lm_status;
2241 }
2242 
2243 
2244 /*************************  MULTICAST  *****************************************/
_init_mcast_macs_list(lm_device_t * pdev,u8_t * mc_addrs,u32_t buf_len,struct ecore_mcast_ramrod_params * p)2245 static INLINE lm_status_t _init_mcast_macs_list(lm_device_t *pdev,
2246                                                  u8_t*        mc_addrs,
2247                                                  u32_t        buf_len,
2248                                                  struct ecore_mcast_ramrod_params *p)
2249 {
2250     u8                            mc_count = buf_len / ETHERNET_ADDRESS_SIZE;
2251     struct ecore_mcast_list_elem *mc_mac   = NULL;
2252 
2253     mc_mac = mm_rt_alloc_mem(pdev, sizeof(*mc_mac) * mc_count, 0);
2254 
2255     if (!mc_addrs) {
2256         return LM_STATUS_INVALID_PARAMETER;
2257     }
2258 
2259     d_list_clear(&p->mcast_list);
2260 
2261     while(buf_len && mc_addrs)
2262     {
2263         mc_mac->mac = mc_addrs;
2264         DbgMessage(pdev, INFORMl2sp, "mc_addrs[%d]:mc_addrs[%d]:mc_addrs[%d]:mc_addrs[%d]:mc_addrs[%d]:mc_addrs[%d]\n",
2265                    mc_addrs[0],mc_addrs[1],mc_addrs[2],mc_addrs[3],mc_addrs[4],mc_addrs[5]);
2266         d_list_push_tail(&p->mcast_list, &mc_mac->link);
2267         /* move on to next mc addr */
2268         buf_len -= ETHERNET_ADDRESS_SIZE;
2269         mc_addrs += ETHERNET_ADDRESS_SIZE;
2270         mc_mac++;
2271     }
2272 
2273     p->mcast_list_len = mc_count;
2274 
2275     return LM_STATUS_SUCCESS;
2276 }
2277 
__free_mcast_macs_list(lm_device_t * pdev,struct ecore_mcast_ramrod_params * p)2278 static INLINE void __free_mcast_macs_list(lm_device_t *pdev,
2279                                           struct ecore_mcast_ramrod_params *p)
2280 {
2281     struct ecore_mcast_list_elem *mc_mac = NULL;
2282     mc_mac = (struct ecore_mcast_list_elem *)d_list_peek_head(&p->mcast_list);
2283 
2284     if (mc_mac)
2285     {
2286         /* note that p->mcast_list_len is now set to 0 after processing */
2287         mm_rt_free_mem(pdev, mc_mac, sizeof(*mc_mac) * d_list_entry_cnt(&p->mcast_list), 0);
2288     }
2289 }
2290 
2291 /**
2292  * @Description
2293  *      Function configures a list of multicast addresses. Or
2294  *      resets the list previously configured
2295  *
2296  * @param pdev
2297  * @param mc_addrs    - array of multicast addresses. NULL if unset is required
2298  * @param buf_len     - length of the buffer - 0 if unset is required
2299  * @param cookie      - will be returned on completion
2300  * @param lm_cli_idx  - which lm client to send request on
2301  *
2302  * @return lm_status_t - SUCCESS on syncrounous completion
2303  *                       PENDING on asyncounous completion
2304  *                       FAILURE o/w
2305  */
lm_set_mc(struct _lm_device_t * pdev,u8_t * mc_addrs,u32_t buf_len,void * cookie,lm_cli_idx_t lm_cli_idx)2306 lm_status_t lm_set_mc(struct _lm_device_t *pdev,
2307                       u8_t*  mc_addrs, /* may be NULL (for unset) */
2308                       u32_t  buf_len,  /* may be 0 (for unset) */
2309                       void * cookie,  lm_cli_idx_t lm_cli_idx)
2310 {
2311     struct ecore_mcast_ramrod_params rparam       = {0};
2312     lm_status_t                      lm_status    = LM_STATUS_SUCCESS;
2313     ecore_status_t                   ecore_status = ECORE_SUCCESS;
2314 
2315 #ifdef VF_INVOLVED
2316     if (IS_CHANNEL_VFDEV(pdev)) {
2317         return lm_vf_pf_set_q_filters(pdev, lm_cli_idx, cookie, Q_FILTER_MC, mc_addrs, buf_len, LM_SET_CAM_NO_VLAN_FILTER, FALSE);
2318     }
2319 #endif
2320 
2321     if(0 == LM_MC_TABLE_SIZE(pdev,lm_cli_idx))
2322     {
2323         DbgBreakMsg("size must be greater than zero for a valid client\n");
2324         return LM_STATUS_FAILURE;
2325     }
2326 
2327 
2328     /* Initialize params sent to ecore layer */
2329     /* Need to split to groups of 16 for E2... due to hsi restraint*/
2330     if (mc_addrs)
2331     {
2332         _init_mcast_macs_list(pdev, mc_addrs, buf_len, &rparam);
2333     }
2334     rparam.mcast_obj = &pdev->slowpath_info.mcast_obj[lm_cli_idx];
2335 
2336     /* Cookie must be set before sending the ramord, since completion could arrive before
2337      * we return and the cookie must be in place*/
2338     DbgBreakIf(pdev->slowpath_info.set_mcast_cookie[lm_cli_idx] != NULL);
2339     pdev->slowpath_info.set_mcast_cookie[lm_cli_idx] = cookie;
2340 
2341     ecore_status = ecore_config_mcast(pdev, &rparam, (mc_addrs != NULL)? ECORE_MCAST_CMD_ADD : ECORE_MCAST_CMD_DEL);
2342     lm_status    = lm_ecore_status_to_lm_status(ecore_status);
2343     if (lm_status == LM_STATUS_SUCCESS)
2344     {
2345         pdev->slowpath_info.set_mcast_cookie[lm_cli_idx] = NULL;
2346         }
2347 
2348     if (mc_addrs)
2349     {
2350         __free_mcast_macs_list(pdev, &rparam);
2351     }
2352 
2353     return lm_status;
2354 } /* lm_set_mc */
2355 
lm_set_mc_list(struct _lm_device_t * pdev,d_list_t * mc_addrs,void * cookie,lm_cli_idx_t lm_cli_idx)2356 lm_status_t lm_set_mc_list(struct _lm_device_t *pdev,
2357                            d_list_t * mc_addrs, /* may be NULL (for unset) */
2358                            void * cookie,
2359                            lm_cli_idx_t lm_cli_idx)
2360 {
2361     struct ecore_mcast_ramrod_params rparam       = {0};
2362     lm_status_t                      lm_status    = LM_STATUS_SUCCESS;
2363     ecore_status_t                   ecore_status = ECORE_SUCCESS;
2364 
2365 #ifdef VF_INVOLVED
2366     if (IS_CHANNEL_VFDEV(pdev))
2367     {
2368         return lm_vf_pf_set_q_filters_list(pdev, lm_cli_idx, cookie,
2369                                       Q_FILTER_MC, mc_addrs,
2370                                       LM_SET_CAM_NO_VLAN_FILTER, FALSE);
2371     }
2372 #endif
2373 
2374     rparam.mcast_list = *mc_addrs;
2375     rparam.mcast_list_len = d_list_entry_cnt(mc_addrs);
2376 
2377     rparam.mcast_obj = &pdev->slowpath_info.mcast_obj[lm_cli_idx];
2378 
2379     /* Cookie must be set before sending the ramord, since completion could arrive before
2380      * we return and the cookie must be in place*/
2381     DbgBreakIf(pdev->slowpath_info.set_mcast_cookie[lm_cli_idx] != NULL);
2382     pdev->slowpath_info.set_mcast_cookie[lm_cli_idx] = cookie;
2383 
2384     ecore_status = ecore_config_mcast(pdev, &rparam,
2385                                       (mc_addrs != NULL) ? ECORE_MCAST_CMD_ADD :
2386                                                            ECORE_MCAST_CMD_DEL);
2387 
2388     lm_status = lm_ecore_status_to_lm_status(ecore_status);
2389     if (lm_status == LM_STATUS_SUCCESS)
2390     {
2391         pdev->slowpath_info.set_mcast_cookie[lm_cli_idx] = NULL;
2392     }
2393 
2394     return lm_status;
2395 }
2396 
2397 /**
2398  * Description
2399  *      This routine is called to wait for the multicast set
2400  *      completion. It must be called in passive level since it
2401  *      may sleep
2402  * @param pdev
2403  * @param lm_cli_idx the cli-idx that the multicast was sent on.
2404  *
2405  * @return lm_status SUCCESS on done, TIMEOUT o/w
2406  */
lm_wait_set_mc_done(struct _lm_device_t * pdev,lm_cli_idx_t lm_cli_idx)2407 lm_status_t lm_wait_set_mc_done(struct _lm_device_t *pdev, lm_cli_idx_t lm_cli_idx)
2408 {
2409     struct ecore_mcast_obj * mcast_obj    = &pdev->slowpath_info.mcast_obj[lm_cli_idx];
2410     ecore_status_t           ecore_status = mcast_obj->wait_comp(pdev, mcast_obj);
2411     lm_status_t              lm_status    = lm_ecore_status_to_lm_status(ecore_status);
2412 
2413     return lm_status;
2414 }
2415 
2416 /*************************  RSS ***********************************************/
2417 
2418 /**
2419  * Description: update RSS key in slowpath
2420  * Assumptions:
2421  *  - given key_size is promised to be either 40 or 16 (promised by NDIS)
2422  * Return:
2423  */
2424 
2425 /**
2426  * @Description: Update RSS key in driver rss_hash_key array and
2427  *             check if it has changed from previous key.
2428  *
2429  * @param pdev
2430  * @param hash_key  - hash_key received from NDIS
2431  * @param key_size
2432  *
2433  * @return u8_t     TRUE if changed, FALSE o/w
2434  */
lm_update_rss_key(struct _lm_device_t * pdev,u8_t * hash_key,u32_t key_size)2435 static u8_t lm_update_rss_key(struct _lm_device_t *pdev, u8_t *hash_key,
2436                                      u32_t key_size)
2437 {
2438     u32_t val        = 0;
2439     u32_t i          = 0;
2440     s32_t rss_reg    = 0;
2441     u8_t key_changed = FALSE;
2442 
2443     /* check params */
2444     if ERR_IF(!(pdev && hash_key))
2445     {
2446         DbgBreak();
2447         return LM_STATUS_INVALID_PARAMETER;
2448     }
2449 
2450     /* Note: MSB (that is hash_key[0]) should be placed in MSB of register KEYRSS9, regardless the key size */
2451     /* GilR 4/4/2007 - assert on key_size==16/40? */
2452     for (rss_reg = 9, i = 0; rss_reg >= 0; rss_reg--)
2453     {
2454         val = 0;
2455         if (i < key_size)
2456         {
2457             val = ((hash_key[i] << 24) | (hash_key[i+1] << 16) | (hash_key[i+2] << 8) | hash_key[i+3]);
2458             DbgMessage(pdev, INFORMl2sp,
2459                         "KEYRSS[%d:%d]=0x%x, written to RSS_REG=%d\n",
2460                         i, i+3, val, rss_reg);
2461             i += 4;
2462         }
2463         else
2464         {
2465             DbgMessage(pdev, INFORMl2sp,
2466                         "OUT OF KEY size, writing 0x%x to RSS_REG=%d\n",
2467                         val, rss_reg);
2468         }
2469         if (pdev->slowpath_info.rss_hash_key[rss_reg] != val)
2470         { /* key changed */
2471             pdev->slowpath_info.rss_hash_key[rss_reg] = val;
2472             key_changed = TRUE;
2473         }
2474     }
2475 
2476     if (key_changed)
2477     {
2478         DbgMessage(pdev, WARNl2, "update rss: KEY CHANGED\n");
2479     }
2480 
2481     return key_changed;
2482 }
2483 
2484 /**
2485  * @Description
2486  *      Enable RSS for Eth with given indirection table also updates the rss key
2487  *      in searcher (for previous chips...- done by sp-verbs)
2488  *
2489  * @Assumptions
2490  *  - given table_size is promised to be power of 2 (promised by NDIS),
2491  *    or 1 in case of RSS disabling
2492  *  - the indices in the given chain_indirection_table are chain
2493  *    indices converted by UM layer...
2494  *  - given key_size is promised to be either 40 or 16 (promised by NDIS)
2495  *
2496  * @param pdev
2497  * @param chain_indirection_table - array of size @table_size containing chain numbers
2498  * @param table_size - size of @indirection_table
2499  * @param hash_key - new hash_key to be configured. 0 means no key
2500  * @param key_size
2501  * @param hash_type
2502  * @param sync_with_toe - This field indicates that the completion to the mm layer
2503  *                        should take into account the fact that toe rss update will
2504  *                        be sent as well. A counter will be increased in lm for this purpose
2505  * @param cookie        - will be returned on completion
2506  *
2507  * @return lm_status_t - SUCCESS on syncrounous completion
2508  *                       PENDING on asyncounous completion
2509  *                       FAILURE o/w
2510  */
lm_enable_rss(struct _lm_device_t * pdev,u8_t * chain_indirection_table,u32_t table_size,u8_t * hash_key,u32_t key_size,lm_rss_hash_t hash_type,u8 sync_with_toe,void * cookie)2511 lm_status_t lm_enable_rss(struct _lm_device_t *pdev, u8_t *chain_indirection_table,
2512                           u32_t table_size, u8_t *hash_key, u32_t key_size, lm_rss_hash_t hash_type,
2513                           u8 sync_with_toe, void * cookie)
2514 {
2515     struct ecore_config_rss_params params      = {0};
2516     lm_status_t                    lm_status   = LM_STATUS_SUCCESS;
2517     ecore_status_t                 ecore_status = ECORE_SUCCESS;
2518     u8_t                           value       = 0;
2519     u8_t                           reconfigure = FALSE;
2520     u8_t                           key_changed = FALSE;
2521     u8_t                           i           = 0;
2522 
2523     /* check params */
2524     if ERR_IF(!(pdev && chain_indirection_table))
2525     {
2526         DbgBreak();
2527         return LM_STATUS_INVALID_PARAMETER;
2528     }
2529 
2530     if (hash_type &
2531         ~(LM_RSS_HASH_IPV4 | LM_RSS_HASH_TCP_IPV4 | LM_RSS_HASH_IPV6 | LM_RSS_HASH_TCP_IPV6))
2532     {
2533         return LM_STATUS_INVALID_PARAMETER;
2534     }
2535 
2536     params.rss_obj = &pdev->slowpath_info.rss_conf_obj;
2537 
2538     /* RSS mode */
2539     /* Fixme --> anything else ?*/
2540     ECORE_SET_BIT(ECORE_RSS_MODE_REGULAR, &params.rss_flags);
2541 
2542     /* Translate the hash type to "ecore" */
2543     if (GET_FLAGS(hash_type, LM_RSS_HASH_IPV4))
2544     {
2545         ECORE_SET_BIT(ECORE_RSS_IPV4, &params.rss_flags);
2546     }
2547     if (GET_FLAGS(hash_type, LM_RSS_HASH_TCP_IPV4))
2548     {
2549         ECORE_SET_BIT(ECORE_RSS_IPV4_TCP, &params.rss_flags);
2550     }
2551     if (GET_FLAGS(hash_type, LM_RSS_HASH_IPV6))
2552     {
2553         ECORE_SET_BIT(ECORE_RSS_IPV6, &params.rss_flags);
2554     }
2555     if (GET_FLAGS(hash_type, LM_RSS_HASH_TCP_IPV6))
2556     {
2557         ECORE_SET_BIT(ECORE_RSS_IPV6_TCP, &params.rss_flags);
2558     }
2559 
2560     if (pdev->slowpath_info.last_set_rss_flags != params.rss_flags)
2561     {
2562         pdev->slowpath_info.last_set_rss_flags = params.rss_flags;
2563         reconfigure = TRUE;
2564     }
2565 
2566     /* set rss result mask according to table size
2567        (table_size is promised to be power of 2) */
2568     params.rss_result_mask = (u8_t)table_size - 1;
2569     if (pdev->slowpath_info.last_set_rss_result_mask != params.rss_result_mask)
2570     {
2571         /* Hash bits */
2572         pdev->slowpath_info.last_set_rss_result_mask = params.rss_result_mask;
2573         reconfigure = TRUE;
2574     }
2575 
2576     for (i = 0; i < table_size; i++)
2577     {
2578 
2579         value = LM_CHAIN_TO_FW_CLIENT(pdev,chain_indirection_table[i]);
2580 
2581         if (pdev->slowpath_info.last_set_indirection_table[i] != value)
2582         {
2583             DbgMessage(pdev, INFORMl2sp, "RssIndTable[%02d]=0x%x (Changed from 0x%x)\n", i, value, pdev->slowpath_info.last_set_indirection_table[i]);
2584             pdev->slowpath_info.last_set_indirection_table[i] = value;
2585             reconfigure = TRUE;
2586         }
2587     }
2588     mm_memcpy(params.ind_table, pdev->slowpath_info.last_set_indirection_table, sizeof(params.ind_table));
2589 
2590     if (hash_key)
2591     {
2592         key_changed = lm_update_rss_key(pdev, hash_key, key_size);
2593         if (key_changed)
2594         {
2595             reconfigure = TRUE;
2596         }
2597         mm_memcpy(params.rss_key, pdev->slowpath_info.rss_hash_key, sizeof(params.rss_key));
2598         ECORE_SET_BIT(ECORE_RSS_SET_SRCH, &params.rss_flags);
2599     }
2600 
2601     DbgBreakIf(!reconfigure && sync_with_toe);
2602     /* Not expected, that toe will update and ETH not, but just to make sure, if sync_with_toe
2603      * is true it means toe reconfigured... so eth must to to take care of sync... */
2604     if (reconfigure || sync_with_toe)
2605     {
2606         /* If we're not syncing with toe, it means that these counters have not
2607          * been increased by toe, and need to be increased here. */
2608         if (!sync_with_toe)
2609         {
2610             DbgBreakIf(pdev->params.update_comp_cnt);
2611             mm_atomic_inc(&pdev->params.update_comp_cnt);
2612             mm_atomic_inc(&pdev->params.update_suspend_cnt);
2613         }
2614 
2615         DbgBreakIf(pdev->slowpath_info.set_rss_cookie);
2616         pdev->slowpath_info.set_rss_cookie = cookie;
2617 #ifdef VF_INVOLVED
2618         if (IS_CHANNEL_VFDEV(pdev))
2619         {
2620             lm_status = lm_vf_pf_update_rss(pdev, NULL, params.rss_flags, params.rss_result_mask, params.ind_table, params.rss_key);
2621             if (lm_status == LM_STATUS_SUCCESS)
2622             {
2623                 lm_status = lm_vf_pf_wait_no_messages_pending(pdev);
2624                 mm_atomic_dec(&pdev->params.update_comp_cnt);
2625                 mm_atomic_dec(&pdev->params.update_suspend_cnt);
2626 
2627             }
2628         }
2629         else
2630 #endif
2631         {
2632             ecore_status = ecore_config_rss(pdev, &params);
2633             lm_status    = lm_ecore_status_to_lm_status(ecore_status);
2634         }
2635         if (lm_status == LM_STATUS_SUCCESS)
2636         {
2637             lm_status = LM_STATUS_PENDING;
2638         }
2639     }
2640 
2641     return lm_status;
2642 }
2643 
2644 
2645 /**
2646  * @Description
2647  *      This routine disables rss functionality by sending a
2648  *      ramrod to FW.
2649  *
2650  * @param pdev
2651  * @param cookie - will be returned on completion
2652  * @param sync_with_toe - true means this call is synced with
2653  *                      toe, and completion will be called only
2654  *                      when both toe + eth complete. Eth needs
2655  *                      to know this (reason in code) *
2656  *
2657  * @return lm_status_t - SUCCESS on syncrounous completion
2658  *                       PENDING on asyncounous completion
2659  *                       FAILURE o/w
2660  */
lm_disable_rss(struct _lm_device_t * pdev,u8_t sync_with_toe,void * cookie)2661 lm_status_t lm_disable_rss(struct _lm_device_t *pdev, u8_t sync_with_toe, void * cookie)
2662 {
2663     struct ecore_config_rss_params params       = {0};
2664     lm_status_t                    lm_status    = LM_STATUS_FAILURE;
2665     ecore_status_t                 ecore_status = ECORE_SUCCESS;
2666     u8_t                           value        = 0;
2667     u8_t                           i            = 0;
2668 
2669     DbgMessage(pdev, INFORMl2sp, "lm_disable_rss sync_with_toe = %d\n", sync_with_toe);
2670 
2671     DbgBreakIf(pdev->slowpath_info.set_rss_cookie);
2672     pdev->slowpath_info.set_rss_cookie = cookie;
2673 
2674     params.rss_obj = &pdev->slowpath_info.rss_conf_obj;
2675 
2676     /* RSS mode */
2677     ECORE_SET_BIT(ECORE_RSS_MODE_DISABLED, &params.rss_flags);
2678     pdev->slowpath_info.last_set_rss_flags = params.rss_flags;
2679 
2680     /* If we're not syncing with toe, it means that these counters have not
2681      * been increased by toe, and need to be increased here. */
2682     if (!sync_with_toe)
2683     {
2684         mm_atomic_inc(&pdev->params.update_comp_cnt);
2685         mm_atomic_inc(&pdev->params.update_suspend_cnt);
2686     }
2687 
2688     value = LM_CHAIN_TO_FW_CLIENT(pdev,LM_SW_LEADING_RSS_CID(pdev));
2689     for (i = 0; i < ARRSIZE(params.ind_table); i++)
2690     {
2691         pdev->slowpath_info.last_set_indirection_table[i] = value;
2692         params.ind_table[i] = value;
2693     }
2694 
2695 #ifdef VF_INVOLVED
2696     if (IS_CHANNEL_VFDEV(pdev))
2697     {
2698         lm_status = lm_vf_pf_update_rss(pdev, NULL, params.rss_flags, params.rss_result_mask, params.ind_table, params.rss_key);
2699         if (lm_status == LM_STATUS_SUCCESS)
2700         {
2701             lm_status = lm_vf_pf_wait_no_messages_pending(pdev);
2702             mm_atomic_dec(&pdev->params.update_comp_cnt);
2703             mm_atomic_dec(&pdev->params.update_suspend_cnt);
2704         }
2705     }
2706     else
2707 #endif
2708     {
2709         ecore_status = ecore_config_rss(pdev, &params);
2710         lm_status    = lm_ecore_status_to_lm_status(ecore_status);
2711     }
2712 
2713     if (lm_status == LM_STATUS_SUCCESS)
2714     {
2715         lm_status = LM_STATUS_PENDING;
2716     }
2717     return lm_status;
2718 
2719 } /* lm_disable_rss */
2720 
2721 /**
2722  * @Description
2723  *      Wait for the rss disable/enable configuration to
2724  *      complete
2725  *
2726  * @param pdev
2727  *
2728  * @return lm_status_t lm_status_t SUCCESS or TIMEOUT
2729  */
lm_wait_config_rss_done(struct _lm_device_t * pdev)2730 lm_status_t lm_wait_config_rss_done(struct _lm_device_t *pdev)
2731 {
2732     struct ecore_raw_obj   *raw         = &pdev->slowpath_info.rss_conf_obj.raw;
2733     lm_status_t            lm_status    = LM_STATUS_FAILURE;
2734     ecore_status_t         ecore_status = raw->wait_comp(pdev, raw);
2735 
2736     lm_status = lm_ecore_status_to_lm_status(ecore_status);
2737 
2738     return lm_status;
2739 }
2740 
2741 #ifdef VF_INVOLVED
lm_wait_vf_config_rss_done(struct _lm_device_t * pdev,lm_vf_info_t * vf_info)2742 lm_status_t lm_wait_vf_config_rss_done(struct _lm_device_t *pdev, lm_vf_info_t *vf_info)
2743 {
2744     struct ecore_raw_obj *raw = &vf_info->vf_slowpath_info.rss_conf_obj.raw;
2745     lm_status_t            lm_status    = LM_STATUS_FAILURE;
2746     ecore_status_t         ecore_status = raw->wait_comp(pdev, raw);
2747 
2748     lm_status = lm_ecore_status_to_lm_status(ecore_status);
2749 
2750     return lm_status;
2751 }
2752 #endif
2753 
2754 /************************** EQ HANDLING *******************************************/
2755 
lm_eq_handle_function_start_eqe(struct _lm_device_t * pdev,union event_ring_elem * elem)2756 static INLINE void lm_eq_handle_function_start_eqe(struct _lm_device_t * pdev, union event_ring_elem * elem)
2757 {
2758     pdev->eq_info.function_state = FUNCTION_START_COMPLETED;
2759     lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_CMD_ID_COMMON_FUNCTION_START,
2760                    NONE_CONNECTION_TYPE, 0);
2761 }
2762 
lm_eq_handle_function_stop_eqe(struct _lm_device_t * pdev,union event_ring_elem * elem)2763 static INLINE void lm_eq_handle_function_stop_eqe(struct _lm_device_t * pdev, union event_ring_elem * elem)
2764 {
2765     pdev->eq_info.function_state = FUNCTION_STOP_COMPLETED;
2766     lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_CMD_ID_COMMON_FUNCTION_STOP,
2767                    NONE_CONNECTION_TYPE, 0);
2768 
2769 }
2770 
lm_eq_handle_cfc_del_eqe(struct _lm_device_t * pdev,union event_ring_elem * elem)2771 static INLINE void lm_eq_handle_cfc_del_eqe(struct _lm_device_t * pdev, union event_ring_elem * elem)
2772 {
2773     u32_t cid   = 0;
2774     u8_t  error = 0;
2775 
2776     cid = mm_le32_to_cpu(elem->message.data.cfc_del_event.cid);
2777     cid = SW_CID(cid);
2778 
2779     error = elem->message.error;
2780 
2781     if (cid < pdev->context_info->proto_start[TOE_CONNECTION_TYPE]) //(MAX_ETH_CONS + MAX_VF_ETH_CONS))
2782     {   /* cfc del completion for eth cid */
2783         DbgBreakIf(lm_get_con_state(pdev, cid) != LM_CON_STATE_TERMINATE);
2784         lm_set_con_state(pdev, cid, LM_CON_STATE_CLOSE);
2785         DbgMessage(pdev, WARNeq, "lm_service_eq_intr: EVENT_RING_OPCODE_CFC_DEL_WB - calling lm_extract_ramrod_req!\n");
2786     }
2787     else
2788     {   /* cfc del completion for toe cid */
2789         if (error) {
2790 
2791             if (lm_map_cid_to_proto(pdev, cid) != TOE_CONNECTION_TYPE)
2792             {
2793                 DbgMessage(pdev, FATAL, "ERROR completion is not valid for cid=0x%x\n",cid);
2794                 DbgBreakIfAll(1);
2795             }
2796             pdev->toe_info.stats.total_cfc_delete_error++;
2797             if (pdev->context_info->array[cid].cfc_delete_cnt++ < LM_MAX_VALID_CFC_DELETIONS)
2798             {
2799                 DbgMessage(pdev, WARNl4sp, "lm_eth_comp_cb: RAMROD_CMD_ID_ETH_CFC_DEL(0x%x) - %d resending!\n", cid,
2800                             pdev->context_info->array[cid].cfc_delete_cnt);
2801                 lm_command_post(pdev,
2802                                 cid,
2803                                 RAMROD_CMD_ID_COMMON_CFC_DEL,
2804                                 CMD_PRIORITY_NORMAL,
2805                                 NONE_CONNECTION_TYPE,
2806                                 0 );
2807             }
2808             else
2809             {
2810                 DbgMessage(pdev, FATAL, "A number of CFC deletions exceeded valid number of attempts\n");
2811                 DbgBreakIfAll(1);
2812             }
2813         }
2814         else
2815         {
2816             lm_recycle_cid(pdev, cid);
2817         }
2818     }
2819 
2820     lm_sq_complete(pdev, CMD_PRIORITY_NORMAL,
2821                    (elem->message.opcode == EVENT_RING_OPCODE_CFC_DEL)? RAMROD_CMD_ID_COMMON_CFC_DEL : RAMROD_CMD_ID_COMMON_CFC_DEL_WB,
2822                    NONE_CONNECTION_TYPE, cid);
2823 }
2824 
lm_eq_handle_fwd_setup_eqe(struct _lm_device_t * pdev,union event_ring_elem * elem)2825 static INLINE void lm_eq_handle_fwd_setup_eqe(struct _lm_device_t * pdev, union event_ring_elem * elem)
2826 {
2827     DbgBreakIf(lm_get_con_state(pdev, FWD_CID(pdev)) != LM_CON_STATE_OPEN_SENT);
2828     lm_set_con_state(pdev, FWD_CID(pdev), LM_CON_STATE_OPEN);
2829 
2830     DbgMessage(pdev, WARNl2sp, "comp of FWD SETUP -calling lm_extract_ramrod_req!\n");
2831     lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_CMD_ID_ETH_FORWARD_SETUP,
2832                    ETH_CONNECTION_TYPE, FWD_CID(pdev));
2833 
2834 }
2835 
lm_eq_handle_mcast_eqe(struct _lm_device_t * pdev,union event_ring_elem * elem)2836 static INLINE void lm_eq_handle_mcast_eqe(struct _lm_device_t * pdev, union event_ring_elem * elem)
2837 {
2838     struct ecore_mcast_ramrod_params  rparam         = {0};
2839     void                            * cookie         = NULL;
2840     lm_status_t                       lm_status      = LM_STATUS_FAILURE;
2841     ecore_status_t                    ecore_status   = ECORE_SUCCESS;
2842     u32_t                             cid            = mm_le32_to_cpu(elem->message.data.eth_event.echo) & ECORE_SWCID_MASK;
2843     const u8_t                        lm_cli_idx     = LM_CHAIN_IDX_CLI(pdev, cid);
2844     struct ecore_mcast_obj          * obj            = &pdev->slowpath_info.mcast_obj[lm_cli_idx];
2845     u8_t                              indicate_done  = TRUE;
2846 
2847     if(LM_CLI_IDX_MAX <= lm_cli_idx)
2848     {
2849         DbgBreakMsg(" lm_eq_handle_mcast_eqe lm_cli_idx is invalid ");
2850         return;
2851     }
2852 
2853     /* Clear pending state for the last command */
2854     obj->raw.clear_pending(&obj->raw);
2855 
2856     rparam.mcast_obj = obj;
2857 
2858     /* If there are pending mcast commands - send them */
2859     if (obj->check_pending(obj))
2860     {
2861         ecore_status = ecore_config_mcast(pdev, &rparam, ECORE_MCAST_CMD_CONT);
2862         lm_status    = lm_ecore_status_to_lm_status(ecore_status);
2863         if (lm_status == LM_STATUS_PENDING)
2864         {
2865             indicate_done = FALSE;
2866         }
2867         else if (lm_status != LM_STATUS_SUCCESS)
2868         {
2869             DbgMessage(pdev, FATAL, "Failed to send pending mcast commands: %d\n", lm_status);
2870             DbgBreakMsg("Unexpected pending mcast command failed\n");
2871         }
2872     }
2873 
2874     if (indicate_done)
2875     {
2876         if (pdev->slowpath_info.set_mcast_cookie[lm_cli_idx])
2877         {
2878             cookie = (void *)pdev->slowpath_info.set_mcast_cookie[lm_cli_idx];
2879             pdev->slowpath_info.set_mcast_cookie[lm_cli_idx] = NULL;
2880             mm_set_done(pdev, cid, cookie);
2881         }
2882     }
2883 
2884     if (CHIP_IS_E1(pdev))
2885     {
2886         lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_CMD_ID_ETH_SET_MAC,
2887                        ETH_CONNECTION_TYPE, cid);
2888     }
2889     else
2890     {
2891         lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
2892                        ETH_CONNECTION_TYPE, cid);
2893     }
2894 }
2895 
lm_eq_handle_classification_eqe(struct _lm_device_t * pdev,union event_ring_elem * elem)2896 static INLINE void lm_eq_handle_classification_eqe(struct _lm_device_t * pdev, union event_ring_elem * elem)
2897 {
2898     struct ecore_raw_obj        *raw                 = NULL;
2899     void                        *cookie              = NULL;
2900     u32_t                       cid                  = GET_FLAGS( mm_le32_to_cpu(elem->message.data.eth_event.echo), ECORE_SWCID_MASK );
2901     u8_t                        type                 = mm_le32_to_cpu(elem->message.data.eth_event.echo) >> ECORE_SWCID_SHIFT;
2902     u32_t                       client_info_idx      = 0;
2903     struct ecore_vlan_mac_obj*  p_ecore_vlan_mac_obj = NULL;
2904     unsigned long               ramrod_flags         = 0;
2905     ecore_status_t              ecore_status         = ECORE_SUCCESS;
2906     int i;
2907 
2908     client_info_idx = lm_get_sw_client_idx_from_cid(pdev,cid);
2909 
2910     /* Relevant to 57710, mcast is implemented as "set-macs"*/
2911     if (type == ECORE_FILTER_MCAST_PENDING)
2912     {
2913         DbgBreakIf(!CHIP_IS_E1(pdev));
2914         lm_eq_handle_mcast_eqe(pdev, elem);
2915         return;
2916     }
2917 
2918     switch (type)
2919     {
2920     case ECORE_FILTER_MAC_PENDING:
2921         raw                  = &pdev->client_info[client_info_idx].mac_obj.raw;
2922         p_ecore_vlan_mac_obj = &pdev->client_info[client_info_idx].mac_obj;
2923         break;
2924     case ECORE_FILTER_VLAN_MAC_PENDING:
2925         raw                  = &pdev->client_info[client_info_idx].mac_vlan_obj.raw;
2926         p_ecore_vlan_mac_obj = &pdev->client_info[client_info_idx].mac_vlan_obj;
2927         break;
2928     case ECORE_FILTER_VLAN_PENDING:
2929         raw = &pdev->client_info[client_info_idx].vlan_obj.raw;
2930         p_ecore_vlan_mac_obj = &pdev->client_info[client_info_idx].vlan_obj;
2931         SET_BIT( ramrod_flags, RAMROD_CONT );
2932         break;
2933     default:
2934         /* unknown ER handling*/
2935         /* Special handling for case that type is unknown (error recovery flow)
2936          * check which object is pending, and clear the relevant one. */
2937         raw                  = &pdev->client_info[client_info_idx].mac_obj.raw;
2938         p_ecore_vlan_mac_obj = &pdev->client_info[client_info_idx].mac_obj;
2939         type                 = ECORE_FILTER_MAC_PENDING;
2940         if (!raw->check_pending(raw))
2941         {
2942             raw                  = &pdev->client_info[client_info_idx].mac_vlan_obj.raw;
2943             p_ecore_vlan_mac_obj = &pdev->client_info[client_info_idx].mac_vlan_obj;
2944             type                 = ECORE_FILTER_VLAN_MAC_PENDING;
2945         }
2946         if (!raw->check_pending(raw))
2947         {
2948             raw                  = &pdev->client_info[client_info_idx].vlan_obj.raw;
2949             p_ecore_vlan_mac_obj = &pdev->client_info[client_info_idx].vlan_obj;
2950             type                 = ECORE_FILTER_VLAN_PENDING;
2951         }
2952         break;
2953     }
2954 
2955     ecore_status = p_ecore_vlan_mac_obj->complete( pdev, p_ecore_vlan_mac_obj, elem, &ramrod_flags );
2956 
2957     // We expect here only these 2 status (CQ61418)
2958     DbgBreakIf ( ( ECORE_SUCCESS != ecore_status ) && ( ECORE_PENDING != ecore_status ) );
2959 
2960     if (( ECORE_SUCCESS != ecore_status ) && (!CHIP_IS_E1x(pdev)))
2961     {
2962         DbgMessage(pdev, WARN,
2963         "lm_eq_handle_classification_eqe: commands' length is above CLASSIFY_RULES_COUNT (the maximum length of commands' list for one execution), ecore_status = %d", ecore_status);
2964     }
2965 
2966     // verify that the mac_local mac_add1 & mac_add2 are continuous
2967     ASSERT_STATIC( OFFSETOF( eth_stats_info_t, mac_local )+ sizeof(pdev->vars.stats.stats_mirror.stats_drv.drv_info_to_mfw.eth_stats.mac_local) ==  OFFSETOF( eth_stats_info_t, mac_add1 ) );
2968     ASSERT_STATIC( OFFSETOF( eth_stats_info_t, mac_add1 ) + sizeof(pdev->vars.stats.stats_mirror.stats_drv.drv_info_to_mfw.eth_stats.mac_add1) ==   OFFSETOF( eth_stats_info_t, mac_add2 ) );
2969 
2970     if( (NDIS_CID(pdev) == client_info_idx) && (type == ECORE_FILTER_MAC_PENDING) )
2971     {
2972         if ( NULL == p_ecore_vlan_mac_obj->get_n_elements )
2973         {
2974             DbgBreakIf( !CHIP_IS_E1x(pdev) );
2975         }
2976         else
2977         {
2978             // We want to keep only eth mac this is for E3 only but we keep it anyway also for E2...
2979             for (i = 0; i < 3; i++)
2980                 mm_mem_zero(pdev->vars.stats.stats_mirror.stats_drv.drv_info_to_mfw.eth_stats.mac_local + i, sizeof(u8_t));
2981             p_ecore_vlan_mac_obj->get_n_elements(pdev, p_ecore_vlan_mac_obj ,3, pdev->vars.stats.stats_mirror.stats_drv.drv_info_to_mfw.eth_stats.mac_local + MAC_PAD, MAC_PAD, ETH_ALEN);
2982         }
2983     }
2984 
2985     if (pdev->client_info[client_info_idx].set_mac_cookie)
2986     {
2987         cookie = (void *)pdev->client_info[client_info_idx].set_mac_cookie;
2988         pdev->client_info[client_info_idx].set_mac_cookie = NULL;
2989         mm_set_done(pdev, cid, cookie);
2990     }
2991 
2992     if (CHIP_IS_E1x(pdev))
2993     {
2994         lm_sq_complete(pdev, CMD_PRIORITY_NORMAL,
2995                        RAMROD_CMD_ID_ETH_SET_MAC, ETH_CONNECTION_TYPE, cid);
2996     }
2997     else
2998     {
2999         lm_sq_complete(pdev, CMD_PRIORITY_NORMAL,
3000                        RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES, ETH_CONNECTION_TYPE, cid);
3001     }
3002 }
3003 
lm_eq_handle_stats_eqe(struct _lm_device_t * pdev,union event_ring_elem * elem)3004 static INLINE void lm_eq_handle_stats_eqe(struct _lm_device_t * pdev, union event_ring_elem * elem)
3005 {
3006     /* Order is important!!!
3007      * stats use a predefined ramrod. We need to make sure that we first complete the ramrod, which will
3008      * take it out of sq-completed list, and only after that mark the ramrod as completed, so that a new
3009      * ramrod can be sent!.
3010      */
3011     lm_sq_complete(pdev, CMD_PRIORITY_HIGH,
3012                    RAMROD_CMD_ID_COMMON_STAT_QUERY, NONE_CONNECTION_TYPE, 0);
3013 
3014     mm_write_barrier(); /* barrier to make sure command before this line completes before executing the next line! */
3015     pdev->vars.stats.stats_collect.stats_fw.b_ramrod_completed = TRUE;
3016 
3017 }
3018 
lm_eq_handle_filter_rules_eqe(struct _lm_device_t * pdev,union event_ring_elem * elem)3019 static INLINE void lm_eq_handle_filter_rules_eqe(struct _lm_device_t * pdev, union event_ring_elem * elem)
3020 {
3021     struct ecore_vlan_mac_ramrod_params p;
3022     void  * cookie = NULL;
3023     u32_t   cid    = 0;
3024 
3025     cid = mm_le32_to_cpu(elem->message.data.eth_event.echo) & ECORE_SWCID_MASK;
3026 
3027     DbgMessage(pdev, INFORMeq | INFORMl2sp, "Filter rule completion: cid %d, client_info %d\n",cid);
3028 
3029     // FIXME: pdev->client_info[cid].mac_obj.raw.clear_pending(&pdev->client_info[cid].mac_obj.raw);
3030     ECORE_CLEAR_BIT(ECORE_FILTER_RX_MODE_PENDING, &pdev->client_info[cid].sp_rxmode_state);
3031 
3032     if (pdev->client_info[cid].set_rx_mode_cookie)
3033     {
3034         cookie = (void *)pdev->client_info[cid].set_rx_mode_cookie;
3035         pdev->client_info[cid].set_rx_mode_cookie = NULL;
3036         DbgMessage(pdev, INFORMl2sp, "Filter rule calling mm_set_done... \n");
3037         mm_set_done(pdev, cid, cookie);
3038     }
3039 
3040     lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_CMD_ID_ETH_FILTER_RULES, ETH_CONNECTION_TYPE, cid);
3041 
3042     if (pdev->client_info[cid].b_vlan_only_in_process)
3043     {
3044         pdev->client_info[cid].b_vlan_only_in_process = FALSE;
3045 
3046            p.vlan_mac_obj = &pdev->client_info[cid].vlan_obj;
3047         p.ramrod_flags = 0;
3048         SET_BIT( (p.ramrod_flags), RAMROD_CONT );
3049 
3050         ecore_config_vlan_mac(pdev, &p);
3051     }
3052 }
3053 
lm_eq_handle_rss_update_eqe(struct _lm_device_t * pdev,union event_ring_elem * elem)3054 static INLINE void lm_eq_handle_rss_update_eqe(struct _lm_device_t * pdev, union event_ring_elem * elem)
3055 {
3056     struct ecore_raw_obj  * raw    = NULL;
3057     void                  * cookie = NULL;
3058     u32_t                   cid    = LM_SW_LEADING_RSS_CID(pdev);
3059 #ifdef VF_INVOLVED
3060     u8_t abs_vf_id;
3061     lm_vf_info_t * vf_info;
3062 #endif
3063 
3064     DbgMessage(pdev, INFORMeq | INFORMl2sp, "lm_eth_comp_cb: EVENT_RING_OPCODE_RSS_UPDATE_RULES\n");
3065 
3066 
3067     cid = mm_le32_to_cpu(elem->message.data.eth_event.echo) & ECORE_SWCID_MASK;
3068 
3069 #ifdef VF_INVOLVED
3070     if (IS_CHANNEL_VIRT_MODE_MASTER_PFDEV(pdev) && (cid >= MAX_RX_CHAIN(pdev)))
3071     {
3072         abs_vf_id = GET_ABS_VF_ID_FROM_PF_CID(cid);
3073         vf_info = lm_pf_find_vf_info_by_abs_id(pdev, abs_vf_id);
3074         DbgBreakIf(!vf_info);
3075         raw = &vf_info->vf_slowpath_info.rss_conf_obj.raw;
3076         raw->clear_pending(raw);
3077     }
3078     else
3079 #endif
3080     {
3081 
3082         raw = &pdev->slowpath_info.rss_conf_obj.raw;
3083         raw->clear_pending(raw);
3084         mm_atomic_dec(&pdev->params.update_comp_cnt);
3085         if (mm_atomic_dec(&pdev->params.update_suspend_cnt) == 0)
3086         {
3087             if (pdev->slowpath_info.set_rss_cookie != NULL)
3088             {
3089                 cookie = (void *)pdev->slowpath_info.set_rss_cookie;
3090                 pdev->slowpath_info.set_rss_cookie = NULL;
3091                 mm_set_done(pdev, cid, cookie);
3092             }
3093         }
3094     }
3095     lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_CMD_ID_ETH_RSS_UPDATE, ETH_CONNECTION_TYPE, cid);
3096 }
3097 
3098 /**lm_eq_handle_niv_function_update_eqe
3099  * handle a NIV function update completion.
3100  *
3101  * @param pdev the device
3102  * @param elem the CQE
3103  */
lm_eq_handle_function_update_eqe(struct _lm_device_t * pdev,union event_ring_elem * elem)3104 static INLINE void lm_eq_handle_function_update_eqe(struct _lm_device_t * pdev, union event_ring_elem * elem)
3105 {
3106     DbgBreakIf((FUNC_UPDATE_RAMROD_SOURCE_NIV != elem->message.data.function_update_event.echo) &&
3107                (FUNC_UPDATE_RAMROD_SOURCE_L2MP != elem->message.data.function_update_event.echo) &&
3108                (FUNC_UPDATE_RAMROD_SOURCE_ENCAP != elem->message.data.function_update_event.echo) &&
3109                (FUNC_UPDATE_RAMROD_SOURCE_UFP != elem->message.data.function_update_event.echo));
3110 
3111     switch(elem->message.data.function_update_event.echo)
3112     {
3113     case FUNC_UPDATE_RAMROD_SOURCE_NIV:
3114         DbgBreakIf((pdev->slowpath_info.niv_ramrod_state == NIV_RAMROD_COMPLETED)||
3115                    (pdev->slowpath_info.niv_ramrod_state == NIV_RAMROD_NOT_POSTED));
3116 
3117         if ( NIV_RAMROD_SET_LOOPBACK_POSTED == pdev->slowpath_info.niv_ramrod_state )
3118         {
3119             MM_ACQUIRE_PHY_LOCK(pdev);
3120             pdev->vars.link_status = LM_STATUS_LINK_ACTIVE;
3121             mm_indicate_link(pdev, pdev->vars.link_status, pdev->vars.medium);
3122             MM_RELEASE_PHY_LOCK(pdev);
3123         }
3124         else if (NIV_RAMROD_CLEAR_LOOPBACK_POSTED == pdev->slowpath_info.niv_ramrod_state)
3125         {
3126             MM_ACQUIRE_PHY_LOCK(pdev);
3127             pdev->vars.link_status = LM_STATUS_LINK_DOWN;
3128             mm_indicate_link(pdev, pdev->vars.link_status, pdev->vars.medium);
3129             MM_RELEASE_PHY_LOCK(pdev);
3130         }
3131 
3132         pdev->slowpath_info.niv_ramrod_state = NIV_RAMROD_COMPLETED;
3133 
3134         break;
3135 
3136     case FUNC_UPDATE_RAMROD_SOURCE_L2MP:
3137         pdev->slowpath_info.l2mp_func_update_ramrod_state = L2MP_FUNC_UPDATE_RAMROD_COMPLETED;
3138 
3139         break;
3140 
3141     case FUNC_UPDATE_RAMROD_SOURCE_ENCAP:
3142         pdev->encap_info.current_encap_offload_state =
3143             pdev->encap_info.new_encap_offload_state;
3144         if (pdev->encap_info.update_cookie)
3145         {
3146             void* cookie = (void*)pdev->encap_info.update_cookie;
3147             pdev->encap_info.update_cookie = NULL;
3148             mm_set_done(pdev, LM_CLI_IDX_NDIS, cookie);
3149         }
3150 
3151         break;
3152     case FUNC_UPDATE_RAMROD_SOURCE_UFP:
3153         DbgBreakIf((pdev->slowpath_info.ufp_func_ramrod_state == UFP_RAMROD_COMPLETED)||
3154                    (pdev->slowpath_info.ufp_func_ramrod_state == UFP_RAMROD_NOT_POSTED));
3155 
3156         // In case of link update, indicate the link status to miniport, else it is just
3157         // svid update which doesnt need anymore processing.
3158         if ( UFP_RAMROD_PF_LINK_UPDATE_POSTED == pdev->slowpath_info.ufp_func_ramrod_state )
3159         {
3160             MM_ACQUIRE_PHY_LOCK(pdev);
3161             pdev->vars.link_status = LM_STATUS_LINK_ACTIVE;
3162             mm_indicate_link(pdev, pdev->vars.link_status, pdev->vars.medium);
3163             MM_RELEASE_PHY_LOCK(pdev);
3164         }
3165         else if (UFP_RAMROD_PF_UPDATE_POSTED != pdev->slowpath_info.ufp_func_ramrod_state)
3166         {
3167             DbgBreak();
3168         }
3169         pdev->slowpath_info.ufp_func_ramrod_state = UFP_RAMROD_COMPLETED;
3170         break;
3171     default:
3172         DbgBreakMsg("lm_eq_handle_function_update_eqe unknown source");
3173         break;
3174     }
3175 
3176     lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE,
3177                    NONE_CONNECTION_TYPE, 0);
3178 }
3179 
3180 /**lm_eq_handle_niv_function_update_eqe
3181  * handle a NIV lists update completion.
3182  *
3183  * @param pdev the device
3184  * @param elem the CQE
3185  */
lm_eq_handle_niv_vif_lists_eqe(struct _lm_device_t * pdev,union event_ring_elem * elem)3186 static INLINE void lm_eq_handle_niv_vif_lists_eqe(struct _lm_device_t * pdev, union event_ring_elem * elem)
3187 {
3188     DbgBreakIf((pdev->slowpath_info.niv_ramrod_state != NIV_RAMROD_VIF_LISTS_POSTED) &&
3189                 (!lm_reset_is_inprogress(pdev)));
3190 
3191     DbgBreakIf((elem->message.data.vif_list_event.echo != VIF_LIST_RULE_CLEAR_ALL) &&
3192                (elem->message.data.vif_list_event.echo != VIF_LIST_RULE_CLEAR_FUNC) &&
3193                (elem->message.data.vif_list_event.echo != VIF_LIST_RULE_GET) &&
3194                (elem->message.data.vif_list_event.echo != VIF_LIST_RULE_SET));
3195 
3196     if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET)
3197     {
3198         pdev->slowpath_info.last_vif_list_bitmap = (u8_t)elem->message.data.vif_list_event.func_bit_map;
3199     }
3200 
3201     if(!lm_reset_is_inprogress(pdev))
3202     {
3203         pdev->slowpath_info.niv_ramrod_state = NIV_RAMROD_COMPLETED;
3204     }
3205 
3206     lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS,
3207                    NONE_CONNECTION_TYPE, 0);
3208 }
3209 
3210 #ifdef VF_INVOLVED
lm_eq_handle_vf_flr_eqe(struct _lm_device_t * pdev,union event_ring_elem * elem)3211 static INLINE void lm_eq_handle_vf_flr_eqe(struct _lm_device_t * pdev, union event_ring_elem * elem)
3212 {
3213     lm_vf_info_t * vf_info = NULL;
3214     u8_t abs_vf_id;
3215 
3216     abs_vf_id = elem->message.data.vf_flr_event.vf_id;
3217 
3218     DbgMessage(pdev, WARN, "lm_eq_handle_vf_flr_eqe(%d)\n",elem->message.data.vf_flr_event.vf_id);
3219     vf_info = lm_pf_find_vf_info_by_abs_id(pdev, (u8_t)abs_vf_id);
3220     if (!vf_info) {
3221         DbgBreakMsg("lm_eq_handle_vf_flr_eqe: vf_info is not found\n");
3222         return;
3223     }
3224     vf_info->was_flred = TRUE;
3225     MM_ACQUIRE_VFS_STATS_LOCK_DPC(pdev);
3226     if ((vf_info->vf_stats.vf_stats_state != VF_STATS_NONE) && (vf_info->vf_stats.vf_stats_state != VF_STATS_REQ_IN_PROCESSING)) {
3227         vf_info->vf_stats.vf_stats_state = VF_STATS_REQ_READY;
3228     }
3229     vf_info->vf_stats.stop_collect_stats = TRUE;
3230     vf_info->vf_stats.vf_stats_flag = 0;
3231     MM_RELEASE_VFS_STATS_LOCK_DPC(pdev);
3232 }
3233 
lm_eq_handle_malicious_vf_eqe(struct _lm_device_t * pdev,union event_ring_elem * elem)3234 static INLINE void lm_eq_handle_malicious_vf_eqe(struct _lm_device_t * pdev, union event_ring_elem * elem)
3235 {
3236     lm_vf_info_t * vf_info = NULL;
3237     u8_t abs_vf_id;
3238 
3239     abs_vf_id = elem->message.data.malicious_vf_event.vf_id;
3240     vf_info = lm_pf_find_vf_info_by_abs_id(pdev, (u8_t)abs_vf_id);
3241     if (vf_info) {
3242         vf_info->was_malicious = TRUE;
3243         mm_report_malicious_vf(pdev, vf_info);
3244     }
3245     DbgMessage(pdev, FATAL, "lm_eq_handle_malicious_vf_eqe(%d)\n",abs_vf_id);
3246 }
3247 
3248 #endif
lm_service_eq_elem(struct _lm_device_t * pdev,union event_ring_elem * elem)3249 static INLINE lm_status_t lm_service_eq_elem(struct _lm_device_t * pdev, union event_ring_elem * elem)
3250 {
3251     /* handle eq element */
3252     switch(elem->message.opcode)
3253     {
3254         case EVENT_RING_OPCODE_FUNCTION_START:
3255             lm_eq_handle_function_start_eqe(pdev, elem);
3256             break;
3257 
3258         case EVENT_RING_OPCODE_FUNCTION_STOP:
3259             lm_eq_handle_function_stop_eqe(pdev, elem);
3260             break;
3261 
3262         case EVENT_RING_OPCODE_CFC_DEL:
3263         case EVENT_RING_OPCODE_CFC_DEL_WB:
3264             lm_eq_handle_cfc_del_eqe(pdev, elem);
3265             break;
3266 
3267         case EVENT_RING_OPCODE_SET_MAC:
3268         case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
3269             lm_eq_handle_classification_eqe(pdev, elem);
3270             break;
3271 
3272         case EVENT_RING_OPCODE_STAT_QUERY:
3273             lm_eq_handle_stats_eqe(pdev, elem);
3274             break;
3275 
3276         case EVENT_RING_OPCODE_STOP_TRAFFIC:
3277             pdev->dcbx_info.dcbx_ramrod_state = FUNCTION_DCBX_STOP_COMPLETED;
3278             lm_sq_complete(pdev, CMD_PRIORITY_MEDIUM,
3279                        RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, NONE_CONNECTION_TYPE, 0);
3280             break;
3281 
3282         case EVENT_RING_OPCODE_START_TRAFFIC:
3283             pdev->dcbx_info.dcbx_ramrod_state = FUNCTION_DCBX_START_COMPLETED;
3284             lm_sq_complete(pdev, CMD_PRIORITY_HIGH,
3285                        RAMROD_CMD_ID_COMMON_START_TRAFFIC, NONE_CONNECTION_TYPE, 0);
3286             break;
3287 
3288         case EVENT_RING_OPCODE_FORWARD_SETUP:
3289             lm_eq_handle_fwd_setup_eqe(pdev, elem);
3290             break;
3291 
3292         case EVENT_RING_OPCODE_MULTICAST_RULES:
3293             lm_eq_handle_mcast_eqe(pdev, elem);
3294             break;
3295 
3296         case EVENT_RING_OPCODE_FILTERS_RULES:
3297             lm_eq_handle_filter_rules_eqe(pdev, elem);
3298             break;
3299 
3300         case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
3301             lm_eq_handle_rss_update_eqe(pdev, elem);
3302             break;
3303 
3304         case EVENT_RING_OPCODE_FUNCTION_UPDATE:
3305             lm_eq_handle_function_update_eqe(pdev, elem);
3306             break;
3307 
3308         case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
3309             lm_eq_handle_niv_vif_lists_eqe(pdev, elem);
3310             break;
3311 #ifdef VF_INVOLVED
3312         case EVENT_RING_OPCODE_VF_FLR:
3313             lm_eq_handle_vf_flr_eqe(pdev, elem);
3314             break;
3315         case EVENT_RING_OPCODE_MALICIOUS_VF:
3316             lm_eq_handle_malicious_vf_eqe(pdev, elem);
3317             break;
3318 #endif
3319         default:
3320             DbgBreakMsg("Unknown elem type received on eq\n");
3321             return LM_STATUS_FAILURE;
3322         }
3323 
3324     return LM_STATUS_SUCCESS;
3325 }
3326 
3327 /**
3328  * @Description
3329  *      handle cqes of the event-ring, should be called from dpc if index in status block was changed
3330  * @param pdev
3331  *
3332  * @return lm_status_t SUCCESS or FAILURE (if unknown completion)
3333  */
lm_service_eq_intr(struct _lm_device_t * pdev)3334 lm_status_t lm_service_eq_intr(struct _lm_device_t * pdev)
3335 {
3336     union event_ring_elem * elem       = NULL;
3337     lm_eq_chain_t         * eq_chain   = &pdev->eq_info.eq_chain;
3338     lm_status_t             lm_status  = LM_STATUS_SUCCESS;
3339     u16_t                   cq_new_idx = 0;
3340     u16_t                   cq_old_idx = 0;
3341 
3342     cq_new_idx = mm_le16_to_cpu(*(eq_chain->hw_con_idx_ptr));
3343     if((cq_new_idx & lm_bd_chain_usable_bds_per_page(&eq_chain->bd_chain))
3344        == lm_bd_chain_usable_bds_per_page(&eq_chain->bd_chain))
3345     {
3346         cq_new_idx+=lm_bd_chain_bds_skip_eop(&eq_chain->bd_chain);
3347     }
3348     cq_old_idx = lm_bd_chain_cons_idx(&eq_chain->bd_chain);
3349 
3350     /* there is no change in the EQ consumer index so exit! */
3351     if (cq_old_idx == cq_new_idx)
3352     {
3353         DbgMessage(pdev, INFORMeq , "there is no change in the EQ consumer index so exit!\n");
3354         return LM_STATUS_SUCCESS;
3355     } else {
3356         DbgMessage(pdev, INFORMeq , "EQ consumer index: cq_old_idx=0x%x, cq_new_idx=0x%x!\n",cq_old_idx,cq_new_idx);
3357     }
3358 
3359     while(cq_old_idx != cq_new_idx)
3360     {
3361         DbgBreakIfFastPath(S16_SUB(cq_new_idx, cq_old_idx) <= 0);
3362         /* get hold of the cqe, and find out what it's type corresponds to */
3363         elem = (union event_ring_elem *)lm_bd_chain_consume_bd(&eq_chain->bd_chain);
3364 
3365         if (elem == NULL)
3366         {
3367             DbgBreakIfFastPath(elem == NULL);
3368             return LM_STATUS_FAILURE;
3369         }
3370 
3371         cq_old_idx = lm_bd_chain_cons_idx(&eq_chain->bd_chain);
3372 
3373         lm_status = lm_service_eq_elem(pdev, elem);
3374         if (lm_status != LM_STATUS_SUCCESS)
3375         {
3376             return lm_status;
3377         }
3378 
3379 #ifdef __LINUX
3380         mm_common_ramrod_comp_cb(pdev, &elem->message);
3381 #endif //__LINUX
3382         /* Recycle the cqe */
3383         lm_bd_chain_bd_produced(&eq_chain->bd_chain);
3384     } /* while */
3385 
3386     /* update producer */
3387     LM_INTMEM_WRITE16(pdev,
3388                       eq_chain->iro_prod_offset,
3389                       lm_bd_chain_prod_idx(&eq_chain->bd_chain),
3390                       BAR_CSTRORM_INTMEM);
3391 
3392     return LM_STATUS_SUCCESS;
3393 } /* lm_service_eq_intr */
3394 
3395 /**
3396  * @Description
3397  *     This function completes eq completions immediately
3398  *     (without fw completion).
3399  *
3400  * @param pdev
3401  * @param spe
3402  */
lm_eq_comp_cb(struct _lm_device_t * pdev,struct sq_pending_command * pending)3403 void lm_eq_comp_cb(struct _lm_device_t *pdev, struct sq_pending_command * pending)
3404 {
3405     union event_ring_elem elem = {{0}};
3406     u32_t                 cid  = pending->cid;
3407     u8_t                  cmd  = pending->cmd;
3408 
3409 
3410     /* We need to build the "elem" based on the spe */
3411     if ((pending->type & SPE_HDR_T_CONN_TYPE) == ETH_CONNECTION_TYPE) /* Some Ethernets complete on Eq. */
3412     {
3413         switch (cmd)
3414         {
3415         case RAMROD_CMD_ID_ETH_SET_MAC:
3416             elem.message.opcode = EVENT_RING_OPCODE_SET_MAC;
3417             elem.message.data.eth_event.echo = (0xff << ECORE_SWCID_SHIFT | cid); /*unknown type*/
3418 
3419             break;
3420 
3421         case RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES:
3422             elem.message.opcode = EVENT_RING_OPCODE_CLASSIFICATION_RULES;
3423             elem.message.data.eth_event.echo = (0xff << ECORE_SWCID_SHIFT | cid); /*unknown type*/
3424             break;
3425 
3426         case RAMROD_CMD_ID_ETH_FORWARD_SETUP:
3427             elem.message.opcode = EVENT_RING_OPCODE_FORWARD_SETUP;
3428             break;
3429 
3430         case RAMROD_CMD_ID_ETH_MULTICAST_RULES:
3431             elem.message.opcode = EVENT_RING_OPCODE_MULTICAST_RULES;
3432             elem.message.data.eth_event.echo = cid;
3433             break;
3434 
3435         case RAMROD_CMD_ID_ETH_FILTER_RULES:
3436             elem.message.opcode = EVENT_RING_OPCODE_FILTERS_RULES;
3437             elem.message.data.eth_event.echo = cid;
3438             break;
3439 
3440         case RAMROD_CMD_ID_ETH_RSS_UPDATE:
3441             elem.message.opcode = EVENT_RING_OPCODE_RSS_UPDATE_RULES;
3442             break;
3443 
3444         default:
3445             DbgBreakMsg("Unknown elem type received on eq\n");
3446         }
3447     }
3448     else if ((pending->type & SPE_HDR_T_CONN_TYPE)== NONE_CONNECTION_TYPE)
3449     {
3450         switch (cmd)
3451         {
3452         case RAMROD_CMD_ID_COMMON_FUNCTION_START:
3453             elem.message.opcode = EVENT_RING_OPCODE_FUNCTION_START;
3454             break;
3455 
3456         case RAMROD_CMD_ID_COMMON_FUNCTION_STOP:
3457             elem.message.opcode = EVENT_RING_OPCODE_FUNCTION_STOP;
3458             break;
3459 
3460         case RAMROD_CMD_ID_COMMON_CFC_DEL:
3461             elem.message.opcode = EVENT_RING_OPCODE_CFC_DEL;
3462             elem.message.data.cfc_del_event.cid = cid;
3463             break;
3464 
3465         case RAMROD_CMD_ID_COMMON_CFC_DEL_WB:
3466             elem.message.opcode = EVENT_RING_OPCODE_CFC_DEL_WB;
3467             elem.message.data.cfc_del_event.cid = cid;
3468             break;
3469 
3470         case RAMROD_CMD_ID_COMMON_STAT_QUERY:
3471             elem.message.opcode = EVENT_RING_OPCODE_STAT_QUERY;
3472             break;
3473 
3474         case RAMROD_CMD_ID_COMMON_STOP_TRAFFIC:
3475             elem.message.opcode = EVENT_RING_OPCODE_STOP_TRAFFIC;
3476             break;
3477 
3478         case RAMROD_CMD_ID_COMMON_START_TRAFFIC:
3479             elem.message.opcode = EVENT_RING_OPCODE_START_TRAFFIC;
3480             break;
3481 
3482         case RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE:
3483             elem.message.opcode = EVENT_RING_OPCODE_FUNCTION_UPDATE;
3484             break;
3485 
3486         case RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS:
3487             elem.message.opcode = EVENT_RING_OPCODE_AFEX_VIF_LISTS;
3488             break;
3489 
3490         default:
3491             DbgBreakMsg("Unknown elem type received on eq\n");
3492         }
3493     }
3494 
3495     lm_service_eq_elem(pdev, &elem);
3496 }
3497 
3498 /*********************** SQ RELATED FUNCTIONS ***************************/
3499 /* TODO: move more functions from command.h to here.                    */
lm_cid_recycled_cb_register(struct _lm_device_t * pdev,u8_t type,lm_cid_recycled_cb_t cb)3500 void lm_cid_recycled_cb_register(struct _lm_device_t *pdev, u8_t type, lm_cid_recycled_cb_t cb)
3501 {
3502 
3503     if ( CHK_NULL(pdev) ||
3504          CHK_NULL(cb) ||
3505          ERR_IF( type >= ARRSIZE( pdev->cid_recycled_callbacks ) ) ||
3506          ERR_IF( NULL != pdev->cid_recycled_callbacks[type] ) )
3507     {
3508         DbgBreakIf(!pdev);
3509         DbgBreakIf(!cb) ;
3510         DbgBreakIf( type >= ARRSIZE( pdev->cid_recycled_callbacks ) );
3511         DbgBreakIf( NULL != pdev->cid_recycled_callbacks[type] ) ;
3512         return;
3513     }
3514     pdev->cid_recycled_callbacks[type]= cb;
3515 }
3516 
lm_cid_recycled_cb_deregister(struct _lm_device_t * pdev,u8_t type)3517 void lm_cid_recycled_cb_deregister(struct _lm_device_t *pdev, u8_t type)
3518 {
3519 
3520     if ( CHK_NULL(pdev) ||
3521          ERR_IF( type >= ARRSIZE( pdev->cid_recycled_callbacks ) ) ||
3522          CHK_NULL(pdev->cid_recycled_callbacks[type]) )
3523 
3524     {
3525         DbgBreakIf(!pdev);
3526         DbgBreakIf( type >= ARRSIZE( pdev->cid_recycled_callbacks ) );
3527         return;
3528     }
3529     pdev->cid_recycled_callbacks[type] = (lm_cid_recycled_cb_t)NULL;
3530 }
3531 
lm_sq_change_state(struct _lm_device_t * pdev,lm_sq_state_t state)3532 void lm_sq_change_state(struct _lm_device_t *pdev, lm_sq_state_t state)
3533 {
3534     DbgMessage(pdev, INFORM, "Changing sq state from %d to %d\n", pdev->sq_info.sq_state, state);
3535 
3536     MM_ACQUIRE_SPQ_LOCK(pdev);
3537 
3538     pdev->sq_info.sq_state = state;
3539 
3540     MM_RELEASE_SPQ_LOCK(pdev);
3541 }
3542 
3543 /**
3544  * @Description
3545  *     function completes pending slow path requests instead of
3546  *     FW. Used in error recovery flow.
3547  *
3548  * @Assumptions:
3549  *      interrupts at this point are disabled and dpcs are
3550  *      flushed, thus no one else can complete these...
3551  *
3552  * @param pdev
3553  */
lm_sq_complete_pending_requests(struct _lm_device_t * pdev)3554 void lm_sq_complete_pending_requests(struct _lm_device_t *pdev)
3555 {
3556     enum connection_type        type      = 0;
3557     struct sq_pending_command * pending   = NULL;
3558 
3559     DbgMessage(pdev, WARN, "lm_sq_complete_pending_requests\n");
3560 
3561     /* unexpected if not under error recovery */
3562     DbgBreakIf(!pdev->params.enable_error_recovery);
3563 
3564     do
3565     {
3566         MM_ACQUIRE_SPQ_LOCK(pdev);
3567 
3568         /* Find the first entry that hasn't been handled yet. */
3569         /* We just peek and don't pop since completion of this pending request should contain removing
3570          * it from the completion list. However, it may not happen immediately */
3571         pending = (struct sq_pending_command *)d_list_peek_head(&pdev->sq_info.pending_complete);
3572 
3573         /* Look for the first entry that is "pending" but not completion_called yet. */
3574         while (pending && GET_FLAGS(pending->flags, SQ_PEND_COMP_CALLED))
3575         {
3576             pending = (struct sq_pending_command *)d_list_next_entry(&pending->list);
3577         }
3578 
3579         /* Mark pending completion as "handled" so that we don't handle it again...  */
3580         if (pending)
3581         {
3582             SET_FLAGS(pending->flags, SQ_PEND_COMP_CALLED);
3583         }
3584 
3585         MM_RELEASE_SPQ_LOCK(pdev);
3586 
3587         if (pending)
3588         {
3589             type = pending->type & SPE_HDR_T_CONN_TYPE;
3590 
3591             if (pdev->sq_info.sq_comp_cb[type])
3592             {
3593                 pdev->sq_info.sq_comp_cb[type](pdev, pending);
3594             }
3595             else
3596             {
3597                 DbgBreakMsg("unsupported pending sq: Not implemented yet\n");
3598             }
3599         }
3600 
3601         /*
3602          * lm_sq_post_pending can only cause (via lm_sq_flush)
3603          * lm_sq_complete_pending_requests DPC to be scheduled if
3604          * pdev->sq_info.sq_comp_scheduled==FALSE. Such scheduling
3605          * is acompnied by sq_comp_scheduled being set to TRUE.
3606          *
3607          * If we avoid setting pdev->sq_info.sq_comp_scheduled to FALSE,
3608          * we are gurenteed lm_sq_complete_pending_requests will not be
3609          * re-scheduled here.
3610          */
3611 
3612         lm_sq_post_pending(pdev);
3613 
3614     } while (!d_list_is_empty(&pdev->sq_info.pending_complete));
3615 
3616     /*
3617      * We are done completing pending requests in pending_list. However, any
3618      * new sp requests created by callbacks, need service.
3619      *
3620      * As we are outside the SPQ lock, this DPC may be preempted,
3621      * lm_sq_flush may have been called somewhere before this point.
3622      */
3623 
3624     MM_ACQUIRE_SPQ_LOCK(pdev);
3625 
3626     pdev->sq_info.sq_comp_scheduled = FALSE;
3627 
3628     /*
3629      * check if there is more to be flushed (new SPQ that entered after
3630      * the "while".)
3631      */
3632 
3633     if ((pdev->sq_info.sq_state == SQ_STATE_PENDING) && !d_list_is_empty(&pdev->sq_info.pending_complete))
3634     {
3635         MM_RELEASE_SPQ_LOCK(pdev);
3636         lm_sq_flush(pdev);
3637     }
3638     else
3639     {
3640         MM_RELEASE_SPQ_LOCK(pdev);
3641     }
3642 }
3643 
3644 
lm_sq_flush(struct _lm_device_t * pdev)3645 lm_status_t lm_sq_flush(struct _lm_device_t *pdev)
3646 {
3647     lm_status_t lm_status   = LM_STATUS_SUCCESS;
3648     u8_t        schedule_wi = FALSE;
3649 
3650     MM_ACQUIRE_SPQ_LOCK(pdev);
3651 
3652     if ((pdev->sq_info.sq_comp_scheduled == FALSE) &&
3653         ((pdev->sq_info.num_pending_high != MAX_HIGH_PRIORITY_SPE) ||
3654         (pdev->sq_info.num_pending_normal != MAX_NORMAL_PRIORITY_SPE)))
3655     {
3656         schedule_wi = TRUE;
3657         pdev->sq_info.sq_comp_scheduled = TRUE;
3658     }
3659 
3660     MM_RELEASE_SPQ_LOCK(pdev);
3661 
3662     if (schedule_wi)
3663     {
3664         lm_status = MM_REGISTER_DPC(pdev, lm_sq_complete_pending_requests);
3665         /* Alternative: WorkItem...
3666         lm_status = MM_REGISTER_LPME(pdev, lm_sq_complete_pending_requests, FALSE, FALSE);
3667         if (lm_status == LM_STATUS_SUCCESS)
3668         {
3669             return LM_STATUS_PENDING;
3670         }
3671         */
3672         if (lm_status == LM_STATUS_SUCCESS)
3673         {
3674             lm_status = LM_STATUS_PENDING;
3675         }
3676     }
3677 
3678     return lm_status;
3679 }
3680 
lm_sq_comp_cb_register(struct _lm_device_t * pdev,u8_t type,lm_sq_comp_cb_t cb)3681 lm_status_t lm_sq_comp_cb_register(struct _lm_device_t *pdev, u8_t type, lm_sq_comp_cb_t cb)
3682 {
3683     if ( CHK_NULL(pdev) ||
3684          CHK_NULL(cb) ||
3685          ERR_IF( type >= ARRSIZE( pdev->sq_info.sq_comp_cb ) ) ||
3686          ERR_IF( NULL != pdev->sq_info.sq_comp_cb[type] ) )
3687     {
3688         return LM_STATUS_INVALID_PARAMETER;
3689     }
3690     pdev->sq_info.sq_comp_cb[type]= cb;
3691     return LM_STATUS_SUCCESS;
3692 }
3693 
lm_sq_comp_cb_deregister(struct _lm_device_t * pdev,u8_t type)3694 lm_status_t lm_sq_comp_cb_deregister(struct _lm_device_t *pdev, u8_t type)
3695 {
3696 
3697     if ( CHK_NULL(pdev) ||
3698          ERR_IF( type >= ARRSIZE( pdev->sq_info.sq_comp_cb ) ) ||
3699          CHK_NULL(pdev->sq_info.sq_comp_cb[type]) )
3700 
3701     {
3702         return LM_STATUS_INVALID_PARAMETER;
3703     }
3704     pdev->sq_info.sq_comp_cb[type] = (lm_sq_comp_cb_t)NULL;
3705 
3706     return LM_STATUS_SUCCESS;
3707 }
3708 
lm_sq_is_empty(struct _lm_device_t * pdev)3709 u8_t lm_sq_is_empty(struct _lm_device_t *pdev)
3710 {
3711     u8_t empty = TRUE;
3712 
3713     MM_ACQUIRE_SPQ_LOCK(pdev);
3714 
3715     if ((pdev->sq_info.num_pending_high != MAX_HIGH_PRIORITY_SPE) ||
3716         (pdev->sq_info.num_pending_normal != MAX_NORMAL_PRIORITY_SPE))
3717     {
3718         empty = FALSE;
3719     }
3720 
3721     MM_RELEASE_SPQ_LOCK(pdev);
3722 
3723     return empty;
3724 }
3725 
3726 
3727 /**
3728  * @Description
3729  *     Posts from the normal + high priority lists as much as it
3730  *     can towards the FW.
3731  *
3732  * @Assumptions
3733  *     called under SQ_LOCK!!!
3734  *
3735  * @param pdev
3736  *
3737  * @return lm_status_t PENDING: if indeed requests were posted,
3738  *         SUCCESS o/w
3739  */
lm_sq_post_from_list(struct _lm_device_t * pdev)3740 static lm_status_t lm_sq_post_from_list(struct _lm_device_t *pdev)
3741 {
3742     lm_status_t                 lm_status = LM_STATUS_SUCCESS;
3743     struct sq_pending_command * pending   = NULL;
3744 
3745     while (pdev->sq_info.num_pending_normal)
3746     {
3747         pending = (void*)d_list_pop_head(&pdev->sq_info.pending_normal);
3748 
3749         if(!pending)
3750             break;
3751 
3752         pdev->sq_info.num_pending_normal --;
3753 
3754         DbgMessage(pdev, INFORM, "lm_sq_post: priority=%d, command=%d, type=%d, cid=%d num_pending_normal=%d\n",
3755                CMD_PRIORITY_NORMAL, pending->cmd, pending->type, pending->cid, pdev->sq_info.num_pending_normal);
3756 
3757         d_list_push_tail(&pdev->sq_info.pending_complete, &pending->list);
3758 
3759         _lm_sq_post(pdev,pending);
3760 
3761         lm_status = LM_STATUS_PENDING;
3762 
3763     }
3764 
3765     /* post high priority sp */
3766     while (pdev->sq_info.num_pending_high)
3767     {
3768         pending = (void*)d_list_pop_head(&pdev->sq_info.pending_high);
3769 
3770         if(!pending)
3771             break;
3772 
3773         pdev->sq_info.num_pending_high --;
3774         DbgMessage(pdev, INFORM, "lm_sq_post: priority=%d, command=%d, type=%d, cid=%d num_pending_normal=%d\n",
3775                CMD_PRIORITY_HIGH, pending->cmd, pending->type, pending->cid, pdev->sq_info.num_pending_normal);
3776 
3777         d_list_push_tail(&pdev->sq_info.pending_complete, &pending->list);
3778 
3779         _lm_sq_post(pdev, pending);
3780 
3781         lm_status = LM_STATUS_PENDING;
3782     }
3783 
3784     return lm_status;
3785 }
3786 
3787 /**
3788  * Description
3789  *  Add the entry to the pending SP list.
3790  *  Try to add entry's from the list to the sq_chain if possible.(there is are less then 8 ramrod commands pending)
3791  *
3792  * @param pdev
3793  * @param pending  - The pending list entry.
3794  * @param priority - (high or low) to witch list to insert the pending list entry.
3795  *
3796  * @return lm_status_t: LM_STATUS_SUCCESS on success or
3797  *         LM_STATUS_REQUEST_NOT_ACCEPTED if slowpath queue is
3798  *         in blocked state.
3799  */
lm_sq_post_entry(struct _lm_device_t * pdev,struct sq_pending_command * pending,u8_t priority)3800 lm_status_t lm_sq_post_entry(struct _lm_device_t       * pdev,
3801                              struct sq_pending_command * pending,
3802                              u8_t                        priority)
3803 {
3804     lm_status_t lm_status = LM_STATUS_FAILURE;
3805     u8_t        sq_flush  = FALSE;
3806 
3807     DbgBreakIf(! pdev);
3808 
3809     MM_ACQUIRE_SPQ_LOCK(pdev);
3810 
3811     if (pdev->sq_info.sq_state == SQ_STATE_BLOCKED)
3812     {
3813         // This state is valid in case hw failure such as fan failure happened.
3814         // so we removed assert was here before and changed only to trace CQ62337
3815         DbgMessage(pdev, FATAL, "lm_sq_post_entry: Unexpected slowpath command SQ_STATE_BLOCKED\n");
3816 
3817         MM_RELEASE_SPQ_LOCK(pdev);
3818 
3819         return LM_STATUS_REQUEST_NOT_ACCEPTED;
3820     }
3821 
3822     /* We shouldn't be posting any entries if the function-stop has already been posted... */
3823     if (((mm_le32_to_cpu(pending->command.hdr.conn_and_cmd_data) & SPE_HDR_T_CMD_ID)>>SPE_HDR_T_CMD_ID_SHIFT) != RAMROD_CMD_ID_COMMON_FUNCTION_STOP)
3824     {
3825         DbgBreakIf((pdev->eq_info.function_state == FUNCTION_STOP_POSTED) || (pdev->eq_info.function_state == FUNCTION_STOP_COMPLETED));
3826     }
3827 
3828     switch( priority )
3829     {
3830     case CMD_PRIORITY_NORMAL:
3831         /* add the request to the list tail*/
3832         d_list_push_tail(&pdev->sq_info.pending_normal, &pending->list);
3833         break;
3834     case CMD_PRIORITY_MEDIUM:
3835         /* add the request to the list head*/
3836         d_list_push_head(&pdev->sq_info.pending_normal, &pending->list);
3837         break;
3838     case CMD_PRIORITY_HIGH:
3839         /* add the request to the list head*/
3840         d_list_push_head(&pdev->sq_info.pending_high, &pending->list);
3841         break;
3842     default:
3843         DbgBreakIf( 1 ) ;
3844         // TODO_ROLLBACK - free sq_pending_command
3845         MM_RELEASE_SPQ_LOCK(pdev);
3846         return LM_STATUS_INVALID_PARAMETER ;
3847     }
3848 
3849     if(!(pdev->sq_info.num_pending_normal))
3850     {
3851         LM_COMMON_DRV_STATS_ATOMIC_INC_ETH(pdev, tx_no_sq_wqe);
3852     }
3853 
3854     lm_status = lm_sq_post_from_list(pdev);
3855     if (lm_status == LM_STATUS_PENDING)
3856     {
3857         /* New slowpath was posted in pending state... make sure to flush sq
3858          * after this... */
3859         if (pdev->sq_info.sq_state == SQ_STATE_PENDING)
3860         {
3861             sq_flush = TRUE;
3862         }
3863 
3864         lm_status = LM_STATUS_SUCCESS;
3865     }
3866 
3867     MM_RELEASE_SPQ_LOCK(pdev);
3868 
3869     if (sq_flush)
3870     {
3871         lm_sq_flush(pdev);
3872     }
3873     return lm_status ;
3874 }
3875 
3876 
3877 /*
3878     post a ramrod to the sq
3879     takes the sq pending list spinlock and adds the request
3880     will not block
3881     but the actuall posting to the sq might be deffered until there is room
3882     MUST only have one request pending per CID (this is up to the caller to enforce)
3883 */
lm_sq_post(struct _lm_device_t * pdev,u32_t cid,u8_t command,u8_t priority,u16_t type,u64_t data)3884 lm_status_t lm_sq_post(struct _lm_device_t *pdev,
3885                        u32_t                cid,
3886                        u8_t                 command,
3887                        u8_t                 priority,
3888                        u16_t                type,
3889                        u64_t                data)
3890 {
3891     struct sq_pending_command *pending  = NULL;
3892     lm_status_t               lm_status = LM_STATUS_SUCCESS;
3893     DbgBreakIf(! pdev);
3894     DbgBreakIf(! command); /* todo: make this more detailed*/
3895 
3896     /* allocate a new command struct and fill it */
3897     pending = mm_get_sq_pending_command(pdev);
3898     if( !pending )
3899     {
3900         DbgBreakIf(1);
3901         return LM_STATUS_FAILURE ;
3902     }
3903 
3904     lm_sq_post_fill_entry(pdev,pending,cid,command,type,data,TRUE);
3905 
3906     lm_status = lm_sq_post_entry(pdev,pending,priority);
3907 
3908     return lm_status ;
3909 }
3910 
3911 /*
3912     inform the sq mechanism of completed ramrods
3913     because the completions arrive on the fast-path rings
3914     the fast-path needs to inform the sq that the ramrod has been serviced
3915     will not block
3916     does not take any locks
3917 */
lm_sq_complete(struct _lm_device_t * pdev,u8_t priority,u8_t command,u16_t type,u32_t cid)3918 void lm_sq_complete(struct _lm_device_t *pdev, u8_t priority,
3919                     u8_t command, u16_t type, u32_t cid )
3920 {
3921 
3922     struct sq_pending_command *pending = NULL;
3923 
3924     MM_ACQUIRE_SPQ_LOCK(pdev);
3925 
3926     DbgMessage(pdev, INFORM, "lm_sq_complete: priority=%d, command=%d, type=%d, cid=%d num_pending_normal=%d\n",
3927                priority, command, type, cid, pdev->sq_info.num_pending_normal);
3928 
3929     switch( priority )
3930     {
3931     case CMD_PRIORITY_NORMAL:
3932     case CMD_PRIORITY_MEDIUM:
3933         pdev->sq_info.num_pending_normal ++;
3934         DbgBreakIf(pdev->sq_info.num_pending_normal > MAX_NORMAL_PRIORITY_SPE);
3935         break;
3936     case CMD_PRIORITY_HIGH:
3937         pdev->sq_info.num_pending_high ++;
3938         DbgBreakIf(pdev->sq_info.num_pending_high > MAX_HIGH_PRIORITY_SPE);
3939         break;
3940     default:
3941         DbgBreakIf( 1 ) ;
3942         break;
3943     }
3944 
3945     /* update sq consumer */
3946     pdev->sq_info.sq_chain.con_idx ++;
3947     pdev->sq_info.sq_chain.bd_left ++;
3948 
3949     /* Search for the completion in the pending_complete list*/
3950     /* Currently only supported if error recovery is supported */
3951     pending = (void*)d_list_peek_head(&pdev->sq_info.pending_complete);
3952 
3953     if (pdev->params.validate_sq_complete)
3954     {
3955         DbgBreakIf(!pending); /* not expected, but will deal with it... just won't  */
3956     }
3957 
3958     if (pdev->params.validate_sq_complete)
3959     {
3960         while (pending)
3961         {
3962             if (((pending->type & SPE_HDR_T_CONN_TYPE) == type) &&
3963                 (pending->cmd == command) &&
3964                 (pending->cid == cid))
3965             {
3966                 /* got it... remove from list and free it */
3967                 d_list_remove_entry(&pdev->sq_info.pending_complete, &pending->list);
3968                 if(GET_FLAGS(pending->flags, SQ_PEND_RELEASE_MEM))
3969                 {
3970                     mm_return_sq_pending_command(pdev, pending);
3971                 }
3972                 break;
3973             }
3974             pending = (void*)d_list_next_entry(&pending->list);
3975         }
3976     }
3977     else
3978     {
3979         /* TODO_ER: on no validation, just take the head... Workaround for mc-diag */
3980         pending = (void*)d_list_pop_head(&pdev->sq_info.pending_complete);
3981         if(CHK_NULL(pending))
3982         {
3983             DbgBreakMsg("lm_sq_complete pending is NULL");
3984         }
3985         else
3986         {
3987             if((GET_FLAGS(pending->flags, SQ_PEND_RELEASE_MEM)))
3988             {
3989                 mm_return_sq_pending_command(pdev, pending);
3990             }
3991         }
3992     }
3993 
3994     DbgBreakIf(!pending); /* means none were found, assert but can deal with it... */
3995 
3996     MM_RELEASE_SPQ_LOCK(pdev);
3997 }
3998 
3999 /**
4000  * @description
4001  *    do any deffered posting pending on the sq, will take the list spinlock
4002  *    will not block. Check sq state, if its pending (it means no hw...) call flush
4003  *    at the end, which will take care of completing these completions internally.
4004  * @param pdev
4005  *
4006  * @return lm_status_t SUCCESS: is no pending requests were sent. PENDING if a
4007  *                              if pending request was sent.
4008  */
lm_sq_post_pending(struct _lm_device_t * pdev)4009 lm_status_t lm_sq_post_pending(struct _lm_device_t *pdev)
4010 {
4011     lm_status_t                 lm_status = LM_STATUS_SUCCESS;
4012     u8_t                        sq_flush  = FALSE;
4013 
4014     if ( CHK_NULL(pdev) )
4015     {
4016         DbgBreakIf(!pdev);
4017         return LM_STATUS_INVALID_PARAMETER;
4018     }
4019 
4020     MM_ACQUIRE_SPQ_LOCK(pdev);
4021 
4022     lm_status = lm_sq_post_from_list(pdev);
4023 
4024     if (lm_status == LM_STATUS_PENDING)
4025     {
4026         /* New slowpath was posted in pending state... make sure to flush sq
4027          * after this... */
4028         if (pdev->sq_info.sq_state == SQ_STATE_PENDING)
4029         {
4030             sq_flush = TRUE;
4031         }
4032     }
4033 
4034     MM_RELEASE_SPQ_LOCK(pdev);
4035 
4036     if (sq_flush)
4037     {
4038         lm_sq_flush(pdev);
4039     }
4040     return lm_status;
4041 }
4042 
4043 
4044 /*********************** ETH SLOWPATH RELATED FUNCTIONS ***************************/
4045 
lm_eth_init_command_comp(struct _lm_device_t * pdev,struct common_ramrod_eth_rx_cqe * cqe)4046 void lm_eth_init_command_comp(struct _lm_device_t *pdev, struct common_ramrod_eth_rx_cqe *cqe)
4047 {
4048     lm_tpa_info_t* tpa_info   = &LM_TPA_INFO(pdev);
4049     void *         cookie             = NULL;
4050     u32_t          conn_and_cmd_data   = mm_le32_to_cpu(cqe->conn_and_cmd_data);
4051     u32_t          cid                 = SW_CID(conn_and_cmd_data);
4052     enum           eth_spqe_cmd_id  command   = conn_and_cmd_data >> COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
4053     u8_t           ramrod_type         = cqe->ramrod_type;
4054     u32_t          empty_data          = 0;
4055     u32_t          connection_info_idx = 0;
4056 #ifdef VF_INVOLVED
4057     u32_t          max_eth_cid;
4058 #endif
4059 
4060     DbgBreakIf(!pdev);
4061 
4062     DbgMessage(pdev, WARNl2sp,
4063                 "lm_eth_comp_cb: completion for cid=%d, command %d(0x%x)\n", cid, command, command);
4064 
4065     DbgBreakIfAll(ramrod_type & COMMON_RAMROD_ETH_RX_CQE_ERROR);
4066 
4067     connection_info_idx = lm_get_sw_client_idx_from_cid(pdev,cid);
4068 
4069     switch (command)
4070     {
4071         case RAMROD_CMD_ID_ETH_CLIENT_SETUP:
4072             DbgBreakIf(lm_get_con_state(pdev, cid) != LM_CON_STATE_OPEN_SENT);
4073             lm_set_con_state(pdev, cid, LM_CON_STATE_OPEN);
4074             DbgMessage(pdev, WARNl2sp,
4075                         "lm_eth_comp_cb: RAMROD ETH SETUP completed for cid=%d, - calling lm_extract_ramrod_req!\n", cid);
4076             break;
4077 
4078         case RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP:
4079             DbgBreakIf(lm_get_con_state(pdev, cid) != LM_CON_STATE_OPEN_SENT);
4080             lm_set_con_state(pdev, cid, LM_CON_STATE_OPEN);
4081             DbgMessage(pdev, WARNl2sp,
4082                         "lm_eth_comp_cb: RAMROD ETH SETUP completed for cid=%d, - calling lm_extract_ramrod_req!\n", cid);
4083             break;
4084 
4085         case RAMROD_CMD_ID_ETH_CLIENT_UPDATE:
4086             DbgBreakIf(PFDEV(pdev)->client_info[connection_info_idx].update.state != LM_CLI_UPDATE_USED);
4087             PFDEV(pdev)->client_info[connection_info_idx].update.state = LM_CLI_UPDATE_RECV;
4088             DbgMessage(pdev, WARNl2sp,
4089                         "lm_eth_comp_cb: RAMROD ETH Update completed for cid=%d, - calling lm_extract_ramrod_req!\n", cid);
4090             break;
4091 
4092         case RAMROD_CMD_ID_ETH_HALT:
4093             DbgBreakIf(lm_get_con_state(pdev, cid) != LM_CON_STATE_HALT_SENT);
4094             lm_set_con_state(pdev, cid, LM_CON_STATE_HALT);
4095             DbgMessage(pdev, WARNl2sp, "lm_eth_comp_cb:RAMROD_CMD_ID_ETH_HALT- calling lm_extract_ramrod_req!\n");
4096             break;
4097 
4098         case RAMROD_CMD_ID_ETH_EMPTY:
4099             empty_data        = mm_le32_to_cpu(cqe->protocol_data.data_lo);
4100             MM_EMPTY_RAMROD_RECEIVED(pdev,empty_data);
4101             DbgMessage(pdev, WARNl2sp, "lm_eth_comp_cb:RAMROD_CMD_ID_ETH_EMPTY- calling lm_extract_ramrod_req!\n");
4102             break;
4103         case RAMROD_CMD_ID_ETH_TPA_UPDATE:
4104             DbgMessage(pdev, WARNl2sp, "lm_eth_comp_cb:RAMROD_CMD_ID_ETH_TPA_UPDATE- calling lm_extract_ramrod_req!\n");
4105 #ifdef VF_INVOLVED
4106             if (MM_DCB_MP_L2_IS_ENABLE(pdev))
4107             {
4108                 max_eth_cid = lm_mp_max_cos_chain_used(pdev);
4109             }
4110             else
4111             {
4112                 max_eth_cid = LM_SB_CNT(pdev) + MAX_NON_RSS_CHAINS;
4113             }
4114             if (IS_CHANNEL_VIRT_MODE_MASTER_PFDEV(pdev) && (cid >= max_eth_cid))
4115             {
4116                 u8_t           abs_vf_id = 0xff;
4117                 u8_t           vf_q_id   = 0xff;
4118                 lm_vf_info_t * vf_info   = NULL;
4119 
4120                 abs_vf_id = GET_ABS_VF_ID_FROM_PF_CID(cid);
4121                 vf_q_id = GET_VF_Q_ID_FROM_PF_CID(cid);
4122                 vf_info = lm_pf_find_vf_info_by_abs_id(pdev, abs_vf_id);
4123                 DbgBreakIf(!vf_info);
4124                 mm_atomic_dec((u32_t*)(&vf_info->vf_tpa_info.ramrod_recv_cnt));
4125             }
4126             else
4127 #endif
4128             {
4129                 if (IS_VFDEV(pdev))
4130                 {
4131                     cid = GET_VF_Q_ID_FROM_PF_CID(cid);
4132                 }
4133                 if (0 == mm_atomic_dec((u32_t*)(&tpa_info->ramrod_recv_cnt)))
4134                 {
4135                         tpa_info->ipvx_enabled_current = tpa_info->ipvx_enabled_required;
4136                         tpa_info->state = TPA_STATE_NONE; /* Done with ramrods... */
4137                         if (tpa_info->update_cookie)
4138                         {
4139                             cookie = (void *)tpa_info->update_cookie;
4140                             tpa_info->update_cookie = NULL;
4141                             mm_set_done(pdev, cid, cookie);
4142                         }
4143 
4144                 }
4145             }
4146             if (!IS_PFDEV(pdev))
4147             {
4148                 return; /*To prevent lm_sq_completion processing for non existing (not submited) pending item*/
4149             }
4150             break;
4151         case RAMROD_CMD_ID_ETH_TERMINATE:
4152             DbgBreakIf(lm_get_con_state(pdev, cid) != LM_CON_STATE_HALT);
4153             lm_set_con_state(pdev, cid, LM_CON_STATE_TERMINATE);
4154             DbgMessage(pdev, WARNl2sp, "lm_eth_comp_cb:RAMROD_CMD_ID_ETH_TERMINATE- calling lm_extract_ramrod_req!\n");
4155             break;
4156 
4157         default:
4158             DbgMessage(pdev, FATAL,"lm_eth_init_command_comp_cb unhandled ramrod comp command=%d\n",command);
4159             DbgBreakIf(1); // unhandled ramrod!
4160             break;
4161     }
4162 #ifdef __LINUX
4163     mm_eth_ramrod_comp_cb(pdev, cqe);
4164 #endif //__LINUX
4165     lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, command, ETH_CONNECTION_TYPE, cid);
4166 }
4167 
4168 /**
4169  * @Description
4170  *      Function is the callback function for completing eth
4171  *      completions when no chip access exists. Part of
4172  *      "complete-pending-sq" flow
4173  * @param pdev
4174  * @param spe
4175  */
lm_eth_comp_cb(struct _lm_device_t * pdev,struct sq_pending_command * pending)4176 void lm_eth_comp_cb(struct _lm_device_t *pdev, struct sq_pending_command * pending)
4177 {
4178     struct common_ramrod_eth_rx_cqe cqe;
4179 
4180     /* The idea is to prepare a cqe and call: common_ramrod_eth_rx_cqe */
4181     cqe.conn_and_cmd_data     = pending->command.hdr.conn_and_cmd_data;
4182     cqe.ramrod_type           = RX_ETH_CQE_TYPE_ETH_RAMROD;
4183     cqe.protocol_data.data_hi = pending->command.protocol_data.hi;
4184     cqe.protocol_data.data_lo = pending->command.protocol_data.lo;
4185 
4186     switch (pending->cmd)
4187     {
4188         /* Ramrods that complete on the EQ */
4189     case RAMROD_CMD_ID_ETH_RSS_UPDATE:
4190     case RAMROD_CMD_ID_ETH_FILTER_RULES:
4191     case RAMROD_CMD_ID_ETH_MULTICAST_RULES:
4192     case RAMROD_CMD_ID_ETH_FORWARD_SETUP:
4193     case RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES:
4194     case RAMROD_CMD_ID_ETH_SET_MAC:
4195         lm_eq_comp_cb(pdev, pending);
4196         break;
4197 
4198         /* Ramrods that complete on the RCQ */
4199     case RAMROD_CMD_ID_ETH_CLIENT_SETUP:
4200     case RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP:
4201     case RAMROD_CMD_ID_ETH_CLIENT_UPDATE:
4202     case RAMROD_CMD_ID_ETH_HALT:
4203     case RAMROD_CMD_ID_ETH_EMPTY:
4204     case RAMROD_CMD_ID_ETH_TERMINATE:
4205         lm_eth_init_command_comp(pdev, &cqe);
4206         break;
4207 
4208     default:
4209         DbgBreakMsg("Unknown cmd");
4210     }
4211 }
4212 
lm_check_mac_addr_exist(struct _lm_device_t * pdev,u8_t chain_idx,u8_t * mac_addr,u16_t vlan_tag,u8_t is_encap_inner_mac_filter)4213 u8_t lm_check_mac_addr_exist(struct _lm_device_t *pdev, u8_t chain_idx, u8_t *mac_addr, u16_t vlan_tag, u8_t is_encap_inner_mac_filter)
4214 {
4215 	struct ecore_vlan_mac_obj          *dest_obj     = NULL;
4216 	ecore_status_t                      ecore_status = ECORE_SUCCESS;
4217 	u8_t                                is_exist       = FALSE;
4218 	union ecore_classification_ramrod_data
4219 					classification_ramrod_data = {{{0}}};
4220 
4221 	if ERR_IF(!pdev || !mac_addr)
4222 	{
4223 	    DbgBreakMsg("lm_move_mac_addr: invalid params\n");
4224 	    return LM_STATUS_INVALID_PARAMETER;
4225 	}
4226 #if 0
4227 	if (lm_reset_is_inprogress(pdev))
4228 	{
4229 	    DbgMessage(pdev, FATAL, "lm_move_mac_addr: Under FLR!!!\n");
4230 	    return  LM_STATUS_SUCCESS;
4231 	}
4232 #endif
4233 
4234 	if (vlan_tag != LM_SET_CAM_NO_VLAN_FILTER)
4235 	{
4236 	    dest_obj = &pdev->client_info[chain_idx].mac_vlan_obj;
4237 	    mm_memcpy(classification_ramrod_data.vlan_mac.mac, mac_addr, sizeof(classification_ramrod_data.vlan_mac.mac));
4238 	    classification_ramrod_data.vlan_mac.vlan = vlan_tag;
4239 	    classification_ramrod_data.vlan_mac.is_inner_mac = is_encap_inner_mac_filter;
4240 	}
4241 	else
4242 	{
4243 	    dest_obj = &pdev->client_info[chain_idx].mac_obj;
4244             mm_memcpy(classification_ramrod_data.mac.mac, mac_addr, sizeof(classification_ramrod_data.mac.mac) );
4245             classification_ramrod_data.mac.is_inner_mac = is_encap_inner_mac_filter;
4246 	}
4247 
4248 	ecore_status = dest_obj->check_add(pdev,dest_obj,&classification_ramrod_data);
4249 	if (ecore_status == ECORE_EXISTS)
4250 	{
4251             is_exist = TRUE;
4252 	}
4253 	else if (ecore_status == ECORE_SUCCESS)
4254 	{
4255 	    is_exist = FALSE;
4256 	}
4257 	else
4258 	{
4259 	    DbgBreak();
4260 	}
4261 	return is_exist;
4262 }
4263 
lm_update_default_vlan(IN struct _lm_device_t * pdev,IN u8_t client_idx,IN const u16_t silent_vlan_value,IN const u16_t silent_vlan_mask,IN const u8_t silent_vlan_removal_flg,IN const u8_t silent_vlan_change_flg,IN const u16_t default_vlan,IN const u8_t default_vlan_enable_flg,IN const u8_t default_vlan_change_flg)4264 lm_status_t lm_update_default_vlan(IN struct _lm_device_t    *pdev, IN u8_t client_idx,
4265                               IN const u16_t            silent_vlan_value,
4266                               IN const u16_t            silent_vlan_mask,
4267                               IN const u8_t             silent_vlan_removal_flg,
4268                               IN const u8_t             silent_vlan_change_flg,
4269                               IN const u16_t            default_vlan,
4270                               IN const u8_t             default_vlan_enable_flg,
4271                               IN const u8_t             default_vlan_change_flg)
4272 {
4273     struct client_update_ramrod_data * client_update_data_virt = pdev->client_info[client_idx].update.data_virt;
4274     lm_status_t                        lm_status               = LM_STATUS_FAILURE;
4275     u32_t                              con_state               = 0;
4276     const u32_t                        cid                     = client_idx; //lm_get_cid_from_sw_client_idx(pdev);
4277 
4278     if CHK_NULL(client_update_data_virt)
4279     {
4280         return LM_STATUS_FAILURE;
4281     }
4282 
4283     mm_mem_zero(client_update_data_virt , sizeof(struct client_update_ramrod_data));
4284 
4285     MM_ACQUIRE_ETH_CON_LOCK(pdev);
4286 
4287     // We will send a client update ramrod in any case we can we don't optimize this flow.
4288     // Client setup may already took the correct NIV value but the ramrod will be sent anyway
4289     con_state = lm_get_con_state(pdev, cid);
4290     if((LM_CON_STATE_OPEN != con_state) &&
4291         (LM_CON_STATE_OPEN_SENT != con_state))
4292     {
4293         // Clinet is not in a state that it can recieve the ramrod
4294         MM_RELEASE_ETH_CON_LOCK(pdev);
4295         return LM_STATUS_ABORTED;
4296     }
4297 
4298     /* We don't expect this function to be called for non eth regular connections.
4299      * If we hit this assert it means we need support for SRIOV +  AFEX
4300      */
4301     if (cid >= MAX_RX_CHAIN(pdev))
4302     {
4303         DbgBreakIf(cid >= MAX_RX_CHAIN(pdev));
4304         MM_RELEASE_ETH_CON_LOCK(pdev);
4305         return LM_STATUS_FAILURE;
4306     }
4307 
4308     DbgBreakIf( LM_CLI_UPDATE_NOT_USED != pdev->client_info[client_idx].update.state);
4309 
4310     pdev->client_info[client_idx].update.state = LM_CLI_UPDATE_USED;
4311 
4312     client_update_data_virt->client_id  = LM_FW_CLI_ID(pdev, client_idx);
4313     client_update_data_virt->func_id    = FUNC_ID(pdev); /* FIXME: VFID needs to be given here for VFs... */
4314 
4315     client_update_data_virt->silent_vlan_value          = mm_cpu_to_le16(silent_vlan_value);
4316     client_update_data_virt->silent_vlan_mask           = mm_cpu_to_le16(silent_vlan_mask);
4317     client_update_data_virt->silent_vlan_removal_flg    = silent_vlan_removal_flg;
4318     client_update_data_virt->silent_vlan_change_flg     = silent_vlan_change_flg;
4319 
4320     client_update_data_virt->refuse_outband_vlan_flg        = 0;
4321     client_update_data_virt->refuse_outband_vlan_change_flg = 0;
4322     client_update_data_virt->default_vlan = default_vlan;
4323     client_update_data_virt->default_vlan_enable_flg    = default_vlan_enable_flg;
4324     client_update_data_virt->default_vlan_change_flg    = default_vlan_change_flg;
4325 
4326     lm_status = lm_sq_post(pdev,
4327                            cid,
4328                            RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
4329                            CMD_PRIORITY_MEDIUM,
4330                            ETH_CONNECTION_TYPE,
4331                            pdev->client_info[client_idx].update.data_phys.as_u64);
4332 
4333     MM_RELEASE_ETH_CON_LOCK(pdev);
4334 
4335 
4336     if (lm_status != LM_STATUS_SUCCESS)
4337     {
4338         return lm_status;
4339     }
4340 
4341     lm_status = lm_wait_state_change(pdev, &pdev->client_info[client_idx].update.state, LM_CLI_UPDATE_RECV);
4342 
4343     pdev->client_info[client_idx].update.state = LM_CLI_UPDATE_NOT_USED;
4344 
4345     return lm_status;
4346 }
4347 
4348