/* illumos-gate: usr/src/uts/common/io/bnxe/577xx/drivers/common/lm/device/lm_recv.c (revision 48bbca816818409505a6e214d0911fda44e622e3) */
1 #include "lm5710.h"
2 #include "command.h"
3 #include "bd_chain.h"
4 #include "ecore_common.h"
5 #include "mm.h"
6 
7 #define OOO_CID_USTRORM_PROD_DIFF           (0x4000)
8 
9 u8_t lm_is_rx_completion(lm_device_t *pdev, u8_t chain_idx)
10 {
11     u8_t result               = FALSE;
12     lm_rcq_chain_t *rcq_chain = &LM_RCQ(pdev, chain_idx);
13 
14     DbgBreakIf(!(pdev && rcq_chain));
15 
16     //the hw_con_idx_ptr of the rcq_chain points directly to the Rx index in the USTORM part of the non-default status block
17     if (rcq_chain->hw_con_idx_ptr &&
18         (mm_le16_to_cpu(*rcq_chain->hw_con_idx_ptr) !=
19         lm_bd_chain_cons_idx(&rcq_chain->bd_chain)))
20     {
21         result = TRUE;
22     }
23     DbgMessage(pdev, INFORMi, "lm_is_rx_completion: result is:%s\n", result? "TRUE" : "FALSE");
24 
25     return result;
26 }
27 
28 /*******************************************************************************
29  * Description:
30  *  set both rcq, rx bd and rx sge (if valid) prods
31  * Return:
32  ******************************************************************************/
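/*
 * Producer layout sketch (derived from the packing done in the function below;
 * the FW-side structure name is not repeated here):
 *   bits  0..15 : RCQ producer    - lm_bd_chain_prod_idx(rcq_chain_bd)
 *   bits 16..31 : Rx BD producer  - lm_bd_chain_prod_idx(rx_chain_bd)
 *   bits 32..63 : Rx SGE producer - written only when rx_chain_sge is valid,
 *                                   in which case a single 64-bit store is used.
 */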
33 static void FORCEINLINE lm_rx_set_prods( lm_device_t     *pdev,
34                                          u16_t const     iro_prod_offset,
35                                          lm_bd_chain_t   *rcq_chain_bd,
36                                          lm_bd_chain_t   *rx_chain_bd,
37                                          lm_bd_chain_t   *rx_chain_sge,
38                                          const u32_t     chain_idx )
39 {
40     lm_rx_chain_t*  rxq_chain           = &LM_RXQ(pdev, chain_idx);
41     u32_t           val32               = 0;
42     u64_t           val64               = 0;
43     u16_t           val16_lo            = lm_bd_chain_prod_idx(rcq_chain_bd);
44     u16_t           val16_hi            = lm_bd_chain_prod_idx(rx_chain_bd);
45     u32_t const     ustorm_bar_offset   = (IS_CHANNEL_VFDEV(pdev)) ? VF_BAR0_USDM_QUEUES_OFFSET: BAR_USTRORM_INTMEM ;
46 
47     if(OOO_CID(pdev) == chain_idx)
48     {
49         DbgBreakIfFastPath( NULL != rx_chain_sge );
50         DbgBreakIfFastPath(IS_CHANNEL_VFDEV(pdev));
51 
52         LM_INTMEM_WRITE16(PFDEV(pdev),
53                           TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(FUNC_ID(pdev)),
54                           rxq_chain->common.bd_prod_without_next,
55                           BAR_TSTRORM_INTMEM);
56 
57         // Ugly FW workaround: the OOO FW expects the USTORM producer values to be offset by OOO_CID_USTRORM_PROD_DIFF.
58         val16_lo    += OOO_CID_USTRORM_PROD_DIFF;
59         val16_hi    += OOO_CID_USTRORM_PROD_DIFF;
60     }
61 
62     val32       = ((u32_t)(val16_hi << 16) | val16_lo);
63 
64     //notify the fw of the prod of the RCQ. No need to do that for the Rx bd chain.
65     if( rx_chain_sge )
66     {
67         val64 = (((u64_t)lm_bd_chain_prod_idx(rx_chain_sge))<<32) | val32 ;
68 
69         LM_INTMEM_WRITE64(PFDEV(pdev),
70                           iro_prod_offset,
71                           val64,
72                           ustorm_bar_offset);
73     }
74     else
75     {
76         LM_INTMEM_WRITE32(PFDEV(pdev),
77                           iro_prod_offset,
78                           val32,
79                           ustorm_bar_offset);
80     }
81 }
82 /*******************************************************************************
83  * Description:
84  *  rx_chain_bd is always valid; rx_chain_sge is valid only if LAH is enabled on this queue.
85  *  All if() checks are done on rx_chain_bd since it is always valid, and the sge chain must stay consistent with it;
86  *  we verify that consistency whenever the sge chain is valid.
87  *  All bd_xxx operations are done on both chains.
88  * Return:
89  ******************************************************************************/
90 u32_t
91 lm_post_buffers(
92     lm_device_t *pdev,
93     u32_t chain_idx,
94     lm_packet_t *packet,/* optional. */
95     u8_t const  is_tpa)
96 {
97     lm_rx_chain_common_t*   rxq_chain_common    = NULL;
98     lm_bd_chain_t*          rx_chain_bd         = NULL;
99     lm_rx_chain_t*          rxq_chain           = NULL;
100     lm_tpa_chain_t *        tpa_chain           = NULL;
101     lm_bd_chain_t*          bd_chain_to_check   = NULL;
102     lm_rcq_chain_t*         rcq_chain           = &LM_RCQ(pdev, chain_idx);
103     lm_bd_chain_t*          rx_chain_sge        = NULL;
104     u32_t                   pkt_queued          = 0;
105     struct eth_rx_bd*       cur_bd              = NULL;
106     struct eth_rx_sge*      cur_sge             = NULL;
107     u32_t                   prod_bseq           = 0;
108     u32_t                   rcq_prod_bseq       = 0;
109     u16_t                   current_prod        = 0;
110     u16_t                   active_entry        = 0;
111 
112     DbgMessage(pdev, INFORMl2 , "### lm_post_buffers\n");
113 
114     // Verify that the BD chains are consistent
115     DbgBreakIfFastPath( rx_chain_sge && !lm_bd_chains_are_consistent( rx_chain_sge, rx_chain_bd ) );
116 
117     if(FALSE == is_tpa)
118     {
119         rxq_chain_common    = &LM_RXQ_COMMON(pdev, chain_idx);
120         rx_chain_bd         = &LM_RXQ_CHAIN_BD(pdev, chain_idx);
121         rx_chain_sge        = LM_RXQ_SGE_PTR_IF_VALID(pdev, chain_idx);
122         rxq_chain           = &LM_RXQ(pdev, chain_idx);
123         tpa_chain           = NULL;
124         /* the assumption is that the number of cqes is less than or equal to the corresponding rx bds,
125            therefore if there are no cqes left, break */
126         bd_chain_to_check   = &rcq_chain->bd_chain;
127     }
128     else
129     {
130         rxq_chain_common    = &LM_TPA_COMMON(pdev, chain_idx);
131         rx_chain_bd         = &LM_TPA_CHAIN_BD(pdev, chain_idx);
132         rx_chain_sge        = NULL;
133         rxq_chain           = NULL;
134         tpa_chain           = &LM_TPA(pdev, chain_idx);
135         // In TPA we don't add to the RCQ when posting buffers
136         bd_chain_to_check   = rx_chain_bd;
137     }
138     /* Make sure we have a bd left for posting a receive buffer. */
139     if(packet)
140     {
141         // Insert given packet.
142         DbgBreakIfFastPath(SIG(packet) != L2PACKET_RX_SIG);
143 
144         if(lm_bd_chain_is_empty(bd_chain_to_check))
145         {
146             s_list_push_tail(&rxq_chain_common->free_descq, &packet->link);
147             packet = NULL;
148         }
149     }
150     else if(!lm_bd_chain_is_empty(bd_chain_to_check))
151     {
152         packet = (lm_packet_t *) s_list_pop_head(&rxq_chain_common->free_descq);
153     }
154     prod_bseq     = rxq_chain_common->prod_bseq;
155 
156     // In TPA we won't increment rcq_prod_bseq
157     rcq_prod_bseq = rcq_chain->prod_bseq;
158 
159     while(packet)
160     {
161 
162         current_prod = lm_bd_chain_prod_idx(rx_chain_bd);
163         cur_bd  = lm_bd_chain_produce_bd(rx_chain_bd);
164         rxq_chain_common->bd_prod_without_next++;
165         cur_sge = rx_chain_sge ? lm_bd_chain_produce_bd(rx_chain_sge) : NULL;
166 
167         prod_bseq += packet->l2pkt_rx_info->mem_size;
168 
169         if(FALSE == is_tpa)
170         {
171             //take care of the RCQ related prod stuff.
172 
173             //update the prod of the RCQ only AFTER the Rx bd!
174             rcq_prod_bseq += packet->l2pkt_rx_info->mem_size;
175 
176             /* These were actually produced before by fw, but we only produce them now to make sure they're synced with the rx-chain */
177             lm_bd_chain_bd_produced(&rcq_chain->bd_chain);
178         }
179 
180         packet->u1.rx.next_bd_idx = lm_bd_chain_prod_idx(rx_chain_bd);
181 #if L2_RX_BUF_SIG
182         /* make sure signatures exist before and after the buffer */
183         DbgBreakIfFastPath(SIG(packet->u1.rx.mem_virt - pdev->params.rcv_buffer_offset) != L2PACKET_RX_SIG);
184         DbgBreakIfFastPath(END_SIG(packet->u1.rx.mem_virt, MAX_L2_CLI_BUFFER_SIZE(pdev, chain_idx)) != L2PACKET_RX_SIG);
185 #endif /* L2_RX_BUF_SIG */
186 
187         cur_bd->addr_lo  = mm_cpu_to_le32(packet->u1.rx.mem_phys[0].as_u32.low);
188         cur_bd->addr_hi  = mm_cpu_to_le32(packet->u1.rx.mem_phys[0].as_u32.high);
189 
190         if( cur_sge )
191         {
192             cur_sge->addr_lo = mm_cpu_to_le32(packet->u1.rx.mem_phys[1].as_u32.low);
193             cur_sge->addr_hi = mm_cpu_to_le32(packet->u1.rx.mem_phys[1].as_u32.high);
194         }
195 
196         pkt_queued++;
197 
198         if(FALSE == is_tpa)
199         {
200             s_list_push_tail(&rxq_chain->active_descq, &packet->link);
201         }
202         else
203         {
204             // Active descriptor must sit in the same entry
205             active_entry = LM_TPA_BD_ENTRY_TO_ACTIVE_ENTRY(pdev, chain_idx, current_prod);
206 
207             LM_TPA_ACTIVE_ENTRY_BOUNDARIES_VERIFY(pdev, chain_idx,active_entry);
208             tpa_chain->sge_chain.active_descq_array[active_entry] = packet;
209         }
210 
211         if(lm_bd_chain_is_empty(bd_chain_to_check))
212         {
213             break;
214         }
215 
216         /* Make sure we have a bd left for posting a receive buffer. */
217         packet = (lm_packet_t *) s_list_pop_head(&rxq_chain_common->free_descq);
218     }
219 
220     rxq_chain_common->prod_bseq = prod_bseq;
221 
222 
223     //update the prod of the RCQ only AFTER the Rx bd!
224     // This code seems unnecessary; maybe it should be deleted.
225     // In TPA we won't increment rcq_prod_bseq.
226     rcq_chain->prod_bseq = rcq_prod_bseq;
227 
228     if(pkt_queued)
229     {
230         //notify the fw of the prod
231         if(FALSE == is_tpa)
232         {
233             lm_rx_set_prods(pdev, rcq_chain->iro_prod_offset, &rcq_chain->bd_chain, rx_chain_bd, rx_chain_sge ,chain_idx);
234         }
235         else
236         {
237             lm_rx_set_prods(pdev, rcq_chain->iro_prod_offset, &rcq_chain->bd_chain, &LM_RXQ_CHAIN_BD(pdev, chain_idx), &LM_TPA_CHAIN_BD(pdev, chain_idx) ,chain_idx);
238         }
239     }
240 
241     DbgMessage(pdev, INFORMl2 , "lm_post_buffers - bd con: %d bd prod: %d \n",
242                 lm_bd_chain_cons_idx(rx_chain_bd),lm_bd_chain_prod_idx(rx_chain_bd));
243     DbgMessage(pdev, INFORMl2 , "lm_post_buffers - cq con: %d cq prod: %d \n",
244                 lm_bd_chain_cons_idx(&rcq_chain->bd_chain) ,lm_bd_chain_prod_idx(&rcq_chain->bd_chain));
245 
246     return pkt_queued;
247 } /* lm_post_buffers */
248 
249 /**
250  * @description
251  * Updates sge_tpa_chain->last_max_con if there is a new max.
252  * The basic assumption is that the BD prod is always higher than the BD
253  * cons.
254  * The subtraction tells us which index is closer to the BD prod.
255  * @param pdev
256  * @param chain_idx
257  * @param new_index
258  *
259  * @return STATIC void
260  */
261 __inline STATIC void
262 lm_tpa_sge_update_last_max(IN       lm_device_t*  pdev,
263                            IN const u32_t         chain_idx,
264                            IN const u16_t         new_index)
265 {
266     lm_tpa_sge_chain_t* sge_tpa_chain       = &LM_SGE_TPA_CHAIN(pdev, chain_idx);
267     u16_t const         prod_idx            = lm_bd_chain_prod_idx(&LM_TPA_CHAIN_BD(pdev, chain_idx));
268     u16_t const         prod_minus_new_sge  = prod_idx - new_index;
269     u16_t const         prod_minus_saved    = prod_idx - sge_tpa_chain->last_max_con;
270 
271     if(prod_minus_new_sge < prod_minus_saved)
272     {
273         sge_tpa_chain->last_max_con = new_index;
274     }
275 
276     /*
277     A cyclic comparison would have been a nicer solution, but it adds a limitation on the bd ring size: (2^15) instead of 2^16.
278     That limitation would have to be enforced when allocating the TPA BD chain:
279     DbgBreakIf(LM_TPA_CHAIN_BD_NUM_ELEM(_pdev, chain_idx) < (2^15) );
280     if (CYCLIC_GT_16(sge_index, sge_tpa_chain->last_max_con))
281         sge_tpa_chain->last_max_con = sge_index;
282     */
283 }
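/*
 * Worked example of the wrap-safe comparison above (illustrative values only):
 * with prod_idx = 0x0005, last_max_con = 0xFFFE and new_index = 0x0002,
 * prod_minus_new_sge = 0x0005 - 0x0002 = 0x0003 while
 * prod_minus_saved   = 0x0005 - 0xFFFE = 0x0007 (u16_t wraparound), so
 * new_index is closer to the producer and last_max_con is updated even though
 * 0x0002 < 0xFFFE numerically.
 */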
284 
285 /**
286  * @description
287  * The TPA sge consumer is incremented in 64-bit
288  * resolution.
289  * @param pdev
290  * @param chain_idx
291  *
292  * @return STATIC void
293  */
294 __inline STATIC void
295 lm_tpa_incr_sge_cons( IN        lm_device_t*    pdev,
296                       IN const  u32_t           chain_idx,
297                       IN const  u16_t           mask_entry_idx)
298 {
299     lm_tpa_sge_chain_t* sge_tpa_chain   = &LM_SGE_TPA_CHAIN(pdev, chain_idx);
300     lm_bd_chain_t*      bd_chain        = &LM_TPA_CHAIN_BD(pdev, chain_idx);
301     u16_t               bd_entry        = 0;
302     u16_t               active_entry    = 0;
303     u16_t               i               = 0;
304 
305     bd_chain->cons_idx += BIT_VEC64_ELEM_SZ;
306 
307     DbgBreakIf(LM_TPA_MASK_LEN(pdev, chain_idx) <= mask_entry_idx);
308     sge_tpa_chain->mask_array[mask_entry_idx] = BIT_VEC64_ELEM_ONE_MASK;
309 
310     // Make sure bds_per_page_mask is a power of 2 that is higher than 64
311     DbgBreakIf(0 != (lm_bd_chain_bds_per_page(bd_chain) & BIT_VEC64_ELEM_MASK));
312     DbgBreakIf(BIT_VEC64_ELEM_SZ >= lm_bd_chain_bds_per_page(bd_chain));
313 
314     if((lm_bd_chain_cons_idx(bd_chain) & lm_bd_chain_bds_per_page_mask(bd_chain)) == 0)
315     {
316         // A page was just closed; must account for the page-end entries
317         lm_bd_chain_bds_consumed(bd_chain, (BIT_VEC64_ELEM_SZ - lm_bd_chain_bds_skip_eop(bd_chain)));
318 
319         /* clear page-end entries */
320         for(i = 1; i <= lm_bd_chain_bds_skip_eop(bd_chain); i++ )
321         {
322             bd_entry = lm_bd_chain_cons_idx(bd_chain) - i;
323             active_entry = LM_TPA_BD_ENTRY_TO_ACTIVE_ENTRY(pdev, chain_idx, bd_entry);
324             LM_TPA_MASK_CLEAR_ACTIVE_BIT(pdev, chain_idx, active_entry);
325         }
326     }
327     else
328     {
329         // Same page
330         lm_bd_chain_bds_consumed(bd_chain, BIT_VEC64_ELEM_SZ);
331     }
332 }
333 /**
334  * @description
335  * Handle TPA stop code.
336  * @param pdev
337  * @param rcvd_list -Global receive list
338  * @param cqe
339  * @param chain_idx
340  * @param pkt_cnt
341  * @param queue_index
342  *
343  * @return STATIC u32_t pkt_cnt - the number of packets. The count is
344  *         an input parameter, and packets added to the global list
345  *         are counted on top of it.
346  */
347 STATIC u32_t
348 lm_tpa_stop( IN         lm_device_t*                pdev,
349              INOUT      s_list_t*                   rcvd_list,
350              IN const   struct eth_end_agg_rx_cqe*  cqe,
351              IN const   u32_t                       chain_idx,
352              IN         u32_t                       pkt_cnt,
353              IN const   u8_t                        queue_index)
354 {
355     lm_tpa_chain_t*     tpa_chain           = &LM_TPA(pdev, chain_idx);
356     lm_tpa_sge_chain_t* sge_tpa_chain       = &LM_SGE_TPA_CHAIN(pdev, chain_idx);
357     lm_bd_chain_t*      bd_chain            = &LM_TPA_CHAIN_BD(pdev, chain_idx);
358     lm_packet_t*        pkt                 = tpa_chain->start_coales_bd[queue_index].packet;//Reads the TPA start coalesce array(PD_R)
359     u32_t               sge_size            = mm_le16_to_cpu(cqe->pkt_len) - pkt->l2pkt_rx_info->size;
360     u32_t const         sge_num_elem        = DIV_ROUND_UP_BITS(sge_size, LM_TPA_PAGE_BITS);
361     u32_t               fw_sge_index        = 0;
362     u16_t               active_entry        = 0;
363     u16_t               first_max_set       = 0;
364     u16_t               last_max_set        = 0;
365     u16_t               i                   = 0;
366     u8_t                b_force_first_enter = FALSE;
367     u16_t               loop_cnt_dbg        = 0;
368     const u32_t         lm_tpa_page_size    = LM_TPA_PAGE_SIZE;
369 
370     // Total packet size given in end aggregation must be larger than the size given in start aggregation.
371     // The only case in which both sizes are equal is when the stop aggregation doesn't contain data.
372     DbgBreakIf( mm_le16_to_cpu(cqe->pkt_len) < pkt->l2pkt_rx_info->size);
373 
374     DbgBreakIf( TRUE != tpa_chain->start_coales_bd[queue_index].is_entry_used);
375     tpa_chain->start_coales_bd[queue_index].is_entry_used = FALSE;
376 
377     // Indicate to upper layer this is a TPA packet
378     SET_FLAGS(pkt->l2pkt_rx_info->flags ,LM_RX_FLAG_START_RSC_TPA);
379     // Updates the TPA only fields from the CQE
380     pkt->l2pkt_rx_info->total_packet_size   = mm_le16_to_cpu(cqe->pkt_len);
381     pkt->l2pkt_rx_info->coal_seg_cnt        = mm_le16_to_cpu(cqe->num_of_coalesced_segs);
382     pkt->l2pkt_rx_info->dup_ack_cnt         = cqe->pure_ack_count;
383     pkt->l2pkt_rx_info->ts_delta            = mm_le32_to_cpu(cqe->timestamp_delta);
384 
385     /* make sure packet size is larger than header size */
386     DbgBreakIfFastPath(pkt->l2pkt_rx_info->total_packet_size < MIN_ETHERNET_PACKET_SIZE);
387 
388     // Adds this packet descriptor to the global receive list (rcvd_list that is later indicated to miniport).
389     s_list_push_tail(rcvd_list, &pkt->link);
390     pkt_cnt++;
391 
392     ASSERT_STATIC(LM_TPA_MAX_AGG_SIZE == ARRSIZE(cqe->sgl_or_raw_data.sgl));
393     DbgBreakIf(ARRSIZE(cqe->sgl_or_raw_data.sgl) < sge_num_elem);
394 
395     // If the TPA stop doesn't contain any new BDs.
396     if(0 == sge_num_elem )
397     {
398         // Total packet size given in end aggregation must be equal to the size given in start aggregation.
399         // if stop aggregation doesn't contain data.
400         DbgBreakIf( mm_le16_to_cpu(cqe->pkt_len) != pkt->l2pkt_rx_info->size);
401 
402         return pkt_cnt;
403     }
404 
405     for(fw_sge_index = 0; fw_sge_index < sge_num_elem; fw_sge_index++)
406     {
407         DbgBreakIf(ARRSIZE(cqe->sgl_or_raw_data.sgl) <= fw_sge_index);
408         active_entry = LM_TPA_BD_ENTRY_TO_ACTIVE_ENTRY(pdev, chain_idx, mm_le16_to_cpu(cqe->sgl_or_raw_data.sgl[fw_sge_index]));
409 
410         LM_TPA_ACTIVE_ENTRY_BOUNDARIES_VERIFY(pdev, chain_idx, active_entry);
411         pkt = tpa_chain->sge_chain.active_descq_array[active_entry];
412         LM_TPA_MASK_CLEAR_ACTIVE_BIT(pdev, chain_idx, active_entry);
413 
414 #if (DBG)
415         /************start TPA debug code******************************/
416         tpa_chain->dbg_params.pck_ret_from_chip++;
417         /************end TPA debug code******************************/
418 #endif //(DBG)
419         // For last SGE
420         DbgBreakIf((fw_sge_index != (sge_num_elem - 1)) && (sge_size < LM_TPA_PAGE_SIZE ));
421         pkt->l2pkt_rx_info->size = min(sge_size ,lm_tpa_page_size);
422         s_list_push_tail(rcvd_list, &(pkt->link));
423         pkt_cnt++;
424         sge_size -= LM_TPA_PAGE_SIZE;
425     }
426 
427 #if defined(_NTDDK_)
428 //PreFast 28182: Prefast reviewed and suppressed; this situation shouldn't occur.
429 #pragma warning (push)
430 #pragma warning( disable:6385 )
431 #endif // !_NTDDK_
432     /* Here we assume that the last SGE index is the biggest  */
433     lm_tpa_sge_update_last_max(pdev,
434                               chain_idx,
435                               mm_le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_num_elem -1]));
436 
437 #if defined(_NTDDK_)
438 #pragma warning (pop)
439 #endif // !_NTDDK_
440     // Find the first consumer entry that is a candidate to be freed, and the last one.
441     first_max_set = LM_TPA_BD_ENTRY_TO_MASK_ENTRY(pdev, chain_idx, lm_bd_chain_cons_idx(bd_chain));
442     last_max_set  = LM_TPA_BD_ENTRY_TO_MASK_ENTRY(pdev, chain_idx, sge_tpa_chain->last_max_con);
443 
444     DbgBreakIf(0 != (lm_bd_chain_cons_idx(bd_chain) & BIT_VEC64_ELEM_MASK));
445     /* If ring is full enter anyway*/
446     if((last_max_set == first_max_set) && (lm_bd_chain_is_full(bd_chain)))
447     {
448         b_force_first_enter = TRUE;
449     }
450     /* Now update the cons */
451     for (i = first_max_set;((i != last_max_set) || (TRUE == b_force_first_enter)); i = LM_TPA_MASK_NEXT_ELEM(pdev, chain_idx, i))
452     {
453         DbgBreakIf(LM_TPA_MASK_LEN(pdev, chain_idx) <= i);
454         if (sge_tpa_chain->mask_array[i])
455         {
456             break;
457         }
458         b_force_first_enter = FALSE;
459 
460         lm_tpa_incr_sge_cons(pdev,
461                              chain_idx,
462                              i);
463         loop_cnt_dbg++;
464         DbgBreakIf(LM_TPA_MASK_LEN(pdev,chain_idx) < loop_cnt_dbg);
465     }
466 
467     return pkt_cnt;
468 }
469 /**
470  * @description
471  * Handle TPA start code.
472  * @param pdev
473  * @param pkt
474  * @param chain_idx
475  * @param queue_index
476  *
477  * @return STATIC void
478  */
479 __inline STATIC void
480 lm_tpa_start( IN        lm_device_t*    pdev,
481               IN        lm_packet_t*    pkt,
482               IN const  u32_t           chain_idx,
483               IN const  u8_t            queue_index)
484 {
485     lm_tpa_chain_t*   tpa_chain    = &LM_TPA(pdev, chain_idx);
486 
487     DbgBreakIf( FALSE != tpa_chain->start_coales_bd[queue_index].is_entry_used);
488 
489     tpa_chain->start_coales_bd[queue_index].is_entry_used   = TRUE;
490     tpa_chain->start_coales_bd[queue_index].packet          = pkt;
491 }
492 /**
493  * @description
494  * Set the flags that are known at TPA start.
495  * This is only an optimization to avoid if's whose outcome is already known.
496  * @param pdev
497  *
498  * @return STATIC void
499  */
500 __inline STATIC void
501 lm_tpa_start_flags_handle( IN       lm_device_t*                    pdev,
502                            IN const struct eth_fast_path_rx_cqe*    cqe,
503                            INOUT    lm_packet_t*                    pkt,
504                            IN const u16_t                           parse_flags)
505 {
506     // TPA is always(only) above IPV4 or IPV6.
507     DbgBreakIf(FALSE ==
508                ((GET_FLAGS_WITH_OFFSET(parse_flags,PARSING_FLAGS_OVER_ETHERNET_PROTOCOL,
509                    PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT) == PRS_FLAG_OVERETH_IPV4) ||
510                  (GET_FLAGS_WITH_OFFSET(parse_flags,PARSING_FLAGS_OVER_ETHERNET_PROTOCOL,
511                    PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT) == PRS_FLAG_OVERETH_IPV6)));
512 
513     if(PRS_FLAG_OVERETH_IPV4 == GET_FLAGS_WITH_OFFSET(parse_flags,PARSING_FLAGS_OVER_ETHERNET_PROTOCOL,
514          PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT))
515     {
516         SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IS_IPV4_DATAGRAM);
517 
518         DbgBreakIf(GET_FLAGS(cqe->status_flags, ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG));
519         // In IPV4 there is always a checksum
520         // TPA ip cksum is always valid
521         DbgBreakIf(GET_FLAGS(cqe->type_error_flags, ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG));
522 
523         SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IP_CKSUM_IS_GOOD);
524     }
525     else
526     {
527         SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IS_IPV6_DATAGRAM);
528         // In IPV6 there is no checksum
529         DbgBreakIf(0 == GET_FLAGS(cqe->status_flags, ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG));
530     }
531 
532 
533     // If there was fragmentation it will be delivered by a regular BD (the TPA aggregation is stopped).
534     DbgBreakIf( GET_FLAGS(parse_flags,PARSING_FLAGS_FRAGMENTATION_STATUS));
535     /* check if TCP segment */
536     // TPA is always above TCP.
537     DbgBreakIf(PRS_FLAG_OVERIP_TCP != GET_FLAGS_WITH_OFFSET(parse_flags,PARSING_FLAGS_OVER_IP_PROTOCOL,
538                                                             PARSING_FLAGS_OVER_IP_PROTOCOL_SHIFT));
539 
540     SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IS_TCP_SEGMENT);
541 
542 
543     // TCP was checked before. TCP checksum must be done by FW in TPA.
544     DbgBreakIf(GET_FLAGS(cqe->status_flags, ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG));
545     // TCP checksum must be valid in a successful TPA aggregation.
546     DbgBreakIf(GET_FLAGS(cqe->type_error_flags, ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG));
547 
548 /* IN TPA tcp cksum is always validated */
549 /* valid tcp/udp cksum */
550 #define SHIFT_IS_GOOD  1
551 #define SHIFT_IS_BAD   2
552     ASSERT_STATIC(LM_RX_FLAG_UDP_CKSUM_IS_GOOD == LM_RX_FLAG_IS_UDP_DATAGRAM << SHIFT_IS_GOOD);
553     ASSERT_STATIC(LM_RX_FLAG_UDP_CKSUM_IS_BAD  == LM_RX_FLAG_IS_UDP_DATAGRAM << SHIFT_IS_BAD);
554     ASSERT_STATIC(LM_RX_FLAG_TCP_CKSUM_IS_GOOD == LM_RX_FLAG_IS_TCP_SEGMENT  << SHIFT_IS_GOOD);
555     ASSERT_STATIC(LM_RX_FLAG_TCP_CKSUM_IS_BAD  == LM_RX_FLAG_IS_TCP_SEGMENT  << SHIFT_IS_BAD);
556 
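    /* The shift below relies on the flag layout verified by the ASSERT_STATICs
     * above: LM_RX_FLAG_TCP_CKSUM_IS_GOOD == LM_RX_FLAG_IS_TCP_SEGMENT << SHIFT_IS_GOOD,
     * so shifting the already-set IS_TCP_SEGMENT bit left by SHIFT_IS_GOOD marks
     * the TCP checksum as good without an extra branch. */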
557     SET_FLAGS(pkt->l2pkt_rx_info->flags , ( GET_FLAGS(pkt->l2pkt_rx_info->flags, (LM_RX_FLAG_IS_TCP_SEGMENT)) << SHIFT_IS_GOOD ) );
558 }
559 
560 /**
561  * @description
562  * Set regular flags.
563  * This is only an optimization
564  * @param pdev
565  *
566  * @return STATIC void
567  */
568 STATIC void
569 lm_regular_flags_handle( IN         lm_device_t*    pdev,
570                          IN const struct eth_fast_path_rx_cqe*    cqe,
571                          INOUT      lm_packet_t*    pkt,
572                          IN const   u16_t           parse_flags)
573 {
574     /* check if IP datagram (either IPv4 or IPv6) */
575     if(((GET_FLAGS(parse_flags,PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >>
576         PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT) == PRS_FLAG_OVERETH_IPV4) ||
577        ((GET_FLAGS(parse_flags,PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >>
578         PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT) == PRS_FLAG_OVERETH_IPV6))
579     {
580         pkt->l2pkt_rx_info->flags  |=
581             (GET_FLAGS(parse_flags,PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >>
582              PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT) == PRS_FLAG_OVERETH_IPV4 ?
583             LM_RX_FLAG_IS_IPV4_DATAGRAM :
584             LM_RX_FLAG_IS_IPV6_DATAGRAM;
585         if(!GET_FLAGS(cqe->status_flags, ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG))
586         {
587             /* ip cksum validated */
588             if GET_FLAGS(cqe->type_error_flags, ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)
589             {
590                 /* invalid ip cksum */
591                 SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IP_CKSUM_IS_BAD);
592 
593                 LM_COMMON_DRV_STATS_ATOMIC_INC_ETH(pdev, rx_ip_cs_error_count);
594             }
595             else
596             {
597                 /* valid ip cksum */
598                 SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IP_CKSUM_IS_GOOD);
599             }
600         }
601     }
602 
603     // TCP or UDP segment.
604     if(!GET_FLAGS(parse_flags,PARSING_FLAGS_FRAGMENTATION_STATUS))
605     {
606         /* check if TCP segment */
607         if((GET_FLAGS(parse_flags,PARSING_FLAGS_OVER_IP_PROTOCOL) >>
608             PARSING_FLAGS_OVER_IP_PROTOCOL_SHIFT) == PRS_FLAG_OVERIP_TCP)
609         {
610             SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IS_TCP_SEGMENT);
611             DbgMessage(pdev, INFORM, "--- TCP Packet --- \n");
612         }
613         /* check if UDP segment */
614         else if((GET_FLAGS(parse_flags,PARSING_FLAGS_OVER_IP_PROTOCOL) >>
615                  PARSING_FLAGS_OVER_IP_PROTOCOL_SHIFT) == PRS_FLAG_OVERIP_UDP)
616         {
617             SET_FLAGS(pkt->l2pkt_rx_info->flags , LM_RX_FLAG_IS_UDP_DATAGRAM);
618             DbgMessage(pdev, INFORM, "--- UDP Packet --- \n");
619         }
620     }
621 
622 
623     if( GET_FLAGS(pkt->l2pkt_rx_info->flags, (LM_RX_FLAG_IS_TCP_SEGMENT | LM_RX_FLAG_IS_UDP_DATAGRAM)) &&
624        !GET_FLAGS(cqe->status_flags, ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))
625     {
626         ASSERT_STATIC(LM_RX_FLAG_UDP_CKSUM_IS_GOOD == LM_RX_FLAG_IS_UDP_DATAGRAM << SHIFT_IS_GOOD);
627         ASSERT_STATIC(LM_RX_FLAG_UDP_CKSUM_IS_BAD  == LM_RX_FLAG_IS_UDP_DATAGRAM << SHIFT_IS_BAD);
628         ASSERT_STATIC(LM_RX_FLAG_TCP_CKSUM_IS_GOOD == LM_RX_FLAG_IS_TCP_SEGMENT  << SHIFT_IS_GOOD);
629         ASSERT_STATIC(LM_RX_FLAG_TCP_CKSUM_IS_BAD  == LM_RX_FLAG_IS_TCP_SEGMENT  << SHIFT_IS_BAD);
630 
631         DbgMessage(pdev, INFORM, "  Checksum validated.\n");
632 
633         /* tcp/udp cksum validated */
634         if GET_FLAGS(cqe->type_error_flags, ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)
635         {
636             /* invalid tcp/udp cksum */
637             SET_FLAGS(pkt->l2pkt_rx_info->flags , ( GET_FLAGS(pkt->l2pkt_rx_info->flags, (LM_RX_FLAG_IS_TCP_SEGMENT | LM_RX_FLAG_IS_UDP_DATAGRAM)) << SHIFT_IS_BAD ) );
638 
639             LM_COMMON_DRV_STATS_ATOMIC_INC_ETH(pdev, rx_tcp_cs_error_count);
640             DbgMessage(pdev, INFORM, "  BAD checksum.\n");
641         }
642         else if (GET_FLAGS(pkt->l2pkt_rx_info->flags , LM_RX_FLAG_IP_CKSUM_IS_BAD))
643         {
644             /* invalid tcp/udp cksum due to invalid ip cksum */
645             SET_FLAGS(pkt->l2pkt_rx_info->flags , ( GET_FLAGS(pkt->l2pkt_rx_info->flags, (LM_RX_FLAG_IS_TCP_SEGMENT | LM_RX_FLAG_IS_UDP_DATAGRAM)) << SHIFT_IS_BAD ) );
646             DbgMessage(pdev, INFORM, "  BAD IP checksum\n");
647         }
648         else
649         {
650             /* valid tcp/udp cksum */
651             SET_FLAGS(pkt->l2pkt_rx_info->flags , ( GET_FLAGS(pkt->l2pkt_rx_info->flags, (LM_RX_FLAG_IS_TCP_SEGMENT | LM_RX_FLAG_IS_UDP_DATAGRAM)) << SHIFT_IS_GOOD ) );
652             DbgMessage(pdev, INFORM, "  GOOD checksum.\n");
653         }
654     }
655     else
656     {
657         DbgMessage(pdev, INFORM, "  Checksum NOT validated.\n");
658         /*Packets with invalid TCP options are reported with L4_XSUM_NO_VALIDATION due to HW limitation. In this case we assume that
659           their checksum is OK.*/
660         if(GET_FLAGS(pkt->l2pkt_rx_info->flags, (LM_RX_FLAG_IS_TCP_SEGMENT | LM_RX_FLAG_IS_UDP_DATAGRAM)) &&
661            GET_FLAGS(cqe->status_flags, ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) &&
662            GET_FLAGS(cqe->pars_flags.flags, PARSING_FLAGS_TCP_OPTIONS_EXIST))
663         {
664             DbgMessage(pdev, INFORM, "  TCP Options exist - forcing return value.\n");
665             if(GET_FLAGS(pkt->l2pkt_rx_info->flags , LM_RX_FLAG_IP_CKSUM_IS_BAD))
666             {
667                 DbgMessage(pdev, INFORM, "  IP checksum invalid - reporting BAD checksum.\n");
668                 SET_FLAGS(pkt->l2pkt_rx_info->flags , ( GET_FLAGS(pkt->l2pkt_rx_info->flags, (LM_RX_FLAG_IS_TCP_SEGMENT | LM_RX_FLAG_IS_UDP_DATAGRAM)) << SHIFT_IS_BAD ) );
669             }
670             else
671             {
672                 DbgMessage(pdev, INFORM, "  IP checksum ok - reporting GOOD checksum.\n");
673                 SET_FLAGS(pkt->l2pkt_rx_info->flags , ( GET_FLAGS(pkt->l2pkt_rx_info->flags, (LM_RX_FLAG_IS_TCP_SEGMENT | LM_RX_FLAG_IS_UDP_DATAGRAM)) << SHIFT_IS_GOOD ) );
674             }
675         }
676     }
677 }
678 
679 __inline STATIC void
680 lm_recv_set_pkt_len( IN       lm_device_t*   pdev,
681                      INOUT    lm_packet_t*   pkt,
682                      IN const u16_t          pkt_len,
683                      IN const u32_t          chain_idx)
684 {
685     // changed, as we don't have the fhdr infrastructure
686     pkt->l2pkt_rx_info->size = pkt_len; //- 4; /* CRC32 */
687 
688     DbgMessage(pdev, VERBOSEl2, "pkt_size: %d\n",pkt->l2pkt_rx_info->size);
689 }
690 
691 INLINE STATIC u32_t
692 calc_cksum(u16_t *hdr, u32_t len_in_bytes, u32_t sum)
693 {
694     // len_in_bytes - the length in bytes of the header
695     // sum - initial checksum
696     while (len_in_bytes > 1)
697     {
698         sum += NTOH16(*hdr);
699         len_in_bytes -= 2;
700         hdr++;
701     }
702 
703     /* add left-over byte, if any */
704     if (len_in_bytes)
705     {
706         sum += ((NTOH16(*hdr)) & 0xFF00);
707     }
708 
709     return sum;
710 }
711 
712 INLINE STATIC u8_t
713 validate_cksum(u32_t sum)
714 {
715     // sum - the 32-bit accumulated sum of the header (including its checksum field)
716     // returns true iff the checksum (already written in the header) is valid
717 
718     // fold 32-bit sum to 16 bits
719     while (sum >> 16)
720     {
721         sum = (sum & 0xffff) + (sum >> 16);
722     }
723 
724     return ((u16_t)(sum) == 0xffff);
725 }
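/*
 * Usage sketch (mirrors how encap_pkt_parsing() below combines the two
 * helpers): the header is summed in 16-bit network-order words and the 32-bit
 * accumulator is then folded and compared against 0xffff.
 *
 *   if (validate_cksum(calc_cksum((u16_t*)ip_hdr, ip_hdr_len, 0)))
 *   {
 *       // the checksum field in the header is consistent with its contents
 *   }
 *
 * Folding example (illustrative): sum = 0x2A8F3 folds to 0x0002 + 0xA8F3 =
 * 0xA8F5, which is not 0xffff, so that header would be reported as bad.
 */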
726 
727 INLINE STATIC u16_t
728 get_ip_hdr_len(u8_t *hdr)
729 {
730     // returns the ip header length in bytes
731     u16_t ip_hdr_len = 40; // ipv6 header length, we won't support ipv6 with extension header for now
732 
733     if ((hdr[0] & 0xf0) == 0x40)
734     {
735         // ipv4, the lower 4 bit of the 1st byte of ip header
736         // contains the ip header length in unit of dword(32-bit)
737         ip_hdr_len = ((hdr[0] & 0xf) << 2);
738     }
739     return ip_hdr_len;
740 }
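/*
 * Example (illustrative): for a plain IPv4 header the first byte is 0x45
 * (version 4, IHL 5), so this returns (0x5 << 2) = 20 bytes; any first byte
 * that is not 0x4X falls through to the fixed 40-byte IPv6 header length.
 */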
741 
742 INLINE void
743 encap_pkt_parsing(struct _lm_device_t *pdev,
744                   lm_packet_t         *pkt)
745 {
746     u16_t tmp, inner_ip_hdr_len, tcp_length;
747     u32_t psuedo_cksum;
748     u8_t *hdr;
749 
750     // encapsulated packet:
751     // outer mac | outer ip | gre | inner mac | inner ip | tcp
752     // minimum encapsultaed packet size is:
753     // minimum encapsulated packet size is:
754     if (pkt->l2pkt_rx_info->total_packet_size < (2*ETHERNET_PACKET_HEADER_SIZE + 2*20 + ETHERNET_GRE_SIZE + 20))
755     {
756         return;
757     }
758 
759 
760     // set hdr to the outer ip header
761     hdr = pkt->l2pkt_rx_info->mem_virt + pdev->params.rcv_buffer_offset + ETHERNET_PACKET_HEADER_SIZE;
762     if (pkt->l2pkt_rx_info->flags & LM_RX_FLAG_VALID_VLAN_TAG)
763     {
764         hdr += ETHERNET_VLAN_TAG_SIZE;
765     }
766 
767     // in case this is not a standard ETH packet (e.g. management, or in general non ipv4/ipv6), it is for sure
768     // not gre, so we can end here
769     // if the outer header is ipv4, the protocol field is at byte offset 9
770     // if the outer header is ipv6, the next header field is at byte offset 6
771     if (!(((pkt->l2pkt_rx_info->flags & LM_RX_FLAG_IS_IPV4_DATAGRAM) && (hdr[9] == 0x2f)) ||
772           ((pkt->l2pkt_rx_info->flags & LM_RX_FLAG_IS_IPV6_DATAGRAM) && (hdr[6] == 0x2f))))
773     {
774         // this is not an encapsulated packet, no gre tunneling
775         // on ipv6 we don't support extension headers
776         return;
777     }
778 
779     // get the length of the outer ip header and set hdr to the gre header
780     hdr += get_ip_hdr_len(hdr);
781 
782 /* GRE header
783    | Bits 0-4 | 5-7   | 8-12  | 13-15   | 16-31         |
784    | C|0|K|S  | Recur | Flags | Version | Protocol Type |
785    |           Checksum (optional)      | Reserved      |
786    |           Key (optional)                           |
787    |           Sequence Number (optional)               | */
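/*
 * In the first GRE byte (per the RFC 1701 layout sketched above) C = 0x80,
 * K = 0x20 and S = 0x10, so the (hdr[0] & 0xb0) != 0x20 test below accepts
 * only "key present, no checksum, no sequence number" tunnels.
 */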
788 
789     // check that:
790     // checksum present bit is set to 0
791     // key present bit is set to 1
792     // sequence number present bit is set to 0
793     // protocol type should be always equal to 0x6558 (for encapsulating ethernet packets in GRE)
794     if (((hdr[0] & 0xb0) != 0x20) || (hdr[2] != 0x65) || (hdr[3] != 0x58))
795     {
796         return;
797     }
798     // set hdr to the inner mac header
799     hdr += ETHERNET_GRE_SIZE;
800 
801     // The first two octets of the tag are the Tag Protocol Identifier (TPID) value of 0x8100.
802     // This is located in the same place as the EtherType/Length field in untagged frames
803     if ((hdr[12] == 0x81) && (hdr[13] == 0x00))
804     {
805         hdr += ETHERNET_VLAN_TAG_SIZE;
806     }
807     // set hdr to the inner ip header
808     hdr += ETHERNET_PACKET_HEADER_SIZE;
809 
810     // get the length of the inner ip header
811     inner_ip_hdr_len = get_ip_hdr_len(hdr);
812 
813     if ((hdr[0] & 0xf0) == 0x40)
814     {
815         // inner ip header is ipv4
816         // if the ip header checksum of the outer header is ok then validate the ip checksum of the inner header
817         if (pkt->l2pkt_rx_info->flags & LM_RX_FLAG_IP_CKSUM_IS_GOOD)
818         {
819             // validate the checksum
820             if (!validate_cksum(calc_cksum((u16_t*)hdr, inner_ip_hdr_len, 0)))
821             {
822                 SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IP_CKSUM_IS_BAD);
823                 RESET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IP_CKSUM_IS_GOOD);
824             }
825         }
826         // check if protocol field is tcp
827         if (hdr[9] == 0x06)
828         {
829             // create the psuedo header
830 /* | Bit offset | 0-7    |    8-15  |    16-31   |
831    |     0      |    Source address              |
832    |    32      |  Destination address           |
833    |    64      | Zeros  | Protocol | TCP length | */
834 
835             // adding 1 byte of zeros + protocol to the sum
836             // and adding source and destination address
837             psuedo_cksum = calc_cksum((u16_t*)&hdr[12], 8, 0x06);
838             // calculate the tcp length
839             mm_memcpy(&tmp, &hdr[2], sizeof(u16_t));
840             tcp_length = NTOH16(tmp) - inner_ip_hdr_len;
841             // the TCP length field is the length of the TCP header and data (measured in octets).
842             psuedo_cksum += tcp_length;
843         }
844         else
845         {
846             // no tcp over ip
847             return;
848         }
849     }
850     else if ((hdr[0] & 0xf0) == 0x60)
851     {
852         // inner ip header is ipv6
853         // check if next header field is tcp
854         if (hdr[6] == 0x06)
855         {
856             // tcp over ipv6
857             // create the psuedo header
858 /* | Bit offset | 0-7 | 8-15 | 16-23 |  24-31     |
859    |     0      |     Source address              |
860    |    32      |                                 |
861    |    64      |                                 |
862    |    96      |                                 |
863    |   128      |   Destination address           |
864    |   160      |                                 |
865    |   192      |                                 |
866    |   224      |                                 |
867    |   256      |        TCP length               |
868    |   288      |        Zeros       |Next header |*/
869 
870             // adding 3 bytes of zeros + protocol to the sum
871             // and adding source and destination address
872             psuedo_cksum = calc_cksum((u16_t*)&hdr[8], 32, 0x06);
873             // calculate the tcp length
874             // in the ip header: the size of the payload in octets, including any extension headers
875             mm_memcpy(&tmp, &hdr[4], sizeof(u16_t));
876             // reduce the length of the extension headers
877             tcp_length = NTOH16(tmp) - (inner_ip_hdr_len - 40);
878             psuedo_cksum += tcp_length;
879         }
880         else
881         {
882             // no tcp over ip
883             return;
884         }
885     }
886     else
887     {
888         // no ipv4 or ipv6
889         return;
890     }
891     // set hdr to the tcp header
892     hdr += inner_ip_hdr_len;
893 
894     SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IS_TCP_SEGMENT);
895     // calculate the checksum of the rest of the packet
896     // validate the checksum
897     if (validate_cksum(calc_cksum((u16_t*)hdr, tcp_length, psuedo_cksum)))
898     {
899         SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_TCP_CKSUM_IS_GOOD);
900         RESET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_TCP_CKSUM_IS_BAD);
901     }
902     else
903     {
904         SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_TCP_CKSUM_IS_BAD);
905         RESET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_TCP_CKSUM_IS_GOOD);
906     }
907 }
908 
909 /*******************************************************************************
910  * Description:
911  * Here the RCQ chain is the chain coordinated with the status block, that is,
912  * the index in the status block describes the RCQ and NOT the rx_bd chain as in
913  * the case of Teton. We run on the delta between the new consumer index of the RCQ
914  * which we get from the sb and the old consumer index of the RCQ.
915  * In cases of both slow and fast path, the consumer of the RCQ is always incremented.
916  *
917  * The assumption which we must stick to all the way is: RCQ and Rx bd chain
918  * have the same size at all times! Otherwise, so help us Alan Bertkey!
919  *
920  * Return:
921  ******************************************************************************/
922 u32_t
923 lm_get_packets_rcvd( struct _lm_device_t  *pdev,
924                      u32_t const          chain_idx,
925                      s_list_t             *rcvd_list,
926                      struct _sp_cqes_info *sp_cqes)
927 {
928     lm_rx_chain_t*          rxq_chain    = &LM_RXQ(pdev, chain_idx); //get a hold of the matching Rx bd chain according to index
929     lm_rcq_chain_t*         rcq_chain    = &LM_RCQ(pdev, chain_idx); //get a hold of the matching RCQ chain according to index
930     lm_bd_chain_t*          rx_chain_bd  = &LM_RXQ_CHAIN_BD(pdev, chain_idx);
931     lm_bd_chain_t*          rx_chain_sge = LM_RXQ_SGE_PTR_IF_VALID(pdev, chain_idx);
932     lm_tpa_chain_t*         tpa_chain    = &LM_TPA(pdev, chain_idx);
933     union eth_rx_cqe*       cqe          = NULL;
934     lm_packet_t*            pkt          = NULL;
935     u32_t                   pkt_cnt      = 0;
936     u16_t                   rx_old_idx   = 0;
937     u16_t                   cq_new_idx   = 0;
938     u16_t                   cq_old_idx   = 0;
939     enum eth_rx_cqe_type    cqe_type     = MAX_ETH_RX_CQE_TYPE;
940 
941     DbgMessage(pdev, INFORMl2 , "lm_get_packets_rcvd inside!\n");
942 
943     /* make sure to zeroize the sp_cqes... */
944     mm_mem_zero( sp_cqes, sizeof(struct _sp_cqes_info) );
945 
946     /* Get the new consumer idx.  The bd's between rcq_new_idx and rcq_old_idx
947      * are bd's containing receive packets.
948      */
949     cq_new_idx = mm_le16_to_cpu(*(rcq_chain->hw_con_idx_ptr));
950 
951     /* The consumer index of the RCQ only, may stop at the end of a page boundary.  In
952      * this case, we need to advance it to the next one.
953      * In here we do not increase the cons_bd as well! this is since we're dealing here
954      * with the new cons index and not with the actual old one for which, as we progress, we
955      * need to maintain the bd_cons as well.
956      */
957     if((cq_new_idx & lm_bd_chain_usable_bds_per_page(&rcq_chain->bd_chain)) == lm_bd_chain_usable_bds_per_page(&rcq_chain->bd_chain))
958     {
959         cq_new_idx+= lm_bd_chain_bds_skip_eop(&rcq_chain->bd_chain);
960     }
961 
962     DbgBreakIfFastPath( rx_chain_sge && !lm_bd_chains_are_consistent( rx_chain_sge, rx_chain_bd ) );
963 
964     rx_old_idx = lm_bd_chain_cons_idx(rx_chain_bd);
965     cq_old_idx = lm_bd_chain_cons_idx(&rcq_chain->bd_chain);
966 
967     //there is no change in the RCQ consumer index so exit!
968     if (cq_old_idx == cq_new_idx)
969     {
970         DbgMessage(pdev, INFORMl2rx , "there is no change in the RCQ consumer index so exit!\n");
971         return pkt_cnt;
972     }
973 
974     while(cq_old_idx != cq_new_idx)
975     {
976         DbgBreakIfFastPath(S16_SUB(cq_new_idx, cq_old_idx) <= 0);
977         //get hold of the cqe, and find out what its type corresponds to
978         cqe = (union eth_rx_cqe *)lm_bd_chain_consume_bd(&rcq_chain->bd_chain);
979         DbgBreakIfFastPath(cqe == NULL);
980 
981         //update the cons of the RCQ and the bd_prod pointer of the RCQ as well!
982         //this holds both for slow and fast path!
983         cq_old_idx = lm_bd_chain_cons_idx(&rcq_chain->bd_chain);
984 
985         cqe_type = GET_FLAGS_WITH_OFFSET(cqe->ramrod_cqe.ramrod_type, COMMON_RAMROD_ETH_RX_CQE_TYPE, COMMON_RAMROD_ETH_RX_CQE_TYPE_SHIFT);
986         DbgBreakIf(MAX_ETH_RX_CQE_TYPE <= cqe_type);
987 
988         //the cqe is a ramrod, so do the ramrod and recycle the cqe.
989         //TODO: replace this with the #defines: 1- eth ramrod, 2- toe init ofld ramrod
990         switch(cqe_type)
991         {
992         case RX_ETH_CQE_TYPE_ETH_RAMROD:
993         {
994             /* 13/08/08 NirV: bugbug, temp workaround for dpc watch dog bug,
995              * ignore toe completions on L2 ring - initiate offload */
996             if (cqe->ramrod_cqe.conn_type != TOE_CONNECTION_TYPE)
997             {
998                 if (ERR_IF(sp_cqes->idx >= MAX_NUM_SPE))
999                 {
1000                     DbgBreakMsgFastPath("too many spe completed\n");
1001                     /* we shouldn't get here - there is something very wrong if we did... in this case we will risk
1002                      * completing the ramrods - even though we're holding a lock!!! */
1003                     /* bugbug... */
1004                     DbgBreakIfAll(sp_cqes->idx >= MAX_NUM_SPE);
1005                     return pkt_cnt;
1006                 }
1007                 mm_memcpy((void*)(&(sp_cqes->sp_cqe[sp_cqes->idx++])), (const void*)cqe, sizeof(*cqe));
1008             }
1009 
1010             //update the prod of the RCQ - by this, we recycled the CQE.
1011             lm_bd_chain_bd_produced(&rcq_chain->bd_chain);
1012 
1013 #if 0
1014             //in case of ramrod, pop out the Rx bd and push it to the free descriptors list
1015             pkt = (lm_packet_t *) s_list_pop_head(&rxq_chain->active_descq);
1016 
1017             DbgBreakIfFastPath(pkt == NULL);
1018 
1019             s_list_push_tail( &LM_RXQ(pdev, chain_idx).free_descq,
1020                               &pkt->link);
1021 #endif
1022             break;
1023         }
1024         case RX_ETH_CQE_TYPE_ETH_FASTPATH:
1025         case RX_ETH_CQE_TYPE_ETH_START_AGG: //Fall through case
1026         { //enter here in case the cqe is a fast path type (data)
1027             u16_t parse_flags = 0;
1028 
1029             DbgMessage(pdev, INFORMl2rx, "lm_get_packets_rcvd- it is fast path, func=%d\n", FUNC_ID(pdev));
1030 
1031             DbgBreakIf( (RX_ETH_CQE_TYPE_ETH_START_AGG == cqe_type)&&
1032                         (lm_tpa_state_disable == tpa_chain->state));
1033 
1034             pkt = (lm_packet_t *) s_list_pop_head(&rxq_chain->active_descq);
1035             parse_flags = mm_le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags);
1036 
1037             DbgBreakIfFastPath( NULL == pkt );
1038 
1039 #if DBG
1040             if CHK_NULL( pkt )
1041             {
1042                 return 0;
1043             }
1044 #endif // DBG
1045 
1046             DbgBreakIfFastPath(SIG(pkt) != L2PACKET_RX_SIG);
1047 
1048 #if L2_RX_BUF_SIG
1049             /* make sure signatures exist before and after the buffer */
1050             DbgBreakIfFastPath(SIG(pkt->u1.rx.mem_virt - pdev->params.rcv_buffer_offset) != L2PACKET_RX_SIG);
1051             DbgBreakIfFastPath(END_SIG(pkt->u1.rx.mem_virt, MAX_L2_CLI_BUFFER_SIZE(pdev, chain_idx)) != L2PACKET_RX_SIG);
1052 #endif /* L2_RX_BUF_SIG */
1053 
1054             lm_bd_chain_bds_consumed(rx_chain_bd, 1);
1055             if( rx_chain_sge )
1056             {
1057                 lm_bd_chain_bds_consumed(rx_chain_sge, 1);
1058             }
1059 #if defined(_NTDDK_)
1060 //PreFast 28182: Prefast reviewed and suppressed; this situation shouldn't occur.
1061 #pragma warning (push)
1062 #pragma warning( disable:28182 )
1063 #endif // !_NTDDK_
1064             /* Advance the rx_old_idx to the start bd_idx of the next packet. */
1065             rx_old_idx = pkt->u1.rx.next_bd_idx;
1066             //cq_old_idx = pkt->u1.rx.next_bd_idx;
1067 
1068             CLEAR_FLAGS( pkt->l2pkt_rx_info->flags );
1069 
1070 
1071             if(RX_ETH_CQE_TYPE_ETH_START_AGG == cqe_type)
1072             {
1073                 lm_recv_set_pkt_len(pdev, pkt, mm_le16_to_cpu(cqe->fast_path_cqe.len_on_bd), chain_idx);
1074                 // total_packet_size is only known in stop_TPA
1075 
1076                 DbgBreakIf(0 != cqe->fast_path_cqe.pkt_len_or_gro_seg_len);
1077 
1078                 lm_tpa_start(pdev,
1079                              pkt,
1080                              chain_idx,
1081                              cqe->fast_path_cqe.queue_index);
1082 
1083                 lm_tpa_start_flags_handle(pdev,
1084                                           &(cqe->fast_path_cqe),
1085                                           pkt,
1086                                           parse_flags);
1087             }
1088             else
1089             {
1090                 lm_recv_set_pkt_len(pdev, pkt, mm_le16_to_cpu(cqe->fast_path_cqe.pkt_len_or_gro_seg_len), chain_idx);
1091 
1092                 // In regular mode pkt->l2pkt_rx_info->size == pkt->l2pkt_rx_info->total_packet_size.
1093                 // We need total_packet_size for Dynamic HC so that we don't have to check there whether we are in the RSC or the regular flow.
1094                 pkt->l2pkt_rx_info->total_packet_size = pkt->l2pkt_rx_info->size;
1095 
1096                 /* make sure packet size is larger than header size and smaller than max packet size of the specific L2 client */
1097                 DbgBreakIfFastPath((pkt->l2pkt_rx_info->total_packet_size < MIN_ETHERNET_PACKET_SIZE) || (pkt->l2pkt_rx_info->total_packet_size > MAX_CLI_PACKET_SIZE(pdev, chain_idx)));
1098 
1099                 // ShayH: packet->size isn't used anymore by Windows; we directly put the data in l2pkt_rx_info->size and l2pkt_rx_info->total_packet_size.
1100                 // Need to ask if other UM clients use/need packet->size.
1101                 pkt->size = pkt->l2pkt_rx_info->size;
1102 
1103                 if(OOO_CID(pdev) == chain_idx)
1104                 {
1105                     DbgBreakIfFastPath( ETH_FP_CQE_RAW != (GET_FLAGS( cqe->fast_path_cqe.type_error_flags, ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL ) >>
1106                                                            ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL_SHIFT));
1107 
1108                     //optimized
1109                     /* make sure packet size is larger than header size and smaller than max packet size of the specific L2 client */
1110                     // TODO_OOO - check with flag
1111                     ASSERT_STATIC( sizeof(pkt->u1.rx.sgl_or_raw_data.raw_data) == sizeof(cqe->fast_path_cqe.sgl_or_raw_data.raw_data) );
1112                     mm_memcpy( pkt->u1.rx.sgl_or_raw_data.raw_data, cqe->fast_path_cqe.sgl_or_raw_data.raw_data, sizeof(pkt->u1.rx.sgl_or_raw_data.raw_data) );
1113                 }
1114                 else
1115                 {
1116                     DbgBreakIfFastPath( ETH_FP_CQE_REGULAR != (GET_FLAGS( cqe->fast_path_cqe.type_error_flags, ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL )>>
1117                                                            ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL_SHIFT)  ) ;
1118                 }
1119 
1120                 lm_regular_flags_handle(pdev,
1121                                         &(cqe->fast_path_cqe),
1122                                         pkt,
1123                                         parse_flags);
1124 
1125                 if (GET_FLAGS(pdev->params.ofld_cap_to_ndis, LM_OFFLOAD_ENCAP_PACKET))
1126                 {
1127                     // SW rx checksum for gre encapsulated packets
1128                     encap_pkt_parsing(pdev, pkt);
1129                 }
1130 
1131                 pkt_cnt++;
1132                 s_list_push_tail(rcvd_list, &pkt->link);
1133             }
1134 
1135             if GET_FLAGS(cqe->fast_path_cqe.status_flags, ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)
1136             {
1137                 SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_VALID_HASH_VALUE );
1138                 *pkt->u1.rx.hash_val_ptr = mm_le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
1139             }
1140 
1141             if(GET_FLAGS(parse_flags,PARSING_FLAGS_INNER_VLAN_EXIST))
1142             {
1143                 u16_t vlan_tag = mm_le16_to_cpu(cqe->fast_path_cqe.vlan_tag);
1144 
1145                 DbgMessage(pdev, INFORMl2, "vlan frame received: %x\n",vlan_tag);
1146                   /* fw always sets ETH_FAST_PATH_RX_CQE_VLAN_TAG_FLG and passes the vlan tag when
1147                      a packet with vlan arrives, but it removes the vlan from the packet only when
1148                      it is configured to remove vlan using params.vlan_removal_enable
1149                   */
1150                   if ((!pdev->params.keep_vlan_tag) &&
1151                       ( OOO_CID(pdev) != chain_idx))
1152                   {
1153                       SET_FLAGS(pkt->l2pkt_rx_info->flags , LM_RX_FLAG_VALID_VLAN_TAG);
1154                       pkt->l2pkt_rx_info->vlan_tag = vlan_tag;
1155                       DbgMessage(pdev, INFORMl2rx, "vlan removed from frame: %x\n",vlan_tag);
1156                   }
1157             }
1158 
1159 #if defined(_NTDDK_)
1160 #pragma warning (pop)
1161 #endif // !_NTDDK_
1162 #if DBG
1163             if(GET_FLAGS(parse_flags,PARSING_FLAGS_FRAGMENTATION_STATUS))
1164             {
1165                 LM_COMMON_DRV_STATS_ATOMIC_INC_ETH(pdev, rx_ipv4_frag_count);
1166             }
1167             if(GET_FLAGS(parse_flags,PARSING_FLAGS_LLC_SNAP))
1168             {
1169                 LM_COMMON_DRV_STATS_ATOMIC_INC_ETH(pdev, rx_llc_snap_count);
1170             }
1171             if(GET_FLAGS(parse_flags,PARSING_FLAGS_IP_OPTIONS) &&
1172                 GET_FLAGS(pkt->l2pkt_rx_info->flags ,LM_RX_FLAG_IS_IPV6_DATAGRAM))
1173             {
1174                 LM_COMMON_DRV_STATS_ATOMIC_INC_ETH(pdev, rx_ipv6_ext_count);
1175             }
1176 #endif // DBG
1177 
1178             /* We used to assert that if we got the PHY_DECODE_ERROR it was always a result of DROP_MAC_ERR; since we don't configure
1179              * DROP_MAC_ERR anymore, we don't expect this flag to ever be on.*/
1180             DbgBreakIfFastPath( GET_FLAGS(cqe->fast_path_cqe.type_error_flags, ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG) );
1181 
1182             DbgBreakIfFastPath(cqe->fast_path_cqe.type_error_flags &
1183                             ~(ETH_FAST_PATH_RX_CQE_TYPE |
1184                               ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG |
1185                               ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
1186                               ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG |
1187                               ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL));
1188 
1189 
1190             break;
1191         }
1192         case RX_ETH_CQE_TYPE_ETH_STOP_AGG:
1193         {//TPA stop
1194             DbgBreakIf( lm_tpa_state_disable == tpa_chain->state);
1195 
1196             pkt_cnt = lm_tpa_stop(pdev,
1197                                   rcvd_list,
1198                                   &(cqe->end_agg_cqe),
1199                                   chain_idx,
1200                                   pkt_cnt,
1201                                   cqe->end_agg_cqe.queue_index);
1202 
1203             //update the prod of the RCQ - by this, we recycled the CQE.
1204             lm_bd_chain_bd_produced(&rcq_chain->bd_chain);
1205             break;
1206         }
1207         case MAX_ETH_RX_CQE_TYPE:
1208         default:
1209             {
1210                 DbgBreakMsg("CQE type not supported");
1211             }
1212 
1213         }
1214     }
1215 
1216     // TODO: Move index update to a more suitable place
1217     rx_chain_bd->cons_idx = rx_old_idx;
1218     if( rx_chain_sge )
1219     {
1220         rx_chain_sge->cons_idx = rx_old_idx;
1221     }
1222 
1223     //notify the fw of the prod
1224     lm_rx_set_prods(pdev, rcq_chain->iro_prod_offset, &rcq_chain->bd_chain, rx_chain_bd, rx_chain_sge ,chain_idx);
1225 
1226     DbgMessage(pdev, INFORMl2rx, "lm_get_packets_rcvd- bd con: %d bd prod: %d \n",
1227                                 lm_bd_chain_cons_idx(rx_chain_bd), lm_bd_chain_prod_idx(rx_chain_bd));
1228     DbgMessage(pdev, INFORMl2rx, "lm_get_packets_rcvd- cq con: %d cq prod: %d \n",
1229                                 lm_bd_chain_cons_idx(&rcq_chain->bd_chain), lm_bd_chain_prod_idx(&rcq_chain->bd_chain));
1230     return pkt_cnt;
1231 } /* lm_get_packets_rcvd */
1232 
1233 lm_status_t lm_complete_ramrods(
1234     struct _lm_device_t *pdev,
1235     struct _sp_cqes_info *sp_cqes)
1236 {
1237     u8_t idx;
1238 
1239     for (idx = 0; idx < sp_cqes->idx; idx++) {
1240         lm_eth_init_command_comp(pdev, &(sp_cqes->sp_cqe[idx].ramrod_cqe));
1241     }
1242 
1243     return LM_STATUS_SUCCESS;
1244 }
1245 
1246 /* called by um whenever packets are returned by client
1247    rxq lock is taken by caller */
1248 void
1249 lm_return_packet_bytes( struct _lm_device_t *pdev,
1250                         u32_t const          qidx,
1251                         u32_t const          returned_bytes)
1252 {
1253     lm_rx_chain_t *rxq = &LM_RXQ(pdev, qidx);
1254 
1255     rxq->ret_bytes += returned_bytes;
1256 
1257     /* aggregate updates over PCI */
1258 
1259     /* HC_RET_BYTES_TH = min(l2_hc_threshold0 / 2 , 16KB) */
1260     #define HC_RET_BYTES_TH(pdev) (((pdev)->params.hc_threshold0[SM_RX_ID] < 32768) ? ((pdev)->params.hc_threshold0[SM_RX_ID] >> 1) : 16384)
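    /* Worked example (illustrative values): with hc_threshold0[SM_RX_ID] = 12KB the
     * threshold is 12KB >> 1 = 6KB; with 64KB it is capped at 16384, matching the
     * min(l2_hc_threshold0 / 2, 16KB) formula above. */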
1261 
1262     /* TODO: Future: Add #updatesTH = 20 */
1263 
1264     /* time to update fw ? */
1265     if(S32_SUB(rxq->ret_bytes, rxq->ret_bytes_last_fw_update + HC_RET_BYTES_TH(pdev)) >= 0)
1266     {
1267         /*
1268           !!DP
1269           The test below is to disable dynamic HC for the iSCSI chains
1270         */
1271         // TODO: VF dhc
1272         if (qidx < LM_MAX_RSS_CHAINS(pdev) && IS_PFDEV(pdev)) /* should be fine, if not, you can go for less robust case of != LM_CLI_RX_CHAIN_IDX(pdev, LM_CLI_IDX_ISCSI) */
1273         {
1274             /* There are HC_USTORM_SB_NUM_INDICES (4) index values for each SB to set and we're using the corresponding U indexes from the microcode consts */
1275             LM_INTMEM_WRITE32(PFDEV(pdev), rxq->hc_sb_info.iro_dhc_offset, rxq->ret_bytes, BAR_CSTRORM_INTMEM);
1276             rxq->ret_bytes_last_fw_update = rxq->ret_bytes;
1277         } else if (IS_VFDEV(pdev)) {
1278             VF_REG_WR(pdev, VF_BAR0_CSDM_QUEUES_OFFSET + rxq->hc_sb_info.iro_dhc_offset, rxq->ret_bytes);
1279             rxq->ret_bytes_last_fw_update = rxq->ret_bytes;
1280         }
1281     }
1282 }
1283 
1284