xref: /titanic_41/usr/src/uts/common/io/bnxe/577xx/drivers/common/lm/device/bnxe_context.c (revision f391a51a4e9639750045473dba1cc2831267c93e)
1 
2 /*
3 functions for managing Chip per-connection context
4 */
5 #include "context.h"
6 #include "command.h"
7 #include "cdu_def.h"
8 #include "bd_chain.h"
9 
10 /* returns a pionter to a connections chip context*/
lm_get_context(struct _lm_device_t * pdev,u32_t cid)11 void * lm_get_context(struct _lm_device_t *pdev, u32_t cid){
12 
13     void * ret = NULL;
14     u32_t page,off;
15 
16     DbgBreakIf(cid > pdev->params.max_func_connections);
17     DbgBreakIf(pdev->context_info->array[cid].invalid != LM_CONTEXT_VALID);
18 
19     /* calculate which context page the CID is on*/
20     page = cid / (pdev->params.num_context_in_page);
21 
22     /* calculate at what offset inside the page CID is on*/
23     off = cid % (pdev->params.num_context_in_page);
24 
25     /* now goto page,off */
26     ret = (void*)((char*)pdev->vars.context_cdu_virt_addr_table[page] + (pdev->params.context_line_size * off));
27     /* warrning, this assumes context line size is in chars, need to check!!!*/
28 
29     return ret;
30 }
31 
32 /* same as above but returns phys address in 64 bit pointer */
lm_get_context_phys(struct _lm_device_t * pdev,u32_t cid)33 u64_t lm_get_context_phys(struct _lm_device_t *pdev, u32_t cid){
34 
35     u64_t ret = 0;
36     u32_t page,off;
37 
38     DbgBreakIf(cid > pdev->params.max_func_connections);
39     DbgBreakIf(pdev->context_info->array[cid].invalid != LM_CONTEXT_VALID);
40 
41     /* calculate which context page the CID is on*/
42     page = cid / (pdev->params.num_context_in_page);
43 
44     /* calculate at what offset inside the page CID is on*/
45     off = cid % (pdev->params.num_context_in_page);
46 
47     /* now goto page,off */
48     ret = (pdev->vars.context_cdu_phys_addr_table[page].as_u64 + (pdev->params.context_line_size * off));
49     /* warrning, this assumes context line size is in chars, need to check!!!*/
50 
51     return ret;
52 }
53 
54 extern u32_t LOG2(u32_t v);
lm_setup_searcher_hash_info(struct _lm_device_t * pdev)55 static lm_status_t lm_setup_searcher_hash_info(struct _lm_device_t *pdev)
56 {
57     u32_t                    num_con    = 0 ;
58     u32_t                    alloc_size = 0 ;
59     lm_context_info_t*       context    = NULL;
60     lm_searcher_hash_info_t* hash_info  = NULL;
61     int                      offset     = 0 ;
62 
63     /* sanity */
64     if ( CHK_NULL(pdev) || CHK_NULL( pdev->context_info ) )
65     {
66         DbgBreakMsg("Invalid Parameters") ;
67         return LM_STATUS_INVALID_PARAMETER ;
68     }
69     context   = pdev->context_info;
70     hash_info = &context->searcher_hash;
71 
72     DbgBreakIf(!pdev->params.max_func_connections);
73 
74     if CHK_NULL( hash_info->searcher_table)
75     {
76         DbgBreakIf(!( hash_info->searcher_table));
77         return LM_STATUS_FAILURE;
78     }
79     num_con    = pdev->params.max_func_connections;
80     alloc_size = sizeof(lm_searcher_hash_entry_t) * num_con;
81     mm_mem_zero(hash_info->searcher_table, alloc_size);
82 
83     /* init value for searcher key */
84     // TODO: for now a fixed key, need to change at runtime
85     *(u32_t *)(&hash_info->searcher_key[0])  = 0x63285672;
86     *(u32_t *)(&hash_info->searcher_key[4])  = 0x24B8F2CC;
87     *(u32_t *)(&hash_info->searcher_key[8])  = 0x223AEF9B;
88     *(u32_t *)(&hash_info->searcher_key[12]) = 0x26001E3A;
89     *(u32_t *)(&hash_info->searcher_key[16]) = 0x7AE91116;
90     *(u32_t *)(&hash_info->searcher_key[20]) = 0x5CE5230B;
91     *(u32_t *)(&hash_info->searcher_key[24]) = 0x298D8ADF;
92     *(u32_t *)(&hash_info->searcher_key[28]) = 0x6EB0FF09;
93     *(u32_t *)(&hash_info->searcher_key[32]) = 0x1830F82F;
94     *(u32_t *)(&hash_info->searcher_key[36]) = 0x1E46BE7;
95 
96     /* Microsoft's example key */
97 //      *(u32_t *)(&hash_info->searcher_key[0]) = 0xda565a6d;
98 //      *(u32_t *)(&hash_info->searcher_key[4]) = 0xc20e5b25;
99 //      *(u32_t *)(&hash_info->searcher_key[8]) = 0x3d256741;
100 //      *(u32_t *)(&hash_info->searcher_key[12]) = 0xb08fa343;
101 //      *(u32_t *)(&hash_info->searcher_key[16]) = 0xcb2bcad0;
102 //      *(u32_t *)(&hash_info->searcher_key[20]) = 0xb4307bae;
103 //      *(u32_t *)(&hash_info->searcher_key[24]) = 0xa32dcb77;
104 //      *(u32_t *)(&hash_info->searcher_key[28]) = 0x0cf23080;
105 //      *(u32_t *)(&hash_info->searcher_key[32]) = 0x3bb7426a;
106 //      *(u32_t *)(&hash_info->searcher_key[36]) = 0xfa01acbe;
107 
108     /* init searcher_key_bits array */
109     for (offset = 0; offset < 10; offset++)
110     {
111         int j,k;
112         u32_t bitsOffset = 32*offset;
113         u8_t _byte;
114 
115         for (j= 0; j < 4; j++)
116         {
117             _byte  = (u8_t)((*(u32_t *)(&hash_info->searcher_key[offset*4]) >> (j*8)) & 0xff);
118             for (k = 0; k < 8; k++)
119             {
120                 hash_info->searcher_key_bits[bitsOffset+(j*8)+k] = ((_byte<<(k%8))& 0x80) ? 1 : 0;
121             }
122         }
123     }
124 
125     /* init value for num hash bits */
126     hash_info->num_hash_bits = (u8_t)LOG2(num_con);
127 
128     return LM_STATUS_SUCCESS ;
129 }
130 
lm_alloc_searcher_hash_info(struct _lm_device_t * pdev)131 static lm_status_t lm_alloc_searcher_hash_info(struct _lm_device_t *pdev)
132 {
133     u32_t                    num_con    = 0 ;
134     u32_t                    alloc_size = 0 ;
135     lm_searcher_hash_info_t* hash_info  = NULL ;
136     u8_t                     mm_cli_idx = 0 ;
137 
138     if CHK_NULL(pdev)
139     {
140         return LM_STATUS_INVALID_PARAMETER ;
141     }
142 
143     mm_cli_idx = LM_RESOURCE_COMMON;//!!DP mm_cli_idx_to_um_idx(LM_CLI_IDX_MAX);
144 
145     /* searcher is defined with per-function #connections */
146     num_con    = pdev->params.max_func_connections;
147     alloc_size = sizeof(lm_searcher_hash_entry_t) * num_con;
148 
149     hash_info  = &pdev->context_info->searcher_hash;
150 
151     if CHK_NULL(hash_info)
152     {
153         return LM_STATUS_INVALID_PARAMETER ;
154     }
155 
156     /* allocate searcher mirror hash table */
157     hash_info->searcher_table = mm_alloc_mem(pdev, alloc_size, mm_cli_idx);
158 
159     if CHK_NULL( hash_info->searcher_table )
160     {
161         DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
162         return  LM_STATUS_RESOURCE ;
163     }
164     return LM_STATUS_SUCCESS ;
165 }
166 
lm_init_cid_resc(struct _lm_device_t * pdev,u32_t cid)167 lm_status_t lm_init_cid_resc(struct _lm_device_t *pdev, u32_t cid)
168 {
169     lm_cid_resc_t *cid_resc = NULL;
170     int            i        = 0;
171 
172     if CHK_NULL(pdev)
173     {
174         return LM_STATUS_INVALID_PARAMETER;
175     }
176 
177     cid_resc = &pdev->context_info->array[cid].cid_resc;
178     if CHK_NULL(cid_resc)
179     {
180         return LM_STATUS_INVALID_PARAMETER;
181     }
182 
183     for (i = 0; i < ARRSIZE(cid_resc->cookies); i++)
184     {
185         cid_resc->cookies[i] = NULL;
186     }
187 
188     cid_resc->cid_pending = LM_CID_STATE_VALID;
189     lm_sp_req_manager_init(pdev, cid);
190 
191     return LM_STATUS_SUCCESS;
192 }
193 
lm_setup_context_pool(struct _lm_device_t * pdev)194 lm_status_t lm_setup_context_pool(struct _lm_device_t *pdev)
195 {
196     u32_t                     num_con         = 0;
197     lm_context_info_t *       context         = NULL ;
198     u32_t                     i,j;
199     struct lm_context_cookie* array           = NULL ;
200     lm_searcher_hash_entry_t* searcher_table  = NULL ;
201 
202     if CHK_NULL(pdev)
203     {
204         DbgBreakIf(!pdev);
205         return LM_STATUS_INVALID_PARAMETER;
206     }
207 
208     context = pdev->context_info;
209 
210     if CHK_NULL(context)
211     {
212         DbgBreakIf( context == NULL );
213         return LM_STATUS_INVALID_PARAMETER;
214     }
215 
216     num_con = pdev->params.max_func_connections;
217 
218     array           = context->array ;
219     searcher_table  = context->searcher_hash.searcher_table ;
220 
221     mm_mem_zero( context, sizeof(lm_context_info_t) ) ;
222 
223     context->array                        = array ;
224     context->searcher_hash.searcher_table = searcher_table ;
225 
226     context->proto_start[ETH_CONNECTION_TYPE]   = 0;
227     context->proto_end  [ETH_CONNECTION_TYPE]   = pdev->params.max_eth_including_vfs_conns - 1;
228     context->proto_start[TOE_CONNECTION_TYPE]   = context->proto_end  [ETH_CONNECTION_TYPE]   + 1;
229     context->proto_end  [TOE_CONNECTION_TYPE]   = context->proto_start[TOE_CONNECTION_TYPE]   + pdev->params.max_func_toe_cons - 1;
230     context->proto_start[RDMA_CONNECTION_TYPE]  = context->proto_end  [TOE_CONNECTION_TYPE]   + 1;
231     context->proto_end  [RDMA_CONNECTION_TYPE]  = context->proto_start[RDMA_CONNECTION_TYPE]  + pdev->params.max_func_rdma_cons - 1;
232     context->proto_start[ISCSI_CONNECTION_TYPE] = context->proto_end  [RDMA_CONNECTION_TYPE]  + 1;
233     context->proto_end  [ISCSI_CONNECTION_TYPE] = context->proto_start[ISCSI_CONNECTION_TYPE] + pdev->params.max_func_iscsi_cons - 1;
234     context->proto_start[FCOE_CONNECTION_TYPE]  = context->proto_end  [ISCSI_CONNECTION_TYPE] + 1;
235     context->proto_end  [FCOE_CONNECTION_TYPE]  = context->proto_start[FCOE_CONNECTION_TYPE]  + pdev->params.max_func_fcoe_cons - 1;
236     DbgBreakIf(context->proto_end[MAX_PROTO - 1] > pdev->params.max_func_connections -1);
237 
238     if CHK_NULL(context->array)
239     {
240         DbgBreakIf(!( context->array));
241         return LM_STATUS_INVALID_PARAMETER;
242     }
243 
244     mm_mem_zero(context->array, sizeof(struct lm_context_cookie)*num_con);
245 
246     ASSERT_STATIC( ARRSIZE(context->proto_start) == ARRSIZE(context->proto_end) );
247 
248     /* zero cookies and populate the free lists */
249     for (i = 0; i < ARRSIZE(context->proto_start); i++ )
250     {
251         for (j = context->proto_start[i]; j <= context->proto_end[i]; j++)
252         {
253             context->array[j].next    = j+1;
254             context->array[j].invalid = LM_CONTEXT_VALID;
255             context->array[j].ip_type = 0;
256             context->array[j].h_val   = 0;
257             lm_init_cid_resc(pdev, j);
258         }
259         /* set the first free item if max_func_XX_cons > 0 */
260         if (context->proto_start[i] <= context->proto_end[i]) {
261             context->proto_ffree[i] = context->proto_start[i];
262         }
263         else
264         {
265             context->proto_ffree[i] = 0;
266         }
267         context->proto_pending[i] = 0;
268         /* put 0 (end of freelist in the last entry for the proto */
269         context->array[context->proto_end[i]].next = 0;
270     }
271     //The ETH cid doorbell space was remapped just fixing the pointers.
272     for (j = context->proto_start[ETH_CONNECTION_TYPE]; j <= context->proto_end[ETH_CONNECTION_TYPE]; j++)
273     {
274 #ifdef VF_INVOLVED
275         if (IS_CHANNEL_VFDEV(pdev)) {
276             context->array[j].cid_resc.mapped_cid_bar_addr =
277                 (volatile void *)((u8_t*)pdev->vars.mapped_bar_addr[BAR_0] + j*lm_vf_get_doorbell_size(pdev) + VF_BAR0_DB_OFFSET);
278 #ifdef __SunOS
279             context->array[j].cid_resc.reg_handle = pdev->vars.reg_handle[BAR_0];
280 #endif /* __SunOS */
281         } else
282 #endif /* VF_INVOLVED */
283         {
284             context->array[j].cid_resc.mapped_cid_bar_addr =
285                 (volatile void *)((u8_t*)pdev->vars.mapped_bar_addr[BAR_1] + j*LM_DQ_CID_SIZE);
286 #ifdef __SunOS
287             context->array[j].cid_resc.reg_handle = pdev->vars.reg_handle[BAR_1];
288 #endif /* __SunOS */
289         }
290     }
291     return lm_setup_searcher_hash_info(pdev) ;
292 }
293 
294 /* context pool initializer */
lm_alloc_context_pool(struct _lm_device_t * pdev)295 lm_status_t lm_alloc_context_pool(struct _lm_device_t *pdev){
296 
297     u32_t               num_con    = 0 ;
298     lm_context_info_t * context    = NULL ;
299     u8_t                mm_cli_idx = 0;
300 
301     if CHK_NULL(pdev)
302     {
303         DbgBreakIf(!pdev);
304         return LM_STATUS_INVALID_PARAMETER ;
305     }
306 
307     /* must not be called if allready initialized */
308     if ERR_IF( NULL != pdev->context_info )
309     {
310         DbgBreakIf( pdev->context_info != NULL ) ;
311         return LM_STATUS_FAILURE ;
312     }
313 
314     mm_cli_idx = LM_RESOURCE_COMMON;//!!DP mm_cli_idx_to_um_idx(LM_CLI_IDX_MAX);
315 
316     /* number of context is per-function, the cdu has a per-port register that can be set to be higher than the max_func_connections, but
317      * the amount of memory actually allocated for the CDU matches max_func_connections. */
318     num_con = pdev->params.max_func_connections ;
319 
320     /* allocate context info and cookie array */
321     context = mm_alloc_mem(pdev, sizeof(lm_context_info_t), mm_cli_idx);
322     if CHK_NULL(context)
323     {
324         DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
325         return LM_STATUS_RESOURCE ;
326     }
327 
328     /* allocate list entries */
329     context->array = mm_alloc_mem(pdev, sizeof(struct lm_context_cookie)*num_con, mm_cli_idx);
330     if CHK_NULL(context->array)
331     {
332         DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
333         return LM_STATUS_RESOURCE ;
334     }
335 
336     /* initilize the lock */
337 
338     /* put the context where it belongs */
339     pdev->context_info = context;
340 
341     /* init searcher hash info */
342     return lm_alloc_searcher_hash_info(pdev);
343     /* return success */
344 }
345 
346 /* context pool release function */
lm_release_context_pool(struct _lm_device_t * pdev)347 void lm_release_context_pool(struct _lm_device_t *pdev){
348 
349     lm_context_info_t* context = NULL;
350     u32_t i, j;
351 
352     /* must only be called if initialized */
353     DbgBreakIf( pdev->context_info == NULL );
354 
355     /* first make a copy and kill the original refference */
356     context            = pdev->context_info;
357     pdev->context_info = NULL;
358 
359     /* free context cookie array
360        sanity check: scan it and make sure it is empty */
361     for (i=0; i<(pdev->params.max_func_connections); i++  )
362     {
363         for (j = 0; j < MAX_PROTO; j++)
364         {
365             DbgBreakIf( context->array[i].cid_resc.cookies[j] != NULL );
366         }
367 
368         /* NirV: can't call from here, context_info is NULL */
369         /*DbgBreakIf(lm_sp_req_manager_shutdown(pdev, i) != LM_STATUS_SUCCESS);*/
370     }
371     /* mm_free_mem(context->array); */
372 
373     /* sanity check - searcher mirror hash must be empty */
374     DbgBreakIf(context->searcher_hash.num_tuples);
375 
376     /* de-initilize the lock? if in debug mode we can leave it taken to chatch errors */
377 
378     /* free context info */
379     /* mm_free_mem(context); */
380 
381 
382     /* return success */
383 
384 }
385 
_lm_searcher_mirror_hash_calc(lm_searcher_hash_info_t * hash_info,lm_4tuple_t * tuple)386 static u32_t _lm_searcher_mirror_hash_calc(lm_searcher_hash_info_t *hash_info, lm_4tuple_t *tuple)
387 {
388     u8_t  in_str[MAX_SEARCHER_IN_STR] = {0};
389     u8_t* in_str_bits                 = hash_info->searcher_in_str_bits;
390     u8_t* key_bits                    = hash_info->searcher_key_bits;
391     u32_t in_bits                     = 0;
392     u32_t result                      = 0;
393     u16_t i                           = 0;
394     u16_t j                           = 0;
395 
396     /* prepare input string */
397     if (tuple->ip_type == LM_IP_TYPE_V4)
398     {
399         *(u32_t *)(&in_str[0])  = HTON32(tuple->src_ip[0]);
400         *(u32_t *)(&in_str[4])  = HTON32(tuple->dst_ip[0]);
401         *(u16_t *)(&in_str[8])  = tuple->src_port;
402         *(u16_t *)(&in_str[10]) = tuple->dst_port;
403         in_bits = 12 * 8;
404     }
405     else
406     {
407         *(u32_t *)(&in_str[0])   = HTON32(tuple->src_ip[0]);
408         *(u32_t *)(&in_str[4])   = HTON32(tuple->src_ip[1]);
409         *(u32_t *)(&in_str[8])   = HTON32(tuple->src_ip[2]);
410         *(u32_t *)(&in_str[12])  = HTON32(tuple->src_ip[3]);
411 
412         *(u32_t *)(&in_str[16])  = HTON32(tuple->dst_ip[0]);
413         *(u32_t *)(&in_str[20])  = HTON32(tuple->dst_ip[1]);
414         *(u32_t *)(&in_str[24])  = HTON32(tuple->dst_ip[2]);
415         *(u32_t *)(&in_str[28])  = HTON32(tuple->dst_ip[3]);
416 
417         *(u16_t *)(&in_str[32]) = tuple->src_port;
418         *(u16_t *)(&in_str[34]) = tuple->dst_port;
419         in_bits = 36 * 8;
420     }
421 
422     /* prepare searcher_in_str_bits from in_str */
423     for (i = 0; i < in_bits; i++)
424     {
425         /* 0x80 - the leftmost bit. */
426         in_str_bits[i] = ((in_str[i/8]<<(i%8)) & 0x80) ? 1 : 0;
427     }
428 
429     /* calc ToeplitzHash */
430     for (i = 0; i < 32; i++)
431     {
432         u8_t h = 0;
433 
434         for (j = 0; j < in_bits; j++)
435         {
436             h ^= key_bits[i+j] & in_str_bits[j];
437         }
438 
439         result |= (h<<(32-i-1));
440     }
441 
442     return result;
443 }
444 
445 /* assumption: CID lock NOT taken by caller */
lm_searcher_mirror_hash_insert(struct _lm_device_t * pdev,u32_t cid,lm_4tuple_t * tuple)446 lm_status_t lm_searcher_mirror_hash_insert(struct _lm_device_t *pdev, u32_t cid, lm_4tuple_t *tuple)
447 {
448     lm_context_info_t        *context    = NULL;
449     lm_searcher_hash_entry_t *hash_entry = NULL;
450     u32_t                    h_val       = 0;
451     u8_t temp_ipv6, temp_ipv4, temp_depth_ipv4, is_ipv4;
452     lm_status_t              lm_status   = LM_STATUS_SUCCESS;
453     #define SRC_HASH_DEPTH_TH 15 /* that is searcher's default MaxNumHops - 1 */
454 
455     /* take spinlock */
456     MM_ACQUIRE_CID_LOCK(pdev);
457 
458     context = pdev->context_info;
459     is_ipv4 = (tuple->ip_type == LM_IP_TYPE_V4 ? 1 : 0);
460 
461     /* calc hash val */
462     h_val = _lm_searcher_mirror_hash_calc(&context->searcher_hash, tuple);
463 
464     /* take only num_hash_bits LSBs */
465     h_val &= ((1 << context->searcher_hash.num_hash_bits) - 1);
466 
467     /* init num_hash_bits in the searcher: if the h_val is all FFFFs - set it to 0 */
468     if (h_val == ((1 << context->searcher_hash.num_hash_bits) - 1)) {
469         h_val = 0;
470     }
471 
472     /* get the hash entry */
473     hash_entry = &context->searcher_hash.searcher_table[h_val];
474 
475     /* start the alg. to find if there is a place available in that entry */
476     temp_ipv6 = hash_entry->num_ipv6 + (is_ipv4 ? 0 : 1);
477     temp_ipv4 = hash_entry->num_ipv4 + is_ipv4;
478 
479     /* tempDepthIpv4 = max ( depthIpv4(H), roundup(tempIpv4/2) ) */
480     temp_depth_ipv4 = (temp_ipv4 / 2) + (temp_ipv4 % 2);
481     if (temp_depth_ipv4 < hash_entry->depth_ipv4) {
482         temp_depth_ipv4 = hash_entry->depth_ipv4;
483     }
484 
485     if (temp_depth_ipv4 + temp_ipv6 > SRC_HASH_DEPTH_TH) {
486         /* each hash entry has SRC_HASH_DEPTH_TH available places.
487          * each place can contain 1 ipv6 connection or 2 ipv4 connections */
488         DbgBreakMsg("Reached searcher hash limit\n");
489         lm_status = LM_STATUS_FAILURE;
490     } else {
491         hash_entry->num_ipv6 = temp_ipv6;
492         hash_entry->num_ipv4 = temp_ipv4;
493         hash_entry->depth_ipv4 = temp_depth_ipv4;
494 
495         /* for debug, save the max depth reached */
496         if (context->searcher_hash.hash_depth_reached < hash_entry->depth_ipv4 + hash_entry->num_ipv6) {
497             context->searcher_hash.hash_depth_reached = hash_entry->depth_ipv4 + hash_entry->num_ipv6;
498         }
499         context->searcher_hash.num_tuples++;
500 
501         /* remeber the IP type and h_val to know where and how much
502          * to decrease upon CID recycling */
503         DbgBreakIf(context->array[cid].ip_type); /* cid can't be inserted twice */
504         context->array[cid].ip_type = tuple->ip_type;
505         context->array[cid].h_val = h_val;
506     }
507 
508     /* release spinlock */
509     MM_RELEASE_CID_LOCK(pdev);
510 
511     return lm_status;
512 }
513 
514 /* assumption: CID lock NOT taken by caller */
lm_searcher_mirror_hash_remove(struct _lm_device_t * pdev,u32_t cid)515 void lm_searcher_mirror_hash_remove(struct _lm_device_t *pdev, u32_t cid)
516 {
517     lm_context_info_t        *context    = NULL;
518     lm_searcher_hash_entry_t *hash_entry = NULL;
519     u32_t                    h_val       = 0;
520 
521     /* take spinlock */
522     MM_ACQUIRE_CID_LOCK(pdev);
523 
524     context = pdev->context_info;
525 
526     if(!context->array[cid].ip_type) {
527         /* i.e lm_searcher_mirror_hash_insert was not called for this cid */
528         DbgMessage(pdev, WARN,
529                    "not removing CID %d from SRC hash (hash insert was not called for this cid)\n"
530                    ,cid);
531 
532         /* release spinlock */
533         MM_RELEASE_CID_LOCK(pdev);
534 
535         return;
536     }
537 
538     h_val = context->array[cid].h_val;
539     hash_entry = &context->searcher_hash.searcher_table[h_val];
540 
541     if (context->array[cid].ip_type == LM_IP_TYPE_V6) {
542         DbgBreakIf(!hash_entry->num_ipv6);
543         hash_entry->num_ipv6--;
544     }
545     else
546     {
547         DbgBreakIf(!hash_entry->num_ipv4);
548         hash_entry->num_ipv4--;
549         if (hash_entry->num_ipv4 < hash_entry->depth_ipv4)
550         {
551             hash_entry->depth_ipv4 = hash_entry->num_ipv4;
552         }
553     }
554 
555     /* for debug */
556     context->searcher_hash.num_tuples--;
557 
558     /* clear the entry of the context */
559     context->array[cid].ip_type = 0;
560     context->array[cid].h_val = 0;
561 
562     /* release spinlock */
563     MM_RELEASE_CID_LOCK(pdev);
564 }
565 
566 /*  allocate a free context by type
567     returns CID in the out_cid param
568     return LM_STATUS_SUCCESS for available cid
569     LM_STATUS_RESOURCE if no cids are available
570     LM_STATUS_PENDING if there is a pending cfc-delete cid
571     takes the list spinlock */
lm_allocate_cid(struct _lm_device_t * pdev,u32_t type,void * cookie,s32_t * out_cid)572 lm_status_t lm_allocate_cid(struct _lm_device_t *pdev, u32_t type, void * cookie, s32_t * out_cid){
573 
574     lm_context_info_t  *context  = NULL;
575     lm_status_t        lm_status = LM_STATUS_SUCCESS;
576     u32_t              cid       = (u32_t)-1;
577     lm_address_t       phy_addr  = {{0}} ;
578 
579     if ( CHK_NULL(out_cid) ||
580          CHK_NULL(pdev) ||
581          CHK_NULL(pdev->context_info) ||
582          CHK_NULL(pdev->context_info->array) ||
583          CHK_NULL(cookie) ||
584          ERR_IF(type >= ARRSIZE(pdev->context_info->proto_pending)) )
585 
586     {
587         DbgBreakIf(!out_cid) ;
588         DbgBreakIf(!pdev);
589         DbgBreakIf(!pdev->context_info);
590         DbgBreakIf(!pdev->context_info->array);
591         DbgBreakIf(!cookie);
592         DbgBreakIf(type >= ARRSIZE(pdev->context_info->proto_pending)) ;
593         return LM_STATUS_INVALID_PARAMETER ;
594     }
595 
596     context = pdev->context_info;
597     *out_cid = 0;
598     /* take spinlock */
599     MM_ACQUIRE_CID_LOCK(pdev);
600 
601     // if the free list is empty return error
602     if (context->proto_ffree[type]==0) {
603         if ((pdev->params.cid_allocation_mode == LM_CID_ALLOC_REGULAR) || (context->proto_pending[type] == 0)) {
604             // if the free list is empty AND the pending list is empty return error OR
605             // the free list is empty and we're in the regular allocating mode
606             lm_status = LM_STATUS_RESOURCE;
607         }
608         else
609         {
610             /* pop pendinglist entry and place cookie */
611             /* we only use the cid to connect between the pending connection and this cid, but
612              * the connection can't know of this cid before it is acually freed, for this reason
613              * we return cid = 0, which means, 'pending' */
614             cid = context->proto_pending[type];
615             context->proto_pending[type] = context->array[cid].next;
616             context->array[cid].next = 0;
617             context->array[cid].cid_resc.cookies[type] = cookie;
618             context->array[cid].cid_resc.cid_pending = LM_CID_STATE_PENDING;
619             lm_sp_req_manager_init(pdev, cid);
620             *out_cid = cid;
621 
622             /* make sure the first cid previous is set correctly*/
623             cid = context->proto_pending[type];
624             if (cid) {
625                 context->array[cid].prev = 0;
626             }
627             lm_status = LM_STATUS_PENDING;
628         }
629     }else{
630         /* pop freelist entry and place cookie*/
631         cid = context->proto_ffree[type];
632         context->proto_ffree[type] = context->array[cid].next;
633         context->array[cid].next = 0;
634         context->array[cid].prev = 0;
635         context->array[cid].cid_resc.cookies[type] = cookie;
636         lm_sp_req_manager_init(pdev, cid);
637         *out_cid = cid;
638         lm_status = LM_STATUS_SUCCESS;
639     }
640 
641     MM_RELEASE_CID_LOCK(pdev);
642 
643     if(LM_STATUS_SUCCESS == lm_status)
644     {
645         //If the function allocated a new free CID, (not pending) the function MmMapIoSpace will be called
646         //to map the specific physical cid doorbell space to a virtual address.
647         //In case of a pending CID, the map doorbell space will not be remapped. The pending CID will use
648         //the old mapping cid doorbell space.
649         phy_addr.as_u32.low = (pdev->hw_info.mem_base[BAR_1].as_u32.low) & 0xfffffff0;
650         phy_addr.as_u32.high = pdev->hw_info.mem_base[BAR_1].as_u32.high;
651 
652         LM_INC64(&phy_addr,(cid*LM_DQ_CID_SIZE));
653 
654 #ifdef __SunOS
655 
656         context->array[cid].cid_resc.mapped_cid_bar_addr =
657 #ifdef VF_INVOLVED
658             (volatile void *)((u8_t*)pdev->vars.mapped_bar_addr[BAR_1] + cid*LM_DQ_CID_SIZE);
659         context->array[cid].cid_resc.reg_handle = pdev->vars.reg_handle[BAR_1];
660 #else /* !VF_INVOLVED */
661             (volatile void *)mm_map_io_space_solaris(pdev,
662                                                      phy_addr,
663                                                      BAR_1,
664                                                      (cid * LM_DQ_CID_SIZE),
665                                                      LM_DQ_CID_SIZE,
666                                                      &context->array[cid].cid_resc.reg_handle);
667 #endif /* VF_INVOLVED */
668 
669 #else /* !__SunOS */
670 
671         context->array[cid].cid_resc.mapped_cid_bar_addr =
672 #ifdef VF_INVOLVED
673             (volatile void *)((u8_t*)pdev->vars.mapped_bar_addr[BAR_1] + cid*LM_DQ_CID_SIZE);
674 #else /* !VF_INVOLVED */
675             (volatile void *)mm_map_io_space(pdev, phy_addr, LM_DQ_CID_SIZE);
676 #endif /* VF_INVOLVED */
677 
678 #endif /* __SunOS */
679 
680         // If the mapping failed we will return LM_STATUS_RESOURCE and return the cid resource.
681         if CHK_NULL(context->array[cid].cid_resc.mapped_cid_bar_addr)
682         {
683             DbgMessage(pdev, FATAL, "lm_allocate_cid: mm_map_io_space failed. address low=%d address high=%d\n", phy_addr.as_u32.low,phy_addr.as_u32.high );
684 
685             /* take spinlock */
686             MM_ACQUIRE_CID_LOCK(pdev);
687             /* return the cid to free list */
688             context->array[cid].next = pdev->context_info->proto_ffree[type];
689             context->proto_ffree[type] = cid;
690             context->array[cid].invalid = LM_CONTEXT_VALID;
691             MM_RELEASE_CID_LOCK(pdev);
692 
693             lm_status = LM_STATUS_RESOURCE;
694             *out_cid =0;
695         }
696     }
697     return lm_status;
698 }
699 
lm_cfc_delete(struct _lm_device_t * pdev,void * param)700 void lm_cfc_delete(struct _lm_device_t *pdev, void *param)
701 {
702     u32_t cid             = *((u32_t *)&param);
703     u8_t  flr_in_progress = lm_fl_reset_is_inprogress(pdev);
704 
705     if ( CHK_NULL(pdev) ||
706          ERR_IF(cid > pdev->params.max_func_connections) ||
707          ERR_IF(pdev->context_info->array[cid].invalid != LM_CONTEXT_INVALID_WAIT) )
708     {
709         DbgBreakIf(!pdev);
710         DbgBreakIf(cid > pdev->params.max_func_connections);
711 
712         if (!flr_in_progress)
713         {
714             DbgBreakIf(pdev->context_info->array[cid].invalid != LM_CONTEXT_INVALID_WAIT);
715         }
716         else
717         {
718             DbgMessage(pdev, FATAL, "lm_cfc_delete: invalid %d for cid=%d\n", pdev->context_info->array[cid].invalid,cid);
719 
720             if (pdev->context_info->array[cid].invalid != LM_CONTEXT_INVALID_DELETE)
721             {
722                 DbgBreakIf(1);
723             }
724         }
725     }
726 
727     DbgMessage(pdev, WARN, "lm_cfc_delete: cid=0x%x\n",cid);
728     pdev->context_info->array[cid].invalid = LM_CONTEXT_INVALID_DELETE;
729 
730     if (lm_fl_reset_is_inprogress(pdev))
731     {
732          lm_recycle_cid(pdev, cid);
733     }
734     else
735     {
736         /* use common bit */
737         lm_command_post(pdev,
738                         cid,
739                         RAMROD_CMD_ID_COMMON_CFC_DEL,
740                         CMD_PRIORITY_NORMAL,
741                         NONE_CONNECTION_TYPE,
742                         0 );
743     }
744     return;
745 }
746 
747 /* free a context
748    takes the list spinlock */
/* Release a CID back toward the free pool.
 *
 * pdev      - device handle
 * type      - protocol index (TOE/ETH/ISCSI/FCOE...) owning the CID
 * cid       - the connection id to free; must lie in [proto_start[type], proto_end[type]]
 * notify_fw - when TRUE, a CFC-delete handshake with FW is required first: the CID is
 *             queued on proto_pending[type] and recycled later from the CFC-delete
 *             completion path. When FALSE the CID is recycled immediately.
 *
 * All protocol cookies must already be detached (asserted below). */
void lm_free_cid(struct _lm_device_t *pdev, u32_t type, u32_t cid, u8_t notify_fw){
    u32_t delay_time  = 0;
    u32_t curr_cid    = 0;
    u8_t  recycle_now = 0;
    u8_t  proto_idx   = 0;

    /* Parameter sanity. The invalid-state check is skipped while a function-level
     * reset is in progress, since contexts are torn down wholesale then. */
    if ( CHK_NULL(pdev) ||
         CHK_NULL(pdev->context_info) ||
         ERR_IF(type >= ARRSIZE(pdev->context_info->proto_end)) ||
         ERR_IF(cid > (pdev->context_info->proto_end[type])) ||
         ERR_IF(cid < (pdev->context_info->proto_start[type])) ||
         (!lm_fl_reset_is_inprogress(pdev) && (pdev->context_info->array[cid].invalid != LM_CONTEXT_VALID)))
    {
        DbgBreakIf(!pdev);
        DbgBreakIf(!pdev->context_info);
        DbgBreakIf(type >= ARRSIZE(pdev->context_info->proto_end));
        DbgBreakIf(cid > (pdev->context_info->proto_end[type]));
        DbgBreakIf(cid < (pdev->context_info->proto_start[type]));
        DbgBreakIf(pdev->context_info->array[cid].invalid != LM_CONTEXT_VALID);
        return;
    }
    MM_ACQUIRE_CID_LOCK(pdev);

    /* No protocol may still hold a cookie on this CID at free time. */
    for (proto_idx = 0; proto_idx < MAX_PROTO; proto_idx++)
    {
        DbgBreakIf(pdev->context_info->array[cid].cid_resc.cookies[proto_idx]);
    }

    /* Tear down the slow-path request manager attached to this CID. */
    lm_sp_req_manager_shutdown(pdev, cid);

    if (notify_fw)
    {
        /* Vladz: Added in order to optimize CID release in DOS */
#if !(defined(DOS) || defined(__LINUX))
        delay_time = LM_FREE_CID_DELAY_TIME(pdev);
#else
        delay_time = 0;
#endif

        /* Mark "waiting for CFC-delete completion". */
        pdev->context_info->array[cid].invalid = LM_CONTEXT_INVALID_WAIT;

        recycle_now = FALSE;
        /* add the cid to proto-pending (push at head of the doubly-linked list):
         * it'll be freed soon when cfc-delete is done */
        curr_cid = pdev->context_info->proto_pending[type];
        pdev->context_info->array[cid].next = curr_cid;
        pdev->context_info->array[cid].prev = 0;
        if (curr_cid != 0)
        {
            pdev->context_info->array[curr_cid].prev = cid;
        }
        pdev->context_info->proto_pending[type] = cid;
    }
    else
    {
        pdev->context_info->array[cid].invalid = LM_CONTEXT_INVALID_DELETE;
        recycle_now = TRUE;
        /* If we're recycling now, there's no point in adding it to the pending list */
    }

    MM_RELEASE_CID_LOCK(pdev);

    if (recycle_now) {
        lm_recycle_cid(pdev, cid);
    }
    else
    {
        /* NOTE(review): *((void **)&cid) reinterprets the 32-bit cid as a
         * pointer-sized cookie; on a 64-bit build this reads past the u32_t
         * local — appears to rely on stack layout. Confirm against
         * lm_cfc_delete()'s cookie decoding before changing. */
        if (type == TOE_CONNECTION_TYPE)
        {
            /* TOE: issue the CFC delete synchronously from this context. */
            DbgMessage(pdev, WARN, "lm_free_cid: CFC delete: cid=0x%x\n",cid);
            lm_cfc_delete(pdev,*((void **)&cid));
        }
        else
        {
            /* Other protocols: defer the CFC delete via a scheduled task. */
            DbgMessage(pdev, WARN, "lm_free_cid: schedule CFC delete: cid=0x%x\n",cid);
            mm_schedule_task(pdev,delay_time,lm_cfc_delete,*((void **)&cid));
        }
    }

}
828 
/* Recycle a CID after its CFC-delete has completed.
 *
 * The CID must be in LM_CONTEXT_INVALID_DELETE state. If no allocation request
 * is pending on the CID it is unlinked from the proto_pending list and pushed
 * onto the protocol free list; otherwise it is handed to the waiting requester
 * via the protocol's cid_recycled callback. In both cases the CDU validation
 * bytes are invalidated (legal only once CFC-delete has completed). */
void lm_recycle_cid(struct _lm_device_t *pdev, u32_t cid){

    u32_t type = MAX_PROTO+1;       /* resolved protocol index, or MAX_PROTO+1 if none */
    u32_t prev_cid, next_cid;
    u32_t i;
    u8_t  call_cb = TRUE;

    if ( CHK_NULL(pdev) ||
         ERR_IF(pdev->context_info->array[cid].invalid != LM_CONTEXT_INVALID_DELETE) ||
         ERR_IF(cid > pdev->params.max_func_connections) )
    {
        DbgBreakIf(!pdev);
        DbgBreakIf(pdev->context_info->array[cid].invalid != LM_CONTEXT_INVALID_DELETE);
        DbgBreakIf(cid > pdev->params.max_func_connections);
        return;
    }

    /* Map the CID back to its protocol by range lookup. */
    for (i=0; i < MAX_PROTO; i++ ) {
        if ((cid >= pdev->context_info->proto_start[i]) && (cid <= pdev->context_info->proto_end[i]))
        {
            type = i;
            break;
        }
    }
    if ERR_IF(type >= ARRSIZE(pdev->context_info->proto_pending))
    {
        DbgBreakIf(type >= ARRSIZE(pdev->context_info->proto_pending)) ;
        return;
    }
    /* take spinlock */
    MM_ACQUIRE_CID_LOCK(pdev);
#ifdef _VBD_
    /* Record the time of the last TOE recycling (used elsewhere for pacing). */
    if ((type == TOE_CONNECTION_TYPE) && (pdev->ofld_info.l4_params.ticks_per_second != 0))
    {
        pdev->vars.last_recycling_timestamp = mm_get_current_time(pdev) * 1000 / pdev->ofld_info.l4_params.ticks_per_second; /*time in ms*/
    }
#endif
    /* If no cookie is waiting on this cid extract from pending and push entry into the freelist */
    if (pdev->context_info->array[cid].cid_resc.cid_pending == FALSE) {
        /* take the cid out of the proto_pending cids if it's there */
        prev_cid = pdev->context_info->array[cid].prev;
        next_cid = pdev->context_info->array[cid].next;
        if (prev_cid) {
            pdev->context_info->array[prev_cid].next = next_cid;
        }
        if (next_cid) {
            pdev->context_info->array[next_cid].prev = prev_cid;
        }
        if (pdev->context_info->proto_pending[type] == cid) {
            /* Head of the pending list — must have no predecessor. */
            DbgBreakIf(prev_cid != 0);
            pdev->context_info->proto_pending[type] = next_cid;
        }
        pdev->context_info->array[cid].prev = pdev->context_info->array[cid].next = 0;
        /* add to free list (push at head) */
        pdev->context_info->array[cid].next = pdev->context_info->proto_ffree[type];
        pdev->context_info->array[cid].invalid = LM_CONTEXT_VALID;
        pdev->context_info->array[cid].cfc_delete_cnt = 0;
        pdev->context_info->proto_ffree[type] = cid;
        call_cb = FALSE; /* no one is waiting on this... */
        //free virtual memory for cids not in use.
#ifndef VF_INVOLVED
        mm_unmap_io_space(pdev,(void *)pdev->context_info->array[cid].cid_resc.mapped_cid_bar_addr, LM_DQ_CID_SIZE);
#endif
    }
    else
    {
        /* No need to extract from pending - it's not there. */

        /* NirV: we still can't set cid_resc.cid_pending to false, */
        /* will be possible only in the callback */

        pdev->context_info->array[cid].invalid = LM_CONTEXT_VALID;
        call_cb = TRUE;
    }

    /* time to clear the active bit (cdu-validation ) we can only do this after cfc-delete has completed, at this point, invalid==LM_CONTEXT_VALID */
    lm_set_cdu_validation_data(pdev, cid, TRUE /* Invalidate */);


    /* release spinlock */
    MM_RELEASE_CID_LOCK(pdev);

    /* call here the cid recycle callback of that
       protocol type if such cb exists*/
    if (pdev->cid_recycled_callbacks[type] && call_cb) {
        pdev->cid_recycled_callbacks[type](pdev, pdev->context_info->array[cid].cid_resc.cookies[type], cid);
    }

    return;
}
919 
920 /* lookup the protocol cookie for a given CID
921    does not take a lock
922    will DbgBreakIf( if the CID is not allocated. */
/* Lookup the protocol cookie for a given CID.
 * Does not take a lock.
 * Returns NULL when the parameters are invalid, when no cookie is attached
 * for this protocol, or when the CID is still pending. */
void * lm_cid_cookie(struct _lm_device_t *pdev, u32_t type, u32_t cid){

    if ( CHK_NULL(pdev) ||
         CHK_NULL(pdev->context_info) ||
         ERR_IF(type >= MAX_PROTO) ||
         ERR_IF(cid > (pdev->context_info->proto_end[MAX_PROTO - 1])) ||
         CHK_NULL(pdev->context_info->array[cid].cid_resc.cookies[type]) ||
         ERR_IF(pdev->context_info->array[cid].invalid != LM_CONTEXT_VALID) )
    {
        DbgBreakIf(!pdev);
        DbgBreakIf(!pdev->context_info);
        DbgBreakIf(type >= MAX_PROTO);
        DbgBreakIf(cid > (pdev->context_info->proto_end[MAX_PROTO - 1]));
        DbgBreakIf(pdev->context_info->array[cid].invalid != LM_CONTEXT_VALID);
        /* Fix: previously this fell through and dereferenced
         * pdev->context_info below even when pdev/context_info was NULL
         * (release-build NULL dereference). Bail out instead. */
        return NULL;
    }

    /* if the cid is pending, return null */
    if (pdev->context_info->array[cid].cid_resc.cid_pending != LM_CID_STATE_VALID)
    {
        return NULL;
    }

    return pdev->context_info->array[cid].cid_resc.cookies[type];
}
953 
954 /* lookup the protocol cid_resc for a given CID
955    does not take a lock
956    will DbgBreakIf( if the CID is not allocated */
lm_cid_resc(struct _lm_device_t * pdev,u32_t cid)957 lm_cid_resc_t * lm_cid_resc(struct _lm_device_t *pdev, u32_t cid){
958 
959     if ( CHK_NULL(pdev) ||
960          CHK_NULL(pdev->context_info) ||
961          ERR_IF(cid > (pdev->context_info->proto_end[MAX_PROTO - 1])) )
962     {
963         DbgBreakIf(!pdev);
964         DbgBreakIf(!pdev->context_info);
965         DbgBreakIf(cid > (pdev->context_info->proto_end[MAX_PROTO - 1]));
966     }
967 
968     return &pdev->context_info->array[cid].cid_resc;
969 }
970 
lm_map_cid_to_proto(struct _lm_device_t * pdev,u32_t cid)971 u8_t lm_map_cid_to_proto(struct _lm_device_t * pdev, u32_t cid)
972 {
973     u8_t type = MAX_PROTO+1;
974     u8_t i;
975 
976     if (!pdev || cid > pdev->params.max_func_connections) {
977         return type;
978     }
979 
980     for (i=0; i < MAX_PROTO; i++ ) {
981         if ((cid >= pdev->context_info->proto_start[i]) && (cid <= pdev->context_info->proto_end[i]))  {
982             type = i;
983             break;
984         }
985     }
986     return type;
987 }
988 
/* Initialize the chip context of an L2 (ETH) connection: zero the eth_context
 * and stamp its CDU validation bytes as valid. sw_cid is the software CID as
 * seen by this (possibly VF) device; it is translated to the PF CID space. */
void lm_init_connection_context(struct _lm_device_t *pdev, u32_t const sw_cid, u8_t sb_id)
{
    struct eth_context * eth_ctx = NULL;

    /* Sanity: sw_cid must lie inside the PF's ETH CID range. */
    if ( CHK_NULL(pdev) ||
         ERR_IF(sw_cid < PFDEV(pdev)->context_info->proto_start[ETH_CONNECTION_TYPE]) ||
         ERR_IF(sw_cid > PFDEV(pdev)->context_info->proto_end[ETH_CONNECTION_TYPE]) )
    {
        DbgBreakIf(!pdev);
        DbgBreakIf(sw_cid < PFDEV(pdev)->context_info->proto_start[ETH_CONNECTION_TYPE]); /* first legal NIC CID */
        DbgBreakIf(sw_cid > PFDEV(pdev)->context_info->proto_end[ETH_CONNECTION_TYPE]);   /* last legal NIC CID */
    }

    /* Locate this connection's context page/line and wipe it. */
    eth_ctx = lm_get_context(PFDEV(pdev), VF_TO_PF_CID(pdev,sw_cid));
    mm_mem_zero( eth_ctx, sizeof(struct eth_context) ) ;

    /* Write the CDU validation data for this CID. */
    lm_set_cdu_validation_data(pdev, VF_TO_PF_CID(pdev,sw_cid), FALSE /* don't invalidate */);

}
1010 
1011 lm_status_t
lm_set_cid_resc(IN struct _lm_device_t * pdev,IN u32_t type,IN void * cookie,IN u32_t cid)1012 lm_set_cid_resc(
1013     IN struct _lm_device_t *pdev,
1014     IN u32_t type,
1015     IN void *cookie,
1016     IN u32_t cid)
1017 {
1018     lm_status_t     lm_status  = LM_STATUS_SUCCESS;
1019     lm_cid_resc_t   *cid_resc  = NULL;
1020 
1021     if CHK_NULL(pdev)
1022     {
1023         return LM_STATUS_INVALID_PARAMETER;
1024     }
1025 
1026     /* take spinlock */
1027     MM_ACQUIRE_CID_LOCK(pdev);
1028 
1029     cid_resc = lm_cid_resc(pdev, cid);
1030 
1031     if CHK_NULL(cid_resc)
1032     {
1033         MM_RELEASE_CID_LOCK(pdev);
1034         return LM_STATUS_INVALID_PARAMETER;
1035     }
1036 
1037     cid_resc->cookies[type] = cookie;
1038 
1039     /* rlease spinlock */
1040     MM_RELEASE_CID_LOCK(pdev);
1041 
1042     return lm_status;
1043 }
1044 
1045 lm_status_t
lm_free_cid_resc(IN struct _lm_device_t * pdev,IN u32_t type,IN u32_t cid,IN u8_t notify_fw)1046 lm_free_cid_resc(
1047     IN    struct _lm_device_t *pdev,
1048     IN    u32_t type,
1049     IN    u32_t cid,
1050     IN    u8_t notify_fw)
1051 {
1052     lm_cid_resc_t   *cid_resc = NULL;
1053     u8_t            proto_idx = 0;
1054 
1055 
1056     if (CHK_NULL(pdev) || (cid == 0))
1057     {
1058         return LM_STATUS_INVALID_PARAMETER;
1059     }
1060 
1061     /* take spinlock */
1062     MM_ACQUIRE_CID_LOCK(pdev);
1063 
1064     cid_resc = lm_cid_resc(pdev, cid);
1065 
1066     if CHK_NULL(cid_resc)
1067     {
1068         MM_RELEASE_CID_LOCK(pdev);
1069         return LM_STATUS_INVALID_PARAMETER;
1070     }
1071 
1072     cid_resc->cookies[type] = NULL;
1073 
1074     while ((proto_idx < MAX_PROTO) && (cid_resc->cookies[proto_idx] == NULL))
1075     {
1076         proto_idx++;
1077     }
1078     /* rlease spinlock */
1079     MM_RELEASE_CID_LOCK(pdev);
1080 
1081     if (proto_idx == MAX_PROTO)
1082     {
1083         /* We'll call lm_map_cid_to_proto() to compute the appropriate type that was associated with that CID,
1084          * this is done to avoid assert upon race scenarios in which the last cookie resource that gets freed is not from the type of the CID */
1085         lm_free_cid(pdev, lm_map_cid_to_proto(pdev, cid), cid, notify_fw);
1086     }
1087 
1088     return LM_STATUS_SUCCESS;
1089 }
1090 
1091 
1092 
1093 lm_sp_req_manager_t *
lm_cid_sp_req_mgr(IN struct _lm_device_t * pdev,IN u32_t cid)1094 lm_cid_sp_req_mgr(
1095     IN struct _lm_device_t *pdev,
1096     IN u32_t cid
1097     )
1098 {
1099     lm_cid_resc_t   *cid_resc   = NULL;
1100 
1101     if CHK_NULL(pdev)
1102     {
1103         return NULL;
1104     }
1105 
1106     cid_resc = lm_cid_resc(pdev, cid);
1107 
1108     if CHK_NULL(cid_resc)
1109     {
1110         return NULL;
1111     }
1112 
1113     return &cid_resc->sp_req_mgr;
1114 }
1115 
1116 
1117 
1118 lm_cid_state_enum
lm_cid_state(IN struct _lm_device_t * pdev,IN u32_t cid)1119 lm_cid_state(
1120     IN struct _lm_device_t *pdev,
1121     IN u32_t cid
1122     )
1123 {
1124     lm_cid_resc_t   *cid_resc   = NULL;
1125 
1126     if CHK_NULL(pdev)
1127     {
1128         return LM_CID_STATE_ERROR;
1129     }
1130 
1131     cid_resc = lm_cid_resc(pdev, cid);
1132 
1133     if CHK_NULL(cid_resc)
1134     {
1135         return LM_CID_STATE_ERROR;
1136     }
1137 
1138     return (lm_cid_state_enum)cid_resc->cid_pending;
1139 }
1140 
1141 
1142 
1143 lm_status_t
lm_set_cid_state(IN struct _lm_device_t * pdev,IN u32_t cid,IN lm_cid_state_enum state)1144 lm_set_cid_state(
1145     IN struct _lm_device_t *pdev,
1146     IN u32_t cid,
1147     IN lm_cid_state_enum state
1148     )
1149 {
1150     lm_cid_resc_t   *cid_resc   = NULL;
1151 
1152     if CHK_NULL(pdev)
1153     {
1154         return LM_STATUS_INVALID_PARAMETER;
1155     }
1156 
1157     /* take spinlock */
1158     MM_ACQUIRE_CID_LOCK(pdev);
1159 
1160     cid_resc = lm_cid_resc(pdev, cid);
1161 
1162     if CHK_NULL(cid_resc)
1163     {
1164         MM_RELEASE_CID_LOCK(pdev);
1165         return LM_STATUS_INVALID_PARAMETER;
1166     }
1167 
1168     cid_resc->cid_pending = state;
1169 
1170     /* rlease spinlock */
1171     MM_RELEASE_CID_LOCK(pdev);
1172 
1173     return LM_STATUS_SUCCESS;
1174 }
1175 
1176 /**
1177  * sets the CDU validation data to be valid for a given cid
1178  *
1179  * @param pdev - the physical device handle
1180  * @param cid - the context of this cid will be initialized with the cdu validataion data
1181  *
1182  * @return lm_status_t
1183  */
lm_set_cdu_validation_data(struct _lm_device_t * pdev,s32_t cid,u8_t invalidate)1184 lm_status_t lm_set_cdu_validation_data(struct _lm_device_t *pdev, s32_t cid, u8_t invalidate)
1185 {
1186     lm_status_t lm_status = LM_STATUS_SUCCESS;
1187     void        *context        = NULL;
1188     u8_t        *cdu_reserved   = NULL; /* Pointer to the actual location of cdu_reserved field according to protocol */
1189     u8_t        *cdu_usage      = NULL; /* Pointer to the actual location of cdu_usage field according to protocol */
1190     u8_t        proto_type      = 0;
1191 
1192     context = lm_get_context(PFDEV(pdev), cid);
1193 
1194     if (!context) {
1195         return LM_STATUS_FAILURE;
1196     }
1197 
1198     proto_type = lm_map_cid_to_proto(PFDEV(pdev), cid);
1199 
1200     switch (proto_type) {
1201     case TOE_CONNECTION_TYPE:
1202         cdu_reserved = &((struct toe_context *)context)->xstorm_ag_context.cdu_reserved;
1203         cdu_usage = &(((struct toe_context *)context)->ustorm_ag_context.cdu_usage);
1204         break;
1205     case ETH_CONNECTION_TYPE:
1206         cdu_reserved = &(((struct eth_context *)context)->xstorm_ag_context.cdu_reserved);
1207         cdu_usage =  &(((struct eth_context *)context)->ustorm_ag_context.cdu_usage);
1208         break;
1209     case ISCSI_CONNECTION_TYPE:
1210         cdu_reserved = &(((struct iscsi_context *)context)->xstorm_ag_context.cdu_reserved);
1211         cdu_usage = &(((struct iscsi_context *)context)->ustorm_ag_context.cdu_usage);
1212         break;
1213     case FCOE_CONNECTION_TYPE:
1214         cdu_reserved = &(((struct fcoe_context *)context)->xstorm_ag_context.cdu_reserved);
1215         cdu_usage = &(((struct fcoe_context *)context)->ustorm_ag_context.cdu_usage);
1216         break;
1217     default:
1218         lm_status = LM_STATUS_FAILURE;
1219         break;
1220     }
1221 
1222     if (cdu_reserved && cdu_usage) {
1223         if (invalidate) {
1224             *cdu_reserved = CDU_RSRVD_INVALIDATE_CONTEXT_VALUE(*cdu_reserved);
1225             *cdu_usage    = CDU_RSRVD_INVALIDATE_CONTEXT_VALUE(*cdu_usage);
1226         } else {
1227             *cdu_reserved = CDU_RSRVD_VALUE_TYPE_A(HW_CID(pdev, cid), CDU_REGION_NUMBER_XCM_AG, proto_type);
1228             *cdu_usage    = CDU_RSRVD_VALUE_TYPE_A(HW_CID(pdev, cid), CDU_REGION_NUMBER_UCM_AG, proto_type);
1229         }
1230     }
1231 
1232     return lm_status;
1233 }
1234 
1235 
/* Report the per-connection chip context size (a build-time constant).
 * Always succeeds; pdev is unused. */
lm_status_t lm_get_context_size(struct _lm_device_t *pdev, s32_t * context_size)
{
    *context_size = LM_CONTEXT_SIZE;
    return LM_STATUS_SUCCESS;
}
1241 
lm_set_con_state(struct _lm_device_t * pdev,u32_t cid,u32_t state)1242 lm_status_t lm_set_con_state(struct _lm_device_t *pdev, u32_t cid, u32_t state)
1243 {
1244     lm_cid_resc_t * cid_resc = lm_cid_resc(pdev, cid);
1245 
1246     if CHK_NULL(cid_resc)
1247     {
1248         return LM_STATUS_INVALID_PARAMETER;
1249     }
1250 
1251     cid_resc->con_state = state;
1252 
1253     return LM_STATUS_SUCCESS;
1254 }
1255 
/* Read the connection state of a CID from its resource block (no locking).
 * Returns LM_CON_STATE_CLOSE when the CID cannot be resolved. */
u32_t lm_get_con_state(struct _lm_device_t *pdev, u32_t cid)
{
    const lm_cid_resc_t *resc = lm_cid_resc(pdev, cid);

    if CHK_NULL(resc)
    {
        return LM_CON_STATE_CLOSE;
    }

    return resc->con_state;
}
1267 
1268 
1269