
/*******************************************************************************
* bd_chain.h - bd chain interface
*******************************************************************************/
#ifndef _BD_CHAIN_H
#define _BD_CHAIN_H

/* The number of bd's per page including the last bd which is used as
 * a pointer to the next bd page. */
#define BD_PER_PAGE(bd_size)        (LM_PAGE_SIZE/(bd_size))

/* Number of bds that are used for the 'next' ptr. The next ptr is of constant size (sizeof(lm_bd_chain_next)); however,
 * we always work with 'full' bds, so if the bd-size is smaller than the next-ptr we will use several, and if it is
 * larger we will use a full one (no partial bds...). The expression 1 + ((next_bd_size - 1) / bd_size) gives the number
 * of bds we need for this purpose. */
#define NUM_BDS_USED_FOR_NEXT_PTR(bd_size,is_chain_mode) ((is_chain_mode)? (1 + ((sizeof(lm_bd_chain_next)-1) / (bd_size))): 0)

/* The number of usable bd's per page.  This number does not include the bds at the end of the page used for the 'next-bd' */
#define USABLE_BDS_PER_PAGE(bd_size,is_chain_mode)    ((u32_t) (BD_PER_PAGE(bd_size)-NUM_BDS_USED_FOR_NEXT_PTR(bd_size,is_chain_mode)))
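
/* Worked example (sizes assumed for illustration only, not taken from this file):
 * with LM_PAGE_SIZE = 4096 and bd_size = 16, BD_PER_PAGE is 256. Assuming
 * sizeof(lm_bd_chain_next) = 16, the next-ptr occupies 1 + (16-1)/16 = 1 bd, so
 * USABLE_BDS_PER_PAGE is 255 in chain mode. With bd_size = 8 the next-ptr spans
 * 1 + (16-1)/8 = 2 bds, leaving 510 of 512 usable. In non-chain (PBL) mode no bds
 * are reserved and every bd of the page is usable. */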


/* return number of available bds, i.e. _usable_ bds that have not been produced */
__inline static u16_t lm_bd_chain_avail_bds(lm_bd_chain_t* bd_chain)
{
    return bd_chain->bd_left;
}

/* return the cyclic prod idx */
__inline static u16_t lm_bd_chain_prod_idx(lm_bd_chain_t* bd_chain)
{
    return bd_chain->prod_idx;
}

/* return the cyclic cons idx */
__inline static u16_t lm_bd_chain_cons_idx(lm_bd_chain_t* bd_chain)
{
    return bd_chain->cons_idx;
}

/* return the usable_bds_per_page */
__inline static u16_t lm_bd_chain_usable_bds_per_page(lm_bd_chain_t* bd_chain)
{
    return bd_chain->usable_bds_per_page;
}

/* return the page_cnt */
__inline static u16_t lm_bd_chain_page_cnt(lm_bd_chain_t* bd_chain)
{
    return bd_chain->page_cnt;
}

/* return the bds_per_page */
__inline static u16_t lm_bd_chain_bds_per_page(lm_bd_chain_t* bd_chain)
{
    return bd_chain->bds_per_page;
}

/* return the bds_per_page_mask */
__inline static u16_t lm_bd_chain_bds_per_page_mask(lm_bd_chain_t* bd_chain)
{
    return bd_chain->bds_per_page_mask;
}

/* return the bds_skip_eop */
__inline static u16_t lm_bd_chain_bds_skip_eop(lm_bd_chain_t* bd_chain)
{
    return bd_chain->bds_skip_eop;
}

/* return empty state */
__inline static u8_t lm_bd_chain_is_empty(lm_bd_chain_t* bd_chain)
{
    return (bd_chain->bd_left == 0);
}

/* return full state */
__inline static u8_t lm_bd_chain_is_full(lm_bd_chain_t* bd_chain)
{
    return (bd_chain->bd_left == bd_chain->capacity);
}

/* returns the phys addr of the page of given page_idx. (page_idx >= 0) */
__inline static lm_address_t lm_bd_chain_phys_addr(lm_bd_chain_t* bd_chain, u8_t page_idx)
{
    lm_address_t mem_phys = bd_chain->bd_chain_phy;
    u8_t idx;

    page_idx = page_idx % bd_chain->page_cnt;

    if (bd_chain->b_is_chain_mode)
    {
        /* TODO: assumption that memory is contiguous.. */
        for(idx = 0; idx < page_idx; idx++)
        {
            /* Increment mem_phy to the next page. */
            LM_INC64(&mem_phys, LM_PAGE_SIZE);
        }
    }
    else
    {
        mem_phys = bd_chain->pbl_phys_addr_table[page_idx];
    }
    return mem_phys;
}


/*******************************************************************************
 * Description:
 * after allocating the ring, this func fixes the last BD pointers at the
 * end of each page to point to the first BD in the next page.
 * Return:
 ******************************************************************************/
__inline static void lm_bd_chain_set_next_ptrs(lm_bd_chain_t * bd_chain)
{
    lm_address_t start_mem_phy;
    lm_address_t mem_phy;
    lm_bd_chain_next * next_bd;
    u8_t *start_mem_virt;
    u8_t *mem_virt;
    u16_t idx;

    mem_virt = bd_chain->bd_chain_virt;
    mem_phy = bd_chain->bd_chain_phy;

    DbgBreakIf(
        ((u32_t) PTR_SUB(mem_virt, 0) & LM_PAGE_MASK) !=
            (mem_phy.as_u32.low & LM_PAGE_MASK));

    DbgBreakIf(!bd_chain->b_is_chain_mode);

    /* make sure all known 'next' bd structures match the lm_bd_chain_next layout */
    /* tx bd */
    ASSERT_STATIC(OFFSETOF(struct eth_tx_next_bd, addr_hi) == OFFSETOF(lm_bd_chain_next, addr_hi)) ;
    ASSERT_STATIC(OFFSETOF(struct eth_tx_next_bd, addr_lo) == OFFSETOF(lm_bd_chain_next, addr_lo)) ;
    ASSERT_STATIC(OFFSETOF(struct eth_tx_next_bd, reserved)== OFFSETOF(lm_bd_chain_next, reserved) ) ;

    /* rx bd */
    ASSERT_STATIC(OFFSETOF(struct eth_rx_bd_next_page, addr_hi) == OFFSETOF(lm_bd_chain_next, addr_hi)) ;
    ASSERT_STATIC(OFFSETOF(struct eth_rx_bd_next_page, addr_lo) == OFFSETOF(lm_bd_chain_next, addr_lo)) ;
    ASSERT_STATIC(OFFSETOF(struct eth_rx_bd_next_page, reserved)== OFFSETOF(lm_bd_chain_next, reserved) ) ;

    /* rcq */
    ASSERT_STATIC(OFFSETOF(struct eth_rx_cqe_next_page, addr_hi) == OFFSETOF(lm_bd_chain_next, addr_hi)) ;
    ASSERT_STATIC(OFFSETOF(struct eth_rx_cqe_next_page, addr_lo) == OFFSETOF(lm_bd_chain_next, addr_lo)) ;
    ASSERT_STATIC(OFFSETOF(struct eth_rx_cqe_next_page, reserved)== OFFSETOF(lm_bd_chain_next, reserved) ) ;

    /* Toe stuff */
    ASSERT_STATIC(OFFSETOF(struct toe_page_addr_bd, addr_hi) == OFFSETOF(lm_bd_chain_next, addr_hi)) ;
    ASSERT_STATIC(OFFSETOF(struct toe_page_addr_bd, addr_lo) == OFFSETOF(lm_bd_chain_next, addr_lo)) ;
    ASSERT_STATIC(OFFSETOF(struct toe_page_addr_bd, reserved)== OFFSETOF(lm_bd_chain_next, reserved) ) ;

    start_mem_phy = mem_phy;
    start_mem_virt = mem_virt;

    for(idx = 0; idx < bd_chain->page_cnt-1; idx++)
    {
        if CHK_NULL(mem_virt)
        {
            DbgBreakIfAll(!mem_virt) ;
            return ;
        }

        /* Increment mem_phy to the next page. */
        LM_INC64(&mem_phy, LM_PAGE_SIZE);

        /* Initialize the physical address of the next bd chain. */
        next_bd = (lm_bd_chain_next *)(mem_virt + (bd_chain->bd_size) * (bd_chain->usable_bds_per_page));

        next_bd->addr_hi = mm_cpu_to_le32(mem_phy.as_u32.high);
        next_bd->addr_lo = mm_cpu_to_le32(mem_phy.as_u32.low);

        /* Initialize the virtual address of the next bd chain. */
        *((u8_t **) next_bd->reserved) =  mem_virt + LM_PAGE_SIZE;

        /* Move to the next bd chain. */
        mem_virt += LM_PAGE_SIZE;
    }

    /* Close the ring: point the last page back at the first. */
    next_bd = (lm_bd_chain_next *)(mem_virt + (bd_chain->bd_size) * (bd_chain->usable_bds_per_page));
    next_bd->addr_hi = mm_cpu_to_le32(start_mem_phy.as_u32.high);
    next_bd->addr_lo = mm_cpu_to_le32(start_mem_phy.as_u32.low);
    *((u8_t **) next_bd->reserved) = start_mem_virt;
} /* lm_bd_chain_set_next_ptrs */
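
/* Resulting layout (sketch): every page reserves its last bd(s) as a 'next' bd whose
 * addr_hi/addr_lo hold the little-endian physical address of the following page and
 * whose 'reserved' bytes cache that page's virtual address; the last page points back
 * to the first, closing the ring:
 *
 *   page 0 [bd0 .. bdN | next -> page 1]
 *   page 1 [bd0 .. bdN | next -> page 2]
 *   ...
 *   page k [bd0 .. bdN | next -> page 0]
 */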

/* setup bd chain.
 * - currently only physically contiguous chain format is supported */

unsigned long log2_align(unsigned long n);

__inline static lm_status_t lm_bd_chain_add_page(
    struct _lm_device_t *pdev,
    lm_bd_chain_t*       bd_chain,
    void                *mem_virt,  /* ptr to caller pre-allocated buffer */
    lm_address_t         mem_phys,   /* phys addr of buffer */
    u8_t                 bd_size,    /* currently only 8 and 16 bytes are possible */
    u8_t                 is_chain_mode) /* Is the next pointer the last entry */
{

    lm_bd_chain_next * next_bd;

    UNREFERENCED_PARAMETER_(pdev);

    DbgBreakIf((bd_chain->page_cnt + 1) * BD_PER_PAGE(bd_size) > 0xffff);
    if (is_chain_mode)
    {
        if (bd_chain->page_cnt) {
            u16_t page_index;
            DbgBreakIf(bd_chain->bd_size != bd_size);
            /* walk to the current tail page's next-bd */
            next_bd = (lm_bd_chain_next *)((u8_t*)bd_chain->bd_chain_virt + (bd_chain->bd_size) * (bd_chain->usable_bds_per_page));
            for (page_index = 0; page_index < bd_chain->page_cnt - 1; page_index++) {
                next_bd = (lm_bd_chain_next *)((u8_t*)(*(void **)(next_bd->reserved)) + (bd_chain->bd_size) * (bd_chain->usable_bds_per_page));
            }
            /* link the old tail to the new page, and the new page back to the head */
            next_bd->addr_hi = mm_cpu_to_le32(mem_phys.as_u32.high);
            next_bd->addr_lo = mm_cpu_to_le32(mem_phys.as_u32.low);
            *((u8_t **) next_bd->reserved) =  mem_virt;
            next_bd = (lm_bd_chain_next *)((u8_t*)mem_virt + (bd_chain->bd_size) * (bd_chain->usable_bds_per_page));
            next_bd->addr_hi = mm_cpu_to_le32(bd_chain->bd_chain_phy.as_u32.high);
            next_bd->addr_lo = mm_cpu_to_le32(bd_chain->bd_chain_phy.as_u32.low);
            *((u8_t **) next_bd->reserved) =  bd_chain->bd_chain_virt;
        } else {
            bd_chain->bd_chain_phy = mem_phys;
            bd_chain->bd_chain_virt = mem_virt;
            bd_chain->bd_size = bd_size;
            bd_chain->bds_skip_eop = NUM_BDS_USED_FOR_NEXT_PTR(bd_size,is_chain_mode);
            bd_chain->usable_bds_per_page = USABLE_BDS_PER_PAGE(bd_size,is_chain_mode);
            bd_chain->bds_per_page = BD_PER_PAGE(bd_size);
            bd_chain->b_is_chain_mode = TRUE;
            bd_chain->num_bd_to_sub   = 0;
            bd_chain->usable_bds_mask = bd_chain->usable_bds_per_page;

            /* we assume power of 2 for bd_chain->bds_per_page */
            DbgBreakIf(bd_chain->bds_per_page != log2_align((u32_t)bd_chain->bds_per_page));
            bd_chain->bds_per_page_mask = bd_chain->bds_per_page - 1;
            bd_chain->cons_idx = 0;
            bd_chain->prod_idx = 0;
            bd_chain->next_bd = bd_chain->bd_chain_virt;
            /* Initialize the physical address of the next bd chain. */
            next_bd = (lm_bd_chain_next *)((u8_t*)mem_virt + (bd_chain->bd_size) * (bd_chain->usable_bds_per_page));

            next_bd->addr_hi = mm_cpu_to_le32(mem_phys.as_u32.high);
            next_bd->addr_lo = mm_cpu_to_le32(mem_phys.as_u32.low);

            /* Initialize the virtual address of the next bd chain. */
            *((u8_t **) next_bd->reserved) =  mem_virt;
        }
    }
    else
    {
        //TODO: currently TOE only, implement for PBL
        //      add the physical address of the page to the next pbl_page_idx
        //      ensure that the pbl_virt in this case is valid..
        DbgBreak();
    }

    bd_chain->page_cnt++;
    bd_chain->capacity = bd_chain->page_cnt * bd_chain->usable_bds_per_page;
    bd_chain->bd_left = bd_chain->capacity;

    return LM_STATUS_SUCCESS;
}
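
/* Typical usage (a sketch; the allocation call and its arguments are assumptions,
 * not taken from this file): pages are appended one at a time as they are
 * allocated, e.g.
 *
 *     for (i = 0; i < num_pages; i++) {
 *         mem_virt = mm_alloc_phys_mem(pdev, LM_PAGE_SIZE, &mem_phys, ...);
 *         lm_bd_chain_add_page(pdev, &chain, mem_virt, mem_phys, bd_size, TRUE);
 *     }
 *
 * Each call re-links the previous tail page to the new page and the new page back to
 * the head, so the ring stays closed after every addition. Note bd_left is reset to
 * the full new capacity, so all pages should be added before the chain is used. */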

__inline static lm_status_t lm_bd_chain_setup(
    struct _lm_device_t *pdev,
    lm_bd_chain_t*       bd_chain,
    void                *mem_virt,  /* ptr to caller pre-allocated buffer */
    lm_address_t         mem_phys,   /* phys addr of buffer */
    u16_t                page_cnt,   /* #pages in given buffer */
    u8_t                 bd_size,    /* currently only 8 and 16 bytes are possible */
    u8_t                 is_full,   /* chain initial state (full or empty) */
    u8_t                 is_chain_mode) /* Is the next pointer the last entry */
{
    DbgBreakIf(page_cnt * BD_PER_PAGE(bd_size) > 0xffff);

    UNREFERENCED_PARAMETER_(pdev);

    bd_chain->bd_chain_phy = mem_phys;
    bd_chain->bd_chain_virt = mem_virt;
    bd_chain->bd_size = bd_size;
    bd_chain->bds_skip_eop = NUM_BDS_USED_FOR_NEXT_PTR(bd_size,is_chain_mode);
    bd_chain->usable_bds_per_page = USABLE_BDS_PER_PAGE(bd_size,is_chain_mode);
    bd_chain->bds_per_page = BD_PER_PAGE(bd_size);

    /* we assume power of 2 for bd_chain->bds_per_page */
    DbgBreakIf(bd_chain->bds_per_page != log2_align((u32_t)bd_chain->bds_per_page));
    bd_chain->bds_per_page_mask = bd_chain->bds_per_page - 1;

#ifdef __SunOS
    /*
     * This minor code change fixes a compiler error in SunStudio 12u1.  The
     * bug is that an "imulw $-0x80,..." is generated which wrecks the capacity
     * value specifically when initializing the FCoE EQ chain.  Shifting code
     * around and/or removing the deep inline access to this function will fix
     * the issue but would be a kludge.  Note that I've created this ifdef to
     * ensure someone doesn't come in later and merge these two lines together
     * thereby reverting it to what it was before.
     */
    bd_chain->capacity = page_cnt;
    bd_chain->capacity *= bd_chain->usable_bds_per_page;
#else
    bd_chain->capacity = page_cnt * bd_chain->usable_bds_per_page;
#endif
    bd_chain->page_cnt = page_cnt;
    bd_chain->next_bd = bd_chain->bd_chain_virt;
    bd_chain->cons_idx = 0;

    if(is_full) {
        bd_chain->prod_idx = page_cnt * bd_chain->bds_per_page;
        bd_chain->bd_left = 0;
    } else {
        bd_chain->prod_idx = 0;
        /* Don't count the last bd of a BD page.  A full BD chain must
         * have at least one empty entry.  */
        bd_chain->bd_left = bd_chain->capacity;
    }
    if(is_chain_mode)
    {
        bd_chain->b_is_chain_mode = TRUE;
        bd_chain->num_bd_to_sub   = 0;
        bd_chain->usable_bds_mask = bd_chain->usable_bds_per_page;
        lm_bd_chain_set_next_ptrs(bd_chain);
    }

    return LM_STATUS_SUCCESS;
}
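
/* Worked example (values assumed for illustration only): with LM_PAGE_SIZE = 4096,
 * bd_size = 16, page_cnt = 4, is_chain_mode = TRUE and sizeof(lm_bd_chain_next) = 16:
 * bds_per_page = 256, usable_bds_per_page = 255, bds_per_page_mask = 0xff and
 * capacity = 4 * 255 = 1020. An empty chain starts with prod_idx = cons_idx = 0 and
 * bd_left = 1020; a full one starts with prod_idx = 4 * 256 = 1024 and bd_left = 0. */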

__inline static lm_status_t lm_bd_chain_pbl_set_ptrs(
    IN  void         *buf_base_virt,    /* ptr to caller pre-allocated buffer */
    IN  lm_address_t buf_base_phy,      /* phys addr of the pre-allocated buffer */
    IN  lm_address_t *pbl_phys_table,   /* ptr to caller pre-allocated buffer of phys pbl */
    IN  void         *pbl_virt_table,   /* ptr to caller pre-allocated buffer of virt pbl */
    IN  u32_t         pbl_entries       /* #pages in given buffer */
    )
{
    u32_t i;

    if (CHK_NULL(buf_base_virt) ||
        CHK_NULL(pbl_phys_table) ||
        CHK_NULL(pbl_virt_table) ||
        (pbl_entries == 0))
    {
        return LM_STATUS_INVALID_PARAMETER;
    }

    /* fill page table elements */
    for (i = 0; i < pbl_entries; i++)
    {
#ifdef BIG_ENDIAN
        pbl_phys_table[i].as_u32.low = mm_cpu_to_le32(buf_base_phy.as_u32.high);
        pbl_phys_table[i].as_u32.high = mm_cpu_to_le32(buf_base_phy.as_u32.low);
#else // LITTLE_ENDIAN
        pbl_phys_table[i].as_u64 = buf_base_phy.as_u64;
#endif

        *(void **)(((u8_t *)pbl_virt_table + (sizeof(void *) * i))) = buf_base_virt;

        /* Increment mem_phy to the next page. */
        /* TODO: assumption that memory is contiguous.. */
        LM_INC64(&buf_base_phy, LM_PAGE_SIZE);

        buf_base_virt = (u8_t *)buf_base_virt + LM_PAGE_SIZE;
    }

    return LM_STATUS_SUCCESS;
}


__inline static lm_status_t lm_bd_chain_pbl_setup(
    struct _lm_device_t *pdev,
    lm_bd_chain_t*       bd_chain,
    void                *mem_virt,           /* ptr to caller pre-allocated buffer */
    lm_address_t         mem_phys,           /* phys addr of buffer */
    void                *pbl_virt_table,     /* ptr to caller pre-allocated buffer of virt pbl */
    lm_address_t        *pbl_phys_table,     /* ptr to caller pre-allocated buffer of phys pbl */
    u16_t                page_cnt,           /* #pages in given buffer */
    u8_t                 bd_size,            /* currently only 8 and 16 bytes are possible */
    u8_t                 is_full)            /* chain initial state (full or empty) */
{
    lm_status_t lm_status;

    lm_status = lm_bd_chain_setup(pdev,
                                  bd_chain,
                                  mem_virt,
                                  mem_phys,
                                  page_cnt,
                                  bd_size,
                                  is_full,
                                  FALSE);
    if (lm_status != LM_STATUS_SUCCESS)
    {
        return lm_status;
    }

    //assign additional pbl members
    bd_chain->pbl_phys_addr_table = pbl_phys_table;
    bd_chain->pbl_virt_addr_table = pbl_virt_table;
    bd_chain->b_is_chain_mode     = FALSE;
    bd_chain->num_bd_to_sub       = 1;
    bd_chain->usable_bds_mask     = bd_chain->usable_bds_per_page - 1;
    // Upon the first bd consume or produce, the page will be advanced,
    // so set the initial page index to the last one
    bd_chain->pbe_idx             = page_cnt - 1;

    lm_status = lm_bd_chain_pbl_set_ptrs(mem_virt,
                                         mem_phys,
                                         bd_chain->pbl_phys_addr_table,
                                         bd_chain->pbl_virt_addr_table,
                                         page_cnt);
    if (lm_status != LM_STATUS_SUCCESS)
    {
        return lm_status;
    }

    return LM_STATUS_SUCCESS;
}
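
/* Note (an observation on the code above, not a documented contract): in PBL mode no
 * bds are reserved for a next-ptr, so every bd of a page is usable and a page is
 * advanced exactly when the low bits of the (already incremented) index equal
 * usable_bds_mask; num_bd_to_sub = 1 backs the index up to the bd just handled before
 * that test. In chain mode num_bd_to_sub is 0 and the mask test instead hits the
 * index of the next-ptr bd itself, which is then skipped via bds_skip_eop. */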

/** Description
 *  Function resets a bd chain: initializes the bds to 'all zeros'.
 *  The chain remains valid though (the last bd of each page still points to the next
 *  page of the bd chain).
 */
__inline static void lm_bd_chain_reset(struct _lm_device_t * pdev, lm_bd_chain_t * bd_chain)
{
    DbgBreakIf(!bd_chain->bd_chain_virt);
    /* FIXME: assumption that memory is contiguous.. */
    mm_memset(bd_chain->bd_chain_virt, 0, bd_chain->page_cnt * LM_PAGE_SIZE);
    if (bd_chain->b_is_chain_mode)
    {
        lm_bd_chain_setup(pdev,
                          bd_chain,
                          bd_chain->bd_chain_virt,
                          bd_chain->bd_chain_phy,
                          bd_chain->page_cnt,
                          bd_chain->bd_size,
                          FALSE,
                          bd_chain->b_is_chain_mode);
    }
    else
    {
        lm_bd_chain_pbl_setup(pdev,
                              bd_chain,
                              bd_chain->bd_chain_virt,
                              bd_chain->bd_chain_phy,
                              bd_chain->pbl_virt_addr_table,
                              bd_chain->pbl_phys_addr_table,
                              bd_chain->page_cnt,
                              bd_chain->bd_size,
                              FALSE);
    }
}

/* Receives a bd_idx and a pointer to a bd, and increments them.
 * The physical address is the physical address of the base of the page.
 * Assumptions:
 * - virt is initialized with the virtual address of the current bd
 * - phys is initialized with the physical address of the current page
 */
__inline static void lm_bd_chain_incr_bd(
    lm_bd_chain_t     * bd_chain,
    lm_address_t      * phys,
    void             ** virt,
    u16_t             * bd_idx)
{

    (*bd_idx)++;
    *virt = ((char *)*virt) + bd_chain->bd_size;

    if((*bd_idx & bd_chain->usable_bds_per_page) == bd_chain->usable_bds_per_page) {
        if (bd_chain->b_is_chain_mode) {
            lm_bd_chain_next *next_bd = (lm_bd_chain_next *)(*virt);
            (*bd_idx) += bd_chain->bds_skip_eop;
            *virt = *(void **)(next_bd->reserved);
            phys->as_u32.high = next_bd->addr_hi;
            phys->as_u32.low  = next_bd->addr_lo;
        } else {
            //TODO: currently TOE only, implement for PBL
            DbgBreak();
        }
    }

}

__inline static void lm_bd_advance_page(lm_bd_chain_t* bd_chain, u16_t *idx_to_inc)
{
    if (bd_chain->b_is_chain_mode)
    {
        lm_bd_chain_next *next_bd = (lm_bd_chain_next *)bd_chain->next_bd;
        bd_chain->next_bd = *(void **)(next_bd->reserved);
        *idx_to_inc += bd_chain->bds_skip_eop;
    }
    else
    {
        bd_chain->pbe_idx++;
        if (bd_chain->pbe_idx == bd_chain->page_cnt) {
            bd_chain->pbe_idx = 0;
        }
        bd_chain->next_bd = *(void **)((u8_t *)bd_chain->pbl_virt_addr_table + (sizeof(void *) * bd_chain->pbe_idx));
    }
}
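
/* In chain mode the page hop follows the next-ptr bd embedded in the page itself, and
 * the caller's index is advanced past the bds_skip_eop next-ptr entries; in PBL mode
 * the next page's virtual address is looked up in pbl_virt_addr_table instead, so only
 * pbe_idx wraps and the caller's index needs no adjustment. */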

/*******************************************************************************
* API For a bd-chain that the driver "Produces"
*******************************************************************************/

/* update bds availability.
 * - nbds - number of _usable_ consumed bds
 * - NOTE: the chain consumer idx+pointer are not maintained! */
__inline static void lm_bd_chain_bds_consumed(lm_bd_chain_t* bd_chain, u16_t nbds)
{
    bd_chain->bd_left += nbds;
    DbgBreakIfFastPath(bd_chain->bd_left > bd_chain->capacity);
}

/* returns ptr to next _usable_ bd to be produced,
 * decreases bds availability by 1, and updates prod idx.
 * NOTE: special case for TOE: prod idx jumps to the next page only when the first bd of the next page is produced */
__inline static void *lm_toe_bd_chain_produce_bd(lm_bd_chain_t* bd_chain)
{
    void *ret_bd = NULL;
    u16_t prod_idx = 0;

    DbgBreakIf(!bd_chain->bd_left);

    prod_idx = bd_chain->prod_idx - bd_chain->num_bd_to_sub;
    if((prod_idx & bd_chain->usable_bds_mask) == bd_chain->usable_bds_mask) {
        lm_bd_advance_page(bd_chain, &bd_chain->prod_idx);
    }

    ret_bd = bd_chain->next_bd;
    bd_chain->bd_left--;
    bd_chain->prod_idx++;
    bd_chain->next_bd += bd_chain->bd_size;

    return ret_bd;
}

/* returns ptr to next _usable_ bd to be produced,
 * decreases bds availability by 1, and updates prod idx.
 */
__inline static void *lm_bd_chain_produce_bd(lm_bd_chain_t* bd_chain)
{
    void *ret_bd = NULL;
    u16_t prod_idx = 0;

    DbgBreakIfFastPath(!bd_chain->bd_left);

    ret_bd = bd_chain->next_bd;
    bd_chain->bd_left--;
    bd_chain->prod_idx++;
    bd_chain->next_bd += bd_chain->bd_size;

    prod_idx = bd_chain->prod_idx - bd_chain->num_bd_to_sub;
    if((prod_idx & bd_chain->usable_bds_mask) == bd_chain->usable_bds_mask) {
        lm_bd_advance_page(bd_chain, &bd_chain->prod_idx);
    }

    return ret_bd;
}
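
/* Usage sketch (illustrative only: the bd struct, its fields and one-descriptor-per-
 * buffer filling are assumptions, not taken from this file):
 *
 *     struct eth_rx_bd *bd = (struct eth_rx_bd *)lm_bd_chain_produce_bd(&chain);
 *     bd->addr_hi = mm_cpu_to_le32(pkt_phys.as_u32.high);
 *     bd->addr_lo = mm_cpu_to_le32(pkt_phys.as_u32.low);
 *
 * Note that produce_bd already decrements bd_left and advances prod_idx, so it must
 * not be combined with lm_bd_chain_bds_produced() below for the same bds; the latter
 * updates availability and prod idx in bulk for bds the caller filled directly. */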


/*******************************************************************************
* API For a bd-chain that the driver "Consumes"
*******************************************************************************/

/* returns ptr to next _usable_ bd to be consumed,
 * increases bds availability by 1, and updates cons idx.
 * NOTE: cons idx jumps to the next page only when the first bd of the next page is consumed */
__inline static void *lm_toe_bd_chain_consume_bd(lm_bd_chain_t* bd_chain)
{
    void *ret_bd = NULL;
    u16_t cons_idx = 0;

    DbgBreakIf(bd_chain->bd_left == bd_chain->capacity);

    cons_idx = bd_chain->cons_idx - bd_chain->num_bd_to_sub;
    if((cons_idx & bd_chain->usable_bds_mask) == bd_chain->usable_bds_mask) {
        lm_bd_advance_page(bd_chain, &bd_chain->cons_idx);
    }
    ret_bd = bd_chain->next_bd;

    bd_chain->bd_left++;
    bd_chain->cons_idx++;
    bd_chain->next_bd += bd_chain->bd_size;

    return ret_bd;
}

__inline static void *lm_bd_chain_consume_bd(lm_bd_chain_t* bd_chain)
{
    void *ret_bd = NULL;
    u16_t cons_idx = 0;

    DbgBreakIfFastPath(bd_chain->bd_left == bd_chain->capacity);

    ret_bd = bd_chain->next_bd;

    bd_chain->bd_left++;
    bd_chain->cons_idx++;
    bd_chain->next_bd += bd_chain->bd_size;

    cons_idx = bd_chain->cons_idx - bd_chain->num_bd_to_sub;
    if((cons_idx & bd_chain->usable_bds_mask) == bd_chain->usable_bds_mask) {
        lm_bd_advance_page(bd_chain, &bd_chain->cons_idx);
    }

    return ret_bd;
}

/* returns a bd only if it is contiguous to the previously requested bd, otherwise NULL.
 * The algorithm relies on the fact that we don't double-increment the consumer when we
 * reach the end of a page: the call made when next_bd points to the last bd of a page
 * recognizes that next_bd is no longer contiguous, returns NULL and moves forward to
 * the next page. The next call will then return the next bd...
 */
__inline static void *lm_bd_chain_consume_bd_contiguous(lm_bd_chain_t* bd_chain)
{
    void *ret_bd = NULL;
    u16_t cons_idx = 0;

    DbgBreakIf(bd_chain->bd_left == bd_chain->capacity);

    cons_idx = bd_chain->cons_idx - bd_chain->num_bd_to_sub;
    if((cons_idx & bd_chain->usable_bds_mask) == bd_chain->usable_bds_mask) {
        lm_bd_advance_page(bd_chain, &bd_chain->cons_idx);

        return NULL; /* we've just skipped the last bd... */
    }

    ret_bd = bd_chain->next_bd;

    bd_chain->bd_left++;
    bd_chain->cons_idx++;
    bd_chain->next_bd += bd_chain->bd_size;

    return ret_bd;
}



/* update bds availability and prod idx.
 * - nbds - number of _usable_ produced bds
 * Special case for TOE: the producer needs to be increased only if we've moved to the next page...  */
__inline static void lm_toe_bd_chain_bds_produced(lm_bd_chain_t* bd_chain, u16_t nbds)
{
    u16_t nbds_mod_usable_bds;
    u8_t next_bds = 0;

    DbgBreakIfFastPath(bd_chain->bd_left < nbds);
    bd_chain->bd_left -= nbds;

    /* perform the operation "nbds % bd_chain->usable_bds_per_page" manually
     * (in order to avoid an explicit modulo operation that leads to the very
     * expensive IDIV asm instruction) */
    nbds_mod_usable_bds = nbds;
    while (nbds_mod_usable_bds >= bd_chain->usable_bds_per_page)
    {
        nbds_mod_usable_bds -= bd_chain->usable_bds_per_page;
    }

    /* calculate the number of _next_ bds passed */
    next_bds += nbds / bd_chain->usable_bds_per_page;
    if(next_bds && ((bd_chain->prod_idx & bd_chain->bds_per_page_mask) == 0)) {
        next_bds--; /* special care here, this next bd will be counted only next time bds are produced */
    }
    if((bd_chain->prod_idx & bd_chain->bds_per_page_mask) + nbds_mod_usable_bds > bd_chain->usable_bds_per_page) {
        next_bds++;
    }

    /* update prod idx */
    bd_chain->prod_idx += nbds + next_bds * bd_chain->bds_skip_eop;

    DbgBreakIfFastPath((bd_chain->prod_idx & bd_chain->bds_per_page_mask) > bd_chain->usable_bds_per_page); /* assertion relevant to 8b bd chain */
    DbgBreakIfFastPath((bd_chain->prod_idx & bd_chain->bds_per_page_mask) == 0); /* GilR 5/13/2006 - this is currently the agreement with FW */
}

/* update bds availability and prod idx.
 * - nbds - number of _usable_ produced bds */
__inline static void lm_bd_chain_bds_produced(lm_bd_chain_t* bd_chain, u16_t nbds)
{
    u16_t nbds_mod_usable_bds;
    u8_t next_bds = 0;

    DbgBreakIfFastPath(bd_chain->bd_left < nbds);
    bd_chain->bd_left -= nbds;

    /* perform the operation "nbds % bd_chain->usable_bds_per_page" manually
     * (in order to avoid an explicit modulo operation that leads to the very
     * expensive IDIV asm instruction) */
    nbds_mod_usable_bds = nbds;
    while (nbds_mod_usable_bds >= bd_chain->usable_bds_per_page)
    {
        nbds_mod_usable_bds -= bd_chain->usable_bds_per_page;
    }

    /* calculate the number of _next_ bds passed */
    next_bds += nbds / bd_chain->usable_bds_per_page;
    if((bd_chain->prod_idx & bd_chain->bds_per_page_mask) + nbds_mod_usable_bds > bd_chain->usable_bds_per_page) {
        next_bds++;
    }

    /* update prod idx */
    bd_chain->prod_idx += nbds + next_bds * bd_chain->bds_skip_eop;
}
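
/* Worked example (numbers assumed for illustration: usable_bds_per_page = 255,
 * bds_per_page = 256, bds_skip_eop = 1): producing nbds = 300 from prod_idx = 100
 * gives nbds / 255 = 1 and nbds_mod_usable_bds = 45; since (100 & 0xff) + 45 <= 255
 * no extra boundary is crossed, so next_bds = 1 and prod_idx advances by 300 + 1 to
 * 401: bds 100..254 of page 0, the next-ptr bd at 255 skipped, then bds 256..400. */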

/* lm_bd_chain_bd_produced -
 * a performance-optimized version of lm_bd_chain_bds_produced:
 * updates bds availability and prod idx when only one bd is produced.
 */
__inline static void lm_bd_chain_bd_produced(lm_bd_chain_t* bd_chain)
{
    DbgBreakIfFastPath(bd_chain->bd_left < 1);
    bd_chain->bd_left--;

    /* if we passed a _next_ bd, increase prod_idx accordingly */
    if((bd_chain->prod_idx & bd_chain->bds_per_page_mask) + 1 > bd_chain->usable_bds_per_page) {
        bd_chain->prod_idx += bd_chain->bds_skip_eop;
    }

    /* update prod idx for the produced bd */
    bd_chain->prod_idx++;
}

/* TRUE if all params in the bd_chains are equal except the pointers */
__inline static u8_t lm_bd_chains_are_consistent( lm_bd_chain_t* bd_chain,
                                                  lm_bd_chain_t* bd_chain2 )
{
    const u32_t cmp_size = OFFSETOF(lm_bd_chain_t, reserved) - OFFSETOF(lm_bd_chain_t, page_cnt) ;
    u8_t        b_ret    = 0;

    ASSERT_STATIC( OFFSETOF(lm_bd_chain_t, page_cnt) < OFFSETOF(lm_bd_chain_t, reserved)) ;

    b_ret = mm_memcmp( (u8_t*)bd_chain + OFFSETOF(lm_bd_chain_t, page_cnt),
                       (u8_t*)bd_chain2 + OFFSETOF(lm_bd_chain_t, page_cnt),
                       cmp_size );

    return b_ret;
}

#endif /* _BD_CHAIN_H */