/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifndef __ECORE_CHAIN_H__
#define __ECORE_CHAIN_H__

#include "common_hsi.h"
#include "ecore_utils.h"

enum ecore_chain_mode
{
	/* Each Page contains a next pointer at its end */
	ECORE_CHAIN_MODE_NEXT_PTR,

	/* Chain is a single page; a next pointer is not required */
	ECORE_CHAIN_MODE_SINGLE,

	/* Page pointers are located in a side list */
	ECORE_CHAIN_MODE_PBL,
};

enum ecore_chain_use_mode
{
	ECORE_CHAIN_USE_TO_PRODUCE,		/* Chain starts empty */
	ECORE_CHAIN_USE_TO_CONSUME,		/* Chain starts full */
	ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,	/* Chain starts empty */
};

enum ecore_chain_cnt_type {
	/* The chain's size/prod/cons are kept in 16-bit variables */
	ECORE_CHAIN_CNT_TYPE_U16,

	/* The chain's size/prod/cons are kept in 32-bit variables */
	ECORE_CHAIN_CNT_TYPE_U32,
};

struct ecore_chain_next
{
	struct regpair	next_phys;
	void		*next_virt;
};

struct ecore_chain_pbl_u16 {
	u16	prod_page_idx;
	u16	cons_page_idx;
};

struct ecore_chain_pbl_u32 {
	u32	prod_page_idx;
	u32	cons_page_idx;
};

struct ecore_chain_ext_pbl
{
	dma_addr_t	p_pbl_phys;
	void		*p_pbl_virt;
};

struct ecore_chain_u16 {
	/* Cyclic index of next element to produce/consume */
	u16	prod_idx;
	u16	cons_idx;
};

struct ecore_chain_u32 {
	/* Cyclic index of next element to produce/consume */
	u32	prod_idx;
	u32	cons_idx;
};

struct ecore_chain
{
	/* Fastpath portion of the chain - required for commands such
	 * as produce / consume.
	 */

	/* Point to next element to produce/consume */
	void	*p_prod_elem;
	void	*p_cons_elem;

	/* Fastpath portions of the PBL [if exists] */
	struct {
		/* Table for keeping the virtual addresses of the chain pages,
		 * respectively to the physical addresses in the pbl table.
		 */
		void		**pp_virt_addr_tbl;

		union {
			struct ecore_chain_pbl_u16	pbl_u16;
			struct ecore_chain_pbl_u32	pbl_u32;
		} c;
	} pbl;

	union {
		struct ecore_chain_u16	chain16;
		struct ecore_chain_u32	chain32;
	} u;

	/* Capacity counts only usable elements */
	u32	capacity;
	u32	page_cnt;

	/* A u8 would suffice for mode, but keeping the enum type saves us a
	 * lot of headaches on castings & defaults.
	 */
	enum ecore_chain_mode	mode;

	/* Elements information for fast calculations */
	u16	elem_per_page;
	u16	elem_per_page_mask;
	u16	elem_size;
	u16	next_page_mask;
	u16	usable_per_page;
	u8	elem_unusable;

	u8	cnt_type;

	/* Slowpath of the chain - required for initialization and destruction,
	 * but isn't involved in regular functionality.
	 */

	/* Base address of a pre-allocated buffer for pbl */
	struct {
		dma_addr_t	p_phys_table;
		void		*p_virt_table;
	} pbl_sp;

	/* Address of first page of the chain - the address is required
	 * for fastpath operation [consume/produce] but only for the SINGLE
	 * flavour which isn't considered fastpath [== SPQ].
	 */
	void		*p_virt_addr;
	dma_addr_t	p_phys_addr;

	/* Total number of elements [for entire chain] */
	u32	size;

	u8	intended_use;

	/* TBD - do we really need this? Couldn't find usage for it */
	bool	b_external_pbl;

	void	*dp_ctx;
};

#define ECORE_CHAIN_PBL_ENTRY_SIZE	(8)
#define ECORE_CHAIN_PAGE_SIZE		(0x1000)
#define ELEMS_PER_PAGE(elem_size)	(ECORE_CHAIN_PAGE_SIZE/(elem_size))

#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)		\
	((mode == ECORE_CHAIN_MODE_NEXT_PTR) ?			\
	 (u8)(1 + ((sizeof(struct ecore_chain_next)-1) /	\
	 (elem_size))) : 0)

#define USABLE_ELEMS_PER_PAGE(elem_size, mode)			\
	((u32)(ELEMS_PER_PAGE(elem_size) -			\
	       UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))

#define ECORE_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode)		\
	DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))

#define is_chain_u16(p)	((p)->cnt_type == ECORE_CHAIN_CNT_TYPE_U16)
#define is_chain_u32(p)	((p)->cnt_type == ECORE_CHAIN_CNT_TYPE_U32)
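
/* Worked example for the page-geometry macros above (illustrative only;
 * it assumes a hypothetical 8-byte element and a 64-bit build on which
 * sizeof(struct ecore_chain_next) is 16 bytes):
 *
 *	ELEMS_PER_PAGE(8)                         = 0x1000 / 8     = 512
 *	UNUSABLE_ELEMS_PER_PAGE(8, NEXT_PTR mode) = 1 + (16 - 1)/8 = 2
 *	USABLE_ELEMS_PER_PAGE(8, NEXT_PTR mode)   = 512 - 2        = 510
 *	ECORE_CHAIN_PAGE_CNT(1020, 8, NEXT_PTR)   = 1020 / 510     = 2 pages
 *
 * For SINGLE and PBL modes no next-pointer element is reserved, so all 512
 * elements per page are usable. This is also why "capacity"
 * (usable_per_page * page_cnt) can be smaller than "size"
 * (elem_per_page * page_cnt) for NEXT_PTR chains.
 */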

/* Accessors */
static OSAL_INLINE u16 ecore_chain_get_prod_idx(struct ecore_chain *p_chain)
{
	OSAL_ASSERT(is_chain_u16(p_chain));
	return p_chain->u.chain16.prod_idx;
}

#ifndef LINUX_REMOVE
static OSAL_INLINE u32 ecore_chain_get_prod_idx_u32(struct ecore_chain *p_chain)
{
	OSAL_ASSERT(is_chain_u32(p_chain));
	return p_chain->u.chain32.prod_idx;
}
#endif

static OSAL_INLINE u16 ecore_chain_get_cons_idx(struct ecore_chain *p_chain)
{
	OSAL_ASSERT(is_chain_u16(p_chain));
	return p_chain->u.chain16.cons_idx;
}

static OSAL_INLINE u32 ecore_chain_get_cons_idx_u32(struct ecore_chain *p_chain)
{
	OSAL_ASSERT(is_chain_u32(p_chain));
	return p_chain->u.chain32.cons_idx;
}

/* FIXME:
 * Should create OSALs for the below definitions.
 * For Linux, replace them with the existing U16_MAX and U32_MAX, and handle
 * kernel versions that lack them.
 */
#define ECORE_U16_MAX	((u16)~0U)
#define ECORE_U32_MAX	((u32)~0U)

static OSAL_INLINE u16 ecore_chain_get_elem_left(struct ecore_chain *p_chain)
{
	u16 used;

	OSAL_ASSERT(is_chain_u16(p_chain));

	used = (u16)(((u32)ECORE_U16_MAX + 1 +
		      (u32)(p_chain->u.chain16.prod_idx)) -
		     (u32)p_chain->u.chain16.cons_idx);
	if (p_chain->mode == ECORE_CHAIN_MODE_NEXT_PTR)
		used -= (((u32)ECORE_U16_MAX + 1) / p_chain->elem_per_page +
			 p_chain->u.chain16.prod_idx / p_chain->elem_per_page -
			 p_chain->u.chain16.cons_idx / p_chain->elem_per_page) %
			p_chain->page_cnt;

	return (u16)(p_chain->capacity - used);
}

static OSAL_INLINE u32
ecore_chain_get_elem_left_u32(struct ecore_chain *p_chain)
{
	u32 used;

	OSAL_ASSERT(is_chain_u32(p_chain));

	used = (u32)(((u64)ECORE_U32_MAX + 1 +
		      (u64)(p_chain->u.chain32.prod_idx)) -
		     (u64)p_chain->u.chain32.cons_idx);
	if (p_chain->mode == ECORE_CHAIN_MODE_NEXT_PTR)
		used -= (((u64)ECORE_U32_MAX + 1) / p_chain->elem_per_page +
			 p_chain->u.chain32.prod_idx / p_chain->elem_per_page -
			 p_chain->u.chain32.cons_idx / p_chain->elem_per_page) %
			p_chain->page_cnt;

	return p_chain->capacity - used;
}
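
/* Worked example for ecore_chain_get_elem_left() above (illustrative only;
 * the numbers are hypothetical). For a u16 PBL chain of 2 pages of 512
 * elements (capacity 1024) whose producer has already wrapped the 16-bit
 * index:
 *
 *	prod_idx = 5, cons_idx = 65533
 *	used = (u16)((65536 + 5) - 65533) = 8
 *	left = 1024 - 8                   = 1016
 *
 * In ECORE_CHAIN_MODE_NEXT_PTR the function additionally discounts the
 * per-page next-pointer slots that the indices have skipped over.
 */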

#ifndef LINUX_REMOVE
static OSAL_INLINE u8 ecore_chain_is_full(struct ecore_chain *p_chain)
{
	if (is_chain_u16(p_chain))
		return (ecore_chain_get_elem_left(p_chain) ==
			p_chain->capacity);
	else
		return (ecore_chain_get_elem_left_u32(p_chain) ==
			p_chain->capacity);
}

static OSAL_INLINE u8 ecore_chain_is_empty(struct ecore_chain *p_chain)
{
	if (is_chain_u16(p_chain))
		return (ecore_chain_get_elem_left(p_chain) == 0);
	else
		return (ecore_chain_get_elem_left_u32(p_chain) == 0);
}

static OSAL_INLINE
u16 ecore_chain_get_elem_per_page(struct ecore_chain *p_chain)
{
	return p_chain->elem_per_page;
}
#endif

static OSAL_INLINE
u16 ecore_chain_get_usable_per_page(struct ecore_chain *p_chain)
{
	return p_chain->usable_per_page;
}

static OSAL_INLINE
u8 ecore_chain_get_unusable_per_page(struct ecore_chain *p_chain)
{
	return p_chain->elem_unusable;
}

#ifndef LINUX_REMOVE
static OSAL_INLINE u32 ecore_chain_get_size(struct ecore_chain *p_chain)
{
	return p_chain->size;
}
#endif

static OSAL_INLINE u32 ecore_chain_get_page_cnt(struct ecore_chain *p_chain)
{
	return p_chain->page_cnt;
}

static OSAL_INLINE
dma_addr_t ecore_chain_get_pbl_phys(struct ecore_chain *p_chain)
{
	return p_chain->pbl_sp.p_phys_table;
}

/**
 * @brief ecore_chain_advance_page -
 *
 * Advance the next element across pages for a linked chain
 *
 * @param p_chain
 * @param p_next_elem
 * @param idx_to_inc
 * @param page_to_inc
 */
static OSAL_INLINE void
ecore_chain_advance_page(struct ecore_chain *p_chain, void **p_next_elem,
			 void *idx_to_inc, void *page_to_inc)
{
	struct ecore_chain_next *p_next = OSAL_NULL;
	u32 page_index = 0;

	switch (p_chain->mode) {
	case ECORE_CHAIN_MODE_NEXT_PTR:
		p_next = (struct ecore_chain_next *)(*p_next_elem);
		*p_next_elem = p_next->next_virt;
		if (is_chain_u16(p_chain))
			*(u16 *)idx_to_inc += (u16)p_chain->elem_unusable;
		else
			*(u32 *)idx_to_inc += (u32)p_chain->elem_unusable;
		break;
	case ECORE_CHAIN_MODE_SINGLE:
		*p_next_elem = p_chain->p_virt_addr;
		break;
	case ECORE_CHAIN_MODE_PBL:
		if (is_chain_u16(p_chain)) {
			if (++(*(u16 *)page_to_inc) == p_chain->page_cnt)
				*(u16 *)page_to_inc = 0;
			page_index = *(u16 *)page_to_inc;
		} else {
			if (++(*(u32 *)page_to_inc) == p_chain->page_cnt)
				*(u32 *)page_to_inc = 0;
			page_index = *(u32 *)page_to_inc;
		}
		*p_next_elem = p_chain->pbl.pp_virt_addr_tbl[page_index];
	}
}

#define is_unusable_idx(p, idx)			\
	(((p)->u.chain16.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)

#define is_unusable_idx_u32(p, idx)		\
	(((p)->u.chain32.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)

#define is_unusable_next_idx(p, idx)		\
	((((p)->u.chain16.idx + 1) & (p)->elem_per_page_mask) == \
	 (p)->usable_per_page)

#define is_unusable_next_idx_u32(p, idx)	\
	((((p)->u.chain32.idx + 1) & (p)->elem_per_page_mask) == \
	 (p)->usable_per_page)

#define test_and_skip(p, idx)						\
	do {								\
		if (is_chain_u16(p)) {					\
			if (is_unusable_idx(p, idx))			\
				(p)->u.chain16.idx += (p)->elem_unusable; \
		} else {						\
			if (is_unusable_idx_u32(p, idx))		\
				(p)->u.chain32.idx += (p)->elem_unusable; \
		}							\
	} while (0)

#ifndef LINUX_REMOVE
/**
 * @brief ecore_chain_return_multi_produced -
 *
 * A chain in which the driver "Produces" elements should use this API
 * to indicate that previously produced elements are now consumed.
 *
 * @param p_chain
 * @param num
 */
static OSAL_INLINE
void ecore_chain_return_multi_produced(struct ecore_chain *p_chain, u32 num)
{
	if (is_chain_u16(p_chain))
		p_chain->u.chain16.cons_idx += (u16)num;
	else
		p_chain->u.chain32.cons_idx += num;
	test_and_skip(p_chain, cons_idx);
}
#endif

/**
 * @brief ecore_chain_return_produced -
 *
 * A chain in which the driver "Produces" elements should use this API
 * to indicate that a previously produced element is now consumed.
 *
 * @param p_chain
 */
static OSAL_INLINE void ecore_chain_return_produced(struct ecore_chain *p_chain)
{
	if (is_chain_u16(p_chain))
		p_chain->u.chain16.cons_idx++;
	else
		p_chain->u.chain32.cons_idx++;
	test_and_skip(p_chain, cons_idx);
}
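
/* Illustrative producer-side sketch (not part of this API). It assumes a
 * chain initialized for ECORE_CHAIN_USE_TO_PRODUCE and uses a hypothetical
 * element type "struct my_elem"; notifying the hardware of the new producer
 * value is device specific and is only hinted at here:
 *
 *	if (ecore_chain_get_elem_left(p_chain) > 0) {
 *		struct my_elem *p_elem;
 *
 *		p_elem = (struct my_elem *)ecore_chain_produce(p_chain);
 *		... fill *p_elem ...
 *		prod_idx = ecore_chain_get_prod_idx(p_chain);
 *		... write prod_idx to the device doorbell ...
 *	}
 */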

/**
 * @brief ecore_chain_produce -
 *
 * A chain in which the driver "Produces" elements should use this to get
 * a pointer to the next element which can be "Produced". It is the driver's
 * responsibility to validate that the chain has room for a new element.
 *
 * @param p_chain
 *
 * @return void*, a pointer to next element
 */
static OSAL_INLINE void *ecore_chain_produce(struct ecore_chain *p_chain)
{
	void *p_ret = OSAL_NULL, *p_prod_idx, *p_prod_page_idx;

	if (is_chain_u16(p_chain)) {
		if ((p_chain->u.chain16.prod_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_prod_idx = &p_chain->u.chain16.prod_idx;
			p_prod_page_idx = &p_chain->pbl.c.pbl_u16.prod_page_idx;
			ecore_chain_advance_page(p_chain, &p_chain->p_prod_elem,
						 p_prod_idx, p_prod_page_idx);
		}
		p_chain->u.chain16.prod_idx++;
	} else {
		if ((p_chain->u.chain32.prod_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_prod_idx = &p_chain->u.chain32.prod_idx;
			p_prod_page_idx = &p_chain->pbl.c.pbl_u32.prod_page_idx;
			ecore_chain_advance_page(p_chain, &p_chain->p_prod_elem,
						 p_prod_idx, p_prod_page_idx);
		}
		p_chain->u.chain32.prod_idx++;
	}

	p_ret = p_chain->p_prod_elem;
	p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) +
					p_chain->elem_size);

	return p_ret;
}

/**
 * @brief ecore_chain_get_capacity -
 *
 * Get the maximum number of BDs in chain
 *
 * @param p_chain
 *
 * @return the number of usable BDs in the chain [its capacity]
 */
static OSAL_INLINE u32 ecore_chain_get_capacity(struct ecore_chain *p_chain)
{
	return p_chain->capacity;
}

/**
 * @brief ecore_chain_recycle_consumed -
 *
 * Returns an element which was previously consumed;
 * Increments producers so they could be written to FW.
 *
 * @param p_chain
 */
static OSAL_INLINE
void ecore_chain_recycle_consumed(struct ecore_chain *p_chain)
{
	test_and_skip(p_chain, prod_idx);
	if (is_chain_u16(p_chain))
		p_chain->u.chain16.prod_idx++;
	else
		p_chain->u.chain32.prod_idx++;
}

/**
 * @brief ecore_chain_consume -
 *
 * A chain in which the driver utilizes data written by a different source
 * (e.g., the FW) should use this to access the passed buffers.
 *
 * @param p_chain
 *
 * @return void*, a pointer to the next buffer written
 */
static OSAL_INLINE void *ecore_chain_consume(struct ecore_chain *p_chain)
{
	void *p_ret = OSAL_NULL, *p_cons_idx, *p_cons_page_idx;

	if (is_chain_u16(p_chain)) {
		if ((p_chain->u.chain16.cons_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_cons_idx = &p_chain->u.chain16.cons_idx;
			p_cons_page_idx = &p_chain->pbl.c.pbl_u16.cons_page_idx;
			ecore_chain_advance_page(p_chain, &p_chain->p_cons_elem,
						 p_cons_idx, p_cons_page_idx);
		}
		p_chain->u.chain16.cons_idx++;
	} else {
		if ((p_chain->u.chain32.cons_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_cons_idx = &p_chain->u.chain32.cons_idx;
			p_cons_page_idx = &p_chain->pbl.c.pbl_u32.cons_page_idx;
			ecore_chain_advance_page(p_chain, &p_chain->p_cons_elem,
						 p_cons_idx, p_cons_page_idx);
		}
		p_chain->u.chain32.cons_idx++;
	}

	p_ret = p_chain->p_cons_elem;
	p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) +
					p_chain->elem_size);

	return p_ret;
}
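
/* Illustrative consumer-side sketch (not part of this API). It assumes a
 * u16 chain and a hypothetical completion index "hw_cons" reported by the
 * device; ecore_chain_recycle_consumed() is shown for the common case where
 * the consumed element is immediately handed back to the producer side,
 * matching the "chain starts full" semantics of ECORE_CHAIN_USE_TO_CONSUME:
 *
 *	while (ecore_chain_get_cons_idx(p_chain) != hw_cons) {
 *		p_elem = ecore_chain_consume(p_chain);
 *		... process the element ...
 *		ecore_chain_recycle_consumed(p_chain);
 *	}
 */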

/**
 * @brief ecore_chain_reset -
 *
 * Resets the chain to its start state
 *
 * @param p_chain	pointer to a previously allocated chain
 */
static OSAL_INLINE void ecore_chain_reset(struct ecore_chain *p_chain)
{
	u32 i;

	if (is_chain_u16(p_chain)) {
		p_chain->u.chain16.prod_idx = 0;
		p_chain->u.chain16.cons_idx = 0;
	} else {
		p_chain->u.chain32.prod_idx = 0;
		p_chain->u.chain32.cons_idx = 0;
	}
	p_chain->p_cons_elem = p_chain->p_virt_addr;
	p_chain->p_prod_elem = p_chain->p_virt_addr;

	if (p_chain->mode == ECORE_CHAIN_MODE_PBL) {
		/* Use "page_cnt-1" as a reset value for the prod/cons page's
		 * indices, to avoid unnecessary page advancing on the first
		 * call to ecore_chain_produce/consume. Instead, the indices
		 * will be advanced to page_cnt and then will be wrapped to 0.
		 */
		u32 reset_val = p_chain->page_cnt - 1;

		if (is_chain_u16(p_chain)) {
			p_chain->pbl.c.pbl_u16.prod_page_idx = (u16)reset_val;
			p_chain->pbl.c.pbl_u16.cons_page_idx = (u16)reset_val;
		} else {
			p_chain->pbl.c.pbl_u32.prod_page_idx = reset_val;
			p_chain->pbl.c.pbl_u32.cons_page_idx = reset_val;
		}
	}

	switch (p_chain->intended_use) {
	case ECORE_CHAIN_USE_TO_CONSUME:
		/* produce empty elements */
		for (i = 0; i < p_chain->capacity; i++)
			ecore_chain_recycle_consumed(p_chain);
		break;

	case ECORE_CHAIN_USE_TO_CONSUME_PRODUCE:
	case ECORE_CHAIN_USE_TO_PRODUCE:
	default:
		/* Do nothing */
		break;
	}
}

/**
 * @brief ecore_chain_init_params -
 *
 * Initializes a basic chain struct
 *
 * @param p_chain
 * @param page_cnt	number of pages in the allocated buffer
 * @param elem_size	size of each element in the chain
 * @param intended_use
 * @param mode
 * @param cnt_type
 * @param dp_ctx
 */
static OSAL_INLINE void
ecore_chain_init_params(struct ecore_chain *p_chain, u32 page_cnt, u8 elem_size,
			enum ecore_chain_use_mode intended_use,
			enum ecore_chain_mode mode,
			enum ecore_chain_cnt_type cnt_type, void *dp_ctx)
{
	/* chain fixed parameters */
	p_chain->p_virt_addr = OSAL_NULL;
	p_chain->p_phys_addr = 0;
	p_chain->elem_size = elem_size;
	p_chain->intended_use = (u8)intended_use;
	p_chain->mode = mode;
	p_chain->cnt_type = (u8)cnt_type;

	p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
	p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
	p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
	p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
	p_chain->next_page_mask = (p_chain->usable_per_page &
				   p_chain->elem_per_page_mask);

	p_chain->page_cnt = page_cnt;
	p_chain->capacity = p_chain->usable_per_page * page_cnt;
	p_chain->size = p_chain->elem_per_page * page_cnt;
	p_chain->b_external_pbl = false;
	p_chain->pbl_sp.p_phys_table = 0;
	p_chain->pbl_sp.p_virt_table = OSAL_NULL;
	p_chain->pbl.pp_virt_addr_tbl = OSAL_NULL;

	p_chain->dp_ctx = dp_ctx;
}

/**
 * @brief ecore_chain_init_mem -
 *
 * Initializes a basic chain struct with its chain buffers
 *
 * @param p_chain
 * @param p_virt_addr	virtual address of allocated buffer's beginning
 * @param p_phys_addr	physical address of allocated buffer's beginning
 *
 */
static OSAL_INLINE void ecore_chain_init_mem(struct ecore_chain *p_chain,
					     void *p_virt_addr,
					     dma_addr_t p_phys_addr)
{
	p_chain->p_virt_addr = p_virt_addr;
	p_chain->p_phys_addr = p_phys_addr;
}
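
/* Illustrative initialization sketch for a single-page chain (not part of
 * this API). "struct my_elem" is a hypothetical element type, and the
 * DMA-coherent allocation is only a placeholder since it is platform
 * specific:
 *
 *	ecore_chain_init_params(p_chain, 1, sizeof(struct my_elem),
 *				ECORE_CHAIN_USE_TO_PRODUCE,
 *				ECORE_CHAIN_MODE_SINGLE,
 *				ECORE_CHAIN_CNT_TYPE_U16, OSAL_NULL);
 *	... allocate ECORE_CHAIN_PAGE_SIZE bytes of DMA-coherent memory,
 *	    yielding p_virt and its bus address phys ...
 *	ecore_chain_init_mem(p_chain, p_virt, phys);
 *	ecore_chain_reset(p_chain);
 */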

/**
 * @brief ecore_chain_init_pbl_mem -
 *
 * Initializes a basic chain struct with its pbl buffers
 *
 * @param p_chain
 * @param p_virt_pbl	pointer to a pre-allocated side table which will hold
 *			virtual page addresses.
 * @param p_phys_pbl	pointer to a pre-allocated side table which will hold
 *			physical page addresses.
 * @param pp_virt_addr_tbl
 *			pointer to a pre-allocated side table which will hold
 *			the virtual addresses of the chain pages.
 *
 */
static OSAL_INLINE void ecore_chain_init_pbl_mem(struct ecore_chain *p_chain,
						 void *p_virt_pbl,
						 dma_addr_t p_phys_pbl,
						 void **pp_virt_addr_tbl)
{
	p_chain->pbl_sp.p_phys_table = p_phys_pbl;
	p_chain->pbl_sp.p_virt_table = p_virt_pbl;
	p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl;
}

/**
 * @brief ecore_chain_init_next_ptr_elem -
 *
 * Initializes a next pointer element
 *
 * @param p_chain
 * @param p_virt_curr	virtual address of a chain page of which the next
 *			pointer element is initialized
 * @param p_virt_next	virtual address of the next chain page
 * @param p_phys_next	physical address of the next chain page
 *
 */
static OSAL_INLINE void
ecore_chain_init_next_ptr_elem(struct ecore_chain *p_chain, void *p_virt_curr,
			       void *p_virt_next, dma_addr_t p_phys_next)
{
	struct ecore_chain_next *p_next;
	u32 size;

	size = p_chain->elem_size * p_chain->usable_per_page;
	p_next = (struct ecore_chain_next *)((u8 *)p_virt_curr + size);

	DMA_REGPAIR_LE(p_next->next_phys, p_phys_next);

	p_next->next_virt = p_virt_next;
}

/**
 * @brief ecore_chain_get_last_elem -
 *
 * Returns a pointer to the last element of the chain
 *
 * @param p_chain
 *
 * @return void*
 */
static OSAL_INLINE void *ecore_chain_get_last_elem(struct ecore_chain *p_chain)
{
	struct ecore_chain_next *p_next = OSAL_NULL;
	void *p_virt_addr = OSAL_NULL;
	u32 size, last_page_idx;

	if (!p_chain->p_virt_addr)
		goto out;

	switch (p_chain->mode) {
	case ECORE_CHAIN_MODE_NEXT_PTR:
		size = p_chain->elem_size * p_chain->usable_per_page;
		p_virt_addr = p_chain->p_virt_addr;
		p_next = (struct ecore_chain_next *)((u8 *)p_virt_addr + size);
		while (p_next->next_virt != p_chain->p_virt_addr) {
			p_virt_addr = p_next->next_virt;
			p_next = (struct ecore_chain_next *)((u8 *)p_virt_addr +
							     size);
		}
		break;
	case ECORE_CHAIN_MODE_SINGLE:
		p_virt_addr = p_chain->p_virt_addr;
		break;
	case ECORE_CHAIN_MODE_PBL:
		last_page_idx = p_chain->page_cnt - 1;
		p_virt_addr = p_chain->pbl.pp_virt_addr_tbl[last_page_idx];
		break;
	}
	/* p_virt_addr points at this stage to the last page of the chain */
	size = p_chain->elem_size * (p_chain->usable_per_page - 1);
	p_virt_addr = (u8 *)p_virt_addr + size;
out:
	return p_virt_addr;
}
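
/* Illustrative sketch of linking the pages of a NEXT_PTR chain into a ring
 * (not part of this API). "virt[]" and "phys[]" are hypothetical arrays
 * holding each page's virtual and physical address; the last page is linked
 * back to the first, which is also what ecore_chain_get_last_elem() relies
 * on when it walks the chain:
 *
 *	for (i = 0; i < page_cnt; i++)
 *		ecore_chain_init_next_ptr_elem(p_chain, virt[i],
 *					       virt[(i + 1) % page_cnt],
 *					       phys[(i + 1) % page_cnt]);
 */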

/**
 * @brief ecore_chain_set_prod - sets the prod to the given value
 *
 * @param p_chain
 * @param prod_idx
 * @param p_prod_elem
 */
static OSAL_INLINE void ecore_chain_set_prod(struct ecore_chain *p_chain,
					     u32 prod_idx, void *p_prod_elem)
{
	if (p_chain->mode == ECORE_CHAIN_MODE_PBL) {
		/* Use "prod_idx-1" since ecore_chain_produce() advances the
		 * page index before the producer index when getting to
		 * "next_page_mask".
		 */
		u32 elem_idx =
			(prod_idx - 1 + p_chain->capacity) % p_chain->capacity;
		u32 page_idx = elem_idx / p_chain->elem_per_page;

		if (is_chain_u16(p_chain))
			p_chain->pbl.c.pbl_u16.prod_page_idx = (u16)page_idx;
		else
			p_chain->pbl.c.pbl_u32.prod_page_idx = page_idx;
	}

	if (is_chain_u16(p_chain))
		p_chain->u.chain16.prod_idx = (u16)prod_idx;
	else
		p_chain->u.chain32.prod_idx = prod_idx;
	p_chain->p_prod_elem = p_prod_elem;
}

/**
 * @brief ecore_chain_set_cons - sets the cons to the given value
 *
 * @param p_chain
 * @param cons_idx
 * @param p_cons_elem
 */
static OSAL_INLINE void ecore_chain_set_cons(struct ecore_chain *p_chain,
					     u32 cons_idx, void *p_cons_elem)
{
	if (p_chain->mode == ECORE_CHAIN_MODE_PBL) {
		/* Use "cons_idx-1" since ecore_chain_consume() advances the
		 * page index before the consumer index when getting to
		 * "next_page_mask".
		 */
		u32 elem_idx =
			(cons_idx - 1 + p_chain->capacity) % p_chain->capacity;
		u32 page_idx = elem_idx / p_chain->elem_per_page;

		if (is_chain_u16(p_chain))
			p_chain->pbl.c.pbl_u16.cons_page_idx = (u16)page_idx;
		else
			p_chain->pbl.c.pbl_u32.cons_page_idx = page_idx;
	}

	if (is_chain_u16(p_chain))
		p_chain->u.chain16.cons_idx = (u16)cons_idx;
	else
		p_chain->u.chain32.cons_idx = cons_idx;

	p_chain->p_cons_elem = p_cons_elem;
}

/**
 * @brief ecore_chain_pbl_zero_mem - set chain memory to 0
 *
 * @param p_chain
 */
static OSAL_INLINE void ecore_chain_pbl_zero_mem(struct ecore_chain *p_chain)
{
	u32 i, page_cnt;

	if (p_chain->mode != ECORE_CHAIN_MODE_PBL)
		return;

	page_cnt = ecore_chain_get_page_cnt(p_chain);

	for (i = 0; i < page_cnt; i++)
		OSAL_MEM_ZERO(p_chain->pbl.pp_virt_addr_tbl[i],
			      ECORE_CHAIN_PAGE_SIZE);
}

int ecore_chain_print(struct ecore_chain *p_chain, char *buffer,
		      u32 buffer_size, u32 *element_indx, u32 stop_indx,
		      bool print_metadata,
		      int (*func_ptr_print_element)(struct ecore_chain *p_chain,
						    void *p_element,
						    char *buffer),
		      int (*func_ptr_print_metadata)(struct ecore_chain *p_chain,
						     char *buffer));

#endif /* __ECORE_CHAIN_H__ */