#ifndef ECORE_ERASE
#ifdef __LINUX

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/crc32.h>
#include <linux/etherdevice.h>

#define ECORE_ALIGN(x, a) ALIGN(x, a)
#endif

/* Always define ECORE_OOO for VBD */
#define ECORE_OOO

#include "bcmtype.h"
#include "utils.h"
#include "lm5710.h"
#include "ecore_sp_verbs.h"
#include "command.h"
#include "debug.h"
#include "ecore_common.h"

/************************ Debug print macros **********************************/
#if !defined(UEFI) && defined(DBG)
#define ECORE_MSG(pdev, m, ...) \
	DbgMessage(pdev, WARNi, m, ##__VA_ARGS__)
#else
#define ECORE_MSG
#endif

/************************ Error prints ****************************************/
#if !defined(UEFI) && defined(DBG)
#define ECORE_ERR(str, ...) DbgMessage(pdev, FATAL, str, ##__VA_ARGS__)
#else
#define ECORE_ERR
#endif


/*********************** ECORE WRAPPER MACROS ********************************/

#define ECORE_RET_PENDING(pending_bit, pending) \
	(ECORE_TEST_BIT(pending_bit, pending) ? ECORE_PENDING : ECORE_SUCCESS)

#define ECORE_ZALLOC(_size, _flags, _pdev) mm_rt_zalloc_mem(_pdev, _size)
#define ECORE_CALLOC(_len, _size, _flags, _pdev) mm_rt_zalloc_mem(_pdev, _len * _size)
#define ECORE_FREE(_pdev, _buf, _size) mm_rt_free_mem(_pdev, _buf, _size, 0)

/*
 * Ecore implementation of set/get flag
 * (differs from VBD set_flags, get_flags)
 */
#define ECORE_SET_FLAG(value, mask, flag) \
	do {\
		(value) &= ~(mask);\
		(value) |= ((flag) << (mask##_SHIFT));\
	} while (0)

#define ECORE_GET_FLAG(value, mask) \
	(((value) & (mask)) >> (mask##_SHIFT))
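/*
 * Usage sketch (illustrative only, not part of the driver build): the set/get
 * flag macros rely on token pasting, so every <FOO>_MASK passed to them must
 * have a matching <FOO>_MASK_SHIFT constant in scope (as the real
 * MAC_CONFIGURATION_ENTRY_* HSI constants do).  The FOO_FLAGS_MASK names and
 * values below are hypothetical.
 *
 *	#define FOO_FLAGS_MASK		0x00f0
 *	#define FOO_FLAGS_MASK_SHIFT	4
 *
 *	u32_t flags = 0;
 *
 *	ECORE_SET_FLAG(flags, FOO_FLAGS_MASK, 0x3);	// flags == 0x0030
 *	// ECORE_GET_FLAG(flags, FOO_FLAGS_MASK) now yields 0x3
 */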
#define ecore_sp_post(_pdev, _cmd, _cid, _data, _con_type) \
	lm_sq_post(_pdev, _cid, (u8)(_cmd), CMD_PRIORITY_NORMAL, _con_type, \
		   _data)

#define ECORE_SET_CTX_VALIDATION(_pdev, _cxt, _cid) \
	lm_set_cdu_validation_data(_pdev, _cid, FALSE) /* context? type? */

/************************ TODO for LM people!!! *******************************/
#define ECORE_TODO_UPDATE_COALESCE_SB_INDEX(a1, a2, a3, a4, a5)
#define ECORE_TODO_LINK_REPORT(pdev)
#define ECORE_TODO_FW_COMMAND(_pdev, _drv_msg_code, _val) (-1)

/************************ Lists ***********************************************/
#define ECORE_LIST_FOR_EACH_ENTRY(pos, _head, _link, cast) \
	for (pos = (cast *)d_list_peek_head(_head); \
	     pos; \
	     pos = (cast *)d_list_next_entry(&pos->_link))

/**
 * ECORE_LIST_FOR_EACH_ENTRY_SAFE - iterate over list of given type
 * @pos:	the type * to use as a loop cursor.
 * @n:		another type * to use as temporary storage
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * iterate over list of given type safe against removal of list entry
 */
#define ECORE_LIST_FOR_EACH_ENTRY_SAFE(pos, n, head, member, cast) \
	for (pos = (cast *)d_list_peek_head(head), \
	     n = (pos) ? (cast *)d_list_next_entry(&pos->member) : NULL; \
	     pos != NULL; \
	     pos = (cast *)n, \
	     n = (pos) ? (cast *)d_list_next_entry(&pos->member) : NULL)
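/*
 * Usage sketch (illustrative only): the _SAFE variant is what allows a list
 * to be drained while each element is removed and freed, e.g. (assuming
 * "pdev" and a list head "pending" of the same d_list type these macros
 * operate on are in scope):
 *
 *	struct ecore_exeq_elem *elem, *next;
 *
 *	ECORE_LIST_FOR_EACH_ENTRY_SAFE(elem, next, &pending, link,
 *				       struct ecore_exeq_elem) {
 *		ECORE_LIST_REMOVE_ENTRY(&elem->link, &pending);
 *		ECORE_FREE(pdev, elem, sizeof(*elem));
 *	}
 */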
#define ECORE_LIST_IS_LAST(_link, _list)	(_link == (_list)->tail)

#define ECORE_LIST_IS_EMPTY(head) \
	d_list_is_empty(head)

#define ECORE_LIST_FIRST_ENTRY(head, cast, link) \
	(cast *)d_list_peek_head(head)

#define ECORE_LIST_NEXT(pos, link, cast) \
	(cast *)d_list_next_entry(&((pos)->link))

#define ECORE_LIST_INIT(head) \
	do { \
		d_list_clear(head); \
	} while (0)

#define ECORE_LIST_PUSH_TAIL(link, head) \
	do { \
		d_list_push_tail(head, link); \
	} while (0)

#define ECORE_LIST_PUSH_HEAD(link, head) \
	do { \
		d_list_push_head(head, link); \
	} while (0)

#define ECORE_LIST_REMOVE_ENTRY(link, head) \
	do { \
		d_list_remove_entry(head, link); \
	} while (0)

#define ECORE_LIST_SPLICE_INIT(new_head, head) \
	do { \
		d_list_add_head(head, new_head); \
		d_list_clear(new_head); \
	} while (0)

static __inline u32_t ecore_crc32_le(u32_t seed, u8_t *mac, u32_t len)
{
	u32_t packet_buf[2] = {0};

	memcpy(((u8_t *)(&packet_buf[0])) + 2, &mac[0], 2);
	memcpy(&packet_buf[1], &mac[2], 4);
	return SWAP_BYTES32(calc_crc32((u8_t *)packet_buf, 8, seed, 0));
}

/************************ Per compilation target ******************************/
#ifdef __LINUX

#define ECORE_UNLIKELY	unlikely
#define ECORE_LIKELY	likely

#define ecore_atomic_read		mm_atomic_read
#define ecore_atomic_cmpxchg		mm_atomic_cmpxchg
#define ecore_atomic_set(a, v)		mm_atomic_set((u32_t *)(a), v)
#define smp_mb__before_atomic()		mm_barrier()
#define smp_mb__after_atomic()		mm_barrier()

/* Other */
#define ECORE_IS_VALID_ETHER_ADDR(_mac)	is_valid_ether_addr(_mac)
#define ECORE_SET_WAIT_COUNT(_cnt)
#define ECORE_SET_WAIT_DELAY_US(_cnt, _delay_us)

/* Mutex related */
#define ECORE_MUTEX_INIT(_mutex)	mutex_init(_mutex)
#define ECORE_MUTEX_LOCK(_mutex)	mutex_lock(_mutex)
#define ECORE_MUTEX_UNLOCK(_mutex)	mutex_unlock(_mutex)

#define ECORE_MIGHT_SLEEP()		ediag_might_sleep()
#define ECORE_TEST_BIT(bit, var)	test_bit(bit, var)
#define ECORE_TEST_AND_CLEAR_BIT(bit, var) test_and_clear_bit(bit, var)

#else /* ! LINUX */

typedef u16 __le16;

#define ecore_atomic_read		mm_atomic_read
#define ecore_atomic_cmpxchg		mm_atomic_cmpxchg
#define ecore_atomic_set(a, val)	mm_atomic_set((u32_t *)(a), val)

#define ECORE_UNLIKELY(x)	(x)
#define ECORE_LIKELY(x)		(x)
#define BUG()			DbgBreakMsg("Bug")
#define smp_mb()		mm_barrier()
#define smp_mb__before_atomic()	mm_barrier()
#define smp_mb__after_atomic()	mm_barrier()
#define mb()			mm_barrier()
#define wmb()			mm_barrier()
#define mmiowb()		mm_barrier()

#define ECORE_MIGHT_SLEEP()	/* IRQL_PASSIVE_CODE() */

/* Mutex related */
#define ECORE_MUTEX_INIT(_mutex)
#define ECORE_MUTEX_LOCK(_mutex)
#define ECORE_MUTEX_UNLOCK(_mutex)

/* Atomic Bit Manipulation */
#define ECORE_TEST_BIT(_bit, _var) \
	(mm_atomic_long_read(_var) & (1 << (_bit)))

/* Other */
#define ECORE_IS_VALID_ETHER_ADDR(_mac)	TRUE
#define ECORE_SET_WAIT_DELAY_US(_cnt, _delay_us) \
	do { \
		_delay_us = (_cnt >= 2360) ? 100 : 25000; \
	} while (0)

/* In VBD we'll wait 10,000 times 100us (1 second) +
 * 2360 times 25000us (59 sec) = total 60 sec.
 * (Windows only note) the 25000us wait will cause the wait
 * to be without CPU stall (look in win_util.c).
 */
#define ECORE_SET_WAIT_COUNT(_cnt) \
	do { \
		_cnt = 10000 + 2360; \
	} while (0)

static __inline BOOL ECORE_TEST_AND_CLEAR_BIT(int bit, unsigned long *vec)
{
	BOOL set = ECORE_TEST_BIT(bit, vec);
	ECORE_CLEAR_BIT(bit, vec);

	return set;
}

#endif /* END if "per LM target type" */

/* Spin lock related */
#define ECORE_SPIN_LOCK_INIT(_spin, _pdev)	mm_init_lock(_pdev, _spin)
#define ECORE_SPIN_LOCK_BH(_spin)		mm_acquire_lock(_spin)
#define ECORE_SPIN_UNLOCK_BH(_spin)		mm_release_lock(_spin)

#endif /* not ECORE_ERASE */
#if defined(__FreeBSD__) && !defined(NOT_LINUX)
#include "bxe.h"
#include "ecore_init.h"
#elif !defined(EDIAG)
#ifdef ECORE_ERASE
#include <linux/version.h>
#include <linux/module.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#if (LINUX_VERSION_CODE >= 0x02061b) && !defined(BNX2X_DRIVER_DISK) && !defined(__VMKLNX__) /* BNX2X_UPSTREAM */
#include <linux/crc32c.h>
#endif
#include "bnx2x.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"

#define ECORE_MAX_EMUL_MULTI		16
#endif
#endif

/**** Exe Queue interfaces ****/

/**
 * ecore_exe_queue_init - init the Exe Queue object
 *
 * @o:		pointer to the object
 * @exe_len:	maximal length of a single execution chunk
 * @owner:	pointer to the owner
 * @validate:	validate function pointer
 * @optimize:	optimize function pointer
 * @exec:	execute function pointer
 * @get:	get function pointer
 */
static INLINE void ecore_exe_queue_init(struct _lm_device_t *pdev,
					struct ecore_exe_queue_obj *o,
					int exe_len,
					union ecore_qable_obj *owner,
					exe_q_validate validate,
					exe_q_remove remove,
					exe_q_optimize optimize,
					exe_q_execute exec,
					exe_q_get get)
{
	mm_memset(o, 0, sizeof(*o));

	ECORE_LIST_INIT(&o->exe_queue);
	ECORE_LIST_INIT(&o->pending_comp);

	ECORE_SPIN_LOCK_INIT(&o->lock, pdev);

	o->exe_chunk_len = exe_len;
	o->owner	 = owner;

	/* Owner specific callbacks */
	o->validate	 = validate;
	o->remove	 = remove;
	o->optimize	 = optimize;
	o->execute	 = exec;
	o->get		 = get;

	ECORE_MSG(pdev,
		  "Setup the execution queue with the chunk length of %d\n",
		  exe_len);
}

static INLINE void ecore_exe_queue_free_elem(struct _lm_device_t *pdev,
					     struct ecore_exeq_elem *elem)
{
	ECORE_MSG(pdev, "Deleting an exe_queue element\n");
	ECORE_FREE(pdev, elem, sizeof(*elem));
}

static INLINE int ecore_exe_queue_length(struct ecore_exe_queue_obj *o)
{
	struct ecore_exeq_elem *elem;
	int cnt = 0;

#ifdef ECORE_ERASE
	spin_lock_bh(&o->lock);
#endif

	ECORE_LIST_FOR_EACH_ENTRY(elem, &o->exe_queue, link,
				  struct ecore_exeq_elem)
		cnt++;

#ifdef ECORE_ERASE
	spin_unlock_bh(&o->lock);
#endif

	return cnt;
}

/**
 * ecore_exe_queue_add - add a new element to the execution queue
 *
 * @pdev:	driver handle
 * @o:		queue
 * @elem:	new command element to add
 * @restore:	true - do not optimize the command
 *
 * If the element is optimized or is illegal, frees it.
 */
static INLINE int ecore_exe_queue_add(struct _lm_device_t *pdev,
				      struct ecore_exe_queue_obj *o,
				      struct ecore_exeq_elem *elem,
				      BOOL restore)
{
	int rc;

	ECORE_SPIN_LOCK_BH(&o->lock);

	if (!restore) {
		/* See if this element can be optimized out */
		rc = o->optimize(pdev, o->owner, elem);
		if (rc)
			goto free_and_exit;

		/* Check if this request is ok */
		rc = o->validate(pdev, o->owner, elem);
		if (rc) {
			ECORE_MSG(pdev, "Preamble failed: %d\n", rc);
			goto free_and_exit;
		}
	}

	/* If so, add it to the execution queue */
	ECORE_LIST_PUSH_TAIL(&elem->link, &o->exe_queue);

	ECORE_SPIN_UNLOCK_BH(&o->lock);

	return ECORE_SUCCESS;

free_and_exit:
	ecore_exe_queue_free_elem(pdev, elem);

	ECORE_SPIN_UNLOCK_BH(&o->lock);

	return rc;
}
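/*
 * Producer-side sketch (illustrative only; real callers also pick the command
 * type and handle errors): a caller allocates an element, fills in the
 * command and its length, and queues it.  "o" is assumed to be a vlan_mac
 * object whose exe_queue was set up by ecore_exe_queue_init().
 *
 *	struct ecore_exeq_elem *elem = ecore_exe_queue_alloc_elem(pdev);
 *
 *	if (elem) {
 *		elem->cmd_len = 1;
 *		elem->cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
 *		// ... fill elem->cmd_data.vlan_mac.u ...
 *		rc = ecore_exe_queue_add(pdev, &o->exe_queue, elem, FALSE);
 *	}
 *
 * Note that on any failure ecore_exe_queue_add() frees the element itself,
 * so the caller must not free it again.
 */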
static INLINE void __ecore_exe_queue_reset_pending(
	struct _lm_device_t *pdev,
	struct ecore_exe_queue_obj *o)
{
	struct ecore_exeq_elem *elem;

	while (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
		elem = ECORE_LIST_FIRST_ENTRY(&o->pending_comp,
					      struct ecore_exeq_elem,
					      link);

		ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->pending_comp);
		ecore_exe_queue_free_elem(pdev, elem);
	}
}

/**
 * ecore_exe_queue_step - execute one execution chunk atomically
 *
 * @pdev:		driver handle
 * @o:			queue
 * @ramrod_flags:	flags
 *
 * (Should be called while holding the exe_queue->lock).
 */
static INLINE int ecore_exe_queue_step(struct _lm_device_t *pdev,
				       struct ecore_exe_queue_obj *o,
				       unsigned long *ramrod_flags)
{
	struct ecore_exeq_elem *elem, spacer;
	int cur_len = 0, rc;

	mm_memset(&spacer, 0, sizeof(spacer));

	/* Next step should not be performed until the current is finished,
	 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
	 * properly clear object internals without sending any command to the
	 * FW which also implies there won't be any completion to clear the
	 * 'pending' list.
	 */
	if (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
		if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
			ECORE_MSG(pdev,
				  "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
			__ecore_exe_queue_reset_pending(pdev, o);
		} else {
			return ECORE_PENDING;
		}
	}

	/* Run through the pending commands list and create a next
	 * execution chunk.
	 */
	while (!ECORE_LIST_IS_EMPTY(&o->exe_queue)) {
		elem = ECORE_LIST_FIRST_ENTRY(&o->exe_queue,
					      struct ecore_exeq_elem,
					      link);
		DbgBreakIf(!elem->cmd_len);

		if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
			cur_len += elem->cmd_len;
			/* Prevent from both lists being empty when moving an
			 * element. This will allow the call of
			 * ecore_exe_queue_empty() without locking.
			 */
			ECORE_LIST_PUSH_TAIL(&spacer.link, &o->pending_comp);
			mb();
			ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->exe_queue);
			ECORE_LIST_PUSH_TAIL(&elem->link, &o->pending_comp);
			ECORE_LIST_REMOVE_ENTRY(&spacer.link, &o->pending_comp);
		} else
			break;
	}

	/* Sanity check */
	if (!cur_len)
		return ECORE_SUCCESS;

	rc = o->execute(pdev, o->owner, &o->pending_comp, ramrod_flags);
	if (rc < 0)
		/* In case of an error return the commands back to the queue
		 * and reset the pending_comp.
		 */
		ECORE_LIST_SPLICE_INIT(&o->pending_comp, &o->exe_queue);
	else if (!rc)
		/* If zero is returned, means there are no outstanding pending
		 * completions and we may dismiss the pending list.
		 */
		__ecore_exe_queue_reset_pending(pdev, o);

	return rc;
}

static INLINE BOOL ecore_exe_queue_empty(struct ecore_exe_queue_obj *o)
{
	BOOL empty = ECORE_LIST_IS_EMPTY(&o->exe_queue);

	/* Don't reorder!!! */
	mb();

	return empty && ECORE_LIST_IS_EMPTY(&o->pending_comp);
}

static INLINE struct ecore_exeq_elem *ecore_exe_queue_alloc_elem(
	struct _lm_device_t *pdev)
{
	ECORE_MSG(pdev, "Allocating a new exe_queue element\n");
	return ECORE_ZALLOC(sizeof(struct ecore_exeq_elem), GFP_ATOMIC,
			    pdev);
}

/************************ raw_obj functions ***********************************/
static BOOL ecore_raw_check_pending(struct ecore_raw_obj *o)
{
	/*
	 * !! converts the value returned by ECORE_TEST_BIT such that it
	 * is guaranteed not to be truncated regardless of BOOL definition.
	 *
	 * Note we cannot simply define the function's return value type
	 * to match the type returned by ECORE_TEST_BIT, as it varies by
	 * platform/implementation.
	 */

	return !!ECORE_TEST_BIT(o->state, o->pstate);
}

static void ecore_raw_clear_pending(struct ecore_raw_obj *o)
{
	smp_mb__before_atomic();
	ECORE_CLEAR_BIT(o->state, o->pstate);
	smp_mb__after_atomic();
}

static void ecore_raw_set_pending(struct ecore_raw_obj *o)
{
	smp_mb__before_atomic();
	ECORE_SET_BIT(o->state, o->pstate);
	smp_mb__after_atomic();
}

/**
 * ecore_state_wait - wait until the given bit (state) is cleared
 *
 * @pdev:	device handle
 * @state:	state which is to be cleared
 * @pstate:	state buffer
 *
 */
static INLINE int ecore_state_wait(struct _lm_device_t *pdev, int state,
				   unsigned long *pstate)
{
	/* can take a while if any port is running */
	int cnt = 5000;

#ifndef ECORE_ERASE
	int delay_us = 1000;

	/* In VBD we'll wait 10,000 times 100us (1 second) +
	 * 2360 times 25000us (59 sec) = total 60 sec.
	 * (Windows only note) the 25000us wait will cause the wait
	 * to be without CPU stall (look in win_util.c).
	 */
	cnt = 10000 + 2360;
#endif

	if (CHIP_REV_IS_EMUL(pdev))
		cnt *= 20;

	ECORE_MSG(pdev, "waiting for state to become %d\n", state);

	ECORE_MIGHT_SLEEP();
	while (cnt--) {
		if (!ECORE_TEST_BIT(state, pstate)) {
#ifdef ECORE_STOP_ON_ERROR
			ECORE_MSG(pdev, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return ECORE_SUCCESS;
		}

#ifndef ECORE_ERASE
		/* in case reset is in progress we won't get completion */
		if (lm_reset_is_inprogress(pdev))
			return 0;

		delay_us = (cnt >= 2360) ? 100 : 25000;
#endif
		mm_wait(pdev, delay_us);

		if (pdev->panic)
			return ECORE_IO;
	}

	/* timeout! */
	ECORE_ERR("timeout waiting for state %d\n", state);
#ifdef ECORE_STOP_ON_ERROR
	ecore_panic();
#endif

	return ECORE_TIMEOUT;
}

static int ecore_raw_wait(struct _lm_device_t *pdev, struct ecore_raw_obj *raw)
{
	return ecore_state_wait(pdev, raw->state, raw->pstate);
}

/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
/* credit handling callbacks */
static BOOL ecore_get_cam_offset_mac(struct ecore_vlan_mac_obj *o, int *offset)
{
	struct ecore_credit_pool_obj *mp = o->macs_pool;

	DbgBreakIf(!mp);

	return mp->get_entry(mp, offset);
}

static BOOL ecore_get_credit_mac(struct ecore_vlan_mac_obj *o)
{
	struct ecore_credit_pool_obj *mp = o->macs_pool;

	DbgBreakIf(!mp);

	return mp->get(mp, 1);
}

static BOOL ecore_get_cam_offset_vlan(struct ecore_vlan_mac_obj *o, int *offset)
{
	struct ecore_credit_pool_obj *vp = o->vlans_pool;

	DbgBreakIf(!vp);

	return vp->get_entry(vp, offset);
}

static BOOL ecore_get_credit_vlan(struct ecore_vlan_mac_obj *o)
{
	struct ecore_credit_pool_obj *vp = o->vlans_pool;

	DbgBreakIf(!vp);

	return vp->get(vp, 1);
}

static BOOL ecore_get_credit_vlan_mac(struct ecore_vlan_mac_obj *o)
{
	struct ecore_credit_pool_obj *mp = o->macs_pool;
	struct ecore_credit_pool_obj *vp = o->vlans_pool;

	if (!mp->get(mp, 1))
		return FALSE;

	if (!vp->get(vp, 1)) {
		mp->put(mp, 1);
		return FALSE;
	}

	return TRUE;
}

static BOOL ecore_put_cam_offset_mac(struct ecore_vlan_mac_obj *o, int offset)
{
	struct ecore_credit_pool_obj *mp = o->macs_pool;

	return mp->put_entry(mp, offset);
}

static BOOL ecore_put_credit_mac(struct ecore_vlan_mac_obj *o)
{
	struct ecore_credit_pool_obj *mp = o->macs_pool;

	return mp->put(mp, 1);
}

static BOOL ecore_put_cam_offset_vlan(struct ecore_vlan_mac_obj *o, int offset)
{
	struct ecore_credit_pool_obj *vp = o->vlans_pool;

	return vp->put_entry(vp, offset);
}

static BOOL ecore_put_credit_vlan(struct ecore_vlan_mac_obj *o)
{
	struct ecore_credit_pool_obj *vp = o->vlans_pool;

	return vp->put(vp, 1);
}

static BOOL ecore_put_credit_vlan_mac(struct ecore_vlan_mac_obj *o)
{
	struct ecore_credit_pool_obj *mp = o->macs_pool;
	struct ecore_credit_pool_obj *vp = o->vlans_pool;

	if (!mp->put(mp, 1))
		return FALSE;

	if (!vp->put(vp, 1)) {
		mp->get(mp, 1);
		return FALSE;
	}

	return TRUE;
}

/**
 * __ecore_vlan_mac_h_write_trylock - try getting the writer lock on vlan mac
 * head list.
 *
 * @pdev:	device handle
 * @o:		vlan_mac object
 *
 * @details: Non-blocking implementation; should be called under execution
 *           queue lock.
 */
static int __ecore_vlan_mac_h_write_trylock(struct _lm_device_t *pdev,
					    struct ecore_vlan_mac_obj *o)
{
	if (o->head_reader) {
		ECORE_MSG(pdev, "vlan_mac_lock writer - There are readers; Busy\n");
		return ECORE_BUSY;
	}

	ECORE_MSG(pdev, "vlan_mac_lock writer - Taken\n");
	return ECORE_SUCCESS;
}

/**
 * __ecore_vlan_mac_h_exec_pending - execute step instead of a previous step
 * which wasn't able to run due to a taken lock on vlan mac head list.
 *
 * @pdev:	device handle
 * @o:		vlan_mac object
 *
 * @details Should be called under execution queue lock; notice it might release
 *          and reclaim it during its run.
 */
static void __ecore_vlan_mac_h_exec_pending(struct _lm_device_t *pdev,
					    struct ecore_vlan_mac_obj *o)
{
	int rc;
	unsigned long ramrod_flags = o->saved_ramrod_flags;

	ECORE_MSG(pdev, "vlan_mac_lock execute pending command with ramrod flags %lu\n",
		  ramrod_flags);
	o->head_exe_request = FALSE;
	o->saved_ramrod_flags = 0;
	rc = ecore_exe_queue_step(pdev, &o->exe_queue, &ramrod_flags);
	if (rc != ECORE_SUCCESS) {
		ECORE_ERR("execution of pending commands failed with rc %d\n",
			  rc);
#ifdef ECORE_STOP_ON_ERROR
		ecore_panic();
#endif
	}
}

/**
 * __ecore_vlan_mac_h_pend - Pend an execution step which couldn't have been
 * called due to vlan mac head list lock being taken.
 *
 * @pdev:		device handle
 * @o:			vlan_mac object
 * @ramrod_flags:	ramrod flags of missed execution
 *
 * @details Should be called under execution queue lock.
 */
static void __ecore_vlan_mac_h_pend(struct _lm_device_t *pdev,
				    struct ecore_vlan_mac_obj *o,
				    unsigned long ramrod_flags)
{
	o->head_exe_request = TRUE;
	o->saved_ramrod_flags = ramrod_flags;
	ECORE_MSG(pdev, "Placing pending execution with ramrod flags %lu\n",
		  ramrod_flags);
}

/**
 * __ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
 *
 * @pdev:	device handle
 * @o:		vlan_mac object
 *
 * @details Should be called under execution queue lock. Notice if a pending
 *          execution exists, it would perform it - possibly releasing and
 *          reclaiming the execution queue lock.
 */
static void __ecore_vlan_mac_h_write_unlock(struct _lm_device_t *pdev,
					    struct ecore_vlan_mac_obj *o)
{
	/* It's possible a new pending execution was added since this writer
	 * executed. If so, execute again. [Ad infinitum]
	 */
	while (o->head_exe_request) {
		ECORE_MSG(pdev, "vlan_mac_lock - writer release encountered a pending request\n");
		__ecore_vlan_mac_h_exec_pending(pdev, o);
	}
}

/**
 * ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
 *
 * @pdev:	device handle
 * @o:		vlan_mac object
 *
 * @details Notice if a pending execution exists, it would perform it -
 *          possibly releasing and reclaiming the execution queue lock.
 */
void ecore_vlan_mac_h_write_unlock(struct _lm_device_t *pdev,
				   struct ecore_vlan_mac_obj *o)
{
	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
	__ecore_vlan_mac_h_write_unlock(pdev, o);
	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
}

/**
 * __ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
 *
 * @pdev:	device handle
 * @o:		vlan_mac object
 *
 * @details Should be called under the execution queue lock. May sleep. May
 *          release and reclaim execution queue lock during its run.
 */
static int __ecore_vlan_mac_h_read_lock(struct _lm_device_t *pdev,
					struct ecore_vlan_mac_obj *o)
{
	/* If we got here, we're holding lock --> no WRITER exists */
	o->head_reader++;
	ECORE_MSG(pdev, "vlan_mac_lock - locked reader - number %d\n",
		  o->head_reader);

	return ECORE_SUCCESS;
}

/**
 * ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
 *
 * @pdev:	device handle
 * @o:		vlan_mac object
 *
 * @details May sleep. Claims and releases execution queue lock during its run.
 */
int ecore_vlan_mac_h_read_lock(struct _lm_device_t *pdev,
			       struct ecore_vlan_mac_obj *o)
{
	int rc;

	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
	rc = __ecore_vlan_mac_h_read_lock(pdev, o);
	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);

	return rc;
}

/**
 * __ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
 *
 * @pdev:	device handle
 * @o:		vlan_mac object
 *
 * @details Should be called under execution queue lock. Notice if a pending
 *          execution exists, it would be performed if this was the last
 *          reader - possibly releasing and reclaiming the execution queue lock.
 */
static void __ecore_vlan_mac_h_read_unlock(struct _lm_device_t *pdev,
					   struct ecore_vlan_mac_obj *o)
{
	if (!o->head_reader) {
		ECORE_ERR("Need to release vlan mac reader lock, but lock isn't taken\n");
#ifdef ECORE_STOP_ON_ERROR
		ecore_panic();
#endif
	} else {
		o->head_reader--;
		ECORE_MSG(pdev, "vlan_mac_lock - decreased readers to %d\n",
			  o->head_reader);
	}

	/* It's possible a new pending execution was added, and that this reader
	 * was last - if so we need to execute the command.
	 */
	if (!o->head_reader && o->head_exe_request) {
		ECORE_MSG(pdev, "vlan_mac_lock - reader release encountered a pending request\n");

		/* Writer release will do the trick */
		__ecore_vlan_mac_h_write_unlock(pdev, o);
	}
}

/**
 * ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
 *
 * @pdev:	device handle
 * @o:		vlan_mac object
 *
 * @details Notice if a pending execution exists, it would be performed if this
 *          was the last reader. Claims and releases the execution queue lock
 *          during its run.
 */
void ecore_vlan_mac_h_read_unlock(struct _lm_device_t *pdev,
				  struct ecore_vlan_mac_obj *o)
{
	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
	__ecore_vlan_mac_h_read_unlock(pdev, o);
	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
}

/**
 * ecore_get_n_elements - copy up to n registered elements into a buffer
 *
 * @pdev:	device handle
 * @o:		vlan_mac object
 * @n:		maximum number of elements to copy
 * @base:	base address for element placement
 * @stride:	stride between elements (in bytes)
 * @size:	number of bytes copied per element
 */
static int ecore_get_n_elements(struct _lm_device_t *pdev,
				struct ecore_vlan_mac_obj *o,
				int n, u8 *base, u8 stride, u8 size)
{
	struct ecore_vlan_mac_registry_elem *pos;
	u8 *next = base;
	int counter = 0;
	int read_lock;

	ECORE_MSG(pdev, "get_n_elements - taking vlan_mac_lock (reader)\n");
	read_lock = ecore_vlan_mac_h_read_lock(pdev, o);
	if (read_lock != ECORE_SUCCESS)
		ECORE_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n");

	/* traverse list */
	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
				  struct ecore_vlan_mac_registry_elem) {
		if (counter < n) {
			mm_memcpy(next, &pos->u, size);
			counter++;
			ECORE_MSG(pdev, "copied element number %d to address %p element was:\n",
				  counter, next);
			next += stride + size;
		}
	}

	if (read_lock == ECORE_SUCCESS) {
		ECORE_MSG(pdev, "get_n_elements - releasing vlan_mac_lock (reader)\n");
		ecore_vlan_mac_h_read_unlock(pdev, o);
	}

	return counter * ETH_ALEN;
}

/* check_add() callbacks */
static int ecore_check_mac_add(struct _lm_device_t *pdev,
			       struct ecore_vlan_mac_obj *o,
			       union ecore_classification_ramrod_data *data)
{
	struct ecore_vlan_mac_registry_elem *pos;

	ECORE_MSG(pdev, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for ADD command\n",
		  data->mac.mac[0], data->mac.mac[1], data->mac.mac[2],
		  data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);

	if (!ECORE_IS_VALID_ETHER_ADDR(data->mac.mac))
		return ECORE_INVAL;

	/* Check if a requested MAC already exists */
	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
				  struct ecore_vlan_mac_registry_elem)
		if (mm_memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
		    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
			return ECORE_EXISTS;

	return ECORE_SUCCESS;
}

static int ecore_check_vlan_add(struct _lm_device_t *pdev,
				struct ecore_vlan_mac_obj *o,
				union ecore_classification_ramrod_data *data)
{
	struct ecore_vlan_mac_registry_elem *pos;

	ECORE_MSG(pdev, "Checking VLAN %d for ADD command\n", data->vlan.vlan);

	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
				  struct ecore_vlan_mac_registry_elem)
		if (data->vlan.vlan == pos->u.vlan.vlan)
			return ECORE_EXISTS;

	return ECORE_SUCCESS;
}

static int ecore_check_vlan_mac_add(struct _lm_device_t *pdev,
				    struct ecore_vlan_mac_obj *o,
				    union ecore_classification_ramrod_data *data)
{
	struct ecore_vlan_mac_registry_elem *pos;

	ECORE_MSG(pdev, "Checking VLAN_MAC (%02x:%02x:%02x:%02x:%02x:%02x, %d) for ADD command\n",
		  data->vlan_mac.mac[0], data->vlan_mac.mac[1],
		  data->vlan_mac.mac[2], data->vlan_mac.mac[3],
		  data->vlan_mac.mac[4], data->vlan_mac.mac[5],
		  data->vlan_mac.vlan);

	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
				  struct ecore_vlan_mac_registry_elem)
		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
		    (mm_memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
			       ETH_ALEN)) &&
		    (data->vlan_mac.is_inner_mac ==
		     pos->u.vlan_mac.is_inner_mac))
			return ECORE_EXISTS;

	return ECORE_SUCCESS;
}

/* check_del() callbacks */
static struct ecore_vlan_mac_registry_elem *
ecore_check_mac_del(struct _lm_device_t *pdev,
		    struct ecore_vlan_mac_obj *o,
		    union ecore_classification_ramrod_data *data)
{
	struct ecore_vlan_mac_registry_elem *pos;

	ECORE_MSG(pdev, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for DEL command\n",
		  data->mac.mac[0], data->mac.mac[1], data->mac.mac[2],
		  data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);

	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
				  struct ecore_vlan_mac_registry_elem)
		if ((mm_memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) &&
		    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
			return pos;

	return NULL;
}

static struct ecore_vlan_mac_registry_elem *
ecore_check_vlan_del(struct _lm_device_t *pdev,
		     struct ecore_vlan_mac_obj *o,
		     union ecore_classification_ramrod_data *data)
{
	struct ecore_vlan_mac_registry_elem *pos;

	ECORE_MSG(pdev, "Checking VLAN %d for DEL command\n", data->vlan.vlan);

	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
				  struct ecore_vlan_mac_registry_elem)
		if (data->vlan.vlan == pos->u.vlan.vlan)
			return pos;

	return NULL;
}

static struct ecore_vlan_mac_registry_elem *
ecore_check_vlan_mac_del(struct _lm_device_t *pdev,
			 struct ecore_vlan_mac_obj *o,
			 union ecore_classification_ramrod_data *data)
{
	struct ecore_vlan_mac_registry_elem *pos;

	ECORE_MSG(pdev, "Checking VLAN_MAC (%02x:%02x:%02x:%02x:%02x:%02x, %d) for DEL command\n",
		  data->vlan_mac.mac[0], data->vlan_mac.mac[1],
		  data->vlan_mac.mac[2], data->vlan_mac.mac[3],
		  data->vlan_mac.mac[4], data->vlan_mac.mac[5],
		  data->vlan_mac.vlan);

	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
				  struct ecore_vlan_mac_registry_elem)
		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
		    (mm_memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
			       ETH_ALEN)) &&
		    (data->vlan_mac.is_inner_mac ==
		     pos->u.vlan_mac.is_inner_mac))
			return pos;

	return NULL;
}

/* check_move() callback */
static BOOL ecore_check_move(struct _lm_device_t *pdev,
			     struct ecore_vlan_mac_obj *src_o,
			     struct ecore_vlan_mac_obj *dst_o,
			     union ecore_classification_ramrod_data *data)
{
	struct ecore_vlan_mac_registry_elem *pos;
	int rc;

	/* Check if we can delete the requested configuration from the first
	 * object.
	 */
	pos = src_o->check_del(pdev, src_o, data);

	/* check if configuration can be added */
	rc = dst_o->check_add(pdev, dst_o, data);

	/* If this classification can not be added (is already set)
	 * or can't be deleted - return an error.
	 */
	if (rc || !pos)
		return FALSE;

	return TRUE;
}

static BOOL ecore_check_move_always_err(
	struct _lm_device_t *pdev,
	struct ecore_vlan_mac_obj *src_o,
	struct ecore_vlan_mac_obj *dst_o,
	union ecore_classification_ramrod_data *data)
{
	return FALSE;
}

static INLINE u8 ecore_vlan_mac_get_rx_tx_flag(struct ecore_vlan_mac_obj *o)
{
	struct ecore_raw_obj *raw = &o->raw;
	u8 rx_tx_flag = 0;

	if ((raw->obj_type == ECORE_OBJ_TYPE_TX) ||
	    (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;

	if ((raw->obj_type == ECORE_OBJ_TYPE_RX) ||
	    (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;

	return rx_tx_flag;
}

void ecore_set_mac_in_nig(struct _lm_device_t *pdev,
			  BOOL add, unsigned char *dev_addr, int index)
{
	u32 wb_data[2];
	u32 reg_offset = PORT_ID(pdev) ? NIG_REG_LLH1_FUNC_MEM :
					 NIG_REG_LLH0_FUNC_MEM;

	if (!IS_MF_SI_MODE(pdev) && !IS_MF_AFEX(pdev))
		return;

	if (index > ECORE_LLH_CAM_MAX_PF_LINE)
		return;

	ECORE_MSG(pdev, "Going to %s LLH configuration at entry %d\n",
		  (add ? "ADD" : "DELETE"), index);

	if (add) {
		/* LLH_FUNC_MEM is a u64 WB register */
		reg_offset += 8*index;

		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
			      (dev_addr[4] << 8) |  dev_addr[5]);
		wb_data[1] = ((dev_addr[0] << 8) |  dev_addr[1]);

		REG_WR_DMAE_LEN(pdev, reg_offset, wb_data, 2);
	}

	REG_WR(pdev, (PORT_ID(pdev) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
		      NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
}
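/*
 * Layout sketch (illustrative only): for a MAC aa:bb:cc:dd:ee:ff the two
 * write-back words programmed above come out as
 *
 *	wb_data[0] = 0xccddeeff;	// dev_addr[2..5]
 *	wb_data[1] = 0x0000aabb;	// dev_addr[0..1]
 *
 * i.e. (assuming the usual low-dword-first ordering of WB registers) the MAC
 * occupies the low 48 bits of the 64-bit LLH_FUNC_MEM entry, with dev_addr[0]
 * as the most significant of those 48 bits.
 */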
/**
 * ecore_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
 *
 * @pdev:	device handle
 * @o:		queue for which we want to configure this rule
 * @add:	if TRUE the command is an ADD command, DEL otherwise
 * @opcode:	CLASSIFY_RULE_OPCODE_XXX
 * @hdr:	pointer to a header to setup
 *
 */
static INLINE void ecore_vlan_mac_set_cmd_hdr_e2(struct _lm_device_t *pdev,
	struct ecore_vlan_mac_obj *o, BOOL add, int opcode,
	struct eth_classify_cmd_header *hdr)
{
	struct ecore_raw_obj *raw = &o->raw;

	hdr->client_id = raw->cl_id;
	hdr->func_id = raw->func_id;

	/* Rx or/and Tx (internal switching) configuration ? */
	hdr->cmd_general_data |=
		ecore_vlan_mac_get_rx_tx_flag(o);

	if (add)
		hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;

	hdr->cmd_general_data |=
		(opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
}

/**
 * ecore_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
 *
 * @cid:	connection id
 * @type:	ECORE_FILTER_XXX_PENDING
 * @hdr:	pointer to header to setup
 * @rule_cnt:	number of rules in this ramrod data
 *
 * currently we always configure one rule and the echo field to contain a CID
 * and an opcode type.
 */
static INLINE void ecore_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
	struct eth_classify_header *hdr, int rule_cnt)
{
	hdr->echo = mm_cpu_to_le32((cid & ECORE_SWCID_MASK) |
				   (type << ECORE_SWCID_SHIFT));
	hdr->rule_cnt = (u8)rule_cnt;
}
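/*
 * Decoding sketch (illustrative only): the completion path can recover both
 * fields from the very same echo word, e.g.
 *
 *	cid  = echo & ECORE_SWCID_MASK;
 *	type = echo >> ECORE_SWCID_SHIFT;
 *
 * assuming ECORE_SWCID_MASK covers exactly the bits below ECORE_SWCID_SHIFT,
 * which is also why the software CID must fit below ECORE_SWCID_SHIFT bits.
 */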
/* hw_config() callbacks */
static void ecore_set_one_mac_e2(struct _lm_device_t *pdev,
				 struct ecore_vlan_mac_obj *o,
				 struct ecore_exeq_elem *elem, int rule_idx,
				 int cam_offset)
{
	struct ecore_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	BOOL add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
	unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
	u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;

	/* Set LLH CAM entry: currently only iSCSI and ETH macs are
	 * relevant. In addition, current implementation is tuned for a
	 * single ETH MAC.
	 *
	 * When multiple unicast ETH MACs PF configuration in switch
	 * independent mode is required (NetQ, multiple netdev MACs,
	 * etc.), consider better utilisation of 8 per function MAC
	 * entries in the LLH register. There are also
	 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the
	 * total number of CAM entries to 16.
	 *
	 * Currently we won't configure NIG for MACs other than a primary ETH
	 * MAC and iSCSI L2 MAC.
	 *
	 * If this MAC is moving from one Queue to another, no need to change
	 * NIG configuration.
	 */
	if (cmd != ECORE_VLAN_MAC_MOVE) {
		if (ECORE_TEST_BIT(ECORE_ISCSI_ETH_MAC, vlan_mac_flags))
			ecore_set_mac_in_nig(pdev, add, mac,
					     ECORE_LLH_CAM_ISCSI_ETH_LINE);
		else if (ECORE_TEST_BIT(ECORE_ETH_MAC, vlan_mac_flags))
			ecore_set_mac_in_nig(pdev, add, mac,
					     ECORE_LLH_CAM_ETH_LINE);
	}

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		mm_memset(data, 0, sizeof(*data));

	/* Setup a command header */
	ecore_vlan_mac_set_cmd_hdr_e2(pdev, o, add, CLASSIFY_RULE_OPCODE_MAC,
				      &rule_entry->mac.header);

	ECORE_MSG(pdev, "About to %s MAC %02x:%02x:%02x:%02x:%02x:%02x for Queue %d\n",
		  (add ? "add" : "delete"), mac[0], mac[1], mac[2], mac[3],
		  mac[4], mac[5], raw->cl_id);

	/* Set a MAC itself */
	ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb,
			      &rule_entry->mac.mac_mid,
			      &rule_entry->mac.mac_lsb, mac);
	rule_entry->mac.inner_mac =
		mm_cpu_to_le16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac);

	/* MOVE: Add a rule that will add this MAC to the target Queue */
	if (cmd == ECORE_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		ecore_vlan_mac_set_cmd_hdr_e2(pdev,
					elem->cmd_data.vlan_mac.target_obj,
					TRUE, CLASSIFY_RULE_OPCODE_MAC,
					&rule_entry->mac.header);

		/* Set a MAC itself */
		ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb,
				      &rule_entry->mac.mac_mid,
				      &rule_entry->mac.mac_lsb, mac);
		rule_entry->mac.inner_mac =
			mm_cpu_to_le16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac);
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
	 * writing
	 */
	ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}

/**
 * ecore_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
 *
 * @pdev:	device handle
 * @o:		queue
 * @type:	ECORE_FILTER_XXX_PENDING
 * @cam_offset:	offset in cam memory
 * @hdr:	pointer to a header to setup
 *
 * E1/E1H
 */
static INLINE void ecore_vlan_mac_set_rdata_hdr_e1x(struct _lm_device_t *pdev,
	struct ecore_vlan_mac_obj *o, int type, int cam_offset,
	struct mac_configuration_hdr *hdr)
{
	struct ecore_raw_obj *r = &o->raw;

	hdr->length = 1;
	hdr->offset = (u8)cam_offset;
	hdr->client_id = mm_cpu_to_le16(0xff);
	hdr->echo = mm_cpu_to_le32((r->cid & ECORE_SWCID_MASK) |
				   (type << ECORE_SWCID_SHIFT));
}

static INLINE void ecore_vlan_mac_set_cfg_entry_e1x(struct _lm_device_t *pdev,
	struct ecore_vlan_mac_obj *o, BOOL add, int opcode, u8 *mac,
	u16 vlan_id, struct mac_configuration_entry *cfg_entry)
{
	struct ecore_raw_obj *r = &o->raw;
	u32 cl_bit_vec = (1 << r->cl_id);

	cfg_entry->clients_bit_vector = mm_cpu_to_le32(cl_bit_vec);
	cfg_entry->pf_id = r->func_id;
	cfg_entry->vlan_id = mm_cpu_to_le16(vlan_id);

	if (add) {
		ECORE_SET_FLAG(cfg_entry->flags,
			       MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			       T_ETH_MAC_COMMAND_SET);
		ECORE_SET_FLAG(cfg_entry->flags,
			       MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE,
			       opcode);

		/* Set a MAC in a ramrod data */
		ecore_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
				      &cfg_entry->middle_mac_addr,
				      &cfg_entry->lsb_mac_addr, mac);
	} else
		ECORE_SET_FLAG(cfg_entry->flags,
			       MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			       T_ETH_MAC_COMMAND_INVALIDATE);
}

static INLINE void ecore_vlan_mac_set_rdata_e1x(struct _lm_device_t *pdev,
	struct ecore_vlan_mac_obj *o, int type, int cam_offset, BOOL add,
	u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
{
	struct mac_configuration_entry *cfg_entry = &config->config_table[0];
	struct ecore_raw_obj *raw = &o->raw;

	ecore_vlan_mac_set_rdata_hdr_e1x(pdev, o, type, cam_offset,
					 &config->hdr);
	ecore_vlan_mac_set_cfg_entry_e1x(pdev, o, add, opcode, mac, vlan_id,
					 cfg_entry);

	ECORE_MSG(pdev, "%s MAC %02x:%02x:%02x:%02x:%02x:%02x CLID %d CAM offset %d\n",
		  (add ? "setting" : "clearing"),
		  mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
		  raw->cl_id, cam_offset);
}

/**
 * ecore_set_one_mac_e1x - fill a single MAC rule ramrod data
 *
 * @pdev:	device handle
 * @o:		ecore_vlan_mac_obj
 * @elem:	ecore_exeq_elem
 * @rule_idx:	rule_idx
 * @cam_offset:	cam_offset
 */
static void ecore_set_one_mac_e1x(struct _lm_device_t *pdev,
				  struct ecore_vlan_mac_obj *o,
				  struct ecore_exeq_elem *elem, int rule_idx,
				  int cam_offset)
{
	struct ecore_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)(raw->rdata);
	/* 57710 and 57711 do not support MOVE command,
	 * so it's either ADD or DEL
	 */
	BOOL add = (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
		TRUE : FALSE;
	/* Reset the ramrod data buffer */
	mm_memset(config, 0, sizeof(*config));

	ecore_vlan_mac_set_rdata_e1x(pdev, o, raw->state,
				     cam_offset, add,
				     elem->cmd_data.vlan_mac.u.mac.mac, 0,
				     ETH_VLAN_FILTER_ANY_VLAN, config);
}

static void ecore_set_one_vlan_e2(struct _lm_device_t *pdev,
				  struct ecore_vlan_mac_obj *o,
				  struct ecore_exeq_elem *elem, int rule_idx,
				  int cam_offset)
{
	struct ecore_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
	BOOL add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
	u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		mm_memset(data, 0, sizeof(*data));

	/* Set a rule header */
	ecore_vlan_mac_set_cmd_hdr_e2(pdev, o, add, CLASSIFY_RULE_OPCODE_VLAN,
				      &rule_entry->vlan.header);

	ECORE_MSG(pdev, "About to %s VLAN %d\n", (add ? "add" : "delete"),
		  vlan);

	/* Set a VLAN itself */
	rule_entry->vlan.vlan = mm_cpu_to_le16(vlan);

	/* MOVE: Add a rule that will add this VLAN to the target Queue */
	if (cmd == ECORE_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		ecore_vlan_mac_set_cmd_hdr_e2(pdev,
					elem->cmd_data.vlan_mac.target_obj,
					TRUE, CLASSIFY_RULE_OPCODE_VLAN,
					&rule_entry->vlan.header);

		/* Set a VLAN itself */
		rule_entry->vlan.vlan = mm_cpu_to_le16(vlan);
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
	 * writing
	 */
	ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}

static void ecore_set_one_vlan_mac_e2(struct _lm_device_t *pdev,
				      struct ecore_vlan_mac_obj *o,
				      struct ecore_exeq_elem *elem,
				      int rule_idx, int cam_offset)
{
	struct ecore_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
	BOOL add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
	u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
	u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		mm_memset(data, 0, sizeof(*data));

	/* Set a rule header */
	ecore_vlan_mac_set_cmd_hdr_e2(pdev, o, add, CLASSIFY_RULE_OPCODE_PAIR,
				      &rule_entry->pair.header);

	/* Set VLAN and MAC themselves */
	rule_entry->pair.vlan = mm_cpu_to_le16(vlan);
	ecore_set_fw_mac_addr(&rule_entry->pair.mac_msb,
			      &rule_entry->pair.mac_mid,
			      &rule_entry->pair.mac_lsb, mac);
	rule_entry->pair.inner_mac =
		elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac;

	/* MOVE: Add a rule that will add this MAC to the target Queue */
	if (cmd == ECORE_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		ecore_vlan_mac_set_cmd_hdr_e2(pdev,
					elem->cmd_data.vlan_mac.target_obj,
					TRUE, CLASSIFY_RULE_OPCODE_PAIR,
					&rule_entry->pair.header);

		/* Set a VLAN itself */
		rule_entry->pair.vlan = mm_cpu_to_le16(vlan);
		ecore_set_fw_mac_addr(&rule_entry->pair.mac_msb,
				      &rule_entry->pair.mac_mid,
				      &rule_entry->pair.mac_lsb, mac);
		rule_entry->pair.inner_mac =
			elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac;
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
	 * writing
	 */
	ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}

/**
 * ecore_set_one_vlan_mac_e1h - fill a single VLAN-MAC rule ramrod data
 *
 * @pdev:	device handle
 * @o:		ecore_vlan_mac_obj
 * @elem:	ecore_exeq_elem
 * @rule_idx:	rule_idx
 * @cam_offset:	cam_offset
 */
static void ecore_set_one_vlan_mac_e1h(struct _lm_device_t *pdev,
				       struct ecore_vlan_mac_obj *o,
				       struct ecore_exeq_elem *elem,
				       int rule_idx, int cam_offset)
{
	struct ecore_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)(raw->rdata);
	/* 57710 and 57711 do not support MOVE command,
	 * so it's either ADD or DEL
	 */
	BOOL add = (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
		TRUE : FALSE;

	/* Reset the ramrod data buffer */
	mm_memset(config, 0, sizeof(*config));

	ecore_vlan_mac_set_rdata_e1x(pdev, o, ECORE_FILTER_VLAN_MAC_PENDING,
				     cam_offset, add,
				     elem->cmd_data.vlan_mac.u.vlan_mac.mac,
				     elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
				     ETH_VLAN_FILTER_CLASSIFY, config);
}

#define list_next_entry(pos, member) \
	list_entry((pos)->member.next, typeof(*(pos)), member)

/**
 * ecore_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
 *
 * @pdev:	device handle
 * @p:		command parameters
 * @ppos:	pointer to the cookie
 *
 * reconfigure next MAC/VLAN/VLAN-MAC element from the
 * previously configured elements list.
 *
 * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken
 * into account
 *
 * pointer to the cookie - that should be given back in the next call to make
 * function handle the next element. If *ppos is set to NULL it will restart the
 * iterator. If returned *ppos == NULL this means that the last element has been
 * handled.
 *
 */
static int ecore_vlan_mac_restore(struct _lm_device_t *pdev,
				  struct ecore_vlan_mac_ramrod_params *p,
				  struct ecore_vlan_mac_registry_elem **ppos)
{
	struct ecore_vlan_mac_registry_elem *pos;
	struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;

	/* If list is empty - there is nothing to do here */
	if (ECORE_LIST_IS_EMPTY(&o->head)) {
		*ppos = NULL;
		return 0;
	}

	/* make a step... */
	if (*ppos == NULL)
		*ppos = ECORE_LIST_FIRST_ENTRY(&o->head,
					       struct ecore_vlan_mac_registry_elem,
					       link);
	else
		*ppos = ECORE_LIST_NEXT(*ppos, link,
					struct ecore_vlan_mac_registry_elem);

	pos = *ppos;

	/* If it's the last step - return NULL */
	if (ECORE_LIST_IS_LAST(&pos->link, &o->head))
		*ppos = NULL;

	/* Prepare a 'user_req' */
	mm_memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));

	/* Set the command */
	p->user_req.cmd = ECORE_VLAN_MAC_ADD;

	/* Set vlan_mac_flags */
	p->user_req.vlan_mac_flags = pos->vlan_mac_flags;

	/* Set a restore bit */
	ECORE_SET_BIT_NA(RAMROD_RESTORE, &p->ramrod_flags);

	return ecore_config_vlan_mac(pdev, p);
}
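/*
 * Caller-side sketch (illustrative only): the *ppos cookie lets a caller
 * re-add every previously registered element one call at a time.  "p" is
 * assumed to be a prepared struct ecore_vlan_mac_ramrod_params for the
 * object being restored.
 *
 *	struct ecore_vlan_mac_registry_elem *pos = NULL;
 *	int rc;
 *
 *	ECORE_SET_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *	do {
 *		rc = ecore_vlan_mac_restore(pdev, &p, &pos);
 *	} while ((rc == ECORE_SUCCESS) && (pos != NULL));
 *	// pos == NULL: the last registered element has been handled
 */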
/* ecore_exeq_get_mac/ecore_exeq_get_vlan/ecore_exeq_get_vlan_mac return a
 * pointer to an element matching the given criteria, or NULL if no such
 * element has been found.
 */
static struct ecore_exeq_elem *ecore_exeq_get_mac(
	struct ecore_exe_queue_obj *o,
	struct ecore_exeq_elem *elem)
{
	struct ecore_exeq_elem *pos;
	struct ecore_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;

	/* Check pending for execution commands */
	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
				  struct ecore_exeq_elem)
		if (mm_memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
			      sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

static struct ecore_exeq_elem *ecore_exeq_get_vlan(
	struct ecore_exe_queue_obj *o,
	struct ecore_exeq_elem *elem)
{
	struct ecore_exeq_elem *pos;
	struct ecore_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;

	/* Check pending for execution commands */
	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
				  struct ecore_exeq_elem)
		if (mm_memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
			      sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

static struct ecore_exeq_elem *ecore_exeq_get_vlan_mac(
	struct ecore_exe_queue_obj *o,
	struct ecore_exeq_elem *elem)
{
	struct ecore_exeq_elem *pos;
	struct ecore_vlan_mac_ramrod_data *data =
		&elem->cmd_data.vlan_mac.u.vlan_mac;

	/* Check pending for execution commands */
	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
				  struct ecore_exeq_elem)
		if (mm_memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
			      sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

/**
 * ecore_validate_vlan_mac_add - check if an ADD command can be executed
 *
 * @pdev:	device handle
 * @qo:		ecore_qable_obj
 * @elem:	ecore_exeq_elem
 *
 * Checks that the requested configuration can be added. If yes and if
 * requested, consume CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 *
 */
static INLINE int ecore_validate_vlan_mac_add(struct _lm_device_t *pdev,
					      union ecore_qable_obj *qo,
					      struct ecore_exeq_elem *elem)
{
	struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
	int rc;

	/* Check the registry */
	rc = o->check_add(pdev, o, &elem->cmd_data.vlan_mac.u);
	if (rc) {
		ECORE_MSG(pdev, "ADD command is not allowed considering current registry state.\n");
		return rc;
	}

	/* Check if there is a pending ADD command for this
	 * MAC/VLAN/VLAN-MAC. Return an error if there is.
	 */
	if (exeq->get(exeq, elem)) {
		ECORE_MSG(pdev, "There is a pending ADD command already\n");
		return ECORE_EXISTS;
	}

	/* TODO: Check the pending MOVE from other objects where this
	 * object is a destination object.
	 */

	/* Consume the credit if not requested not to */
	if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
			     &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      o->get_credit(o)))
		return ECORE_INVAL;

	return ECORE_SUCCESS;
}

/**
 * ecore_validate_vlan_mac_del - check if the DEL command can be executed
 *
 * @pdev:	device handle
 * @qo:		qable object to check
 * @elem:	element that needs to be deleted
 *
 * Checks that the requested configuration can be deleted. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static INLINE int ecore_validate_vlan_mac_del(struct _lm_device_t *pdev,
					      union ecore_qable_obj *qo,
					      struct ecore_exeq_elem *elem)
{
	struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
	struct ecore_vlan_mac_registry_elem *pos;
	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
	struct ecore_exeq_elem query_elem;

	/* If this classification can not be deleted (doesn't exist)
	 * - return ECORE_EXISTS.
	 */
	pos = o->check_del(pdev, o, &elem->cmd_data.vlan_mac.u);
	if (!pos) {
		ECORE_MSG(pdev, "DEL command is not allowed considering current registry state\n");
		return ECORE_EXISTS;
	}

	/* Check if there are pending DEL or MOVE commands for this
	 * MAC/VLAN/VLAN-MAC. Return an error if so.
	 */
	mm_memcpy(&query_elem, elem, sizeof(query_elem));

	/* Check for MOVE commands */
	query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_MOVE;
	if (exeq->get(exeq, &query_elem)) {
		ECORE_ERR("There is a pending MOVE command already\n");
		return ECORE_INVAL;
	}

	/* Check for DEL commands */
	if (exeq->get(exeq, elem)) {
		ECORE_MSG(pdev, "There is a pending DEL command already\n");
		return ECORE_EXISTS;
	}

	/* Return the credit to the credit pool if not requested not to */
	if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
			     &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      o->put_credit(o))) {
		ECORE_ERR("Failed to return a credit\n");
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}

/**
 * ecore_validate_vlan_mac_move - check if the MOVE command can be executed
 *
 * @pdev:	device handle
 * @qo:		qable object to check (source)
 * @elem:	element that needs to be moved
 *
 * Checks that the requested configuration can be moved. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static INLINE int ecore_validate_vlan_mac_move(struct _lm_device_t *pdev,
					       union ecore_qable_obj *qo,
					       struct ecore_exeq_elem *elem)
{
	struct ecore_vlan_mac_obj *src_o = &qo->vlan_mac;
	struct ecore_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
	struct ecore_exeq_elem query_elem;
	struct ecore_exe_queue_obj *src_exeq = &src_o->exe_queue;
	struct ecore_exe_queue_obj *dest_exeq = &dest_o->exe_queue;

	/* Check if we can perform this operation based on the current registry
	 * state.
	 */
	if (!src_o->check_move(pdev, src_o, dest_o,
			       &elem->cmd_data.vlan_mac.u)) {
		ECORE_MSG(pdev, "MOVE command is not allowed considering current registry state\n");
		return ECORE_INVAL;
	}

	/* Check if there is an already pending DEL or MOVE command for the
	 * source object or ADD command for a destination object. Return an
	 * error if so.
	 */
	mm_memcpy(&query_elem, elem, sizeof(query_elem));

	/* Check DEL on source */
	query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
	if (src_exeq->get(src_exeq, &query_elem)) {
		ECORE_ERR("There is a pending DEL command on the source queue already\n");
		return ECORE_INVAL;
	}

	/* Check MOVE on source */
	if (src_exeq->get(src_exeq, elem)) {
		ECORE_MSG(pdev, "There is a pending MOVE command already\n");
		return ECORE_EXISTS;
	}

	/* Check ADD on destination */
	query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
	if (dest_exeq->get(dest_exeq, &query_elem)) {
		ECORE_ERR("There is a pending ADD command on the destination queue already\n");
		return ECORE_INVAL;
	}

	/* Consume the credit if not requested not to */
	if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT_DEST,
			     &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      dest_o->get_credit(dest_o)))
		return ECORE_INVAL;

	if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
			     &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      src_o->put_credit(src_o))) {
		/* return the credit taken from dest... */
*/ 1798 dest_o->put_credit(dest_o); 1799 return ECORE_INVAL; 1800 } 1801 1802 return ECORE_SUCCESS; 1803 } 1804 1805 static int ecore_validate_vlan_mac(struct _lm_device_t *pdev, 1806 union ecore_qable_obj *qo, 1807 struct ecore_exeq_elem *elem) 1808 { 1809 switch (elem->cmd_data.vlan_mac.cmd) { 1810 case ECORE_VLAN_MAC_ADD: 1811 return ecore_validate_vlan_mac_add(pdev, qo, elem); 1812 case ECORE_VLAN_MAC_DEL: 1813 return ecore_validate_vlan_mac_del(pdev, qo, elem); 1814 case ECORE_VLAN_MAC_MOVE: 1815 return ecore_validate_vlan_mac_move(pdev, qo, elem); 1816 default: 1817 return ECORE_INVAL; 1818 } 1819 } 1820 1821 static int ecore_remove_vlan_mac(struct _lm_device_t *pdev, 1822 union ecore_qable_obj *qo, 1823 struct ecore_exeq_elem *elem) 1824 { 1825 int rc = 0; 1826 1827 /* If consumption wasn't required, nothing to do */ 1828 if (ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT, 1829 &elem->cmd_data.vlan_mac.vlan_mac_flags)) 1830 return ECORE_SUCCESS; 1831 1832 switch (elem->cmd_data.vlan_mac.cmd) { 1833 case ECORE_VLAN_MAC_ADD: 1834 case ECORE_VLAN_MAC_MOVE: 1835 rc = qo->vlan_mac.put_credit(&qo->vlan_mac); 1836 break; 1837 case ECORE_VLAN_MAC_DEL: 1838 rc = qo->vlan_mac.get_credit(&qo->vlan_mac); 1839 break; 1840 default: 1841 return ECORE_INVAL; 1842 } 1843 1844 if (rc != TRUE) 1845 return ECORE_INVAL; 1846 1847 return ECORE_SUCCESS; 1848 } 1849 1850 /** 1851 * ecore_wait_vlan_mac - passively wait for 5 seconds until all work completes. 1852 * 1853 * @pdev: device handle 1854 * @o: ecore_vlan_mac_obj 1855 * 1856 */ 1857 static int ecore_wait_vlan_mac(struct _lm_device_t *pdev, 1858 struct ecore_vlan_mac_obj *o) 1859 { 1860 int cnt = 5000, rc; 1861 struct ecore_exe_queue_obj *exeq = &o->exe_queue; 1862 struct ecore_raw_obj *raw = &o->raw; 1863 1864 while (cnt--) { 1865 /* Wait for the current command to complete */ 1866 rc = raw->wait_comp(pdev, raw); 1867 if (rc) 1868 return rc; 1869 1870 /* Wait until there are no pending commands */ 1871 if (!ecore_exe_queue_empty(exeq)) 1872 mm_wait(pdev, 1000); 1873 else 1874 return ECORE_SUCCESS; 1875 } 1876 1877 return ECORE_TIMEOUT; 1878 } 1879 1880 static int __ecore_vlan_mac_execute_step(struct _lm_device_t *pdev, 1881 struct ecore_vlan_mac_obj *o, 1882 unsigned long *ramrod_flags) 1883 { 1884 int rc = ECORE_SUCCESS; 1885 1886 ECORE_SPIN_LOCK_BH(&o->exe_queue.lock); 1887 1888 ECORE_MSG(pdev, "vlan_mac_execute_step - trying to take writer lock\n"); 1889 rc = __ecore_vlan_mac_h_write_trylock(pdev, o); 1890 1891 if (rc != ECORE_SUCCESS) { 1892 __ecore_vlan_mac_h_pend(pdev, o, *ramrod_flags); 1893 1894 /** Calling function should not diffrentiate between this case 1895 * and the case in which there is already a pending ramrod 1896 */ 1897 rc = ECORE_PENDING; 1898 } else { 1899 rc = ecore_exe_queue_step(pdev, &o->exe_queue, ramrod_flags); 1900 } 1901 ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock); 1902 1903 return rc; 1904 } 1905 1906 /** 1907 * ecore_complete_vlan_mac - complete one VLAN-MAC ramrod 1908 * 1909 * @pdev: device handle 1910 * @o: ecore_vlan_mac_obj 1911 * @cqe: 1912 * @cont: if TRUE schedule next execution chunk 1913 * 1914 */ 1915 static int ecore_complete_vlan_mac(struct _lm_device_t *pdev, 1916 struct ecore_vlan_mac_obj *o, 1917 union event_ring_elem *cqe, 1918 unsigned long *ramrod_flags) 1919 { 1920 struct ecore_raw_obj *r = &o->raw; 1921 int rc; 1922 1923 /* Clearing the pending list & raw state should be made 1924 * atomically (as execution flow assumes they represent the same) 1925 */ 1926 ECORE_SPIN_LOCK_BH(&o->exe_queue.lock); 1927 
1928 /* Reset pending list */ 1929 __ecore_exe_queue_reset_pending(pdev, &o->exe_queue); 1930 1931 /* Clear pending */ 1932 r->clear_pending(r); 1933 1934 ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock); 1935 1936 /* If ramrod failed this is most likely a SW bug */ 1937 if (cqe->message.error) 1938 return ECORE_INVAL; 1939 1940 /* Run the next bulk of pending commands if requested */ 1941 if (ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags)) { 1942 rc = __ecore_vlan_mac_execute_step(pdev, o, ramrod_flags); 1943 if (rc < 0) 1944 return rc; 1945 } 1946 1947 /* If there is more work to do return PENDING */ 1948 if (!ecore_exe_queue_empty(&o->exe_queue)) 1949 return ECORE_PENDING; 1950 1951 return ECORE_SUCCESS; 1952 } 1953 1954 /** 1955 * ecore_optimize_vlan_mac - optimize ADD and DEL commands. 1956 * 1957 * @pdev: device handle 1958 * @o: ecore_qable_obj 1959 * @elem: ecore_exeq_elem 1960 */ 1961 static int ecore_optimize_vlan_mac(struct _lm_device_t *pdev, 1962 union ecore_qable_obj *qo, 1963 struct ecore_exeq_elem *elem) 1964 { 1965 struct ecore_exeq_elem query, *pos; 1966 struct ecore_vlan_mac_obj *o = &qo->vlan_mac; 1967 struct ecore_exe_queue_obj *exeq = &o->exe_queue; 1968 1969 mm_memcpy(&query, elem, sizeof(query)); 1970 1971 switch (elem->cmd_data.vlan_mac.cmd) { 1972 case ECORE_VLAN_MAC_ADD: 1973 query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL; 1974 break; 1975 case ECORE_VLAN_MAC_DEL: 1976 query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD; 1977 break; 1978 default: 1979 /* Don't handle anything other than ADD or DEL */ 1980 return 0; 1981 } 1982 1983 /* If we found the appropriate element - delete it */ 1984 pos = exeq->get(exeq, &query); 1985 if (pos) { 1986 1987 /* Return the credit of the optimized command */ 1988 if (!ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT, 1989 &pos->cmd_data.vlan_mac.vlan_mac_flags)) { 1990 if ((query.cmd_data.vlan_mac.cmd == 1991 ECORE_VLAN_MAC_ADD) && !o->put_credit(o)) { 1992 ECORE_ERR("Failed to return the credit for the optimized ADD command\n"); 1993 return ECORE_INVAL; 1994 } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */ 1995 ECORE_ERR("Failed to recover the credit from the optimized DEL command\n"); 1996 return ECORE_INVAL; 1997 } 1998 } 1999 2000 ECORE_MSG(pdev, "Optimizing %s command\n", 2001 (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ? 2002 "ADD" : "DEL"); 2003 2004 ECORE_LIST_REMOVE_ENTRY(&pos->link, &exeq->exe_queue); 2005 ecore_exe_queue_free_elem(pdev, pos); 2006 return 1; 2007 } 2008 2009 return 0; 2010 } 2011 2012 /** 2013 * ecore_vlan_mac_get_registry_elem - prepare a registry element 2014 * 2015 * @pdev: device handle 2016 * @o: 2017 * @elem: 2018 * @restore: 2019 * @re: 2020 * 2021 * prepare a registry element according to the current command request. 2022 */ 2023 static INLINE int ecore_vlan_mac_get_registry_elem( 2024 struct _lm_device_t *pdev, 2025 struct ecore_vlan_mac_obj *o, 2026 struct ecore_exeq_elem *elem, 2027 BOOL restore, 2028 struct ecore_vlan_mac_registry_elem **re) 2029 { 2030 enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd; 2031 struct ecore_vlan_mac_registry_elem *reg_elem; 2032 2033 /* Allocate a new registry element if needed. 
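	 * Only a non-restore ADD or MOVE needs a fresh element here; DEL and
	 * RESTORE reuse the entry already present in the registry, which is
	 * looked up via o->check_del() below.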
*/ 2034 if (!restore && 2035 ((cmd == ECORE_VLAN_MAC_ADD) || (cmd == ECORE_VLAN_MAC_MOVE))) { 2036 reg_elem = ECORE_ZALLOC(sizeof(*reg_elem), GFP_ATOMIC, pdev); 2037 if (!reg_elem) 2038 return ECORE_NOMEM; 2039 2040 /* Get a new CAM offset */ 2041 if (!o->get_cam_offset(o, ®_elem->cam_offset)) { 2042 /* This shall never happen, because we have checked the 2043 * CAM availability in the 'validate'. 2044 */ 2045 DbgBreakIf(1); 2046 ECORE_FREE(pdev, reg_elem, sizeof(*reg_elem)); 2047 return ECORE_INVAL; 2048 } 2049 2050 ECORE_MSG(pdev, "Got cam offset %d\n", reg_elem->cam_offset); 2051 2052 /* Set a VLAN-MAC data */ 2053 mm_memcpy(®_elem->u, &elem->cmd_data.vlan_mac.u, 2054 sizeof(reg_elem->u)); 2055 2056 /* Copy the flags (needed for DEL and RESTORE flows) */ 2057 reg_elem->vlan_mac_flags = 2058 elem->cmd_data.vlan_mac.vlan_mac_flags; 2059 } else /* DEL, RESTORE */ 2060 reg_elem = o->check_del(pdev, o, &elem->cmd_data.vlan_mac.u); 2061 2062 *re = reg_elem; 2063 return ECORE_SUCCESS; 2064 } 2065 2066 /** 2067 * ecore_execute_vlan_mac - execute vlan mac command 2068 * 2069 * @pdev: device handle 2070 * @qo: 2071 * @exe_chunk: 2072 * @ramrod_flags: 2073 * 2074 * go and send a ramrod! 2075 */ 2076 static int ecore_execute_vlan_mac(struct _lm_device_t *pdev, 2077 union ecore_qable_obj *qo, 2078 d_list_t *exe_chunk, 2079 unsigned long *ramrod_flags) 2080 { 2081 struct ecore_exeq_elem *elem; 2082 struct ecore_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj; 2083 struct ecore_raw_obj *r = &o->raw; 2084 int rc, idx = 0; 2085 BOOL restore = ECORE_TEST_BIT(RAMROD_RESTORE, ramrod_flags); 2086 BOOL drv_only = ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags); 2087 struct ecore_vlan_mac_registry_elem *reg_elem; 2088 enum ecore_vlan_mac_cmd cmd; 2089 2090 /* If DRIVER_ONLY execution is requested, cleanup a registry 2091 * and exit. Otherwise send a ramrod to FW. 2092 */ 2093 if (!drv_only) { 2094 DbgBreakIf(r->check_pending(r)); 2095 2096 /* Set pending */ 2097 r->set_pending(r); 2098 2099 /* Fill the ramrod data */ 2100 ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link, 2101 struct ecore_exeq_elem) { 2102 cmd = elem->cmd_data.vlan_mac.cmd; 2103 /* We will add to the target object in MOVE command, so 2104 * change the object for a CAM search. 2105 */ 2106 if (cmd == ECORE_VLAN_MAC_MOVE) 2107 cam_obj = elem->cmd_data.vlan_mac.target_obj; 2108 else 2109 cam_obj = o; 2110 2111 rc = ecore_vlan_mac_get_registry_elem(pdev, cam_obj, 2112 elem, restore, 2113 ®_elem); 2114 if (rc) 2115 goto error_exit; 2116 2117 DbgBreakIf(!reg_elem); 2118 2119 /* Push a new entry into the registry */ 2120 if (!restore && 2121 ((cmd == ECORE_VLAN_MAC_ADD) || 2122 (cmd == ECORE_VLAN_MAC_MOVE))) 2123 ECORE_LIST_PUSH_HEAD(®_elem->link, 2124 &cam_obj->head); 2125 2126 /* Configure a single command in a ramrod data buffer */ 2127 o->set_one_rule(pdev, o, elem, idx, 2128 reg_elem->cam_offset); 2129 2130 /* MOVE command consumes 2 entries in the ramrod data */ 2131 if (cmd == ECORE_VLAN_MAC_MOVE) 2132 idx += 2; 2133 else 2134 idx++; 2135 } 2136 2137 /* No need for an explicit memory barrier here as long as we 2138 * ensure the ordering of writing to the SPQ element 2139 * and updating of the SPQ producer which involves a memory 2140 * read. If the memory read is removed we will have to put a 2141 * full memory barrier there (inside ecore_sp_post()). 
2142 */ 2143 rc = ecore_sp_post(pdev, o->ramrod_cmd, r->cid, 2144 r->rdata_mapping.as_u64, 2145 ETH_CONNECTION_TYPE); 2146 if (rc) 2147 goto error_exit; 2148 } 2149 2150 /* Now, when we are done with the ramrod - clean up the registry */ 2151 ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link, 2152 struct ecore_exeq_elem) { 2153 cmd = elem->cmd_data.vlan_mac.cmd; 2154 if ((cmd == ECORE_VLAN_MAC_DEL) || 2155 (cmd == ECORE_VLAN_MAC_MOVE)) { 2156 reg_elem = o->check_del(pdev, o, 2157 &elem->cmd_data.vlan_mac.u); 2158 2159 DbgBreakIf(!reg_elem); 2160 2161 o->put_cam_offset(o, reg_elem->cam_offset); 2162 ECORE_LIST_REMOVE_ENTRY(®_elem->link, &o->head); 2163 ECORE_FREE(pdev, reg_elem, sizeof(*reg_elem)); 2164 } 2165 } 2166 2167 if (!drv_only) 2168 return ECORE_PENDING; 2169 else 2170 return ECORE_SUCCESS; 2171 2172 error_exit: 2173 r->clear_pending(r); 2174 2175 /* Cleanup a registry in case of a failure */ 2176 ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link, 2177 struct ecore_exeq_elem) { 2178 cmd = elem->cmd_data.vlan_mac.cmd; 2179 2180 if (cmd == ECORE_VLAN_MAC_MOVE) 2181 cam_obj = elem->cmd_data.vlan_mac.target_obj; 2182 else 2183 cam_obj = o; 2184 2185 /* Delete all newly added above entries */ 2186 if (!restore && 2187 ((cmd == ECORE_VLAN_MAC_ADD) || 2188 (cmd == ECORE_VLAN_MAC_MOVE))) { 2189 reg_elem = o->check_del(pdev, cam_obj, 2190 &elem->cmd_data.vlan_mac.u); 2191 if (reg_elem) { 2192 ECORE_LIST_REMOVE_ENTRY(®_elem->link, 2193 &cam_obj->head); 2194 ECORE_FREE(pdev, reg_elem, sizeof(*reg_elem)); 2195 } 2196 } 2197 } 2198 2199 return rc; 2200 } 2201 2202 static INLINE int ecore_vlan_mac_push_new_cmd( 2203 struct _lm_device_t *pdev, 2204 struct ecore_vlan_mac_ramrod_params *p) 2205 { 2206 struct ecore_exeq_elem *elem; 2207 struct ecore_vlan_mac_obj *o = p->vlan_mac_obj; 2208 BOOL restore = ECORE_TEST_BIT(RAMROD_RESTORE, &p->ramrod_flags); 2209 2210 /* Allocate the execution queue element */ 2211 elem = ecore_exe_queue_alloc_elem(pdev); 2212 if (!elem) 2213 return ECORE_NOMEM; 2214 2215 /* Set the command 'length' */ 2216 switch (p->user_req.cmd) { 2217 case ECORE_VLAN_MAC_MOVE: 2218 elem->cmd_len = 2; 2219 break; 2220 default: 2221 elem->cmd_len = 1; 2222 } 2223 2224 /* Fill the object specific info */ 2225 mm_memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req)); 2226 2227 /* Try to add a new command to the pending list */ 2228 return ecore_exe_queue_add(pdev, &o->exe_queue, elem, restore); 2229 } 2230 2231 /** 2232 * ecore_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules. 2233 * 2234 * @pdev: device handle 2235 * @p: 2236 * 2237 */ 2238 int ecore_config_vlan_mac(struct _lm_device_t *pdev, 2239 struct ecore_vlan_mac_ramrod_params *p) 2240 { 2241 int rc = ECORE_SUCCESS; 2242 struct ecore_vlan_mac_obj *o = p->vlan_mac_obj; 2243 unsigned long *ramrod_flags = &p->ramrod_flags; 2244 BOOL cont = ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags); 2245 struct ecore_raw_obj *raw = &o->raw; 2246 2247 /* 2248 * Add new elements to the execution list for commands that require it. 
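	 * When RAMROD_CONT is set the caller only wants to continue draining
	 * commands that were queued earlier, so nothing new is pushed here.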
*/
	if (!cont) {
		rc = ecore_vlan_mac_push_new_cmd(pdev, p);
		if (rc)
			return rc;
	}

	/* If nothing will be executed further in this iteration we want to
	 * return PENDING if there are pending commands
	 */
	if (!ecore_exe_queue_empty(&o->exe_queue))
		rc = ECORE_PENDING;

	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
		ECORE_MSG(pdev, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
		raw->clear_pending(raw);
	}

	/* Execute commands if required */
	if (cont || ECORE_TEST_BIT(RAMROD_EXEC, ramrod_flags) ||
	    ECORE_TEST_BIT(RAMROD_COMP_WAIT, ramrod_flags)) {
		rc = __ecore_vlan_mac_execute_step(pdev, p->vlan_mac_obj,
						   &p->ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
	 * then the user wants to wait until the last command is done.
	 */
	if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
		/* Wait maximum for the current exe_queue length iterations plus
		 * one (for the current pending command).
		 */
		int max_iterations = ecore_exe_queue_length(&o->exe_queue) + 1;

		while (!ecore_exe_queue_empty(&o->exe_queue) &&
		       max_iterations--) {

			/* Wait for the current command to complete */
			rc = raw->wait_comp(pdev, raw);
			if (rc)
				return rc;

			/* Make a next step */
			rc = __ecore_vlan_mac_execute_step(pdev,
							   p->vlan_mac_obj,
							   &p->ramrod_flags);
			if (rc < 0)
				return rc;
		}

		return ECORE_SUCCESS;
	}

	return rc;
}

/**
 * ecore_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
 *
 * @pdev: device handle
 * @o:
 * @vlan_mac_flags:
 * @ramrod_flags: execution flags to be used for this deletion
 *
 * Returns 0 if the last operation has completed successfully and there are no
 * more elements left, a positive value if the last operation has completed
 * successfully and there are more previously configured elements, and a
 * negative value if the current operation has failed.
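 *
 * A minimal calling sketch (illustrative only; the flag values used are
 * hypothetical and depend on the caller's context):
 *
 *	unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
 *
 *	ECORE_SET_BIT_NA(RAMROD_COMP_WAIT, &ramrod_flags);
 *	rc = o->delete_all(pdev, o, &vlan_mac_flags, &ramrod_flags);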
2319 */ 2320 static int ecore_vlan_mac_del_all(struct _lm_device_t *pdev, 2321 struct ecore_vlan_mac_obj *o, 2322 unsigned long *vlan_mac_flags, 2323 unsigned long *ramrod_flags) 2324 { 2325 struct ecore_vlan_mac_registry_elem *pos = NULL; 2326 struct ecore_vlan_mac_ramrod_params p; 2327 struct ecore_exe_queue_obj *exeq = &o->exe_queue; 2328 struct ecore_exeq_elem *exeq_pos, *exeq_pos_n; 2329 unsigned long flags; 2330 int read_lock; 2331 int rc = 0; 2332 2333 /* Clear pending commands first */ 2334 2335 ECORE_SPIN_LOCK_BH(&exeq->lock); 2336 2337 ECORE_LIST_FOR_EACH_ENTRY_SAFE(exeq_pos, exeq_pos_n, 2338 &exeq->exe_queue, link, 2339 struct ecore_exeq_elem) { 2340 flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags; 2341 if (ECORE_VLAN_MAC_CMP_FLAGS(flags) == 2342 ECORE_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) { 2343 rc = exeq->remove(pdev, exeq->owner, exeq_pos); 2344 if (rc) { 2345 ECORE_ERR("Failed to remove command\n"); 2346 ECORE_SPIN_UNLOCK_BH(&exeq->lock); 2347 return rc; 2348 } 2349 ECORE_LIST_REMOVE_ENTRY(&exeq_pos->link, 2350 &exeq->exe_queue); 2351 ecore_exe_queue_free_elem(pdev, exeq_pos); 2352 } 2353 } 2354 2355 ECORE_SPIN_UNLOCK_BH(&exeq->lock); 2356 2357 /* Prepare a command request */ 2358 mm_memset(&p, 0, sizeof(p)); 2359 p.vlan_mac_obj = o; 2360 p.ramrod_flags = *ramrod_flags; 2361 p.user_req.cmd = ECORE_VLAN_MAC_DEL; 2362 2363 /* Add all but the last VLAN-MAC to the execution queue without actually 2364 * execution anything. 2365 */ 2366 ECORE_CLEAR_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags); 2367 ECORE_CLEAR_BIT_NA(RAMROD_EXEC, &p.ramrod_flags); 2368 ECORE_CLEAR_BIT_NA(RAMROD_CONT, &p.ramrod_flags); 2369 2370 ECORE_MSG(pdev, "vlan_mac_del_all -- taking vlan_mac_lock (reader)\n"); 2371 read_lock = ecore_vlan_mac_h_read_lock(pdev, o); 2372 if (read_lock != ECORE_SUCCESS) 2373 return read_lock; 2374 2375 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link, 2376 struct ecore_vlan_mac_registry_elem) { 2377 flags = pos->vlan_mac_flags; 2378 if (ECORE_VLAN_MAC_CMP_FLAGS(flags) == 2379 ECORE_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) { 2380 p.user_req.vlan_mac_flags = pos->vlan_mac_flags; 2381 mm_memcpy(&p.user_req.u, &pos->u, sizeof(pos->u)); 2382 rc = ecore_config_vlan_mac(pdev, &p); 2383 if (rc < 0) { 2384 ECORE_ERR("Failed to add a new DEL command\n"); 2385 ecore_vlan_mac_h_read_unlock(pdev, o); 2386 return rc; 2387 } 2388 } 2389 } 2390 2391 ECORE_MSG(pdev, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)\n"); 2392 ecore_vlan_mac_h_read_unlock(pdev, o); 2393 2394 p.ramrod_flags = *ramrod_flags; 2395 ECORE_SET_BIT_NA(RAMROD_CONT, &p.ramrod_flags); 2396 2397 return ecore_config_vlan_mac(pdev, &p); 2398 } 2399 2400 static INLINE void ecore_init_raw_obj(struct ecore_raw_obj *raw, u8 cl_id, 2401 u32 cid, u8 func_id, void *rdata, lm_address_t rdata_mapping, int state, 2402 unsigned long *pstate, ecore_obj_type type) 2403 { 2404 raw->func_id = func_id; 2405 raw->cid = cid; 2406 raw->cl_id = cl_id; 2407 raw->rdata = rdata; 2408 raw->rdata_mapping = rdata_mapping; 2409 raw->state = state; 2410 raw->pstate = pstate; 2411 raw->obj_type = type; 2412 raw->check_pending = ecore_raw_check_pending; 2413 raw->clear_pending = ecore_raw_clear_pending; 2414 raw->set_pending = ecore_raw_set_pending; 2415 raw->wait_comp = ecore_raw_wait; 2416 } 2417 2418 static INLINE void ecore_init_vlan_mac_common(struct ecore_vlan_mac_obj *o, 2419 u8 cl_id, u32 cid, u8 func_id, void *rdata, lm_address_t rdata_mapping, 2420 int state, unsigned long *pstate, ecore_obj_type type, 2421 struct ecore_credit_pool_obj *macs_pool, 
2422 struct ecore_credit_pool_obj *vlans_pool) 2423 { 2424 ECORE_LIST_INIT(&o->head); 2425 o->head_reader = 0; 2426 o->head_exe_request = FALSE; 2427 o->saved_ramrod_flags = 0; 2428 2429 o->macs_pool = macs_pool; 2430 o->vlans_pool = vlans_pool; 2431 2432 o->delete_all = ecore_vlan_mac_del_all; 2433 o->restore = ecore_vlan_mac_restore; 2434 o->complete = ecore_complete_vlan_mac; 2435 o->wait = ecore_wait_vlan_mac; 2436 2437 ecore_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping, 2438 state, pstate, type); 2439 } 2440 2441 void ecore_init_mac_obj(struct _lm_device_t *pdev, 2442 struct ecore_vlan_mac_obj *mac_obj, 2443 u8 cl_id, u32 cid, u8 func_id, void *rdata, 2444 lm_address_t rdata_mapping, int state, 2445 unsigned long *pstate, ecore_obj_type type, 2446 struct ecore_credit_pool_obj *macs_pool) 2447 { 2448 union ecore_qable_obj *qable_obj = (union ecore_qable_obj *)mac_obj; 2449 2450 ecore_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata, 2451 rdata_mapping, state, pstate, type, 2452 macs_pool, NULL); 2453 2454 /* CAM credit pool handling */ 2455 mac_obj->get_credit = ecore_get_credit_mac; 2456 mac_obj->put_credit = ecore_put_credit_mac; 2457 mac_obj->get_cam_offset = ecore_get_cam_offset_mac; 2458 mac_obj->put_cam_offset = ecore_put_cam_offset_mac; 2459 2460 if (CHIP_IS_E1x(pdev)) { 2461 mac_obj->set_one_rule = ecore_set_one_mac_e1x; 2462 mac_obj->check_del = ecore_check_mac_del; 2463 mac_obj->check_add = ecore_check_mac_add; 2464 mac_obj->check_move = ecore_check_move_always_err; 2465 mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC; 2466 2467 /* Exe Queue */ 2468 ecore_exe_queue_init(pdev, 2469 &mac_obj->exe_queue, 1, qable_obj, 2470 ecore_validate_vlan_mac, 2471 ecore_remove_vlan_mac, 2472 ecore_optimize_vlan_mac, 2473 ecore_execute_vlan_mac, 2474 ecore_exeq_get_mac); 2475 } else { 2476 mac_obj->set_one_rule = ecore_set_one_mac_e2; 2477 mac_obj->check_del = ecore_check_mac_del; 2478 mac_obj->check_add = ecore_check_mac_add; 2479 mac_obj->check_move = ecore_check_move; 2480 mac_obj->ramrod_cmd = 2481 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; 2482 mac_obj->get_n_elements = ecore_get_n_elements; 2483 2484 /* Exe Queue */ 2485 ecore_exe_queue_init(pdev, 2486 &mac_obj->exe_queue, CLASSIFY_RULES_COUNT, 2487 qable_obj, ecore_validate_vlan_mac, 2488 ecore_remove_vlan_mac, 2489 ecore_optimize_vlan_mac, 2490 ecore_execute_vlan_mac, 2491 ecore_exeq_get_mac); 2492 } 2493 } 2494 2495 void ecore_init_vlan_obj(struct _lm_device_t *pdev, 2496 struct ecore_vlan_mac_obj *vlan_obj, 2497 u8 cl_id, u32 cid, u8 func_id, void *rdata, 2498 lm_address_t rdata_mapping, int state, 2499 unsigned long *pstate, ecore_obj_type type, 2500 struct ecore_credit_pool_obj *vlans_pool) 2501 { 2502 union ecore_qable_obj *qable_obj = (union ecore_qable_obj *)vlan_obj; 2503 2504 ecore_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata, 2505 rdata_mapping, state, pstate, type, NULL, 2506 vlans_pool); 2507 2508 vlan_obj->get_credit = ecore_get_credit_vlan; 2509 vlan_obj->put_credit = ecore_put_credit_vlan; 2510 vlan_obj->get_cam_offset = ecore_get_cam_offset_vlan; 2511 vlan_obj->put_cam_offset = ecore_put_cam_offset_vlan; 2512 2513 if (CHIP_IS_E1x(pdev)) { 2514 ECORE_ERR("Do not support chips others than E2 and newer\n"); 2515 BUG(); 2516 } else { 2517 vlan_obj->set_one_rule = ecore_set_one_vlan_e2; 2518 vlan_obj->check_del = ecore_check_vlan_del; 2519 vlan_obj->check_add = ecore_check_vlan_add; 2520 vlan_obj->check_move = ecore_check_move; 2521 vlan_obj->ramrod_cmd = 2522 
RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; 2523 vlan_obj->get_n_elements = ecore_get_n_elements; 2524 2525 /* Exe Queue */ 2526 ecore_exe_queue_init(pdev, 2527 &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT, 2528 qable_obj, ecore_validate_vlan_mac, 2529 ecore_remove_vlan_mac, 2530 ecore_optimize_vlan_mac, 2531 ecore_execute_vlan_mac, 2532 ecore_exeq_get_vlan); 2533 } 2534 } 2535 2536 void ecore_init_vlan_mac_obj(struct _lm_device_t *pdev, 2537 struct ecore_vlan_mac_obj *vlan_mac_obj, 2538 u8 cl_id, u32 cid, u8 func_id, void *rdata, 2539 lm_address_t rdata_mapping, int state, 2540 unsigned long *pstate, ecore_obj_type type, 2541 struct ecore_credit_pool_obj *macs_pool, 2542 struct ecore_credit_pool_obj *vlans_pool) 2543 { 2544 union ecore_qable_obj *qable_obj = 2545 (union ecore_qable_obj *)vlan_mac_obj; 2546 2547 ecore_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata, 2548 rdata_mapping, state, pstate, type, 2549 macs_pool, vlans_pool); 2550 2551 /* CAM pool handling */ 2552 vlan_mac_obj->get_credit = ecore_get_credit_vlan_mac; 2553 vlan_mac_obj->put_credit = ecore_put_credit_vlan_mac; 2554 /* CAM offset is relevant for 57710 and 57711 chips only which have a 2555 * single CAM for both MACs and VLAN-MAC pairs. So the offset 2556 * will be taken from MACs' pool object only. 2557 */ 2558 vlan_mac_obj->get_cam_offset = ecore_get_cam_offset_mac; 2559 vlan_mac_obj->put_cam_offset = ecore_put_cam_offset_mac; 2560 2561 if (CHIP_IS_E1(pdev)) { 2562 ECORE_ERR("Do not support chips others than E2\n"); 2563 BUG(); 2564 } else if (CHIP_IS_E1H(pdev)) { 2565 vlan_mac_obj->set_one_rule = ecore_set_one_vlan_mac_e1h; 2566 vlan_mac_obj->check_del = ecore_check_vlan_mac_del; 2567 vlan_mac_obj->check_add = ecore_check_vlan_mac_add; 2568 vlan_mac_obj->check_move = ecore_check_move_always_err; 2569 vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC; 2570 2571 /* Exe Queue */ 2572 ecore_exe_queue_init(pdev, 2573 &vlan_mac_obj->exe_queue, 1, qable_obj, 2574 ecore_validate_vlan_mac, 2575 ecore_remove_vlan_mac, 2576 ecore_optimize_vlan_mac, 2577 ecore_execute_vlan_mac, 2578 ecore_exeq_get_vlan_mac); 2579 } else { 2580 vlan_mac_obj->set_one_rule = ecore_set_one_vlan_mac_e2; 2581 vlan_mac_obj->check_del = ecore_check_vlan_mac_del; 2582 vlan_mac_obj->check_add = ecore_check_vlan_mac_add; 2583 vlan_mac_obj->check_move = ecore_check_move; 2584 vlan_mac_obj->ramrod_cmd = 2585 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; 2586 2587 /* Exe Queue */ 2588 ecore_exe_queue_init(pdev, 2589 &vlan_mac_obj->exe_queue, 2590 CLASSIFY_RULES_COUNT, 2591 qable_obj, ecore_validate_vlan_mac, 2592 ecore_remove_vlan_mac, 2593 ecore_optimize_vlan_mac, 2594 ecore_execute_vlan_mac, 2595 ecore_exeq_get_vlan_mac); 2596 } 2597 } 2598 2599 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ 2600 static INLINE void __storm_memset_mac_filters(struct _lm_device_t *pdev, 2601 struct tstorm_eth_mac_filter_config *mac_filters, 2602 u16 pf_id) 2603 { 2604 size_t size = sizeof(struct tstorm_eth_mac_filter_config); 2605 2606 u32 addr = BAR_TSTRORM_INTMEM + 2607 TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id); 2608 2609 __storm_memset_struct(pdev, addr, size, (u32 *)mac_filters); 2610 } 2611 2612 static int ecore_set_rx_mode_e1x(struct _lm_device_t *pdev, 2613 struct ecore_rx_mode_ramrod_params *p) 2614 { 2615 /* update the pdev MAC filter structure */ 2616 u32 mask = (1 << p->cl_id); 2617 2618 struct tstorm_eth_mac_filter_config *mac_filters = 2619 (struct tstorm_eth_mac_filter_config *)p->rdata; 2620 2621 /* initial setting is 
drop-all */
	u8 drop_all_ucast = 1, drop_all_mcast = 1;
	u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
	u8 unmatched_unicast = 0;

	/* In E1x we only take the Rx accept flags into account since Tx
	 * switching isn't enabled. */
	if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, &p->rx_accept_flags))
		/* accept matched ucast */
		drop_all_ucast = 0;

	if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, &p->rx_accept_flags))
		/* accept matched mcast */
		drop_all_mcast = 0;

	if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
		/* accept all ucast */
		drop_all_ucast = 0;
		accp_all_ucast = 1;
	}
	if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
		/* accept all mcast */
		drop_all_mcast = 0;
		accp_all_mcast = 1;
	}
	if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, &p->rx_accept_flags))
		/* accept (all) bcast */
		accp_all_bcast = 1;
	if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, &p->rx_accept_flags))
		/* accept unmatched unicasts */
		unmatched_unicast = 1;

	mac_filters->ucast_drop_all = drop_all_ucast ?
		mac_filters->ucast_drop_all | mask :
		mac_filters->ucast_drop_all & ~mask;

	mac_filters->mcast_drop_all = drop_all_mcast ?
		mac_filters->mcast_drop_all | mask :
		mac_filters->mcast_drop_all & ~mask;

	mac_filters->ucast_accept_all = accp_all_ucast ?
		mac_filters->ucast_accept_all | mask :
		mac_filters->ucast_accept_all & ~mask;

	mac_filters->mcast_accept_all = accp_all_mcast ?
		mac_filters->mcast_accept_all | mask :
		mac_filters->mcast_accept_all & ~mask;

	mac_filters->bcast_accept_all = accp_all_bcast ?
		mac_filters->bcast_accept_all | mask :
		mac_filters->bcast_accept_all & ~mask;

	mac_filters->unmatched_unicast = unmatched_unicast ?
2674 mac_filters->unmatched_unicast | mask : 2675 mac_filters->unmatched_unicast & ~mask; 2676 2677 ECORE_MSG(pdev, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n" 2678 "accp_mcast 0x%x\naccp_bcast 0x%x\n", 2679 mac_filters->ucast_drop_all, mac_filters->mcast_drop_all, 2680 mac_filters->ucast_accept_all, mac_filters->mcast_accept_all, 2681 mac_filters->bcast_accept_all); 2682 2683 /* write the MAC filter structure*/ 2684 __storm_memset_mac_filters(pdev, mac_filters, p->func_id); 2685 2686 /* The operation is completed */ 2687 ECORE_CLEAR_BIT(p->state, p->pstate); 2688 smp_mb__after_atomic(); 2689 2690 return ECORE_SUCCESS; 2691 } 2692 2693 /* Setup ramrod data */ 2694 static INLINE void ecore_rx_mode_set_rdata_hdr_e2(u32 cid, 2695 struct eth_classify_header *hdr, 2696 u8 rule_cnt) 2697 { 2698 hdr->echo = mm_cpu_to_le32(cid); 2699 hdr->rule_cnt = rule_cnt; 2700 } 2701 2702 static INLINE void ecore_rx_mode_set_cmd_state_e2(struct _lm_device_t *pdev, 2703 unsigned long *accept_flags, 2704 struct eth_filter_rules_cmd *cmd, 2705 BOOL clear_accept_all) 2706 { 2707 u16 state; 2708 2709 /* start with 'drop-all' */ 2710 state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL | 2711 ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; 2712 2713 if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, accept_flags)) 2714 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; 2715 2716 if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, accept_flags)) 2717 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; 2718 2719 if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, accept_flags)) { 2720 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; 2721 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL; 2722 } 2723 2724 if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, accept_flags)) { 2725 state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL; 2726 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; 2727 } 2728 if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, accept_flags)) 2729 state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL; 2730 2731 if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, accept_flags)) { 2732 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; 2733 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED; 2734 } 2735 if (ECORE_TEST_BIT(ECORE_ACCEPT_ANY_VLAN, accept_flags)) 2736 state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN; 2737 2738 /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */ 2739 if (clear_accept_all) { 2740 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL; 2741 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL; 2742 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL; 2743 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED; 2744 } 2745 2746 cmd->state = mm_cpu_to_le16(state); 2747 } 2748 2749 static int ecore_set_rx_mode_e2(struct _lm_device_t *pdev, 2750 struct ecore_rx_mode_ramrod_params *p) 2751 { 2752 struct eth_filter_rules_ramrod_data *data = p->rdata; 2753 int rc; 2754 u8 rule_idx = 0; 2755 2756 /* Reset the ramrod data buffer */ 2757 mm_memset(data, 0, sizeof(*data)); 2758 2759 /* Setup ramrod data */ 2760 2761 /* Tx (internal switching) */ 2762 if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) { 2763 data->rules[rule_idx].client_id = p->cl_id; 2764 data->rules[rule_idx].func_id = p->func_id; 2765 2766 data->rules[rule_idx].cmd_general_data = 2767 ETH_FILTER_RULES_CMD_TX_CMD; 2768 2769 ecore_rx_mode_set_cmd_state_e2(pdev, &p->tx_accept_flags, 2770 &(data->rules[rule_idx++]), 2771 FALSE); 2772 } 2773 2774 /* Rx */ 2775 if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) { 2776 data->rules[rule_idx].client_id = p->cl_id; 2777 data->rules[rule_idx].func_id = p->func_id; 2778 2779 
data->rules[rule_idx].cmd_general_data =
			ETH_FILTER_RULES_CMD_RX_CMD;

		ecore_rx_mode_set_cmd_state_e2(pdev, &p->rx_accept_flags,
					       &(data->rules[rule_idx++]),
					       FALSE);
	}

	/* If FCoE Queue configuration has been requested, configure the Rx and
	 * internal switching modes for this queue in separate rules.
	 *
	 * FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
	 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
	 */
	if (ECORE_TEST_BIT(ECORE_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
		/* Tx (internal switching) */
		if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) {
			data->rules[rule_idx].client_id = FCOE_CID(pdev);
			data->rules[rule_idx].func_id = p->func_id;

			data->rules[rule_idx].cmd_general_data =
				ETH_FILTER_RULES_CMD_TX_CMD;

			ecore_rx_mode_set_cmd_state_e2(pdev, &p->tx_accept_flags,
						       &(data->rules[rule_idx]),
						       TRUE);
			rule_idx++;
		}

		/* Rx */
		if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) {
			data->rules[rule_idx].client_id = FCOE_CID(pdev);
			data->rules[rule_idx].func_id = p->func_id;

			data->rules[rule_idx].cmd_general_data =
				ETH_FILTER_RULES_CMD_RX_CMD;

			ecore_rx_mode_set_cmd_state_e2(pdev, &p->rx_accept_flags,
						       &(data->rules[rule_idx]),
						       TRUE);
			rule_idx++;
		}
	}

	/* Set the ramrod header (most importantly - number of rules to
	 * configure).
	 */
	ecore_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);

	ECORE_MSG(pdev, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
		  data->header.rule_cnt, p->rx_accept_flags,
		  p->tx_accept_flags);

	/* No need for an explicit memory barrier here as long as we
	 * ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer which involves a memory
	 * read. If the memory read is removed we will have to put a
	 * full memory barrier there (inside ecore_sp_post()).
2837 */ 2838 2839 /* Send a ramrod */ 2840 rc = ecore_sp_post(pdev, 2841 RAMROD_CMD_ID_ETH_FILTER_RULES, 2842 p->cid, 2843 p->rdata_mapping.as_u64, 2844 ETH_CONNECTION_TYPE); 2845 if (rc) 2846 return rc; 2847 2848 /* Ramrod completion is pending */ 2849 return ECORE_PENDING; 2850 } 2851 2852 static int ecore_wait_rx_mode_comp_e2(struct _lm_device_t *pdev, 2853 struct ecore_rx_mode_ramrod_params *p) 2854 { 2855 return ecore_state_wait(pdev, p->state, p->pstate); 2856 } 2857 2858 static int ecore_empty_rx_mode_wait(struct _lm_device_t *pdev, 2859 struct ecore_rx_mode_ramrod_params *p) 2860 { 2861 /* Do nothing */ 2862 return ECORE_SUCCESS; 2863 } 2864 2865 int ecore_config_rx_mode(struct _lm_device_t *pdev, 2866 struct ecore_rx_mode_ramrod_params *p) 2867 { 2868 int rc; 2869 2870 /* Configure the new classification in the chip */ 2871 rc = p->rx_mode_obj->config_rx_mode(pdev, p); 2872 if (rc < 0) 2873 return rc; 2874 2875 /* Wait for a ramrod completion if was requested */ 2876 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) { 2877 rc = p->rx_mode_obj->wait_comp(pdev, p); 2878 if (rc) 2879 return rc; 2880 } 2881 2882 return rc; 2883 } 2884 2885 void ecore_init_rx_mode_obj(struct _lm_device_t *pdev, 2886 struct ecore_rx_mode_obj *o) 2887 { 2888 if (CHIP_IS_E1x(pdev)) { 2889 o->wait_comp = ecore_empty_rx_mode_wait; 2890 o->config_rx_mode = ecore_set_rx_mode_e1x; 2891 } else { 2892 o->wait_comp = ecore_wait_rx_mode_comp_e2; 2893 o->config_rx_mode = ecore_set_rx_mode_e2; 2894 } 2895 } 2896 2897 /********************* Multicast verbs: SET, CLEAR ****************************/ 2898 static INLINE u8 ecore_mcast_bin_from_mac(u8 *mac) 2899 { 2900 return (ecore_crc32_le(0, mac, ETH_ALEN) >> 24) & 0xff; 2901 } 2902 2903 struct ecore_mcast_mac_elem { 2904 d_list_entry_t link; 2905 u8 mac[ETH_ALEN]; 2906 u8 pad[2]; /* For a natural alignment of the following buffer */ 2907 }; 2908 2909 struct ecore_pending_mcast_cmd { 2910 d_list_entry_t link; 2911 int type; /* ECORE_MCAST_CMD_X */ 2912 union { 2913 d_list_t macs_head; 2914 u32 macs_num; /* Needed for DEL command */ 2915 int next_bin; /* Needed for RESTORE flow with aprox match */ 2916 } data; 2917 2918 BOOL done; /* set to TRUE, when the command has been handled, 2919 * practically used in 57712 handling only, where one pending 2920 * command may be handled in a few operations. As long as for 2921 * other chips every operation handling is completed in a 2922 * single ramrod, there is no need to utilize this field. 2923 */ 2924 #ifndef ECORE_ERASE 2925 u32 alloc_len; /* passed to ECORE_FREE */ 2926 #endif 2927 }; 2928 2929 static int ecore_mcast_wait(struct _lm_device_t *pdev, 2930 struct ecore_mcast_obj *o) 2931 { 2932 if (ecore_state_wait(pdev, o->sched_state, o->raw.pstate) || 2933 o->raw.wait_comp(pdev, &o->raw)) 2934 return ECORE_TIMEOUT; 2935 2936 return ECORE_SUCCESS; 2937 } 2938 2939 static int ecore_mcast_enqueue_cmd(struct _lm_device_t *pdev, 2940 struct ecore_mcast_obj *o, 2941 struct ecore_mcast_ramrod_params *p, 2942 enum ecore_mcast_cmd cmd) 2943 { 2944 int total_sz; 2945 struct ecore_pending_mcast_cmd *new_cmd; 2946 struct ecore_mcast_mac_elem *cur_mac = NULL; 2947 struct ecore_mcast_list_elem *pos; 2948 int macs_list_len = ((cmd == ECORE_MCAST_CMD_ADD) ? 
2949 p->mcast_list_len : 0); 2950 2951 /* If the command is empty ("handle pending commands only"), break */ 2952 if (!p->mcast_list_len) 2953 return ECORE_SUCCESS; 2954 2955 total_sz = sizeof(*new_cmd) + 2956 macs_list_len * sizeof(struct ecore_mcast_mac_elem); 2957 2958 /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */ 2959 new_cmd = ECORE_ZALLOC(total_sz, GFP_ATOMIC, pdev); 2960 2961 if (!new_cmd) 2962 return ECORE_NOMEM; 2963 2964 ECORE_MSG(pdev, "About to enqueue a new %d command. macs_list_len=%d\n", 2965 cmd, macs_list_len); 2966 2967 ECORE_LIST_INIT(&new_cmd->data.macs_head); 2968 2969 new_cmd->type = cmd; 2970 new_cmd->done = FALSE; 2971 #ifndef ECORE_ERASE 2972 new_cmd->alloc_len = total_sz; 2973 #endif 2974 2975 switch (cmd) { 2976 case ECORE_MCAST_CMD_ADD: 2977 cur_mac = (struct ecore_mcast_mac_elem *) 2978 ((u8 *)new_cmd + sizeof(*new_cmd)); 2979 2980 /* Push the MACs of the current command into the pending command 2981 * MACs list: FIFO 2982 */ 2983 ECORE_LIST_FOR_EACH_ENTRY(pos, &p->mcast_list, link, 2984 struct ecore_mcast_list_elem) { 2985 mm_memcpy(cur_mac->mac, pos->mac, ETH_ALEN); 2986 ECORE_LIST_PUSH_TAIL(&cur_mac->link, 2987 &new_cmd->data.macs_head); 2988 cur_mac++; 2989 } 2990 2991 break; 2992 2993 case ECORE_MCAST_CMD_DEL: 2994 new_cmd->data.macs_num = p->mcast_list_len; 2995 break; 2996 2997 case ECORE_MCAST_CMD_RESTORE: 2998 new_cmd->data.next_bin = 0; 2999 break; 3000 3001 default: 3002 ECORE_FREE(pdev, new_cmd, total_sz); 3003 ECORE_ERR("Unknown command: %d\n", cmd); 3004 return ECORE_INVAL; 3005 } 3006 3007 /* Push the new pending command to the tail of the pending list: FIFO */ 3008 ECORE_LIST_PUSH_TAIL(&new_cmd->link, &o->pending_cmds_head); 3009 3010 o->set_sched(o); 3011 3012 return ECORE_PENDING; 3013 } 3014 3015 /** 3016 * ecore_mcast_get_next_bin - get the next set bin (index) 3017 * 3018 * @o: 3019 * @last: index to start looking from (including) 3020 * 3021 * returns the next found (set) bin or a negative value if none is found. 3022 */ 3023 static INLINE int ecore_mcast_get_next_bin(struct ecore_mcast_obj *o, int last) 3024 { 3025 int i, j, inner_start = last % BIT_VEC64_ELEM_SZ; 3026 3027 for (i = last / BIT_VEC64_ELEM_SZ; i < ECORE_MCAST_VEC_SZ; i++) { 3028 if (o->registry.aprox_match.vec[i]) 3029 for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) { 3030 int cur_bit = j + BIT_VEC64_ELEM_SZ * i; 3031 if (BIT_VEC64_TEST_BIT(o->registry.aprox_match. 
3032 vec, cur_bit)) { 3033 return cur_bit; 3034 } 3035 } 3036 inner_start = 0; 3037 } 3038 3039 /* None found */ 3040 return -1; 3041 } 3042 3043 /** 3044 * ecore_mcast_clear_first_bin - find the first set bin and clear it 3045 * 3046 * @o: 3047 * 3048 * returns the index of the found bin or -1 if none is found 3049 */ 3050 static INLINE int ecore_mcast_clear_first_bin(struct ecore_mcast_obj *o) 3051 { 3052 int cur_bit = ecore_mcast_get_next_bin(o, 0); 3053 3054 if (cur_bit >= 0) 3055 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit); 3056 3057 return cur_bit; 3058 } 3059 3060 static INLINE u8 ecore_mcast_get_rx_tx_flag(struct ecore_mcast_obj *o) 3061 { 3062 struct ecore_raw_obj *raw = &o->raw; 3063 u8 rx_tx_flag = 0; 3064 3065 if ((raw->obj_type == ECORE_OBJ_TYPE_TX) || 3066 (raw->obj_type == ECORE_OBJ_TYPE_RX_TX)) 3067 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD; 3068 3069 if ((raw->obj_type == ECORE_OBJ_TYPE_RX) || 3070 (raw->obj_type == ECORE_OBJ_TYPE_RX_TX)) 3071 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD; 3072 3073 return rx_tx_flag; 3074 } 3075 3076 static void ecore_mcast_set_one_rule_e2(struct _lm_device_t *pdev, 3077 struct ecore_mcast_obj *o, int idx, 3078 union ecore_mcast_config_data *cfg_data, 3079 enum ecore_mcast_cmd cmd) 3080 { 3081 struct ecore_raw_obj *r = &o->raw; 3082 struct eth_multicast_rules_ramrod_data *data = 3083 (struct eth_multicast_rules_ramrod_data *)(r->rdata); 3084 u8 func_id = r->func_id; 3085 u8 rx_tx_add_flag = ecore_mcast_get_rx_tx_flag(o); 3086 int bin; 3087 3088 if ((cmd == ECORE_MCAST_CMD_ADD) || (cmd == ECORE_MCAST_CMD_RESTORE)) 3089 rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD; 3090 3091 data->rules[idx].cmd_general_data |= rx_tx_add_flag; 3092 3093 /* Get a bin and update a bins' vector */ 3094 switch (cmd) { 3095 case ECORE_MCAST_CMD_ADD: 3096 bin = ecore_mcast_bin_from_mac(cfg_data->mac); 3097 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin); 3098 break; 3099 3100 case ECORE_MCAST_CMD_DEL: 3101 /* If there were no more bins to clear 3102 * (ecore_mcast_clear_first_bin() returns -1) then we would 3103 * clear any (0xff) bin. 3104 * See ecore_mcast_validate_e2() for explanation when it may 3105 * happen. 3106 */ 3107 bin = ecore_mcast_clear_first_bin(o); 3108 break; 3109 3110 case ECORE_MCAST_CMD_RESTORE: 3111 bin = cfg_data->bin; 3112 break; 3113 3114 default: 3115 ECORE_ERR("Unknown command: %d\n", cmd); 3116 return; 3117 } 3118 3119 ECORE_MSG(pdev, "%s bin %d\n", 3120 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ? 
3121 "Setting" : "Clearing"), bin); 3122 3123 data->rules[idx].bin_id = (u8)bin; 3124 data->rules[idx].func_id = func_id; 3125 data->rules[idx].engine_id = o->engine_id; 3126 } 3127 3128 /** 3129 * ecore_mcast_handle_restore_cmd_e2 - restore configuration from the registry 3130 * 3131 * @pdev: device handle 3132 * @o: 3133 * @start_bin: index in the registry to start from (including) 3134 * @rdata_idx: index in the ramrod data to start from 3135 * 3136 * returns last handled bin index or -1 if all bins have been handled 3137 */ 3138 static INLINE int ecore_mcast_handle_restore_cmd_e2( 3139 struct _lm_device_t *pdev, struct ecore_mcast_obj *o , int start_bin, 3140 int *rdata_idx) 3141 { 3142 int cur_bin, cnt = *rdata_idx; 3143 union ecore_mcast_config_data cfg_data = {NULL}; 3144 3145 /* go through the registry and configure the bins from it */ 3146 for (cur_bin = ecore_mcast_get_next_bin(o, start_bin); cur_bin >= 0; 3147 cur_bin = ecore_mcast_get_next_bin(o, cur_bin + 1)) { 3148 3149 cfg_data.bin = (u8)cur_bin; 3150 o->set_one_rule(pdev, o, cnt, &cfg_data, 3151 ECORE_MCAST_CMD_RESTORE); 3152 3153 cnt++; 3154 3155 ECORE_MSG(pdev, "About to configure a bin %d\n", cur_bin); 3156 3157 /* Break if we reached the maximum number 3158 * of rules. 3159 */ 3160 if (cnt >= o->max_cmd_len) 3161 break; 3162 } 3163 3164 *rdata_idx = cnt; 3165 3166 return cur_bin; 3167 } 3168 3169 static INLINE void ecore_mcast_hdl_pending_add_e2(struct _lm_device_t *pdev, 3170 struct ecore_mcast_obj *o, struct ecore_pending_mcast_cmd *cmd_pos, 3171 int *line_idx) 3172 { 3173 struct ecore_mcast_mac_elem *pmac_pos, *pmac_pos_n; 3174 int cnt = *line_idx; 3175 union ecore_mcast_config_data cfg_data = {NULL}; 3176 3177 ECORE_LIST_FOR_EACH_ENTRY_SAFE(pmac_pos, pmac_pos_n, 3178 &cmd_pos->data.macs_head, link, struct ecore_mcast_mac_elem) { 3179 3180 cfg_data.mac = &pmac_pos->mac[0]; 3181 o->set_one_rule(pdev, o, cnt, &cfg_data, cmd_pos->type); 3182 3183 cnt++; 3184 3185 ECORE_MSG(pdev, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n", 3186 pmac_pos->mac[0], pmac_pos->mac[1], pmac_pos->mac[2], pmac_pos->mac[3], pmac_pos->mac[4], pmac_pos->mac[5]); 3187 3188 ECORE_LIST_REMOVE_ENTRY(&pmac_pos->link, 3189 &cmd_pos->data.macs_head); 3190 3191 /* Break if we reached the maximum number 3192 * of rules. 3193 */ 3194 if (cnt >= o->max_cmd_len) 3195 break; 3196 } 3197 3198 *line_idx = cnt; 3199 3200 /* if no more MACs to configure - we are done */ 3201 if (ECORE_LIST_IS_EMPTY(&cmd_pos->data.macs_head)) 3202 cmd_pos->done = TRUE; 3203 } 3204 3205 static INLINE void ecore_mcast_hdl_pending_del_e2(struct _lm_device_t *pdev, 3206 struct ecore_mcast_obj *o, struct ecore_pending_mcast_cmd *cmd_pos, 3207 int *line_idx) 3208 { 3209 int cnt = *line_idx; 3210 3211 while (cmd_pos->data.macs_num) { 3212 o->set_one_rule(pdev, o, cnt, NULL, cmd_pos->type); 3213 3214 cnt++; 3215 3216 cmd_pos->data.macs_num--; 3217 3218 ECORE_MSG(pdev, "Deleting MAC. %d left,cnt is %d\n", 3219 cmd_pos->data.macs_num, cnt); 3220 3221 /* Break if we reached the maximum 3222 * number of rules. 
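		 * The remaining count stays in cmd_pos->data.macs_num and
		 * cmd_pos->done is left FALSE, so this pending DEL is resumed
		 * by the next ramrod.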
3223 */ 3224 if (cnt >= o->max_cmd_len) 3225 break; 3226 } 3227 3228 *line_idx = cnt; 3229 3230 /* If we cleared all bins - we are done */ 3231 if (!cmd_pos->data.macs_num) 3232 cmd_pos->done = TRUE; 3233 } 3234 3235 static INLINE void ecore_mcast_hdl_pending_restore_e2(struct _lm_device_t *pdev, 3236 struct ecore_mcast_obj *o, struct ecore_pending_mcast_cmd *cmd_pos, 3237 int *line_idx) 3238 { 3239 cmd_pos->data.next_bin = o->hdl_restore(pdev, o, cmd_pos->data.next_bin, 3240 line_idx); 3241 3242 if (cmd_pos->data.next_bin < 0) 3243 /* If o->set_restore returned -1 we are done */ 3244 cmd_pos->done = TRUE; 3245 else 3246 /* Start from the next bin next time */ 3247 cmd_pos->data.next_bin++; 3248 } 3249 3250 static INLINE int ecore_mcast_handle_pending_cmds_e2(struct _lm_device_t *pdev, 3251 struct ecore_mcast_ramrod_params *p) 3252 { 3253 struct ecore_pending_mcast_cmd *cmd_pos, *cmd_pos_n; 3254 int cnt = 0; 3255 struct ecore_mcast_obj *o = p->mcast_obj; 3256 3257 ECORE_LIST_FOR_EACH_ENTRY_SAFE(cmd_pos, cmd_pos_n, 3258 &o->pending_cmds_head, link, struct ecore_pending_mcast_cmd) { 3259 switch (cmd_pos->type) { 3260 case ECORE_MCAST_CMD_ADD: 3261 ecore_mcast_hdl_pending_add_e2(pdev, o, cmd_pos, &cnt); 3262 break; 3263 3264 case ECORE_MCAST_CMD_DEL: 3265 ecore_mcast_hdl_pending_del_e2(pdev, o, cmd_pos, &cnt); 3266 break; 3267 3268 case ECORE_MCAST_CMD_RESTORE: 3269 ecore_mcast_hdl_pending_restore_e2(pdev, o, cmd_pos, 3270 &cnt); 3271 break; 3272 3273 default: 3274 ECORE_ERR("Unknown command: %d\n", cmd_pos->type); 3275 return ECORE_INVAL; 3276 } 3277 3278 /* If the command has been completed - remove it from the list 3279 * and free the memory 3280 */ 3281 if (cmd_pos->done) { 3282 ECORE_LIST_REMOVE_ENTRY(&cmd_pos->link, 3283 &o->pending_cmds_head); 3284 ECORE_FREE(pdev, cmd_pos, cmd_pos->alloc_len); 3285 } 3286 3287 /* Break if we reached the maximum number of rules */ 3288 if (cnt >= o->max_cmd_len) 3289 break; 3290 } 3291 3292 return cnt; 3293 } 3294 3295 static INLINE void ecore_mcast_hdl_add(struct _lm_device_t *pdev, 3296 struct ecore_mcast_obj *o, struct ecore_mcast_ramrod_params *p, 3297 int *line_idx) 3298 { 3299 struct ecore_mcast_list_elem *mlist_pos; 3300 union ecore_mcast_config_data cfg_data = {NULL}; 3301 int cnt = *line_idx; 3302 3303 ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link, 3304 struct ecore_mcast_list_elem) { 3305 cfg_data.mac = mlist_pos->mac; 3306 o->set_one_rule(pdev, o, cnt, &cfg_data, ECORE_MCAST_CMD_ADD); 3307 3308 cnt++; 3309 3310 ECORE_MSG(pdev, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n", 3311 mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2], mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5]); 3312 } 3313 3314 *line_idx = cnt; 3315 } 3316 3317 static INLINE void ecore_mcast_hdl_del(struct _lm_device_t *pdev, 3318 struct ecore_mcast_obj *o, struct ecore_mcast_ramrod_params *p, 3319 int *line_idx) 3320 { 3321 int cnt = *line_idx, i; 3322 3323 for (i = 0; i < p->mcast_list_len; i++) { 3324 o->set_one_rule(pdev, o, cnt, NULL, ECORE_MCAST_CMD_DEL); 3325 3326 cnt++; 3327 3328 ECORE_MSG(pdev, "Deleting MAC. %d left\n", 3329 p->mcast_list_len - i - 1); 3330 } 3331 3332 *line_idx = cnt; 3333 } 3334 3335 /** 3336 * ecore_mcast_handle_current_cmd - 3337 * 3338 * @pdev: device handle 3339 * @p: 3340 * @cmd: 3341 * @start_cnt: first line in the ramrod data that may be used 3342 * 3343 * This function is called iff there is enough place for the current command in 3344 * the ramrod data. 
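 * (Otherwise the command would have been queued as a pending command instead;
 * see the "enough room" comment in ecore_mcast_setup_e2().)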
3345 * Returns number of lines filled in the ramrod data in total. 3346 */ 3347 static INLINE int ecore_mcast_handle_current_cmd(struct _lm_device_t *pdev, 3348 struct ecore_mcast_ramrod_params *p, 3349 enum ecore_mcast_cmd cmd, 3350 int start_cnt) 3351 { 3352 struct ecore_mcast_obj *o = p->mcast_obj; 3353 int cnt = start_cnt; 3354 3355 ECORE_MSG(pdev, "p->mcast_list_len=%d\n", p->mcast_list_len); 3356 3357 switch (cmd) { 3358 case ECORE_MCAST_CMD_ADD: 3359 ecore_mcast_hdl_add(pdev, o, p, &cnt); 3360 break; 3361 3362 case ECORE_MCAST_CMD_DEL: 3363 ecore_mcast_hdl_del(pdev, o, p, &cnt); 3364 break; 3365 3366 case ECORE_MCAST_CMD_RESTORE: 3367 o->hdl_restore(pdev, o, 0, &cnt); 3368 break; 3369 3370 default: 3371 ECORE_ERR("Unknown command: %d\n", cmd); 3372 return ECORE_INVAL; 3373 } 3374 3375 /* The current command has been handled */ 3376 p->mcast_list_len = 0; 3377 3378 return cnt; 3379 } 3380 3381 static int ecore_mcast_validate_e2(struct _lm_device_t *pdev, 3382 struct ecore_mcast_ramrod_params *p, 3383 enum ecore_mcast_cmd cmd) 3384 { 3385 struct ecore_mcast_obj *o = p->mcast_obj; 3386 int reg_sz = o->get_registry_size(o); 3387 3388 switch (cmd) { 3389 /* DEL command deletes all currently configured MACs */ 3390 case ECORE_MCAST_CMD_DEL: 3391 o->set_registry_size(o, 0); 3392 /* Don't break */ 3393 3394 /* RESTORE command will restore the entire multicast configuration */ 3395 case ECORE_MCAST_CMD_RESTORE: 3396 /* Here we set the approximate amount of work to do, which in 3397 * fact may be only less as some MACs in postponed ADD 3398 * command(s) scheduled before this command may fall into 3399 * the same bin and the actual number of bins set in the 3400 * registry would be less than we estimated here. See 3401 * ecore_mcast_set_one_rule_e2() for further details. 3402 */ 3403 p->mcast_list_len = reg_sz; 3404 break; 3405 3406 case ECORE_MCAST_CMD_ADD: 3407 case ECORE_MCAST_CMD_CONT: 3408 /* Here we assume that all new MACs will fall into new bins. 3409 * However we will correct the real registry size after we 3410 * handle all pending commands. 
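		 * (The correction is done in ecore_mcast_refresh_registry_e2(),
		 * invoked from ecore_mcast_setup_e2() once o->total_pending_num
		 * drops back to zero.)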
3411 */ 3412 o->set_registry_size(o, reg_sz + p->mcast_list_len); 3413 break; 3414 3415 default: 3416 ECORE_ERR("Unknown command: %d\n", cmd); 3417 return ECORE_INVAL; 3418 } 3419 3420 /* Increase the total number of MACs pending to be configured */ 3421 o->total_pending_num += p->mcast_list_len; 3422 3423 return ECORE_SUCCESS; 3424 } 3425 3426 static void ecore_mcast_revert_e2(struct _lm_device_t *pdev, 3427 struct ecore_mcast_ramrod_params *p, 3428 int old_num_bins) 3429 { 3430 struct ecore_mcast_obj *o = p->mcast_obj; 3431 3432 o->set_registry_size(o, old_num_bins); 3433 o->total_pending_num -= p->mcast_list_len; 3434 } 3435 3436 /** 3437 * ecore_mcast_set_rdata_hdr_e2 - sets a header values 3438 * 3439 * @pdev: device handle 3440 * @p: 3441 * @len: number of rules to handle 3442 */ 3443 static INLINE void ecore_mcast_set_rdata_hdr_e2(struct _lm_device_t *pdev, 3444 struct ecore_mcast_ramrod_params *p, 3445 u8 len) 3446 { 3447 struct ecore_raw_obj *r = &p->mcast_obj->raw; 3448 struct eth_multicast_rules_ramrod_data *data = 3449 (struct eth_multicast_rules_ramrod_data *)(r->rdata); 3450 3451 data->header.echo = mm_cpu_to_le32((r->cid & ECORE_SWCID_MASK) | 3452 (ECORE_FILTER_MCAST_PENDING << 3453 ECORE_SWCID_SHIFT)); 3454 data->header.rule_cnt = len; 3455 } 3456 3457 /** 3458 * ecore_mcast_refresh_registry_e2 - recalculate the actual number of set bins 3459 * 3460 * @pdev: device handle 3461 * @o: 3462 * 3463 * Recalculate the actual number of set bins in the registry using Brian 3464 * Kernighan's algorithm: it's execution complexity is as a number of set bins. 3465 * 3466 * returns 0 for the compliance with ecore_mcast_refresh_registry_e1(). 3467 */ 3468 static INLINE int ecore_mcast_refresh_registry_e2(struct _lm_device_t *pdev, 3469 struct ecore_mcast_obj *o) 3470 { 3471 int i, cnt = 0; 3472 u64 elem; 3473 3474 for (i = 0; i < ECORE_MCAST_VEC_SZ; i++) { 3475 elem = o->registry.aprox_match.vec[i]; 3476 for (; elem; cnt++) 3477 elem &= elem - 1; 3478 } 3479 3480 o->set_registry_size(o, cnt); 3481 3482 return ECORE_SUCCESS; 3483 } 3484 3485 static int ecore_mcast_setup_e2(struct _lm_device_t *pdev, 3486 struct ecore_mcast_ramrod_params *p, 3487 enum ecore_mcast_cmd cmd) 3488 { 3489 struct ecore_raw_obj *raw = &p->mcast_obj->raw; 3490 struct ecore_mcast_obj *o = p->mcast_obj; 3491 struct eth_multicast_rules_ramrod_data *data = 3492 (struct eth_multicast_rules_ramrod_data *)(raw->rdata); 3493 int cnt = 0, rc; 3494 3495 /* Reset the ramrod data buffer */ 3496 mm_memset(data, 0, sizeof(*data)); 3497 3498 cnt = ecore_mcast_handle_pending_cmds_e2(pdev, p); 3499 3500 /* If there are no more pending commands - clear SCHEDULED state */ 3501 if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head)) 3502 o->clear_sched(o); 3503 3504 /* The below may be TRUE iff there was enough room in ramrod 3505 * data for all pending commands and for the current 3506 * command. Otherwise the current command would have been added 3507 * to the pending commands and p->mcast_list_len would have been 3508 * zeroed. 3509 */ 3510 if (p->mcast_list_len > 0) 3511 cnt = ecore_mcast_handle_current_cmd(pdev, p, cmd, cnt); 3512 3513 /* We've pulled out some MACs - update the total number of 3514 * outstanding. 3515 */ 3516 o->total_pending_num -= cnt; 3517 3518 /* send a ramrod */ 3519 DbgBreakIf(o->total_pending_num < 0); 3520 DbgBreakIf(cnt > o->max_cmd_len); 3521 3522 ecore_mcast_set_rdata_hdr_e2(pdev, p, (u8)cnt); 3523 3524 /* Update a registry size if there are no more pending operations. 
3525 * 3526 * We don't want to change the value of the registry size if there are 3527 * pending operations because we want it to always be equal to the 3528 * exact or the approximate number (see ecore_mcast_validate_e2()) of 3529 * set bins after the last requested operation in order to properly 3530 * evaluate the size of the next DEL/RESTORE operation. 3531 * 3532 * Note that we update the registry itself during command(s) handling 3533 * - see ecore_mcast_set_one_rule_e2(). That's because for 57712 we 3534 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but 3535 * with a limited amount of update commands (per MAC/bin) and we don't 3536 * know in this scope what the actual state of bins configuration is 3537 * going to be after this ramrod. 3538 */ 3539 if (!o->total_pending_num) 3540 ecore_mcast_refresh_registry_e2(pdev, o); 3541 3542 /* If CLEAR_ONLY was requested - don't send a ramrod and clear 3543 * RAMROD_PENDING status immediately. 3544 */ 3545 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { 3546 raw->clear_pending(raw); 3547 return ECORE_SUCCESS; 3548 } else { 3549 /* No need for an explicit memory barrier here as long as we 3550 * ensure the ordering of writing to the SPQ element 3551 * and updating of the SPQ producer which involves a memory 3552 * read. If the memory read is removed we will have to put a 3553 * full memory barrier there (inside ecore_sp_post()). 3554 */ 3555 3556 /* Send a ramrod */ 3557 rc = ecore_sp_post( pdev, 3558 RAMROD_CMD_ID_ETH_MULTICAST_RULES, 3559 raw->cid, 3560 raw->rdata_mapping.as_u64, 3561 ETH_CONNECTION_TYPE); 3562 if (rc) 3563 return rc; 3564 3565 /* Ramrod completion is pending */ 3566 return ECORE_PENDING; 3567 } 3568 } 3569 3570 static int ecore_mcast_validate_e1h(struct _lm_device_t *pdev, 3571 struct ecore_mcast_ramrod_params *p, 3572 enum ecore_mcast_cmd cmd) 3573 { 3574 /* Mark, that there is a work to do */ 3575 if ((cmd == ECORE_MCAST_CMD_DEL) || (cmd == ECORE_MCAST_CMD_RESTORE)) 3576 p->mcast_list_len = 1; 3577 3578 return ECORE_SUCCESS; 3579 } 3580 3581 static void ecore_mcast_revert_e1h(struct _lm_device_t *pdev, 3582 struct ecore_mcast_ramrod_params *p, 3583 int old_num_bins) 3584 { 3585 /* Do nothing */ 3586 } 3587 3588 #define ECORE_57711_SET_MC_FILTER(filter, bit) \ 3589 do { \ 3590 (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \ 3591 } while (0) 3592 3593 static INLINE void ecore_mcast_hdl_add_e1h(struct _lm_device_t *pdev, 3594 struct ecore_mcast_obj *o, 3595 struct ecore_mcast_ramrod_params *p, 3596 u32 *mc_filter) 3597 { 3598 struct ecore_mcast_list_elem *mlist_pos; 3599 int bit; 3600 3601 ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link, 3602 struct ecore_mcast_list_elem) { 3603 bit = ecore_mcast_bin_from_mac(mlist_pos->mac); 3604 ECORE_57711_SET_MC_FILTER(mc_filter, bit); 3605 3606 ECORE_MSG(pdev, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC, bin %d\n", 3607 mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2], mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5], bit); 3608 3609 /* bookkeeping... 
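		 * mirror the bin we just set in the 57711 MC hash into the
		 * driver's approximate-match registry, so that a later
		 * RESTORE/DEL can reconstruct or invalidate it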
*/ 3610 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, 3611 bit); 3612 } 3613 } 3614 3615 static INLINE void ecore_mcast_hdl_restore_e1h(struct _lm_device_t *pdev, 3616 struct ecore_mcast_obj *o, struct ecore_mcast_ramrod_params *p, 3617 u32 *mc_filter) 3618 { 3619 int bit; 3620 3621 for (bit = ecore_mcast_get_next_bin(o, 0); 3622 bit >= 0; 3623 bit = ecore_mcast_get_next_bin(o, bit + 1)) { 3624 ECORE_57711_SET_MC_FILTER(mc_filter, bit); 3625 ECORE_MSG(pdev, "About to set bin %d\n", bit); 3626 } 3627 } 3628 3629 /* On 57711 we write the multicast MACs' approximate match 3630 * table by directly into the TSTORM's internal RAM. So we don't 3631 * really need to handle any tricks to make it work. 3632 */ 3633 static int ecore_mcast_setup_e1h(struct _lm_device_t *pdev, 3634 struct ecore_mcast_ramrod_params *p, 3635 enum ecore_mcast_cmd cmd) 3636 { 3637 int i; 3638 struct ecore_mcast_obj *o = p->mcast_obj; 3639 struct ecore_raw_obj *r = &o->raw; 3640 3641 /* If CLEAR_ONLY has been requested - clear the registry 3642 * and clear a pending bit. 3643 */ 3644 if (!ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { 3645 u32 mc_filter[MC_HASH_SIZE] = {0}; 3646 3647 /* Set the multicast filter bits before writing it into 3648 * the internal memory. 3649 */ 3650 switch (cmd) { 3651 case ECORE_MCAST_CMD_ADD: 3652 ecore_mcast_hdl_add_e1h(pdev, o, p, mc_filter); 3653 break; 3654 3655 case ECORE_MCAST_CMD_DEL: 3656 ECORE_MSG(pdev, 3657 "Invalidating multicast MACs configuration\n"); 3658 3659 /* clear the registry */ 3660 mm_memset(o->registry.aprox_match.vec, 0, 3661 sizeof(o->registry.aprox_match.vec)); 3662 break; 3663 3664 case ECORE_MCAST_CMD_RESTORE: 3665 ecore_mcast_hdl_restore_e1h(pdev, o, p, mc_filter); 3666 break; 3667 3668 default: 3669 ECORE_ERR("Unknown command: %d\n", cmd); 3670 return ECORE_INVAL; 3671 } 3672 3673 /* Set the mcast filter in the internal memory */ 3674 for (i = 0; i < MC_HASH_SIZE; i++) 3675 REG_WR(pdev, MC_HASH_OFFSET(pdev, i), mc_filter[i]); 3676 } else 3677 /* clear the registry */ 3678 mm_memset(o->registry.aprox_match.vec, 0, 3679 sizeof(o->registry.aprox_match.vec)); 3680 3681 /* We are done */ 3682 r->clear_pending(r); 3683 3684 return ECORE_SUCCESS; 3685 } 3686 3687 static int ecore_mcast_validate_e1(struct _lm_device_t *pdev, 3688 struct ecore_mcast_ramrod_params *p, 3689 enum ecore_mcast_cmd cmd) 3690 { 3691 struct ecore_mcast_obj *o = p->mcast_obj; 3692 int reg_sz = o->get_registry_size(o); 3693 3694 switch (cmd) { 3695 /* DEL command deletes all currently configured MACs */ 3696 case ECORE_MCAST_CMD_DEL: 3697 o->set_registry_size(o, 0); 3698 /* Don't break */ 3699 3700 /* RESTORE command will restore the entire multicast configuration */ 3701 case ECORE_MCAST_CMD_RESTORE: 3702 p->mcast_list_len = reg_sz; 3703 ECORE_MSG(pdev, "Command %d, p->mcast_list_len=%d\n", 3704 cmd, p->mcast_list_len); 3705 break; 3706 3707 case ECORE_MCAST_CMD_ADD: 3708 case ECORE_MCAST_CMD_CONT: 3709 /* Multicast MACs on 57710 are configured as unicast MACs and 3710 * there is only a limited number of CAM entries for that 3711 * matter. 3712 */ 3713 if (p->mcast_list_len > o->max_cmd_len) { 3714 ECORE_ERR("Can't configure more than %d multicast MACs on 57710\n", 3715 o->max_cmd_len); 3716 return ECORE_INVAL; 3717 } 3718 /* Every configured MAC should be cleared if DEL command is 3719 * called. Only the last ADD command is relevant as long as 3720 * every ADD commands overrides the previous configuration. 
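 * (e.g. an ADD of 3 MACs followed by an ADD of 5 MACs leaves a registry size of 5, not 8).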
3721 */ 3722 ECORE_MSG(pdev, "p->mcast_list_len=%d\n", p->mcast_list_len); 3723 if (p->mcast_list_len > 0) 3724 o->set_registry_size(o, p->mcast_list_len); 3725 3726 break; 3727 3728 default: 3729 ECORE_ERR("Unknown command: %d\n", cmd); 3730 return ECORE_INVAL; 3731 } 3732 3733 /* We want to ensure that commands are executed one by one for 57710. 3734 * Therefore each none-empty command will consume o->max_cmd_len. 3735 */ 3736 if (p->mcast_list_len) 3737 o->total_pending_num += o->max_cmd_len; 3738 3739 return ECORE_SUCCESS; 3740 } 3741 3742 static void ecore_mcast_revert_e1(struct _lm_device_t *pdev, 3743 struct ecore_mcast_ramrod_params *p, 3744 int old_num_macs) 3745 { 3746 struct ecore_mcast_obj *o = p->mcast_obj; 3747 3748 o->set_registry_size(o, old_num_macs); 3749 3750 /* If current command hasn't been handled yet and we are 3751 * here means that it's meant to be dropped and we have to 3752 * update the number of outstanding MACs accordingly. 3753 */ 3754 if (p->mcast_list_len) 3755 o->total_pending_num -= o->max_cmd_len; 3756 } 3757 3758 static void ecore_mcast_set_one_rule_e1(struct _lm_device_t *pdev, 3759 struct ecore_mcast_obj *o, int idx, 3760 union ecore_mcast_config_data *cfg_data, 3761 enum ecore_mcast_cmd cmd) 3762 { 3763 struct ecore_raw_obj *r = &o->raw; 3764 struct mac_configuration_cmd *data = 3765 (struct mac_configuration_cmd *)(r->rdata); 3766 3767 /* copy mac */ 3768 if ((cmd == ECORE_MCAST_CMD_ADD) || (cmd == ECORE_MCAST_CMD_RESTORE)) { 3769 ecore_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr, 3770 &data->config_table[idx].middle_mac_addr, 3771 &data->config_table[idx].lsb_mac_addr, 3772 cfg_data->mac); 3773 3774 data->config_table[idx].vlan_id = 0; 3775 data->config_table[idx].pf_id = r->func_id; 3776 data->config_table[idx].clients_bit_vector = 3777 mm_cpu_to_le32(1 << r->cl_id); 3778 3779 ECORE_SET_FLAG(data->config_table[idx].flags, 3780 MAC_CONFIGURATION_ENTRY_ACTION_TYPE, 3781 T_ETH_MAC_COMMAND_SET); 3782 } 3783 } 3784 3785 /** 3786 * ecore_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd 3787 * 3788 * @pdev: device handle 3789 * @p: 3790 * @len: number of rules to handle 3791 */ 3792 static INLINE void ecore_mcast_set_rdata_hdr_e1(struct _lm_device_t *pdev, 3793 struct ecore_mcast_ramrod_params *p, 3794 u8 len) 3795 { 3796 struct ecore_raw_obj *r = &p->mcast_obj->raw; 3797 struct mac_configuration_cmd *data = 3798 (struct mac_configuration_cmd *)(r->rdata); 3799 3800 u8 offset = (CHIP_REV_IS_SLOW(pdev) ? 3801 ECORE_MAX_EMUL_MULTI*(1 + r->func_id) : 3802 ECORE_MAX_MULTICAST*(1 + r->func_id)); 3803 3804 data->hdr.offset = offset; 3805 data->hdr.client_id = mm_cpu_to_le16(0xff); 3806 data->hdr.echo = mm_cpu_to_le32((r->cid & ECORE_SWCID_MASK) | 3807 (ECORE_FILTER_MCAST_PENDING << 3808 ECORE_SWCID_SHIFT)); 3809 data->hdr.length = len; 3810 } 3811 3812 /** 3813 * ecore_mcast_handle_restore_cmd_e1 - restore command for 57710 3814 * 3815 * @pdev: device handle 3816 * @o: 3817 * @start_idx: index in the registry to start from 3818 * @rdata_idx: index in the ramrod data to start from 3819 * 3820 * restore command for 57710 is like all other commands - always a stand alone 3821 * command - start_idx and rdata_idx will always be 0. This function will always 3822 * succeed. 3823 * returns -1 to comply with 57712 variant. 
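 * (invoked through o->hdl_restore, e.g. by ecore_mcast_handle_pending_cmds_e1() with start_idx 0).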
3824 */ 3825 static INLINE int ecore_mcast_handle_restore_cmd_e1( 3826 struct _lm_device_t *pdev, struct ecore_mcast_obj *o , int start_idx, 3827 int *rdata_idx) 3828 { 3829 struct ecore_mcast_mac_elem *elem; 3830 int i = 0; 3831 union ecore_mcast_config_data cfg_data = {NULL}; 3832 3833 /* go through the registry and configure the MACs from it. */ 3834 ECORE_LIST_FOR_EACH_ENTRY(elem, &o->registry.exact_match.macs, link, 3835 struct ecore_mcast_mac_elem) { 3836 cfg_data.mac = &elem->mac[0]; 3837 o->set_one_rule(pdev, o, i, &cfg_data, ECORE_MCAST_CMD_RESTORE); 3838 3839 i++; 3840 3841 ECORE_MSG(pdev, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n", 3842 cfg_data.mac[0], cfg_data.mac[1], cfg_data.mac[2], cfg_data.mac[3], cfg_data.mac[4], cfg_data.mac[5]); 3843 } 3844 3845 *rdata_idx = i; 3846 3847 return -1; 3848 } 3849 3850 static INLINE int ecore_mcast_handle_pending_cmds_e1( 3851 struct _lm_device_t *pdev, struct ecore_mcast_ramrod_params *p) 3852 { 3853 struct ecore_pending_mcast_cmd *cmd_pos; 3854 struct ecore_mcast_mac_elem *pmac_pos; 3855 struct ecore_mcast_obj *o = p->mcast_obj; 3856 union ecore_mcast_config_data cfg_data = {NULL}; 3857 int cnt = 0; 3858 3859 /* If nothing to be done - return */ 3860 if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head)) 3861 return 0; 3862 3863 /* Handle the first command */ 3864 cmd_pos = ECORE_LIST_FIRST_ENTRY(&o->pending_cmds_head, 3865 struct ecore_pending_mcast_cmd, link); 3866 3867 switch (cmd_pos->type) { 3868 case ECORE_MCAST_CMD_ADD: 3869 ECORE_LIST_FOR_EACH_ENTRY(pmac_pos, &cmd_pos->data.macs_head, 3870 link, struct ecore_mcast_mac_elem) { 3871 cfg_data.mac = &pmac_pos->mac[0]; 3872 o->set_one_rule(pdev, o, cnt, &cfg_data, cmd_pos->type); 3873 3874 cnt++; 3875 3876 ECORE_MSG(pdev, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n", 3877 pmac_pos->mac[0], pmac_pos->mac[1], pmac_pos->mac[2], pmac_pos->mac[3], pmac_pos->mac[4], pmac_pos->mac[5]); 3878 } 3879 break; 3880 3881 case ECORE_MCAST_CMD_DEL: 3882 cnt = cmd_pos->data.macs_num; 3883 ECORE_MSG(pdev, "About to delete %d multicast MACs\n", cnt); 3884 break; 3885 3886 case ECORE_MCAST_CMD_RESTORE: 3887 o->hdl_restore(pdev, o, 0, &cnt); 3888 break; 3889 3890 default: 3891 ECORE_ERR("Unknown command: %d\n", cmd_pos->type); 3892 return ECORE_INVAL; 3893 } 3894 3895 ECORE_LIST_REMOVE_ENTRY(&cmd_pos->link, &o->pending_cmds_head); 3896 ECORE_FREE(pdev, cmd_pos, cmd_pos->alloc_len); 3897 3898 return cnt; 3899 } 3900 3901 /** 3902 * ecore_get_fw_mac_addr - revert the ecore_set_fw_mac_addr(). 3903 * 3904 * @fw_hi: 3905 * @fw_mid: 3906 * @fw_lo: 3907 * @mac: 3908 */ 3909 static INLINE void ecore_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid, 3910 __le16 *fw_lo, u8 *mac) 3911 { 3912 mac[1] = ((u8 *)fw_hi)[0]; 3913 mac[0] = ((u8 *)fw_hi)[1]; 3914 mac[3] = ((u8 *)fw_mid)[0]; 3915 mac[2] = ((u8 *)fw_mid)[1]; 3916 mac[5] = ((u8 *)fw_lo)[0]; 3917 mac[4] = ((u8 *)fw_lo)[1]; 3918 } 3919 3920 /** 3921 * ecore_mcast_refresh_registry_e1 - 3922 * 3923 * @pdev: device handle 3924 * @cnt: 3925 * 3926 * Check the ramrod data first entry flag to see if it's a DELETE or ADD command 3927 * and update the registry correspondingly: if ADD - allocate a memory and add 3928 * the entries to the registry (list), if DELETE - clear the registry and free 3929 * the memory. 
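 * Note that the ADD branch below allocates all registry entries as a single ECORE_CALLOC() block, and the DEL branch frees them through the first list element.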
3930 */ 3931 static INLINE int ecore_mcast_refresh_registry_e1(struct _lm_device_t *pdev, 3932 struct ecore_mcast_obj *o) 3933 { 3934 struct ecore_raw_obj *raw = &o->raw; 3935 struct ecore_mcast_mac_elem *elem; 3936 struct mac_configuration_cmd *data = 3937 (struct mac_configuration_cmd *)(raw->rdata); 3938 3939 /* If first entry contains a SET bit - the command was ADD, 3940 * otherwise - DEL_ALL 3941 */ 3942 if (ECORE_GET_FLAG(data->config_table[0].flags, 3943 MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) { 3944 int i, len = data->hdr.length; 3945 3946 /* Break if it was a RESTORE command */ 3947 if (!ECORE_LIST_IS_EMPTY(&o->registry.exact_match.macs)) 3948 return ECORE_SUCCESS; 3949 3950 elem = ECORE_CALLOC(len, sizeof(*elem), GFP_ATOMIC, pdev); 3951 if (!elem) { 3952 ECORE_ERR("Failed to allocate registry memory\n"); 3953 return ECORE_NOMEM; 3954 } 3955 3956 for (i = 0; i < len; i++, elem++) { 3957 ecore_get_fw_mac_addr( 3958 &data->config_table[i].msb_mac_addr, 3959 &data->config_table[i].middle_mac_addr, 3960 &data->config_table[i].lsb_mac_addr, 3961 elem->mac); 3962 ECORE_MSG(pdev, "Adding registry entry for [%02x:%02x:%02x:%02x:%02x:%02x]\n", 3963 elem->mac[0], elem->mac[1], elem->mac[2], elem->mac[3], elem->mac[4], elem->mac[5]); 3964 ECORE_LIST_PUSH_TAIL(&elem->link, 3965 &o->registry.exact_match.macs); 3966 } 3967 } else { 3968 elem = ECORE_LIST_FIRST_ENTRY(&o->registry.exact_match.macs, 3969 struct ecore_mcast_mac_elem, 3970 link); 3971 ECORE_MSG(pdev, "Deleting a registry\n"); 3972 ECORE_FREE(pdev, elem, sizeof(*elem)); 3973 ECORE_LIST_INIT(&o->registry.exact_match.macs); 3974 } 3975 3976 return ECORE_SUCCESS; 3977 } 3978 3979 static int ecore_mcast_setup_e1(struct _lm_device_t *pdev, 3980 struct ecore_mcast_ramrod_params *p, 3981 enum ecore_mcast_cmd cmd) 3982 { 3983 struct ecore_mcast_obj *o = p->mcast_obj; 3984 struct ecore_raw_obj *raw = &o->raw; 3985 struct mac_configuration_cmd *data = 3986 (struct mac_configuration_cmd *)(raw->rdata); 3987 int cnt = 0, i, rc; 3988 3989 /* Reset the ramrod data buffer */ 3990 mm_memset(data, 0, sizeof(*data)); 3991 3992 /* First set all entries as invalid */ 3993 for (i = 0; i < o->max_cmd_len ; i++) 3994 ECORE_SET_FLAG(data->config_table[i].flags, 3995 MAC_CONFIGURATION_ENTRY_ACTION_TYPE, 3996 T_ETH_MAC_COMMAND_INVALIDATE); 3997 3998 /* Handle pending commands first */ 3999 cnt = ecore_mcast_handle_pending_cmds_e1(pdev, p); 4000 4001 /* If there are no more pending commands - clear SCHEDULED state */ 4002 if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head)) 4003 o->clear_sched(o); 4004 4005 /* The below may be TRUE iff there were no pending commands */ 4006 if (!cnt) 4007 cnt = ecore_mcast_handle_current_cmd(pdev, p, cmd, 0); 4008 4009 /* For 57710 every command has o->max_cmd_len length to ensure that 4010 * commands are done one at a time. 4011 */ 4012 o->total_pending_num -= o->max_cmd_len; 4013 4014 /* send a ramrod */ 4015 4016 DbgBreakIf(cnt > o->max_cmd_len); 4017 4018 /* Set ramrod header (in particular, a number of entries to update) */ 4019 ecore_mcast_set_rdata_hdr_e1(pdev, p, (u8)cnt); 4020 4021 /* update a registry: we need the registry contents to be always up 4022 * to date in order to be able to execute a RESTORE opcode. Here 4023 * we use the fact that for 57710 we sent one command at a time 4024 * hence we may take the registry update out of the command handling 4025 * and do it in a simpler way here. 
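 * (i.e. rebuild it once per ramrod from the ramrod data itself, in ecore_mcast_refresh_registry_e1() right below).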
4026 */ 4027 rc = ecore_mcast_refresh_registry_e1(pdev, o); 4028 if (rc) 4029 return rc; 4030 4031 /* If CLEAR_ONLY was requested - don't send a ramrod and clear 4032 * RAMROD_PENDING status immediately. 4033 */ 4034 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { 4035 raw->clear_pending(raw); 4036 return ECORE_SUCCESS; 4037 } else { 4038 /* No need for an explicit memory barrier here as long as we 4039 * ensure the ordering of writing to the SPQ element 4040 * and updating of the SPQ producer which involves a memory 4041 * read. If the memory read is removed we will have to put a 4042 * full memory barrier there (inside ecore_sp_post()). 4043 */ 4044 4045 /* Send a ramrod */ 4046 rc = ecore_sp_post( pdev, 4047 RAMROD_CMD_ID_ETH_SET_MAC, 4048 raw->cid, 4049 raw->rdata_mapping.as_u64, 4050 ETH_CONNECTION_TYPE); 4051 if (rc) 4052 return rc; 4053 4054 /* Ramrod completion is pending */ 4055 return ECORE_PENDING; 4056 } 4057 } 4058 4059 static int ecore_mcast_get_registry_size_exact(struct ecore_mcast_obj *o) 4060 { 4061 return o->registry.exact_match.num_macs_set; 4062 } 4063 4064 static int ecore_mcast_get_registry_size_aprox(struct ecore_mcast_obj *o) 4065 { 4066 return o->registry.aprox_match.num_bins_set; 4067 } 4068 4069 static void ecore_mcast_set_registry_size_exact(struct ecore_mcast_obj *o, 4070 int n) 4071 { 4072 o->registry.exact_match.num_macs_set = n; 4073 } 4074 4075 static void ecore_mcast_set_registry_size_aprox(struct ecore_mcast_obj *o, 4076 int n) 4077 { 4078 o->registry.aprox_match.num_bins_set = n; 4079 } 4080 4081 int ecore_config_mcast(struct _lm_device_t *pdev, 4082 struct ecore_mcast_ramrod_params *p, 4083 enum ecore_mcast_cmd cmd) 4084 { 4085 struct ecore_mcast_obj *o = p->mcast_obj; 4086 struct ecore_raw_obj *r = &o->raw; 4087 int rc = 0, old_reg_size; 4088 4089 /* This is needed to recover number of currently configured mcast macs 4090 * in case of failure. 4091 */ 4092 old_reg_size = o->get_registry_size(o); 4093 4094 /* Do some calculations and checks */ 4095 rc = o->validate(pdev, p, cmd); 4096 if (rc) 4097 return rc; 4098 4099 /* Return if there is no work to do */ 4100 if ((!p->mcast_list_len) && (!o->check_sched(o))) 4101 return ECORE_SUCCESS; 4102 4103 ECORE_MSG(pdev, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n", 4104 o->total_pending_num, p->mcast_list_len, o->max_cmd_len); 4105 4106 /* Enqueue the current command to the pending list if we can't complete 4107 * it in the current iteration 4108 */ 4109 if (r->check_pending(r) || 4110 ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) { 4111 rc = o->enqueue_cmd(pdev, p->mcast_obj, p, cmd); 4112 if (rc < 0) 4113 goto error_exit1; 4114 4115 /* As long as the current command is in a command list we 4116 * don't need to handle it separately. 
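 * (zeroing p->mcast_list_len below lets the subsequent config_mcast() call treat the command as already queued).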
4117 */ 4118 p->mcast_list_len = 0; 4119 } 4120 4121 if (!r->check_pending(r)) { 4122 4123 /* Set 'pending' state */ 4124 r->set_pending(r); 4125 4126 /* Configure the new classification in the chip */ 4127 rc = o->config_mcast(pdev, p, cmd); 4128 if (rc < 0) 4129 goto error_exit2; 4130 4131 /* Wait for a ramrod completion if was requested */ 4132 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) 4133 rc = o->wait_comp(pdev, o); 4134 } 4135 4136 return rc; 4137 4138 error_exit2: 4139 r->clear_pending(r); 4140 4141 error_exit1: 4142 o->revert(pdev, p, old_reg_size); 4143 4144 return rc; 4145 } 4146 4147 static void ecore_mcast_clear_sched(struct ecore_mcast_obj *o) 4148 { 4149 smp_mb__before_atomic(); 4150 ECORE_CLEAR_BIT(o->sched_state, o->raw.pstate); 4151 smp_mb__after_atomic(); 4152 } 4153 4154 static void ecore_mcast_set_sched(struct ecore_mcast_obj *o) 4155 { 4156 smp_mb__before_atomic(); 4157 ECORE_SET_BIT(o->sched_state, o->raw.pstate); 4158 smp_mb__after_atomic(); 4159 } 4160 4161 static BOOL ecore_mcast_check_sched(struct ecore_mcast_obj *o) 4162 { 4163 return !!ECORE_TEST_BIT(o->sched_state, o->raw.pstate); 4164 } 4165 4166 static BOOL ecore_mcast_check_pending(struct ecore_mcast_obj *o) 4167 { 4168 return o->raw.check_pending(&o->raw) || o->check_sched(o); 4169 } 4170 #ifndef ECORE_ERASE 4171 typedef int (*enqueue_cmd_func)(struct _lm_device_t *pdev, 4172 struct ecore_mcast_obj *o, 4173 struct ecore_mcast_ramrod_params *p, 4174 enum ecore_mcast_cmd cmd); 4175 4176 typedef int (*hdl_restore_func)(struct _lm_device_t *pdev, 4177 struct ecore_mcast_obj *o, 4178 int start_bin, int *rdata_idx); 4179 4180 typedef void (*set_one_rule_func)(struct _lm_device_t *pdev, 4181 struct ecore_mcast_obj *o, int idx, 4182 union ecore_mcast_config_data *cfg_data, 4183 enum ecore_mcast_cmd cmd); 4184 #endif 4185 4186 void ecore_init_mcast_obj(struct _lm_device_t *pdev, 4187 struct ecore_mcast_obj *mcast_obj, 4188 u8 mcast_cl_id, u32 mcast_cid, u8 func_id, 4189 u8 engine_id, void *rdata, lm_address_t rdata_mapping, 4190 int state, unsigned long *pstate, ecore_obj_type type) 4191 { 4192 mm_memset(mcast_obj, 0, sizeof(*mcast_obj)); 4193 4194 ecore_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id, 4195 rdata, rdata_mapping, state, pstate, type); 4196 4197 mcast_obj->engine_id = engine_id; 4198 4199 ECORE_LIST_INIT(&mcast_obj->pending_cmds_head); 4200 4201 mcast_obj->sched_state = ECORE_FILTER_MCAST_SCHED; 4202 mcast_obj->check_sched = ecore_mcast_check_sched; 4203 mcast_obj->set_sched = ecore_mcast_set_sched; 4204 mcast_obj->clear_sched = ecore_mcast_clear_sched; 4205 4206 if (CHIP_IS_E1(pdev)) { 4207 mcast_obj->config_mcast = ecore_mcast_setup_e1; 4208 mcast_obj->enqueue_cmd = ecore_mcast_enqueue_cmd; 4209 mcast_obj->hdl_restore = 4210 ecore_mcast_handle_restore_cmd_e1; 4211 mcast_obj->check_pending = ecore_mcast_check_pending; 4212 4213 if (CHIP_REV_IS_SLOW(pdev)) 4214 mcast_obj->max_cmd_len = ECORE_MAX_EMUL_MULTI; 4215 else 4216 mcast_obj->max_cmd_len = ECORE_MAX_MULTICAST; 4217 4218 mcast_obj->wait_comp = ecore_mcast_wait; 4219 mcast_obj->set_one_rule = ecore_mcast_set_one_rule_e1; 4220 mcast_obj->validate = ecore_mcast_validate_e1; 4221 mcast_obj->revert = ecore_mcast_revert_e1; 4222 mcast_obj->get_registry_size = 4223 ecore_mcast_get_registry_size_exact; 4224 mcast_obj->set_registry_size = 4225 ecore_mcast_set_registry_size_exact; 4226 4227 /* 57710 is the only chip that uses the exact match for mcast 4228 * at the moment. 
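 * A RESTORE on 57710 therefore replays registry.exact_match.macs (see ecore_mcast_handle_restore_cmd_e1()) rather than the approximate-match bin vector.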
4229 */ 4230 ECORE_LIST_INIT(&mcast_obj->registry.exact_match.macs); 4231 4232 } else if (CHIP_IS_E1H(pdev)) { 4233 mcast_obj->config_mcast = ecore_mcast_setup_e1h; 4234 mcast_obj->enqueue_cmd = (enqueue_cmd_func)NULL; 4235 mcast_obj->hdl_restore = (hdl_restore_func)NULL; 4236 mcast_obj->check_pending = ecore_mcast_check_pending; 4237 4238 /* 57711 doesn't send a ramrod, so it has unlimited credit 4239 * for one command. 4240 */ 4241 mcast_obj->max_cmd_len = -1; 4242 mcast_obj->wait_comp = ecore_mcast_wait; 4243 mcast_obj->set_one_rule = (set_one_rule_func)NULL; 4244 mcast_obj->validate = ecore_mcast_validate_e1h; 4245 mcast_obj->revert = ecore_mcast_revert_e1h; 4246 mcast_obj->get_registry_size = 4247 ecore_mcast_get_registry_size_aprox; 4248 mcast_obj->set_registry_size = 4249 ecore_mcast_set_registry_size_aprox; 4250 } else { 4251 mcast_obj->config_mcast = ecore_mcast_setup_e2; 4252 mcast_obj->enqueue_cmd = ecore_mcast_enqueue_cmd; 4253 mcast_obj->hdl_restore = 4254 ecore_mcast_handle_restore_cmd_e2; 4255 mcast_obj->check_pending = ecore_mcast_check_pending; 4256 /* TODO: There should be a proper HSI define for this number!!! 4257 */ 4258 mcast_obj->max_cmd_len = 16; 4259 mcast_obj->wait_comp = ecore_mcast_wait; 4260 mcast_obj->set_one_rule = ecore_mcast_set_one_rule_e2; 4261 mcast_obj->validate = ecore_mcast_validate_e2; 4262 mcast_obj->revert = ecore_mcast_revert_e2; 4263 mcast_obj->get_registry_size = 4264 ecore_mcast_get_registry_size_aprox; 4265 mcast_obj->set_registry_size = 4266 ecore_mcast_set_registry_size_aprox; 4267 } 4268 } 4269 4270 /*************************** Credit handling **********************************/ 4271 4272 /** 4273 * atomic_add_ifless - add if the result is less than a given value. 4274 * 4275 * @v: pointer of type atomic_t 4276 * @a: the amount to add to v... 4277 * @u: ...if (v + a) is less than u. 4278 * 4279 * returns TRUE if (v + a) was less than u, and FALSE otherwise. 4280 * 4281 */ 4282 static INLINE BOOL __atomic_add_ifless(atomic_t *v, int a, int u) 4283 { 4284 int c, old; 4285 4286 c = ecore_atomic_read(v); 4287 for (;;) { 4288 if (ECORE_UNLIKELY(c + a >= u)) 4289 return FALSE; 4290 4291 old = ecore_atomic_cmpxchg((v), c, c + a); 4292 if (ECORE_LIKELY(old == c)) 4293 break; 4294 c = old; 4295 } 4296 4297 return TRUE; 4298 } 4299 4300 /** 4301 * atomic_dec_ifmoe - decrement if the result is greater than or equal to a given value. 4302 * 4303 * @v: pointer of type atomic_t 4304 * @a: the amount to subtract from v... 4305 * @u: ...if (v - a) is greater than or equal to u. 4306 * 4307 * returns TRUE if (v - a) was greater than or equal to u, and FALSE 4308 * otherwise.
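 * Usage sketch (mirrors ecore_credit_pool_get()/ecore_credit_pool_put() below): __atomic_dec_ifmoe(&o->credit, cnt, 0) takes cnt credits only when at least cnt remain, and __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1) returns them without ever exceeding pool_sz.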
4309 */ 4310 static INLINE BOOL __atomic_dec_ifmoe(atomic_t *v, int a, int u) 4311 { 4312 int c, old; 4313 4314 c = ecore_atomic_read(v); 4315 for (;;) { 4316 if (ECORE_UNLIKELY(c - a < u)) 4317 return FALSE; 4318 4319 old = ecore_atomic_cmpxchg((v), c, c - a); 4320 if (ECORE_LIKELY(old == c)) 4321 break; 4322 c = old; 4323 } 4324 4325 return TRUE; 4326 } 4327 4328 static BOOL ecore_credit_pool_get(struct ecore_credit_pool_obj *o, int cnt) 4329 { 4330 BOOL rc; 4331 4332 smp_mb(); 4333 rc = __atomic_dec_ifmoe(&o->credit, cnt, 0); 4334 smp_mb(); 4335 4336 return rc; 4337 } 4338 4339 static BOOL ecore_credit_pool_put(struct ecore_credit_pool_obj *o, int cnt) 4340 { 4341 BOOL rc; 4342 4343 smp_mb(); 4344 4345 /* Don't let to refill if credit + cnt > pool_sz */ 4346 rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1); 4347 4348 smp_mb(); 4349 4350 return rc; 4351 } 4352 4353 static int ecore_credit_pool_check(struct ecore_credit_pool_obj *o) 4354 { 4355 int cur_credit; 4356 4357 smp_mb(); 4358 cur_credit = ecore_atomic_read(&o->credit); 4359 4360 return cur_credit; 4361 } 4362 4363 static BOOL ecore_credit_pool_always_TRUE(struct ecore_credit_pool_obj *o, 4364 int cnt) 4365 { 4366 return TRUE; 4367 } 4368 4369 static BOOL ecore_credit_pool_get_entry( 4370 struct ecore_credit_pool_obj *o, 4371 int *offset) 4372 { 4373 int idx, vec, i; 4374 4375 *offset = -1; 4376 4377 /* Find "internal cam-offset" then add to base for this object... */ 4378 for (vec = 0; vec < ECORE_POOL_VEC_SIZE; vec++) { 4379 4380 /* Skip the current vector if there are no free entries in it */ 4381 if (!o->pool_mirror[vec]) 4382 continue; 4383 4384 /* If we've got here we are going to find a free entry */ 4385 for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0; 4386 i < BIT_VEC64_ELEM_SZ; idx++, i++) 4387 4388 if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) { 4389 /* Got one!! */ 4390 BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx); 4391 *offset = o->base_pool_offset + idx; 4392 return TRUE; 4393 } 4394 } 4395 4396 return FALSE; 4397 } 4398 4399 static BOOL ecore_credit_pool_put_entry( 4400 struct ecore_credit_pool_obj *o, 4401 int offset) 4402 { 4403 if (offset < o->base_pool_offset) 4404 return FALSE; 4405 4406 offset -= o->base_pool_offset; 4407 4408 if (offset >= o->pool_sz) 4409 return FALSE; 4410 4411 /* Return the entry to the pool */ 4412 BIT_VEC64_SET_BIT(o->pool_mirror, offset); 4413 4414 return TRUE; 4415 } 4416 4417 static BOOL ecore_credit_pool_put_entry_always_TRUE( 4418 struct ecore_credit_pool_obj *o, 4419 int offset) 4420 { 4421 return TRUE; 4422 } 4423 4424 static BOOL ecore_credit_pool_get_entry_always_TRUE( 4425 struct ecore_credit_pool_obj *o, 4426 int *offset) 4427 { 4428 *offset = -1; 4429 return TRUE; 4430 } 4431 /** 4432 * ecore_init_credit_pool - initialize credit pool internals. 4433 * 4434 * @p: 4435 * @base: Base entry in the CAM to use. 4436 * @credit: pool size. 4437 * 4438 * If base is negative no CAM entries handling will be performed. 4439 * If credit is negative pool operations will always succeed (unlimited pool). 
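 * For example, ecore_init_credit_pool(p, -1, cam_sz) (as used for 57712 and newer below) keeps only the credit count and skips CAM-entry bookkeeping, while ecore_init_credit_pool(p, 0, 0) blocks all operations.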
4440 * 4441 */ 4442 static INLINE void ecore_init_credit_pool(struct ecore_credit_pool_obj *p, 4443 int base, int credit) 4444 { 4445 /* Zero the object first */ 4446 mm_memset(p, 0, sizeof(*p)); 4447 4448 /* Set the table to all 1s */ 4449 mm_memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror)); 4450 4451 /* Init a pool as full */ 4452 ecore_atomic_set(&p->credit, credit); 4453 4454 /* The total poll size */ 4455 p->pool_sz = credit; 4456 4457 p->base_pool_offset = base; 4458 4459 /* Commit the change */ 4460 smp_mb(); 4461 4462 p->check = ecore_credit_pool_check; 4463 4464 /* if pool credit is negative - disable the checks */ 4465 if (credit >= 0) { 4466 p->put = ecore_credit_pool_put; 4467 p->get = ecore_credit_pool_get; 4468 p->put_entry = ecore_credit_pool_put_entry; 4469 p->get_entry = ecore_credit_pool_get_entry; 4470 } else { 4471 p->put = ecore_credit_pool_always_TRUE; 4472 p->get = ecore_credit_pool_always_TRUE; 4473 p->put_entry = ecore_credit_pool_put_entry_always_TRUE; 4474 p->get_entry = ecore_credit_pool_get_entry_always_TRUE; 4475 } 4476 4477 /* If base is negative - disable entries handling */ 4478 if (base < 0) { 4479 p->put_entry = ecore_credit_pool_put_entry_always_TRUE; 4480 p->get_entry = ecore_credit_pool_get_entry_always_TRUE; 4481 } 4482 } 4483 4484 void ecore_init_mac_credit_pool(struct _lm_device_t *pdev, 4485 struct ecore_credit_pool_obj *p, u8 func_id, 4486 u8 func_num) 4487 { 4488 /* TODO: this will be defined in consts as well... */ 4489 #define ECORE_CAM_SIZE_EMUL 5 4490 4491 int cam_sz; 4492 4493 if (CHIP_IS_E1(pdev)) { 4494 /* In E1, Multicast is saved in cam... */ 4495 if (!CHIP_REV_IS_SLOW(pdev)) 4496 cam_sz = (MAX_MAC_CREDIT_E1 / 2) - ECORE_MAX_MULTICAST; 4497 else 4498 cam_sz = ECORE_CAM_SIZE_EMUL - ECORE_MAX_EMUL_MULTI; 4499 4500 ecore_init_credit_pool(p, func_id * cam_sz, cam_sz); 4501 4502 } else if (CHIP_IS_E1H(pdev)) { 4503 /* CAM credit is equally divided between all active functions 4504 * on the PORT!. 4505 */ 4506 if ((func_num > 0)) { 4507 if (!CHIP_REV_IS_SLOW(pdev)) 4508 cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num)); 4509 else 4510 cam_sz = ECORE_CAM_SIZE_EMUL; 4511 ecore_init_credit_pool(p, func_id * cam_sz, cam_sz); 4512 } else { 4513 /* this should never happen! Block MAC operations. */ 4514 ecore_init_credit_pool(p, 0, 0); 4515 } 4516 4517 } else { 4518 4519 /* 4520 * CAM credit is equaly divided between all active functions 4521 * on the PATH. 4522 */ 4523 if ((func_num > 1)) { 4524 if (!CHIP_REV_IS_SLOW(pdev)) 4525 cam_sz = (MAX_MAC_CREDIT_E2 4526 - GET_NUM_VFS_PER_PATH(pdev)) 4527 / func_num 4528 + GET_NUM_VFS_PER_PF(pdev); 4529 else 4530 cam_sz = ECORE_CAM_SIZE_EMUL; 4531 4532 /* No need for CAM entries handling for 57712 and 4533 * newer. 4534 */ 4535 ecore_init_credit_pool(p, -1, cam_sz); 4536 } else if (func_num == 1) { 4537 if (!CHIP_REV_IS_SLOW(pdev)) 4538 cam_sz = MAX_MAC_CREDIT_E2; 4539 else 4540 cam_sz = ECORE_CAM_SIZE_EMUL; 4541 4542 /* No need for CAM entries handling for 57712 and 4543 * newer. 4544 */ 4545 ecore_init_credit_pool(p, -1, cam_sz); 4546 } else { 4547 /* this should never happen! Block MAC operations. 
*/ 4548 ecore_init_credit_pool(p, 0, 0); 4549 } 4550 } 4551 } 4552 4553 void ecore_init_vlan_credit_pool(struct _lm_device_t *pdev, 4554 struct ecore_credit_pool_obj *p, 4555 u8 func_id, 4556 u8 func_num) 4557 { 4558 if (CHIP_IS_E1x(pdev)) { 4559 /* There is no VLAN credit in HW on 57710 and 57711 only 4560 * MAC / MAC-VLAN can be set 4561 */ 4562 ecore_init_credit_pool(p, 0, -1); 4563 } else { 4564 /* CAM credit is equally divided between all active functions 4565 * on the PATH. 4566 */ 4567 if (func_num > 0) { 4568 int credit = MAX_VLAN_CREDIT_E2 / func_num; 4569 ecore_init_credit_pool(p, func_id * credit, credit); 4570 } else 4571 /* this should never happen! Block VLAN operations. */ 4572 ecore_init_credit_pool(p, 0, 0); 4573 } 4574 } 4575 4576 /****************** RSS Configuration ******************/ 4577 #if defined(ECORE_ERASE) && !defined(__FreeBSD__) 4578 /** 4579 * bnx2x_debug_print_ind_table - prints the indirection table configuration. 4580 * 4581 * @bp: driver handle 4582 * @p: pointer to rss configuration 4583 * 4584 * Prints it when NETIF_MSG_IFUP debug level is configured. 4585 */ 4586 static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp, 4587 struct bnx2x_config_rss_params *p) 4588 { 4589 int i; 4590 4591 DP(BNX2X_MSG_SP, "Setting indirection table to:\n"); 4592 DP(BNX2X_MSG_SP, "0x0000: "); 4593 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) { 4594 DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]); 4595 4596 /* Print 4 bytes in a line */ 4597 if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) && 4598 (((i + 1) & 0x3) == 0)) { 4599 DP_CONT(BNX2X_MSG_SP, "\n"); 4600 DP(BNX2X_MSG_SP, "0x%04x: ", i + 1); 4601 } 4602 } 4603 4604 DP_CONT(BNX2X_MSG_SP, "\n"); 4605 } 4606 #endif /* ECORE_ERASE && !__FreeBSD__ */ 4607 4608 /** 4609 * ecore_setup_rss - configure RSS 4610 * 4611 * @pdev: device handle 4612 * @p: rss configuration 4613 * 4614 * sends on UPDATE ramrod for that matter. 4615 */ 4616 static int ecore_setup_rss(struct _lm_device_t *pdev, 4617 struct ecore_config_rss_params *p) 4618 { 4619 struct ecore_rss_config_obj *o = p->rss_obj; 4620 struct ecore_raw_obj *r = &o->raw; 4621 struct eth_rss_update_ramrod_data *data = 4622 (struct eth_rss_update_ramrod_data *)(r->rdata); 4623 u16 caps = 0; 4624 u8 rss_mode = 0; 4625 int rc; 4626 4627 mm_memset(data, 0, sizeof(*data)); 4628 4629 ECORE_MSG(pdev, "Configuring RSS\n"); 4630 4631 /* Set an echo field */ 4632 data->echo = mm_cpu_to_le32((r->cid & ECORE_SWCID_MASK) | 4633 (r->state << ECORE_SWCID_SHIFT)); 4634 4635 /* RSS mode */ 4636 if (ECORE_TEST_BIT(ECORE_RSS_MODE_DISABLED, &p->rss_flags)) 4637 rss_mode = ETH_RSS_MODE_DISABLED; 4638 else if (ECORE_TEST_BIT(ECORE_RSS_MODE_REGULAR, &p->rss_flags)) 4639 rss_mode = ETH_RSS_MODE_REGULAR; 4640 #if defined(__VMKLNX__) && (VMWARE_ESX_DDK_VERSION < 55000) /* ! 
BNX2X_UPSTREAM */ 4641 else if (ECORE_TEST_BIT(ECORE_RSS_MODE_ESX51, &p->rss_flags)) 4642 rss_mode = ETH_RSS_MODE_ESX51; 4643 #endif 4644 4645 data->rss_mode = rss_mode; 4646 4647 ECORE_MSG(pdev, "rss_mode=%d\n", rss_mode); 4648 4649 /* RSS capabilities */ 4650 if (ECORE_TEST_BIT(ECORE_RSS_IPV4, &p->rss_flags)) 4651 caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY; 4652 4653 if (ECORE_TEST_BIT(ECORE_RSS_IPV4_TCP, &p->rss_flags)) 4654 caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY; 4655 4656 if (ECORE_TEST_BIT(ECORE_RSS_IPV4_UDP, &p->rss_flags)) 4657 caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY; 4658 4659 if (ECORE_TEST_BIT(ECORE_RSS_IPV6, &p->rss_flags)) 4660 caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY; 4661 4662 if (ECORE_TEST_BIT(ECORE_RSS_IPV6_TCP, &p->rss_flags)) 4663 caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY; 4664 4665 if (ECORE_TEST_BIT(ECORE_RSS_IPV6_UDP, &p->rss_flags)) 4666 caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY; 4667 4668 if (ECORE_TEST_BIT(ECORE_RSS_IPV4_VXLAN, &p->rss_flags)) 4669 caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY; 4670 4671 if (ECORE_TEST_BIT(ECORE_RSS_IPV6_VXLAN, &p->rss_flags)) 4672 caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY; 4673 4674 if (ECORE_TEST_BIT(ECORE_RSS_NVGRE_KEY_ENTROPY, &p->rss_flags)) 4675 caps |= ETH_RSS_UPDATE_RAMROD_DATA_NVGRE_KEY_ENTROPY_CAPABILITY; 4676 4677 if (ECORE_TEST_BIT(ECORE_RSS_GRE_INNER_HDRS, &p->rss_flags)) 4678 caps |= ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY; 4679 4680 data->capabilities = mm_cpu_to_le16(caps); 4681 4682 /* Hashing mask */ 4683 data->rss_result_mask = p->rss_result_mask; 4684 4685 /* RSS engine ID */ 4686 data->rss_engine_id = o->engine_id; 4687 4688 ECORE_MSG(pdev, "rss_engine_id=%d\n", data->rss_engine_id); 4689 4690 /* Indirection table */ 4691 mm_memcpy(data->indirection_table, p->ind_table, 4692 T_ETH_INDIRECTION_TABLE_SIZE); 4693 4694 /* Remember the last configuration */ 4695 mm_memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE); 4696 4697 #if defined(ECORE_ERASE) && !defined(__FreeBSD__) 4698 /* Print the indirection table */ 4699 if (netif_msg_ifup(bp)) 4700 bnx2x_debug_print_ind_table(bp, p); 4701 #endif 4702 4703 /* RSS keys */ 4704 if (ECORE_TEST_BIT(ECORE_RSS_SET_SRCH, &p->rss_flags)) { 4705 mm_memcpy(&data->rss_key[0], &p->rss_key[0], 4706 sizeof(data->rss_key)); 4707 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY; 4708 } 4709 4710 /* No need for an explicit memory barrier here as long as we 4711 * ensure the ordering of writing to the SPQ element 4712 * and updating of the SPQ producer which involves a memory 4713 * read. If the memory read is removed we will have to put a 4714 * full memory barrier there (inside ecore_sp_post()). 
4715 */ 4716 4717 /* Send a ramrod */ 4718 rc = ecore_sp_post(pdev, 4719 RAMROD_CMD_ID_ETH_RSS_UPDATE, 4720 r->cid, 4721 r->rdata_mapping.as_u64, 4722 ETH_CONNECTION_TYPE); 4723 4724 if (rc < 0) 4725 return rc; 4726 4727 return ECORE_PENDING; 4728 } 4729 4730 void ecore_get_rss_ind_table(struct ecore_rss_config_obj *rss_obj, 4731 u8 *ind_table) 4732 { 4733 mm_memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table)); 4734 } 4735 4736 int ecore_config_rss(struct _lm_device_t *pdev, 4737 struct ecore_config_rss_params *p) 4738 { 4739 int rc; 4740 struct ecore_rss_config_obj *o = p->rss_obj; 4741 struct ecore_raw_obj *r = &o->raw; 4742 4743 /* Do nothing if only driver cleanup was requested */ 4744 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { 4745 ECORE_MSG(pdev, "Not configuring RSS ramrod_flags=%lx\n", 4746 p->ramrod_flags); 4747 return ECORE_SUCCESS; 4748 } 4749 4750 r->set_pending(r); 4751 4752 rc = o->config_rss(pdev, p); 4753 if (rc < 0) { 4754 r->clear_pending(r); 4755 return rc; 4756 } 4757 4758 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) 4759 rc = r->wait_comp(pdev, r); 4760 4761 return rc; 4762 } 4763 4764 void ecore_init_rss_config_obj(struct _lm_device_t *pdev, 4765 struct ecore_rss_config_obj *rss_obj, 4766 u8 cl_id, u32 cid, u8 func_id, u8 engine_id, 4767 void *rdata, lm_address_t rdata_mapping, 4768 int state, unsigned long *pstate, 4769 ecore_obj_type type) 4770 { 4771 ecore_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata, 4772 rdata_mapping, state, pstate, type); 4773 4774 rss_obj->engine_id = engine_id; 4775 rss_obj->config_rss = ecore_setup_rss; 4776 } 4777 4778 #ifdef ECORE_ERASE 4779 /********************** Queue state object ***********************************/ 4780 4781 /** 4782 * ecore_queue_state_change - perform Queue state change transition 4783 * 4784 * @pdev: device handle 4785 * @params: parameters to perform the transition 4786 * 4787 * returns 0 in case of successfully completed transition, negative error 4788 * code in case of failure, positive (EBUSY) value if there is a completion 4789 * to that is still pending (possible only if RAMROD_COMP_WAIT is 4790 * not set in params->ramrod_flags for asynchronous commands). 4791 * 4792 */ 4793 int ecore_queue_state_change(struct _lm_device_t *pdev, 4794 struct ecore_queue_state_params *params) 4795 { 4796 struct ecore_queue_sp_obj *o = params->q_obj; 4797 int rc, pending_bit; 4798 unsigned long *pending = &o->pending; 4799 4800 /* Check that the requested transition is legal */ 4801 rc = o->check_transition(pdev, o, params); 4802 if (rc) { 4803 ECORE_ERR("check transition returned an error. 
rc %d\n", rc); 4804 return ECORE_INVAL; 4805 } 4806 4807 /* Set "pending" bit */ 4808 ECORE_MSG(pdev, "pending bit was=%lx\n", o->pending); 4809 pending_bit = o->set_pending(o, params); 4810 ECORE_MSG(pdev, "pending bit now=%lx\n", o->pending); 4811 4812 /* Don't send a command if only driver cleanup was requested */ 4813 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) 4814 o->complete_cmd(pdev, o, pending_bit); 4815 else { 4816 /* Send a ramrod */ 4817 rc = o->send_cmd(pdev, params); 4818 if (rc) { 4819 o->next_state = ECORE_Q_STATE_MAX; 4820 ECORE_CLEAR_BIT(pending_bit, pending); 4821 smp_mb__after_atomic(); 4822 return rc; 4823 } 4824 4825 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, ¶ms->ramrod_flags)) { 4826 rc = o->wait_comp(pdev, o, pending_bit); 4827 if (rc) 4828 return rc; 4829 4830 return ECORE_SUCCESS; 4831 } 4832 } 4833 4834 return ECORE_RET_PENDING(pending_bit, pending); 4835 } 4836 4837 static int ecore_queue_set_pending(struct ecore_queue_sp_obj *obj, 4838 struct ecore_queue_state_params *params) 4839 { 4840 enum ecore_queue_cmd cmd = params->cmd, bit; 4841 4842 /* ACTIVATE and DEACTIVATE commands are implemented on top of 4843 * UPDATE command. 4844 */ 4845 if ((cmd == ECORE_Q_CMD_ACTIVATE) || 4846 (cmd == ECORE_Q_CMD_DEACTIVATE)) 4847 bit = ECORE_Q_CMD_UPDATE; 4848 else 4849 bit = cmd; 4850 4851 ECORE_SET_BIT(bit, &obj->pending); 4852 return bit; 4853 } 4854 4855 static int ecore_queue_wait_comp(struct _lm_device_t *pdev, 4856 struct ecore_queue_sp_obj *o, 4857 enum ecore_queue_cmd cmd) 4858 { 4859 return ecore_state_wait(pdev, cmd, &o->pending); 4860 } 4861 4862 /** 4863 * ecore_queue_comp_cmd - complete the state change command. 4864 * 4865 * @pdev: device handle 4866 * @o: 4867 * @cmd: 4868 * 4869 * Checks that the arrived completion is expected. 4870 */ 4871 static int ecore_queue_comp_cmd(struct _lm_device_t *pdev, 4872 struct ecore_queue_sp_obj *o, 4873 enum ecore_queue_cmd cmd) 4874 { 4875 unsigned long cur_pending = o->pending; 4876 4877 if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) { 4878 ECORE_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n", 4879 cmd, o->cids[ECORE_PRIMARY_CID_INDEX], 4880 o->state, cur_pending, o->next_state); 4881 return ECORE_INVAL; 4882 } 4883 4884 if (o->next_tx_only >= o->max_cos) 4885 /* >= because tx only must always be smaller than cos since the 4886 * primary connection supports COS 0 4887 */ 4888 ECORE_ERR("illegal value for next tx_only: %d. max cos was %d", 4889 o->next_tx_only, o->max_cos); 4890 4891 ECORE_MSG(pdev, 4892 "Completing command %d for queue %d, setting state to %d\n", 4893 cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->next_state); 4894 4895 if (o->next_tx_only) /* print num tx-only if any exist */ 4896 ECORE_MSG(pdev, "primary cid %d: num tx-only cons %d\n", 4897 o->cids[ECORE_PRIMARY_CID_INDEX], o->next_tx_only); 4898 4899 o->state = o->next_state; 4900 o->num_tx_only = o->next_tx_only; 4901 o->next_state = ECORE_Q_STATE_MAX; 4902 4903 /* It's important that o->state and o->next_state are 4904 * updated before o->pending. 
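 * Otherwise a waiter (e.g. ecore_state_wait()) that sees the pending bit cleared could still observe the stale state; the wmb() below enforces that ordering.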
4905 */ 4906 wmb(); 4907 4908 ECORE_CLEAR_BIT(cmd, &o->pending); 4909 smp_mb__after_atomic(); 4910 4911 return ECORE_SUCCESS; 4912 } 4913 4914 static void ecore_q_fill_setup_data_e2(struct _lm_device_t *pdev, 4915 struct ecore_queue_state_params *cmd_params, 4916 struct client_init_ramrod_data *data) 4917 { 4918 struct ecore_queue_setup_params *params = &cmd_params->params.setup; 4919 4920 /* Rx data */ 4921 4922 /* IPv6 TPA supported for E2 and above only */ 4923 data->rx.tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_IPV6, 4924 ¶ms->flags) * 4925 CLIENT_INIT_RX_DATA_TPA_EN_IPV6; 4926 } 4927 4928 static void ecore_q_fill_init_general_data(struct _lm_device_t *pdev, 4929 struct ecore_queue_sp_obj *o, 4930 struct ecore_general_setup_params *params, 4931 struct client_init_general_data *gen_data, 4932 unsigned long *flags) 4933 { 4934 gen_data->client_id = o->cl_id; 4935 4936 if (ECORE_TEST_BIT(ECORE_Q_FLG_STATS, flags)) { 4937 gen_data->statistics_counter_id = 4938 params->stat_id; 4939 gen_data->statistics_en_flg = 1; 4940 gen_data->statistics_zero_flg = 4941 ECORE_TEST_BIT(ECORE_Q_FLG_ZERO_STATS, flags); 4942 } else 4943 gen_data->statistics_counter_id = 4944 DISABLE_STATISTIC_COUNTER_ID_VALUE; 4945 4946 gen_data->is_fcoe_flg = ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, 4947 flags); 4948 gen_data->activate_flg = ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE, 4949 flags); 4950 gen_data->sp_client_id = params->spcl_id; 4951 gen_data->mtu = mm_cpu_to_le16(params->mtu); 4952 gen_data->func_id = o->func_id; 4953 4954 gen_data->cos = params->cos; 4955 4956 gen_data->traffic_type = 4957 ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, flags) ? 4958 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW; 4959 4960 gen_data->fp_hsi_ver = ETH_FP_HSI_VERSION; 4961 4962 ECORE_MSG(pdev, "flags: active %d, cos %d, stats en %d\n", 4963 gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg); 4964 } 4965 4966 static void ecore_q_fill_init_tx_data(struct ecore_queue_sp_obj *o, 4967 struct ecore_txq_setup_params *params, 4968 struct client_init_tx_data *tx_data, 4969 unsigned long *flags) 4970 { 4971 tx_data->enforce_security_flg = 4972 ECORE_TEST_BIT(ECORE_Q_FLG_TX_SEC, flags); 4973 tx_data->default_vlan = 4974 mm_cpu_to_le16(params->default_vlan); 4975 tx_data->default_vlan_flg = 4976 ECORE_TEST_BIT(ECORE_Q_FLG_DEF_VLAN, flags); 4977 tx_data->tx_switching_flg = 4978 ECORE_TEST_BIT(ECORE_Q_FLG_TX_SWITCH, flags); 4979 tx_data->anti_spoofing_flg = 4980 ECORE_TEST_BIT(ECORE_Q_FLG_ANTI_SPOOF, flags); 4981 tx_data->force_default_pri_flg = 4982 ECORE_TEST_BIT(ECORE_Q_FLG_FORCE_DEFAULT_PRI, flags); 4983 tx_data->refuse_outband_vlan_flg = 4984 ECORE_TEST_BIT(ECORE_Q_FLG_REFUSE_OUTBAND_VLAN, flags); 4985 tx_data->tunnel_lso_inc_ip_id = 4986 ECORE_TEST_BIT(ECORE_Q_FLG_TUN_INC_INNER_IP_ID, flags); 4987 tx_data->tunnel_non_lso_pcsum_location = 4988 ECORE_TEST_BIT(ECORE_Q_FLG_PCSUM_ON_PKT, flags) ? 
CSUM_ON_PKT : 4989 CSUM_ON_BD; 4990 4991 tx_data->tx_status_block_id = params->fw_sb_id; 4992 tx_data->tx_sb_index_number = params->sb_cq_index; 4993 tx_data->tss_leading_client_id = params->tss_leading_cl_id; 4994 4995 tx_data->tx_bd_page_base.lo = 4996 mm_cpu_to_le32(U64_LO(params->dscr_map.as_u64)); 4997 tx_data->tx_bd_page_base.hi = 4998 mm_cpu_to_le32(U64_HI(params->dscr_map.as_u64)); 4999 5000 /* Don't configure any Tx switching mode during queue SETUP */ 5001 tx_data->state = 0; 5002 } 5003 5004 static void ecore_q_fill_init_pause_data(struct ecore_queue_sp_obj *o, 5005 struct rxq_pause_params *params, 5006 struct client_init_rx_data *rx_data) 5007 { 5008 /* flow control data */ 5009 rx_data->cqe_pause_thr_low = mm_cpu_to_le16(params->rcq_th_lo); 5010 rx_data->cqe_pause_thr_high = mm_cpu_to_le16(params->rcq_th_hi); 5011 rx_data->bd_pause_thr_low = mm_cpu_to_le16(params->bd_th_lo); 5012 rx_data->bd_pause_thr_high = mm_cpu_to_le16(params->bd_th_hi); 5013 rx_data->sge_pause_thr_low = mm_cpu_to_le16(params->sge_th_lo); 5014 rx_data->sge_pause_thr_high = mm_cpu_to_le16(params->sge_th_hi); 5015 rx_data->rx_cos_mask = mm_cpu_to_le16(params->pri_map); 5016 } 5017 5018 static void ecore_q_fill_init_rx_data(struct ecore_queue_sp_obj *o, 5019 struct ecore_rxq_setup_params *params, 5020 struct client_init_rx_data *rx_data, 5021 unsigned long *flags) 5022 { 5023 rx_data->tpa_en = ECORE_TEST_BIT(ECORE_Q_FLG_TPA, flags) * 5024 CLIENT_INIT_RX_DATA_TPA_EN_IPV4; 5025 rx_data->tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_GRO, flags) * 5026 CLIENT_INIT_RX_DATA_TPA_MODE; 5027 #ifdef ECORE_UPSTREAM /* ECORE_UPSTREAM */ 5028 rx_data->vmqueue_mode_en_flg = 0; 5029 #else 5030 rx_data->vmqueue_mode_en_flg = 5031 ECORE_TEST_BIT(ECORE_Q_FLG_VMQUEUE_MODE, flags); 5032 #endif 5033 5034 #ifdef ECORE_OOO /* ! 
ECORE_UPSTREAM */ 5035 rx_data->extra_data_over_sgl_en_flg = 5036 ECORE_TEST_BIT(ECORE_Q_FLG_OOO, flags); 5037 #endif 5038 rx_data->cache_line_alignment_log_size = 5039 params->cache_line_log; 5040 rx_data->enable_dynamic_hc = 5041 ECORE_TEST_BIT(ECORE_Q_FLG_DHC, flags); 5042 rx_data->max_sges_for_packet = params->max_sges_pkt; 5043 rx_data->client_qzone_id = params->cl_qzone_id; 5044 rx_data->max_agg_size = mm_cpu_to_le16(params->tpa_agg_sz); 5045 5046 /* Always start in DROP_ALL mode */ 5047 rx_data->state = mm_cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL | 5048 CLIENT_INIT_RX_DATA_MCAST_DROP_ALL); 5049 5050 /* We don't set drop flags */ 5051 rx_data->drop_ip_cs_err_flg = 0; 5052 rx_data->drop_tcp_cs_err_flg = 0; 5053 rx_data->drop_ttl0_flg = 0; 5054 rx_data->drop_udp_cs_err_flg = 0; 5055 rx_data->inner_vlan_removal_enable_flg = 5056 ECORE_TEST_BIT(ECORE_Q_FLG_VLAN, flags); 5057 rx_data->outer_vlan_removal_enable_flg = 5058 ECORE_TEST_BIT(ECORE_Q_FLG_OV, flags); 5059 rx_data->status_block_id = params->fw_sb_id; 5060 rx_data->rx_sb_index_number = params->sb_cq_index; 5061 rx_data->max_tpa_queues = params->max_tpa_queues; 5062 rx_data->max_bytes_on_bd = mm_cpu_to_le16(params->buf_sz); 5063 rx_data->sge_buff_size = mm_cpu_to_le16(params->sge_buf_sz); 5064 rx_data->bd_page_base.lo = 5065 mm_cpu_to_le32(U64_LO(params->dscr_map.as_u64)); 5066 rx_data->bd_page_base.hi = 5067 mm_cpu_to_le32(U64_HI(params->dscr_map.as_u64)); 5068 rx_data->sge_page_base.lo = 5069 mm_cpu_to_le32(U64_LO(params->sge_map.as_u64)); 5070 rx_data->sge_page_base.hi = 5071 mm_cpu_to_le32(U64_HI(params->sge_map.as_u64)); 5072 rx_data->cqe_page_base.lo = 5073 mm_cpu_to_le32(U64_LO(params->rcq_map.as_u64)); 5074 rx_data->cqe_page_base.hi = 5075 mm_cpu_to_le32(U64_HI(params->rcq_map.as_u64)); 5076 rx_data->is_leading_rss = ECORE_TEST_BIT(ECORE_Q_FLG_LEADING_RSS, 5077 flags); 5078 5079 if (ECORE_TEST_BIT(ECORE_Q_FLG_MCAST, flags)) { 5080 rx_data->approx_mcast_engine_id = params->mcast_engine_id; 5081 rx_data->is_approx_mcast = 1; 5082 } 5083 5084 rx_data->rss_engine_id = params->rss_engine_id; 5085 5086 /* silent vlan removal */ 5087 rx_data->silent_vlan_removal_flg = 5088 ECORE_TEST_BIT(ECORE_Q_FLG_SILENT_VLAN_REM, flags); 5089 rx_data->silent_vlan_value = 5090 mm_cpu_to_le16(params->silent_removal_value); 5091 rx_data->silent_vlan_mask = 5092 mm_cpu_to_le16(params->silent_removal_mask); 5093 } 5094 5095 /* initialize the general, tx and rx parts of a queue object */ 5096 static void ecore_q_fill_setup_data_cmn(struct _lm_device_t *pdev, 5097 struct ecore_queue_state_params *cmd_params, 5098 struct client_init_ramrod_data *data) 5099 { 5100 ecore_q_fill_init_general_data(pdev, cmd_params->q_obj, 5101 &cmd_params->params.setup.gen_params, 5102 &data->general, 5103 &cmd_params->params.setup.flags); 5104 5105 ecore_q_fill_init_tx_data(cmd_params->q_obj, 5106 &cmd_params->params.setup.txq_params, 5107 &data->tx, 5108 &cmd_params->params.setup.flags); 5109 5110 ecore_q_fill_init_rx_data(cmd_params->q_obj, 5111 &cmd_params->params.setup.rxq_params, 5112 &data->rx, 5113 &cmd_params->params.setup.flags); 5114 5115 ecore_q_fill_init_pause_data(cmd_params->q_obj, 5116 &cmd_params->params.setup.pause_params, 5117 &data->rx); 5118 } 5119 5120 /* initialize the general and tx parts of a tx-only queue object */ 5121 static void ecore_q_fill_setup_tx_only(struct _lm_device_t *pdev, 5122 struct ecore_queue_state_params *cmd_params, 5123 struct tx_queue_init_ramrod_data *data) 5124 { 5125 ecore_q_fill_init_general_data(pdev, 
cmd_params->q_obj, 5126 &cmd_params->params.tx_only.gen_params, 5127 &data->general, 5128 &cmd_params->params.tx_only.flags); 5129 5130 ecore_q_fill_init_tx_data(cmd_params->q_obj, 5131 &cmd_params->params.tx_only.txq_params, 5132 &data->tx, 5133 &cmd_params->params.tx_only.flags); 5134 5135 ECORE_MSG(pdev, "cid %d, tx bd page lo %x hi %x", 5136 cmd_params->q_obj->cids[0], 5137 data->tx.tx_bd_page_base.lo, 5138 data->tx.tx_bd_page_base.hi); 5139 } 5140 5141 /** 5142 * ecore_q_init - init HW/FW queue 5143 * 5144 * @pdev: device handle 5145 * @params: 5146 * 5147 * HW/FW initial Queue configuration: 5148 * - HC: Rx and Tx 5149 * - CDU context validation 5150 * 5151 */ 5152 static INLINE int ecore_q_init(struct _lm_device_t *pdev, 5153 struct ecore_queue_state_params *params) 5154 { 5155 struct ecore_queue_sp_obj *o = params->q_obj; 5156 struct ecore_queue_init_params *init = ¶ms->params.init; 5157 u16 hc_usec; 5158 u8 cos; 5159 5160 /* Tx HC configuration */ 5161 if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_TX, &o->type) && 5162 ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->tx.flags)) { 5163 hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0; 5164 5165 ECORE_TODO_UPDATE_COALESCE_SB_INDEX(pdev, init->tx.fw_sb_id, 5166 init->tx.sb_cq_index, 5167 !ECORE_TEST_BIT(ECORE_Q_FLG_HC_EN, &init->tx.flags), 5168 hc_usec); 5169 } 5170 5171 /* Rx HC configuration */ 5172 if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_RX, &o->type) && 5173 ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->rx.flags)) { 5174 hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0; 5175 5176 ECORE_TODO_UPDATE_COALESCE_SB_INDEX(pdev, init->rx.fw_sb_id, 5177 init->rx.sb_cq_index, 5178 !ECORE_TEST_BIT(ECORE_Q_FLG_HC_EN, &init->rx.flags), 5179 hc_usec); 5180 } 5181 5182 /* Set CDU context validation values */ 5183 for (cos = 0; cos < o->max_cos; cos++) { 5184 ECORE_MSG(pdev, "setting context validation. cid %d, cos %d\n", 5185 o->cids[cos], cos); 5186 ECORE_MSG(pdev, "context pointer %p\n", init->cxts[cos]); 5187 ECORE_SET_CTX_VALIDATION(pdev, init->cxts[cos], o->cids[cos]); 5188 } 5189 5190 /* As no ramrod is sent, complete the command immediately */ 5191 o->complete_cmd(pdev, o, ECORE_Q_CMD_INIT); 5192 5193 mmiowb(); 5194 smp_mb(); 5195 5196 return ECORE_SUCCESS; 5197 } 5198 5199 static INLINE int ecore_q_send_setup_e1x(struct _lm_device_t *pdev, 5200 struct ecore_queue_state_params *params) 5201 { 5202 struct ecore_queue_sp_obj *o = params->q_obj; 5203 struct client_init_ramrod_data *rdata = 5204 (struct client_init_ramrod_data *)o->rdata; 5205 lm_address_t data_mapping = o->rdata_mapping; 5206 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP; 5207 5208 /* Clear the ramrod data */ 5209 mm_memset(rdata, 0, sizeof(*rdata)); 5210 5211 /* Fill the ramrod data */ 5212 ecore_q_fill_setup_data_cmn(pdev, params, rdata); 5213 5214 /* No need for an explicit memory barrier here as long as we 5215 * ensure the ordering of writing to the SPQ element 5216 * and updating of the SPQ producer which involves a memory 5217 * read. If the memory read is removed we will have to put a 5218 * full memory barrier there (inside ecore_sp_post()). 
5219 */ 5220 return ecore_sp_post(pdev, 5221 ramrod, 5222 o->cids[ECORE_PRIMARY_CID_INDEX], 5223 data_mapping.as_u64, 5224 ETH_CONNECTION_TYPE); 5225 } 5226 5227 static INLINE int ecore_q_send_setup_e2(struct _lm_device_t *pdev, 5228 struct ecore_queue_state_params *params) 5229 { 5230 struct ecore_queue_sp_obj *o = params->q_obj; 5231 struct client_init_ramrod_data *rdata = 5232 (struct client_init_ramrod_data *)o->rdata; 5233 lm_address_t data_mapping = o->rdata_mapping; 5234 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP; 5235 5236 /* Clear the ramrod data */ 5237 mm_memset(rdata, 0, sizeof(*rdata)); 5238 5239 /* Fill the ramrod data */ 5240 ecore_q_fill_setup_data_cmn(pdev, params, rdata); 5241 ecore_q_fill_setup_data_e2(pdev, params, rdata); 5242 5243 /* No need for an explicit memory barrier here as long as we 5244 * ensure the ordering of writing to the SPQ element 5245 * and updating of the SPQ producer which involves a memory 5246 * read. If the memory read is removed we will have to put a 5247 * full memory barrier there (inside ecore_sp_post()). 5248 */ 5249 return ecore_sp_post(pdev, 5250 ramrod, 5251 o->cids[ECORE_PRIMARY_CID_INDEX], 5252 data_mapping.as_u64, 5253 ETH_CONNECTION_TYPE); 5254 } 5255 5256 static inline int ecore_q_send_setup_tx_only(struct _lm_device_t *pdev, 5257 struct ecore_queue_state_params *params) 5258 { 5259 struct ecore_queue_sp_obj *o = params->q_obj; 5260 struct tx_queue_init_ramrod_data *rdata = 5261 (struct tx_queue_init_ramrod_data *)o->rdata; 5262 lm_address_t data_mapping = o->rdata_mapping; 5263 int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP; 5264 struct ecore_queue_setup_tx_only_params *tx_only_params = 5265 ¶ms->params.tx_only; 5266 u8 cid_index = tx_only_params->cid_index; 5267 5268 #ifdef ECORE_OOO /* ! ECORE_UPSTREAM */ 5269 if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &o->type)) 5270 ramrod = RAMROD_CMD_ID_ETH_FORWARD_SETUP; 5271 ECORE_MSG(pdev, "sending forward tx-only ramrod"); 5272 #endif 5273 5274 if (cid_index >= o->max_cos) { 5275 ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n", 5276 o->cl_id, cid_index); 5277 return ECORE_INVAL; 5278 } 5279 5280 ECORE_MSG(pdev, "parameters received: cos: %d sp-id: %d\n", 5281 tx_only_params->gen_params.cos, 5282 tx_only_params->gen_params.spcl_id); 5283 5284 /* Clear the ramrod data */ 5285 mm_memset(rdata, 0, sizeof(*rdata)); 5286 5287 /* Fill the ramrod data */ 5288 ecore_q_fill_setup_tx_only(pdev, params, rdata); 5289 5290 ECORE_MSG(pdev, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n", 5291 o->cids[cid_index], rdata->general.client_id, 5292 rdata->general.sp_client_id, rdata->general.cos); 5293 5294 /* No need for an explicit memory barrier here as long as we 5295 * ensure the ordering of writing to the SPQ element 5296 * and updating of the SPQ producer which involves a memory 5297 * read. If the memory read is removed we will have to put a 5298 * full memory barrier there (inside ecore_sp_post()). 
5299 */ 5300 return ecore_sp_post(pdev, ramrod, o->cids[cid_index], 5301 data_mapping.as_u64, ETH_CONNECTION_TYPE); 5302 } 5303 5304 static void ecore_q_fill_update_data(struct _lm_device_t *pdev, 5305 struct ecore_queue_sp_obj *obj, 5306 struct ecore_queue_update_params *params, 5307 struct client_update_ramrod_data *data) 5308 { 5309 /* Client ID of the client to update */ 5310 data->client_id = obj->cl_id; 5311 5312 /* Function ID of the client to update */ 5313 data->func_id = obj->func_id; 5314 5315 /* Default VLAN value */ 5316 data->default_vlan = mm_cpu_to_le16(params->def_vlan); 5317 5318 /* Inner VLAN stripping */ 5319 data->inner_vlan_removal_enable_flg = 5320 ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM, 5321 &params->update_flags); 5322 data->inner_vlan_removal_change_flg = 5323 ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM_CHNG, 5324 &params->update_flags); 5325 5326 /* Outer VLAN stripping */ 5327 data->outer_vlan_removal_enable_flg = 5328 ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM, 5329 &params->update_flags); 5330 data->outer_vlan_removal_change_flg = 5331 ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM_CHNG, 5332 &params->update_flags); 5333 5334 /* Drop packets that have source MAC that doesn't belong to this 5335 * Queue. 5336 */ 5337 data->anti_spoofing_enable_flg = 5338 ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF, 5339 &params->update_flags); 5340 data->anti_spoofing_change_flg = 5341 ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF_CHNG, 5342 &params->update_flags); 5343 5344 /* Activate/Deactivate */ 5345 data->activate_flg = 5346 ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, &params->update_flags); 5347 data->activate_change_flg = 5348 ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG, 5349 &params->update_flags); 5350 5351 /* Enable default VLAN */ 5352 data->default_vlan_enable_flg = 5353 ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN, 5354 &params->update_flags); 5355 data->default_vlan_change_flg = 5356 ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN_CHNG, 5357 &params->update_flags); 5358 5359 /* silent vlan removal */ 5360 data->silent_vlan_change_flg = 5361 ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM_CHNG, 5362 &params->update_flags); 5363 data->silent_vlan_removal_flg = 5364 ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM, 5365 &params->update_flags); 5366 data->silent_vlan_value = mm_cpu_to_le16(params->silent_removal_value); 5367 data->silent_vlan_mask = mm_cpu_to_le16(params->silent_removal_mask); 5368 5369 /* tx switching */ 5370 data->tx_switching_flg = 5371 ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING, 5372 &params->update_flags); 5373 data->tx_switching_change_flg = 5374 ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING_CHNG, 5375 &params->update_flags); 5376 5377 /* PTP */ 5378 data->handle_ptp_pkts_flg = 5379 ECORE_TEST_BIT(ECORE_Q_UPDATE_PTP_PKTS, 5380 &params->update_flags); 5381 data->handle_ptp_pkts_change_flg = 5382 ECORE_TEST_BIT(ECORE_Q_UPDATE_PTP_PKTS_CHNG, 5383 &params->update_flags); 5384 } 5385 5386 static INLINE int ecore_q_send_update(struct _lm_device_t *pdev, 5387 struct ecore_queue_state_params *params) 5388 { 5389 struct ecore_queue_sp_obj *o = params->q_obj; 5390 struct client_update_ramrod_data *rdata = 5391 (struct client_update_ramrod_data *)o->rdata; 5392 lm_address_t data_mapping = o->rdata_mapping; 5393 struct ecore_queue_update_params *update_params = 5394 &params->params.update; 5395 u8 cid_index = update_params->cid_index; 5396 5397 if (cid_index >= o->max_cos) { 5398 ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n", 5399 o->cl_id, cid_index); 5400 return ECORE_INVAL; 5401 } 5402 5403 /* Clear the ramrod data */ 5404 mm_memset(rdata,
static INLINE int ecore_q_send_update(struct _lm_device_t *pdev,
				      struct ecore_queue_state_params *params)
{
	struct ecore_queue_sp_obj *o = params->q_obj;
	struct client_update_ramrod_data *rdata =
		(struct client_update_ramrod_data *)o->rdata;
	lm_address_t data_mapping = o->rdata_mapping;
	struct ecore_queue_update_params *update_params =
		&params->params.update;
	u8 cid_index = update_params->cid_index;

	if (cid_index >= o->max_cos) {
		ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n",
			  o->cl_id, cid_index);
		return ECORE_INVAL;
	}

	/* Clear the ramrod data */
	mm_memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data */
	ecore_q_fill_update_data(pdev, o, update_params, rdata);

	/* No need for an explicit memory barrier here as long as we
	 * ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer which involves a memory
	 * read. If the memory read is removed we will have to put a
	 * full memory barrier there (inside ecore_sp_post()).
	 */
	return ecore_sp_post(pdev, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
			     o->cids[cid_index], data_mapping.as_u64,
			     ETH_CONNECTION_TYPE);
}

/**
 * ecore_q_send_deactivate - send DEACTIVATE command
 *
 * @pdev:	device handle
 * @params:
 *
 * implemented using the UPDATE command.
 */
static INLINE int ecore_q_send_deactivate(struct _lm_device_t *pdev,
					  struct ecore_queue_state_params *params)
{
	struct ecore_queue_update_params *update = &params->params.update;

	mm_memset(update, 0, sizeof(*update));

	ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);

	return ecore_q_send_update(pdev, params);
}

/**
 * ecore_q_send_activate - send ACTIVATE command
 *
 * @pdev:	device handle
 * @params:
 *
 * implemented using the UPDATE command.
 */
static INLINE int ecore_q_send_activate(struct _lm_device_t *pdev,
					struct ecore_queue_state_params *params)
{
	struct ecore_queue_update_params *update = &params->params.update;

	mm_memset(update, 0, sizeof(*update));

	ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE, &update->update_flags);
	ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);

	return ecore_q_send_update(pdev, params);
}

static void ecore_q_fill_update_tpa_data(struct _lm_device_t *pdev,
					 struct ecore_queue_sp_obj *obj,
					 struct ecore_queue_update_tpa_params *params,
					 struct tpa_update_ramrod_data *data)
{
	data->client_id = obj->cl_id;
	data->complete_on_both_clients = params->complete_on_both_clients;
	data->dont_verify_rings_pause_thr_flg =
		params->dont_verify_thr;
	data->max_agg_size = mm_cpu_to_le16(params->max_agg_sz);
	data->max_sges_for_packet = params->max_sges_pkt;
	data->max_tpa_queues = params->max_tpa_queues;
	data->sge_buff_size = mm_cpu_to_le16(params->sge_buff_sz);
	data->sge_page_base_hi = mm_cpu_to_le32(U64_HI(params->sge_map.as_u64));
	data->sge_page_base_lo = mm_cpu_to_le32(U64_LO(params->sge_map.as_u64));
	data->sge_pause_thr_high = mm_cpu_to_le16(params->sge_pause_thr_high);
	data->sge_pause_thr_low = mm_cpu_to_le16(params->sge_pause_thr_low);
	data->tpa_mode = params->tpa_mode;
	data->update_ipv4 = params->update_ipv4;
	data->update_ipv6 = params->update_ipv6;
}

static INLINE int ecore_q_send_update_tpa(struct _lm_device_t *pdev,
					  struct ecore_queue_state_params *params)
{
	struct ecore_queue_sp_obj *o = params->q_obj;
	struct tpa_update_ramrod_data *rdata =
		(struct tpa_update_ramrod_data *)o->rdata;
	lm_address_t data_mapping = o->rdata_mapping;
	struct ecore_queue_update_tpa_params *update_tpa_params =
		&params->params.update_tpa;
	u16 type;

	/* Clear the ramrod data */
	mm_memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data */
	ecore_q_fill_update_tpa_data(pdev, o, update_tpa_params, rdata);

	/* Add the function id inside the type, so that sp post function
5501 * doesn't automatically add the PF func-id, this is required 5502 * for operations done by PFs on behalf of their VFs 5503 */ 5504 type = ETH_CONNECTION_TYPE | 5505 ((o->func_id) << SPE_HDR_FUNCTION_ID_SHIFT); 5506 5507 /* No need for an explicit memory barrier here as long as we 5508 * ensure the ordering of writing to the SPQ element 5509 * and updating of the SPQ producer which involves a memory 5510 * read. If the memory read is removed we will have to put a 5511 * full memory barrier there (inside ecore_sp_post()). 5512 */ 5513 return ecore_sp_post(pdev, RAMROD_CMD_ID_ETH_TPA_UPDATE, 5514 o->cids[ECORE_PRIMARY_CID_INDEX], 5515 data_mapping.as_u64, type); 5516 } 5517 5518 static INLINE int ecore_q_send_halt(struct _lm_device_t *pdev, 5519 struct ecore_queue_state_params *params) 5520 { 5521 struct ecore_queue_sp_obj *o = params->q_obj; 5522 5523 #if !defined(ECORE_ERASE) || defined(__FreeBSD__) 5524 /* build eth_halt_ramrod_data.client_id in a big-endian friendly way */ 5525 lm_address_t data_mapping = { {0} }; 5526 data_mapping.as_u32.low = o->cl_id; 5527 5528 /* No need for an explicit memory barrier here as long as we 5529 * ensure the ordering of writing to the SPQ element 5530 * and updating of the SPQ producer which involves a memory 5531 * read. If the memory read is removed we will have to put a 5532 * full memory barrier there (inside ecore_sp_post()). 5533 */ 5534 return ecore_sp_post(pdev, 5535 RAMROD_CMD_ID_ETH_HALT, 5536 o->cids[ECORE_PRIMARY_CID_INDEX], 5537 data_mapping.as_u64, 5538 ETH_CONNECTION_TYPE); 5539 #else 5540 return bnx2x_sp_post(pdev, RAMROD_CMD_ID_ETH_HALT, 5541 o->cids[ECORE_PRIMARY_CID_INDEX], 0, o->cl_id, 5542 ETH_CONNECTION_TYPE); 5543 #endif 5544 } 5545 5546 static INLINE int ecore_q_send_cfc_del(struct _lm_device_t *pdev, 5547 struct ecore_queue_state_params *params) 5548 { 5549 struct ecore_queue_sp_obj *o = params->q_obj; 5550 u8 cid_idx = params->params.cfc_del.cid_index; 5551 5552 if (cid_idx >= o->max_cos) { 5553 ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n", 5554 o->cl_id, cid_idx); 5555 return ECORE_INVAL; 5556 } 5557 5558 return ecore_sp_post(pdev, RAMROD_CMD_ID_COMMON_CFC_DEL, 5559 o->cids[cid_idx], 0, 5560 NONE_CONNECTION_TYPE); 5561 } 5562 5563 static INLINE int ecore_q_send_terminate(struct _lm_device_t *pdev, 5564 struct ecore_queue_state_params *params) 5565 { 5566 struct ecore_queue_sp_obj *o = params->q_obj; 5567 u8 cid_index = params->params.terminate.cid_index; 5568 5569 if (cid_index >= o->max_cos) { 5570 ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n", 5571 o->cl_id, cid_index); 5572 return ECORE_INVAL; 5573 } 5574 5575 return ecore_sp_post(pdev, RAMROD_CMD_ID_ETH_TERMINATE, 5576 o->cids[cid_index], 0, 5577 ETH_CONNECTION_TYPE); 5578 } 5579 5580 static INLINE int ecore_q_send_empty(struct _lm_device_t *pdev, 5581 struct ecore_queue_state_params *params) 5582 { 5583 struct ecore_queue_sp_obj *o = params->q_obj; 5584 5585 return ecore_sp_post(pdev, RAMROD_CMD_ID_ETH_EMPTY, 5586 o->cids[ECORE_PRIMARY_CID_INDEX], 0, 5587 ETH_CONNECTION_TYPE); 5588 } 5589 5590 static INLINE int ecore_queue_send_cmd_cmn(struct _lm_device_t *pdev, 5591 struct ecore_queue_state_params *params) 5592 { 5593 switch (params->cmd) { 5594 case ECORE_Q_CMD_INIT: 5595 return ecore_q_init(pdev, params); 5596 case ECORE_Q_CMD_SETUP_TX_ONLY: 5597 return ecore_q_send_setup_tx_only(pdev, params); 5598 case ECORE_Q_CMD_DEACTIVATE: 5599 return ecore_q_send_deactivate(pdev, params); 5600 case ECORE_Q_CMD_ACTIVATE: 5601 return 
ecore_q_send_activate(pdev, params); 5602 case ECORE_Q_CMD_UPDATE: 5603 return ecore_q_send_update(pdev, params); 5604 case ECORE_Q_CMD_UPDATE_TPA: 5605 return ecore_q_send_update_tpa(pdev, params); 5606 case ECORE_Q_CMD_HALT: 5607 return ecore_q_send_halt(pdev, params); 5608 case ECORE_Q_CMD_CFC_DEL: 5609 return ecore_q_send_cfc_del(pdev, params); 5610 case ECORE_Q_CMD_TERMINATE: 5611 return ecore_q_send_terminate(pdev, params); 5612 case ECORE_Q_CMD_EMPTY: 5613 return ecore_q_send_empty(pdev, params); 5614 default: 5615 ECORE_ERR("Unknown command: %d\n", params->cmd); 5616 return ECORE_INVAL; 5617 } 5618 } 5619 5620 static int ecore_queue_send_cmd_e1x(struct _lm_device_t *pdev, 5621 struct ecore_queue_state_params *params) 5622 { 5623 switch (params->cmd) { 5624 case ECORE_Q_CMD_SETUP: 5625 return ecore_q_send_setup_e1x(pdev, params); 5626 case ECORE_Q_CMD_INIT: 5627 case ECORE_Q_CMD_SETUP_TX_ONLY: 5628 case ECORE_Q_CMD_DEACTIVATE: 5629 case ECORE_Q_CMD_ACTIVATE: 5630 case ECORE_Q_CMD_UPDATE: 5631 case ECORE_Q_CMD_UPDATE_TPA: 5632 case ECORE_Q_CMD_HALT: 5633 case ECORE_Q_CMD_CFC_DEL: 5634 case ECORE_Q_CMD_TERMINATE: 5635 case ECORE_Q_CMD_EMPTY: 5636 return ecore_queue_send_cmd_cmn(pdev, params); 5637 default: 5638 ECORE_ERR("Unknown command: %d\n", params->cmd); 5639 return ECORE_INVAL; 5640 } 5641 } 5642 5643 static int ecore_queue_send_cmd_e2(struct _lm_device_t *pdev, 5644 struct ecore_queue_state_params *params) 5645 { 5646 switch (params->cmd) { 5647 case ECORE_Q_CMD_SETUP: 5648 return ecore_q_send_setup_e2(pdev, params); 5649 case ECORE_Q_CMD_INIT: 5650 case ECORE_Q_CMD_SETUP_TX_ONLY: 5651 case ECORE_Q_CMD_DEACTIVATE: 5652 case ECORE_Q_CMD_ACTIVATE: 5653 case ECORE_Q_CMD_UPDATE: 5654 case ECORE_Q_CMD_UPDATE_TPA: 5655 case ECORE_Q_CMD_HALT: 5656 case ECORE_Q_CMD_CFC_DEL: 5657 case ECORE_Q_CMD_TERMINATE: 5658 case ECORE_Q_CMD_EMPTY: 5659 return ecore_queue_send_cmd_cmn(pdev, params); 5660 default: 5661 ECORE_ERR("Unknown command: %d\n", params->cmd); 5662 return ECORE_INVAL; 5663 } 5664 } 5665 5666 /** 5667 * ecore_queue_chk_transition - check state machine of a regular Queue 5668 * 5669 * @pdev: device handle 5670 * @o: 5671 * @params: 5672 * 5673 * (not Forwarding) 5674 * It both checks if the requested command is legal in a current 5675 * state and, if it's legal, sets a `next_state' in the object 5676 * that will be used in the completion flow to set the `state' 5677 * of the object. 5678 * 5679 * returns 0 if a requested command is a legal transition, 5680 * ECORE_INVAL otherwise. 5681 */ 5682 static int ecore_queue_chk_transition(struct _lm_device_t *pdev, 5683 struct ecore_queue_sp_obj *o, 5684 struct ecore_queue_state_params *params) 5685 { 5686 enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX; 5687 enum ecore_queue_cmd cmd = params->cmd; 5688 struct ecore_queue_update_params *update_params = 5689 ¶ms->params.update; 5690 u8 next_tx_only = o->num_tx_only; 5691 5692 /* Forget all pending for completion commands if a driver only state 5693 * transition has been requested. 5694 */ 5695 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) { 5696 o->pending = 0; 5697 o->next_state = ECORE_Q_STATE_MAX; 5698 } 5699 5700 /* Don't allow a next state transition if we are in the middle of 5701 * the previous one. 
5702 */ 5703 if (o->pending) { 5704 ECORE_ERR("Blocking transition since pending was %lx\n", 5705 o->pending); 5706 return ECORE_BUSY; 5707 } 5708 5709 switch (state) { 5710 case ECORE_Q_STATE_RESET: 5711 if (cmd == ECORE_Q_CMD_INIT) 5712 next_state = ECORE_Q_STATE_INITIALIZED; 5713 5714 break; 5715 case ECORE_Q_STATE_INITIALIZED: 5716 if (cmd == ECORE_Q_CMD_SETUP) { 5717 if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE, 5718 ¶ms->params.setup.flags)) 5719 next_state = ECORE_Q_STATE_ACTIVE; 5720 else 5721 next_state = ECORE_Q_STATE_INACTIVE; 5722 } 5723 5724 break; 5725 case ECORE_Q_STATE_ACTIVE: 5726 if (cmd == ECORE_Q_CMD_DEACTIVATE) 5727 next_state = ECORE_Q_STATE_INACTIVE; 5728 5729 else if ((cmd == ECORE_Q_CMD_EMPTY) || 5730 (cmd == ECORE_Q_CMD_UPDATE_TPA)) 5731 next_state = ECORE_Q_STATE_ACTIVE; 5732 5733 else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) { 5734 next_state = ECORE_Q_STATE_MULTI_COS; 5735 next_tx_only = 1; 5736 } 5737 5738 else if (cmd == ECORE_Q_CMD_HALT) 5739 next_state = ECORE_Q_STATE_STOPPED; 5740 5741 else if (cmd == ECORE_Q_CMD_UPDATE) { 5742 /* If "active" state change is requested, update the 5743 * state accordingly. 5744 */ 5745 if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG, 5746 &update_params->update_flags) && 5747 !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, 5748 &update_params->update_flags)) 5749 next_state = ECORE_Q_STATE_INACTIVE; 5750 else 5751 next_state = ECORE_Q_STATE_ACTIVE; 5752 } 5753 5754 break; 5755 case ECORE_Q_STATE_MULTI_COS: 5756 if (cmd == ECORE_Q_CMD_TERMINATE) 5757 next_state = ECORE_Q_STATE_MCOS_TERMINATED; 5758 5759 else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) { 5760 next_state = ECORE_Q_STATE_MULTI_COS; 5761 next_tx_only = o->num_tx_only + 1; 5762 } 5763 5764 else if ((cmd == ECORE_Q_CMD_EMPTY) || 5765 (cmd == ECORE_Q_CMD_UPDATE_TPA)) 5766 next_state = ECORE_Q_STATE_MULTI_COS; 5767 5768 else if (cmd == ECORE_Q_CMD_UPDATE) { 5769 /* If "active" state change is requested, update the 5770 * state accordingly. 5771 */ 5772 if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG, 5773 &update_params->update_flags) && 5774 !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, 5775 &update_params->update_flags)) 5776 next_state = ECORE_Q_STATE_INACTIVE; 5777 else 5778 next_state = ECORE_Q_STATE_MULTI_COS; 5779 } 5780 5781 break; 5782 case ECORE_Q_STATE_MCOS_TERMINATED: 5783 if (cmd == ECORE_Q_CMD_CFC_DEL) { 5784 next_tx_only = o->num_tx_only - 1; 5785 if (next_tx_only == 0) 5786 next_state = ECORE_Q_STATE_ACTIVE; 5787 else 5788 next_state = ECORE_Q_STATE_MULTI_COS; 5789 } 5790 5791 break; 5792 case ECORE_Q_STATE_INACTIVE: 5793 if (cmd == ECORE_Q_CMD_ACTIVATE) 5794 next_state = ECORE_Q_STATE_ACTIVE; 5795 5796 else if ((cmd == ECORE_Q_CMD_EMPTY) || 5797 (cmd == ECORE_Q_CMD_UPDATE_TPA)) 5798 next_state = ECORE_Q_STATE_INACTIVE; 5799 5800 else if (cmd == ECORE_Q_CMD_HALT) 5801 next_state = ECORE_Q_STATE_STOPPED; 5802 5803 else if (cmd == ECORE_Q_CMD_UPDATE) { 5804 /* If "active" state change is requested, update the 5805 * state accordingly. 
5806 */ 5807 if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG, 5808 &update_params->update_flags) && 5809 ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, 5810 &update_params->update_flags)){ 5811 if (o->num_tx_only == 0) 5812 next_state = ECORE_Q_STATE_ACTIVE; 5813 else /* tx only queues exist for this queue */ 5814 next_state = ECORE_Q_STATE_MULTI_COS; 5815 } else 5816 next_state = ECORE_Q_STATE_INACTIVE; 5817 } 5818 5819 break; 5820 case ECORE_Q_STATE_STOPPED: 5821 if (cmd == ECORE_Q_CMD_TERMINATE) 5822 next_state = ECORE_Q_STATE_TERMINATED; 5823 5824 break; 5825 case ECORE_Q_STATE_TERMINATED: 5826 if (cmd == ECORE_Q_CMD_CFC_DEL) 5827 next_state = ECORE_Q_STATE_RESET; 5828 5829 break; 5830 default: 5831 ECORE_ERR("Illegal state: %d\n", state); 5832 } 5833 5834 /* Transition is assured */ 5835 if (next_state != ECORE_Q_STATE_MAX) { 5836 ECORE_MSG(pdev, "Good state transition: %d(%d)->%d\n", 5837 state, cmd, next_state); 5838 o->next_state = next_state; 5839 o->next_tx_only = next_tx_only; 5840 return ECORE_SUCCESS; 5841 } 5842 5843 ECORE_MSG(pdev, "Bad state transition request: %d %d\n", state, cmd); 5844 5845 return ECORE_INVAL; 5846 } 5847 #ifdef ECORE_OOO /* ! ECORE_UPSTREAM */ 5848 5849 /** 5850 * ecore_queue_chk_fwd_transition - check state machine of a Forwarding Queue. 5851 * 5852 * @pdev: device handle 5853 * @o: 5854 * @params: 5855 * 5856 * It both checks if the requested command is legal in a current 5857 * state and, if it's legal, sets a `next_state' in the object 5858 * that will be used in the completion flow to set the `state' 5859 * of the object. 5860 * 5861 * returns 0 if a requested command is a legal transition, 5862 * ECORE_INVAL otherwise. 5863 */ 5864 static int ecore_queue_chk_fwd_transition(struct _lm_device_t *pdev, 5865 struct ecore_queue_sp_obj *o, 5866 struct ecore_queue_state_params *params) 5867 { 5868 enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX; 5869 enum ecore_queue_cmd cmd = params->cmd; 5870 5871 switch (state) { 5872 case ECORE_Q_STATE_RESET: 5873 if (cmd == ECORE_Q_CMD_INIT) 5874 next_state = ECORE_Q_STATE_INITIALIZED; 5875 5876 break; 5877 case ECORE_Q_STATE_INITIALIZED: 5878 if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) { 5879 if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE, 5880 ¶ms->params.tx_only.flags)) 5881 next_state = ECORE_Q_STATE_ACTIVE; 5882 else 5883 next_state = ECORE_Q_STATE_INACTIVE; 5884 } 5885 5886 break; 5887 case ECORE_Q_STATE_ACTIVE: 5888 case ECORE_Q_STATE_INACTIVE: 5889 if (cmd == ECORE_Q_CMD_CFC_DEL) 5890 next_state = ECORE_Q_STATE_RESET; 5891 5892 break; 5893 default: 5894 ECORE_ERR("Illegal state: %d\n", state); 5895 } 5896 5897 /* Transition is assured */ 5898 if (next_state != ECORE_Q_STATE_MAX) { 5899 ECORE_MSG(pdev, "Good state transition: %d(%d)->%d\n", 5900 state, cmd, next_state); 5901 o->next_state = next_state; 5902 return ECORE_SUCCESS; 5903 } 5904 5905 ECORE_MSG(pdev, "Bad state transition request: %d %d\n", state, cmd); 5906 return ECORE_INVAL; 5907 } 5908 #endif 5909 5910 void ecore_init_queue_obj(struct _lm_device_t *pdev, 5911 struct ecore_queue_sp_obj *obj, 5912 u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id, 5913 void *rdata, 5914 lm_address_t rdata_mapping, unsigned long type) 5915 { 5916 mm_memset(obj, 0, sizeof(*obj)); 5917 5918 /* We support only ECORE_MULTI_TX_COS Tx CoS at the moment */ 5919 BUG_ON(ECORE_MULTI_TX_COS < cid_cnt); 5920 5921 memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt); 5922 obj->max_cos = cid_cnt; 5923 obj->cl_id = cl_id; 5924 obj->func_id = func_id; 5925 obj->rdata = rdata; 5926 
obj->rdata_mapping = rdata_mapping; 5927 obj->type = type; 5928 obj->next_state = ECORE_Q_STATE_MAX; 5929 5930 if (CHIP_IS_E1x(pdev)) 5931 obj->send_cmd = ecore_queue_send_cmd_e1x; 5932 else 5933 obj->send_cmd = ecore_queue_send_cmd_e2; 5934 5935 #ifdef ECORE_OOO /* ! ECORE_UPSTREAM */ 5936 if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &type)) 5937 obj->check_transition = ecore_queue_chk_fwd_transition; 5938 else 5939 #endif 5940 obj->check_transition = ecore_queue_chk_transition; 5941 5942 obj->complete_cmd = ecore_queue_comp_cmd; 5943 obj->wait_comp = ecore_queue_wait_comp; 5944 obj->set_pending = ecore_queue_set_pending; 5945 } 5946 5947 /* return a queue object's logical state*/ 5948 int ecore_get_q_logical_state(struct _lm_device_t *pdev, 5949 struct ecore_queue_sp_obj *obj) 5950 { 5951 switch (obj->state) { 5952 case ECORE_Q_STATE_ACTIVE: 5953 case ECORE_Q_STATE_MULTI_COS: 5954 return ECORE_Q_LOGICAL_STATE_ACTIVE; 5955 case ECORE_Q_STATE_RESET: 5956 case ECORE_Q_STATE_INITIALIZED: 5957 case ECORE_Q_STATE_MCOS_TERMINATED: 5958 case ECORE_Q_STATE_INACTIVE: 5959 case ECORE_Q_STATE_STOPPED: 5960 case ECORE_Q_STATE_TERMINATED: 5961 case ECORE_Q_STATE_FLRED: 5962 return ECORE_Q_LOGICAL_STATE_STOPPED; 5963 default: 5964 return ECORE_INVAL; 5965 } 5966 } 5967 5968 /********************** Function state object *********************************/ 5969 enum ecore_func_state ecore_func_get_state(struct _lm_device_t *pdev, 5970 struct ecore_func_sp_obj *o) 5971 { 5972 /* in the middle of transaction - return INVALID state */ 5973 if (o->pending) 5974 return ECORE_F_STATE_MAX; 5975 5976 /* unsure the order of reading of o->pending and o->state 5977 * o->pending should be read first 5978 */ 5979 rmb(); 5980 5981 return o->state; 5982 } 5983 5984 static int ecore_func_wait_comp(struct _lm_device_t *pdev, 5985 struct ecore_func_sp_obj *o, 5986 enum ecore_func_cmd cmd) 5987 { 5988 return ecore_state_wait(pdev, cmd, &o->pending); 5989 } 5990 5991 /** 5992 * ecore_func_state_change_comp - complete the state machine transition 5993 * 5994 * @pdev: device handle 5995 * @o: 5996 * @cmd: 5997 * 5998 * Called on state change transition. Completes the state 5999 * machine transition only - no HW interaction. 6000 */ 6001 static INLINE int ecore_func_state_change_comp(struct _lm_device_t *pdev, 6002 struct ecore_func_sp_obj *o, 6003 enum ecore_func_cmd cmd) 6004 { 6005 unsigned long cur_pending = o->pending; 6006 6007 if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) { 6008 ECORE_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n", 6009 cmd, FUNC_ID(pdev), o->state, 6010 cur_pending, o->next_state); 6011 return ECORE_INVAL; 6012 } 6013 6014 ECORE_MSG(pdev, 6015 "Completing command %d for func %d, setting state to %d\n", 6016 cmd, FUNC_ID(pdev), o->next_state); 6017 6018 o->state = o->next_state; 6019 o->next_state = ECORE_F_STATE_MAX; 6020 6021 /* It's important that o->state and o->next_state are 6022 * updated before o->pending. 6023 */ 6024 wmb(); 6025 6026 ECORE_CLEAR_BIT(cmd, &o->pending); 6027 smp_mb__after_atomic(); 6028 6029 return ECORE_SUCCESS; 6030 } 6031 6032 /** 6033 * ecore_func_comp_cmd - complete the state change command 6034 * 6035 * @pdev: device handle 6036 * @o: 6037 * @cmd: 6038 * 6039 * Checks that the arrived completion is expected. 6040 */ 6041 static int ecore_func_comp_cmd(struct _lm_device_t *pdev, 6042 struct ecore_func_sp_obj *o, 6043 enum ecore_func_cmd cmd) 6044 { 6045 /* Complete the state machine part first, check if it's a 6046 * legal completion. 
6047 */ 6048 int rc = ecore_func_state_change_comp(pdev, o, cmd); 6049 return rc; 6050 } 6051 6052 /** 6053 * ecore_func_chk_transition - perform function state machine transition 6054 * 6055 * @pdev: device handle 6056 * @o: 6057 * @params: 6058 * 6059 * It both checks if the requested command is legal in a current 6060 * state and, if it's legal, sets a `next_state' in the object 6061 * that will be used in the completion flow to set the `state' 6062 * of the object. 6063 * 6064 * returns 0 if a requested command is a legal transition, 6065 * ECORE_INVAL otherwise. 6066 */ 6067 static int ecore_func_chk_transition(struct _lm_device_t *pdev, 6068 struct ecore_func_sp_obj *o, 6069 struct ecore_func_state_params *params) 6070 { 6071 enum ecore_func_state state = o->state, next_state = ECORE_F_STATE_MAX; 6072 enum ecore_func_cmd cmd = params->cmd; 6073 6074 /* Forget all pending for completion commands if a driver only state 6075 * transition has been requested. 6076 */ 6077 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) { 6078 o->pending = 0; 6079 o->next_state = ECORE_F_STATE_MAX; 6080 } 6081 6082 /* Don't allow a next state transition if we are in the middle of 6083 * the previous one. 6084 */ 6085 if (o->pending) 6086 return ECORE_BUSY; 6087 6088 switch (state) { 6089 case ECORE_F_STATE_RESET: 6090 if (cmd == ECORE_F_CMD_HW_INIT) 6091 next_state = ECORE_F_STATE_INITIALIZED; 6092 6093 break; 6094 case ECORE_F_STATE_INITIALIZED: 6095 if (cmd == ECORE_F_CMD_START) 6096 next_state = ECORE_F_STATE_STARTED; 6097 6098 else if (cmd == ECORE_F_CMD_HW_RESET) 6099 next_state = ECORE_F_STATE_RESET; 6100 6101 break; 6102 case ECORE_F_STATE_STARTED: 6103 if (cmd == ECORE_F_CMD_STOP) 6104 next_state = ECORE_F_STATE_INITIALIZED; 6105 /* afex ramrods can be sent only in started mode, and only 6106 * if not pending for function_stop ramrod completion 6107 * for these events - next state remained STARTED. 6108 */ 6109 else if ((cmd == ECORE_F_CMD_AFEX_UPDATE) && 6110 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending))) 6111 next_state = ECORE_F_STATE_STARTED; 6112 6113 else if ((cmd == ECORE_F_CMD_AFEX_VIFLISTS) && 6114 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending))) 6115 next_state = ECORE_F_STATE_STARTED; 6116 6117 /* Switch_update ramrod can be sent in either started or 6118 * tx_stopped state, and it doesn't change the state. 
6119 */ 6120 else if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) && 6121 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending))) 6122 next_state = ECORE_F_STATE_STARTED; 6123 6124 else if ((cmd == ECORE_F_CMD_SET_TIMESYNC) && 6125 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending))) 6126 next_state = ECORE_F_STATE_STARTED; 6127 6128 else if (cmd == ECORE_F_CMD_TX_STOP) 6129 next_state = ECORE_F_STATE_TX_STOPPED; 6130 6131 break; 6132 case ECORE_F_STATE_TX_STOPPED: 6133 if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) && 6134 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending))) 6135 next_state = ECORE_F_STATE_TX_STOPPED; 6136 6137 else if ((cmd == ECORE_F_CMD_SET_TIMESYNC) && 6138 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending))) 6139 next_state = ECORE_F_STATE_TX_STOPPED; 6140 6141 else if (cmd == ECORE_F_CMD_TX_START) 6142 next_state = ECORE_F_STATE_STARTED; 6143 6144 break; 6145 default: 6146 ECORE_ERR("Unknown state: %d\n", state); 6147 } 6148 6149 /* Transition is assured */ 6150 if (next_state != ECORE_F_STATE_MAX) { 6151 ECORE_MSG(pdev, "Good function state transition: %d(%d)->%d\n", 6152 state, cmd, next_state); 6153 o->next_state = next_state; 6154 return ECORE_SUCCESS; 6155 } 6156 6157 ECORE_MSG(pdev, "Bad function state transition request: %d %d\n", 6158 state, cmd); 6159 6160 return ECORE_INVAL; 6161 } 6162 6163 /** 6164 * ecore_func_init_func - performs HW init at function stage 6165 * 6166 * @pdev: device handle 6167 * @drv: 6168 * 6169 * Init HW when the current phase is 6170 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only 6171 * HW blocks. 6172 */ 6173 static INLINE int ecore_func_init_func(struct _lm_device_t *pdev, 6174 const struct ecore_func_sp_drv_ops *drv) 6175 { 6176 return drv->init_hw_func(pdev); 6177 } 6178 6179 /** 6180 * ecore_func_init_port - performs HW init at port stage 6181 * 6182 * @pdev: device handle 6183 * @drv: 6184 * 6185 * Init HW when the current phase is 6186 * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and 6187 * FUNCTION-only HW blocks. 6188 * 6189 */ 6190 static INLINE int ecore_func_init_port(struct _lm_device_t *pdev, 6191 const struct ecore_func_sp_drv_ops *drv) 6192 { 6193 int rc = drv->init_hw_port(pdev); 6194 if (rc) 6195 return rc; 6196 6197 return ecore_func_init_func(pdev, drv); 6198 } 6199 6200 /** 6201 * ecore_func_init_cmn_chip - performs HW init at chip-common stage 6202 * 6203 * @pdev: device handle 6204 * @drv: 6205 * 6206 * Init HW when the current phase is 6207 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP, 6208 * PORT-only and FUNCTION-only HW blocks. 6209 */ 6210 static INLINE int ecore_func_init_cmn_chip(struct _lm_device_t *pdev, 6211 const struct ecore_func_sp_drv_ops *drv) 6212 { 6213 int rc = drv->init_hw_cmn_chip(pdev); 6214 if (rc) 6215 return rc; 6216 6217 return ecore_func_init_port(pdev, drv); 6218 } 6219 6220 /** 6221 * ecore_func_init_cmn - performs HW init at common stage 6222 * 6223 * @pdev: device handle 6224 * @drv: 6225 * 6226 * Init HW when the current phase is 6227 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON, 6228 * PORT-only and FUNCTION-only HW blocks. 
6229 */ 6230 static INLINE int ecore_func_init_cmn(struct _lm_device_t *pdev, 6231 const struct ecore_func_sp_drv_ops *drv) 6232 { 6233 int rc = drv->init_hw_cmn(pdev); 6234 if (rc) 6235 return rc; 6236 6237 return ecore_func_init_port(pdev, drv); 6238 } 6239 6240 static int ecore_func_hw_init(struct _lm_device_t *pdev, 6241 struct ecore_func_state_params *params) 6242 { 6243 u32 load_code = params->params.hw_init.load_phase; 6244 struct ecore_func_sp_obj *o = params->f_obj; 6245 const struct ecore_func_sp_drv_ops *drv = o->drv; 6246 int rc = 0; 6247 6248 ECORE_MSG(pdev, "function %d load_code %x\n", 6249 ABS_FUNC_ID(pdev), load_code); 6250 6251 /* Prepare buffers for unzipping the FW */ 6252 rc = drv->gunzip_init(pdev); 6253 if (rc) 6254 return rc; 6255 6256 /* Prepare FW */ 6257 rc = drv->init_fw(pdev); 6258 if (rc) { 6259 ECORE_ERR("Error loading firmware\n"); 6260 goto init_err; 6261 } 6262 6263 /* Handle the beginning of COMMON_XXX pases separately... */ 6264 switch (load_code) { 6265 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: 6266 rc = ecore_func_init_cmn_chip(pdev, drv); 6267 if (rc) 6268 goto init_err; 6269 6270 break; 6271 case FW_MSG_CODE_DRV_LOAD_COMMON: 6272 rc = ecore_func_init_cmn(pdev, drv); 6273 if (rc) 6274 goto init_err; 6275 6276 break; 6277 case FW_MSG_CODE_DRV_LOAD_PORT: 6278 rc = ecore_func_init_port(pdev, drv); 6279 if (rc) 6280 goto init_err; 6281 6282 break; 6283 case FW_MSG_CODE_DRV_LOAD_FUNCTION: 6284 rc = ecore_func_init_func(pdev, drv); 6285 if (rc) 6286 goto init_err; 6287 6288 break; 6289 default: 6290 ECORE_ERR("Unknown load_code (0x%x) from MCP\n", load_code); 6291 rc = ECORE_INVAL; 6292 } 6293 6294 init_err: 6295 drv->gunzip_end(pdev); 6296 6297 /* In case of success, complete the command immediately: no ramrods 6298 * have been sent. 6299 */ 6300 if (!rc) 6301 o->complete_cmd(pdev, o, ECORE_F_CMD_HW_INIT); 6302 6303 return rc; 6304 } 6305 6306 /** 6307 * ecore_func_reset_func - reset HW at function stage 6308 * 6309 * @pdev: device handle 6310 * @drv: 6311 * 6312 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only 6313 * FUNCTION-only HW blocks. 6314 */ 6315 static INLINE void ecore_func_reset_func(struct _lm_device_t *pdev, 6316 const struct ecore_func_sp_drv_ops *drv) 6317 { 6318 drv->reset_hw_func(pdev); 6319 } 6320 6321 /** 6322 * ecore_func_reset_port - reser HW at port stage 6323 * 6324 * @pdev: device handle 6325 * @drv: 6326 * 6327 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset 6328 * FUNCTION-only and PORT-only HW blocks. 6329 * 6330 * !!!IMPORTANT!!! 6331 * 6332 * It's important to call reset_port before reset_func() as the last thing 6333 * reset_func does is pf_disable() thus disabling PGLUE_B, which 6334 * makes impossible any DMAE transactions. 6335 */ 6336 static INLINE void ecore_func_reset_port(struct _lm_device_t *pdev, 6337 const struct ecore_func_sp_drv_ops *drv) 6338 { 6339 drv->reset_hw_port(pdev); 6340 ecore_func_reset_func(pdev, drv); 6341 } 6342 6343 /** 6344 * ecore_func_reset_cmn - reser HW at common stage 6345 * 6346 * @pdev: device handle 6347 * @drv: 6348 * 6349 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and 6350 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON, 6351 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks. 
6352 */ 6353 static INLINE void ecore_func_reset_cmn(struct _lm_device_t *pdev, 6354 const struct ecore_func_sp_drv_ops *drv) 6355 { 6356 ecore_func_reset_port(pdev, drv); 6357 drv->reset_hw_cmn(pdev); 6358 } 6359 6360 static INLINE int ecore_func_hw_reset(struct _lm_device_t *pdev, 6361 struct ecore_func_state_params *params) 6362 { 6363 u32 reset_phase = params->params.hw_reset.reset_phase; 6364 struct ecore_func_sp_obj *o = params->f_obj; 6365 const struct ecore_func_sp_drv_ops *drv = o->drv; 6366 6367 ECORE_MSG(pdev, "function %d reset_phase %x\n", ABS_FUNC_ID(pdev), 6368 reset_phase); 6369 6370 switch (reset_phase) { 6371 case FW_MSG_CODE_DRV_UNLOAD_COMMON: 6372 ecore_func_reset_cmn(pdev, drv); 6373 break; 6374 case FW_MSG_CODE_DRV_UNLOAD_PORT: 6375 ecore_func_reset_port(pdev, drv); 6376 break; 6377 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION: 6378 ecore_func_reset_func(pdev, drv); 6379 break; 6380 default: 6381 ECORE_ERR("Unknown reset_phase (0x%x) from MCP\n", 6382 reset_phase); 6383 break; 6384 } 6385 6386 /* Complete the command immediately: no ramrods have been sent. */ 6387 o->complete_cmd(pdev, o, ECORE_F_CMD_HW_RESET); 6388 6389 return ECORE_SUCCESS; 6390 } 6391 6392 static INLINE int ecore_func_send_start(struct _lm_device_t *pdev, 6393 struct ecore_func_state_params *params) 6394 { 6395 struct ecore_func_sp_obj *o = params->f_obj; 6396 struct function_start_data *rdata = 6397 (struct function_start_data *)o->rdata; 6398 lm_address_t data_mapping = o->rdata_mapping; 6399 struct ecore_func_start_params *start_params = ¶ms->params.start; 6400 6401 mm_memset(rdata, 0, sizeof(*rdata)); 6402 6403 /* Fill the ramrod data with provided parameters */ 6404 rdata->function_mode = (u8)start_params->mf_mode; 6405 rdata->sd_vlan_tag = mm_cpu_to_le16(start_params->sd_vlan_tag); 6406 rdata->path_id = PATH_ID(pdev); 6407 rdata->network_cos_mode = start_params->network_cos_mode; 6408 rdata->tunnel_mode = start_params->tunnel_mode; 6409 rdata->gre_tunnel_type = start_params->gre_tunnel_type; 6410 rdata->inner_gre_rss_en = start_params->inner_gre_rss_en; 6411 rdata->vxlan_dst_port = start_params->vxlan_dst_port; 6412 rdata->sd_accept_mf_clss_fail = start_params->class_fail; 6413 if (start_params->class_fail_ethtype) { 6414 rdata->sd_accept_mf_clss_fail_match_ethtype = 1; 6415 rdata->sd_accept_mf_clss_fail_ethtype = 6416 mm_cpu_to_le16(start_params->class_fail_ethtype); 6417 } 6418 rdata->sd_vlan_force_pri_flg = start_params->sd_vlan_force_pri; 6419 rdata->sd_vlan_force_pri_val = start_params->sd_vlan_force_pri_val; 6420 6421 /** @@@TMP - until FW 7.10.7 (which will introduce an HSI change) 6422 * `sd_vlan_eth_type' will replace ethertype in SD mode even if 6423 * it's set to 0; This will probably break SD, so we're setting it 6424 * to ethertype 0x8100 for now. 6425 */ 6426 if (start_params->sd_vlan_eth_type) 6427 rdata->sd_vlan_eth_type = 6428 mm_cpu_to_le16(start_params->sd_vlan_eth_type); 6429 else 6430 rdata->sd_vlan_eth_type = 6431 mm_cpu_to_le16((u16) 0x8100); 6432 6433 rdata->no_added_tags = start_params->no_added_tags; 6434 6435 /* No need for an explicit memory barrier here as long as we 6436 * ensure the ordering of writing to the SPQ element 6437 * and updating of the SPQ producer which involves a memory 6438 * read. If the memory read is removed we will have to put a 6439 * full memory barrier there (inside ecore_sp_post()). 
	 */
	return ecore_sp_post(pdev, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
			     data_mapping.as_u64, NONE_CONNECTION_TYPE);
}

static INLINE int ecore_func_send_switch_update(struct _lm_device_t *pdev,
						struct ecore_func_state_params *params)
{
	struct ecore_func_sp_obj *o = params->f_obj;
	struct function_update_data *rdata =
		(struct function_update_data *)o->rdata;
	lm_address_t data_mapping = o->rdata_mapping;
	struct ecore_func_switch_update_params *switch_update_params =
		&params->params.switch_update;

	mm_memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data with provided parameters */
	if (ECORE_TEST_BIT(ECORE_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
			   &switch_update_params->changes)) {
		rdata->tx_switch_suspend_change_flg = 1;
		rdata->tx_switch_suspend =
			ECORE_TEST_BIT(ECORE_F_UPDATE_TX_SWITCH_SUSPEND,
				       &switch_update_params->changes);
	}

	if (ECORE_TEST_BIT(ECORE_F_UPDATE_SD_VLAN_TAG_CHNG,
			   &switch_update_params->changes)) {
		rdata->sd_vlan_tag_change_flg = 1;
		rdata->sd_vlan_tag =
			mm_cpu_to_le16(switch_update_params->vlan);
	}

	if (ECORE_TEST_BIT(ECORE_F_UPDATE_SD_VLAN_ETH_TYPE_CHNG,
			   &switch_update_params->changes)) {
		rdata->sd_vlan_eth_type_change_flg = 1;
		rdata->sd_vlan_eth_type =
			mm_cpu_to_le16(switch_update_params->vlan_eth_type);
	}

	if (ECORE_TEST_BIT(ECORE_F_UPDATE_VLAN_FORCE_PRIO_CHNG,
			   &switch_update_params->changes)) {
		rdata->sd_vlan_force_pri_change_flg = 1;
		if (ECORE_TEST_BIT(ECORE_F_UPDATE_VLAN_FORCE_PRIO_FLAG,
				   &switch_update_params->changes))
			rdata->sd_vlan_force_pri_flg = 1;
		rdata->sd_vlan_force_pri_flg =
			switch_update_params->vlan_force_prio;
	}

	if (ECORE_TEST_BIT(ECORE_F_UPDATE_TUNNEL_CFG_CHNG,
			   &switch_update_params->changes)) {
		rdata->update_tunn_cfg_flg = 1;
		if (ECORE_TEST_BIT(ECORE_F_UPDATE_TUNNEL_CLSS_EN,
				   &switch_update_params->changes))
			rdata->tunn_clss_en = 1;
		if (ECORE_TEST_BIT(ECORE_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN,
				   &switch_update_params->changes))
			rdata->inner_gre_rss_en = 1;
		rdata->tunnel_mode = switch_update_params->tunnel_mode;
		rdata->gre_tunnel_type = switch_update_params->gre_tunnel_type;
		rdata->vxlan_dst_port =
			mm_cpu_to_le16(switch_update_params->vxlan_dst_port);
	}

	rdata->echo = SWITCH_UPDATE;

	/* No need for an explicit memory barrier here as long as we
	 * ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer which involves a memory
	 * read. If the memory read is removed we will have to put a
	 * full memory barrier there (inside ecore_sp_post()).
	 */
	return ecore_sp_post(pdev, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
			     data_mapping.as_u64, NONE_CONNECTION_TYPE);
}
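/* Editorial note: SWITCH_UPDATE and AFEX_UPDATE share the same
 * FUNCTION_UPDATE ramrod; the `echo' value set above (and below for AFEX) is
 * presumably what allows the completion handler to tell the two flavors
 * apart.
 */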
static INLINE int ecore_func_send_afex_update(struct _lm_device_t *pdev,
					      struct ecore_func_state_params *params)
{
	struct ecore_func_sp_obj *o = params->f_obj;
	struct function_update_data *rdata =
		(struct function_update_data *)o->afex_rdata;
	lm_address_t data_mapping = o->afex_rdata_mapping;
	struct ecore_func_afex_update_params *afex_update_params =
		&params->params.afex_update;

	mm_memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data with provided parameters */
	rdata->vif_id_change_flg = 1;
	rdata->vif_id = mm_cpu_to_le16(afex_update_params->vif_id);
	rdata->afex_default_vlan_change_flg = 1;
	rdata->afex_default_vlan =
		mm_cpu_to_le16(afex_update_params->afex_default_vlan);
	rdata->allowed_priorities_change_flg = 1;
	rdata->allowed_priorities = afex_update_params->allowed_priorities;
	rdata->echo = AFEX_UPDATE;

	/* No need for an explicit memory barrier here as long as we
	 * ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer which involves a memory
	 * read. If the memory read is removed we will have to put a
	 * full memory barrier there (inside ecore_sp_post()).
	 */
	ECORE_MSG(pdev,
		  "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
		  rdata->vif_id,
		  rdata->afex_default_vlan, rdata->allowed_priorities);

	return ecore_sp_post(pdev, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
			     data_mapping.as_u64, NONE_CONNECTION_TYPE);
}

static
INLINE int ecore_func_send_afex_viflists(struct _lm_device_t *pdev,
					 struct ecore_func_state_params *params)
{
	struct ecore_func_sp_obj *o = params->f_obj;
	struct afex_vif_list_ramrod_data *rdata =
		(struct afex_vif_list_ramrod_data *)o->afex_rdata;
	struct ecore_func_afex_viflists_params *afex_vif_params =
		&params->params.afex_viflists;
	u64 *p_rdata = (u64 *)rdata;

	mm_memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data with provided parameters */
	rdata->vif_list_index = mm_cpu_to_le16(afex_vif_params->vif_list_index);
	rdata->func_bit_map = afex_vif_params->func_bit_map;
	rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
	rdata->func_to_clear = afex_vif_params->func_to_clear;

	/* send in echo type of sub command */
	rdata->echo = afex_vif_params->afex_vif_list_command;

	ECORE_MSG(pdev, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
		  rdata->afex_vif_list_command, rdata->vif_list_index,
		  rdata->func_bit_map, rdata->func_to_clear);

	/* No need for an explicit memory barrier here as long as we
	 * ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer which involves a memory
	 * read. If the memory read is removed we will have to put a
	 * full memory barrier there (inside ecore_sp_post()).
	 */

	/* this ramrod sends data directly and not through DMA mapping */
	return ecore_sp_post(pdev, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
			     *p_rdata, NONE_CONNECTION_TYPE);
}
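/* Editorial note: unlike the other function ramrods, the VIF-lists data above
 * is not DMA-mapped - the whole afex_vif_list_ramrod_data structure is passed
 * by value through the 64-bit data field of the SPQ element (hence the u64
 * cast), so it is expected to fit in 8 bytes.
 */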
static INLINE int ecore_func_send_stop(struct _lm_device_t *pdev,
				       struct ecore_func_state_params *params)
{
	return ecore_sp_post(pdev, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0,
			     NONE_CONNECTION_TYPE);
}

static INLINE int ecore_func_send_tx_stop(struct _lm_device_t *pdev,
					  struct ecore_func_state_params *params)
{
	return ecore_sp_post(pdev, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0,
			     NONE_CONNECTION_TYPE);
}

static INLINE int ecore_func_send_tx_start(struct _lm_device_t *pdev,
					   struct ecore_func_state_params *params)
{
	struct ecore_func_sp_obj *o = params->f_obj;
	struct flow_control_configuration *rdata =
		(struct flow_control_configuration *)o->rdata;
	lm_address_t data_mapping = o->rdata_mapping;
	struct ecore_func_tx_start_params *tx_start_params =
		&params->params.tx_start;
	int i;

	mm_memset(rdata, 0, sizeof(*rdata));

	rdata->dcb_enabled = tx_start_params->dcb_enabled;
	rdata->dcb_version = tx_start_params->dcb_version;
	rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;

	for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
		rdata->traffic_type_to_priority_cos[i] =
			tx_start_params->traffic_type_to_priority_cos[i];

	/* No need for an explicit memory barrier here as long as we
	 * ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer which involves a memory
	 * read. If the memory read is removed we will have to put a
	 * full memory barrier there (inside ecore_sp_post()).
	 */
	return ecore_sp_post(pdev, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
			     data_mapping.as_u64, NONE_CONNECTION_TYPE);
}
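/* Editorial note: the START_TRAFFIC ramrod above is how DCB configuration
 * reaches the firmware - traffic_type_to_priority_cos[] carries the
 * per-traffic-type priority/CoS mapping, typically produced by the DCBX
 * logic in the driver.
 */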
static INLINE int ecore_func_send_set_timesync(struct _lm_device_t *pdev,
					       struct ecore_func_state_params *params)
{
	struct ecore_func_sp_obj *o = params->f_obj;
	struct set_timesync_ramrod_data *rdata =
		(struct set_timesync_ramrod_data *)o->rdata;
	lm_address_t data_mapping = o->rdata_mapping;
	struct ecore_func_set_timesync_params *set_timesync_params =
		&params->params.set_timesync;

	mm_memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data with provided parameters */
	rdata->drift_adjust_cmd = set_timesync_params->drift_adjust_cmd;
	rdata->offset_cmd = set_timesync_params->offset_cmd;
	rdata->add_sub_drift_adjust_value =
		set_timesync_params->add_sub_drift_adjust_value;
	rdata->drift_adjust_value = set_timesync_params->drift_adjust_value;
	rdata->drift_adjust_period = set_timesync_params->drift_adjust_period;
	rdata->offset_delta.lo =
		mm_cpu_to_le32(U64_LO(set_timesync_params->offset_delta));
	rdata->offset_delta.hi =
		mm_cpu_to_le32(U64_HI(set_timesync_params->offset_delta));

	ECORE_MSG(pdev, "Set timesync command params: drift_cmd = %d, offset_cmd = %d, add_sub_drift = %d, drift_val = %d, drift_period = %d, offset_lo = %d, offset_hi = %d\n",
		  rdata->drift_adjust_cmd, rdata->offset_cmd,
		  rdata->add_sub_drift_adjust_value, rdata->drift_adjust_value,
		  rdata->drift_adjust_period, rdata->offset_delta.lo,
		  rdata->offset_delta.hi);

	return ecore_sp_post(pdev, RAMROD_CMD_ID_COMMON_SET_TIMESYNC, 0,
			     data_mapping.as_u64, NONE_CONNECTION_TYPE);
}

static int ecore_func_send_cmd(struct _lm_device_t *pdev,
			       struct ecore_func_state_params *params)
{
	switch (params->cmd) {
	case ECORE_F_CMD_HW_INIT:
		return ecore_func_hw_init(pdev, params);
	case ECORE_F_CMD_START:
		return ecore_func_send_start(pdev, params);
	case ECORE_F_CMD_STOP:
		return ecore_func_send_stop(pdev, params);
	case ECORE_F_CMD_HW_RESET:
		return ecore_func_hw_reset(pdev, params);
	case ECORE_F_CMD_AFEX_UPDATE:
		return ecore_func_send_afex_update(pdev, params);
	case ECORE_F_CMD_AFEX_VIFLISTS:
		return ecore_func_send_afex_viflists(pdev, params);
	case ECORE_F_CMD_TX_STOP:
		return ecore_func_send_tx_stop(pdev, params);
	case ECORE_F_CMD_TX_START:
		return ecore_func_send_tx_start(pdev, params);
	case ECORE_F_CMD_SWITCH_UPDATE:
		return ecore_func_send_switch_update(pdev, params);
	case ECORE_F_CMD_SET_TIMESYNC:
		return ecore_func_send_set_timesync(pdev, params);
	default:
		ECORE_ERR("Unknown command: %d\n", params->cmd);
		return ECORE_INVAL;
	}
}

void ecore_init_func_obj(struct _lm_device_t *pdev,
			 struct ecore_func_sp_obj *obj,
			 void *rdata, lm_address_t rdata_mapping,
			 void *afex_rdata, lm_address_t afex_rdata_mapping,
			 struct ecore_func_sp_drv_ops *drv_iface)
{
	mm_memset(obj, 0, sizeof(*obj));

	ECORE_MUTEX_INIT(&obj->one_pending_mutex);

	obj->rdata = rdata;
	obj->rdata_mapping = rdata_mapping;
	obj->afex_rdata = afex_rdata;
	obj->afex_rdata_mapping = afex_rdata_mapping;
	obj->send_cmd = ecore_func_send_cmd;
	obj->check_transition = ecore_func_chk_transition;
	obj->complete_cmd = ecore_func_comp_cmd;
	obj->wait_comp = ecore_func_wait_comp;
	obj->drv = drv_iface;
}
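/* Editorial note: a minimal usage sketch for the function state machine
 * below, assuming `func_obj' was prepared by ecore_init_func_obj() above
 * (variable names are illustrative only):
 *
 *	struct ecore_func_state_params fparams = {0};
 *
 *	fparams.f_obj = &func_obj;
 *	fparams.cmd = ECORE_F_CMD_HW_INIT;
 *	fparams.params.hw_init.load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
 *	ECORE_SET_BIT(RAMROD_COMP_WAIT, &fparams.ramrod_flags);
 *	rc = ecore_func_state_change(pdev, &fparams);
 */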
drv_iface; 6719 } 6720 6721 /** 6722 * ecore_func_state_change - perform Function state change transition 6723 * 6724 * @pdev: device handle 6725 * @params: parameters to perform the transaction 6726 * 6727 * returns 0 in case of successfully completed transition, 6728 * negative error code in case of failure, positive 6729 * (EBUSY) value if there is a completion to that is 6730 * still pending (possible only if RAMROD_COMP_WAIT is 6731 * not set in params->ramrod_flags for asynchronous 6732 * commands). 6733 */ 6734 int ecore_func_state_change(struct _lm_device_t *pdev, 6735 struct ecore_func_state_params *params) 6736 { 6737 struct ecore_func_sp_obj *o = params->f_obj; 6738 int rc, cnt = 300; 6739 enum ecore_func_cmd cmd = params->cmd; 6740 unsigned long *pending = &o->pending; 6741 6742 ECORE_MUTEX_LOCK(&o->one_pending_mutex); 6743 6744 /* Check that the requested transition is legal */ 6745 rc = o->check_transition(pdev, o, params); 6746 if ((rc == ECORE_BUSY) && 6747 (ECORE_TEST_BIT(RAMROD_RETRY, ¶ms->ramrod_flags))) { 6748 while ((rc == ECORE_BUSY) && (--cnt > 0)) { 6749 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex); 6750 msleep(10); 6751 ECORE_MUTEX_LOCK(&o->one_pending_mutex); 6752 rc = o->check_transition(pdev, o, params); 6753 } 6754 if (rc == ECORE_BUSY) { 6755 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex); 6756 ECORE_ERR("timeout waiting for previous ramrod completion\n"); 6757 return rc; 6758 } 6759 } else if (rc) { 6760 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex); 6761 return rc; 6762 } 6763 6764 /* Set "pending" bit */ 6765 ECORE_SET_BIT(cmd, pending); 6766 6767 /* Don't send a command if only driver cleanup was requested */ 6768 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) { 6769 ecore_func_state_change_comp(pdev, o, cmd); 6770 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex); 6771 } else { 6772 /* Send a ramrod */ 6773 rc = o->send_cmd(pdev, params); 6774 6775 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex); 6776 6777 if (rc) { 6778 o->next_state = ECORE_F_STATE_MAX; 6779 ECORE_CLEAR_BIT(cmd, pending); 6780 smp_mb__after_atomic(); 6781 return rc; 6782 } 6783 6784 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, ¶ms->ramrod_flags)) { 6785 rc = o->wait_comp(pdev, o, cmd); 6786 if (rc) 6787 return rc; 6788 6789 return ECORE_SUCCESS; 6790 } 6791 } 6792 6793 return ECORE_RET_PENDING(cmd, pending); 6794 } 6795 #endif 6796