/*-
 * Copyright (c) 2007-2013 Broadcom Corporation. All rights reserved.
 *
 * Eric Davis        <edavis@broadcom.com>
 * David Christensen <davidch@broadcom.com>
 * Gary Zambrano     <zambrano@broadcom.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written consent.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "bxe.h"
#include "ecore_init.h"

/**** Exe Queue interfaces ****/

/**
 * ecore_exe_queue_init - init the Exe Queue object
 *
 * @sc: driver handle
 * @o: pointer to the object
 * @exe_len: length
 * @owner: pointer to the owner
 * @validate: validate function pointer
 * @remove: remove function pointer
 * @optimize: optimize function pointer
 * @exec: execute function pointer
 * @get: get function pointer
 */
static inline void ecore_exe_queue_init(struct bxe_softc *sc,
					struct ecore_exe_queue_obj *o,
					int exe_len,
					union ecore_qable_obj *owner,
					exe_q_validate validate,
					exe_q_remove remove,
					exe_q_optimize optimize,
					exe_q_execute exec,
					exe_q_get get)
{
	ECORE_MEMSET(o, 0, sizeof(*o));

	ECORE_LIST_INIT(&o->exe_queue);
	ECORE_LIST_INIT(&o->pending_comp);

	ECORE_SPIN_LOCK_INIT(&o->lock, sc);

	o->exe_chunk_len = exe_len;
	o->owner         = owner;

	/* Owner specific callbacks */
	o->validate      = validate;
	o->remove        = remove;
	o->optimize      = optimize;
	o->execute       = exec;
	o->get           = get;

	ECORE_MSG(sc, "Setup the execution queue with the chunk length of %d\n",
		  exe_len);
}

static inline void ecore_exe_queue_free_elem(struct bxe_softc *sc,
					     struct ecore_exeq_elem *elem)
{
	ECORE_MSG(sc, "Deleting an exe_queue element\n");
	ECORE_FREE(sc, elem, sizeof(*elem));
}

static inline int ecore_exe_queue_length(struct ecore_exe_queue_obj *o)
{
	struct ecore_exeq_elem *elem;
	int cnt = 0;

	ECORE_SPIN_LOCK_BH(&o->lock);

	ECORE_LIST_FOR_EACH_ENTRY(elem, &o->exe_queue, link,
				  struct ecore_exeq_elem)
		cnt++;

	ECORE_SPIN_UNLOCK_BH(&o->lock);

	return cnt;
}

/**
 * ecore_exe_queue_add - add a new element to the
execution queue 109 * 110 * @sc: driver handle 111 * @o: queue 112 * @cmd: new command to add 113 * @restore: true - do not optimize the command 114 * 115 * If the element is optimized or is illegal, frees it. 116 */ 117 static inline int ecore_exe_queue_add(struct bxe_softc *sc, 118 struct ecore_exe_queue_obj *o, 119 struct ecore_exeq_elem *elem, 120 bool restore) 121 { 122 int rc; 123 124 ECORE_SPIN_LOCK_BH(&o->lock); 125 126 if (!restore) { 127 /* Try to cancel this element queue */ 128 rc = o->optimize(sc, o->owner, elem); 129 if (rc) 130 goto free_and_exit; 131 132 /* Check if this request is ok */ 133 rc = o->validate(sc, o->owner, elem); 134 if (rc) { 135 ECORE_MSG(sc, "Preamble failed: %d\n", rc); 136 goto free_and_exit; 137 } 138 } 139 140 /* If so, add it to the execution queue */ 141 ECORE_LIST_PUSH_TAIL(&elem->link, &o->exe_queue); 142 143 ECORE_SPIN_UNLOCK_BH(&o->lock); 144 145 return ECORE_SUCCESS; 146 147 free_and_exit: 148 ecore_exe_queue_free_elem(sc, elem); 149 150 ECORE_SPIN_UNLOCK_BH(&o->lock); 151 152 return rc; 153 } 154 155 static inline void __ecore_exe_queue_reset_pending( 156 struct bxe_softc *sc, 157 struct ecore_exe_queue_obj *o) 158 { 159 struct ecore_exeq_elem *elem; 160 161 while (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) { 162 elem = ECORE_LIST_FIRST_ENTRY(&o->pending_comp, 163 struct ecore_exeq_elem, 164 link); 165 166 ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->pending_comp); 167 ecore_exe_queue_free_elem(sc, elem); 168 } 169 } 170 171 /** 172 * ecore_exe_queue_step - execute one execution chunk atomically 173 * 174 * @sc: driver handle 175 * @o: queue 176 * @ramrod_flags: flags 177 * 178 * (Should be called while holding the exe_queue->lock). 179 */ 180 static inline int ecore_exe_queue_step(struct bxe_softc *sc, 181 struct ecore_exe_queue_obj *o, 182 unsigned long *ramrod_flags) 183 { 184 struct ecore_exeq_elem *elem, spacer; 185 int cur_len = 0, rc; 186 187 ECORE_MEMSET(&spacer, 0, sizeof(spacer)); 188 189 /* Next step should not be performed until the current is finished, 190 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to 191 * properly clear object internals without sending any command to the FW 192 * which also implies there won't be any completion to clear the 193 * 'pending' list. 194 */ 195 if (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) { 196 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) { 197 ECORE_MSG(sc, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n"); 198 __ecore_exe_queue_reset_pending(sc, o); 199 } else { 200 return ECORE_PENDING; 201 } 202 } 203 204 /* Run through the pending commands list and create a next 205 * execution chunk. 206 */ 207 while (!ECORE_LIST_IS_EMPTY(&o->exe_queue)) { 208 elem = ECORE_LIST_FIRST_ENTRY(&o->exe_queue, 209 struct ecore_exeq_elem, 210 link); 211 ECORE_DBG_BREAK_IF(!elem->cmd_len); 212 213 if (cur_len + elem->cmd_len <= o->exe_chunk_len) { 214 cur_len += elem->cmd_len; 215 /* Prevent from both lists being empty when moving an 216 * element. This will allow the call of 217 * ecore_exe_queue_empty() without locking. 
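			 * The dummy 'spacer' element is what provides that
			 * guarantee: it keeps pending_comp non-empty for the
			 * short window in which the real element sits on
			 * neither list, so a lockless ecore_exe_queue_empty()
			 * can never observe both lists empty at the same time.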
218 */ 219 ECORE_LIST_PUSH_TAIL(&spacer.link, &o->pending_comp); 220 mb(); 221 ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->exe_queue); 222 ECORE_LIST_PUSH_TAIL(&elem->link, &o->pending_comp); 223 ECORE_LIST_REMOVE_ENTRY(&spacer.link, &o->pending_comp); 224 } else 225 break; 226 } 227 228 /* Sanity check */ 229 if (!cur_len) 230 return ECORE_SUCCESS; 231 232 rc = o->execute(sc, o->owner, &o->pending_comp, ramrod_flags); 233 if (rc < 0) 234 /* In case of an error return the commands back to the queue 235 * and reset the pending_comp. 236 */ 237 ECORE_LIST_SPLICE_INIT(&o->pending_comp, &o->exe_queue); 238 else if (!rc) 239 /* If zero is returned, means there are no outstanding pending 240 * completions and we may dismiss the pending list. 241 */ 242 __ecore_exe_queue_reset_pending(sc, o); 243 244 return rc; 245 } 246 247 static inline bool ecore_exe_queue_empty(struct ecore_exe_queue_obj *o) 248 { 249 bool empty = ECORE_LIST_IS_EMPTY(&o->exe_queue); 250 251 /* Don't reorder!!! */ 252 mb(); 253 254 return empty && ECORE_LIST_IS_EMPTY(&o->pending_comp); 255 } 256 257 static inline struct ecore_exeq_elem *ecore_exe_queue_alloc_elem( 258 struct bxe_softc *sc) 259 { 260 ECORE_MSG(sc, "Allocating a new exe_queue element\n"); 261 return ECORE_ZALLOC(sizeof(struct ecore_exeq_elem), GFP_ATOMIC, 262 sc); 263 } 264 265 /************************ raw_obj functions ***********************************/ 266 static bool ecore_raw_check_pending(struct ecore_raw_obj *o) 267 { 268 /* 269 * !! converts the value returned by ECORE_TEST_BIT such that it 270 * is guaranteed not to be truncated regardless of bool definition. 271 * 272 * Note we cannot simply define the function's return value type 273 * to match the type returned by ECORE_TEST_BIT, as it varies by 274 * platform/implementation. 275 */ 276 277 return !!ECORE_TEST_BIT(o->state, o->pstate); 278 } 279 280 static void ecore_raw_clear_pending(struct ecore_raw_obj *o) 281 { 282 ECORE_SMP_MB_BEFORE_CLEAR_BIT(); 283 ECORE_CLEAR_BIT(o->state, o->pstate); 284 ECORE_SMP_MB_AFTER_CLEAR_BIT(); 285 } 286 287 static void ecore_raw_set_pending(struct ecore_raw_obj *o) 288 { 289 ECORE_SMP_MB_BEFORE_CLEAR_BIT(); 290 ECORE_SET_BIT(o->state, o->pstate); 291 ECORE_SMP_MB_AFTER_CLEAR_BIT(); 292 } 293 294 /** 295 * ecore_state_wait - wait until the given bit(state) is cleared 296 * 297 * @sc: device handle 298 * @state: state which is to be cleared 299 * @state_p: state buffer 300 * 301 */ 302 static inline int ecore_state_wait(struct bxe_softc *sc, int state, 303 unsigned long *pstate) 304 { 305 /* can take a while if any port is running */ 306 int cnt = 5000; 307 308 309 if (CHIP_REV_IS_EMUL(sc)) 310 cnt *= 20; 311 312 ECORE_MSG(sc, "waiting for state to become %d\n", state); 313 314 ECORE_MIGHT_SLEEP(); 315 while (cnt--) { 316 if (!ECORE_TEST_BIT(state, pstate)) { 317 #ifdef ECORE_STOP_ON_ERROR 318 ECORE_MSG(sc, "exit (cnt %d)\n", 5000 - cnt); 319 #endif 320 return ECORE_SUCCESS; 321 } 322 323 ECORE_WAIT(sc, delay_us); 324 325 if (sc->panic) 326 return ECORE_IO; 327 } 328 329 /* timeout! 
*/ 330 ECORE_ERR("timeout waiting for state %d\n", state); 331 #ifdef ECORE_STOP_ON_ERROR 332 ecore_panic(); 333 #endif 334 335 return ECORE_TIMEOUT; 336 } 337 338 static int ecore_raw_wait(struct bxe_softc *sc, struct ecore_raw_obj *raw) 339 { 340 return ecore_state_wait(sc, raw->state, raw->pstate); 341 } 342 343 /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/ 344 /* credit handling callbacks */ 345 static bool ecore_get_cam_offset_mac(struct ecore_vlan_mac_obj *o, int *offset) 346 { 347 struct ecore_credit_pool_obj *mp = o->macs_pool; 348 349 ECORE_DBG_BREAK_IF(!mp); 350 351 return mp->get_entry(mp, offset); 352 } 353 354 static bool ecore_get_credit_mac(struct ecore_vlan_mac_obj *o) 355 { 356 struct ecore_credit_pool_obj *mp = o->macs_pool; 357 358 ECORE_DBG_BREAK_IF(!mp); 359 360 return mp->get(mp, 1); 361 } 362 363 static bool ecore_get_cam_offset_vlan(struct ecore_vlan_mac_obj *o, int *offset) 364 { 365 struct ecore_credit_pool_obj *vp = o->vlans_pool; 366 367 ECORE_DBG_BREAK_IF(!vp); 368 369 return vp->get_entry(vp, offset); 370 } 371 372 static bool ecore_get_credit_vlan(struct ecore_vlan_mac_obj *o) 373 { 374 struct ecore_credit_pool_obj *vp = o->vlans_pool; 375 376 ECORE_DBG_BREAK_IF(!vp); 377 378 return vp->get(vp, 1); 379 } 380 381 static bool ecore_get_credit_vlan_mac(struct ecore_vlan_mac_obj *o) 382 { 383 struct ecore_credit_pool_obj *mp = o->macs_pool; 384 struct ecore_credit_pool_obj *vp = o->vlans_pool; 385 386 if (!mp->get(mp, 1)) 387 return FALSE; 388 389 if (!vp->get(vp, 1)) { 390 mp->put(mp, 1); 391 return FALSE; 392 } 393 394 return TRUE; 395 } 396 397 static bool ecore_put_cam_offset_mac(struct ecore_vlan_mac_obj *o, int offset) 398 { 399 struct ecore_credit_pool_obj *mp = o->macs_pool; 400 401 return mp->put_entry(mp, offset); 402 } 403 404 static bool ecore_put_credit_mac(struct ecore_vlan_mac_obj *o) 405 { 406 struct ecore_credit_pool_obj *mp = o->macs_pool; 407 408 return mp->put(mp, 1); 409 } 410 411 static bool ecore_put_cam_offset_vlan(struct ecore_vlan_mac_obj *o, int offset) 412 { 413 struct ecore_credit_pool_obj *vp = o->vlans_pool; 414 415 return vp->put_entry(vp, offset); 416 } 417 418 static bool ecore_put_credit_vlan(struct ecore_vlan_mac_obj *o) 419 { 420 struct ecore_credit_pool_obj *vp = o->vlans_pool; 421 422 return vp->put(vp, 1); 423 } 424 425 static bool ecore_put_credit_vlan_mac(struct ecore_vlan_mac_obj *o) 426 { 427 struct ecore_credit_pool_obj *mp = o->macs_pool; 428 struct ecore_credit_pool_obj *vp = o->vlans_pool; 429 430 if (!mp->put(mp, 1)) 431 return FALSE; 432 433 if (!vp->put(vp, 1)) { 434 mp->get(mp, 1); 435 return FALSE; 436 } 437 438 return TRUE; 439 } 440 441 /** 442 * __ecore_vlan_mac_h_write_trylock - try getting the writer lock on vlan mac 443 * head list. 444 * 445 * @sc: device handle 446 * @o: vlan_mac object 447 * 448 * @details: Non-blocking implementation; should be called under execution 449 * queue lock. 450 */ 451 static int __ecore_vlan_mac_h_write_trylock(struct bxe_softc *sc, 452 struct ecore_vlan_mac_obj *o) 453 { 454 if (o->head_reader) { 455 ECORE_MSG(sc, "vlan_mac_lock writer - There are readers; Busy\n"); 456 return ECORE_BUSY; 457 } 458 459 ECORE_MSG(sc, "vlan_mac_lock writer - Taken\n"); 460 return ECORE_SUCCESS; 461 } 462 463 /** 464 * __ecore_vlan_mac_h_exec_pending - execute step instead of a previous step 465 * which wasn't able to run due to a taken lock on vlan mac head list. 
466 * 467 * @sc: device handle 468 * @o: vlan_mac object 469 * 470 * @details Should be called under execution queue lock; notice it might release 471 * and reclaim it during its run. 472 */ 473 static void __ecore_vlan_mac_h_exec_pending(struct bxe_softc *sc, 474 struct ecore_vlan_mac_obj *o) 475 { 476 int rc; 477 unsigned long ramrod_flags = o->saved_ramrod_flags; 478 479 ECORE_MSG(sc, "vlan_mac_lock execute pending command with ramrod flags %lu\n", 480 ramrod_flags); 481 o->head_exe_request = FALSE; 482 o->saved_ramrod_flags = 0; 483 rc = ecore_exe_queue_step(sc, &o->exe_queue, &ramrod_flags); 484 if (rc != ECORE_SUCCESS) { 485 ECORE_ERR("execution of pending commands failed with rc %d\n", 486 rc); 487 #ifdef ECORE_STOP_ON_ERROR 488 ecore_panic(); 489 #endif 490 } 491 } 492 493 /** 494 * __ecore_vlan_mac_h_pend - Pend an execution step which couldn't have been 495 * called due to vlan mac head list lock being taken. 496 * 497 * @sc: device handle 498 * @o: vlan_mac object 499 * @ramrod_flags: ramrod flags of missed execution 500 * 501 * @details Should be called under execution queue lock. 502 */ 503 static void __ecore_vlan_mac_h_pend(struct bxe_softc *sc, 504 struct ecore_vlan_mac_obj *o, 505 unsigned long ramrod_flags) 506 { 507 o->head_exe_request = TRUE; 508 o->saved_ramrod_flags = ramrod_flags; 509 ECORE_MSG(sc, "Placing pending execution with ramrod flags %lu\n", 510 ramrod_flags); 511 } 512 513 /** 514 * __ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock 515 * 516 * @sc: device handle 517 * @o: vlan_mac object 518 * 519 * @details Should be called under execution queue lock. Notice if a pending 520 * execution exists, it would perform it - possibly releasing and 521 * reclaiming the execution queue lock. 522 */ 523 static void __ecore_vlan_mac_h_write_unlock(struct bxe_softc *sc, 524 struct ecore_vlan_mac_obj *o) 525 { 526 /* It's possible a new pending execution was added since this writer 527 * executed. If so, execute again. [Ad infinitum] 528 */ 529 while(o->head_exe_request) { 530 ECORE_MSG(sc, "vlan_mac_lock - writer release encountered a pending request\n"); 531 __ecore_vlan_mac_h_exec_pending(sc, o); 532 } 533 } 534 535 /** 536 * ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock 537 * 538 * @sc: device handle 539 * @o: vlan_mac object 540 * 541 * @details Notice if a pending execution exists, it would perform it - 542 * possibly releasing and reclaiming the execution queue lock. 543 */ 544 void ecore_vlan_mac_h_write_unlock(struct bxe_softc *sc, 545 struct ecore_vlan_mac_obj *o) 546 { 547 ECORE_SPIN_LOCK_BH(&o->exe_queue.lock); 548 __ecore_vlan_mac_h_write_unlock(sc, o); 549 ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock); 550 } 551 552 /** 553 * __ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock 554 * 555 * @sc: device handle 556 * @o: vlan_mac object 557 * 558 * @details Should be called under the execution queue lock. May sleep. May 559 * release and reclaim execution queue lock during its run. 560 */ 561 static int __ecore_vlan_mac_h_read_lock(struct bxe_softc *sc, 562 struct ecore_vlan_mac_obj *o) 563 { 564 /* If we got here, we're holding lock --> no WRITER exists */ 565 o->head_reader++; 566 ECORE_MSG(sc, "vlan_mac_lock - locked reader - number %d\n", 567 o->head_reader); 568 569 return ECORE_SUCCESS; 570 } 571 572 /** 573 * ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock 574 * 575 * @sc: device handle 576 * @o: vlan_mac object 577 * 578 * @details May sleep. 
Claims and releases execution queue lock during its run. 579 */ 580 int ecore_vlan_mac_h_read_lock(struct bxe_softc *sc, 581 struct ecore_vlan_mac_obj *o) 582 { 583 int rc; 584 585 ECORE_SPIN_LOCK_BH(&o->exe_queue.lock); 586 rc = __ecore_vlan_mac_h_read_lock(sc, o); 587 ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock); 588 589 return rc; 590 } 591 592 /** 593 * __ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock 594 * 595 * @sc: device handle 596 * @o: vlan_mac object 597 * 598 * @details Should be called under execution queue lock. Notice if a pending 599 * execution exists, it would be performed if this was the last 600 * reader. possibly releasing and reclaiming the execution queue lock. 601 */ 602 static void __ecore_vlan_mac_h_read_unlock(struct bxe_softc *sc, 603 struct ecore_vlan_mac_obj *o) 604 { 605 if (!o->head_reader) { 606 ECORE_ERR("Need to release vlan mac reader lock, but lock isn't taken\n"); 607 #ifdef ECORE_STOP_ON_ERROR 608 ecore_panic(); 609 #endif 610 } else { 611 o->head_reader--; 612 ECORE_MSG(sc, "vlan_mac_lock - decreased readers to %d\n", 613 o->head_reader); 614 } 615 616 /* It's possible a new pending execution was added, and that this reader 617 * was last - if so we need to execute the command. 618 */ 619 if (!o->head_reader && o->head_exe_request) { 620 ECORE_MSG(sc, "vlan_mac_lock - reader release encountered a pending request\n"); 621 622 /* Writer release will do the trick */ 623 __ecore_vlan_mac_h_write_unlock(sc, o); 624 } 625 } 626 627 /** 628 * ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock 629 * 630 * @sc: device handle 631 * @o: vlan_mac object 632 * 633 * @details Notice if a pending execution exists, it would be performed if this 634 * was the last reader. Claims and releases the execution queue lock 635 * during its run. 
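 *
 *          A minimal usage sketch (illustrative only, mirroring what
 *          ecore_get_n_elements() below does):
 *
 *		if (ecore_vlan_mac_h_read_lock(sc, o) == ECORE_SUCCESS) {
 *			... walk o->head under the reader lock ...
 *			ecore_vlan_mac_h_read_unlock(sc, o);
 *		}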
 */
void ecore_vlan_mac_h_read_unlock(struct bxe_softc *sc,
				  struct ecore_vlan_mac_obj *o)
{
	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
	__ecore_vlan_mac_h_read_unlock(sc, o);
	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
}

/**
 * ecore_get_n_elements - get n elements of the vlan_mac registry list
 *
 * @sc: device handle
 * @o: vlan_mac object
 * @n: number of elements to get
 * @base: base address for element placement
 * @stride: stride between elements (in bytes)
 * @size: size of an element to copy (in bytes)
 */
static int ecore_get_n_elements(struct bxe_softc *sc, struct ecore_vlan_mac_obj *o,
				int n, uint8_t *base, uint8_t stride, uint8_t size)
{
	struct ecore_vlan_mac_registry_elem *pos;
	uint8_t *next = base;
	int counter = 0;
	int read_lock;

	ECORE_MSG(sc, "get_n_elements - taking vlan_mac_lock (reader)\n");
	read_lock = ecore_vlan_mac_h_read_lock(sc, o);
	if (read_lock != ECORE_SUCCESS)
		ECORE_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n");

	/* traverse list */
	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
				  struct ecore_vlan_mac_registry_elem) {
		if (counter < n) {
			ECORE_MEMCPY(next, &pos->u, size);
			counter++;
			ECORE_MSG(sc, "copied element number %d to address %p element was:\n",
				  counter, next);
			next += stride + size;
		}
	}

	if (read_lock == ECORE_SUCCESS) {
		ECORE_MSG(sc, "get_n_elements - releasing vlan_mac_lock (reader)\n");
		ecore_vlan_mac_h_read_unlock(sc, o);
	}

	return counter * ETH_ALEN;
}

/* check_add() callbacks */
static int ecore_check_mac_add(struct bxe_softc *sc,
			       struct ecore_vlan_mac_obj *o,
			       union ecore_classification_ramrod_data *data)
{
	struct ecore_vlan_mac_registry_elem *pos;

	ECORE_MSG(sc, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for ADD command\n", data->mac.mac[0], data->mac.mac[1], data->mac.mac[2], data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);

	if (!ECORE_IS_VALID_ETHER_ADDR(data->mac.mac))
		return ECORE_INVAL;

	/* Check if a requested MAC already exists */
	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
				  struct ecore_vlan_mac_registry_elem)
		if (!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
		    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
			return ECORE_EXISTS;

	return ECORE_SUCCESS;
}

static int ecore_check_vlan_add(struct bxe_softc *sc,
				struct ecore_vlan_mac_obj *o,
				union ecore_classification_ramrod_data *data)
{
	struct ecore_vlan_mac_registry_elem *pos;

	ECORE_MSG(sc, "Checking VLAN %d for ADD command\n", data->vlan.vlan);

	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
				  struct ecore_vlan_mac_registry_elem)
		if (data->vlan.vlan == pos->u.vlan.vlan)
			return ECORE_EXISTS;

	return ECORE_SUCCESS;
}

static int ecore_check_vlan_mac_add(struct bxe_softc *sc,
				    struct ecore_vlan_mac_obj *o,
				    union ecore_classification_ramrod_data *data)
{
	struct ecore_vlan_mac_registry_elem *pos;

	ECORE_MSG(sc, "Checking VLAN_MAC (%02x:%02x:%02x:%02x:%02x:%02x, %d) for ADD command\n",
		  data->vlan_mac.mac[0], data->vlan_mac.mac[1], data->vlan_mac.mac[2], data->vlan_mac.mac[3], data->vlan_mac.mac[4], data->vlan_mac.mac[5], data->vlan_mac.vlan);

	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
				  struct ecore_vlan_mac_registry_elem)
		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
		    (!ECORE_MEMCMP(data->vlan_mac.mac,
pos->u.vlan_mac.mac, 738 ETH_ALEN)) && 739 (data->vlan_mac.is_inner_mac == 740 pos->u.vlan_mac.is_inner_mac)) 741 return ECORE_EXISTS; 742 743 return ECORE_SUCCESS; 744 } 745 746 /* check_del() callbacks */ 747 static struct ecore_vlan_mac_registry_elem * 748 ecore_check_mac_del(struct bxe_softc *sc, 749 struct ecore_vlan_mac_obj *o, 750 union ecore_classification_ramrod_data *data) 751 { 752 struct ecore_vlan_mac_registry_elem *pos; 753 754 ECORE_MSG(sc, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for DEL command\n", data->mac.mac[0], data->mac.mac[1], data->mac.mac[2], data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]); 755 756 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link, 757 struct ecore_vlan_mac_registry_elem) 758 if ((!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) && 759 (data->mac.is_inner_mac == pos->u.mac.is_inner_mac)) 760 return pos; 761 762 return NULL; 763 } 764 765 static struct ecore_vlan_mac_registry_elem * 766 ecore_check_vlan_del(struct bxe_softc *sc, 767 struct ecore_vlan_mac_obj *o, 768 union ecore_classification_ramrod_data *data) 769 { 770 struct ecore_vlan_mac_registry_elem *pos; 771 772 ECORE_MSG(sc, "Checking VLAN %d for DEL command\n", data->vlan.vlan); 773 774 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link, 775 struct ecore_vlan_mac_registry_elem) 776 if (data->vlan.vlan == pos->u.vlan.vlan) 777 return pos; 778 779 return NULL; 780 } 781 782 static struct ecore_vlan_mac_registry_elem * 783 ecore_check_vlan_mac_del(struct bxe_softc *sc, 784 struct ecore_vlan_mac_obj *o, 785 union ecore_classification_ramrod_data *data) 786 { 787 struct ecore_vlan_mac_registry_elem *pos; 788 789 ECORE_MSG(sc, "Checking VLAN_MAC (%02x:%02x:%02x:%02x:%02x:%02x, %d) for DEL command\n", 790 data->vlan_mac.mac[0], data->vlan_mac.mac[1], data->vlan_mac.mac[2], data->vlan_mac.mac[3], data->vlan_mac.mac[4], data->vlan_mac.mac[5], data->vlan_mac.vlan); 791 792 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link, 793 struct ecore_vlan_mac_registry_elem) 794 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) && 795 (!ECORE_MEMCMP(data->vlan_mac.mac, pos->u.vlan_mac.mac, 796 ETH_ALEN)) && 797 (data->vlan_mac.is_inner_mac == 798 pos->u.vlan_mac.is_inner_mac)) 799 return pos; 800 801 return NULL; 802 } 803 804 /* check_move() callback */ 805 static bool ecore_check_move(struct bxe_softc *sc, 806 struct ecore_vlan_mac_obj *src_o, 807 struct ecore_vlan_mac_obj *dst_o, 808 union ecore_classification_ramrod_data *data) 809 { 810 struct ecore_vlan_mac_registry_elem *pos; 811 int rc; 812 813 /* Check if we can delete the requested configuration from the first 814 * object. 815 */ 816 pos = src_o->check_del(sc, src_o, data); 817 818 /* check if configuration can be added */ 819 rc = dst_o->check_add(sc, dst_o, data); 820 821 /* If this classification can not be added (is already set) 822 * or can't be deleted - return an error. 
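	 * In other words, a MOVE is only legal when the entry can be deleted
	 * from the source object and added to the destination object.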
823 */ 824 if (rc || !pos) 825 return FALSE; 826 827 return TRUE; 828 } 829 830 static bool ecore_check_move_always_err( 831 struct bxe_softc *sc, 832 struct ecore_vlan_mac_obj *src_o, 833 struct ecore_vlan_mac_obj *dst_o, 834 union ecore_classification_ramrod_data *data) 835 { 836 return FALSE; 837 } 838 839 static inline uint8_t ecore_vlan_mac_get_rx_tx_flag(struct ecore_vlan_mac_obj *o) 840 { 841 struct ecore_raw_obj *raw = &o->raw; 842 uint8_t rx_tx_flag = 0; 843 844 if ((raw->obj_type == ECORE_OBJ_TYPE_TX) || 845 (raw->obj_type == ECORE_OBJ_TYPE_RX_TX)) 846 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD; 847 848 if ((raw->obj_type == ECORE_OBJ_TYPE_RX) || 849 (raw->obj_type == ECORE_OBJ_TYPE_RX_TX)) 850 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD; 851 852 return rx_tx_flag; 853 } 854 855 void ecore_set_mac_in_nig(struct bxe_softc *sc, 856 bool add, unsigned char *dev_addr, int index) 857 { 858 uint32_t wb_data[2]; 859 uint32_t reg_offset = ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM : 860 NIG_REG_LLH0_FUNC_MEM; 861 862 if (!ECORE_IS_MF_SI_MODE(sc) && !IS_MF_AFEX(sc)) 863 return; 864 865 if (index > ECORE_LLH_CAM_MAX_PF_LINE) 866 return; 867 868 ECORE_MSG(sc, "Going to %s LLH configuration at entry %d\n", 869 (add ? "ADD" : "DELETE"), index); 870 871 if (add) { 872 /* LLH_FUNC_MEM is a uint64_t WB register */ 873 reg_offset += 8*index; 874 875 wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) | 876 (dev_addr[4] << 8) | dev_addr[5]); 877 wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]); 878 879 ECORE_REG_WR_DMAE_LEN(sc, reg_offset, wb_data, 2); 880 } 881 882 REG_WR(sc, (ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM_ENABLE : 883 NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add); 884 } 885 886 /** 887 * ecore_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod 888 * 889 * @sc: device handle 890 * @o: queue for which we want to configure this rule 891 * @add: if TRUE the command is an ADD command, DEL otherwise 892 * @opcode: CLASSIFY_RULE_OPCODE_XXX 893 * @hdr: pointer to a header to setup 894 * 895 */ 896 static inline void ecore_vlan_mac_set_cmd_hdr_e2(struct bxe_softc *sc, 897 struct ecore_vlan_mac_obj *o, bool add, int opcode, 898 struct eth_classify_cmd_header *hdr) 899 { 900 struct ecore_raw_obj *raw = &o->raw; 901 902 hdr->client_id = raw->cl_id; 903 hdr->func_id = raw->func_id; 904 905 /* Rx or/and Tx (internal switching) configuration ? */ 906 hdr->cmd_general_data |= 907 ecore_vlan_mac_get_rx_tx_flag(o); 908 909 if (add) 910 hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD; 911 912 hdr->cmd_general_data |= 913 (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT); 914 } 915 916 /** 917 * ecore_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header 918 * 919 * @cid: connection id 920 * @type: ECORE_FILTER_XXX_PENDING 921 * @hdr: pointer to header to setup 922 * @rule_cnt: 923 * 924 * currently we always configure one rule and echo field to contain a CID and an 925 * opcode type. 
926 */ 927 static inline void ecore_vlan_mac_set_rdata_hdr_e2(uint32_t cid, int type, 928 struct eth_classify_header *hdr, int rule_cnt) 929 { 930 hdr->echo = ECORE_CPU_TO_LE32((cid & ECORE_SWCID_MASK) | 931 (type << ECORE_SWCID_SHIFT)); 932 hdr->rule_cnt = (uint8_t)rule_cnt; 933 } 934 935 /* hw_config() callbacks */ 936 static void ecore_set_one_mac_e2(struct bxe_softc *sc, 937 struct ecore_vlan_mac_obj *o, 938 struct ecore_exeq_elem *elem, int rule_idx, 939 int cam_offset) 940 { 941 struct ecore_raw_obj *raw = &o->raw; 942 struct eth_classify_rules_ramrod_data *data = 943 (struct eth_classify_rules_ramrod_data *)(raw->rdata); 944 int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd; 945 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; 946 bool add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE; 947 unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags; 948 uint8_t *mac = elem->cmd_data.vlan_mac.u.mac.mac; 949 950 /* Set LLH CAM entry: currently only iSCSI and ETH macs are 951 * relevant. In addition, current implementation is tuned for a 952 * single ETH MAC. 953 * 954 * When multiple unicast ETH MACs PF configuration in switch 955 * independent mode is required (NetQ, multiple netdev MACs, 956 * etc.), consider better utilisation of 8 per function MAC 957 * entries in the LLH register. There is also 958 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the 959 * total number of CAM entries to 16. 960 * 961 * Currently we won't configure NIG for MACs other than a primary ETH 962 * MAC and iSCSI L2 MAC. 963 * 964 * If this MAC is moving from one Queue to another, no need to change 965 * NIG configuration. 966 */ 967 if (cmd != ECORE_VLAN_MAC_MOVE) { 968 if (ECORE_TEST_BIT(ECORE_ISCSI_ETH_MAC, vlan_mac_flags)) 969 ecore_set_mac_in_nig(sc, add, mac, 970 ECORE_LLH_CAM_ISCSI_ETH_LINE); 971 else if (ECORE_TEST_BIT(ECORE_ETH_MAC, vlan_mac_flags)) 972 ecore_set_mac_in_nig(sc, add, mac, 973 ECORE_LLH_CAM_ETH_LINE); 974 } 975 976 /* Reset the ramrod data buffer for the first rule */ 977 if (rule_idx == 0) 978 ECORE_MEMSET(data, 0, sizeof(*data)); 979 980 /* Setup a command header */ 981 ecore_vlan_mac_set_cmd_hdr_e2(sc, o, add, CLASSIFY_RULE_OPCODE_MAC, 982 &rule_entry->mac.header); 983 984 ECORE_MSG(sc, "About to %s MAC %02x:%02x:%02x:%02x:%02x:%02x for Queue %d\n", 985 (add ? 
"add" : "delete"), mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], raw->cl_id); 986 987 /* Set a MAC itself */ 988 ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb, 989 &rule_entry->mac.mac_mid, 990 &rule_entry->mac.mac_lsb, mac); 991 rule_entry->mac.inner_mac = 992 elem->cmd_data.vlan_mac.u.mac.is_inner_mac; 993 994 /* MOVE: Add a rule that will add this MAC to the target Queue */ 995 if (cmd == ECORE_VLAN_MAC_MOVE) { 996 rule_entry++; 997 rule_cnt++; 998 999 /* Setup ramrod data */ 1000 ecore_vlan_mac_set_cmd_hdr_e2(sc, 1001 elem->cmd_data.vlan_mac.target_obj, 1002 TRUE, CLASSIFY_RULE_OPCODE_MAC, 1003 &rule_entry->mac.header); 1004 1005 /* Set a MAC itself */ 1006 ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb, 1007 &rule_entry->mac.mac_mid, 1008 &rule_entry->mac.mac_lsb, mac); 1009 rule_entry->mac.inner_mac = 1010 elem->cmd_data.vlan_mac.u.mac.is_inner_mac; 1011 } 1012 1013 /* Set the ramrod data header */ 1014 /* TODO: take this to the higher level in order to prevent multiple 1015 writing */ 1016 ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header, 1017 rule_cnt); 1018 } 1019 1020 /** 1021 * ecore_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod 1022 * 1023 * @sc: device handle 1024 * @o: queue 1025 * @type: 1026 * @cam_offset: offset in cam memory 1027 * @hdr: pointer to a header to setup 1028 * 1029 * E1/E1H 1030 */ 1031 static inline void ecore_vlan_mac_set_rdata_hdr_e1x(struct bxe_softc *sc, 1032 struct ecore_vlan_mac_obj *o, int type, int cam_offset, 1033 struct mac_configuration_hdr *hdr) 1034 { 1035 struct ecore_raw_obj *r = &o->raw; 1036 1037 hdr->length = 1; 1038 hdr->offset = (uint8_t)cam_offset; 1039 hdr->client_id = ECORE_CPU_TO_LE16(0xff); 1040 hdr->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) | 1041 (type << ECORE_SWCID_SHIFT)); 1042 } 1043 1044 static inline void ecore_vlan_mac_set_cfg_entry_e1x(struct bxe_softc *sc, 1045 struct ecore_vlan_mac_obj *o, bool add, int opcode, uint8_t *mac, 1046 uint16_t vlan_id, struct mac_configuration_entry *cfg_entry) 1047 { 1048 struct ecore_raw_obj *r = &o->raw; 1049 uint32_t cl_bit_vec = (1 << r->cl_id); 1050 1051 cfg_entry->clients_bit_vector = ECORE_CPU_TO_LE32(cl_bit_vec); 1052 cfg_entry->pf_id = r->func_id; 1053 cfg_entry->vlan_id = ECORE_CPU_TO_LE16(vlan_id); 1054 1055 if (add) { 1056 ECORE_SET_FLAG(cfg_entry->flags, 1057 MAC_CONFIGURATION_ENTRY_ACTION_TYPE, 1058 T_ETH_MAC_COMMAND_SET); 1059 ECORE_SET_FLAG(cfg_entry->flags, 1060 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, 1061 opcode); 1062 1063 /* Set a MAC in a ramrod data */ 1064 ecore_set_fw_mac_addr(&cfg_entry->msb_mac_addr, 1065 &cfg_entry->middle_mac_addr, 1066 &cfg_entry->lsb_mac_addr, mac); 1067 } else 1068 ECORE_SET_FLAG(cfg_entry->flags, 1069 MAC_CONFIGURATION_ENTRY_ACTION_TYPE, 1070 T_ETH_MAC_COMMAND_INVALIDATE); 1071 } 1072 1073 static inline void ecore_vlan_mac_set_rdata_e1x(struct bxe_softc *sc, 1074 struct ecore_vlan_mac_obj *o, int type, int cam_offset, bool add, 1075 uint8_t *mac, uint16_t vlan_id, int opcode, struct mac_configuration_cmd *config) 1076 { 1077 struct mac_configuration_entry *cfg_entry = &config->config_table[0]; 1078 struct ecore_raw_obj *raw = &o->raw; 1079 1080 ecore_vlan_mac_set_rdata_hdr_e1x(sc, o, type, cam_offset, 1081 &config->hdr); 1082 ecore_vlan_mac_set_cfg_entry_e1x(sc, o, add, opcode, mac, vlan_id, 1083 cfg_entry); 1084 1085 ECORE_MSG(sc, "%s MAC %02x:%02x:%02x:%02x:%02x:%02x CLID %d CAM offset %d\n", 1086 (add ? 
"setting" : "clearing"), 1087 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], raw->cl_id, cam_offset); 1088 } 1089 1090 /** 1091 * ecore_set_one_mac_e1x - fill a single MAC rule ramrod data 1092 * 1093 * @sc: device handle 1094 * @o: ecore_vlan_mac_obj 1095 * @elem: ecore_exeq_elem 1096 * @rule_idx: rule_idx 1097 * @cam_offset: cam_offset 1098 */ 1099 static void ecore_set_one_mac_e1x(struct bxe_softc *sc, 1100 struct ecore_vlan_mac_obj *o, 1101 struct ecore_exeq_elem *elem, int rule_idx, 1102 int cam_offset) 1103 { 1104 struct ecore_raw_obj *raw = &o->raw; 1105 struct mac_configuration_cmd *config = 1106 (struct mac_configuration_cmd *)(raw->rdata); 1107 /* 57710 and 57711 do not support MOVE command, 1108 * so it's either ADD or DEL 1109 */ 1110 bool add = (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ? 1111 TRUE : FALSE; 1112 1113 /* Reset the ramrod data buffer */ 1114 ECORE_MEMSET(config, 0, sizeof(*config)); 1115 1116 ecore_vlan_mac_set_rdata_e1x(sc, o, raw->state, 1117 cam_offset, add, 1118 elem->cmd_data.vlan_mac.u.mac.mac, 0, 1119 ETH_VLAN_FILTER_ANY_VLAN, config); 1120 } 1121 1122 static void ecore_set_one_vlan_e2(struct bxe_softc *sc, 1123 struct ecore_vlan_mac_obj *o, 1124 struct ecore_exeq_elem *elem, int rule_idx, 1125 int cam_offset) 1126 { 1127 struct ecore_raw_obj *raw = &o->raw; 1128 struct eth_classify_rules_ramrod_data *data = 1129 (struct eth_classify_rules_ramrod_data *)(raw->rdata); 1130 int rule_cnt = rule_idx + 1; 1131 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; 1132 enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd; 1133 bool add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE; 1134 uint16_t vlan = elem->cmd_data.vlan_mac.u.vlan.vlan; 1135 1136 /* Reset the ramrod data buffer for the first rule */ 1137 if (rule_idx == 0) 1138 ECORE_MEMSET(data, 0, sizeof(*data)); 1139 1140 /* Set a rule header */ 1141 ecore_vlan_mac_set_cmd_hdr_e2(sc, o, add, CLASSIFY_RULE_OPCODE_VLAN, 1142 &rule_entry->vlan.header); 1143 1144 ECORE_MSG(sc, "About to %s VLAN %d\n", (add ? "add" : "delete"), 1145 vlan); 1146 1147 /* Set a VLAN itself */ 1148 rule_entry->vlan.vlan = ECORE_CPU_TO_LE16(vlan); 1149 1150 /* MOVE: Add a rule that will add this MAC to the target Queue */ 1151 if (cmd == ECORE_VLAN_MAC_MOVE) { 1152 rule_entry++; 1153 rule_cnt++; 1154 1155 /* Setup ramrod data */ 1156 ecore_vlan_mac_set_cmd_hdr_e2(sc, 1157 elem->cmd_data.vlan_mac.target_obj, 1158 TRUE, CLASSIFY_RULE_OPCODE_VLAN, 1159 &rule_entry->vlan.header); 1160 1161 /* Set a VLAN itself */ 1162 rule_entry->vlan.vlan = ECORE_CPU_TO_LE16(vlan); 1163 } 1164 1165 /* Set the ramrod data header */ 1166 /* TODO: take this to the higher level in order to prevent multiple 1167 writing */ 1168 ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header, 1169 rule_cnt); 1170 } 1171 1172 static void ecore_set_one_vlan_mac_e2(struct bxe_softc *sc, 1173 struct ecore_vlan_mac_obj *o, 1174 struct ecore_exeq_elem *elem, 1175 int rule_idx, int cam_offset) 1176 { 1177 struct ecore_raw_obj *raw = &o->raw; 1178 struct eth_classify_rules_ramrod_data *data = 1179 (struct eth_classify_rules_ramrod_data *)(raw->rdata); 1180 int rule_cnt = rule_idx + 1; 1181 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; 1182 enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd; 1183 bool add = (cmd == ECORE_VLAN_MAC_ADD) ? 
TRUE : FALSE; 1184 uint16_t vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan; 1185 uint8_t *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac; 1186 1187 /* Reset the ramrod data buffer for the first rule */ 1188 if (rule_idx == 0) 1189 ECORE_MEMSET(data, 0, sizeof(*data)); 1190 1191 /* Set a rule header */ 1192 ecore_vlan_mac_set_cmd_hdr_e2(sc, o, add, CLASSIFY_RULE_OPCODE_PAIR, 1193 &rule_entry->pair.header); 1194 1195 /* Set VLAN and MAC themselves */ 1196 rule_entry->pair.vlan = ECORE_CPU_TO_LE16(vlan); 1197 ecore_set_fw_mac_addr(&rule_entry->pair.mac_msb, 1198 &rule_entry->pair.mac_mid, 1199 &rule_entry->pair.mac_lsb, mac); 1200 rule_entry->pair.inner_mac = 1201 elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac; 1202 /* MOVE: Add a rule that will add this MAC to the target Queue */ 1203 if (cmd == ECORE_VLAN_MAC_MOVE) { 1204 rule_entry++; 1205 rule_cnt++; 1206 1207 /* Setup ramrod data */ 1208 ecore_vlan_mac_set_cmd_hdr_e2(sc, 1209 elem->cmd_data.vlan_mac.target_obj, 1210 TRUE, CLASSIFY_RULE_OPCODE_PAIR, 1211 &rule_entry->pair.header); 1212 1213 /* Set a VLAN itself */ 1214 rule_entry->pair.vlan = ECORE_CPU_TO_LE16(vlan); 1215 ecore_set_fw_mac_addr(&rule_entry->pair.mac_msb, 1216 &rule_entry->pair.mac_mid, 1217 &rule_entry->pair.mac_lsb, mac); 1218 rule_entry->pair.inner_mac = 1219 elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac; 1220 } 1221 1222 /* Set the ramrod data header */ 1223 /* TODO: take this to the higher level in order to prevent multiple 1224 writing */ 1225 ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header, 1226 rule_cnt); 1227 } 1228 1229 /** 1230 * ecore_set_one_vlan_mac_e1h - 1231 * 1232 * @sc: device handle 1233 * @o: ecore_vlan_mac_obj 1234 * @elem: ecore_exeq_elem 1235 * @rule_idx: rule_idx 1236 * @cam_offset: cam_offset 1237 */ 1238 static void ecore_set_one_vlan_mac_e1h(struct bxe_softc *sc, 1239 struct ecore_vlan_mac_obj *o, 1240 struct ecore_exeq_elem *elem, 1241 int rule_idx, int cam_offset) 1242 { 1243 struct ecore_raw_obj *raw = &o->raw; 1244 struct mac_configuration_cmd *config = 1245 (struct mac_configuration_cmd *)(raw->rdata); 1246 /* 57710 and 57711 do not support MOVE command, 1247 * so it's either ADD or DEL 1248 */ 1249 bool add = (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ? 1250 TRUE : FALSE; 1251 1252 /* Reset the ramrod data buffer */ 1253 ECORE_MEMSET(config, 0, sizeof(*config)); 1254 1255 ecore_vlan_mac_set_rdata_e1x(sc, o, ECORE_FILTER_VLAN_MAC_PENDING, 1256 cam_offset, add, 1257 elem->cmd_data.vlan_mac.u.vlan_mac.mac, 1258 elem->cmd_data.vlan_mac.u.vlan_mac.vlan, 1259 ETH_VLAN_FILTER_CLASSIFY, config); 1260 } 1261 1262 #define list_next_entry(pos, member) \ 1263 list_entry((pos)->member.next, typeof(*(pos)), member) 1264 1265 /** 1266 * ecore_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element 1267 * 1268 * @sc: device handle 1269 * @p: command parameters 1270 * @ppos: pointer to the cookie 1271 * 1272 * reconfigure next MAC/VLAN/VLAN-MAC element from the 1273 * previously configured elements list. 1274 * 1275 * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken 1276 * into an account 1277 * 1278 * pointer to the cookie - that should be given back in the next call to make 1279 * function handle the next element. If *ppos is set to NULL it will restart the 1280 * iterator. If returned *ppos == NULL this means that the last element has been 1281 * handled. 
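 *
 * A minimal caller sketch (illustrative only; 'p' is assumed to be a prepared
 * ecore_vlan_mac_ramrod_params with the desired ramrod_flags):
 *
 *	struct ecore_vlan_mac_registry_elem *pos = NULL;
 *	int rc;
 *
 *	do {
 *		rc = ecore_vlan_mac_restore(sc, &p, &pos);
 *		if (rc < 0)
 *			break;
 *	} while (pos != NULL);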
1282 * 1283 */ 1284 static int ecore_vlan_mac_restore(struct bxe_softc *sc, 1285 struct ecore_vlan_mac_ramrod_params *p, 1286 struct ecore_vlan_mac_registry_elem **ppos) 1287 { 1288 struct ecore_vlan_mac_registry_elem *pos; 1289 struct ecore_vlan_mac_obj *o = p->vlan_mac_obj; 1290 1291 /* If list is empty - there is nothing to do here */ 1292 if (ECORE_LIST_IS_EMPTY(&o->head)) { 1293 *ppos = NULL; 1294 return 0; 1295 } 1296 1297 /* make a step... */ 1298 if (*ppos == NULL) 1299 *ppos = ECORE_LIST_FIRST_ENTRY(&o->head, 1300 struct ecore_vlan_mac_registry_elem, 1301 link); 1302 else 1303 *ppos = ECORE_LIST_NEXT(*ppos, link, 1304 struct ecore_vlan_mac_registry_elem); 1305 1306 pos = *ppos; 1307 1308 /* If it's the last step - return NULL */ 1309 if (ECORE_LIST_IS_LAST(&pos->link, &o->head)) 1310 *ppos = NULL; 1311 1312 /* Prepare a 'user_req' */ 1313 ECORE_MEMCPY(&p->user_req.u, &pos->u, sizeof(pos->u)); 1314 1315 /* Set the command */ 1316 p->user_req.cmd = ECORE_VLAN_MAC_ADD; 1317 1318 /* Set vlan_mac_flags */ 1319 p->user_req.vlan_mac_flags = pos->vlan_mac_flags; 1320 1321 /* Set a restore bit */ 1322 ECORE_SET_BIT_NA(RAMROD_RESTORE, &p->ramrod_flags); 1323 1324 return ecore_config_vlan_mac(sc, p); 1325 } 1326 1327 /* ecore_exeq_get_mac/ecore_exeq_get_vlan/ecore_exeq_get_vlan_mac return a 1328 * pointer to an element with a specific criteria and NULL if such an element 1329 * hasn't been found. 1330 */ 1331 static struct ecore_exeq_elem *ecore_exeq_get_mac( 1332 struct ecore_exe_queue_obj *o, 1333 struct ecore_exeq_elem *elem) 1334 { 1335 struct ecore_exeq_elem *pos; 1336 struct ecore_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac; 1337 1338 /* Check pending for execution commands */ 1339 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link, 1340 struct ecore_exeq_elem) 1341 if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.mac, data, 1342 sizeof(*data)) && 1343 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd)) 1344 return pos; 1345 1346 return NULL; 1347 } 1348 1349 static struct ecore_exeq_elem *ecore_exeq_get_vlan( 1350 struct ecore_exe_queue_obj *o, 1351 struct ecore_exeq_elem *elem) 1352 { 1353 struct ecore_exeq_elem *pos; 1354 struct ecore_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan; 1355 1356 /* Check pending for execution commands */ 1357 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link, 1358 struct ecore_exeq_elem) 1359 if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.vlan, data, 1360 sizeof(*data)) && 1361 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd)) 1362 return pos; 1363 1364 return NULL; 1365 } 1366 1367 static struct ecore_exeq_elem *ecore_exeq_get_vlan_mac( 1368 struct ecore_exe_queue_obj *o, 1369 struct ecore_exeq_elem *elem) 1370 { 1371 struct ecore_exeq_elem *pos; 1372 struct ecore_vlan_mac_ramrod_data *data = 1373 &elem->cmd_data.vlan_mac.u.vlan_mac; 1374 1375 /* Check pending for execution commands */ 1376 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link, 1377 struct ecore_exeq_elem) 1378 if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.vlan_mac, data, 1379 sizeof(*data)) && 1380 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd)) 1381 return pos; 1382 1383 return NULL; 1384 } 1385 1386 /** 1387 * ecore_validate_vlan_mac_add - check if an ADD command can be executed 1388 * 1389 * @sc: device handle 1390 * @qo: ecore_qable_obj 1391 * @elem: ecore_exeq_elem 1392 * 1393 * Checks that the requested configuration can be added. If yes and if 1394 * requested, consume CAM credit. 
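 * For example, an ADD of a MAC that is already present in the registry, or
 * that already has a pending ADD command queued, fails with ECORE_EXISTS;
 * requests carrying ECORE_DONT_CONSUME_CAM_CREDIT skip the credit accounting.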
1395 * 1396 * The 'validate' is run after the 'optimize'. 1397 * 1398 */ 1399 static inline int ecore_validate_vlan_mac_add(struct bxe_softc *sc, 1400 union ecore_qable_obj *qo, 1401 struct ecore_exeq_elem *elem) 1402 { 1403 struct ecore_vlan_mac_obj *o = &qo->vlan_mac; 1404 struct ecore_exe_queue_obj *exeq = &o->exe_queue; 1405 int rc; 1406 1407 /* Check the registry */ 1408 rc = o->check_add(sc, o, &elem->cmd_data.vlan_mac.u); 1409 if (rc) { 1410 ECORE_MSG(sc, "ADD command is not allowed considering current registry state.\n"); 1411 return rc; 1412 } 1413 1414 /* Check if there is a pending ADD command for this 1415 * MAC/VLAN/VLAN-MAC. Return an error if there is. 1416 */ 1417 if (exeq->get(exeq, elem)) { 1418 ECORE_MSG(sc, "There is a pending ADD command already\n"); 1419 return ECORE_EXISTS; 1420 } 1421 1422 /* TODO: Check the pending MOVE from other objects where this 1423 * object is a destination object. 1424 */ 1425 1426 /* Consume the credit if not requested not to */ 1427 if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT, 1428 &elem->cmd_data.vlan_mac.vlan_mac_flags) || 1429 o->get_credit(o))) 1430 return ECORE_INVAL; 1431 1432 return ECORE_SUCCESS; 1433 } 1434 1435 /** 1436 * ecore_validate_vlan_mac_del - check if the DEL command can be executed 1437 * 1438 * @sc: device handle 1439 * @qo: quable object to check 1440 * @elem: element that needs to be deleted 1441 * 1442 * Checks that the requested configuration can be deleted. If yes and if 1443 * requested, returns a CAM credit. 1444 * 1445 * The 'validate' is run after the 'optimize'. 1446 */ 1447 static inline int ecore_validate_vlan_mac_del(struct bxe_softc *sc, 1448 union ecore_qable_obj *qo, 1449 struct ecore_exeq_elem *elem) 1450 { 1451 struct ecore_vlan_mac_obj *o = &qo->vlan_mac; 1452 struct ecore_vlan_mac_registry_elem *pos; 1453 struct ecore_exe_queue_obj *exeq = &o->exe_queue; 1454 struct ecore_exeq_elem query_elem; 1455 1456 /* If this classification can not be deleted (doesn't exist) 1457 * - return a ECORE_EXIST. 1458 */ 1459 pos = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u); 1460 if (!pos) { 1461 ECORE_MSG(sc, "DEL command is not allowed considering current registry state\n"); 1462 return ECORE_EXISTS; 1463 } 1464 1465 /* Check if there are pending DEL or MOVE commands for this 1466 * MAC/VLAN/VLAN-MAC. Return an error if so. 1467 */ 1468 ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem)); 1469 1470 /* Check for MOVE commands */ 1471 query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_MOVE; 1472 if (exeq->get(exeq, &query_elem)) { 1473 ECORE_ERR("There is a pending MOVE command already\n"); 1474 return ECORE_INVAL; 1475 } 1476 1477 /* Check for DEL commands */ 1478 if (exeq->get(exeq, elem)) { 1479 ECORE_MSG(sc, "There is a pending DEL command already\n"); 1480 return ECORE_EXISTS; 1481 } 1482 1483 /* Return the credit to the credit pool if not requested not to */ 1484 if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT, 1485 &elem->cmd_data.vlan_mac.vlan_mac_flags) || 1486 o->put_credit(o))) { 1487 ECORE_ERR("Failed to return a credit\n"); 1488 return ECORE_INVAL; 1489 } 1490 1491 return ECORE_SUCCESS; 1492 } 1493 1494 /** 1495 * ecore_validate_vlan_mac_move - check if the MOVE command can be executed 1496 * 1497 * @sc: device handle 1498 * @qo: quable object to check (source) 1499 * @elem: element that needs to be moved 1500 * 1501 * Checks that the requested configuration can be moved. If yes and if 1502 * requested, returns a CAM credit. 1503 * 1504 * The 'validate' is run after the 'optimize'. 
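 *
 * Note that a MOVE consumes a CAM credit on the destination object and returns
 * one to the source object, unless ECORE_DONT_CONSUME_CAM_CREDIT_DEST or
 * ECORE_DONT_CONSUME_CAM_CREDIT (respectively) are set in vlan_mac_flags.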
1505 */ 1506 static inline int ecore_validate_vlan_mac_move(struct bxe_softc *sc, 1507 union ecore_qable_obj *qo, 1508 struct ecore_exeq_elem *elem) 1509 { 1510 struct ecore_vlan_mac_obj *src_o = &qo->vlan_mac; 1511 struct ecore_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj; 1512 struct ecore_exeq_elem query_elem; 1513 struct ecore_exe_queue_obj *src_exeq = &src_o->exe_queue; 1514 struct ecore_exe_queue_obj *dest_exeq = &dest_o->exe_queue; 1515 1516 /* Check if we can perform this operation based on the current registry 1517 * state. 1518 */ 1519 if (!src_o->check_move(sc, src_o, dest_o, 1520 &elem->cmd_data.vlan_mac.u)) { 1521 ECORE_MSG(sc, "MOVE command is not allowed considering current registry state\n"); 1522 return ECORE_INVAL; 1523 } 1524 1525 /* Check if there is an already pending DEL or MOVE command for the 1526 * source object or ADD command for a destination object. Return an 1527 * error if so. 1528 */ 1529 ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem)); 1530 1531 /* Check DEL on source */ 1532 query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL; 1533 if (src_exeq->get(src_exeq, &query_elem)) { 1534 ECORE_ERR("There is a pending DEL command on the source queue already\n"); 1535 return ECORE_INVAL; 1536 } 1537 1538 /* Check MOVE on source */ 1539 if (src_exeq->get(src_exeq, elem)) { 1540 ECORE_MSG(sc, "There is a pending MOVE command already\n"); 1541 return ECORE_EXISTS; 1542 } 1543 1544 /* Check ADD on destination */ 1545 query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD; 1546 if (dest_exeq->get(dest_exeq, &query_elem)) { 1547 ECORE_ERR("There is a pending ADD command on the destination queue already\n"); 1548 return ECORE_INVAL; 1549 } 1550 1551 /* Consume the credit if not requested not to */ 1552 if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT_DEST, 1553 &elem->cmd_data.vlan_mac.vlan_mac_flags) || 1554 dest_o->get_credit(dest_o))) 1555 return ECORE_INVAL; 1556 1557 if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT, 1558 &elem->cmd_data.vlan_mac.vlan_mac_flags) || 1559 src_o->put_credit(src_o))) { 1560 /* return the credit taken from dest... 
 */
		dest_o->put_credit(dest_o);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}

static int ecore_validate_vlan_mac(struct bxe_softc *sc,
				   union ecore_qable_obj *qo,
				   struct ecore_exeq_elem *elem)
{
	switch (elem->cmd_data.vlan_mac.cmd) {
	case ECORE_VLAN_MAC_ADD:
		return ecore_validate_vlan_mac_add(sc, qo, elem);
	case ECORE_VLAN_MAC_DEL:
		return ecore_validate_vlan_mac_del(sc, qo, elem);
	case ECORE_VLAN_MAC_MOVE:
		return ecore_validate_vlan_mac_move(sc, qo, elem);
	default:
		return ECORE_INVAL;
	}
}

static int ecore_remove_vlan_mac(struct bxe_softc *sc,
				 union ecore_qable_obj *qo,
				 struct ecore_exeq_elem *elem)
{
	int rc = 0;

	/* If consumption wasn't required, nothing to do */
	if (ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
			   &elem->cmd_data.vlan_mac.vlan_mac_flags))
		return ECORE_SUCCESS;

	switch (elem->cmd_data.vlan_mac.cmd) {
	case ECORE_VLAN_MAC_ADD:
	case ECORE_VLAN_MAC_MOVE:
		rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
		break;
	case ECORE_VLAN_MAC_DEL:
		rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
		break;
	default:
		return ECORE_INVAL;
	}

	if (rc != TRUE)
		return ECORE_INVAL;

	return ECORE_SUCCESS;
}

/**
 * ecore_wait_vlan_mac - passively wait for 5 seconds until all work completes.
 *
 * @sc: device handle
 * @o: ecore_vlan_mac_obj
 *
 */
static int ecore_wait_vlan_mac(struct bxe_softc *sc,
			       struct ecore_vlan_mac_obj *o)
{
	int cnt = 5000, rc;
	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
	struct ecore_raw_obj *raw = &o->raw;

	while (cnt--) {
		/* Wait for the current command to complete */
		rc = raw->wait_comp(sc, raw);
		if (rc)
			return rc;

		/* Wait until there are no pending commands */
		if (!ecore_exe_queue_empty(exeq))
			ECORE_WAIT(sc, 1000);
		else
			return ECORE_SUCCESS;
	}

	return ECORE_TIMEOUT;
}

static int __ecore_vlan_mac_execute_step(struct bxe_softc *sc,
					 struct ecore_vlan_mac_obj *o,
					 unsigned long *ramrod_flags)
{
	int rc = ECORE_SUCCESS;

	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);

	ECORE_MSG(sc, "vlan_mac_execute_step - trying to take writer lock\n");
	rc = __ecore_vlan_mac_h_write_trylock(sc, o);

	if (rc != ECORE_SUCCESS) {
		__ecore_vlan_mac_h_pend(sc, o, *ramrod_flags);

		/* The calling function should not differentiate between this
		 * case and the case in which there is already a pending ramrod.
		 */
		rc = ECORE_PENDING;
	} else {
		rc = ecore_exe_queue_step(sc, &o->exe_queue, ramrod_flags);
	}
	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);

	return rc;
}

/**
 * ecore_complete_vlan_mac - complete one VLAN-MAC ramrod
 *
 * @sc: device handle
 * @o: ecore_vlan_mac_obj
 * @cqe: completion element
 * @ramrod_flags: if RAMROD_CONT is set, the next execution chunk is scheduled
 *
 */
static int ecore_complete_vlan_mac(struct bxe_softc *sc,
				   struct ecore_vlan_mac_obj *o,
				   union event_ring_elem *cqe,
				   unsigned long *ramrod_flags)
{
	struct ecore_raw_obj *r = &o->raw;
	int rc;

	/* Clearing the pending list & raw state should be made
	 * atomically (as execution flow assumes they represent the same)
	 */
	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);

	/* Reset pending list */
__ecore_exe_queue_reset_pending(sc, &o->exe_queue);

	/* Clear pending */
	r->clear_pending(r);

	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);

	/* If ramrod failed this is most likely a SW bug */
	if (cqe->message.error)
		return ECORE_INVAL;

	/* Run the next bulk of pending commands if requested */
	if (ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags)) {
		rc = __ecore_vlan_mac_execute_step(sc, o, ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/* If there is more work to do return PENDING */
	if (!ecore_exe_queue_empty(&o->exe_queue))
		return ECORE_PENDING;

	return ECORE_SUCCESS;
}

/**
 * ecore_optimize_vlan_mac - optimize ADD and DEL commands.
 *
 * @sc:		device handle
 * @o:		ecore_qable_obj
 * @elem:	ecore_exeq_elem
 */
static int ecore_optimize_vlan_mac(struct bxe_softc *sc,
				   union ecore_qable_obj *qo,
				   struct ecore_exeq_elem *elem)
{
	struct ecore_exeq_elem query, *pos;
	struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
	struct ecore_exe_queue_obj *exeq = &o->exe_queue;

	ECORE_MEMCPY(&query, elem, sizeof(query));

	switch (elem->cmd_data.vlan_mac.cmd) {
	case ECORE_VLAN_MAC_ADD:
		query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
		break;
	case ECORE_VLAN_MAC_DEL:
		query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
		break;
	default:
		/* Don't handle anything other than ADD or DEL */
		return 0;
	}

	/* If we found the appropriate element - delete it */
	pos = exeq->get(exeq, &query);
	if (pos) {

		/* Return the credit of the optimized command */
		if (!ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
				    &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
			if ((query.cmd_data.vlan_mac.cmd ==
			     ECORE_VLAN_MAC_ADD) && !o->put_credit(o)) {
				ECORE_ERR("Failed to return the credit for the optimized ADD command\n");
				return ECORE_INVAL;
			} else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
				ECORE_ERR("Failed to recover the credit from the optimized DEL command\n");
				return ECORE_INVAL;
			}
		}

		ECORE_MSG(sc, "Optimizing %s command\n",
			  (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
			  "ADD" : "DEL");

		ECORE_LIST_REMOVE_ENTRY(&pos->link, &exeq->exe_queue);
		ecore_exe_queue_free_elem(sc, pos);
		return 1;
	}

	return 0;
}

/**
 * ecore_vlan_mac_get_registry_elem - prepare a registry element
 *
 * @sc:		device handle
 * @o:
 * @elem:
 * @restore:
 * @re:
 *
 * prepare a registry element according to the current command request.
 */
static inline int ecore_vlan_mac_get_registry_elem(
	struct bxe_softc *sc,
	struct ecore_vlan_mac_obj *o,
	struct ecore_exeq_elem *elem,
	bool restore,
	struct ecore_vlan_mac_registry_elem **re)
{
	enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
	struct ecore_vlan_mac_registry_elem *reg_elem;

	/* Allocate a new registry element if needed. */
	if (!restore &&
	    ((cmd == ECORE_VLAN_MAC_ADD) || (cmd == ECORE_VLAN_MAC_MOVE))) {
		reg_elem = ECORE_ZALLOC(sizeof(*reg_elem), GFP_ATOMIC, sc);
		if (!reg_elem)
			return ECORE_NOMEM;

		/* Get a new CAM offset */
		if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
			/* This shall never happen, because we have checked the
			 * CAM availability in the 'validate'.
			 */
			ECORE_DBG_BREAK_IF(1);
			ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
			return ECORE_INVAL;
		}

		ECORE_MSG(sc, "Got cam offset %d\n", reg_elem->cam_offset);

		/* Set a VLAN-MAC data */
		ECORE_MEMCPY(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
			     sizeof(reg_elem->u));

		/* Copy the flags (needed for DEL and RESTORE flows) */
		reg_elem->vlan_mac_flags =
			elem->cmd_data.vlan_mac.vlan_mac_flags;
	} else /* DEL, RESTORE */
		reg_elem = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u);

	*re = reg_elem;
	return ECORE_SUCCESS;
}

/**
 * ecore_execute_vlan_mac - execute vlan mac command
 *
 * @sc:			device handle
 * @qo:
 * @exe_chunk:
 * @ramrod_flags:
 *
 * go and send a ramrod!
 */
static int ecore_execute_vlan_mac(struct bxe_softc *sc,
				  union ecore_qable_obj *qo,
				  ecore_list_t *exe_chunk,
				  unsigned long *ramrod_flags)
{
	struct ecore_exeq_elem *elem;
	struct ecore_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
	struct ecore_raw_obj *r = &o->raw;
	int rc, idx = 0;
	bool restore = ECORE_TEST_BIT(RAMROD_RESTORE, ramrod_flags);
	bool drv_only = ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags);
	struct ecore_vlan_mac_registry_elem *reg_elem;
	enum ecore_vlan_mac_cmd cmd;

	/* If DRIVER_ONLY execution is requested, cleanup a registry
	 * and exit. Otherwise send a ramrod to FW.
	 */
	if (!drv_only) {
		ECORE_DBG_BREAK_IF(r->check_pending(r));

		/* Set pending */
		r->set_pending(r);

		/* Fill the ramrod data */
		ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
					  struct ecore_exeq_elem) {
			cmd = elem->cmd_data.vlan_mac.cmd;
			/* We will add to the target object in MOVE command, so
			 * change the object for a CAM search.
			 */
			if (cmd == ECORE_VLAN_MAC_MOVE)
				cam_obj = elem->cmd_data.vlan_mac.target_obj;
			else
				cam_obj = o;

			rc = ecore_vlan_mac_get_registry_elem(sc, cam_obj,
							      elem, restore,
							      &reg_elem);
			if (rc)
				goto error_exit;

			ECORE_DBG_BREAK_IF(!reg_elem);

			/* Push a new entry into the registry */
			if (!restore &&
			    ((cmd == ECORE_VLAN_MAC_ADD) ||
			     (cmd == ECORE_VLAN_MAC_MOVE)))
				ECORE_LIST_PUSH_HEAD(&reg_elem->link,
						     &cam_obj->head);

			/* Configure a single command in a ramrod data buffer */
			o->set_one_rule(sc, o, elem, idx,
					reg_elem->cam_offset);

			/* MOVE command consumes 2 entries in the ramrod data */
			if (cmd == ECORE_VLAN_MAC_MOVE)
				idx += 2;
			else
				idx++;
		}

		/*
		 * No need for an explicit memory barrier here as long we would
		 * need to ensure the ordering of writing to the SPQ element
		 * and updating of the SPQ producer which involves a memory
		 * read and we will have to put a full memory barrier there
		 * (inside ecore_sp_post()).
		 */

		rc = ecore_sp_post(sc, o->ramrod_cmd, r->cid,
				   r->rdata_mapping,
				   ETH_CONNECTION_TYPE);
		if (rc)
			goto error_exit;
	}

	/* Now, when we are done with the ramrod - clean up the registry */
	ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
				  struct ecore_exeq_elem) {
		cmd = elem->cmd_data.vlan_mac.cmd;
		if ((cmd == ECORE_VLAN_MAC_DEL) ||
		    (cmd == ECORE_VLAN_MAC_MOVE)) {
			reg_elem = o->check_del(sc, o,
						&elem->cmd_data.vlan_mac.u);

			ECORE_DBG_BREAK_IF(!reg_elem);

			o->put_cam_offset(o, reg_elem->cam_offset);
			ECORE_LIST_REMOVE_ENTRY(&reg_elem->link, &o->head);
			ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
		}
	}

	if (!drv_only)
		return ECORE_PENDING;
	else
		return ECORE_SUCCESS;

error_exit:
	r->clear_pending(r);

	/* Cleanup a registry in case of a failure */
	ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
				  struct ecore_exeq_elem) {
		cmd = elem->cmd_data.vlan_mac.cmd;

		if (cmd == ECORE_VLAN_MAC_MOVE)
			cam_obj = elem->cmd_data.vlan_mac.target_obj;
		else
			cam_obj = o;

		/* Delete all newly added above entries */
		if (!restore &&
		    ((cmd == ECORE_VLAN_MAC_ADD) ||
		     (cmd == ECORE_VLAN_MAC_MOVE))) {
			reg_elem = o->check_del(sc, cam_obj,
						&elem->cmd_data.vlan_mac.u);
			if (reg_elem) {
				ECORE_LIST_REMOVE_ENTRY(&reg_elem->link,
							&cam_obj->head);
				ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
			}
		}
	}

	return rc;
}

static inline int ecore_vlan_mac_push_new_cmd(
	struct bxe_softc *sc,
	struct ecore_vlan_mac_ramrod_params *p)
{
	struct ecore_exeq_elem *elem;
	struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
	bool restore = ECORE_TEST_BIT(RAMROD_RESTORE, &p->ramrod_flags);

	/* Allocate the execution queue element */
	elem = ecore_exe_queue_alloc_elem(sc);
	if (!elem)
		return ECORE_NOMEM;

	/* Set the command 'length' */
	switch (p->user_req.cmd) {
	case ECORE_VLAN_MAC_MOVE:
		elem->cmd_len = 2;
		break;
	default:
		elem->cmd_len = 1;
	}

	/* Fill the object specific info */
	ECORE_MEMCPY(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));

	/* Try to add a new command to the pending list */
	return ecore_exe_queue_add(sc, &o->exe_queue, elem, restore);
}

/**
 * ecore_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
 *
 * @sc:	  device handle
 * @p:
 *
 */
int ecore_config_vlan_mac(struct bxe_softc *sc,
			  struct ecore_vlan_mac_ramrod_params *p)
{
	int rc = ECORE_SUCCESS;
	struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
	unsigned long *ramrod_flags = &p->ramrod_flags;
	bool cont = ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags);
	struct ecore_raw_obj *raw = &o->raw;

	/*
	 * Add new elements to the execution list for commands that require it.
	 */
	if (!cont) {
		rc = ecore_vlan_mac_push_new_cmd(sc, p);
		if (rc)
			return rc;
	}

	/* If nothing will be executed further in this iteration we want to
	 * return PENDING if there are pending commands
	 */
	if (!ecore_exe_queue_empty(&o->exe_queue))
		rc = ECORE_PENDING;

	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
		ECORE_MSG(sc, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
		raw->clear_pending(raw);
	}

	/* Execute commands if required */
	if (cont || ECORE_TEST_BIT(RAMROD_EXEC, ramrod_flags) ||
	    ECORE_TEST_BIT(RAMROD_COMP_WAIT, ramrod_flags)) {
		rc = __ecore_vlan_mac_execute_step(sc, p->vlan_mac_obj,
						   &p->ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
	 * then the user wants to wait until the last command is done.
	 */
	if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
		/* Wait maximum for the current exe_queue length iterations plus
		 * one (for the current pending command).
		 */
		int max_iterations = ecore_exe_queue_length(&o->exe_queue) + 1;

		while (!ecore_exe_queue_empty(&o->exe_queue) &&
		       max_iterations--) {

			/* Wait for the current command to complete */
			rc = raw->wait_comp(sc, raw);
			if (rc)
				return rc;

			/* Make a next step */
			rc = __ecore_vlan_mac_execute_step(sc,
							   p->vlan_mac_obj,
							   &p->ramrod_flags);
			if (rc < 0)
				return rc;
		}

		return ECORE_SUCCESS;
	}

	return rc;
}

/**
 * ecore_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
 *
 * @sc:			device handle
 * @o:
 * @vlan_mac_flags:
 * @ramrod_flags:	execution flags to be used for this deletion
 *
 * Returns 0 if the last operation has completed successfully and there are no
 * more elements left, positive value if the last operation has completed
 * successfully and there are more previously configured elements, negative
 * value if the current operation has failed.
 */
static int ecore_vlan_mac_del_all(struct bxe_softc *sc,
				  struct ecore_vlan_mac_obj *o,
				  unsigned long *vlan_mac_flags,
				  unsigned long *ramrod_flags)
{
	struct ecore_vlan_mac_registry_elem *pos = NULL;
	struct ecore_vlan_mac_ramrod_params p;
	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
	struct ecore_exeq_elem *exeq_pos, *exeq_pos_n;
	int read_lock;
	int rc = 0;

	/* Clear pending commands first */

	ECORE_SPIN_LOCK_BH(&exeq->lock);

	ECORE_LIST_FOR_EACH_ENTRY_SAFE(exeq_pos, exeq_pos_n,
				       &exeq->exe_queue, link,
				       struct ecore_exeq_elem) {
		if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
		    *vlan_mac_flags) {
			rc = exeq->remove(sc, exeq->owner, exeq_pos);
			if (rc) {
				ECORE_ERR("Failed to remove command\n");
				ECORE_SPIN_UNLOCK_BH(&exeq->lock);
				return rc;
			}
			ECORE_LIST_REMOVE_ENTRY(&exeq_pos->link,
						&exeq->exe_queue);
			ecore_exe_queue_free_elem(sc, exeq_pos);
		}
	}

	ECORE_SPIN_UNLOCK_BH(&exeq->lock);

	/* Prepare a command request */
	ECORE_MEMSET(&p, 0, sizeof(p));
	p.vlan_mac_obj = o;
	p.ramrod_flags = *ramrod_flags;
	p.user_req.cmd = ECORE_VLAN_MAC_DEL;

	/* Add all but the last VLAN-MAC to the execution queue without actually
	 * executing anything.
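	 * The RAMROD_COMP_WAIT, RAMROD_EXEC and RAMROD_CONT bits are cleared
	 * below so that each ecore_config_vlan_mac() call in the loop only
	 * queues a DEL command; the final call with RAMROD_CONT set (and the
	 * caller's ramrod_flags restored) then executes the whole batch.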
2128 */ 2129 ECORE_CLEAR_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags); 2130 ECORE_CLEAR_BIT_NA(RAMROD_EXEC, &p.ramrod_flags); 2131 ECORE_CLEAR_BIT_NA(RAMROD_CONT, &p.ramrod_flags); 2132 2133 ECORE_MSG(sc, "vlan_mac_del_all -- taking vlan_mac_lock (reader)\n"); 2134 read_lock = ecore_vlan_mac_h_read_lock(sc, o); 2135 if (read_lock != ECORE_SUCCESS) 2136 return read_lock; 2137 2138 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link, 2139 struct ecore_vlan_mac_registry_elem) { 2140 if (pos->vlan_mac_flags == *vlan_mac_flags) { 2141 p.user_req.vlan_mac_flags = pos->vlan_mac_flags; 2142 ECORE_MEMCPY(&p.user_req.u, &pos->u, sizeof(pos->u)); 2143 rc = ecore_config_vlan_mac(sc, &p); 2144 if (rc < 0) { 2145 ECORE_ERR("Failed to add a new DEL command\n"); 2146 ecore_vlan_mac_h_read_unlock(sc, o); 2147 return rc; 2148 } 2149 } 2150 } 2151 2152 ECORE_MSG(sc, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)\n"); 2153 ecore_vlan_mac_h_read_unlock(sc, o); 2154 2155 p.ramrod_flags = *ramrod_flags; 2156 ECORE_SET_BIT_NA(RAMROD_CONT, &p.ramrod_flags); 2157 2158 return ecore_config_vlan_mac(sc, &p); 2159 } 2160 2161 static inline void ecore_init_raw_obj(struct ecore_raw_obj *raw, uint8_t cl_id, 2162 uint32_t cid, uint8_t func_id, void *rdata, ecore_dma_addr_t rdata_mapping, int state, 2163 unsigned long *pstate, ecore_obj_type type) 2164 { 2165 raw->func_id = func_id; 2166 raw->cid = cid; 2167 raw->cl_id = cl_id; 2168 raw->rdata = rdata; 2169 raw->rdata_mapping = rdata_mapping; 2170 raw->state = state; 2171 raw->pstate = pstate; 2172 raw->obj_type = type; 2173 raw->check_pending = ecore_raw_check_pending; 2174 raw->clear_pending = ecore_raw_clear_pending; 2175 raw->set_pending = ecore_raw_set_pending; 2176 raw->wait_comp = ecore_raw_wait; 2177 } 2178 2179 static inline void ecore_init_vlan_mac_common(struct ecore_vlan_mac_obj *o, 2180 uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata, ecore_dma_addr_t rdata_mapping, 2181 int state, unsigned long *pstate, ecore_obj_type type, 2182 struct ecore_credit_pool_obj *macs_pool, 2183 struct ecore_credit_pool_obj *vlans_pool) 2184 { 2185 ECORE_LIST_INIT(&o->head); 2186 o->head_reader = 0; 2187 o->head_exe_request = FALSE; 2188 o->saved_ramrod_flags = 0; 2189 2190 o->macs_pool = macs_pool; 2191 o->vlans_pool = vlans_pool; 2192 2193 o->delete_all = ecore_vlan_mac_del_all; 2194 o->restore = ecore_vlan_mac_restore; 2195 o->complete = ecore_complete_vlan_mac; 2196 o->wait = ecore_wait_vlan_mac; 2197 2198 ecore_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping, 2199 state, pstate, type); 2200 } 2201 2202 void ecore_init_mac_obj(struct bxe_softc *sc, 2203 struct ecore_vlan_mac_obj *mac_obj, 2204 uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata, 2205 ecore_dma_addr_t rdata_mapping, int state, 2206 unsigned long *pstate, ecore_obj_type type, 2207 struct ecore_credit_pool_obj *macs_pool) 2208 { 2209 union ecore_qable_obj *qable_obj = (union ecore_qable_obj *)mac_obj; 2210 2211 ecore_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata, 2212 rdata_mapping, state, pstate, type, 2213 macs_pool, NULL); 2214 2215 /* CAM credit pool handling */ 2216 mac_obj->get_credit = ecore_get_credit_mac; 2217 mac_obj->put_credit = ecore_put_credit_mac; 2218 mac_obj->get_cam_offset = ecore_get_cam_offset_mac; 2219 mac_obj->put_cam_offset = ecore_put_cam_offset_mac; 2220 2221 if (CHIP_IS_E1x(sc)) { 2222 mac_obj->set_one_rule = ecore_set_one_mac_e1x; 2223 mac_obj->check_del = ecore_check_mac_del; 2224 mac_obj->check_add = ecore_check_mac_add; 2225 mac_obj->check_move 
= ecore_check_move_always_err; 2226 mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC; 2227 2228 /* Exe Queue */ 2229 ecore_exe_queue_init(sc, 2230 &mac_obj->exe_queue, 1, qable_obj, 2231 ecore_validate_vlan_mac, 2232 ecore_remove_vlan_mac, 2233 ecore_optimize_vlan_mac, 2234 ecore_execute_vlan_mac, 2235 ecore_exeq_get_mac); 2236 } else { 2237 mac_obj->set_one_rule = ecore_set_one_mac_e2; 2238 mac_obj->check_del = ecore_check_mac_del; 2239 mac_obj->check_add = ecore_check_mac_add; 2240 mac_obj->check_move = ecore_check_move; 2241 mac_obj->ramrod_cmd = 2242 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; 2243 mac_obj->get_n_elements = ecore_get_n_elements; 2244 2245 /* Exe Queue */ 2246 ecore_exe_queue_init(sc, 2247 &mac_obj->exe_queue, CLASSIFY_RULES_COUNT, 2248 qable_obj, ecore_validate_vlan_mac, 2249 ecore_remove_vlan_mac, 2250 ecore_optimize_vlan_mac, 2251 ecore_execute_vlan_mac, 2252 ecore_exeq_get_mac); 2253 } 2254 } 2255 2256 void ecore_init_vlan_obj(struct bxe_softc *sc, 2257 struct ecore_vlan_mac_obj *vlan_obj, 2258 uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata, 2259 ecore_dma_addr_t rdata_mapping, int state, 2260 unsigned long *pstate, ecore_obj_type type, 2261 struct ecore_credit_pool_obj *vlans_pool) 2262 { 2263 union ecore_qable_obj *qable_obj = (union ecore_qable_obj *)vlan_obj; 2264 2265 ecore_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata, 2266 rdata_mapping, state, pstate, type, NULL, 2267 vlans_pool); 2268 2269 vlan_obj->get_credit = ecore_get_credit_vlan; 2270 vlan_obj->put_credit = ecore_put_credit_vlan; 2271 vlan_obj->get_cam_offset = ecore_get_cam_offset_vlan; 2272 vlan_obj->put_cam_offset = ecore_put_cam_offset_vlan; 2273 2274 if (CHIP_IS_E1x(sc)) { 2275 ECORE_ERR("Do not support chips others than E2 and newer\n"); 2276 ECORE_BUG(); 2277 } else { 2278 vlan_obj->set_one_rule = ecore_set_one_vlan_e2; 2279 vlan_obj->check_del = ecore_check_vlan_del; 2280 vlan_obj->check_add = ecore_check_vlan_add; 2281 vlan_obj->check_move = ecore_check_move; 2282 vlan_obj->ramrod_cmd = 2283 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; 2284 vlan_obj->get_n_elements = ecore_get_n_elements; 2285 2286 /* Exe Queue */ 2287 ecore_exe_queue_init(sc, 2288 &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT, 2289 qable_obj, ecore_validate_vlan_mac, 2290 ecore_remove_vlan_mac, 2291 ecore_optimize_vlan_mac, 2292 ecore_execute_vlan_mac, 2293 ecore_exeq_get_vlan); 2294 } 2295 } 2296 2297 void ecore_init_vlan_mac_obj(struct bxe_softc *sc, 2298 struct ecore_vlan_mac_obj *vlan_mac_obj, 2299 uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata, 2300 ecore_dma_addr_t rdata_mapping, int state, 2301 unsigned long *pstate, ecore_obj_type type, 2302 struct ecore_credit_pool_obj *macs_pool, 2303 struct ecore_credit_pool_obj *vlans_pool) 2304 { 2305 union ecore_qable_obj *qable_obj = 2306 (union ecore_qable_obj *)vlan_mac_obj; 2307 2308 ecore_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata, 2309 rdata_mapping, state, pstate, type, 2310 macs_pool, vlans_pool); 2311 2312 /* CAM pool handling */ 2313 vlan_mac_obj->get_credit = ecore_get_credit_vlan_mac; 2314 vlan_mac_obj->put_credit = ecore_put_credit_vlan_mac; 2315 /* CAM offset is relevant for 57710 and 57711 chips only which have a 2316 * single CAM for both MACs and VLAN-MAC pairs. So the offset 2317 * will be taken from MACs' pool object only. 
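	 * (That is why get_cam_offset/put_cam_offset below are wired to the
	 * MAC pool helpers even for the combined VLAN-MAC object.)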
2318 */ 2319 vlan_mac_obj->get_cam_offset = ecore_get_cam_offset_mac; 2320 vlan_mac_obj->put_cam_offset = ecore_put_cam_offset_mac; 2321 2322 if (CHIP_IS_E1(sc)) { 2323 ECORE_ERR("Do not support chips others than E2\n"); 2324 ECORE_BUG(); 2325 } else if (CHIP_IS_E1H(sc)) { 2326 vlan_mac_obj->set_one_rule = ecore_set_one_vlan_mac_e1h; 2327 vlan_mac_obj->check_del = ecore_check_vlan_mac_del; 2328 vlan_mac_obj->check_add = ecore_check_vlan_mac_add; 2329 vlan_mac_obj->check_move = ecore_check_move_always_err; 2330 vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC; 2331 2332 /* Exe Queue */ 2333 ecore_exe_queue_init(sc, 2334 &vlan_mac_obj->exe_queue, 1, qable_obj, 2335 ecore_validate_vlan_mac, 2336 ecore_remove_vlan_mac, 2337 ecore_optimize_vlan_mac, 2338 ecore_execute_vlan_mac, 2339 ecore_exeq_get_vlan_mac); 2340 } else { 2341 vlan_mac_obj->set_one_rule = ecore_set_one_vlan_mac_e2; 2342 vlan_mac_obj->check_del = ecore_check_vlan_mac_del; 2343 vlan_mac_obj->check_add = ecore_check_vlan_mac_add; 2344 vlan_mac_obj->check_move = ecore_check_move; 2345 vlan_mac_obj->ramrod_cmd = 2346 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; 2347 2348 /* Exe Queue */ 2349 ecore_exe_queue_init(sc, 2350 &vlan_mac_obj->exe_queue, 2351 CLASSIFY_RULES_COUNT, 2352 qable_obj, ecore_validate_vlan_mac, 2353 ecore_remove_vlan_mac, 2354 ecore_optimize_vlan_mac, 2355 ecore_execute_vlan_mac, 2356 ecore_exeq_get_vlan_mac); 2357 } 2358 } 2359 2360 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ 2361 static inline void __storm_memset_mac_filters(struct bxe_softc *sc, 2362 struct tstorm_eth_mac_filter_config *mac_filters, 2363 uint16_t pf_id) 2364 { 2365 size_t size = sizeof(struct tstorm_eth_mac_filter_config); 2366 2367 uint32_t addr = BAR_TSTRORM_INTMEM + 2368 TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id); 2369 2370 ecore_storm_memset_struct(sc, addr, size, (uint32_t *)mac_filters); 2371 } 2372 2373 static int ecore_set_rx_mode_e1x(struct bxe_softc *sc, 2374 struct ecore_rx_mode_ramrod_params *p) 2375 { 2376 /* update the sc MAC filter structure */ 2377 uint32_t mask = (1 << p->cl_id); 2378 2379 struct tstorm_eth_mac_filter_config *mac_filters = 2380 (struct tstorm_eth_mac_filter_config *)p->rdata; 2381 2382 /* initial setting is drop-all */ 2383 uint8_t drop_all_ucast = 1, drop_all_mcast = 1; 2384 uint8_t accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0; 2385 uint8_t unmatched_unicast = 0; 2386 2387 /* In e1x there we only take into account rx accept flag since tx switching 2388 * isn't enabled. */ 2389 if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, &p->rx_accept_flags)) 2390 /* accept matched ucast */ 2391 drop_all_ucast = 0; 2392 2393 if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, &p->rx_accept_flags)) 2394 /* accept matched mcast */ 2395 drop_all_mcast = 0; 2396 2397 if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) { 2398 /* accept all mcast */ 2399 drop_all_ucast = 0; 2400 accp_all_ucast = 1; 2401 } 2402 if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) { 2403 /* accept all mcast */ 2404 drop_all_mcast = 0; 2405 accp_all_mcast = 1; 2406 } 2407 if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, &p->rx_accept_flags)) 2408 /* accept (all) bcast */ 2409 accp_all_bcast = 1; 2410 if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, &p->rx_accept_flags)) 2411 /* accept unmatched unicasts */ 2412 unmatched_unicast = 1; 2413 2414 mac_filters->ucast_drop_all = drop_all_ucast ? 
2415 mac_filters->ucast_drop_all | mask : 2416 mac_filters->ucast_drop_all & ~mask; 2417 2418 mac_filters->mcast_drop_all = drop_all_mcast ? 2419 mac_filters->mcast_drop_all | mask : 2420 mac_filters->mcast_drop_all & ~mask; 2421 2422 mac_filters->ucast_accept_all = accp_all_ucast ? 2423 mac_filters->ucast_accept_all | mask : 2424 mac_filters->ucast_accept_all & ~mask; 2425 2426 mac_filters->mcast_accept_all = accp_all_mcast ? 2427 mac_filters->mcast_accept_all | mask : 2428 mac_filters->mcast_accept_all & ~mask; 2429 2430 mac_filters->bcast_accept_all = accp_all_bcast ? 2431 mac_filters->bcast_accept_all | mask : 2432 mac_filters->bcast_accept_all & ~mask; 2433 2434 mac_filters->unmatched_unicast = unmatched_unicast ? 2435 mac_filters->unmatched_unicast | mask : 2436 mac_filters->unmatched_unicast & ~mask; 2437 2438 ECORE_MSG(sc, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n" 2439 "accp_mcast 0x%x\naccp_bcast 0x%x\n", 2440 mac_filters->ucast_drop_all, mac_filters->mcast_drop_all, 2441 mac_filters->ucast_accept_all, mac_filters->mcast_accept_all, 2442 mac_filters->bcast_accept_all); 2443 2444 /* write the MAC filter structure*/ 2445 __storm_memset_mac_filters(sc, mac_filters, p->func_id); 2446 2447 /* The operation is completed */ 2448 ECORE_CLEAR_BIT(p->state, p->pstate); 2449 ECORE_SMP_MB_AFTER_CLEAR_BIT(); 2450 2451 return ECORE_SUCCESS; 2452 } 2453 2454 /* Setup ramrod data */ 2455 static inline void ecore_rx_mode_set_rdata_hdr_e2(uint32_t cid, 2456 struct eth_classify_header *hdr, 2457 uint8_t rule_cnt) 2458 { 2459 hdr->echo = ECORE_CPU_TO_LE32(cid); 2460 hdr->rule_cnt = rule_cnt; 2461 } 2462 2463 static inline void ecore_rx_mode_set_cmd_state_e2(struct bxe_softc *sc, 2464 unsigned long *accept_flags, 2465 struct eth_filter_rules_cmd *cmd, 2466 bool clear_accept_all) 2467 { 2468 uint16_t state; 2469 2470 /* start with 'drop-all' */ 2471 state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL | 2472 ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; 2473 2474 if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, accept_flags)) 2475 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; 2476 2477 if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, accept_flags)) 2478 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; 2479 2480 if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, accept_flags)) { 2481 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; 2482 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL; 2483 } 2484 2485 if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, accept_flags)) { 2486 state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL; 2487 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; 2488 } 2489 if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, accept_flags)) 2490 state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL; 2491 2492 if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, accept_flags)) { 2493 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; 2494 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED; 2495 } 2496 if (ECORE_TEST_BIT(ECORE_ACCEPT_ANY_VLAN, accept_flags)) 2497 state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN; 2498 2499 /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */ 2500 if (clear_accept_all) { 2501 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL; 2502 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL; 2503 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL; 2504 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED; 2505 } 2506 2507 cmd->state = ECORE_CPU_TO_LE16(state); 2508 } 2509 2510 static int ecore_set_rx_mode_e2(struct bxe_softc *sc, 2511 struct ecore_rx_mode_ramrod_params *p) 2512 { 2513 struct eth_filter_rules_ramrod_data *data = p->rdata; 2514 int 
rc; 2515 uint8_t rule_idx = 0; 2516 2517 /* Reset the ramrod data buffer */ 2518 ECORE_MEMSET(data, 0, sizeof(*data)); 2519 2520 /* Setup ramrod data */ 2521 2522 /* Tx (internal switching) */ 2523 if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) { 2524 data->rules[rule_idx].client_id = p->cl_id; 2525 data->rules[rule_idx].func_id = p->func_id; 2526 2527 data->rules[rule_idx].cmd_general_data = 2528 ETH_FILTER_RULES_CMD_TX_CMD; 2529 2530 ecore_rx_mode_set_cmd_state_e2(sc, &p->tx_accept_flags, 2531 &(data->rules[rule_idx++]), 2532 FALSE); 2533 } 2534 2535 /* Rx */ 2536 if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) { 2537 data->rules[rule_idx].client_id = p->cl_id; 2538 data->rules[rule_idx].func_id = p->func_id; 2539 2540 data->rules[rule_idx].cmd_general_data = 2541 ETH_FILTER_RULES_CMD_RX_CMD; 2542 2543 ecore_rx_mode_set_cmd_state_e2(sc, &p->rx_accept_flags, 2544 &(data->rules[rule_idx++]), 2545 FALSE); 2546 } 2547 2548 /* If FCoE Queue configuration has been requested configure the Rx and 2549 * internal switching modes for this queue in separate rules. 2550 * 2551 * FCoE queue shell never be set to ACCEPT_ALL packets of any sort: 2552 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED. 2553 */ 2554 if (ECORE_TEST_BIT(ECORE_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) { 2555 /* Tx (internal switching) */ 2556 if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) { 2557 data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc); 2558 data->rules[rule_idx].func_id = p->func_id; 2559 2560 data->rules[rule_idx].cmd_general_data = 2561 ETH_FILTER_RULES_CMD_TX_CMD; 2562 2563 ecore_rx_mode_set_cmd_state_e2(sc, &p->tx_accept_flags, 2564 &(data->rules[rule_idx]), 2565 TRUE); 2566 rule_idx++; 2567 } 2568 2569 /* Rx */ 2570 if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) { 2571 data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc); 2572 data->rules[rule_idx].func_id = p->func_id; 2573 2574 data->rules[rule_idx].cmd_general_data = 2575 ETH_FILTER_RULES_CMD_RX_CMD; 2576 2577 ecore_rx_mode_set_cmd_state_e2(sc, &p->rx_accept_flags, 2578 &(data->rules[rule_idx]), 2579 TRUE); 2580 rule_idx++; 2581 } 2582 } 2583 2584 /* Set the ramrod header (most importantly - number of rules to 2585 * configure). 2586 */ 2587 ecore_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx); 2588 2589 ECORE_MSG(sc, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n", 2590 data->header.rule_cnt, p->rx_accept_flags, 2591 p->tx_accept_flags); 2592 2593 /* No need for an explicit memory barrier here as long we would 2594 * need to ensure the ordering of writing to the SPQ element 2595 * and updating of the SPQ producer which involves a memory 2596 * read and we will have to put a full memory barrier there 2597 * (inside ecore_sp_post()). 
2598 */ 2599 2600 /* Send a ramrod */ 2601 rc = ecore_sp_post(sc, 2602 RAMROD_CMD_ID_ETH_FILTER_RULES, 2603 p->cid, 2604 p->rdata_mapping, 2605 ETH_CONNECTION_TYPE); 2606 if (rc) 2607 return rc; 2608 2609 /* Ramrod completion is pending */ 2610 return ECORE_PENDING; 2611 } 2612 2613 static int ecore_wait_rx_mode_comp_e2(struct bxe_softc *sc, 2614 struct ecore_rx_mode_ramrod_params *p) 2615 { 2616 return ecore_state_wait(sc, p->state, p->pstate); 2617 } 2618 2619 static int ecore_empty_rx_mode_wait(struct bxe_softc *sc, 2620 struct ecore_rx_mode_ramrod_params *p) 2621 { 2622 /* Do nothing */ 2623 return ECORE_SUCCESS; 2624 } 2625 2626 int ecore_config_rx_mode(struct bxe_softc *sc, 2627 struct ecore_rx_mode_ramrod_params *p) 2628 { 2629 int rc; 2630 2631 /* Configure the new classification in the chip */ 2632 rc = p->rx_mode_obj->config_rx_mode(sc, p); 2633 if (rc < 0) 2634 return rc; 2635 2636 /* Wait for a ramrod completion if was requested */ 2637 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) { 2638 rc = p->rx_mode_obj->wait_comp(sc, p); 2639 if (rc) 2640 return rc; 2641 } 2642 2643 return rc; 2644 } 2645 2646 void ecore_init_rx_mode_obj(struct bxe_softc *sc, 2647 struct ecore_rx_mode_obj *o) 2648 { 2649 if (CHIP_IS_E1x(sc)) { 2650 o->wait_comp = ecore_empty_rx_mode_wait; 2651 o->config_rx_mode = ecore_set_rx_mode_e1x; 2652 } else { 2653 o->wait_comp = ecore_wait_rx_mode_comp_e2; 2654 o->config_rx_mode = ecore_set_rx_mode_e2; 2655 } 2656 } 2657 2658 /********************* Multicast verbs: SET, CLEAR ****************************/ 2659 static inline uint8_t ecore_mcast_bin_from_mac(uint8_t *mac) 2660 { 2661 return (ECORE_CRC32_LE(0, mac, ETH_ALEN) >> 24) & 0xff; 2662 } 2663 2664 struct ecore_mcast_mac_elem { 2665 ecore_list_entry_t link; 2666 uint8_t mac[ETH_ALEN]; 2667 uint8_t pad[2]; /* For a natural alignment of the following buffer */ 2668 }; 2669 2670 struct ecore_pending_mcast_cmd { 2671 ecore_list_entry_t link; 2672 int type; /* ECORE_MCAST_CMD_X */ 2673 union { 2674 ecore_list_t macs_head; 2675 uint32_t macs_num; /* Needed for DEL command */ 2676 int next_bin; /* Needed for RESTORE flow with aprox match */ 2677 } data; 2678 2679 bool done; /* set to TRUE, when the command has been handled, 2680 * practically used in 57712 handling only, where one pending 2681 * command may be handled in a few operations. As long as for 2682 * other chips every operation handling is completed in a 2683 * single ramrod, there is no need to utilize this field. 2684 */ 2685 }; 2686 2687 static int ecore_mcast_wait(struct bxe_softc *sc, 2688 struct ecore_mcast_obj *o) 2689 { 2690 if (ecore_state_wait(sc, o->sched_state, o->raw.pstate) || 2691 o->raw.wait_comp(sc, &o->raw)) 2692 return ECORE_TIMEOUT; 2693 2694 return ECORE_SUCCESS; 2695 } 2696 2697 static int ecore_mcast_enqueue_cmd(struct bxe_softc *sc, 2698 struct ecore_mcast_obj *o, 2699 struct ecore_mcast_ramrod_params *p, 2700 enum ecore_mcast_cmd cmd) 2701 { 2702 int total_sz; 2703 struct ecore_pending_mcast_cmd *new_cmd; 2704 struct ecore_mcast_mac_elem *cur_mac = NULL; 2705 struct ecore_mcast_list_elem *pos; 2706 int macs_list_len = ((cmd == ECORE_MCAST_CMD_ADD) ? 
2707 p->mcast_list_len : 0); 2708 2709 /* If the command is empty ("handle pending commands only"), break */ 2710 if (!p->mcast_list_len) 2711 return ECORE_SUCCESS; 2712 2713 total_sz = sizeof(*new_cmd) + 2714 macs_list_len * sizeof(struct ecore_mcast_mac_elem); 2715 2716 /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */ 2717 new_cmd = ECORE_ZALLOC(total_sz, GFP_ATOMIC, sc); 2718 2719 if (!new_cmd) 2720 return ECORE_NOMEM; 2721 2722 ECORE_MSG(sc, "About to enqueue a new %d command. macs_list_len=%d\n", 2723 cmd, macs_list_len); 2724 2725 ECORE_LIST_INIT(&new_cmd->data.macs_head); 2726 2727 new_cmd->type = cmd; 2728 new_cmd->done = FALSE; 2729 2730 switch (cmd) { 2731 case ECORE_MCAST_CMD_ADD: 2732 cur_mac = (struct ecore_mcast_mac_elem *) 2733 ((uint8_t *)new_cmd + sizeof(*new_cmd)); 2734 2735 /* Push the MACs of the current command into the pending command 2736 * MACs list: FIFO 2737 */ 2738 ECORE_LIST_FOR_EACH_ENTRY(pos, &p->mcast_list, link, 2739 struct ecore_mcast_list_elem) { 2740 ECORE_MEMCPY(cur_mac->mac, pos->mac, ETH_ALEN); 2741 ECORE_LIST_PUSH_TAIL(&cur_mac->link, 2742 &new_cmd->data.macs_head); 2743 cur_mac++; 2744 } 2745 2746 break; 2747 2748 case ECORE_MCAST_CMD_DEL: 2749 new_cmd->data.macs_num = p->mcast_list_len; 2750 break; 2751 2752 case ECORE_MCAST_CMD_RESTORE: 2753 new_cmd->data.next_bin = 0; 2754 break; 2755 2756 default: 2757 ECORE_FREE(sc, new_cmd, total_sz); 2758 ECORE_ERR("Unknown command: %d\n", cmd); 2759 return ECORE_INVAL; 2760 } 2761 2762 /* Push the new pending command to the tail of the pending list: FIFO */ 2763 ECORE_LIST_PUSH_TAIL(&new_cmd->link, &o->pending_cmds_head); 2764 2765 o->set_sched(o); 2766 2767 return ECORE_PENDING; 2768 } 2769 2770 /** 2771 * ecore_mcast_get_next_bin - get the next set bin (index) 2772 * 2773 * @o: 2774 * @last: index to start looking from (including) 2775 * 2776 * returns the next found (set) bin or a negative value if none is found. 2777 */ 2778 static inline int ecore_mcast_get_next_bin(struct ecore_mcast_obj *o, int last) 2779 { 2780 int i, j, inner_start = last % BIT_VEC64_ELEM_SZ; 2781 2782 for (i = last / BIT_VEC64_ELEM_SZ; i < ECORE_MCAST_VEC_SZ; i++) { 2783 if (o->registry.aprox_match.vec[i]) 2784 for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) { 2785 int cur_bit = j + BIT_VEC64_ELEM_SZ * i; 2786 if (BIT_VEC64_TEST_BIT(o->registry.aprox_match. 
2787 vec, cur_bit)) { 2788 return cur_bit; 2789 } 2790 } 2791 inner_start = 0; 2792 } 2793 2794 /* None found */ 2795 return -1; 2796 } 2797 2798 /** 2799 * ecore_mcast_clear_first_bin - find the first set bin and clear it 2800 * 2801 * @o: 2802 * 2803 * returns the index of the found bin or -1 if none is found 2804 */ 2805 static inline int ecore_mcast_clear_first_bin(struct ecore_mcast_obj *o) 2806 { 2807 int cur_bit = ecore_mcast_get_next_bin(o, 0); 2808 2809 if (cur_bit >= 0) 2810 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit); 2811 2812 return cur_bit; 2813 } 2814 2815 static inline uint8_t ecore_mcast_get_rx_tx_flag(struct ecore_mcast_obj *o) 2816 { 2817 struct ecore_raw_obj *raw = &o->raw; 2818 uint8_t rx_tx_flag = 0; 2819 2820 if ((raw->obj_type == ECORE_OBJ_TYPE_TX) || 2821 (raw->obj_type == ECORE_OBJ_TYPE_RX_TX)) 2822 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD; 2823 2824 if ((raw->obj_type == ECORE_OBJ_TYPE_RX) || 2825 (raw->obj_type == ECORE_OBJ_TYPE_RX_TX)) 2826 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD; 2827 2828 return rx_tx_flag; 2829 } 2830 2831 static void ecore_mcast_set_one_rule_e2(struct bxe_softc *sc, 2832 struct ecore_mcast_obj *o, int idx, 2833 union ecore_mcast_config_data *cfg_data, 2834 enum ecore_mcast_cmd cmd) 2835 { 2836 struct ecore_raw_obj *r = &o->raw; 2837 struct eth_multicast_rules_ramrod_data *data = 2838 (struct eth_multicast_rules_ramrod_data *)(r->rdata); 2839 uint8_t func_id = r->func_id; 2840 uint8_t rx_tx_add_flag = ecore_mcast_get_rx_tx_flag(o); 2841 int bin; 2842 2843 if ((cmd == ECORE_MCAST_CMD_ADD) || (cmd == ECORE_MCAST_CMD_RESTORE)) 2844 rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD; 2845 2846 data->rules[idx].cmd_general_data |= rx_tx_add_flag; 2847 2848 /* Get a bin and update a bins' vector */ 2849 switch (cmd) { 2850 case ECORE_MCAST_CMD_ADD: 2851 bin = ecore_mcast_bin_from_mac(cfg_data->mac); 2852 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin); 2853 break; 2854 2855 case ECORE_MCAST_CMD_DEL: 2856 /* If there were no more bins to clear 2857 * (ecore_mcast_clear_first_bin() returns -1) then we would 2858 * clear any (0xff) bin. 2859 * See ecore_mcast_validate_e2() for explanation when it may 2860 * happen. 2861 */ 2862 bin = ecore_mcast_clear_first_bin(o); 2863 break; 2864 2865 case ECORE_MCAST_CMD_RESTORE: 2866 bin = cfg_data->bin; 2867 break; 2868 2869 default: 2870 ECORE_ERR("Unknown command: %d\n", cmd); 2871 return; 2872 } 2873 2874 ECORE_MSG(sc, "%s bin %d\n", 2875 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ? 
2876 "Setting" : "Clearing"), bin); 2877 2878 data->rules[idx].bin_id = (uint8_t)bin; 2879 data->rules[idx].func_id = func_id; 2880 data->rules[idx].engine_id = o->engine_id; 2881 } 2882 2883 /** 2884 * ecore_mcast_handle_restore_cmd_e2 - restore configuration from the registry 2885 * 2886 * @sc: device handle 2887 * @o: 2888 * @start_bin: index in the registry to start from (including) 2889 * @rdata_idx: index in the ramrod data to start from 2890 * 2891 * returns last handled bin index or -1 if all bins have been handled 2892 */ 2893 static inline int ecore_mcast_handle_restore_cmd_e2( 2894 struct bxe_softc *sc, struct ecore_mcast_obj *o , int start_bin, 2895 int *rdata_idx) 2896 { 2897 int cur_bin, cnt = *rdata_idx; 2898 union ecore_mcast_config_data cfg_data = {NULL}; 2899 2900 /* go through the registry and configure the bins from it */ 2901 for (cur_bin = ecore_mcast_get_next_bin(o, start_bin); cur_bin >= 0; 2902 cur_bin = ecore_mcast_get_next_bin(o, cur_bin + 1)) { 2903 2904 cfg_data.bin = (uint8_t)cur_bin; 2905 o->set_one_rule(sc, o, cnt, &cfg_data, 2906 ECORE_MCAST_CMD_RESTORE); 2907 2908 cnt++; 2909 2910 ECORE_MSG(sc, "About to configure a bin %d\n", cur_bin); 2911 2912 /* Break if we reached the maximum number 2913 * of rules. 2914 */ 2915 if (cnt >= o->max_cmd_len) 2916 break; 2917 } 2918 2919 *rdata_idx = cnt; 2920 2921 return cur_bin; 2922 } 2923 2924 static inline void ecore_mcast_hdl_pending_add_e2(struct bxe_softc *sc, 2925 struct ecore_mcast_obj *o, struct ecore_pending_mcast_cmd *cmd_pos, 2926 int *line_idx) 2927 { 2928 struct ecore_mcast_mac_elem *pmac_pos, *pmac_pos_n; 2929 int cnt = *line_idx; 2930 union ecore_mcast_config_data cfg_data = {NULL}; 2931 2932 ECORE_LIST_FOR_EACH_ENTRY_SAFE(pmac_pos, pmac_pos_n, 2933 &cmd_pos->data.macs_head, link, struct ecore_mcast_mac_elem) { 2934 2935 cfg_data.mac = &pmac_pos->mac[0]; 2936 o->set_one_rule(sc, o, cnt, &cfg_data, cmd_pos->type); 2937 2938 cnt++; 2939 2940 ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n", 2941 pmac_pos->mac[0], pmac_pos->mac[1], pmac_pos->mac[2], pmac_pos->mac[3], pmac_pos->mac[4], pmac_pos->mac[5]); 2942 2943 ECORE_LIST_REMOVE_ENTRY(&pmac_pos->link, 2944 &cmd_pos->data.macs_head); 2945 2946 /* Break if we reached the maximum number 2947 * of rules. 2948 */ 2949 if (cnt >= o->max_cmd_len) 2950 break; 2951 } 2952 2953 *line_idx = cnt; 2954 2955 /* if no more MACs to configure - we are done */ 2956 if (ECORE_LIST_IS_EMPTY(&cmd_pos->data.macs_head)) 2957 cmd_pos->done = TRUE; 2958 } 2959 2960 static inline void ecore_mcast_hdl_pending_del_e2(struct bxe_softc *sc, 2961 struct ecore_mcast_obj *o, struct ecore_pending_mcast_cmd *cmd_pos, 2962 int *line_idx) 2963 { 2964 int cnt = *line_idx; 2965 2966 while (cmd_pos->data.macs_num) { 2967 o->set_one_rule(sc, o, cnt, NULL, cmd_pos->type); 2968 2969 cnt++; 2970 2971 cmd_pos->data.macs_num--; 2972 2973 ECORE_MSG(sc, "Deleting MAC. %d left,cnt is %d\n", 2974 cmd_pos->data.macs_num, cnt); 2975 2976 /* Break if we reached the maximum 2977 * number of rules. 
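		 * The remainder of this command stays on the pending list
		 * (cmd_pos->done is only set once macs_num reaches zero) and is
		 * picked up again by the next ramrod.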
2978 */ 2979 if (cnt >= o->max_cmd_len) 2980 break; 2981 } 2982 2983 *line_idx = cnt; 2984 2985 /* If we cleared all bins - we are done */ 2986 if (!cmd_pos->data.macs_num) 2987 cmd_pos->done = TRUE; 2988 } 2989 2990 static inline void ecore_mcast_hdl_pending_restore_e2(struct bxe_softc *sc, 2991 struct ecore_mcast_obj *o, struct ecore_pending_mcast_cmd *cmd_pos, 2992 int *line_idx) 2993 { 2994 cmd_pos->data.next_bin = o->hdl_restore(sc, o, cmd_pos->data.next_bin, 2995 line_idx); 2996 2997 if (cmd_pos->data.next_bin < 0) 2998 /* If o->set_restore returned -1 we are done */ 2999 cmd_pos->done = TRUE; 3000 else 3001 /* Start from the next bin next time */ 3002 cmd_pos->data.next_bin++; 3003 } 3004 3005 static inline int ecore_mcast_handle_pending_cmds_e2(struct bxe_softc *sc, 3006 struct ecore_mcast_ramrod_params *p) 3007 { 3008 struct ecore_pending_mcast_cmd *cmd_pos, *cmd_pos_n; 3009 int cnt = 0; 3010 struct ecore_mcast_obj *o = p->mcast_obj; 3011 3012 ECORE_LIST_FOR_EACH_ENTRY_SAFE(cmd_pos, cmd_pos_n, 3013 &o->pending_cmds_head, link, struct ecore_pending_mcast_cmd) { 3014 switch (cmd_pos->type) { 3015 case ECORE_MCAST_CMD_ADD: 3016 ecore_mcast_hdl_pending_add_e2(sc, o, cmd_pos, &cnt); 3017 break; 3018 3019 case ECORE_MCAST_CMD_DEL: 3020 ecore_mcast_hdl_pending_del_e2(sc, o, cmd_pos, &cnt); 3021 break; 3022 3023 case ECORE_MCAST_CMD_RESTORE: 3024 ecore_mcast_hdl_pending_restore_e2(sc, o, cmd_pos, 3025 &cnt); 3026 break; 3027 3028 default: 3029 ECORE_ERR("Unknown command: %d\n", cmd_pos->type); 3030 return ECORE_INVAL; 3031 } 3032 3033 /* If the command has been completed - remove it from the list 3034 * and free the memory 3035 */ 3036 if (cmd_pos->done) { 3037 ECORE_LIST_REMOVE_ENTRY(&cmd_pos->link, 3038 &o->pending_cmds_head); 3039 ECORE_FREE(sc, cmd_pos, cmd_pos->alloc_len); 3040 } 3041 3042 /* Break if we reached the maximum number of rules */ 3043 if (cnt >= o->max_cmd_len) 3044 break; 3045 } 3046 3047 return cnt; 3048 } 3049 3050 static inline void ecore_mcast_hdl_add(struct bxe_softc *sc, 3051 struct ecore_mcast_obj *o, struct ecore_mcast_ramrod_params *p, 3052 int *line_idx) 3053 { 3054 struct ecore_mcast_list_elem *mlist_pos; 3055 union ecore_mcast_config_data cfg_data = {NULL}; 3056 int cnt = *line_idx; 3057 3058 ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link, 3059 struct ecore_mcast_list_elem) { 3060 cfg_data.mac = mlist_pos->mac; 3061 o->set_one_rule(sc, o, cnt, &cfg_data, ECORE_MCAST_CMD_ADD); 3062 3063 cnt++; 3064 3065 ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n", 3066 mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2], mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5]); 3067 } 3068 3069 *line_idx = cnt; 3070 } 3071 3072 static inline void ecore_mcast_hdl_del(struct bxe_softc *sc, 3073 struct ecore_mcast_obj *o, struct ecore_mcast_ramrod_params *p, 3074 int *line_idx) 3075 { 3076 int cnt = *line_idx, i; 3077 3078 for (i = 0; i < p->mcast_list_len; i++) { 3079 o->set_one_rule(sc, o, cnt, NULL, ECORE_MCAST_CMD_DEL); 3080 3081 cnt++; 3082 3083 ECORE_MSG(sc, "Deleting MAC. %d left\n", 3084 p->mcast_list_len - i - 1); 3085 } 3086 3087 *line_idx = cnt; 3088 } 3089 3090 /** 3091 * ecore_mcast_handle_current_cmd - 3092 * 3093 * @sc: device handle 3094 * @p: 3095 * @cmd: 3096 * @start_cnt: first line in the ramrod data that may be used 3097 * 3098 * This function is called iff there is enough place for the current command in 3099 * the ramrod data. 3100 * Returns number of lines filled in the ramrod data in total. 
3101 */ 3102 static inline int ecore_mcast_handle_current_cmd(struct bxe_softc *sc, 3103 struct ecore_mcast_ramrod_params *p, 3104 enum ecore_mcast_cmd cmd, 3105 int start_cnt) 3106 { 3107 struct ecore_mcast_obj *o = p->mcast_obj; 3108 int cnt = start_cnt; 3109 3110 ECORE_MSG(sc, "p->mcast_list_len=%d\n", p->mcast_list_len); 3111 3112 switch (cmd) { 3113 case ECORE_MCAST_CMD_ADD: 3114 ecore_mcast_hdl_add(sc, o, p, &cnt); 3115 break; 3116 3117 case ECORE_MCAST_CMD_DEL: 3118 ecore_mcast_hdl_del(sc, o, p, &cnt); 3119 break; 3120 3121 case ECORE_MCAST_CMD_RESTORE: 3122 o->hdl_restore(sc, o, 0, &cnt); 3123 break; 3124 3125 default: 3126 ECORE_ERR("Unknown command: %d\n", cmd); 3127 return ECORE_INVAL; 3128 } 3129 3130 /* The current command has been handled */ 3131 p->mcast_list_len = 0; 3132 3133 return cnt; 3134 } 3135 3136 static int ecore_mcast_validate_e2(struct bxe_softc *sc, 3137 struct ecore_mcast_ramrod_params *p, 3138 enum ecore_mcast_cmd cmd) 3139 { 3140 struct ecore_mcast_obj *o = p->mcast_obj; 3141 int reg_sz = o->get_registry_size(o); 3142 3143 switch (cmd) { 3144 /* DEL command deletes all currently configured MACs */ 3145 case ECORE_MCAST_CMD_DEL: 3146 o->set_registry_size(o, 0); 3147 /* Don't break */ 3148 3149 /* RESTORE command will restore the entire multicast configuration */ 3150 case ECORE_MCAST_CMD_RESTORE: 3151 /* Here we set the approximate amount of work to do, which in 3152 * fact may be only less as some MACs in postponed ADD 3153 * command(s) scheduled before this command may fall into 3154 * the same bin and the actual number of bins set in the 3155 * registry would be less than we estimated here. See 3156 * ecore_mcast_set_one_rule_e2() for further details. 3157 */ 3158 p->mcast_list_len = reg_sz; 3159 break; 3160 3161 case ECORE_MCAST_CMD_ADD: 3162 case ECORE_MCAST_CMD_CONT: 3163 /* Here we assume that all new MACs will fall into new bins. 3164 * However we will correct the real registry size after we 3165 * handle all pending commands. 
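		 * (ecore_mcast_refresh_registry_e2() recounts the actually set
		 * bins once there are no more pending commands - see
		 * ecore_mcast_setup_e2().)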
3166 */ 3167 o->set_registry_size(o, reg_sz + p->mcast_list_len); 3168 break; 3169 3170 default: 3171 ECORE_ERR("Unknown command: %d\n", cmd); 3172 return ECORE_INVAL; 3173 } 3174 3175 /* Increase the total number of MACs pending to be configured */ 3176 o->total_pending_num += p->mcast_list_len; 3177 3178 return ECORE_SUCCESS; 3179 } 3180 3181 static void ecore_mcast_revert_e2(struct bxe_softc *sc, 3182 struct ecore_mcast_ramrod_params *p, 3183 int old_num_bins) 3184 { 3185 struct ecore_mcast_obj *o = p->mcast_obj; 3186 3187 o->set_registry_size(o, old_num_bins); 3188 o->total_pending_num -= p->mcast_list_len; 3189 } 3190 3191 /** 3192 * ecore_mcast_set_rdata_hdr_e2 - sets a header values 3193 * 3194 * @sc: device handle 3195 * @p: 3196 * @len: number of rules to handle 3197 */ 3198 static inline void ecore_mcast_set_rdata_hdr_e2(struct bxe_softc *sc, 3199 struct ecore_mcast_ramrod_params *p, 3200 uint8_t len) 3201 { 3202 struct ecore_raw_obj *r = &p->mcast_obj->raw; 3203 struct eth_multicast_rules_ramrod_data *data = 3204 (struct eth_multicast_rules_ramrod_data *)(r->rdata); 3205 3206 data->header.echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) | 3207 (ECORE_FILTER_MCAST_PENDING << 3208 ECORE_SWCID_SHIFT)); 3209 data->header.rule_cnt = len; 3210 } 3211 3212 /** 3213 * ecore_mcast_refresh_registry_e2 - recalculate the actual number of set bins 3214 * 3215 * @sc: device handle 3216 * @o: 3217 * 3218 * Recalculate the actual number of set bins in the registry using Brian 3219 * Kernighan's algorithm: it's execution complexity is as a number of set bins. 3220 * 3221 * returns 0 for the compliance with ecore_mcast_refresh_registry_e1(). 3222 */ 3223 static inline int ecore_mcast_refresh_registry_e2(struct bxe_softc *sc, 3224 struct ecore_mcast_obj *o) 3225 { 3226 int i, cnt = 0; 3227 uint64_t elem; 3228 3229 for (i = 0; i < ECORE_MCAST_VEC_SZ; i++) { 3230 elem = o->registry.aprox_match.vec[i]; 3231 for (; elem; cnt++) 3232 elem &= elem - 1; 3233 } 3234 3235 o->set_registry_size(o, cnt); 3236 3237 return ECORE_SUCCESS; 3238 } 3239 3240 static int ecore_mcast_setup_e2(struct bxe_softc *sc, 3241 struct ecore_mcast_ramrod_params *p, 3242 enum ecore_mcast_cmd cmd) 3243 { 3244 struct ecore_raw_obj *raw = &p->mcast_obj->raw; 3245 struct ecore_mcast_obj *o = p->mcast_obj; 3246 struct eth_multicast_rules_ramrod_data *data = 3247 (struct eth_multicast_rules_ramrod_data *)(raw->rdata); 3248 int cnt = 0, rc; 3249 3250 /* Reset the ramrod data buffer */ 3251 ECORE_MEMSET(data, 0, sizeof(*data)); 3252 3253 cnt = ecore_mcast_handle_pending_cmds_e2(sc, p); 3254 3255 /* If there are no more pending commands - clear SCHEDULED state */ 3256 if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head)) 3257 o->clear_sched(o); 3258 3259 /* The below may be TRUE iff there was enough room in ramrod 3260 * data for all pending commands and for the current 3261 * command. Otherwise the current command would have been added 3262 * to the pending commands and p->mcast_list_len would have been 3263 * zeroed. 3264 */ 3265 if (p->mcast_list_len > 0) 3266 cnt = ecore_mcast_handle_current_cmd(sc, p, cmd, cnt); 3267 3268 /* We've pulled out some MACs - update the total number of 3269 * outstanding. 3270 */ 3271 o->total_pending_num -= cnt; 3272 3273 /* send a ramrod */ 3274 ECORE_DBG_BREAK_IF(o->total_pending_num < 0); 3275 ECORE_DBG_BREAK_IF(cnt > o->max_cmd_len); 3276 3277 ecore_mcast_set_rdata_hdr_e2(sc, p, (uint8_t)cnt); 3278 3279 /* Update a registry size if there are no more pending operations. 
3280 * 3281 * We don't want to change the value of the registry size if there are 3282 * pending operations because we want it to always be equal to the 3283 * exact or the approximate number (see ecore_mcast_validate_e2()) of 3284 * set bins after the last requested operation in order to properly 3285 * evaluate the size of the next DEL/RESTORE operation. 3286 * 3287 * Note that we update the registry itself during command(s) handling 3288 * - see ecore_mcast_set_one_rule_e2(). That's because for 57712 we 3289 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but 3290 * with a limited amount of update commands (per MAC/bin) and we don't 3291 * know in this scope what the actual state of bins configuration is 3292 * going to be after this ramrod. 3293 */ 3294 if (!o->total_pending_num) 3295 ecore_mcast_refresh_registry_e2(sc, o); 3296 3297 /* If CLEAR_ONLY was requested - don't send a ramrod and clear 3298 * RAMROD_PENDING status immediately. 3299 */ 3300 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { 3301 raw->clear_pending(raw); 3302 return ECORE_SUCCESS; 3303 } else { 3304 /* No need for an explicit memory barrier here as long we would 3305 * need to ensure the ordering of writing to the SPQ element 3306 * and updating of the SPQ producer which involves a memory 3307 * read and we will have to put a full memory barrier there 3308 * (inside ecore_sp_post()). 3309 */ 3310 3311 /* Send a ramrod */ 3312 rc = ecore_sp_post( sc, 3313 RAMROD_CMD_ID_ETH_MULTICAST_RULES, 3314 raw->cid, 3315 raw->rdata_mapping, 3316 ETH_CONNECTION_TYPE); 3317 if (rc) 3318 return rc; 3319 3320 /* Ramrod completion is pending */ 3321 return ECORE_PENDING; 3322 } 3323 } 3324 3325 static int ecore_mcast_validate_e1h(struct bxe_softc *sc, 3326 struct ecore_mcast_ramrod_params *p, 3327 enum ecore_mcast_cmd cmd) 3328 { 3329 /* Mark, that there is a work to do */ 3330 if ((cmd == ECORE_MCAST_CMD_DEL) || (cmd == ECORE_MCAST_CMD_RESTORE)) 3331 p->mcast_list_len = 1; 3332 3333 return ECORE_SUCCESS; 3334 } 3335 3336 static void ecore_mcast_revert_e1h(struct bxe_softc *sc, 3337 struct ecore_mcast_ramrod_params *p, 3338 int old_num_bins) 3339 { 3340 /* Do nothing */ 3341 } 3342 3343 #define ECORE_57711_SET_MC_FILTER(filter, bit) \ 3344 do { \ 3345 (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \ 3346 } while (0) 3347 3348 static inline void ecore_mcast_hdl_add_e1h(struct bxe_softc *sc, 3349 struct ecore_mcast_obj *o, 3350 struct ecore_mcast_ramrod_params *p, 3351 uint32_t *mc_filter) 3352 { 3353 struct ecore_mcast_list_elem *mlist_pos; 3354 int bit; 3355 3356 ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link, 3357 struct ecore_mcast_list_elem) { 3358 bit = ecore_mcast_bin_from_mac(mlist_pos->mac); 3359 ECORE_57711_SET_MC_FILTER(mc_filter, bit); 3360 3361 ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC, bin %d\n", 3362 mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2], mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5], bit); 3363 3364 /* bookkeeping... 
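		 * remember the bin in the approximate-match registry so that a
		 * later RESTORE command can rebuild the 57711 MC filter from it.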
*/ 3365 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, 3366 bit); 3367 } 3368 } 3369 3370 static inline void ecore_mcast_hdl_restore_e1h(struct bxe_softc *sc, 3371 struct ecore_mcast_obj *o, struct ecore_mcast_ramrod_params *p, 3372 uint32_t *mc_filter) 3373 { 3374 int bit; 3375 3376 for (bit = ecore_mcast_get_next_bin(o, 0); 3377 bit >= 0; 3378 bit = ecore_mcast_get_next_bin(o, bit + 1)) { 3379 ECORE_57711_SET_MC_FILTER(mc_filter, bit); 3380 ECORE_MSG(sc, "About to set bin %d\n", bit); 3381 } 3382 } 3383 3384 /* On 57711 we write the multicast MACs' approximate match 3385 * table by directly into the TSTORM's internal RAM. So we don't 3386 * really need to handle any tricks to make it work. 3387 */ 3388 static int ecore_mcast_setup_e1h(struct bxe_softc *sc, 3389 struct ecore_mcast_ramrod_params *p, 3390 enum ecore_mcast_cmd cmd) 3391 { 3392 int i; 3393 struct ecore_mcast_obj *o = p->mcast_obj; 3394 struct ecore_raw_obj *r = &o->raw; 3395 3396 /* If CLEAR_ONLY has been requested - clear the registry 3397 * and clear a pending bit. 3398 */ 3399 if (!ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { 3400 uint32_t mc_filter[ECORE_MC_HASH_SIZE] = {0}; 3401 3402 /* Set the multicast filter bits before writing it into 3403 * the internal memory. 3404 */ 3405 switch (cmd) { 3406 case ECORE_MCAST_CMD_ADD: 3407 ecore_mcast_hdl_add_e1h(sc, o, p, mc_filter); 3408 break; 3409 3410 case ECORE_MCAST_CMD_DEL: 3411 ECORE_MSG(sc, 3412 "Invalidating multicast MACs configuration\n"); 3413 3414 /* clear the registry */ 3415 ECORE_MEMSET(o->registry.aprox_match.vec, 0, 3416 sizeof(o->registry.aprox_match.vec)); 3417 break; 3418 3419 case ECORE_MCAST_CMD_RESTORE: 3420 ecore_mcast_hdl_restore_e1h(sc, o, p, mc_filter); 3421 break; 3422 3423 default: 3424 ECORE_ERR("Unknown command: %d\n", cmd); 3425 return ECORE_INVAL; 3426 } 3427 3428 /* Set the mcast filter in the internal memory */ 3429 for (i = 0; i < ECORE_MC_HASH_SIZE; i++) 3430 REG_WR(sc, ECORE_MC_HASH_OFFSET(sc, i), mc_filter[i]); 3431 } else 3432 /* clear the registry */ 3433 ECORE_MEMSET(o->registry.aprox_match.vec, 0, 3434 sizeof(o->registry.aprox_match.vec)); 3435 3436 /* We are done */ 3437 r->clear_pending(r); 3438 3439 return ECORE_SUCCESS; 3440 } 3441 3442 static int ecore_mcast_validate_e1(struct bxe_softc *sc, 3443 struct ecore_mcast_ramrod_params *p, 3444 enum ecore_mcast_cmd cmd) 3445 { 3446 struct ecore_mcast_obj *o = p->mcast_obj; 3447 int reg_sz = o->get_registry_size(o); 3448 3449 switch (cmd) { 3450 /* DEL command deletes all currently configured MACs */ 3451 case ECORE_MCAST_CMD_DEL: 3452 o->set_registry_size(o, 0); 3453 /* Don't break */ 3454 3455 /* RESTORE command will restore the entire multicast configuration */ 3456 case ECORE_MCAST_CMD_RESTORE: 3457 p->mcast_list_len = reg_sz; 3458 ECORE_MSG(sc, "Command %d, p->mcast_list_len=%d\n", 3459 cmd, p->mcast_list_len); 3460 break; 3461 3462 case ECORE_MCAST_CMD_ADD: 3463 case ECORE_MCAST_CMD_CONT: 3464 /* Multicast MACs on 57710 are configured as unicast MACs and 3465 * there is only a limited number of CAM entries for that 3466 * matter. 3467 */ 3468 if (p->mcast_list_len > o->max_cmd_len) { 3469 ECORE_ERR("Can't configure more than %d multicast MACs on 57710\n", 3470 o->max_cmd_len); 3471 return ECORE_INVAL; 3472 } 3473 /* Every configured MAC should be cleared if DEL command is 3474 * called. Only the last ADD command is relevant as long as 3475 * every ADD commands overrides the previous configuration. 
3476 */ 3477 ECORE_MSG(sc, "p->mcast_list_len=%d\n", p->mcast_list_len); 3478 if (p->mcast_list_len > 0) 3479 o->set_registry_size(o, p->mcast_list_len); 3480 3481 break; 3482 3483 default: 3484 ECORE_ERR("Unknown command: %d\n", cmd); 3485 return ECORE_INVAL; 3486 } 3487 3488 /* We want to ensure that commands are executed one by one for 57710. 3489 * Therefore each none-empty command will consume o->max_cmd_len. 3490 */ 3491 if (p->mcast_list_len) 3492 o->total_pending_num += o->max_cmd_len; 3493 3494 return ECORE_SUCCESS; 3495 } 3496 3497 static void ecore_mcast_revert_e1(struct bxe_softc *sc, 3498 struct ecore_mcast_ramrod_params *p, 3499 int old_num_macs) 3500 { 3501 struct ecore_mcast_obj *o = p->mcast_obj; 3502 3503 o->set_registry_size(o, old_num_macs); 3504 3505 /* If current command hasn't been handled yet and we are 3506 * here means that it's meant to be dropped and we have to 3507 * update the number of outstanding MACs accordingly. 3508 */ 3509 if (p->mcast_list_len) 3510 o->total_pending_num -= o->max_cmd_len; 3511 } 3512 3513 static void ecore_mcast_set_one_rule_e1(struct bxe_softc *sc, 3514 struct ecore_mcast_obj *o, int idx, 3515 union ecore_mcast_config_data *cfg_data, 3516 enum ecore_mcast_cmd cmd) 3517 { 3518 struct ecore_raw_obj *r = &o->raw; 3519 struct mac_configuration_cmd *data = 3520 (struct mac_configuration_cmd *)(r->rdata); 3521 3522 /* copy mac */ 3523 if ((cmd == ECORE_MCAST_CMD_ADD) || (cmd == ECORE_MCAST_CMD_RESTORE)) { 3524 ecore_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr, 3525 &data->config_table[idx].middle_mac_addr, 3526 &data->config_table[idx].lsb_mac_addr, 3527 cfg_data->mac); 3528 3529 data->config_table[idx].vlan_id = 0; 3530 data->config_table[idx].pf_id = r->func_id; 3531 data->config_table[idx].clients_bit_vector = 3532 ECORE_CPU_TO_LE32(1 << r->cl_id); 3533 3534 ECORE_SET_FLAG(data->config_table[idx].flags, 3535 MAC_CONFIGURATION_ENTRY_ACTION_TYPE, 3536 T_ETH_MAC_COMMAND_SET); 3537 } 3538 } 3539 3540 /** 3541 * ecore_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd 3542 * 3543 * @sc: device handle 3544 * @p: 3545 * @len: number of rules to handle 3546 */ 3547 static inline void ecore_mcast_set_rdata_hdr_e1(struct bxe_softc *sc, 3548 struct ecore_mcast_ramrod_params *p, 3549 uint8_t len) 3550 { 3551 struct ecore_raw_obj *r = &p->mcast_obj->raw; 3552 struct mac_configuration_cmd *data = 3553 (struct mac_configuration_cmd *)(r->rdata); 3554 3555 uint8_t offset = (CHIP_REV_IS_SLOW(sc) ? 3556 ECORE_MAX_EMUL_MULTI*(1 + r->func_id) : 3557 ECORE_MAX_MULTICAST*(1 + r->func_id)); 3558 3559 data->hdr.offset = offset; 3560 data->hdr.client_id = ECORE_CPU_TO_LE16(0xff); 3561 data->hdr.echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) | 3562 (ECORE_FILTER_MCAST_PENDING << 3563 ECORE_SWCID_SHIFT)); 3564 data->hdr.length = len; 3565 } 3566 3567 /** 3568 * ecore_mcast_handle_restore_cmd_e1 - restore command for 57710 3569 * 3570 * @sc: device handle 3571 * @o: 3572 * @start_idx: index in the registry to start from 3573 * @rdata_idx: index in the ramrod data to start from 3574 * 3575 * restore command for 57710 is like all other commands - always a stand alone 3576 * command - start_idx and rdata_idx will always be 0. This function will always 3577 * succeed. 3578 * returns -1 to comply with 57712 variant. 
3579 */ 3580 static inline int ecore_mcast_handle_restore_cmd_e1( 3581 struct bxe_softc *sc, struct ecore_mcast_obj *o , int start_idx, 3582 int *rdata_idx) 3583 { 3584 struct ecore_mcast_mac_elem *elem; 3585 int i = 0; 3586 union ecore_mcast_config_data cfg_data = {NULL}; 3587 3588 /* go through the registry and configure the MACs from it. */ 3589 ECORE_LIST_FOR_EACH_ENTRY(elem, &o->registry.exact_match.macs, link, 3590 struct ecore_mcast_mac_elem) { 3591 cfg_data.mac = &elem->mac[0]; 3592 o->set_one_rule(sc, o, i, &cfg_data, ECORE_MCAST_CMD_RESTORE); 3593 3594 i++; 3595 3596 ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n", 3597 cfg_data.mac[0], cfg_data.mac[1], cfg_data.mac[2], cfg_data.mac[3], cfg_data.mac[4], cfg_data.mac[5]); 3598 } 3599 3600 *rdata_idx = i; 3601 3602 return -1; 3603 } 3604 3605 static inline int ecore_mcast_handle_pending_cmds_e1( 3606 struct bxe_softc *sc, struct ecore_mcast_ramrod_params *p) 3607 { 3608 struct ecore_pending_mcast_cmd *cmd_pos; 3609 struct ecore_mcast_mac_elem *pmac_pos; 3610 struct ecore_mcast_obj *o = p->mcast_obj; 3611 union ecore_mcast_config_data cfg_data = {NULL}; 3612 int cnt = 0; 3613 3614 /* If nothing to be done - return */ 3615 if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head)) 3616 return 0; 3617 3618 /* Handle the first command */ 3619 cmd_pos = ECORE_LIST_FIRST_ENTRY(&o->pending_cmds_head, 3620 struct ecore_pending_mcast_cmd, link); 3621 3622 switch (cmd_pos->type) { 3623 case ECORE_MCAST_CMD_ADD: 3624 ECORE_LIST_FOR_EACH_ENTRY(pmac_pos, &cmd_pos->data.macs_head, 3625 link, struct ecore_mcast_mac_elem) { 3626 cfg_data.mac = &pmac_pos->mac[0]; 3627 o->set_one_rule(sc, o, cnt, &cfg_data, cmd_pos->type); 3628 3629 cnt++; 3630 3631 ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n", 3632 pmac_pos->mac[0], pmac_pos->mac[1], pmac_pos->mac[2], pmac_pos->mac[3], pmac_pos->mac[4], pmac_pos->mac[5]); 3633 } 3634 break; 3635 3636 case ECORE_MCAST_CMD_DEL: 3637 cnt = cmd_pos->data.macs_num; 3638 ECORE_MSG(sc, "About to delete %d multicast MACs\n", cnt); 3639 break; 3640 3641 case ECORE_MCAST_CMD_RESTORE: 3642 o->hdl_restore(sc, o, 0, &cnt); 3643 break; 3644 3645 default: 3646 ECORE_ERR("Unknown command: %d\n", cmd_pos->type); 3647 return ECORE_INVAL; 3648 } 3649 3650 ECORE_LIST_REMOVE_ENTRY(&cmd_pos->link, &o->pending_cmds_head); 3651 ECORE_FREE(sc, cmd_pos, cmd_pos->alloc_len); 3652 3653 return cnt; 3654 } 3655 3656 /** 3657 * ecore_get_fw_mac_addr - revert the ecore_set_fw_mac_addr(). 3658 * 3659 * @fw_hi: 3660 * @fw_mid: 3661 * @fw_lo: 3662 * @mac: 3663 */ 3664 static inline void ecore_get_fw_mac_addr(uint16_t *fw_hi, uint16_t *fw_mid, 3665 uint16_t *fw_lo, uint8_t *mac) 3666 { 3667 mac[1] = ((uint8_t *)fw_hi)[0]; 3668 mac[0] = ((uint8_t *)fw_hi)[1]; 3669 mac[3] = ((uint8_t *)fw_mid)[0]; 3670 mac[2] = ((uint8_t *)fw_mid)[1]; 3671 mac[5] = ((uint8_t *)fw_lo)[0]; 3672 mac[4] = ((uint8_t *)fw_lo)[1]; 3673 } 3674 3675 /** 3676 * ecore_mcast_refresh_registry_e1 - 3677 * 3678 * @sc: device handle 3679 * @cnt: 3680 * 3681 * Check the ramrod data first entry flag to see if it's a DELETE or ADD command 3682 * and update the registry correspondingly: if ADD - allocate a memory and add 3683 * the entries to the registry (list), if DELETE - clear the registry and free 3684 * the memory. 
3685 */ 3686 static inline int ecore_mcast_refresh_registry_e1(struct bxe_softc *sc, 3687 struct ecore_mcast_obj *o) 3688 { 3689 struct ecore_raw_obj *raw = &o->raw; 3690 struct ecore_mcast_mac_elem *elem; 3691 struct mac_configuration_cmd *data = 3692 (struct mac_configuration_cmd *)(raw->rdata); 3693 3694 /* If first entry contains a SET bit - the command was ADD, 3695 * otherwise - DEL_ALL 3696 */ 3697 if (ECORE_GET_FLAG(data->config_table[0].flags, 3698 MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) { 3699 int i, len = data->hdr.length; 3700 3701 /* Break if it was a RESTORE command */ 3702 if (!ECORE_LIST_IS_EMPTY(&o->registry.exact_match.macs)) 3703 return ECORE_SUCCESS; 3704 3705 elem = ECORE_CALLOC(len, sizeof(*elem), GFP_ATOMIC, sc); 3706 if (!elem) { 3707 ECORE_ERR("Failed to allocate registry memory\n"); 3708 return ECORE_NOMEM; 3709 } 3710 3711 for (i = 0; i < len; i++, elem++) { 3712 ecore_get_fw_mac_addr( 3713 &data->config_table[i].msb_mac_addr, 3714 &data->config_table[i].middle_mac_addr, 3715 &data->config_table[i].lsb_mac_addr, 3716 elem->mac); 3717 ECORE_MSG(sc, "Adding registry entry for [%02x:%02x:%02x:%02x:%02x:%02x]\n", 3718 elem->mac[0], elem->mac[1], elem->mac[2], elem->mac[3], elem->mac[4], elem->mac[5]); 3719 ECORE_LIST_PUSH_TAIL(&elem->link, 3720 &o->registry.exact_match.macs); 3721 } 3722 } else { 3723 elem = ECORE_LIST_FIRST_ENTRY(&o->registry.exact_match.macs, 3724 struct ecore_mcast_mac_elem, 3725 link); 3726 ECORE_MSG(sc, "Deleting a registry\n"); 3727 ECORE_FREE(sc, elem, sizeof(*elem)); 3728 ECORE_LIST_INIT(&o->registry.exact_match.macs); 3729 } 3730 3731 return ECORE_SUCCESS; 3732 } 3733 3734 static int ecore_mcast_setup_e1(struct bxe_softc *sc, 3735 struct ecore_mcast_ramrod_params *p, 3736 enum ecore_mcast_cmd cmd) 3737 { 3738 struct ecore_mcast_obj *o = p->mcast_obj; 3739 struct ecore_raw_obj *raw = &o->raw; 3740 struct mac_configuration_cmd *data = 3741 (struct mac_configuration_cmd *)(raw->rdata); 3742 int cnt = 0, i, rc; 3743 3744 /* Reset the ramrod data buffer */ 3745 ECORE_MEMSET(data, 0, sizeof(*data)); 3746 3747 /* First set all entries as invalid */ 3748 for (i = 0; i < o->max_cmd_len ; i++) 3749 ECORE_SET_FLAG(data->config_table[i].flags, 3750 MAC_CONFIGURATION_ENTRY_ACTION_TYPE, 3751 T_ETH_MAC_COMMAND_INVALIDATE); 3752 3753 /* Handle pending commands first */ 3754 cnt = ecore_mcast_handle_pending_cmds_e1(sc, p); 3755 3756 /* If there are no more pending commands - clear SCHEDULED state */ 3757 if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head)) 3758 o->clear_sched(o); 3759 3760 /* The below may be TRUE iff there were no pending commands */ 3761 if (!cnt) 3762 cnt = ecore_mcast_handle_current_cmd(sc, p, cmd, 0); 3763 3764 /* For 57710 every command has o->max_cmd_len length to ensure that 3765 * commands are done one at a time. 3766 */ 3767 o->total_pending_num -= o->max_cmd_len; 3768 3769 /* send a ramrod */ 3770 3771 ECORE_DBG_BREAK_IF(cnt > o->max_cmd_len); 3772 3773 /* Set ramrod header (in particular, a number of entries to update) */ 3774 ecore_mcast_set_rdata_hdr_e1(sc, p, (uint8_t)cnt); 3775 3776 /* update a registry: we need the registry contents to be always up 3777 * to date in order to be able to execute a RESTORE opcode. Here 3778 * we use the fact that for 57710 we sent one command at a time 3779 * hence we may take the registry update out of the command handling 3780 * and do it in a simpler way here. 
3781 */ 3782 rc = ecore_mcast_refresh_registry_e1(sc, o); 3783 if (rc) 3784 return rc; 3785 3786 /* If CLEAR_ONLY was requested - don't send a ramrod and clear 3787 * RAMROD_PENDING status immediately. 3788 */ 3789 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { 3790 raw->clear_pending(raw); 3791 return ECORE_SUCCESS; 3792 } else { 3793 /* No need for an explicit memory barrier here as long we would 3794 * need to ensure the ordering of writing to the SPQ element 3795 * and updating of the SPQ producer which involves a memory 3796 * read and we will have to put a full memory barrier there 3797 * (inside ecore_sp_post()). 3798 */ 3799 3800 /* Send a ramrod */ 3801 rc = ecore_sp_post( sc, 3802 RAMROD_CMD_ID_ETH_SET_MAC, 3803 raw->cid, 3804 raw->rdata_mapping, 3805 ETH_CONNECTION_TYPE); 3806 if (rc) 3807 return rc; 3808 3809 /* Ramrod completion is pending */ 3810 return ECORE_PENDING; 3811 } 3812 } 3813 3814 static int ecore_mcast_get_registry_size_exact(struct ecore_mcast_obj *o) 3815 { 3816 return o->registry.exact_match.num_macs_set; 3817 } 3818 3819 static int ecore_mcast_get_registry_size_aprox(struct ecore_mcast_obj *o) 3820 { 3821 return o->registry.aprox_match.num_bins_set; 3822 } 3823 3824 static void ecore_mcast_set_registry_size_exact(struct ecore_mcast_obj *o, 3825 int n) 3826 { 3827 o->registry.exact_match.num_macs_set = n; 3828 } 3829 3830 static void ecore_mcast_set_registry_size_aprox(struct ecore_mcast_obj *o, 3831 int n) 3832 { 3833 o->registry.aprox_match.num_bins_set = n; 3834 } 3835 3836 int ecore_config_mcast(struct bxe_softc *sc, 3837 struct ecore_mcast_ramrod_params *p, 3838 enum ecore_mcast_cmd cmd) 3839 { 3840 struct ecore_mcast_obj *o = p->mcast_obj; 3841 struct ecore_raw_obj *r = &o->raw; 3842 int rc = 0, old_reg_size; 3843 3844 /* This is needed to recover number of currently configured mcast macs 3845 * in case of failure. 3846 */ 3847 old_reg_size = o->get_registry_size(o); 3848 3849 /* Do some calculations and checks */ 3850 rc = o->validate(sc, p, cmd); 3851 if (rc) 3852 return rc; 3853 3854 /* Return if there is no work to do */ 3855 if ((!p->mcast_list_len) && (!o->check_sched(o))) 3856 return ECORE_SUCCESS; 3857 3858 ECORE_MSG(sc, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n", 3859 o->total_pending_num, p->mcast_list_len, o->max_cmd_len); 3860 3861 /* Enqueue the current command to the pending list if we can't complete 3862 * it in the current iteration 3863 */ 3864 if (r->check_pending(r) || 3865 ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) { 3866 rc = o->enqueue_cmd(sc, p->mcast_obj, p, cmd); 3867 if (rc < 0) 3868 goto error_exit1; 3869 3870 /* As long as the current command is in a command list we 3871 * don't need to handle it separately. 
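 * (Clearing p->mcast_list_len below simply keeps the same list from being
 * handled twice: the copy queued via o->enqueue_cmd() above is served from
 * o->pending_cmds_head by the config_mcast() handlers instead.)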
3872 */ 3873 p->mcast_list_len = 0; 3874 } 3875 3876 if (!r->check_pending(r)) { 3877 3878 /* Set 'pending' state */ 3879 r->set_pending(r); 3880 3881 /* Configure the new classification in the chip */ 3882 rc = o->config_mcast(sc, p, cmd); 3883 if (rc < 0) 3884 goto error_exit2; 3885 3886 /* Wait for a ramrod completion if was requested */ 3887 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) 3888 rc = o->wait_comp(sc, o); 3889 } 3890 3891 return rc; 3892 3893 error_exit2: 3894 r->clear_pending(r); 3895 3896 error_exit1: 3897 o->revert(sc, p, old_reg_size); 3898 3899 return rc; 3900 } 3901 3902 static void ecore_mcast_clear_sched(struct ecore_mcast_obj *o) 3903 { 3904 ECORE_SMP_MB_BEFORE_CLEAR_BIT(); 3905 ECORE_CLEAR_BIT(o->sched_state, o->raw.pstate); 3906 ECORE_SMP_MB_AFTER_CLEAR_BIT(); 3907 } 3908 3909 static void ecore_mcast_set_sched(struct ecore_mcast_obj *o) 3910 { 3911 ECORE_SMP_MB_BEFORE_CLEAR_BIT(); 3912 ECORE_SET_BIT(o->sched_state, o->raw.pstate); 3913 ECORE_SMP_MB_AFTER_CLEAR_BIT(); 3914 } 3915 3916 static bool ecore_mcast_check_sched(struct ecore_mcast_obj *o) 3917 { 3918 return !!ECORE_TEST_BIT(o->sched_state, o->raw.pstate); 3919 } 3920 3921 static bool ecore_mcast_check_pending(struct ecore_mcast_obj *o) 3922 { 3923 return o->raw.check_pending(&o->raw) || o->check_sched(o); 3924 } 3925 3926 void ecore_init_mcast_obj(struct bxe_softc *sc, 3927 struct ecore_mcast_obj *mcast_obj, 3928 uint8_t mcast_cl_id, uint32_t mcast_cid, uint8_t func_id, 3929 uint8_t engine_id, void *rdata, ecore_dma_addr_t rdata_mapping, 3930 int state, unsigned long *pstate, ecore_obj_type type) 3931 { 3932 ECORE_MEMSET(mcast_obj, 0, sizeof(*mcast_obj)); 3933 3934 ecore_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id, 3935 rdata, rdata_mapping, state, pstate, type); 3936 3937 mcast_obj->engine_id = engine_id; 3938 3939 ECORE_LIST_INIT(&mcast_obj->pending_cmds_head); 3940 3941 mcast_obj->sched_state = ECORE_FILTER_MCAST_SCHED; 3942 mcast_obj->check_sched = ecore_mcast_check_sched; 3943 mcast_obj->set_sched = ecore_mcast_set_sched; 3944 mcast_obj->clear_sched = ecore_mcast_clear_sched; 3945 3946 if (CHIP_IS_E1(sc)) { 3947 mcast_obj->config_mcast = ecore_mcast_setup_e1; 3948 mcast_obj->enqueue_cmd = ecore_mcast_enqueue_cmd; 3949 mcast_obj->hdl_restore = 3950 ecore_mcast_handle_restore_cmd_e1; 3951 mcast_obj->check_pending = ecore_mcast_check_pending; 3952 3953 if (CHIP_REV_IS_SLOW(sc)) 3954 mcast_obj->max_cmd_len = ECORE_MAX_EMUL_MULTI; 3955 else 3956 mcast_obj->max_cmd_len = ECORE_MAX_MULTICAST; 3957 3958 mcast_obj->wait_comp = ecore_mcast_wait; 3959 mcast_obj->set_one_rule = ecore_mcast_set_one_rule_e1; 3960 mcast_obj->validate = ecore_mcast_validate_e1; 3961 mcast_obj->revert = ecore_mcast_revert_e1; 3962 mcast_obj->get_registry_size = 3963 ecore_mcast_get_registry_size_exact; 3964 mcast_obj->set_registry_size = 3965 ecore_mcast_set_registry_size_exact; 3966 3967 /* 57710 is the only chip that uses the exact match for mcast 3968 * at the moment. 3969 */ 3970 ECORE_LIST_INIT(&mcast_obj->registry.exact_match.macs); 3971 3972 } else if (CHIP_IS_E1H(sc)) { 3973 mcast_obj->config_mcast = ecore_mcast_setup_e1h; 3974 mcast_obj->enqueue_cmd = NULL; 3975 mcast_obj->hdl_restore = NULL; 3976 mcast_obj->check_pending = ecore_mcast_check_pending; 3977 3978 /* 57711 doesn't send a ramrod, so it has unlimited credit 3979 * for one command. 
3980 */ 3981 mcast_obj->max_cmd_len = -1; 3982 mcast_obj->wait_comp = ecore_mcast_wait; 3983 mcast_obj->set_one_rule = NULL; 3984 mcast_obj->validate = ecore_mcast_validate_e1h; 3985 mcast_obj->revert = ecore_mcast_revert_e1h; 3986 mcast_obj->get_registry_size = 3987 ecore_mcast_get_registry_size_aprox; 3988 mcast_obj->set_registry_size = 3989 ecore_mcast_set_registry_size_aprox; 3990 } else { 3991 mcast_obj->config_mcast = ecore_mcast_setup_e2; 3992 mcast_obj->enqueue_cmd = ecore_mcast_enqueue_cmd; 3993 mcast_obj->hdl_restore = 3994 ecore_mcast_handle_restore_cmd_e2; 3995 mcast_obj->check_pending = ecore_mcast_check_pending; 3996 /* TODO: There should be a proper HSI define for this number!!! 3997 */ 3998 mcast_obj->max_cmd_len = 16; 3999 mcast_obj->wait_comp = ecore_mcast_wait; 4000 mcast_obj->set_one_rule = ecore_mcast_set_one_rule_e2; 4001 mcast_obj->validate = ecore_mcast_validate_e2; 4002 mcast_obj->revert = ecore_mcast_revert_e2; 4003 mcast_obj->get_registry_size = 4004 ecore_mcast_get_registry_size_aprox; 4005 mcast_obj->set_registry_size = 4006 ecore_mcast_set_registry_size_aprox; 4007 } 4008 } 4009 4010 /*************************** Credit handling **********************************/ 4011 4012 /** 4013 * atomic_add_ifless - add if the result is less than a given value. 4014 * 4015 * @v: pointer of type ecore_atomic_t 4016 * @a: the amount to add to v... 4017 * @u: ...if (v + a) is less than u. 4018 * 4019 * returns TRUE if (v + a) was less than u, and FALSE otherwise. 4020 * 4021 */ 4022 static inline bool __atomic_add_ifless(ecore_atomic_t *v, int a, int u) 4023 { 4024 int c, old; 4025 4026 c = ECORE_ATOMIC_READ(v); 4027 for (;;) { 4028 if (ECORE_UNLIKELY(c + a >= u)) 4029 return FALSE; 4030 4031 old = ECORE_ATOMIC_CMPXCHG((v), c, c + a); 4032 if (ECORE_LIKELY(old == c)) 4033 break; 4034 c = old; 4035 } 4036 4037 return TRUE; 4038 } 4039 4040 /** 4041 * atomic_dec_ifmoe - dec if the result is more or equal than a given value. 4042 * 4043 * @v: pointer of type ecore_atomic_t 4044 * @a: the amount to dec from v... 4045 * @u: ...if (v - a) is more or equal than u. 4046 * 4047 * returns TRUE if (v - a) was more or equal than u, and FALSE 4048 * otherwise. 
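 *
 * Usage sketch (mirrors ecore_credit_pool_get() below, where @v is the
 * pool's credit counter and a failed attempt leaves the pool untouched):
 *
 *	if (!__atomic_dec_ifmoe(&o->credit, cnt, 0))
 *		return FALSE;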
4049 */ 4050 static inline bool __atomic_dec_ifmoe(ecore_atomic_t *v, int a, int u) 4051 { 4052 int c, old; 4053 4054 c = ECORE_ATOMIC_READ(v); 4055 for (;;) { 4056 if (ECORE_UNLIKELY(c - a < u)) 4057 return FALSE; 4058 4059 old = ECORE_ATOMIC_CMPXCHG((v), c, c - a); 4060 if (ECORE_LIKELY(old == c)) 4061 break; 4062 c = old; 4063 } 4064 4065 return TRUE; 4066 } 4067 4068 static bool ecore_credit_pool_get(struct ecore_credit_pool_obj *o, int cnt) 4069 { 4070 bool rc; 4071 4072 ECORE_SMP_MB(); 4073 rc = __atomic_dec_ifmoe(&o->credit, cnt, 0); 4074 ECORE_SMP_MB(); 4075 4076 return rc; 4077 } 4078 4079 static bool ecore_credit_pool_put(struct ecore_credit_pool_obj *o, int cnt) 4080 { 4081 bool rc; 4082 4083 ECORE_SMP_MB(); 4084 4085 /* Don't let to refill if credit + cnt > pool_sz */ 4086 rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1); 4087 4088 ECORE_SMP_MB(); 4089 4090 return rc; 4091 } 4092 4093 static int ecore_credit_pool_check(struct ecore_credit_pool_obj *o) 4094 { 4095 int cur_credit; 4096 4097 ECORE_SMP_MB(); 4098 cur_credit = ECORE_ATOMIC_READ(&o->credit); 4099 4100 return cur_credit; 4101 } 4102 4103 static bool ecore_credit_pool_always_TRUE(struct ecore_credit_pool_obj *o, 4104 int cnt) 4105 { 4106 return TRUE; 4107 } 4108 4109 static bool ecore_credit_pool_get_entry( 4110 struct ecore_credit_pool_obj *o, 4111 int *offset) 4112 { 4113 int idx, vec, i; 4114 4115 *offset = -1; 4116 4117 /* Find "internal cam-offset" then add to base for this object... */ 4118 for (vec = 0; vec < ECORE_POOL_VEC_SIZE; vec++) { 4119 4120 /* Skip the current vector if there are no free entries in it */ 4121 if (!o->pool_mirror[vec]) 4122 continue; 4123 4124 /* If we've got here we are going to find a free entry */ 4125 for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0; 4126 i < BIT_VEC64_ELEM_SZ; idx++, i++) 4127 4128 if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) { 4129 /* Got one!! */ 4130 BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx); 4131 *offset = o->base_pool_offset + idx; 4132 return TRUE; 4133 } 4134 } 4135 4136 return FALSE; 4137 } 4138 4139 static bool ecore_credit_pool_put_entry( 4140 struct ecore_credit_pool_obj *o, 4141 int offset) 4142 { 4143 if (offset < o->base_pool_offset) 4144 return FALSE; 4145 4146 offset -= o->base_pool_offset; 4147 4148 if (offset >= o->pool_sz) 4149 return FALSE; 4150 4151 /* Return the entry to the pool */ 4152 BIT_VEC64_SET_BIT(o->pool_mirror, offset); 4153 4154 return TRUE; 4155 } 4156 4157 static bool ecore_credit_pool_put_entry_always_TRUE( 4158 struct ecore_credit_pool_obj *o, 4159 int offset) 4160 { 4161 return TRUE; 4162 } 4163 4164 static bool ecore_credit_pool_get_entry_always_TRUE( 4165 struct ecore_credit_pool_obj *o, 4166 int *offset) 4167 { 4168 *offset = -1; 4169 return TRUE; 4170 } 4171 /** 4172 * ecore_init_credit_pool - initialize credit pool internals. 4173 * 4174 * @p: 4175 * @base: Base entry in the CAM to use. 4176 * @credit: pool size. 4177 * 4178 * If base is negative no CAM entries handling will be performed. 4179 * If credit is negative pool operations will always succeed (unlimited pool). 
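 *
 * Illustrative calls, taken from the pool-init code below:
 *	ecore_init_credit_pool(p, func_id * cam_sz, cam_sz)  - per-function CAM window
 *	ecore_init_credit_pool(p, -1, cam_sz)                - credit only, no CAM entry handling
 *	ecore_init_credit_pool(p, 0, -1)                     - unlimited pool (E1x VLAN credit)
 *	ecore_init_credit_pool(p, 0, 0)                      - empty pool, blocks all operations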
4180 * 4181 */ 4182 static inline void ecore_init_credit_pool(struct ecore_credit_pool_obj *p, 4183 int base, int credit) 4184 { 4185 /* Zero the object first */ 4186 ECORE_MEMSET(p, 0, sizeof(*p)); 4187 4188 /* Set the table to all 1s */ 4189 ECORE_MEMSET(&p->pool_mirror, 0xff, sizeof(p->pool_mirror)); 4190 4191 /* Init a pool as full */ 4192 ECORE_ATOMIC_SET(&p->credit, credit); 4193 4194 /* The total poll size */ 4195 p->pool_sz = credit; 4196 4197 p->base_pool_offset = base; 4198 4199 /* Commit the change */ 4200 ECORE_SMP_MB(); 4201 4202 p->check = ecore_credit_pool_check; 4203 4204 /* if pool credit is negative - disable the checks */ 4205 if (credit >= 0) { 4206 p->put = ecore_credit_pool_put; 4207 p->get = ecore_credit_pool_get; 4208 p->put_entry = ecore_credit_pool_put_entry; 4209 p->get_entry = ecore_credit_pool_get_entry; 4210 } else { 4211 p->put = ecore_credit_pool_always_TRUE; 4212 p->get = ecore_credit_pool_always_TRUE; 4213 p->put_entry = ecore_credit_pool_put_entry_always_TRUE; 4214 p->get_entry = ecore_credit_pool_get_entry_always_TRUE; 4215 } 4216 4217 /* If base is negative - disable entries handling */ 4218 if (base < 0) { 4219 p->put_entry = ecore_credit_pool_put_entry_always_TRUE; 4220 p->get_entry = ecore_credit_pool_get_entry_always_TRUE; 4221 } 4222 } 4223 4224 void ecore_init_mac_credit_pool(struct bxe_softc *sc, 4225 struct ecore_credit_pool_obj *p, uint8_t func_id, 4226 uint8_t func_num) 4227 { 4228 /* TODO: this will be defined in consts as well... */ 4229 #define ECORE_CAM_SIZE_EMUL 5 4230 4231 int cam_sz; 4232 4233 if (CHIP_IS_E1(sc)) { 4234 /* In E1, Multicast is saved in cam... */ 4235 if (!CHIP_REV_IS_SLOW(sc)) 4236 cam_sz = (MAX_MAC_CREDIT_E1 / 2) - ECORE_MAX_MULTICAST; 4237 else 4238 cam_sz = ECORE_CAM_SIZE_EMUL - ECORE_MAX_EMUL_MULTI; 4239 4240 ecore_init_credit_pool(p, func_id * cam_sz, cam_sz); 4241 4242 } else if (CHIP_IS_E1H(sc)) { 4243 /* CAM credit is equally divided between all active functions 4244 * on the PORT!. 4245 */ 4246 if ((func_num > 0)) { 4247 if (!CHIP_REV_IS_SLOW(sc)) 4248 cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num)); 4249 else 4250 cam_sz = ECORE_CAM_SIZE_EMUL; 4251 ecore_init_credit_pool(p, func_id * cam_sz, cam_sz); 4252 } else { 4253 /* this should never happen! Block MAC operations. */ 4254 ecore_init_credit_pool(p, 0, 0); 4255 } 4256 4257 } else { 4258 4259 /* 4260 * CAM credit is equaly divided between all active functions 4261 * on the PATH. 4262 */ 4263 if ((func_num > 1)) { 4264 if (!CHIP_REV_IS_SLOW(sc)) 4265 cam_sz = (MAX_MAC_CREDIT_E2 4266 - GET_NUM_VFS_PER_PATH(sc)) 4267 / func_num 4268 + GET_NUM_VFS_PER_PF(sc); 4269 else 4270 cam_sz = ECORE_CAM_SIZE_EMUL; 4271 4272 /* No need for CAM entries handling for 57712 and 4273 * newer. 4274 */ 4275 ecore_init_credit_pool(p, -1, cam_sz); 4276 } else if (func_num == 1) { 4277 if (!CHIP_REV_IS_SLOW(sc)) 4278 cam_sz = MAX_MAC_CREDIT_E2; 4279 else 4280 cam_sz = ECORE_CAM_SIZE_EMUL; 4281 4282 /* No need for CAM entries handling for 57712 and 4283 * newer. 4284 */ 4285 ecore_init_credit_pool(p, -1, cam_sz); 4286 } else { 4287 /* this should never happen! Block MAC operations. 
*/ 4288 ecore_init_credit_pool(p, 0, 0); 4289 } 4290 } 4291 } 4292 4293 void ecore_init_vlan_credit_pool(struct bxe_softc *sc, 4294 struct ecore_credit_pool_obj *p, 4295 uint8_t func_id, 4296 uint8_t func_num) 4297 { 4298 if (CHIP_IS_E1x(sc)) { 4299 /* There is no VLAN credit in HW on 57710 and 57711 only 4300 * MAC / MAC-VLAN can be set 4301 */ 4302 ecore_init_credit_pool(p, 0, -1); 4303 } else { 4304 /* CAM credit is equally divided between all active functions 4305 * on the PATH. 4306 */ 4307 if (func_num > 0) { 4308 int credit = MAX_VLAN_CREDIT_E2 / func_num; 4309 ecore_init_credit_pool(p, func_id * credit, credit); 4310 } else 4311 /* this should never happen! Block VLAN operations. */ 4312 ecore_init_credit_pool(p, 0, 0); 4313 } 4314 } 4315 4316 /****************** RSS Configuration ******************/ 4317 4318 /** 4319 * ecore_setup_rss - configure RSS 4320 * 4321 * @sc: device handle 4322 * @p: rss configuration 4323 * 4324 * sends on UPDATE ramrod for that matter. 4325 */ 4326 static int ecore_setup_rss(struct bxe_softc *sc, 4327 struct ecore_config_rss_params *p) 4328 { 4329 struct ecore_rss_config_obj *o = p->rss_obj; 4330 struct ecore_raw_obj *r = &o->raw; 4331 struct eth_rss_update_ramrod_data *data = 4332 (struct eth_rss_update_ramrod_data *)(r->rdata); 4333 uint8_t rss_mode = 0; 4334 int rc; 4335 4336 ECORE_MEMSET(data, 0, sizeof(*data)); 4337 4338 ECORE_MSG(sc, "Configuring RSS\n"); 4339 4340 /* Set an echo field */ 4341 data->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) | 4342 (r->state << ECORE_SWCID_SHIFT)); 4343 4344 /* RSS mode */ 4345 if (ECORE_TEST_BIT(ECORE_RSS_MODE_DISABLED, &p->rss_flags)) 4346 rss_mode = ETH_RSS_MODE_DISABLED; 4347 else if (ECORE_TEST_BIT(ECORE_RSS_MODE_REGULAR, &p->rss_flags)) 4348 rss_mode = ETH_RSS_MODE_REGULAR; 4349 #if defined(__VMKLNX__) && (VMWARE_ESX_DDK_VERSION < 55000) /* ! 
BNX2X_UPSTREAM */ 4350 else if (ECORE_TEST_BIT(ECORE_RSS_MODE_ESX51, &p->rss_flags)) 4351 rss_mode = ETH_RSS_MODE_ESX51; 4352 #endif 4353 4354 data->rss_mode = rss_mode; 4355 4356 ECORE_MSG(sc, "rss_mode=%d\n", rss_mode); 4357 4358 /* RSS capabilities */ 4359 if (ECORE_TEST_BIT(ECORE_RSS_IPV4, &p->rss_flags)) 4360 data->capabilities |= 4361 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY; 4362 4363 if (ECORE_TEST_BIT(ECORE_RSS_IPV4_TCP, &p->rss_flags)) 4364 data->capabilities |= 4365 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY; 4366 4367 if (ECORE_TEST_BIT(ECORE_RSS_IPV4_UDP, &p->rss_flags)) 4368 data->capabilities |= 4369 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY; 4370 4371 if (ECORE_TEST_BIT(ECORE_RSS_IPV6, &p->rss_flags)) 4372 data->capabilities |= 4373 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY; 4374 4375 if (ECORE_TEST_BIT(ECORE_RSS_IPV6_TCP, &p->rss_flags)) 4376 data->capabilities |= 4377 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY; 4378 4379 if (ECORE_TEST_BIT(ECORE_RSS_IPV6_UDP, &p->rss_flags)) 4380 data->capabilities |= 4381 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY; 4382 4383 if (ECORE_TEST_BIT(ECORE_RSS_TUNNELING, &p->rss_flags)) { 4384 data->udp_4tuple_dst_port_mask = ECORE_CPU_TO_LE16(p->tunnel_mask); 4385 data->udp_4tuple_dst_port_value = 4386 ECORE_CPU_TO_LE16(p->tunnel_value); 4387 } 4388 4389 /* Hashing mask */ 4390 data->rss_result_mask = p->rss_result_mask; 4391 4392 /* RSS engine ID */ 4393 data->rss_engine_id = o->engine_id; 4394 4395 ECORE_MSG(sc, "rss_engine_id=%d\n", data->rss_engine_id); 4396 4397 /* Indirection table */ 4398 ECORE_MEMCPY(data->indirection_table, p->ind_table, 4399 T_ETH_INDIRECTION_TABLE_SIZE); 4400 4401 /* Remember the last configuration */ 4402 ECORE_MEMCPY(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE); 4403 4404 4405 /* RSS keys */ 4406 if (ECORE_TEST_BIT(ECORE_RSS_SET_SRCH, &p->rss_flags)) { 4407 ECORE_MEMCPY(&data->rss_key[0], &p->rss_key[0], 4408 sizeof(data->rss_key)); 4409 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY; 4410 } 4411 4412 /* No need for an explicit memory barrier here as long we would 4413 * need to ensure the ordering of writing to the SPQ element 4414 * and updating of the SPQ producer which involves a memory 4415 * read and we will have to put a full memory barrier there 4416 * (inside ecore_sp_post()). 
4417 */ 4418 4419 /* Send a ramrod */ 4420 rc = ecore_sp_post(sc, 4421 RAMROD_CMD_ID_ETH_RSS_UPDATE, 4422 r->cid, 4423 r->rdata_mapping, 4424 ETH_CONNECTION_TYPE); 4425 4426 if (rc < 0) 4427 return rc; 4428 4429 return ECORE_PENDING; 4430 } 4431 4432 void ecore_get_rss_ind_table(struct ecore_rss_config_obj *rss_obj, 4433 uint8_t *ind_table) 4434 { 4435 ECORE_MEMCPY(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table)); 4436 } 4437 4438 int ecore_config_rss(struct bxe_softc *sc, 4439 struct ecore_config_rss_params *p) 4440 { 4441 int rc; 4442 struct ecore_rss_config_obj *o = p->rss_obj; 4443 struct ecore_raw_obj *r = &o->raw; 4444 4445 /* Do nothing if only driver cleanup was requested */ 4446 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { 4447 ECORE_MSG(sc, "Not configuring RSS ramrod_flags=%lx\n", 4448 p->ramrod_flags); 4449 return ECORE_SUCCESS; 4450 } 4451 4452 r->set_pending(r); 4453 4454 rc = o->config_rss(sc, p); 4455 if (rc < 0) { 4456 r->clear_pending(r); 4457 return rc; 4458 } 4459 4460 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) 4461 rc = r->wait_comp(sc, r); 4462 4463 return rc; 4464 } 4465 4466 void ecore_init_rss_config_obj(struct bxe_softc *sc, 4467 struct ecore_rss_config_obj *rss_obj, 4468 uint8_t cl_id, uint32_t cid, uint8_t func_id, uint8_t engine_id, 4469 void *rdata, ecore_dma_addr_t rdata_mapping, 4470 int state, unsigned long *pstate, 4471 ecore_obj_type type) 4472 { 4473 ecore_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata, 4474 rdata_mapping, state, pstate, type); 4475 4476 rss_obj->engine_id = engine_id; 4477 rss_obj->config_rss = ecore_setup_rss; 4478 } 4479 4480 int validate_vlan_mac(struct bxe_softc *sc, 4481 struct ecore_vlan_mac_obj *vlan_mac) 4482 { 4483 if (!vlan_mac->get_n_elements) { 4484 ECORE_ERR("vlan mac object was not intialized\n"); 4485 return ECORE_INVAL; 4486 } 4487 return 0; 4488 } 4489 4490 /********************** Queue state object ***********************************/ 4491 4492 /** 4493 * ecore_queue_state_change - perform Queue state change transition 4494 * 4495 * @sc: device handle 4496 * @params: parameters to perform the transition 4497 * 4498 * returns 0 in case of successfully completed transition, negative error 4499 * code in case of failure, positive (EBUSY) value if there is a completion 4500 * to that is still pending (possible only if RAMROD_COMP_WAIT is 4501 * not set in params->ramrod_flags for asynchronous commands). 4502 * 4503 */ 4504 int ecore_queue_state_change(struct bxe_softc *sc, 4505 struct ecore_queue_state_params *params) 4506 { 4507 struct ecore_queue_sp_obj *o = params->q_obj; 4508 int rc, pending_bit; 4509 unsigned long *pending = &o->pending; 4510 4511 /* Check that the requested transition is legal */ 4512 rc = o->check_transition(sc, o, params); 4513 if (rc) { 4514 ECORE_ERR("check transition returned an error. 
rc %d\n", rc); 4515 return ECORE_INVAL; 4516 } 4517 4518 /* Set "pending" bit */ 4519 ECORE_MSG(sc, "pending bit was=%lx\n", o->pending); 4520 pending_bit = o->set_pending(o, params); 4521 ECORE_MSG(sc, "pending bit now=%lx\n", o->pending); 4522 4523 /* Don't send a command if only driver cleanup was requested */ 4524 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) 4525 o->complete_cmd(sc, o, pending_bit); 4526 else { 4527 /* Send a ramrod */ 4528 rc = o->send_cmd(sc, params); 4529 if (rc) { 4530 o->next_state = ECORE_Q_STATE_MAX; 4531 ECORE_CLEAR_BIT(pending_bit, pending); 4532 ECORE_SMP_MB_AFTER_CLEAR_BIT(); 4533 return rc; 4534 } 4535 4536 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, ¶ms->ramrod_flags)) { 4537 rc = o->wait_comp(sc, o, pending_bit); 4538 if (rc) 4539 return rc; 4540 4541 return ECORE_SUCCESS; 4542 } 4543 } 4544 4545 return ECORE_RET_PENDING(pending_bit, pending); 4546 } 4547 4548 static int ecore_queue_set_pending(struct ecore_queue_sp_obj *obj, 4549 struct ecore_queue_state_params *params) 4550 { 4551 enum ecore_queue_cmd cmd = params->cmd, bit; 4552 4553 /* ACTIVATE and DEACTIVATE commands are implemented on top of 4554 * UPDATE command. 4555 */ 4556 if ((cmd == ECORE_Q_CMD_ACTIVATE) || 4557 (cmd == ECORE_Q_CMD_DEACTIVATE)) 4558 bit = ECORE_Q_CMD_UPDATE; 4559 else 4560 bit = cmd; 4561 4562 ECORE_SET_BIT(bit, &obj->pending); 4563 return bit; 4564 } 4565 4566 static int ecore_queue_wait_comp(struct bxe_softc *sc, 4567 struct ecore_queue_sp_obj *o, 4568 enum ecore_queue_cmd cmd) 4569 { 4570 return ecore_state_wait(sc, cmd, &o->pending); 4571 } 4572 4573 /** 4574 * ecore_queue_comp_cmd - complete the state change command. 4575 * 4576 * @sc: device handle 4577 * @o: 4578 * @cmd: 4579 * 4580 * Checks that the arrived completion is expected. 4581 */ 4582 static int ecore_queue_comp_cmd(struct bxe_softc *sc, 4583 struct ecore_queue_sp_obj *o, 4584 enum ecore_queue_cmd cmd) 4585 { 4586 unsigned long cur_pending = o->pending; 4587 4588 if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) { 4589 ECORE_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n", 4590 cmd, o->cids[ECORE_PRIMARY_CID_INDEX], 4591 o->state, cur_pending, o->next_state); 4592 return ECORE_INVAL; 4593 } 4594 4595 if (o->next_tx_only >= o->max_cos) 4596 /* >= because tx only must always be smaller than cos since the 4597 * primary connection supports COS 0 4598 */ 4599 ECORE_ERR("illegal value for next tx_only: %d. max cos was %d", 4600 o->next_tx_only, o->max_cos); 4601 4602 ECORE_MSG(sc, 4603 "Completing command %d for queue %d, setting state to %d\n", 4604 cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->next_state); 4605 4606 if (o->next_tx_only) /* print num tx-only if any exist */ 4607 ECORE_MSG(sc, "primary cid %d: num tx-only cons %d\n", 4608 o->cids[ECORE_PRIMARY_CID_INDEX], o->next_tx_only); 4609 4610 o->state = o->next_state; 4611 o->num_tx_only = o->next_tx_only; 4612 o->next_state = ECORE_Q_STATE_MAX; 4613 4614 /* It's important that o->state and o->next_state are 4615 * updated before o->pending. 
4616 */ 4617 wmb(); 4618 4619 ECORE_CLEAR_BIT(cmd, &o->pending); 4620 ECORE_SMP_MB_AFTER_CLEAR_BIT(); 4621 4622 return ECORE_SUCCESS; 4623 } 4624 4625 static void ecore_q_fill_setup_data_e2(struct bxe_softc *sc, 4626 struct ecore_queue_state_params *cmd_params, 4627 struct client_init_ramrod_data *data) 4628 { 4629 struct ecore_queue_setup_params *params = &cmd_params->params.setup; 4630 4631 /* Rx data */ 4632 4633 /* IPv6 TPA supported for E2 and above only */ 4634 data->rx.tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_IPV6, 4635 ¶ms->flags) * 4636 CLIENT_INIT_RX_DATA_TPA_EN_IPV6; 4637 } 4638 4639 static void ecore_q_fill_init_general_data(struct bxe_softc *sc, 4640 struct ecore_queue_sp_obj *o, 4641 struct ecore_general_setup_params *params, 4642 struct client_init_general_data *gen_data, 4643 unsigned long *flags) 4644 { 4645 gen_data->client_id = o->cl_id; 4646 4647 if (ECORE_TEST_BIT(ECORE_Q_FLG_STATS, flags)) { 4648 gen_data->statistics_counter_id = 4649 params->stat_id; 4650 gen_data->statistics_en_flg = 1; 4651 gen_data->statistics_zero_flg = 4652 ECORE_TEST_BIT(ECORE_Q_FLG_ZERO_STATS, flags); 4653 } else 4654 gen_data->statistics_counter_id = 4655 DISABLE_STATISTIC_COUNTER_ID_VALUE; 4656 4657 gen_data->is_fcoe_flg = ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, 4658 flags); 4659 gen_data->activate_flg = ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE, 4660 flags); 4661 gen_data->sp_client_id = params->spcl_id; 4662 gen_data->mtu = ECORE_CPU_TO_LE16(params->mtu); 4663 gen_data->func_id = o->func_id; 4664 4665 gen_data->cos = params->cos; 4666 4667 gen_data->traffic_type = 4668 ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, flags) ? 4669 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW; 4670 4671 ECORE_MSG(sc, "flags: active %d, cos %d, stats en %d\n", 4672 gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg); 4673 } 4674 4675 static void ecore_q_fill_init_tx_data(struct ecore_queue_sp_obj *o, 4676 struct ecore_txq_setup_params *params, 4677 struct client_init_tx_data *tx_data, 4678 unsigned long *flags) 4679 { 4680 tx_data->enforce_security_flg = 4681 ECORE_TEST_BIT(ECORE_Q_FLG_TX_SEC, flags); 4682 tx_data->default_vlan = 4683 ECORE_CPU_TO_LE16(params->default_vlan); 4684 tx_data->default_vlan_flg = 4685 ECORE_TEST_BIT(ECORE_Q_FLG_DEF_VLAN, flags); 4686 tx_data->tx_switching_flg = 4687 ECORE_TEST_BIT(ECORE_Q_FLG_TX_SWITCH, flags); 4688 tx_data->anti_spoofing_flg = 4689 ECORE_TEST_BIT(ECORE_Q_FLG_ANTI_SPOOF, flags); 4690 tx_data->force_default_pri_flg = 4691 ECORE_TEST_BIT(ECORE_Q_FLG_FORCE_DEFAULT_PRI, flags); 4692 tx_data->refuse_outband_vlan_flg = 4693 ECORE_TEST_BIT(ECORE_Q_FLG_REFUSE_OUTBAND_VLAN, flags); 4694 tx_data->tunnel_lso_inc_ip_id = 4695 ECORE_TEST_BIT(ECORE_Q_FLG_TUN_INC_INNER_IP_ID, flags); 4696 tx_data->tunnel_non_lso_pcsum_location = 4697 ECORE_TEST_BIT(ECORE_Q_FLG_PCSUM_ON_PKT, flags) ? 
CSUM_ON_PKT : 4698 CSUM_ON_BD; 4699 4700 tx_data->tx_status_block_id = params->fw_sb_id; 4701 tx_data->tx_sb_index_number = params->sb_cq_index; 4702 tx_data->tss_leading_client_id = params->tss_leading_cl_id; 4703 4704 tx_data->tx_bd_page_base.lo = 4705 ECORE_CPU_TO_LE32(U64_LO(params->dscr_map)); 4706 tx_data->tx_bd_page_base.hi = 4707 ECORE_CPU_TO_LE32(U64_HI(params->dscr_map)); 4708 4709 /* Don't configure any Tx switching mode during queue SETUP */ 4710 tx_data->state = 0; 4711 } 4712 4713 static void ecore_q_fill_init_pause_data(struct ecore_queue_sp_obj *o, 4714 struct rxq_pause_params *params, 4715 struct client_init_rx_data *rx_data) 4716 { 4717 /* flow control data */ 4718 rx_data->cqe_pause_thr_low = ECORE_CPU_TO_LE16(params->rcq_th_lo); 4719 rx_data->cqe_pause_thr_high = ECORE_CPU_TO_LE16(params->rcq_th_hi); 4720 rx_data->bd_pause_thr_low = ECORE_CPU_TO_LE16(params->bd_th_lo); 4721 rx_data->bd_pause_thr_high = ECORE_CPU_TO_LE16(params->bd_th_hi); 4722 rx_data->sge_pause_thr_low = ECORE_CPU_TO_LE16(params->sge_th_lo); 4723 rx_data->sge_pause_thr_high = ECORE_CPU_TO_LE16(params->sge_th_hi); 4724 rx_data->rx_cos_mask = ECORE_CPU_TO_LE16(params->pri_map); 4725 } 4726 4727 static void ecore_q_fill_init_rx_data(struct ecore_queue_sp_obj *o, 4728 struct ecore_rxq_setup_params *params, 4729 struct client_init_rx_data *rx_data, 4730 unsigned long *flags) 4731 { 4732 rx_data->tpa_en = ECORE_TEST_BIT(ECORE_Q_FLG_TPA, flags) * 4733 CLIENT_INIT_RX_DATA_TPA_EN_IPV4; 4734 rx_data->tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_GRO, flags) * 4735 CLIENT_INIT_RX_DATA_TPA_MODE; 4736 rx_data->vmqueue_mode_en_flg = 0; 4737 4738 rx_data->extra_data_over_sgl_en_flg = 4739 ECORE_TEST_BIT(ECORE_Q_FLG_OOO, flags); 4740 rx_data->cache_line_alignment_log_size = 4741 params->cache_line_log; 4742 rx_data->enable_dynamic_hc = 4743 ECORE_TEST_BIT(ECORE_Q_FLG_DHC, flags); 4744 rx_data->max_sges_for_packet = params->max_sges_pkt; 4745 rx_data->client_qzone_id = params->cl_qzone_id; 4746 rx_data->max_agg_size = ECORE_CPU_TO_LE16(params->tpa_agg_sz); 4747 4748 /* Always start in DROP_ALL mode */ 4749 rx_data->state = ECORE_CPU_TO_LE16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL | 4750 CLIENT_INIT_RX_DATA_MCAST_DROP_ALL); 4751 4752 /* We don't set drop flags */ 4753 rx_data->drop_ip_cs_err_flg = 0; 4754 rx_data->drop_tcp_cs_err_flg = 0; 4755 rx_data->drop_ttl0_flg = 0; 4756 rx_data->drop_udp_cs_err_flg = 0; 4757 rx_data->inner_vlan_removal_enable_flg = 4758 ECORE_TEST_BIT(ECORE_Q_FLG_VLAN, flags); 4759 rx_data->outer_vlan_removal_enable_flg = 4760 ECORE_TEST_BIT(ECORE_Q_FLG_OV, flags); 4761 rx_data->status_block_id = params->fw_sb_id; 4762 rx_data->rx_sb_index_number = params->sb_cq_index; 4763 rx_data->max_tpa_queues = params->max_tpa_queues; 4764 rx_data->max_bytes_on_bd = ECORE_CPU_TO_LE16(params->buf_sz); 4765 rx_data->sge_buff_size = ECORE_CPU_TO_LE16(params->sge_buf_sz); 4766 rx_data->bd_page_base.lo = 4767 ECORE_CPU_TO_LE32(U64_LO(params->dscr_map)); 4768 rx_data->bd_page_base.hi = 4769 ECORE_CPU_TO_LE32(U64_HI(params->dscr_map)); 4770 rx_data->sge_page_base.lo = 4771 ECORE_CPU_TO_LE32(U64_LO(params->sge_map)); 4772 rx_data->sge_page_base.hi = 4773 ECORE_CPU_TO_LE32(U64_HI(params->sge_map)); 4774 rx_data->cqe_page_base.lo = 4775 ECORE_CPU_TO_LE32(U64_LO(params->rcq_map)); 4776 rx_data->cqe_page_base.hi = 4777 ECORE_CPU_TO_LE32(U64_HI(params->rcq_map)); 4778 rx_data->is_leading_rss = ECORE_TEST_BIT(ECORE_Q_FLG_LEADING_RSS, 4779 flags); 4780 4781 if (ECORE_TEST_BIT(ECORE_Q_FLG_MCAST, flags)) { 4782 
rx_data->approx_mcast_engine_id = params->mcast_engine_id; 4783 rx_data->is_approx_mcast = 1; 4784 } 4785 4786 rx_data->rss_engine_id = params->rss_engine_id; 4787 4788 /* silent vlan removal */ 4789 rx_data->silent_vlan_removal_flg = 4790 ECORE_TEST_BIT(ECORE_Q_FLG_SILENT_VLAN_REM, flags); 4791 rx_data->silent_vlan_value = 4792 ECORE_CPU_TO_LE16(params->silent_removal_value); 4793 rx_data->silent_vlan_mask = 4794 ECORE_CPU_TO_LE16(params->silent_removal_mask); 4795 } 4796 4797 /* initialize the general, tx and rx parts of a queue object */ 4798 static void ecore_q_fill_setup_data_cmn(struct bxe_softc *sc, 4799 struct ecore_queue_state_params *cmd_params, 4800 struct client_init_ramrod_data *data) 4801 { 4802 ecore_q_fill_init_general_data(sc, cmd_params->q_obj, 4803 &cmd_params->params.setup.gen_params, 4804 &data->general, 4805 &cmd_params->params.setup.flags); 4806 4807 ecore_q_fill_init_tx_data(cmd_params->q_obj, 4808 &cmd_params->params.setup.txq_params, 4809 &data->tx, 4810 &cmd_params->params.setup.flags); 4811 4812 ecore_q_fill_init_rx_data(cmd_params->q_obj, 4813 &cmd_params->params.setup.rxq_params, 4814 &data->rx, 4815 &cmd_params->params.setup.flags); 4816 4817 ecore_q_fill_init_pause_data(cmd_params->q_obj, 4818 &cmd_params->params.setup.pause_params, 4819 &data->rx); 4820 } 4821 4822 /* initialize the general and tx parts of a tx-only queue object */ 4823 static void ecore_q_fill_setup_tx_only(struct bxe_softc *sc, 4824 struct ecore_queue_state_params *cmd_params, 4825 struct tx_queue_init_ramrod_data *data) 4826 { 4827 ecore_q_fill_init_general_data(sc, cmd_params->q_obj, 4828 &cmd_params->params.tx_only.gen_params, 4829 &data->general, 4830 &cmd_params->params.tx_only.flags); 4831 4832 ecore_q_fill_init_tx_data(cmd_params->q_obj, 4833 &cmd_params->params.tx_only.txq_params, 4834 &data->tx, 4835 &cmd_params->params.tx_only.flags); 4836 4837 ECORE_MSG(sc, "cid %d, tx bd page lo %x hi %x", 4838 cmd_params->q_obj->cids[0], 4839 data->tx.tx_bd_page_base.lo, 4840 data->tx.tx_bd_page_base.hi); 4841 } 4842 4843 /** 4844 * ecore_q_init - init HW/FW queue 4845 * 4846 * @sc: device handle 4847 * @params: 4848 * 4849 * HW/FW initial Queue configuration: 4850 * - HC: Rx and Tx 4851 * - CDU context validation 4852 * 4853 */ 4854 static inline int ecore_q_init(struct bxe_softc *sc, 4855 struct ecore_queue_state_params *params) 4856 { 4857 struct ecore_queue_sp_obj *o = params->q_obj; 4858 struct ecore_queue_init_params *init = ¶ms->params.init; 4859 uint16_t hc_usec; 4860 uint8_t cos; 4861 4862 /* Tx HC configuration */ 4863 if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_TX, &o->type) && 4864 ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->tx.flags)) { 4865 hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0; 4866 4867 ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->tx.fw_sb_id, 4868 init->tx.sb_cq_index, 4869 !ECORE_TEST_BIT(ECORE_Q_FLG_HC_EN, &init->tx.flags), 4870 hc_usec); 4871 } 4872 4873 /* Rx HC configuration */ 4874 if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_RX, &o->type) && 4875 ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->rx.flags)) { 4876 hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0; 4877 4878 ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->rx.fw_sb_id, 4879 init->rx.sb_cq_index, 4880 !ECORE_TEST_BIT(ECORE_Q_FLG_HC_EN, &init->rx.flags), 4881 hc_usec); 4882 } 4883 4884 /* Set CDU context validation values */ 4885 for (cos = 0; cos < o->max_cos; cos++) { 4886 ECORE_MSG(sc, "setting context validation. 
cid %d, cos %d\n", 4887 o->cids[cos], cos); 4888 ECORE_MSG(sc, "context pointer %p\n", init->cxts[cos]); 4889 ECORE_SET_CTX_VALIDATION(sc, init->cxts[cos], o->cids[cos]); 4890 } 4891 4892 /* As no ramrod is sent, complete the command immediately */ 4893 o->complete_cmd(sc, o, ECORE_Q_CMD_INIT); 4894 4895 ECORE_MMIOWB(); 4896 ECORE_SMP_MB(); 4897 4898 return ECORE_SUCCESS; 4899 } 4900 4901 static inline int ecore_q_send_setup_e1x(struct bxe_softc *sc, 4902 struct ecore_queue_state_params *params) 4903 { 4904 struct ecore_queue_sp_obj *o = params->q_obj; 4905 struct client_init_ramrod_data *rdata = 4906 (struct client_init_ramrod_data *)o->rdata; 4907 ecore_dma_addr_t data_mapping = o->rdata_mapping; 4908 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP; 4909 4910 /* Clear the ramrod data */ 4911 ECORE_MEMSET(rdata, 0, sizeof(*rdata)); 4912 4913 /* Fill the ramrod data */ 4914 ecore_q_fill_setup_data_cmn(sc, params, rdata); 4915 4916 /* No need for an explicit memory barrier here as long we would 4917 * need to ensure the ordering of writing to the SPQ element 4918 * and updating of the SPQ producer which involves a memory 4919 * read and we will have to put a full memory barrier there 4920 * (inside ecore_sp_post()). 4921 */ 4922 4923 return ecore_sp_post(sc, 4924 ramrod, 4925 o->cids[ECORE_PRIMARY_CID_INDEX], 4926 data_mapping, 4927 ETH_CONNECTION_TYPE); 4928 } 4929 4930 static inline int ecore_q_send_setup_e2(struct bxe_softc *sc, 4931 struct ecore_queue_state_params *params) 4932 { 4933 struct ecore_queue_sp_obj *o = params->q_obj; 4934 struct client_init_ramrod_data *rdata = 4935 (struct client_init_ramrod_data *)o->rdata; 4936 ecore_dma_addr_t data_mapping = o->rdata_mapping; 4937 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP; 4938 4939 /* Clear the ramrod data */ 4940 ECORE_MEMSET(rdata, 0, sizeof(*rdata)); 4941 4942 /* Fill the ramrod data */ 4943 ecore_q_fill_setup_data_cmn(sc, params, rdata); 4944 ecore_q_fill_setup_data_e2(sc, params, rdata); 4945 4946 /* No need for an explicit memory barrier here as long we would 4947 * need to ensure the ordering of writing to the SPQ element 4948 * and updating of the SPQ producer which involves a memory 4949 * read and we will have to put a full memory barrier there 4950 * (inside ecore_sp_post()). 
4951 */ 4952 4953 return ecore_sp_post(sc, 4954 ramrod, 4955 o->cids[ECORE_PRIMARY_CID_INDEX], 4956 data_mapping, 4957 ETH_CONNECTION_TYPE); 4958 } 4959 4960 static inline int ecore_q_send_setup_tx_only(struct bxe_softc *sc, 4961 struct ecore_queue_state_params *params) 4962 { 4963 struct ecore_queue_sp_obj *o = params->q_obj; 4964 struct tx_queue_init_ramrod_data *rdata = 4965 (struct tx_queue_init_ramrod_data *)o->rdata; 4966 ecore_dma_addr_t data_mapping = o->rdata_mapping; 4967 int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP; 4968 struct ecore_queue_setup_tx_only_params *tx_only_params = 4969 &params->params.tx_only; 4970 uint8_t cid_index = tx_only_params->cid_index; 4971 4972 if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &o->type)) 4973 ramrod = RAMROD_CMD_ID_ETH_FORWARD_SETUP; 4974 ECORE_MSG(sc, "sending forward tx-only ramrod"); 4975 4976 if (cid_index >= o->max_cos) { 4977 ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n", 4978 o->cl_id, cid_index); 4979 return ECORE_INVAL; 4980 } 4981 4982 ECORE_MSG(sc, "parameters received: cos: %d sp-id: %d\n", 4983 tx_only_params->gen_params.cos, 4984 tx_only_params->gen_params.spcl_id); 4985 4986 /* Clear the ramrod data */ 4987 ECORE_MEMSET(rdata, 0, sizeof(*rdata)); 4988 4989 /* Fill the ramrod data */ 4990 ecore_q_fill_setup_tx_only(sc, params, rdata); 4991 4992 ECORE_MSG(sc, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n", 4993 o->cids[cid_index], rdata->general.client_id, 4994 rdata->general.sp_client_id, rdata->general.cos); 4995 4996 /* No need for an explicit memory barrier here as long we would 4997 * need to ensure the ordering of writing to the SPQ element 4998 * and updating of the SPQ producer which involves a memory 4999 * read and we will have to put a full memory barrier there 5000 * (inside ecore_sp_post()). 5001 */ 5002 5003 return ecore_sp_post(sc, ramrod, o->cids[cid_index], 5004 data_mapping, ETH_CONNECTION_TYPE); 5005 } 5006 5007 static void ecore_q_fill_update_data(struct bxe_softc *sc, 5008 struct ecore_queue_sp_obj *obj, 5009 struct ecore_queue_update_params *params, 5010 struct client_update_ramrod_data *data) 5011 { 5012 /* Client ID of the client to update */ 5013 data->client_id = obj->cl_id; 5014 5015 /* Function ID of the client to update */ 5016 data->func_id = obj->func_id; 5017 5018 /* Default VLAN value */ 5019 data->default_vlan = ECORE_CPU_TO_LE16(params->def_vlan); 5020 5021 /* Inner VLAN stripping */ 5022 data->inner_vlan_removal_enable_flg = 5023 ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM, 5024 &params->update_flags); 5025 data->inner_vlan_removal_change_flg = 5026 ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM_CHNG, 5027 &params->update_flags); 5028 5029 /* Outer VLAN stripping */ 5030 data->outer_vlan_removal_enable_flg = 5031 ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM, 5032 &params->update_flags); 5033 data->outer_vlan_removal_change_flg = 5034 ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM_CHNG, 5035 &params->update_flags); 5036 5037 /* Drop packets that have source MAC that doesn't belong to this 5038 * Queue.
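 *
 * (As with the other fields in this routine, the value flag is paired with
 * a *_change_flg; the firmware is expected to apply a new value only when
 * the matching change flag is set, so features that are not being updated
 * keep their previous configuration.)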
5039 */ 5040 data->anti_spoofing_enable_flg = 5041 ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF, 5042 &params->update_flags); 5043 data->anti_spoofing_change_flg = 5044 ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF_CHNG, 5045 &params->update_flags); 5046 5047 /* Activate/Deactivate */ 5048 data->activate_flg = 5049 ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, &params->update_flags); 5050 data->activate_change_flg = 5051 ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG, 5052 &params->update_flags); 5053 5054 /* Enable default VLAN */ 5055 data->default_vlan_enable_flg = 5056 ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN, 5057 &params->update_flags); 5058 data->default_vlan_change_flg = 5059 ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN_CHNG, 5060 &params->update_flags); 5061 5062 /* silent vlan removal */ 5063 data->silent_vlan_change_flg = 5064 ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM_CHNG, 5065 &params->update_flags); 5066 data->silent_vlan_removal_flg = 5067 ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM, 5068 &params->update_flags); 5069 data->silent_vlan_value = ECORE_CPU_TO_LE16(params->silent_removal_value); 5070 data->silent_vlan_mask = ECORE_CPU_TO_LE16(params->silent_removal_mask); 5071 5072 /* tx switching */ 5073 data->tx_switching_flg = 5074 ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING, 5075 &params->update_flags); 5076 data->tx_switching_change_flg = 5077 ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING_CHNG, 5078 &params->update_flags); 5079 } 5080 5081 static inline int ecore_q_send_update(struct bxe_softc *sc, 5082 struct ecore_queue_state_params *params) 5083 { 5084 struct ecore_queue_sp_obj *o = params->q_obj; 5085 struct client_update_ramrod_data *rdata = 5086 (struct client_update_ramrod_data *)o->rdata; 5087 ecore_dma_addr_t data_mapping = o->rdata_mapping; 5088 struct ecore_queue_update_params *update_params = 5089 &params->params.update; 5090 uint8_t cid_index = update_params->cid_index; 5091 5092 if (cid_index >= o->max_cos) { 5093 ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n", 5094 o->cl_id, cid_index); 5095 return ECORE_INVAL; 5096 } 5097 5098 /* Clear the ramrod data */ 5099 ECORE_MEMSET(rdata, 0, sizeof(*rdata)); 5100 5101 /* Fill the ramrod data */ 5102 ecore_q_fill_update_data(sc, o, update_params, rdata); 5103 5104 /* No need for an explicit memory barrier here as long we would 5105 * need to ensure the ordering of writing to the SPQ element 5106 * and updating of the SPQ producer which involves a memory 5107 * read and we will have to put a full memory barrier there 5108 * (inside ecore_sp_post()). 5109 */ 5110 5111 return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_CLIENT_UPDATE, 5112 o->cids[cid_index], data_mapping, 5113 ETH_CONNECTION_TYPE); 5114 } 5115 5116 /** 5117 * ecore_q_send_deactivate - send DEACTIVATE command 5118 * 5119 * @sc: device handle 5120 * @params: 5121 * 5122 * implemented using the UPDATE command. 5123 */ 5124 static inline int ecore_q_send_deactivate(struct bxe_softc *sc, 5125 struct ecore_queue_state_params *params) 5126 { 5127 struct ecore_queue_update_params *update = &params->params.update; 5128 5129 ECORE_MEMSET(update, 0, sizeof(*update)); 5130 5131 ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags); 5132 5133 return ecore_q_send_update(sc, params); 5134 } 5135 5136 /** 5137 * ecore_q_send_activate - send ACTIVATE command 5138 * 5139 * @sc: device handle 5140 * @params: 5141 * 5142 * implemented using the UPDATE command.
5143 */ 5144 static inline int ecore_q_send_activate(struct bxe_softc *sc, 5145 struct ecore_queue_state_params *params) 5146 { 5147 struct ecore_queue_update_params *update = ¶ms->params.update; 5148 5149 ECORE_MEMSET(update, 0, sizeof(*update)); 5150 5151 ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE, &update->update_flags); 5152 ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags); 5153 5154 return ecore_q_send_update(sc, params); 5155 } 5156 5157 static inline int ecore_q_send_update_tpa(struct bxe_softc *sc, 5158 struct ecore_queue_state_params *params) 5159 { 5160 /* TODO: Not implemented yet. */ 5161 return -1; 5162 } 5163 5164 static inline int ecore_q_send_halt(struct bxe_softc *sc, 5165 struct ecore_queue_state_params *params) 5166 { 5167 struct ecore_queue_sp_obj *o = params->q_obj; 5168 5169 /* build eth_halt_ramrod_data.client_id in a big-endian friendly way */ 5170 ecore_dma_addr_t data_mapping = 0; 5171 data_mapping = (ecore_dma_addr_t)o->cl_id; 5172 5173 return ecore_sp_post(sc, 5174 RAMROD_CMD_ID_ETH_HALT, 5175 o->cids[ECORE_PRIMARY_CID_INDEX], 5176 data_mapping, 5177 ETH_CONNECTION_TYPE); 5178 } 5179 5180 static inline int ecore_q_send_cfc_del(struct bxe_softc *sc, 5181 struct ecore_queue_state_params *params) 5182 { 5183 struct ecore_queue_sp_obj *o = params->q_obj; 5184 uint8_t cid_idx = params->params.cfc_del.cid_index; 5185 5186 if (cid_idx >= o->max_cos) { 5187 ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n", 5188 o->cl_id, cid_idx); 5189 return ECORE_INVAL; 5190 } 5191 5192 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_CFC_DEL, 5193 o->cids[cid_idx], 0, 5194 NONE_CONNECTION_TYPE); 5195 } 5196 5197 static inline int ecore_q_send_terminate(struct bxe_softc *sc, 5198 struct ecore_queue_state_params *params) 5199 { 5200 struct ecore_queue_sp_obj *o = params->q_obj; 5201 uint8_t cid_index = params->params.terminate.cid_index; 5202 5203 if (cid_index >= o->max_cos) { 5204 ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n", 5205 o->cl_id, cid_index); 5206 return ECORE_INVAL; 5207 } 5208 5209 return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_TERMINATE, 5210 o->cids[cid_index], 0, 5211 ETH_CONNECTION_TYPE); 5212 } 5213 5214 static inline int ecore_q_send_empty(struct bxe_softc *sc, 5215 struct ecore_queue_state_params *params) 5216 { 5217 struct ecore_queue_sp_obj *o = params->q_obj; 5218 5219 return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_EMPTY, 5220 o->cids[ECORE_PRIMARY_CID_INDEX], 0, 5221 ETH_CONNECTION_TYPE); 5222 } 5223 5224 static inline int ecore_queue_send_cmd_cmn(struct bxe_softc *sc, 5225 struct ecore_queue_state_params *params) 5226 { 5227 switch (params->cmd) { 5228 case ECORE_Q_CMD_INIT: 5229 return ecore_q_init(sc, params); 5230 case ECORE_Q_CMD_SETUP_TX_ONLY: 5231 return ecore_q_send_setup_tx_only(sc, params); 5232 case ECORE_Q_CMD_DEACTIVATE: 5233 return ecore_q_send_deactivate(sc, params); 5234 case ECORE_Q_CMD_ACTIVATE: 5235 return ecore_q_send_activate(sc, params); 5236 case ECORE_Q_CMD_UPDATE: 5237 return ecore_q_send_update(sc, params); 5238 case ECORE_Q_CMD_UPDATE_TPA: 5239 return ecore_q_send_update_tpa(sc, params); 5240 case ECORE_Q_CMD_HALT: 5241 return ecore_q_send_halt(sc, params); 5242 case ECORE_Q_CMD_CFC_DEL: 5243 return ecore_q_send_cfc_del(sc, params); 5244 case ECORE_Q_CMD_TERMINATE: 5245 return ecore_q_send_terminate(sc, params); 5246 case ECORE_Q_CMD_EMPTY: 5247 return ecore_q_send_empty(sc, params); 5248 default: 5249 ECORE_ERR("Unknown command: %d\n", params->cmd); 5250 return ECORE_INVAL; 5251 } 5252 } 5253 5254 static int 
ecore_queue_send_cmd_e1x(struct bxe_softc *sc, 5255 struct ecore_queue_state_params *params) 5256 { 5257 switch (params->cmd) { 5258 case ECORE_Q_CMD_SETUP: 5259 return ecore_q_send_setup_e1x(sc, params); 5260 case ECORE_Q_CMD_INIT: 5261 case ECORE_Q_CMD_SETUP_TX_ONLY: 5262 case ECORE_Q_CMD_DEACTIVATE: 5263 case ECORE_Q_CMD_ACTIVATE: 5264 case ECORE_Q_CMD_UPDATE: 5265 case ECORE_Q_CMD_UPDATE_TPA: 5266 case ECORE_Q_CMD_HALT: 5267 case ECORE_Q_CMD_CFC_DEL: 5268 case ECORE_Q_CMD_TERMINATE: 5269 case ECORE_Q_CMD_EMPTY: 5270 return ecore_queue_send_cmd_cmn(sc, params); 5271 default: 5272 ECORE_ERR("Unknown command: %d\n", params->cmd); 5273 return ECORE_INVAL; 5274 } 5275 } 5276 5277 static int ecore_queue_send_cmd_e2(struct bxe_softc *sc, 5278 struct ecore_queue_state_params *params) 5279 { 5280 switch (params->cmd) { 5281 case ECORE_Q_CMD_SETUP: 5282 return ecore_q_send_setup_e2(sc, params); 5283 case ECORE_Q_CMD_INIT: 5284 case ECORE_Q_CMD_SETUP_TX_ONLY: 5285 case ECORE_Q_CMD_DEACTIVATE: 5286 case ECORE_Q_CMD_ACTIVATE: 5287 case ECORE_Q_CMD_UPDATE: 5288 case ECORE_Q_CMD_UPDATE_TPA: 5289 case ECORE_Q_CMD_HALT: 5290 case ECORE_Q_CMD_CFC_DEL: 5291 case ECORE_Q_CMD_TERMINATE: 5292 case ECORE_Q_CMD_EMPTY: 5293 return ecore_queue_send_cmd_cmn(sc, params); 5294 default: 5295 ECORE_ERR("Unknown command: %d\n", params->cmd); 5296 return ECORE_INVAL; 5297 } 5298 } 5299 5300 /** 5301 * ecore_queue_chk_transition - check state machine of a regular Queue 5302 * 5303 * @sc: device handle 5304 * @o: 5305 * @params: 5306 * 5307 * (not Forwarding) 5308 * It both checks if the requested command is legal in a current 5309 * state and, if it's legal, sets a `next_state' in the object 5310 * that will be used in the completion flow to set the `state' 5311 * of the object. 5312 * 5313 * returns 0 if a requested command is a legal transition, 5314 * ECORE_INVAL otherwise. 5315 */ 5316 static int ecore_queue_chk_transition(struct bxe_softc *sc, 5317 struct ecore_queue_sp_obj *o, 5318 struct ecore_queue_state_params *params) 5319 { 5320 enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX; 5321 enum ecore_queue_cmd cmd = params->cmd; 5322 struct ecore_queue_update_params *update_params = 5323 ¶ms->params.update; 5324 uint8_t next_tx_only = o->num_tx_only; 5325 5326 /* Forget all pending for completion commands if a driver only state 5327 * transition has been requested. 5328 */ 5329 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) { 5330 o->pending = 0; 5331 o->next_state = ECORE_Q_STATE_MAX; 5332 } 5333 5334 /* Don't allow a next state transition if we are in the middle of 5335 * the previous one. 
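 *
 * For orientation, the legal flow checked below is roughly (sketch; EMPTY
 * and UPDATE_TPA keep the current state):
 *
 *	RESET --INIT--> INITIALIZED --SETUP--> ACTIVE or INACTIVE
 *	ACTIVE <--ACTIVATE/DEACTIVATE/UPDATE--> INACTIVE
 *	ACTIVE --SETUP_TX_ONLY--> MULTI_COS --TERMINATE--> MCOS_TERMINATED
 *	MCOS_TERMINATED --CFC_DEL--> ACTIVE or MULTI_COS
 *	ACTIVE/INACTIVE --HALT--> STOPPED --TERMINATE--> TERMINATED --CFC_DEL--> RESET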
5336 */ 5337 if (o->pending) { 5338 ECORE_ERR("Blocking transition since pending was %lx\n", 5339 o->pending); 5340 return ECORE_BUSY; 5341 } 5342 5343 switch (state) { 5344 case ECORE_Q_STATE_RESET: 5345 if (cmd == ECORE_Q_CMD_INIT) 5346 next_state = ECORE_Q_STATE_INITIALIZED; 5347 5348 break; 5349 case ECORE_Q_STATE_INITIALIZED: 5350 if (cmd == ECORE_Q_CMD_SETUP) { 5351 if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE, 5352 ¶ms->params.setup.flags)) 5353 next_state = ECORE_Q_STATE_ACTIVE; 5354 else 5355 next_state = ECORE_Q_STATE_INACTIVE; 5356 } 5357 5358 break; 5359 case ECORE_Q_STATE_ACTIVE: 5360 if (cmd == ECORE_Q_CMD_DEACTIVATE) 5361 next_state = ECORE_Q_STATE_INACTIVE; 5362 5363 else if ((cmd == ECORE_Q_CMD_EMPTY) || 5364 (cmd == ECORE_Q_CMD_UPDATE_TPA)) 5365 next_state = ECORE_Q_STATE_ACTIVE; 5366 5367 else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) { 5368 next_state = ECORE_Q_STATE_MULTI_COS; 5369 next_tx_only = 1; 5370 } 5371 5372 else if (cmd == ECORE_Q_CMD_HALT) 5373 next_state = ECORE_Q_STATE_STOPPED; 5374 5375 else if (cmd == ECORE_Q_CMD_UPDATE) { 5376 /* If "active" state change is requested, update the 5377 * state accordingly. 5378 */ 5379 if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG, 5380 &update_params->update_flags) && 5381 !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, 5382 &update_params->update_flags)) 5383 next_state = ECORE_Q_STATE_INACTIVE; 5384 else 5385 next_state = ECORE_Q_STATE_ACTIVE; 5386 } 5387 5388 break; 5389 case ECORE_Q_STATE_MULTI_COS: 5390 if (cmd == ECORE_Q_CMD_TERMINATE) 5391 next_state = ECORE_Q_STATE_MCOS_TERMINATED; 5392 5393 else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) { 5394 next_state = ECORE_Q_STATE_MULTI_COS; 5395 next_tx_only = o->num_tx_only + 1; 5396 } 5397 5398 else if ((cmd == ECORE_Q_CMD_EMPTY) || 5399 (cmd == ECORE_Q_CMD_UPDATE_TPA)) 5400 next_state = ECORE_Q_STATE_MULTI_COS; 5401 5402 else if (cmd == ECORE_Q_CMD_UPDATE) { 5403 /* If "active" state change is requested, update the 5404 * state accordingly. 5405 */ 5406 if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG, 5407 &update_params->update_flags) && 5408 !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, 5409 &update_params->update_flags)) 5410 next_state = ECORE_Q_STATE_INACTIVE; 5411 else 5412 next_state = ECORE_Q_STATE_MULTI_COS; 5413 } 5414 5415 break; 5416 case ECORE_Q_STATE_MCOS_TERMINATED: 5417 if (cmd == ECORE_Q_CMD_CFC_DEL) { 5418 next_tx_only = o->num_tx_only - 1; 5419 if (next_tx_only == 0) 5420 next_state = ECORE_Q_STATE_ACTIVE; 5421 else 5422 next_state = ECORE_Q_STATE_MULTI_COS; 5423 } 5424 5425 break; 5426 case ECORE_Q_STATE_INACTIVE: 5427 if (cmd == ECORE_Q_CMD_ACTIVATE) 5428 next_state = ECORE_Q_STATE_ACTIVE; 5429 5430 else if ((cmd == ECORE_Q_CMD_EMPTY) || 5431 (cmd == ECORE_Q_CMD_UPDATE_TPA)) 5432 next_state = ECORE_Q_STATE_INACTIVE; 5433 5434 else if (cmd == ECORE_Q_CMD_HALT) 5435 next_state = ECORE_Q_STATE_STOPPED; 5436 5437 else if (cmd == ECORE_Q_CMD_UPDATE) { 5438 /* If "active" state change is requested, update the 5439 * state accordingly. 
5440 */ 5441 if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG, 5442 &update_params->update_flags) && 5443 ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, 5444 &update_params->update_flags)){ 5445 if (o->num_tx_only == 0) 5446 next_state = ECORE_Q_STATE_ACTIVE; 5447 else /* tx only queues exist for this queue */ 5448 next_state = ECORE_Q_STATE_MULTI_COS; 5449 } else 5450 next_state = ECORE_Q_STATE_INACTIVE; 5451 } 5452 5453 break; 5454 case ECORE_Q_STATE_STOPPED: 5455 if (cmd == ECORE_Q_CMD_TERMINATE) 5456 next_state = ECORE_Q_STATE_TERMINATED; 5457 5458 break; 5459 case ECORE_Q_STATE_TERMINATED: 5460 if (cmd == ECORE_Q_CMD_CFC_DEL) 5461 next_state = ECORE_Q_STATE_RESET; 5462 5463 break; 5464 default: 5465 ECORE_ERR("Illegal state: %d\n", state); 5466 } 5467 5468 /* Transition is assured */ 5469 if (next_state != ECORE_Q_STATE_MAX) { 5470 ECORE_MSG(sc, "Good state transition: %d(%d)->%d\n", 5471 state, cmd, next_state); 5472 o->next_state = next_state; 5473 o->next_tx_only = next_tx_only; 5474 return ECORE_SUCCESS; 5475 } 5476 5477 ECORE_MSG(sc, "Bad state transition request: %d %d\n", state, cmd); 5478 5479 return ECORE_INVAL; 5480 } 5481 5482 /** 5483 * ecore_queue_chk_fwd_transition - check state machine of a Forwarding Queue. 5484 * 5485 * @sc: device handle 5486 * @o: 5487 * @params: 5488 * 5489 * It both checks if the requested command is legal in a current 5490 * state and, if it's legal, sets a `next_state' in the object 5491 * that will be used in the completion flow to set the `state' 5492 * of the object. 5493 * 5494 * returns 0 if a requested command is a legal transition, 5495 * ECORE_INVAL otherwise. 5496 */ 5497 static int ecore_queue_chk_fwd_transition(struct bxe_softc *sc, 5498 struct ecore_queue_sp_obj *o, 5499 struct ecore_queue_state_params *params) 5500 { 5501 enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX; 5502 enum ecore_queue_cmd cmd = params->cmd; 5503 5504 switch (state) { 5505 case ECORE_Q_STATE_RESET: 5506 if (cmd == ECORE_Q_CMD_INIT) 5507 next_state = ECORE_Q_STATE_INITIALIZED; 5508 5509 break; 5510 case ECORE_Q_STATE_INITIALIZED: 5511 if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) { 5512 if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE, 5513 ¶ms->params.tx_only.flags)) 5514 next_state = ECORE_Q_STATE_ACTIVE; 5515 else 5516 next_state = ECORE_Q_STATE_INACTIVE; 5517 } 5518 5519 break; 5520 case ECORE_Q_STATE_ACTIVE: 5521 case ECORE_Q_STATE_INACTIVE: 5522 if (cmd == ECORE_Q_CMD_CFC_DEL) 5523 next_state = ECORE_Q_STATE_RESET; 5524 5525 break; 5526 default: 5527 ECORE_ERR("Illegal state: %d\n", state); 5528 } 5529 5530 /* Transition is assured */ 5531 if (next_state != ECORE_Q_STATE_MAX) { 5532 ECORE_MSG(sc, "Good state transition: %d(%d)->%d\n", 5533 state, cmd, next_state); 5534 o->next_state = next_state; 5535 return ECORE_SUCCESS; 5536 } 5537 5538 ECORE_MSG(sc, "Bad state transition request: %d %d\n", state, cmd); 5539 return ECORE_INVAL; 5540 } 5541 5542 void ecore_init_queue_obj(struct bxe_softc *sc, 5543 struct ecore_queue_sp_obj *obj, 5544 uint8_t cl_id, uint32_t *cids, uint8_t cid_cnt, uint8_t func_id, 5545 void *rdata, 5546 ecore_dma_addr_t rdata_mapping, unsigned long type) 5547 { 5548 ECORE_MEMSET(obj, 0, sizeof(*obj)); 5549 5550 /* We support only ECORE_MULTI_TX_COS Tx CoS at the moment */ 5551 ECORE_BUG_ON(ECORE_MULTI_TX_COS < cid_cnt); 5552 5553 memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt); 5554 obj->max_cos = cid_cnt; 5555 obj->cl_id = cl_id; 5556 obj->func_id = func_id; 5557 obj->rdata = rdata; 5558 obj->rdata_mapping = rdata_mapping; 5559 
obj->type = type; 5560 obj->next_state = ECORE_Q_STATE_MAX; 5561 5562 if (CHIP_IS_E1x(sc)) 5563 obj->send_cmd = ecore_queue_send_cmd_e1x; 5564 else 5565 obj->send_cmd = ecore_queue_send_cmd_e2; 5566 5567 if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &type)) 5568 obj->check_transition = ecore_queue_chk_fwd_transition; 5569 else 5570 obj->check_transition = ecore_queue_chk_transition; 5571 5572 obj->complete_cmd = ecore_queue_comp_cmd; 5573 obj->wait_comp = ecore_queue_wait_comp; 5574 obj->set_pending = ecore_queue_set_pending; 5575 } 5576 5577 /* return a queue object's logical state*/ 5578 int ecore_get_q_logical_state(struct bxe_softc *sc, 5579 struct ecore_queue_sp_obj *obj) 5580 { 5581 switch (obj->state) { 5582 case ECORE_Q_STATE_ACTIVE: 5583 case ECORE_Q_STATE_MULTI_COS: 5584 return ECORE_Q_LOGICAL_STATE_ACTIVE; 5585 case ECORE_Q_STATE_RESET: 5586 case ECORE_Q_STATE_INITIALIZED: 5587 case ECORE_Q_STATE_MCOS_TERMINATED: 5588 case ECORE_Q_STATE_INACTIVE: 5589 case ECORE_Q_STATE_STOPPED: 5590 case ECORE_Q_STATE_TERMINATED: 5591 case ECORE_Q_STATE_FLRED: 5592 return ECORE_Q_LOGICAL_STATE_STOPPED; 5593 default: 5594 return ECORE_INVAL; 5595 } 5596 } 5597 5598 /********************** Function state object *********************************/ 5599 enum ecore_func_state ecore_func_get_state(struct bxe_softc *sc, 5600 struct ecore_func_sp_obj *o) 5601 { 5602 /* in the middle of transaction - return INVALID state */ 5603 if (o->pending) 5604 return ECORE_F_STATE_MAX; 5605 5606 /* unsure the order of reading of o->pending and o->state 5607 * o->pending should be read first 5608 */ 5609 rmb(); 5610 5611 return o->state; 5612 } 5613 5614 static int ecore_func_wait_comp(struct bxe_softc *sc, 5615 struct ecore_func_sp_obj *o, 5616 enum ecore_func_cmd cmd) 5617 { 5618 return ecore_state_wait(sc, cmd, &o->pending); 5619 } 5620 5621 /** 5622 * ecore_func_state_change_comp - complete the state machine transition 5623 * 5624 * @sc: device handle 5625 * @o: 5626 * @cmd: 5627 * 5628 * Called on state change transition. Completes the state 5629 * machine transition only - no HW interaction. 5630 */ 5631 static inline int ecore_func_state_change_comp(struct bxe_softc *sc, 5632 struct ecore_func_sp_obj *o, 5633 enum ecore_func_cmd cmd) 5634 { 5635 unsigned long cur_pending = o->pending; 5636 5637 if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) { 5638 ECORE_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n", 5639 cmd, ECORE_FUNC_ID(sc), o->state, 5640 cur_pending, o->next_state); 5641 return ECORE_INVAL; 5642 } 5643 5644 ECORE_MSG(sc, 5645 "Completing command %d for func %d, setting state to %d\n", 5646 cmd, ECORE_FUNC_ID(sc), o->next_state); 5647 5648 o->state = o->next_state; 5649 o->next_state = ECORE_F_STATE_MAX; 5650 5651 /* It's important that o->state and o->next_state are 5652 * updated before o->pending. 5653 */ 5654 wmb(); 5655 5656 ECORE_CLEAR_BIT(cmd, &o->pending); 5657 ECORE_SMP_MB_AFTER_CLEAR_BIT(); 5658 5659 return ECORE_SUCCESS; 5660 } 5661 5662 /** 5663 * ecore_func_comp_cmd - complete the state change command 5664 * 5665 * @sc: device handle 5666 * @o: 5667 * @cmd: 5668 * 5669 * Checks that the arrived completion is expected. 5670 */ 5671 static int ecore_func_comp_cmd(struct bxe_softc *sc, 5672 struct ecore_func_sp_obj *o, 5673 enum ecore_func_cmd cmd) 5674 { 5675 /* Complete the state machine part first, check if it's a 5676 * legal completion. 
/********************** Function state object *********************************/
enum ecore_func_state ecore_func_get_state(struct bxe_softc *sc,
                                           struct ecore_func_sp_obj *o)
{
    /* in the middle of a transaction - return INVALID state */
    if (o->pending)
        return ECORE_F_STATE_MAX;

    /* ensure the order of reading of o->pending and o->state:
     * o->pending should be read first
     */
    rmb();

    return o->state;
}

static int ecore_func_wait_comp(struct bxe_softc *sc,
                                struct ecore_func_sp_obj *o,
                                enum ecore_func_cmd cmd)
{
    return ecore_state_wait(sc, cmd, &o->pending);
}

/**
 * ecore_func_state_change_comp - complete the state machine transition
 *
 * @sc: device handle
 * @o:
 * @cmd:
 *
 * Called on state change transition. Completes the state
 * machine transition only - no HW interaction.
 */
static inline int ecore_func_state_change_comp(struct bxe_softc *sc,
                                               struct ecore_func_sp_obj *o,
                                               enum ecore_func_cmd cmd)
{
    unsigned long cur_pending = o->pending;

    if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
        ECORE_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n",
                  cmd, ECORE_FUNC_ID(sc), o->state,
                  cur_pending, o->next_state);
        return ECORE_INVAL;
    }

    ECORE_MSG(sc,
              "Completing command %d for func %d, setting state to %d\n",
              cmd, ECORE_FUNC_ID(sc), o->next_state);

    o->state = o->next_state;
    o->next_state = ECORE_F_STATE_MAX;

    /* It's important that o->state and o->next_state are
     * updated before o->pending.
     */
    wmb();

    ECORE_CLEAR_BIT(cmd, &o->pending);
    ECORE_SMP_MB_AFTER_CLEAR_BIT();

    return ECORE_SUCCESS;
}

/**
 * ecore_func_comp_cmd - complete the state change command
 *
 * @sc: device handle
 * @o:
 * @cmd:
 *
 * Checks that the arrived completion is expected.
 */
static int ecore_func_comp_cmd(struct bxe_softc *sc,
                               struct ecore_func_sp_obj *o,
                               enum ecore_func_cmd cmd)
{
    /* Complete the state machine part first, check if it's a
     * legal completion.
     */
    int rc = ecore_func_state_change_comp(sc, o, cmd);
    return rc;
}

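/*
 * Illustrative completion flow (hypothetical call site, not part of this
 * file): when the slow-path event for a function command arrives, the
 * driver's event handler is expected to hand it back to the object, e.g.:
 *
 *	f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START);
 *
 * which ends up in ecore_func_comp_cmd() above, validating that the
 * completion was indeed pending and moving `state' to `next_state'.
 */
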
/**
 * ecore_func_chk_transition - perform function state machine transition
 *
 * @sc: device handle
 * @o:
 * @params:
 *
 * It both checks if the requested command is legal in a current
 * state and, if it's legal, sets a `next_state' in the object
 * that will be used in the completion flow to set the `state'
 * of the object.
 *
 * returns 0 if a requested command is a legal transition,
 * ECORE_INVAL otherwise.
 */
static int ecore_func_chk_transition(struct bxe_softc *sc,
                                     struct ecore_func_sp_obj *o,
                                     struct ecore_func_state_params *params)
{
    enum ecore_func_state state = o->state, next_state = ECORE_F_STATE_MAX;
    enum ecore_func_cmd cmd = params->cmd;

    /* Forget all pending for completion commands if a driver only state
     * transition has been requested.
     */
    if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
        o->pending = 0;
        o->next_state = ECORE_F_STATE_MAX;
    }

    /* Don't allow a next state transition if we are in the middle of
     * the previous one.
     */
    if (o->pending)
        return ECORE_BUSY;

    switch (state) {
    case ECORE_F_STATE_RESET:
        if (cmd == ECORE_F_CMD_HW_INIT)
            next_state = ECORE_F_STATE_INITIALIZED;

        break;
    case ECORE_F_STATE_INITIALIZED:
        if (cmd == ECORE_F_CMD_START)
            next_state = ECORE_F_STATE_STARTED;

        else if (cmd == ECORE_F_CMD_HW_RESET)
            next_state = ECORE_F_STATE_RESET;

        break;
    case ECORE_F_STATE_STARTED:
        if (cmd == ECORE_F_CMD_STOP)
            next_state = ECORE_F_STATE_INITIALIZED;
        /* afex ramrods can be sent only in started mode, and only
         * if not pending for function_stop ramrod completion;
         * for these events the next state remains STARTED.
         */
        else if ((cmd == ECORE_F_CMD_AFEX_UPDATE) &&
                 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
            next_state = ECORE_F_STATE_STARTED;

        else if ((cmd == ECORE_F_CMD_AFEX_VIFLISTS) &&
                 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
            next_state = ECORE_F_STATE_STARTED;

        /* Switch_update ramrod can be sent in either started or
         * tx_stopped state, and it doesn't change the state.
         */
        else if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) &&
                 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
            next_state = ECORE_F_STATE_STARTED;

        else if (cmd == ECORE_F_CMD_TX_STOP)
            next_state = ECORE_F_STATE_TX_STOPPED;

        break;
    case ECORE_F_STATE_TX_STOPPED:
        if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) &&
            (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
            next_state = ECORE_F_STATE_TX_STOPPED;

        else if (cmd == ECORE_F_CMD_TX_START)
            next_state = ECORE_F_STATE_STARTED;

        break;
    default:
        ECORE_ERR("Unknown state: %d\n", state);
    }

    /* Transition is assured */
    if (next_state != ECORE_F_STATE_MAX) {
        ECORE_MSG(sc, "Good function state transition: %d(%d)->%d\n",
                  state, cmd, next_state);
        o->next_state = next_state;
        return ECORE_SUCCESS;
    }

    ECORE_MSG(sc, "Bad function state transition request: %d %d\n",
              state, cmd);

    return ECORE_INVAL;
}

/**
 * ecore_func_init_func - performs HW init at function stage
 *
 * @sc: device handle
 * @drv:
 *
 * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
 * HW blocks.
 */
static inline int ecore_func_init_func(struct bxe_softc *sc,
                                       const struct ecore_func_sp_drv_ops *drv)
{
    return drv->init_hw_func(sc);
}

/**
 * ecore_func_init_port - performs HW init at port stage
 *
 * @sc: device handle
 * @drv:
 *
 * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
 * FUNCTION-only HW blocks.
 */
static inline int ecore_func_init_port(struct bxe_softc *sc,
                                       const struct ecore_func_sp_drv_ops *drv)
{
    int rc = drv->init_hw_port(sc);
    if (rc)
        return rc;

    return ecore_func_init_func(sc, drv);
}

/**
 * ecore_func_init_cmn_chip - performs HW init at chip-common stage
 *
 * @sc: device handle
 * @drv:
 *
 * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
 * PORT-only and FUNCTION-only HW blocks.
 */
static inline int ecore_func_init_cmn_chip(struct bxe_softc *sc,
                                           const struct ecore_func_sp_drv_ops *drv)
{
    int rc = drv->init_hw_cmn_chip(sc);
    if (rc)
        return rc;

    return ecore_func_init_port(sc, drv);
}

/**
 * ecore_func_init_cmn - performs HW init at common stage
 *
 * @sc: device handle
 * @drv:
 *
 * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
 * PORT-only and FUNCTION-only HW blocks.
 */
static inline int ecore_func_init_cmn(struct bxe_softc *sc,
                                      const struct ecore_func_sp_drv_ops *drv)
{
    int rc = drv->init_hw_cmn(sc);
    if (rc)
        return rc;

    return ecore_func_init_port(sc, drv);
}

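/*
 * The init helpers above are deliberately nested: each load phase first
 * initializes its own HW blocks and then falls through to the next-lower
 * phase, so COMMON/COMMON_CHIP init also covers the PORT and FUNCTION
 * blocks, and PORT init also covers the FUNCTION blocks. The dispatcher
 * below simply picks the entry point matching the load phase reported by
 * the MCP.
 */
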
static int ecore_func_hw_init(struct bxe_softc *sc,
                              struct ecore_func_state_params *params)
{
    uint32_t load_code = params->params.hw_init.load_phase;
    struct ecore_func_sp_obj *o = params->f_obj;
    const struct ecore_func_sp_drv_ops *drv = o->drv;
    int rc = 0;

    ECORE_MSG(sc, "function %d load_code %x\n",
              ECORE_ABS_FUNC_ID(sc), load_code);

    /* Prepare buffers for unzipping the FW */
    rc = drv->gunzip_init(sc);
    if (rc)
        return rc;

    /* Prepare FW */
    rc = drv->init_fw(sc);
    if (rc) {
        ECORE_ERR("Error loading firmware\n");
        goto init_err;
    }

    /* Handle the beginning of COMMON_XXX phases separately... */
    switch (load_code) {
    case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
        rc = ecore_func_init_cmn_chip(sc, drv);
        if (rc)
            goto init_err;

        break;
    case FW_MSG_CODE_DRV_LOAD_COMMON:
        rc = ecore_func_init_cmn(sc, drv);
        if (rc)
            goto init_err;

        break;
    case FW_MSG_CODE_DRV_LOAD_PORT:
        rc = ecore_func_init_port(sc, drv);
        if (rc)
            goto init_err;

        break;
    case FW_MSG_CODE_DRV_LOAD_FUNCTION:
        rc = ecore_func_init_func(sc, drv);
        if (rc)
            goto init_err;

        break;
    default:
        ECORE_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
        rc = ECORE_INVAL;
    }

init_err:
    drv->gunzip_end(sc);

    /* In case of success, complete the command immediately: no ramrods
     * have been sent.
     */
    if (!rc)
        o->complete_cmd(sc, o, ECORE_F_CMD_HW_INIT);

    return rc;
}

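/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * the driver load path would drive HW init through the function state
 * machine rather than calling the helpers above directly, e.g.:
 *
 *	struct ecore_func_state_params func_params = { 0 };
 *
 *	func_params.f_obj = f_obj;   // previously set up ecore_func_sp_obj
 *	func_params.cmd = ECORE_F_CMD_HW_INIT;
 *	func_params.params.hw_init.load_phase = load_code;  // from the MCP
 *	rc = ecore_func_state_change(sc, &func_params);
 *
 * HW_INIT sends no ramrod, so the command is completed inline by
 * ecore_func_hw_init() and no RAMROD_COMP_WAIT handling is needed.
 */
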
/**
 * ecore_func_reset_func - reset HW at function stage
 *
 * @sc: device handle
 * @drv:
 *
 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
 * FUNCTION-only HW blocks.
 */
static inline void ecore_func_reset_func(struct bxe_softc *sc,
                                         const struct ecore_func_sp_drv_ops *drv)
{
    drv->reset_hw_func(sc);
}

/**
 * ecore_func_reset_port - reset HW at port stage
 *
 * @sc: device handle
 * @drv:
 *
 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
 * FUNCTION-only and PORT-only HW blocks.
 *
 * !!!IMPORTANT!!!
 *
 * It's important to call reset_port before reset_func() as the last thing
 * reset_func does is pf_disable(), which disables PGLUE_B and thus makes
 * any further DMAE transactions impossible.
 */
static inline void ecore_func_reset_port(struct bxe_softc *sc,
                                         const struct ecore_func_sp_drv_ops *drv)
{
    drv->reset_hw_port(sc);
    ecore_func_reset_func(sc, drv);
}

/**
 * ecore_func_reset_cmn - reset HW at common stage
 *
 * @sc: device handle
 * @drv:
 *
 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
 */
static inline void ecore_func_reset_cmn(struct bxe_softc *sc,
                                        const struct ecore_func_sp_drv_ops *drv)
{
    ecore_func_reset_port(sc, drv);
    drv->reset_hw_cmn(sc);
}

static inline int ecore_func_hw_reset(struct bxe_softc *sc,
                                      struct ecore_func_state_params *params)
{
    uint32_t reset_phase = params->params.hw_reset.reset_phase;
    struct ecore_func_sp_obj *o = params->f_obj;
    const struct ecore_func_sp_drv_ops *drv = o->drv;

    ECORE_MSG(sc, "function %d reset_phase %x\n", ECORE_ABS_FUNC_ID(sc),
              reset_phase);

    switch (reset_phase) {
    case FW_MSG_CODE_DRV_UNLOAD_COMMON:
        ecore_func_reset_cmn(sc, drv);
        break;
    case FW_MSG_CODE_DRV_UNLOAD_PORT:
        ecore_func_reset_port(sc, drv);
        break;
    case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
        ecore_func_reset_func(sc, drv);
        break;
    default:
        ECORE_ERR("Unknown reset_phase (0x%x) from MCP\n",
                  reset_phase);
        break;
    }

    /* Complete the command immediately: no ramrods have been sent. */
    o->complete_cmd(sc, o, ECORE_F_CMD_HW_RESET);

    return ECORE_SUCCESS;
}

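/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * the unload path mirrors HW init, again going through the state machine:
 *
 *	struct ecore_func_state_params func_params = { 0 };
 *
 *	func_params.f_obj = f_obj;
 *	func_params.cmd = ECORE_F_CMD_HW_RESET;
 *	func_params.params.hw_reset.reset_phase = reset_code;  // from the MCP
 *	rc = ecore_func_state_change(sc, &func_params);
 *
 * As with HW_INIT, no ramrod is sent and ecore_func_hw_reset() completes
 * the command inline.
 */
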
static inline int ecore_func_send_start(struct bxe_softc *sc,
                                        struct ecore_func_state_params *params)
{
    struct ecore_func_sp_obj *o = params->f_obj;
    struct function_start_data *rdata =
        (struct function_start_data *)o->rdata;
    ecore_dma_addr_t data_mapping = o->rdata_mapping;
    struct ecore_func_start_params *start_params = &params->params.start;

    ECORE_MEMSET(rdata, 0, sizeof(*rdata));

    /* Fill the ramrod data with provided parameters */
    rdata->function_mode = (uint8_t)start_params->mf_mode;
    rdata->sd_vlan_tag = ECORE_CPU_TO_LE16(start_params->sd_vlan_tag);
    rdata->path_id = ECORE_PATH_ID(sc);
    rdata->network_cos_mode = start_params->network_cos_mode;
    rdata->gre_tunnel_mode = start_params->gre_tunnel_mode;
    rdata->gre_tunnel_rss = start_params->gre_tunnel_rss;

    /* No need for an explicit memory barrier here as long as we would
     * need to ensure the ordering of writing to the SPQ element
     * and updating of the SPQ producer, which involves a memory
     * read, and we will have to put a full memory barrier there
     * (inside ecore_sp_post()).
     */

    return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
                         data_mapping, NONE_CONNECTION_TYPE);
}

static inline int ecore_func_send_switch_update(struct bxe_softc *sc,
                                                struct ecore_func_state_params *params)
{
    struct ecore_func_sp_obj *o = params->f_obj;
    struct function_update_data *rdata =
        (struct function_update_data *)o->rdata;
    ecore_dma_addr_t data_mapping = o->rdata_mapping;
    struct ecore_func_switch_update_params *switch_update_params =
        &params->params.switch_update;

    ECORE_MEMSET(rdata, 0, sizeof(*rdata));

    /* Fill the ramrod data with provided parameters */
    rdata->tx_switch_suspend_change_flg = 1;
    rdata->tx_switch_suspend = switch_update_params->suspend;
    rdata->echo = SWITCH_UPDATE;

    return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
                         data_mapping, NONE_CONNECTION_TYPE);
}

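/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * suspending Tx switching for the function would be requested as:
 *
 *	struct ecore_func_state_params func_params = { 0 };
 *
 *	func_params.f_obj = f_obj;
 *	func_params.cmd = ECORE_F_CMD_SWITCH_UPDATE;
 *	func_params.params.switch_update.suspend = 1;
 *	rc = ecore_func_state_change(sc, &func_params);
 *
 * The state machine allows this in both STARTED and TX_STOPPED states and
 * the state itself is left unchanged.
 */
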
static inline int ecore_func_send_afex_update(struct bxe_softc *sc,
                                              struct ecore_func_state_params *params)
{
    struct ecore_func_sp_obj *o = params->f_obj;
    struct function_update_data *rdata =
        (struct function_update_data *)o->afex_rdata;
    ecore_dma_addr_t data_mapping = o->afex_rdata_mapping;
    struct ecore_func_afex_update_params *afex_update_params =
        &params->params.afex_update;

    ECORE_MEMSET(rdata, 0, sizeof(*rdata));

    /* Fill the ramrod data with provided parameters */
    rdata->vif_id_change_flg = 1;
    rdata->vif_id = ECORE_CPU_TO_LE16(afex_update_params->vif_id);
    rdata->afex_default_vlan_change_flg = 1;
    rdata->afex_default_vlan =
        ECORE_CPU_TO_LE16(afex_update_params->afex_default_vlan);
    rdata->allowed_priorities_change_flg = 1;
    rdata->allowed_priorities = afex_update_params->allowed_priorities;
    rdata->echo = AFEX_UPDATE;

    /* No need for an explicit memory barrier here as long as we would
     * need to ensure the ordering of writing to the SPQ element
     * and updating of the SPQ producer, which involves a memory
     * read, and we will have to put a full memory barrier there
     * (inside ecore_sp_post()).
     */
    ECORE_MSG(sc,
              "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
              rdata->vif_id,
              rdata->afex_default_vlan, rdata->allowed_priorities);

    return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
                         data_mapping, NONE_CONNECTION_TYPE);
}

static inline int ecore_func_send_afex_viflists(struct bxe_softc *sc,
                                                struct ecore_func_state_params *params)
{
    struct ecore_func_sp_obj *o = params->f_obj;
    struct afex_vif_list_ramrod_data *rdata =
        (struct afex_vif_list_ramrod_data *)o->afex_rdata;
    struct ecore_func_afex_viflists_params *afex_vif_params =
        &params->params.afex_viflists;
    uint64_t *p_rdata = (uint64_t *)rdata;

    ECORE_MEMSET(rdata, 0, sizeof(*rdata));

    /* Fill the ramrod data with provided parameters */
    rdata->vif_list_index = ECORE_CPU_TO_LE16(afex_vif_params->vif_list_index);
    rdata->func_bit_map = afex_vif_params->func_bit_map;
    rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
    rdata->func_to_clear = afex_vif_params->func_to_clear;

    /* send in echo type of sub command */
    rdata->echo = afex_vif_params->afex_vif_list_command;

    /* No need for an explicit memory barrier here as long as we would
     * need to ensure the ordering of writing to the SPQ element
     * and updating of the SPQ producer, which involves a memory
     * read, and we will have to put a full memory barrier there
     * (inside ecore_sp_post()).
     */

    ECORE_MSG(sc, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
              rdata->afex_vif_list_command, rdata->vif_list_index,
              rdata->func_bit_map, rdata->func_to_clear);

    /* this ramrod sends data directly and not through DMA mapping */
    return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
                         *p_rdata, NONE_CONNECTION_TYPE);
}

static inline int ecore_func_send_stop(struct bxe_softc *sc,
                                       struct ecore_func_state_params *params)
{
    return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0,
                         NONE_CONNECTION_TYPE);
}

static inline int ecore_func_send_tx_stop(struct bxe_softc *sc,
                                          struct ecore_func_state_params *params)
{
    return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0,
                         NONE_CONNECTION_TYPE);
}

static inline int ecore_func_send_tx_start(struct bxe_softc *sc,
                                           struct ecore_func_state_params *params)
{
    struct ecore_func_sp_obj *o = params->f_obj;
    struct flow_control_configuration *rdata =
        (struct flow_control_configuration *)o->rdata;
    ecore_dma_addr_t data_mapping = o->rdata_mapping;
    struct ecore_func_tx_start_params *tx_start_params =
        &params->params.tx_start;
    int i;

    ECORE_MEMSET(rdata, 0, sizeof(*rdata));

    rdata->dcb_enabled = tx_start_params->dcb_enabled;
    rdata->dcb_version = tx_start_params->dcb_version;
    rdata->dont_add_pri_0 = tx_start_params->dont_add_pri_0;

    for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
        rdata->traffic_type_to_priority_cos[i] =
            tx_start_params->traffic_type_to_priority_cos[i];

    return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
                         data_mapping, NONE_CONNECTION_TYPE);
}

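/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * a traffic-stop/restart cycle, for example around a CoS reconfiguration,
 * would go through the same state machine:
 *
 *	func_params.cmd = ECORE_F_CMD_TX_STOP;    // STARTED -> TX_STOPPED
 *	rc = ecore_func_state_change(sc, &func_params);
 *	...
 *	func_params.cmd = ECORE_F_CMD_TX_START;   // TX_STOPPED -> STARTED
 *	func_params.params.tx_start.dcb_enabled = 1;
 *	rc = ecore_func_state_change(sc, &func_params);
 *
 * `func_params' stands for a prepared ecore_func_state_params with f_obj
 * already set; the reconfiguration motivation is only an illustration.
 */
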
static int ecore_func_send_cmd(struct bxe_softc *sc,
                               struct ecore_func_state_params *params)
{
    switch (params->cmd) {
    case ECORE_F_CMD_HW_INIT:
        return ecore_func_hw_init(sc, params);
    case ECORE_F_CMD_START:
        return ecore_func_send_start(sc, params);
    case ECORE_F_CMD_STOP:
        return ecore_func_send_stop(sc, params);
    case ECORE_F_CMD_HW_RESET:
        return ecore_func_hw_reset(sc, params);
    case ECORE_F_CMD_AFEX_UPDATE:
        return ecore_func_send_afex_update(sc, params);
    case ECORE_F_CMD_AFEX_VIFLISTS:
        return ecore_func_send_afex_viflists(sc, params);
    case ECORE_F_CMD_TX_STOP:
        return ecore_func_send_tx_stop(sc, params);
    case ECORE_F_CMD_TX_START:
        return ecore_func_send_tx_start(sc, params);
    case ECORE_F_CMD_SWITCH_UPDATE:
        return ecore_func_send_switch_update(sc, params);
    default:
        ECORE_ERR("Unknown command: %d\n", params->cmd);
        return ECORE_INVAL;
    }
}

void ecore_init_func_obj(struct bxe_softc *sc,
                         struct ecore_func_sp_obj *obj,
                         void *rdata, ecore_dma_addr_t rdata_mapping,
                         void *afex_rdata, ecore_dma_addr_t afex_rdata_mapping,
                         struct ecore_func_sp_drv_ops *drv_iface)
{
    ECORE_MEMSET(obj, 0, sizeof(*obj));

    ECORE_MUTEX_INIT(&obj->one_pending_mutex);

    obj->rdata = rdata;
    obj->rdata_mapping = rdata_mapping;
    obj->afex_rdata = afex_rdata;
    obj->afex_rdata_mapping = afex_rdata_mapping;
    obj->send_cmd = ecore_func_send_cmd;
    obj->check_transition = ecore_func_chk_transition;
    obj->complete_cmd = ecore_func_comp_cmd;
    obj->wait_comp = ecore_func_wait_comp;
    obj->drv = drv_iface;
}

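/*
 * Illustrative attach-time sketch (hypothetical caller, not part of this
 * file): the driver would initialize the function object once, passing the
 * buffers used for the regular and AFEX ramrod data plus its HW-ops table,
 * before any ecore_func_state_change() call:
 *
 *	ecore_init_func_obj(sc, f_obj,
 *			    rdata, rdata_mapping,
 *			    afex_rdata, afex_rdata_mapping,
 *			    &drv_ops);
 *
 * `drv_ops' stands for a driver-provided ecore_func_sp_drv_ops with the
 * init_hw, reset_hw, gunzip and init_fw callbacks filled in.
 */
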
/**
 * ecore_func_state_change - perform Function state change transition
 *
 * @sc: device handle
 * @params: parameters to perform the transaction
 *
 * returns 0 in case of successfully completed transition,
 * negative error code in case of failure, positive
 * (EBUSY) value if there is a completion that is
 * still pending (possible only if RAMROD_COMP_WAIT is
 * not set in params->ramrod_flags for asynchronous
 * commands).
 */
int ecore_func_state_change(struct bxe_softc *sc,
                            struct ecore_func_state_params *params)
{
    struct ecore_func_sp_obj *o = params->f_obj;
    int rc, cnt = 300;
    enum ecore_func_cmd cmd = params->cmd;
    unsigned long *pending = &o->pending;

    ECORE_MUTEX_LOCK(&o->one_pending_mutex);

    /* Check that the requested transition is legal */
    rc = o->check_transition(sc, o, params);
    if ((rc == ECORE_BUSY) &&
        (ECORE_TEST_BIT(RAMROD_RETRY, &params->ramrod_flags))) {
        while ((rc == ECORE_BUSY) && (--cnt > 0)) {
            ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
            ECORE_MSLEEP(10);
            ECORE_MUTEX_LOCK(&o->one_pending_mutex);
            rc = o->check_transition(sc, o, params);
        }
        if (rc == ECORE_BUSY) {
            ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
            ECORE_ERR("timeout waiting for previous ramrod completion\n");
            return rc;
        }
    } else if (rc) {
        ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
        return rc;
    }

    /* Set "pending" bit */
    ECORE_SET_BIT(cmd, pending);

    /* Don't send a command if only driver cleanup was requested */
    if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
        ecore_func_state_change_comp(sc, o, cmd);
        ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
    } else {
        /* Send a ramrod */
        rc = o->send_cmd(sc, params);

        ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);

        if (rc) {
            o->next_state = ECORE_F_STATE_MAX;
            ECORE_CLEAR_BIT(cmd, pending);
            ECORE_SMP_MB_AFTER_CLEAR_BIT();
            return rc;
        }

        if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
            rc = o->wait_comp(sc, o, cmd);
            if (rc)
                return rc;

            return ECORE_SUCCESS;
        }
    }

    return ECORE_RET_PENDING(cmd, pending);
}

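/*
 * Illustrative end-to-end sketch (hypothetical caller, not part of this
 * file): a synchronous FUNCTION_STOP, where the caller blocks until the
 * ramrod completion arrives:
 *
 *	struct ecore_func_state_params func_params = { 0 };
 *
 *	func_params.f_obj = f_obj;
 *	func_params.cmd = ECORE_F_CMD_STOP;
 *	ECORE_SET_BIT(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
 *	rc = ecore_func_state_change(sc, &func_params);
 *
 * With RAMROD_COMP_WAIT set, ecore_func_state_change() calls o->wait_comp()
 * after posting the ramrod; without it, the function returns a positive
 * pending indication and the completion is consumed later through
 * o->complete_cmd().
 */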