1 /* bnx2x_sp.c: Broadcom Everest network driver. 2 * 3 * Copyright (c) 2011-2012 Broadcom Corporation 4 * 5 * Unless you and Broadcom execute a separate written software license 6 * agreement governing use of this software, this software is licensed to you 7 * under the terms of the GNU General Public License version 2, available 8 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). 9 * 10 * Notwithstanding the above, under no circumstances may you combine this 11 * software in any way with any other Broadcom software provided under a 12 * license other than the GPL, without Broadcom's express prior written 13 * consent. 14 * 15 * Maintained by: Eilon Greenstein <eilong@broadcom.com> 16 * Written by: Vladislav Zolotarov 17 * 18 */ 19 20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 21 22 #include <linux/module.h> 23 #include <linux/crc32.h> 24 #include <linux/netdevice.h> 25 #include <linux/etherdevice.h> 26 #include <linux/crc32c.h> 27 #include "bnx2x.h" 28 #include "bnx2x_cmn.h" 29 #include "bnx2x_sp.h" 30 31 #define BNX2X_MAX_EMUL_MULTI 16 32 33 #define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN) 34 35 /**** Exe Queue interfaces ****/ 36 37 /** 38 * bnx2x_exe_queue_init - init the Exe Queue object 39 * 40 * @o: poiter to the object 41 * @exe_len: length 42 * @owner: poiter to the owner 43 * @validate: validate function pointer 44 * @optimize: optimize function pointer 45 * @exec: execute function pointer 46 * @get: get function pointer 47 */ 48 static inline void bnx2x_exe_queue_init(struct bnx2x *bp, 49 struct bnx2x_exe_queue_obj *o, 50 int exe_len, 51 union bnx2x_qable_obj *owner, 52 exe_q_validate validate, 53 exe_q_remove remove, 54 exe_q_optimize optimize, 55 exe_q_execute exec, 56 exe_q_get get) 57 { 58 memset(o, 0, sizeof(*o)); 59 60 INIT_LIST_HEAD(&o->exe_queue); 61 INIT_LIST_HEAD(&o->pending_comp); 62 63 spin_lock_init(&o->lock); 64 65 o->exe_chunk_len = exe_len; 66 o->owner = owner; 67 68 /* Owner specific callbacks */ 69 o->validate = validate; 70 o->remove = remove; 71 o->optimize = optimize; 72 o->execute = exec; 73 o->get = get; 74 75 DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk length of %d\n", 76 exe_len); 77 } 78 79 static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp, 80 struct bnx2x_exeq_elem *elem) 81 { 82 DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n"); 83 kfree(elem); 84 } 85 86 static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o) 87 { 88 struct bnx2x_exeq_elem *elem; 89 int cnt = 0; 90 91 spin_lock_bh(&o->lock); 92 93 list_for_each_entry(elem, &o->exe_queue, link) 94 cnt++; 95 96 spin_unlock_bh(&o->lock); 97 98 return cnt; 99 } 100 101 /** 102 * bnx2x_exe_queue_add - add a new element to the execution queue 103 * 104 * @bp: driver handle 105 * @o: queue 106 * @cmd: new command to add 107 * @restore: true - do not optimize the command 108 * 109 * If the element is optimized or is illegal, frees it. 110 */ 111 static inline int bnx2x_exe_queue_add(struct bnx2x *bp, 112 struct bnx2x_exe_queue_obj *o, 113 struct bnx2x_exeq_elem *elem, 114 bool restore) 115 { 116 int rc; 117 118 spin_lock_bh(&o->lock); 119 120 if (!restore) { 121 /* Try to cancel this element queue */ 122 rc = o->optimize(bp, o->owner, elem); 123 if (rc) 124 goto free_and_exit; 125 126 /* Check if this request is ok */ 127 rc = o->validate(bp, o->owner, elem); 128 if (rc) { 129 BNX2X_ERR("Preamble failed: %d\n", rc); 130 goto free_and_exit; 131 } 132 } 133 134 /* If so, add it to the execution queue */ 135 list_add_tail(&elem->link, &o->exe_queue); 136 137 spin_unlock_bh(&o->lock); 138 139 return 0; 140 141 free_and_exit: 142 bnx2x_exe_queue_free_elem(bp, elem); 143 144 spin_unlock_bh(&o->lock); 145 146 return rc; 147 148 } 149 150 static inline void __bnx2x_exe_queue_reset_pending( 151 struct bnx2x *bp, 152 struct bnx2x_exe_queue_obj *o) 153 { 154 struct bnx2x_exeq_elem *elem; 155 156 while (!list_empty(&o->pending_comp)) { 157 elem = list_first_entry(&o->pending_comp, 158 struct bnx2x_exeq_elem, link); 159 160 list_del(&elem->link); 161 bnx2x_exe_queue_free_elem(bp, elem); 162 } 163 } 164 165 static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp, 166 struct bnx2x_exe_queue_obj *o) 167 { 168 169 spin_lock_bh(&o->lock); 170 171 __bnx2x_exe_queue_reset_pending(bp, o); 172 173 spin_unlock_bh(&o->lock); 174 175 } 176 177 /** 178 * bnx2x_exe_queue_step - execute one execution chunk atomically 179 * 180 * @bp: driver handle 181 * @o: queue 182 * @ramrod_flags: flags 183 * 184 * (Atomicy is ensured using the exe_queue->lock). 185 */ 186 static inline int bnx2x_exe_queue_step(struct bnx2x *bp, 187 struct bnx2x_exe_queue_obj *o, 188 unsigned long *ramrod_flags) 189 { 190 struct bnx2x_exeq_elem *elem, spacer; 191 int cur_len = 0, rc; 192 193 memset(&spacer, 0, sizeof(spacer)); 194 195 spin_lock_bh(&o->lock); 196 197 /* 198 * Next step should not be performed until the current is finished, 199 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to 200 * properly clear object internals without sending any command to the FW 201 * which also implies there won't be any completion to clear the 202 * 'pending' list. 203 */ 204 if (!list_empty(&o->pending_comp)) { 205 if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) { 206 DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n"); 207 __bnx2x_exe_queue_reset_pending(bp, o); 208 } else { 209 spin_unlock_bh(&o->lock); 210 return 1; 211 } 212 } 213 214 /* 215 * Run through the pending commands list and create a next 216 * execution chunk. 217 */ 218 while (!list_empty(&o->exe_queue)) { 219 elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem, 220 link); 221 WARN_ON(!elem->cmd_len); 222 223 if (cur_len + elem->cmd_len <= o->exe_chunk_len) { 224 cur_len += elem->cmd_len; 225 /* 226 * Prevent from both lists being empty when moving an 227 * element. This will allow the call of 228 * bnx2x_exe_queue_empty() without locking. 229 */ 230 list_add_tail(&spacer.link, &o->pending_comp); 231 mb(); 232 list_del(&elem->link); 233 list_add_tail(&elem->link, &o->pending_comp); 234 list_del(&spacer.link); 235 } else 236 break; 237 } 238 239 /* Sanity check */ 240 if (!cur_len) { 241 spin_unlock_bh(&o->lock); 242 return 0; 243 } 244 245 rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags); 246 if (rc < 0) 247 /* 248 * In case of an error return the commands back to the queue 249 * and reset the pending_comp. 250 */ 251 list_splice_init(&o->pending_comp, &o->exe_queue); 252 else if (!rc) 253 /* 254 * If zero is returned, means there are no outstanding pending 255 * completions and we may dismiss the pending list. 256 */ 257 __bnx2x_exe_queue_reset_pending(bp, o); 258 259 spin_unlock_bh(&o->lock); 260 return rc; 261 } 262 263 static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o) 264 { 265 bool empty = list_empty(&o->exe_queue); 266 267 /* Don't reorder!!! */ 268 mb(); 269 270 return empty && list_empty(&o->pending_comp); 271 } 272 273 static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem( 274 struct bnx2x *bp) 275 { 276 DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n"); 277 return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC); 278 } 279 280 /************************ raw_obj functions ***********************************/ 281 static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o) 282 { 283 return !!test_bit(o->state, o->pstate); 284 } 285 286 static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o) 287 { 288 smp_mb__before_clear_bit(); 289 clear_bit(o->state, o->pstate); 290 smp_mb__after_clear_bit(); 291 } 292 293 static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o) 294 { 295 smp_mb__before_clear_bit(); 296 set_bit(o->state, o->pstate); 297 smp_mb__after_clear_bit(); 298 } 299 300 /** 301 * bnx2x_state_wait - wait until the given bit(state) is cleared 302 * 303 * @bp: device handle 304 * @state: state which is to be cleared 305 * @state_p: state buffer 306 * 307 */ 308 static inline int bnx2x_state_wait(struct bnx2x *bp, int state, 309 unsigned long *pstate) 310 { 311 /* can take a while if any port is running */ 312 int cnt = 5000; 313 314 315 if (CHIP_REV_IS_EMUL(bp)) 316 cnt *= 20; 317 318 DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state); 319 320 might_sleep(); 321 while (cnt--) { 322 if (!test_bit(state, pstate)) { 323 #ifdef BNX2X_STOP_ON_ERROR 324 DP(BNX2X_MSG_SP, "exit (cnt %d)\n", 5000 - cnt); 325 #endif 326 return 0; 327 } 328 329 usleep_range(1000, 1000); 330 331 if (bp->panic) 332 return -EIO; 333 } 334 335 /* timeout! */ 336 BNX2X_ERR("timeout waiting for state %d\n", state); 337 #ifdef BNX2X_STOP_ON_ERROR 338 bnx2x_panic(); 339 #endif 340 341 return -EBUSY; 342 } 343 344 static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw) 345 { 346 return bnx2x_state_wait(bp, raw->state, raw->pstate); 347 } 348 349 /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/ 350 /* credit handling callbacks */ 351 static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset) 352 { 353 struct bnx2x_credit_pool_obj *mp = o->macs_pool; 354 355 WARN_ON(!mp); 356 357 return mp->get_entry(mp, offset); 358 } 359 360 static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o) 361 { 362 struct bnx2x_credit_pool_obj *mp = o->macs_pool; 363 364 WARN_ON(!mp); 365 366 return mp->get(mp, 1); 367 } 368 369 static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset) 370 { 371 struct bnx2x_credit_pool_obj *vp = o->vlans_pool; 372 373 WARN_ON(!vp); 374 375 return vp->get_entry(vp, offset); 376 } 377 378 static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o) 379 { 380 struct bnx2x_credit_pool_obj *vp = o->vlans_pool; 381 382 WARN_ON(!vp); 383 384 return vp->get(vp, 1); 385 } 386 387 static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o) 388 { 389 struct bnx2x_credit_pool_obj *mp = o->macs_pool; 390 struct bnx2x_credit_pool_obj *vp = o->vlans_pool; 391 392 if (!mp->get(mp, 1)) 393 return false; 394 395 if (!vp->get(vp, 1)) { 396 mp->put(mp, 1); 397 return false; 398 } 399 400 return true; 401 } 402 403 static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset) 404 { 405 struct bnx2x_credit_pool_obj *mp = o->macs_pool; 406 407 return mp->put_entry(mp, offset); 408 } 409 410 static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o) 411 { 412 struct bnx2x_credit_pool_obj *mp = o->macs_pool; 413 414 return mp->put(mp, 1); 415 } 416 417 static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset) 418 { 419 struct bnx2x_credit_pool_obj *vp = o->vlans_pool; 420 421 return vp->put_entry(vp, offset); 422 } 423 424 static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o) 425 { 426 struct bnx2x_credit_pool_obj *vp = o->vlans_pool; 427 428 return vp->put(vp, 1); 429 } 430 431 static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o) 432 { 433 struct bnx2x_credit_pool_obj *mp = o->macs_pool; 434 struct bnx2x_credit_pool_obj *vp = o->vlans_pool; 435 436 if (!mp->put(mp, 1)) 437 return false; 438 439 if (!vp->put(vp, 1)) { 440 mp->get(mp, 1); 441 return false; 442 } 443 444 return true; 445 } 446 447 static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, 448 int n, u8 *buf) 449 { 450 struct bnx2x_vlan_mac_registry_elem *pos; 451 u8 *next = buf; 452 int counter = 0; 453 454 /* traverse list */ 455 list_for_each_entry(pos, &o->head, link) { 456 if (counter < n) { 457 /* place leading zeroes in buffer */ 458 memset(next, 0, MAC_LEADING_ZERO_CNT); 459 460 /* place mac after leading zeroes*/ 461 memcpy(next + MAC_LEADING_ZERO_CNT, pos->u.mac.mac, 462 ETH_ALEN); 463 464 /* calculate address of next element and 465 * advance counter 466 */ 467 counter++; 468 next = buf + counter * ALIGN(ETH_ALEN, sizeof(u32)); 469 470 DP(BNX2X_MSG_SP, "copied element number %d to address %p element was %pM\n", 471 counter, next, pos->u.mac.mac); 472 } 473 } 474 return counter * ETH_ALEN; 475 } 476 477 /* check_add() callbacks */ 478 static int bnx2x_check_mac_add(struct bnx2x *bp, 479 struct bnx2x_vlan_mac_obj *o, 480 union bnx2x_classification_ramrod_data *data) 481 { 482 struct bnx2x_vlan_mac_registry_elem *pos; 483 484 DP(BNX2X_MSG_SP, "Checking MAC %pM for ADD command\n", data->mac.mac); 485 486 if (!is_valid_ether_addr(data->mac.mac)) 487 return -EINVAL; 488 489 /* Check if a requested MAC already exists */ 490 list_for_each_entry(pos, &o->head, link) 491 if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) 492 return -EEXIST; 493 494 return 0; 495 } 496 497 static int bnx2x_check_vlan_add(struct bnx2x *bp, 498 struct bnx2x_vlan_mac_obj *o, 499 union bnx2x_classification_ramrod_data *data) 500 { 501 struct bnx2x_vlan_mac_registry_elem *pos; 502 503 DP(BNX2X_MSG_SP, "Checking VLAN %d for ADD command\n", data->vlan.vlan); 504 505 list_for_each_entry(pos, &o->head, link) 506 if (data->vlan.vlan == pos->u.vlan.vlan) 507 return -EEXIST; 508 509 return 0; 510 } 511 512 static int bnx2x_check_vlan_mac_add(struct bnx2x *bp, 513 struct bnx2x_vlan_mac_obj *o, 514 union bnx2x_classification_ramrod_data *data) 515 { 516 struct bnx2x_vlan_mac_registry_elem *pos; 517 518 DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n", 519 data->vlan_mac.mac, data->vlan_mac.vlan); 520 521 list_for_each_entry(pos, &o->head, link) 522 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) && 523 (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac, 524 ETH_ALEN))) 525 return -EEXIST; 526 527 return 0; 528 } 529 530 531 /* check_del() callbacks */ 532 static struct bnx2x_vlan_mac_registry_elem * 533 bnx2x_check_mac_del(struct bnx2x *bp, 534 struct bnx2x_vlan_mac_obj *o, 535 union bnx2x_classification_ramrod_data *data) 536 { 537 struct bnx2x_vlan_mac_registry_elem *pos; 538 539 DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac); 540 541 list_for_each_entry(pos, &o->head, link) 542 if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) 543 return pos; 544 545 return NULL; 546 } 547 548 static struct bnx2x_vlan_mac_registry_elem * 549 bnx2x_check_vlan_del(struct bnx2x *bp, 550 struct bnx2x_vlan_mac_obj *o, 551 union bnx2x_classification_ramrod_data *data) 552 { 553 struct bnx2x_vlan_mac_registry_elem *pos; 554 555 DP(BNX2X_MSG_SP, "Checking VLAN %d for DEL command\n", data->vlan.vlan); 556 557 list_for_each_entry(pos, &o->head, link) 558 if (data->vlan.vlan == pos->u.vlan.vlan) 559 return pos; 560 561 return NULL; 562 } 563 564 static struct bnx2x_vlan_mac_registry_elem * 565 bnx2x_check_vlan_mac_del(struct bnx2x *bp, 566 struct bnx2x_vlan_mac_obj *o, 567 union bnx2x_classification_ramrod_data *data) 568 { 569 struct bnx2x_vlan_mac_registry_elem *pos; 570 571 DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n", 572 data->vlan_mac.mac, data->vlan_mac.vlan); 573 574 list_for_each_entry(pos, &o->head, link) 575 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) && 576 (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac, 577 ETH_ALEN))) 578 return pos; 579 580 return NULL; 581 } 582 583 /* check_move() callback */ 584 static bool bnx2x_check_move(struct bnx2x *bp, 585 struct bnx2x_vlan_mac_obj *src_o, 586 struct bnx2x_vlan_mac_obj *dst_o, 587 union bnx2x_classification_ramrod_data *data) 588 { 589 struct bnx2x_vlan_mac_registry_elem *pos; 590 int rc; 591 592 /* Check if we can delete the requested configuration from the first 593 * object. 594 */ 595 pos = src_o->check_del(bp, src_o, data); 596 597 /* check if configuration can be added */ 598 rc = dst_o->check_add(bp, dst_o, data); 599 600 /* If this classification can not be added (is already set) 601 * or can't be deleted - return an error. 602 */ 603 if (rc || !pos) 604 return false; 605 606 return true; 607 } 608 609 static bool bnx2x_check_move_always_err( 610 struct bnx2x *bp, 611 struct bnx2x_vlan_mac_obj *src_o, 612 struct bnx2x_vlan_mac_obj *dst_o, 613 union bnx2x_classification_ramrod_data *data) 614 { 615 return false; 616 } 617 618 619 static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o) 620 { 621 struct bnx2x_raw_obj *raw = &o->raw; 622 u8 rx_tx_flag = 0; 623 624 if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) || 625 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX)) 626 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD; 627 628 if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) || 629 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX)) 630 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD; 631 632 return rx_tx_flag; 633 } 634 635 636 void bnx2x_set_mac_in_nig(struct bnx2x *bp, 637 bool add, unsigned char *dev_addr, int index) 638 { 639 u32 wb_data[2]; 640 u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM : 641 NIG_REG_LLH0_FUNC_MEM; 642 643 if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp)) 644 return; 645 646 if (index > BNX2X_LLH_CAM_MAX_PF_LINE) 647 return; 648 649 DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n", 650 (add ? "ADD" : "DELETE"), index); 651 652 if (add) { 653 /* LLH_FUNC_MEM is a u64 WB register */ 654 reg_offset += 8*index; 655 656 wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) | 657 (dev_addr[4] << 8) | dev_addr[5]); 658 wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]); 659 660 REG_WR_DMAE(bp, reg_offset, wb_data, 2); 661 } 662 663 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE : 664 NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add); 665 } 666 667 /** 668 * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod 669 * 670 * @bp: device handle 671 * @o: queue for which we want to configure this rule 672 * @add: if true the command is an ADD command, DEL otherwise 673 * @opcode: CLASSIFY_RULE_OPCODE_XXX 674 * @hdr: pointer to a header to setup 675 * 676 */ 677 static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp, 678 struct bnx2x_vlan_mac_obj *o, bool add, int opcode, 679 struct eth_classify_cmd_header *hdr) 680 { 681 struct bnx2x_raw_obj *raw = &o->raw; 682 683 hdr->client_id = raw->cl_id; 684 hdr->func_id = raw->func_id; 685 686 /* Rx or/and Tx (internal switching) configuration ? */ 687 hdr->cmd_general_data |= 688 bnx2x_vlan_mac_get_rx_tx_flag(o); 689 690 if (add) 691 hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD; 692 693 hdr->cmd_general_data |= 694 (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT); 695 } 696 697 /** 698 * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header 699 * 700 * @cid: connection id 701 * @type: BNX2X_FILTER_XXX_PENDING 702 * @hdr: poiter to header to setup 703 * @rule_cnt: 704 * 705 * currently we always configure one rule and echo field to contain a CID and an 706 * opcode type. 707 */ 708 static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type, 709 struct eth_classify_header *hdr, int rule_cnt) 710 { 711 hdr->echo = (cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT); 712 hdr->rule_cnt = (u8)rule_cnt; 713 } 714 715 716 /* hw_config() callbacks */ 717 static void bnx2x_set_one_mac_e2(struct bnx2x *bp, 718 struct bnx2x_vlan_mac_obj *o, 719 struct bnx2x_exeq_elem *elem, int rule_idx, 720 int cam_offset) 721 { 722 struct bnx2x_raw_obj *raw = &o->raw; 723 struct eth_classify_rules_ramrod_data *data = 724 (struct eth_classify_rules_ramrod_data *)(raw->rdata); 725 int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd; 726 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; 727 bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false; 728 unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags; 729 u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac; 730 731 /* 732 * Set LLH CAM entry: currently only iSCSI and ETH macs are 733 * relevant. In addition, current implementation is tuned for a 734 * single ETH MAC. 735 * 736 * When multiple unicast ETH MACs PF configuration in switch 737 * independent mode is required (NetQ, multiple netdev MACs, 738 * etc.), consider better utilisation of 8 per function MAC 739 * entries in the LLH register. There is also 740 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the 741 * total number of CAM entries to 16. 742 * 743 * Currently we won't configure NIG for MACs other than a primary ETH 744 * MAC and iSCSI L2 MAC. 745 * 746 * If this MAC is moving from one Queue to another, no need to change 747 * NIG configuration. 748 */ 749 if (cmd != BNX2X_VLAN_MAC_MOVE) { 750 if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags)) 751 bnx2x_set_mac_in_nig(bp, add, mac, 752 BNX2X_LLH_CAM_ISCSI_ETH_LINE); 753 else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags)) 754 bnx2x_set_mac_in_nig(bp, add, mac, 755 BNX2X_LLH_CAM_ETH_LINE); 756 } 757 758 /* Reset the ramrod data buffer for the first rule */ 759 if (rule_idx == 0) 760 memset(data, 0, sizeof(*data)); 761 762 /* Setup a command header */ 763 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC, 764 &rule_entry->mac.header); 765 766 DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n", 767 (add ? "add" : "delete"), mac, raw->cl_id); 768 769 /* Set a MAC itself */ 770 bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb, 771 &rule_entry->mac.mac_mid, 772 &rule_entry->mac.mac_lsb, mac); 773 774 /* MOVE: Add a rule that will add this MAC to the target Queue */ 775 if (cmd == BNX2X_VLAN_MAC_MOVE) { 776 rule_entry++; 777 rule_cnt++; 778 779 /* Setup ramrod data */ 780 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, 781 elem->cmd_data.vlan_mac.target_obj, 782 true, CLASSIFY_RULE_OPCODE_MAC, 783 &rule_entry->mac.header); 784 785 /* Set a MAC itself */ 786 bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb, 787 &rule_entry->mac.mac_mid, 788 &rule_entry->mac.mac_lsb, mac); 789 } 790 791 /* Set the ramrod data header */ 792 /* TODO: take this to the higher level in order to prevent multiple 793 writing */ 794 bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header, 795 rule_cnt); 796 } 797 798 /** 799 * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod 800 * 801 * @bp: device handle 802 * @o: queue 803 * @type: 804 * @cam_offset: offset in cam memory 805 * @hdr: pointer to a header to setup 806 * 807 * E1/E1H 808 */ 809 static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp, 810 struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, 811 struct mac_configuration_hdr *hdr) 812 { 813 struct bnx2x_raw_obj *r = &o->raw; 814 815 hdr->length = 1; 816 hdr->offset = (u8)cam_offset; 817 hdr->client_id = 0xff; 818 hdr->echo = ((r->cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT)); 819 } 820 821 static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp, 822 struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac, 823 u16 vlan_id, struct mac_configuration_entry *cfg_entry) 824 { 825 struct bnx2x_raw_obj *r = &o->raw; 826 u32 cl_bit_vec = (1 << r->cl_id); 827 828 cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec); 829 cfg_entry->pf_id = r->func_id; 830 cfg_entry->vlan_id = cpu_to_le16(vlan_id); 831 832 if (add) { 833 SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE, 834 T_ETH_MAC_COMMAND_SET); 835 SET_FLAG(cfg_entry->flags, 836 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode); 837 838 /* Set a MAC in a ramrod data */ 839 bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr, 840 &cfg_entry->middle_mac_addr, 841 &cfg_entry->lsb_mac_addr, mac); 842 } else 843 SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE, 844 T_ETH_MAC_COMMAND_INVALIDATE); 845 } 846 847 static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp, 848 struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add, 849 u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config) 850 { 851 struct mac_configuration_entry *cfg_entry = &config->config_table[0]; 852 struct bnx2x_raw_obj *raw = &o->raw; 853 854 bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset, 855 &config->hdr); 856 bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id, 857 cfg_entry); 858 859 DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n", 860 (add ? "setting" : "clearing"), 861 mac, raw->cl_id, cam_offset); 862 } 863 864 /** 865 * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data 866 * 867 * @bp: device handle 868 * @o: bnx2x_vlan_mac_obj 869 * @elem: bnx2x_exeq_elem 870 * @rule_idx: rule_idx 871 * @cam_offset: cam_offset 872 */ 873 static void bnx2x_set_one_mac_e1x(struct bnx2x *bp, 874 struct bnx2x_vlan_mac_obj *o, 875 struct bnx2x_exeq_elem *elem, int rule_idx, 876 int cam_offset) 877 { 878 struct bnx2x_raw_obj *raw = &o->raw; 879 struct mac_configuration_cmd *config = 880 (struct mac_configuration_cmd *)(raw->rdata); 881 /* 882 * 57710 and 57711 do not support MOVE command, 883 * so it's either ADD or DEL 884 */ 885 bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? 886 true : false; 887 888 /* Reset the ramrod data buffer */ 889 memset(config, 0, sizeof(*config)); 890 891 bnx2x_vlan_mac_set_rdata_e1x(bp, o, raw->state, 892 cam_offset, add, 893 elem->cmd_data.vlan_mac.u.mac.mac, 0, 894 ETH_VLAN_FILTER_ANY_VLAN, config); 895 } 896 897 static void bnx2x_set_one_vlan_e2(struct bnx2x *bp, 898 struct bnx2x_vlan_mac_obj *o, 899 struct bnx2x_exeq_elem *elem, int rule_idx, 900 int cam_offset) 901 { 902 struct bnx2x_raw_obj *raw = &o->raw; 903 struct eth_classify_rules_ramrod_data *data = 904 (struct eth_classify_rules_ramrod_data *)(raw->rdata); 905 int rule_cnt = rule_idx + 1; 906 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; 907 int cmd = elem->cmd_data.vlan_mac.cmd; 908 bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false; 909 u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan; 910 911 /* Reset the ramrod data buffer for the first rule */ 912 if (rule_idx == 0) 913 memset(data, 0, sizeof(*data)); 914 915 /* Set a rule header */ 916 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN, 917 &rule_entry->vlan.header); 918 919 DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"), 920 vlan); 921 922 /* Set a VLAN itself */ 923 rule_entry->vlan.vlan = cpu_to_le16(vlan); 924 925 /* MOVE: Add a rule that will add this MAC to the target Queue */ 926 if (cmd == BNX2X_VLAN_MAC_MOVE) { 927 rule_entry++; 928 rule_cnt++; 929 930 /* Setup ramrod data */ 931 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, 932 elem->cmd_data.vlan_mac.target_obj, 933 true, CLASSIFY_RULE_OPCODE_VLAN, 934 &rule_entry->vlan.header); 935 936 /* Set a VLAN itself */ 937 rule_entry->vlan.vlan = cpu_to_le16(vlan); 938 } 939 940 /* Set the ramrod data header */ 941 /* TODO: take this to the higher level in order to prevent multiple 942 writing */ 943 bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header, 944 rule_cnt); 945 } 946 947 static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp, 948 struct bnx2x_vlan_mac_obj *o, 949 struct bnx2x_exeq_elem *elem, 950 int rule_idx, int cam_offset) 951 { 952 struct bnx2x_raw_obj *raw = &o->raw; 953 struct eth_classify_rules_ramrod_data *data = 954 (struct eth_classify_rules_ramrod_data *)(raw->rdata); 955 int rule_cnt = rule_idx + 1; 956 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; 957 int cmd = elem->cmd_data.vlan_mac.cmd; 958 bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false; 959 u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan; 960 u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac; 961 962 963 /* Reset the ramrod data buffer for the first rule */ 964 if (rule_idx == 0) 965 memset(data, 0, sizeof(*data)); 966 967 /* Set a rule header */ 968 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR, 969 &rule_entry->pair.header); 970 971 /* Set VLAN and MAC themselvs */ 972 rule_entry->pair.vlan = cpu_to_le16(vlan); 973 bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb, 974 &rule_entry->pair.mac_mid, 975 &rule_entry->pair.mac_lsb, mac); 976 977 /* MOVE: Add a rule that will add this MAC to the target Queue */ 978 if (cmd == BNX2X_VLAN_MAC_MOVE) { 979 rule_entry++; 980 rule_cnt++; 981 982 /* Setup ramrod data */ 983 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, 984 elem->cmd_data.vlan_mac.target_obj, 985 true, CLASSIFY_RULE_OPCODE_PAIR, 986 &rule_entry->pair.header); 987 988 /* Set a VLAN itself */ 989 rule_entry->pair.vlan = cpu_to_le16(vlan); 990 bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb, 991 &rule_entry->pair.mac_mid, 992 &rule_entry->pair.mac_lsb, mac); 993 } 994 995 /* Set the ramrod data header */ 996 /* TODO: take this to the higher level in order to prevent multiple 997 writing */ 998 bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header, 999 rule_cnt); 1000 } 1001 1002 /** 1003 * bnx2x_set_one_vlan_mac_e1h - 1004 * 1005 * @bp: device handle 1006 * @o: bnx2x_vlan_mac_obj 1007 * @elem: bnx2x_exeq_elem 1008 * @rule_idx: rule_idx 1009 * @cam_offset: cam_offset 1010 */ 1011 static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp, 1012 struct bnx2x_vlan_mac_obj *o, 1013 struct bnx2x_exeq_elem *elem, 1014 int rule_idx, int cam_offset) 1015 { 1016 struct bnx2x_raw_obj *raw = &o->raw; 1017 struct mac_configuration_cmd *config = 1018 (struct mac_configuration_cmd *)(raw->rdata); 1019 /* 1020 * 57710 and 57711 do not support MOVE command, 1021 * so it's either ADD or DEL 1022 */ 1023 bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? 1024 true : false; 1025 1026 /* Reset the ramrod data buffer */ 1027 memset(config, 0, sizeof(*config)); 1028 1029 bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING, 1030 cam_offset, add, 1031 elem->cmd_data.vlan_mac.u.vlan_mac.mac, 1032 elem->cmd_data.vlan_mac.u.vlan_mac.vlan, 1033 ETH_VLAN_FILTER_CLASSIFY, config); 1034 } 1035 1036 #define list_next_entry(pos, member) \ 1037 list_entry((pos)->member.next, typeof(*(pos)), member) 1038 1039 /** 1040 * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element 1041 * 1042 * @bp: device handle 1043 * @p: command parameters 1044 * @ppos: pointer to the cooky 1045 * 1046 * reconfigure next MAC/VLAN/VLAN-MAC element from the 1047 * previously configured elements list. 1048 * 1049 * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken 1050 * into an account 1051 * 1052 * pointer to the cooky - that should be given back in the next call to make 1053 * function handle the next element. If *ppos is set to NULL it will restart the 1054 * iterator. If returned *ppos == NULL this means that the last element has been 1055 * handled. 1056 * 1057 */ 1058 static int bnx2x_vlan_mac_restore(struct bnx2x *bp, 1059 struct bnx2x_vlan_mac_ramrod_params *p, 1060 struct bnx2x_vlan_mac_registry_elem **ppos) 1061 { 1062 struct bnx2x_vlan_mac_registry_elem *pos; 1063 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj; 1064 1065 /* If list is empty - there is nothing to do here */ 1066 if (list_empty(&o->head)) { 1067 *ppos = NULL; 1068 return 0; 1069 } 1070 1071 /* make a step... */ 1072 if (*ppos == NULL) 1073 *ppos = list_first_entry(&o->head, 1074 struct bnx2x_vlan_mac_registry_elem, 1075 link); 1076 else 1077 *ppos = list_next_entry(*ppos, link); 1078 1079 pos = *ppos; 1080 1081 /* If it's the last step - return NULL */ 1082 if (list_is_last(&pos->link, &o->head)) 1083 *ppos = NULL; 1084 1085 /* Prepare a 'user_req' */ 1086 memcpy(&p->user_req.u, &pos->u, sizeof(pos->u)); 1087 1088 /* Set the command */ 1089 p->user_req.cmd = BNX2X_VLAN_MAC_ADD; 1090 1091 /* Set vlan_mac_flags */ 1092 p->user_req.vlan_mac_flags = pos->vlan_mac_flags; 1093 1094 /* Set a restore bit */ 1095 __set_bit(RAMROD_RESTORE, &p->ramrod_flags); 1096 1097 return bnx2x_config_vlan_mac(bp, p); 1098 } 1099 1100 /* 1101 * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a 1102 * pointer to an element with a specific criteria and NULL if such an element 1103 * hasn't been found. 1104 */ 1105 static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac( 1106 struct bnx2x_exe_queue_obj *o, 1107 struct bnx2x_exeq_elem *elem) 1108 { 1109 struct bnx2x_exeq_elem *pos; 1110 struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac; 1111 1112 /* Check pending for execution commands */ 1113 list_for_each_entry(pos, &o->exe_queue, link) 1114 if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data, 1115 sizeof(*data)) && 1116 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd)) 1117 return pos; 1118 1119 return NULL; 1120 } 1121 1122 static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan( 1123 struct bnx2x_exe_queue_obj *o, 1124 struct bnx2x_exeq_elem *elem) 1125 { 1126 struct bnx2x_exeq_elem *pos; 1127 struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan; 1128 1129 /* Check pending for execution commands */ 1130 list_for_each_entry(pos, &o->exe_queue, link) 1131 if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data, 1132 sizeof(*data)) && 1133 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd)) 1134 return pos; 1135 1136 return NULL; 1137 } 1138 1139 static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac( 1140 struct bnx2x_exe_queue_obj *o, 1141 struct bnx2x_exeq_elem *elem) 1142 { 1143 struct bnx2x_exeq_elem *pos; 1144 struct bnx2x_vlan_mac_ramrod_data *data = 1145 &elem->cmd_data.vlan_mac.u.vlan_mac; 1146 1147 /* Check pending for execution commands */ 1148 list_for_each_entry(pos, &o->exe_queue, link) 1149 if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data, 1150 sizeof(*data)) && 1151 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd)) 1152 return pos; 1153 1154 return NULL; 1155 } 1156 1157 /** 1158 * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed 1159 * 1160 * @bp: device handle 1161 * @qo: bnx2x_qable_obj 1162 * @elem: bnx2x_exeq_elem 1163 * 1164 * Checks that the requested configuration can be added. If yes and if 1165 * requested, consume CAM credit. 1166 * 1167 * The 'validate' is run after the 'optimize'. 1168 * 1169 */ 1170 static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp, 1171 union bnx2x_qable_obj *qo, 1172 struct bnx2x_exeq_elem *elem) 1173 { 1174 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac; 1175 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; 1176 int rc; 1177 1178 /* Check the registry */ 1179 rc = o->check_add(bp, o, &elem->cmd_data.vlan_mac.u); 1180 if (rc) { 1181 DP(BNX2X_MSG_SP, "ADD command is not allowed considering current registry state.\n"); 1182 return rc; 1183 } 1184 1185 /* 1186 * Check if there is a pending ADD command for this 1187 * MAC/VLAN/VLAN-MAC. Return an error if there is. 1188 */ 1189 if (exeq->get(exeq, elem)) { 1190 DP(BNX2X_MSG_SP, "There is a pending ADD command already\n"); 1191 return -EEXIST; 1192 } 1193 1194 /* 1195 * TODO: Check the pending MOVE from other objects where this 1196 * object is a destination object. 1197 */ 1198 1199 /* Consume the credit if not requested not to */ 1200 if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, 1201 &elem->cmd_data.vlan_mac.vlan_mac_flags) || 1202 o->get_credit(o))) 1203 return -EINVAL; 1204 1205 return 0; 1206 } 1207 1208 /** 1209 * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed 1210 * 1211 * @bp: device handle 1212 * @qo: quable object to check 1213 * @elem: element that needs to be deleted 1214 * 1215 * Checks that the requested configuration can be deleted. If yes and if 1216 * requested, returns a CAM credit. 1217 * 1218 * The 'validate' is run after the 'optimize'. 1219 */ 1220 static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp, 1221 union bnx2x_qable_obj *qo, 1222 struct bnx2x_exeq_elem *elem) 1223 { 1224 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac; 1225 struct bnx2x_vlan_mac_registry_elem *pos; 1226 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; 1227 struct bnx2x_exeq_elem query_elem; 1228 1229 /* If this classification can not be deleted (doesn't exist) 1230 * - return a BNX2X_EXIST. 1231 */ 1232 pos = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u); 1233 if (!pos) { 1234 DP(BNX2X_MSG_SP, "DEL command is not allowed considering current registry state\n"); 1235 return -EEXIST; 1236 } 1237 1238 /* 1239 * Check if there are pending DEL or MOVE commands for this 1240 * MAC/VLAN/VLAN-MAC. Return an error if so. 1241 */ 1242 memcpy(&query_elem, elem, sizeof(query_elem)); 1243 1244 /* Check for MOVE commands */ 1245 query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE; 1246 if (exeq->get(exeq, &query_elem)) { 1247 BNX2X_ERR("There is a pending MOVE command already\n"); 1248 return -EINVAL; 1249 } 1250 1251 /* Check for DEL commands */ 1252 if (exeq->get(exeq, elem)) { 1253 DP(BNX2X_MSG_SP, "There is a pending DEL command already\n"); 1254 return -EEXIST; 1255 } 1256 1257 /* Return the credit to the credit pool if not requested not to */ 1258 if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, 1259 &elem->cmd_data.vlan_mac.vlan_mac_flags) || 1260 o->put_credit(o))) { 1261 BNX2X_ERR("Failed to return a credit\n"); 1262 return -EINVAL; 1263 } 1264 1265 return 0; 1266 } 1267 1268 /** 1269 * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed 1270 * 1271 * @bp: device handle 1272 * @qo: quable object to check (source) 1273 * @elem: element that needs to be moved 1274 * 1275 * Checks that the requested configuration can be moved. If yes and if 1276 * requested, returns a CAM credit. 1277 * 1278 * The 'validate' is run after the 'optimize'. 1279 */ 1280 static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp, 1281 union bnx2x_qable_obj *qo, 1282 struct bnx2x_exeq_elem *elem) 1283 { 1284 struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac; 1285 struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj; 1286 struct bnx2x_exeq_elem query_elem; 1287 struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue; 1288 struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue; 1289 1290 /* 1291 * Check if we can perform this operation based on the current registry 1292 * state. 1293 */ 1294 if (!src_o->check_move(bp, src_o, dest_o, 1295 &elem->cmd_data.vlan_mac.u)) { 1296 DP(BNX2X_MSG_SP, "MOVE command is not allowed considering current registry state\n"); 1297 return -EINVAL; 1298 } 1299 1300 /* 1301 * Check if there is an already pending DEL or MOVE command for the 1302 * source object or ADD command for a destination object. Return an 1303 * error if so. 1304 */ 1305 memcpy(&query_elem, elem, sizeof(query_elem)); 1306 1307 /* Check DEL on source */ 1308 query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL; 1309 if (src_exeq->get(src_exeq, &query_elem)) { 1310 BNX2X_ERR("There is a pending DEL command on the source queue already\n"); 1311 return -EINVAL; 1312 } 1313 1314 /* Check MOVE on source */ 1315 if (src_exeq->get(src_exeq, elem)) { 1316 DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n"); 1317 return -EEXIST; 1318 } 1319 1320 /* Check ADD on destination */ 1321 query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD; 1322 if (dest_exeq->get(dest_exeq, &query_elem)) { 1323 BNX2X_ERR("There is a pending ADD command on the destination queue already\n"); 1324 return -EINVAL; 1325 } 1326 1327 /* Consume the credit if not requested not to */ 1328 if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST, 1329 &elem->cmd_data.vlan_mac.vlan_mac_flags) || 1330 dest_o->get_credit(dest_o))) 1331 return -EINVAL; 1332 1333 if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, 1334 &elem->cmd_data.vlan_mac.vlan_mac_flags) || 1335 src_o->put_credit(src_o))) { 1336 /* return the credit taken from dest... */ 1337 dest_o->put_credit(dest_o); 1338 return -EINVAL; 1339 } 1340 1341 return 0; 1342 } 1343 1344 static int bnx2x_validate_vlan_mac(struct bnx2x *bp, 1345 union bnx2x_qable_obj *qo, 1346 struct bnx2x_exeq_elem *elem) 1347 { 1348 switch (elem->cmd_data.vlan_mac.cmd) { 1349 case BNX2X_VLAN_MAC_ADD: 1350 return bnx2x_validate_vlan_mac_add(bp, qo, elem); 1351 case BNX2X_VLAN_MAC_DEL: 1352 return bnx2x_validate_vlan_mac_del(bp, qo, elem); 1353 case BNX2X_VLAN_MAC_MOVE: 1354 return bnx2x_validate_vlan_mac_move(bp, qo, elem); 1355 default: 1356 return -EINVAL; 1357 } 1358 } 1359 1360 static int bnx2x_remove_vlan_mac(struct bnx2x *bp, 1361 union bnx2x_qable_obj *qo, 1362 struct bnx2x_exeq_elem *elem) 1363 { 1364 int rc = 0; 1365 1366 /* If consumption wasn't required, nothing to do */ 1367 if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, 1368 &elem->cmd_data.vlan_mac.vlan_mac_flags)) 1369 return 0; 1370 1371 switch (elem->cmd_data.vlan_mac.cmd) { 1372 case BNX2X_VLAN_MAC_ADD: 1373 case BNX2X_VLAN_MAC_MOVE: 1374 rc = qo->vlan_mac.put_credit(&qo->vlan_mac); 1375 break; 1376 case BNX2X_VLAN_MAC_DEL: 1377 rc = qo->vlan_mac.get_credit(&qo->vlan_mac); 1378 break; 1379 default: 1380 return -EINVAL; 1381 } 1382 1383 if (rc != true) 1384 return -EINVAL; 1385 1386 return 0; 1387 } 1388 1389 /** 1390 * bnx2x_wait_vlan_mac - passivly wait for 5 seconds until all work completes. 1391 * 1392 * @bp: device handle 1393 * @o: bnx2x_vlan_mac_obj 1394 * 1395 */ 1396 static int bnx2x_wait_vlan_mac(struct bnx2x *bp, 1397 struct bnx2x_vlan_mac_obj *o) 1398 { 1399 int cnt = 5000, rc; 1400 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; 1401 struct bnx2x_raw_obj *raw = &o->raw; 1402 1403 while (cnt--) { 1404 /* Wait for the current command to complete */ 1405 rc = raw->wait_comp(bp, raw); 1406 if (rc) 1407 return rc; 1408 1409 /* Wait until there are no pending commands */ 1410 if (!bnx2x_exe_queue_empty(exeq)) 1411 usleep_range(1000, 1000); 1412 else 1413 return 0; 1414 } 1415 1416 return -EBUSY; 1417 } 1418 1419 /** 1420 * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod 1421 * 1422 * @bp: device handle 1423 * @o: bnx2x_vlan_mac_obj 1424 * @cqe: 1425 * @cont: if true schedule next execution chunk 1426 * 1427 */ 1428 static int bnx2x_complete_vlan_mac(struct bnx2x *bp, 1429 struct bnx2x_vlan_mac_obj *o, 1430 union event_ring_elem *cqe, 1431 unsigned long *ramrod_flags) 1432 { 1433 struct bnx2x_raw_obj *r = &o->raw; 1434 int rc; 1435 1436 /* Reset pending list */ 1437 bnx2x_exe_queue_reset_pending(bp, &o->exe_queue); 1438 1439 /* Clear pending */ 1440 r->clear_pending(r); 1441 1442 /* If ramrod failed this is most likely a SW bug */ 1443 if (cqe->message.error) 1444 return -EINVAL; 1445 1446 /* Run the next bulk of pending commands if requeted */ 1447 if (test_bit(RAMROD_CONT, ramrod_flags)) { 1448 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags); 1449 if (rc < 0) 1450 return rc; 1451 } 1452 1453 /* If there is more work to do return PENDING */ 1454 if (!bnx2x_exe_queue_empty(&o->exe_queue)) 1455 return 1; 1456 1457 return 0; 1458 } 1459 1460 /** 1461 * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands. 1462 * 1463 * @bp: device handle 1464 * @o: bnx2x_qable_obj 1465 * @elem: bnx2x_exeq_elem 1466 */ 1467 static int bnx2x_optimize_vlan_mac(struct bnx2x *bp, 1468 union bnx2x_qable_obj *qo, 1469 struct bnx2x_exeq_elem *elem) 1470 { 1471 struct bnx2x_exeq_elem query, *pos; 1472 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac; 1473 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; 1474 1475 memcpy(&query, elem, sizeof(query)); 1476 1477 switch (elem->cmd_data.vlan_mac.cmd) { 1478 case BNX2X_VLAN_MAC_ADD: 1479 query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL; 1480 break; 1481 case BNX2X_VLAN_MAC_DEL: 1482 query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD; 1483 break; 1484 default: 1485 /* Don't handle anything other than ADD or DEL */ 1486 return 0; 1487 } 1488 1489 /* If we found the appropriate element - delete it */ 1490 pos = exeq->get(exeq, &query); 1491 if (pos) { 1492 1493 /* Return the credit of the optimized command */ 1494 if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, 1495 &pos->cmd_data.vlan_mac.vlan_mac_flags)) { 1496 if ((query.cmd_data.vlan_mac.cmd == 1497 BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) { 1498 BNX2X_ERR("Failed to return the credit for the optimized ADD command\n"); 1499 return -EINVAL; 1500 } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */ 1501 BNX2X_ERR("Failed to recover the credit from the optimized DEL command\n"); 1502 return -EINVAL; 1503 } 1504 } 1505 1506 DP(BNX2X_MSG_SP, "Optimizing %s command\n", 1507 (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? 1508 "ADD" : "DEL"); 1509 1510 list_del(&pos->link); 1511 bnx2x_exe_queue_free_elem(bp, pos); 1512 return 1; 1513 } 1514 1515 return 0; 1516 } 1517 1518 /** 1519 * bnx2x_vlan_mac_get_registry_elem - prepare a registry element 1520 * 1521 * @bp: device handle 1522 * @o: 1523 * @elem: 1524 * @restore: 1525 * @re: 1526 * 1527 * prepare a registry element according to the current command request. 1528 */ 1529 static inline int bnx2x_vlan_mac_get_registry_elem( 1530 struct bnx2x *bp, 1531 struct bnx2x_vlan_mac_obj *o, 1532 struct bnx2x_exeq_elem *elem, 1533 bool restore, 1534 struct bnx2x_vlan_mac_registry_elem **re) 1535 { 1536 int cmd = elem->cmd_data.vlan_mac.cmd; 1537 struct bnx2x_vlan_mac_registry_elem *reg_elem; 1538 1539 /* Allocate a new registry element if needed. */ 1540 if (!restore && 1541 ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) { 1542 reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC); 1543 if (!reg_elem) 1544 return -ENOMEM; 1545 1546 /* Get a new CAM offset */ 1547 if (!o->get_cam_offset(o, ®_elem->cam_offset)) { 1548 /* 1549 * This shell never happen, because we have checked the 1550 * CAM availiability in the 'validate'. 1551 */ 1552 WARN_ON(1); 1553 kfree(reg_elem); 1554 return -EINVAL; 1555 } 1556 1557 DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset); 1558 1559 /* Set a VLAN-MAC data */ 1560 memcpy(®_elem->u, &elem->cmd_data.vlan_mac.u, 1561 sizeof(reg_elem->u)); 1562 1563 /* Copy the flags (needed for DEL and RESTORE flows) */ 1564 reg_elem->vlan_mac_flags = 1565 elem->cmd_data.vlan_mac.vlan_mac_flags; 1566 } else /* DEL, RESTORE */ 1567 reg_elem = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u); 1568 1569 *re = reg_elem; 1570 return 0; 1571 } 1572 1573 /** 1574 * bnx2x_execute_vlan_mac - execute vlan mac command 1575 * 1576 * @bp: device handle 1577 * @qo: 1578 * @exe_chunk: 1579 * @ramrod_flags: 1580 * 1581 * go and send a ramrod! 1582 */ 1583 static int bnx2x_execute_vlan_mac(struct bnx2x *bp, 1584 union bnx2x_qable_obj *qo, 1585 struct list_head *exe_chunk, 1586 unsigned long *ramrod_flags) 1587 { 1588 struct bnx2x_exeq_elem *elem; 1589 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj; 1590 struct bnx2x_raw_obj *r = &o->raw; 1591 int rc, idx = 0; 1592 bool restore = test_bit(RAMROD_RESTORE, ramrod_flags); 1593 bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags); 1594 struct bnx2x_vlan_mac_registry_elem *reg_elem; 1595 int cmd; 1596 1597 /* 1598 * If DRIVER_ONLY execution is requested, cleanup a registry 1599 * and exit. Otherwise send a ramrod to FW. 1600 */ 1601 if (!drv_only) { 1602 WARN_ON(r->check_pending(r)); 1603 1604 /* Set pending */ 1605 r->set_pending(r); 1606 1607 /* Fill tha ramrod data */ 1608 list_for_each_entry(elem, exe_chunk, link) { 1609 cmd = elem->cmd_data.vlan_mac.cmd; 1610 /* 1611 * We will add to the target object in MOVE command, so 1612 * change the object for a CAM search. 1613 */ 1614 if (cmd == BNX2X_VLAN_MAC_MOVE) 1615 cam_obj = elem->cmd_data.vlan_mac.target_obj; 1616 else 1617 cam_obj = o; 1618 1619 rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj, 1620 elem, restore, 1621 ®_elem); 1622 if (rc) 1623 goto error_exit; 1624 1625 WARN_ON(!reg_elem); 1626 1627 /* Push a new entry into the registry */ 1628 if (!restore && 1629 ((cmd == BNX2X_VLAN_MAC_ADD) || 1630 (cmd == BNX2X_VLAN_MAC_MOVE))) 1631 list_add(®_elem->link, &cam_obj->head); 1632 1633 /* Configure a single command in a ramrod data buffer */ 1634 o->set_one_rule(bp, o, elem, idx, 1635 reg_elem->cam_offset); 1636 1637 /* MOVE command consumes 2 entries in the ramrod data */ 1638 if (cmd == BNX2X_VLAN_MAC_MOVE) 1639 idx += 2; 1640 else 1641 idx++; 1642 } 1643 1644 /* 1645 * No need for an explicit memory barrier here as long we would 1646 * need to ensure the ordering of writing to the SPQ element 1647 * and updating of the SPQ producer which involves a memory 1648 * read and we will have to put a full memory barrier there 1649 * (inside bnx2x_sp_post()). 1650 */ 1651 1652 rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid, 1653 U64_HI(r->rdata_mapping), 1654 U64_LO(r->rdata_mapping), 1655 ETH_CONNECTION_TYPE); 1656 if (rc) 1657 goto error_exit; 1658 } 1659 1660 /* Now, when we are done with the ramrod - clean up the registry */ 1661 list_for_each_entry(elem, exe_chunk, link) { 1662 cmd = elem->cmd_data.vlan_mac.cmd; 1663 if ((cmd == BNX2X_VLAN_MAC_DEL) || 1664 (cmd == BNX2X_VLAN_MAC_MOVE)) { 1665 reg_elem = o->check_del(bp, o, 1666 &elem->cmd_data.vlan_mac.u); 1667 1668 WARN_ON(!reg_elem); 1669 1670 o->put_cam_offset(o, reg_elem->cam_offset); 1671 list_del(®_elem->link); 1672 kfree(reg_elem); 1673 } 1674 } 1675 1676 if (!drv_only) 1677 return 1; 1678 else 1679 return 0; 1680 1681 error_exit: 1682 r->clear_pending(r); 1683 1684 /* Cleanup a registry in case of a failure */ 1685 list_for_each_entry(elem, exe_chunk, link) { 1686 cmd = elem->cmd_data.vlan_mac.cmd; 1687 1688 if (cmd == BNX2X_VLAN_MAC_MOVE) 1689 cam_obj = elem->cmd_data.vlan_mac.target_obj; 1690 else 1691 cam_obj = o; 1692 1693 /* Delete all newly added above entries */ 1694 if (!restore && 1695 ((cmd == BNX2X_VLAN_MAC_ADD) || 1696 (cmd == BNX2X_VLAN_MAC_MOVE))) { 1697 reg_elem = o->check_del(bp, cam_obj, 1698 &elem->cmd_data.vlan_mac.u); 1699 if (reg_elem) { 1700 list_del(®_elem->link); 1701 kfree(reg_elem); 1702 } 1703 } 1704 } 1705 1706 return rc; 1707 } 1708 1709 static inline int bnx2x_vlan_mac_push_new_cmd( 1710 struct bnx2x *bp, 1711 struct bnx2x_vlan_mac_ramrod_params *p) 1712 { 1713 struct bnx2x_exeq_elem *elem; 1714 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj; 1715 bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags); 1716 1717 /* Allocate the execution queue element */ 1718 elem = bnx2x_exe_queue_alloc_elem(bp); 1719 if (!elem) 1720 return -ENOMEM; 1721 1722 /* Set the command 'length' */ 1723 switch (p->user_req.cmd) { 1724 case BNX2X_VLAN_MAC_MOVE: 1725 elem->cmd_len = 2; 1726 break; 1727 default: 1728 elem->cmd_len = 1; 1729 } 1730 1731 /* Fill the object specific info */ 1732 memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req)); 1733 1734 /* Try to add a new command to the pending list */ 1735 return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore); 1736 } 1737 1738 /** 1739 * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules. 1740 * 1741 * @bp: device handle 1742 * @p: 1743 * 1744 */ 1745 int bnx2x_config_vlan_mac( 1746 struct bnx2x *bp, 1747 struct bnx2x_vlan_mac_ramrod_params *p) 1748 { 1749 int rc = 0; 1750 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj; 1751 unsigned long *ramrod_flags = &p->ramrod_flags; 1752 bool cont = test_bit(RAMROD_CONT, ramrod_flags); 1753 struct bnx2x_raw_obj *raw = &o->raw; 1754 1755 /* 1756 * Add new elements to the execution list for commands that require it. 1757 */ 1758 if (!cont) { 1759 rc = bnx2x_vlan_mac_push_new_cmd(bp, p); 1760 if (rc) 1761 return rc; 1762 } 1763 1764 /* 1765 * If nothing will be executed further in this iteration we want to 1766 * return PENDING if there are pending commands 1767 */ 1768 if (!bnx2x_exe_queue_empty(&o->exe_queue)) 1769 rc = 1; 1770 1771 if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) { 1772 DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n"); 1773 raw->clear_pending(raw); 1774 } 1775 1776 /* Execute commands if required */ 1777 if (cont || test_bit(RAMROD_EXEC, ramrod_flags) || 1778 test_bit(RAMROD_COMP_WAIT, ramrod_flags)) { 1779 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags); 1780 if (rc < 0) 1781 return rc; 1782 } 1783 1784 /* 1785 * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set 1786 * then user want to wait until the last command is done. 1787 */ 1788 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) { 1789 /* 1790 * Wait maximum for the current exe_queue length iterations plus 1791 * one (for the current pending command). 1792 */ 1793 int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1; 1794 1795 while (!bnx2x_exe_queue_empty(&o->exe_queue) && 1796 max_iterations--) { 1797 1798 /* Wait for the current command to complete */ 1799 rc = raw->wait_comp(bp, raw); 1800 if (rc) 1801 return rc; 1802 1803 /* Make a next step */ 1804 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, 1805 ramrod_flags); 1806 if (rc < 0) 1807 return rc; 1808 } 1809 1810 return 0; 1811 } 1812 1813 return rc; 1814 } 1815 1816 1817 1818 /** 1819 * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec 1820 * 1821 * @bp: device handle 1822 * @o: 1823 * @vlan_mac_flags: 1824 * @ramrod_flags: execution flags to be used for this deletion 1825 * 1826 * if the last operation has completed successfully and there are no 1827 * moreelements left, positive value if the last operation has completed 1828 * successfully and there are more previously configured elements, negative 1829 * value is current operation has failed. 1830 */ 1831 static int bnx2x_vlan_mac_del_all(struct bnx2x *bp, 1832 struct bnx2x_vlan_mac_obj *o, 1833 unsigned long *vlan_mac_flags, 1834 unsigned long *ramrod_flags) 1835 { 1836 struct bnx2x_vlan_mac_registry_elem *pos = NULL; 1837 int rc = 0; 1838 struct bnx2x_vlan_mac_ramrod_params p; 1839 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; 1840 struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n; 1841 1842 /* Clear pending commands first */ 1843 1844 spin_lock_bh(&exeq->lock); 1845 1846 list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) { 1847 if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags == 1848 *vlan_mac_flags) { 1849 rc = exeq->remove(bp, exeq->owner, exeq_pos); 1850 if (rc) { 1851 BNX2X_ERR("Failed to remove command\n"); 1852 spin_unlock_bh(&exeq->lock); 1853 return rc; 1854 } 1855 list_del(&exeq_pos->link); 1856 } 1857 } 1858 1859 spin_unlock_bh(&exeq->lock); 1860 1861 /* Prepare a command request */ 1862 memset(&p, 0, sizeof(p)); 1863 p.vlan_mac_obj = o; 1864 p.ramrod_flags = *ramrod_flags; 1865 p.user_req.cmd = BNX2X_VLAN_MAC_DEL; 1866 1867 /* 1868 * Add all but the last VLAN-MAC to the execution queue without actually 1869 * execution anything. 1870 */ 1871 __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags); 1872 __clear_bit(RAMROD_EXEC, &p.ramrod_flags); 1873 __clear_bit(RAMROD_CONT, &p.ramrod_flags); 1874 1875 list_for_each_entry(pos, &o->head, link) { 1876 if (pos->vlan_mac_flags == *vlan_mac_flags) { 1877 p.user_req.vlan_mac_flags = pos->vlan_mac_flags; 1878 memcpy(&p.user_req.u, &pos->u, sizeof(pos->u)); 1879 rc = bnx2x_config_vlan_mac(bp, &p); 1880 if (rc < 0) { 1881 BNX2X_ERR("Failed to add a new DEL command\n"); 1882 return rc; 1883 } 1884 } 1885 } 1886 1887 p.ramrod_flags = *ramrod_flags; 1888 __set_bit(RAMROD_CONT, &p.ramrod_flags); 1889 1890 return bnx2x_config_vlan_mac(bp, &p); 1891 } 1892 1893 static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id, 1894 u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state, 1895 unsigned long *pstate, bnx2x_obj_type type) 1896 { 1897 raw->func_id = func_id; 1898 raw->cid = cid; 1899 raw->cl_id = cl_id; 1900 raw->rdata = rdata; 1901 raw->rdata_mapping = rdata_mapping; 1902 raw->state = state; 1903 raw->pstate = pstate; 1904 raw->obj_type = type; 1905 raw->check_pending = bnx2x_raw_check_pending; 1906 raw->clear_pending = bnx2x_raw_clear_pending; 1907 raw->set_pending = bnx2x_raw_set_pending; 1908 raw->wait_comp = bnx2x_raw_wait; 1909 } 1910 1911 static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o, 1912 u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, 1913 int state, unsigned long *pstate, bnx2x_obj_type type, 1914 struct bnx2x_credit_pool_obj *macs_pool, 1915 struct bnx2x_credit_pool_obj *vlans_pool) 1916 { 1917 INIT_LIST_HEAD(&o->head); 1918 1919 o->macs_pool = macs_pool; 1920 o->vlans_pool = vlans_pool; 1921 1922 o->delete_all = bnx2x_vlan_mac_del_all; 1923 o->restore = bnx2x_vlan_mac_restore; 1924 o->complete = bnx2x_complete_vlan_mac; 1925 o->wait = bnx2x_wait_vlan_mac; 1926 1927 bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping, 1928 state, pstate, type); 1929 } 1930 1931 1932 void bnx2x_init_mac_obj(struct bnx2x *bp, 1933 struct bnx2x_vlan_mac_obj *mac_obj, 1934 u8 cl_id, u32 cid, u8 func_id, void *rdata, 1935 dma_addr_t rdata_mapping, int state, 1936 unsigned long *pstate, bnx2x_obj_type type, 1937 struct bnx2x_credit_pool_obj *macs_pool) 1938 { 1939 union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj; 1940 1941 bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata, 1942 rdata_mapping, state, pstate, type, 1943 macs_pool, NULL); 1944 1945 /* CAM credit pool handling */ 1946 mac_obj->get_credit = bnx2x_get_credit_mac; 1947 mac_obj->put_credit = bnx2x_put_credit_mac; 1948 mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac; 1949 mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac; 1950 1951 if (CHIP_IS_E1x(bp)) { 1952 mac_obj->set_one_rule = bnx2x_set_one_mac_e1x; 1953 mac_obj->check_del = bnx2x_check_mac_del; 1954 mac_obj->check_add = bnx2x_check_mac_add; 1955 mac_obj->check_move = bnx2x_check_move_always_err; 1956 mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC; 1957 1958 /* Exe Queue */ 1959 bnx2x_exe_queue_init(bp, 1960 &mac_obj->exe_queue, 1, qable_obj, 1961 bnx2x_validate_vlan_mac, 1962 bnx2x_remove_vlan_mac, 1963 bnx2x_optimize_vlan_mac, 1964 bnx2x_execute_vlan_mac, 1965 bnx2x_exeq_get_mac); 1966 } else { 1967 mac_obj->set_one_rule = bnx2x_set_one_mac_e2; 1968 mac_obj->check_del = bnx2x_check_mac_del; 1969 mac_obj->check_add = bnx2x_check_mac_add; 1970 mac_obj->check_move = bnx2x_check_move; 1971 mac_obj->ramrod_cmd = 1972 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; 1973 mac_obj->get_n_elements = bnx2x_get_n_elements; 1974 1975 /* Exe Queue */ 1976 bnx2x_exe_queue_init(bp, 1977 &mac_obj->exe_queue, CLASSIFY_RULES_COUNT, 1978 qable_obj, bnx2x_validate_vlan_mac, 1979 bnx2x_remove_vlan_mac, 1980 bnx2x_optimize_vlan_mac, 1981 bnx2x_execute_vlan_mac, 1982 bnx2x_exeq_get_mac); 1983 } 1984 } 1985 1986 void bnx2x_init_vlan_obj(struct bnx2x *bp, 1987 struct bnx2x_vlan_mac_obj *vlan_obj, 1988 u8 cl_id, u32 cid, u8 func_id, void *rdata, 1989 dma_addr_t rdata_mapping, int state, 1990 unsigned long *pstate, bnx2x_obj_type type, 1991 struct bnx2x_credit_pool_obj *vlans_pool) 1992 { 1993 union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj; 1994 1995 bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata, 1996 rdata_mapping, state, pstate, type, NULL, 1997 vlans_pool); 1998 1999 vlan_obj->get_credit = bnx2x_get_credit_vlan; 2000 vlan_obj->put_credit = bnx2x_put_credit_vlan; 2001 vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan; 2002 vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan; 2003 2004 if (CHIP_IS_E1x(bp)) { 2005 BNX2X_ERR("Do not support chips others than E2 and newer\n"); 2006 BUG(); 2007 } else { 2008 vlan_obj->set_one_rule = bnx2x_set_one_vlan_e2; 2009 vlan_obj->check_del = bnx2x_check_vlan_del; 2010 vlan_obj->check_add = bnx2x_check_vlan_add; 2011 vlan_obj->check_move = bnx2x_check_move; 2012 vlan_obj->ramrod_cmd = 2013 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; 2014 2015 /* Exe Queue */ 2016 bnx2x_exe_queue_init(bp, 2017 &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT, 2018 qable_obj, bnx2x_validate_vlan_mac, 2019 bnx2x_remove_vlan_mac, 2020 bnx2x_optimize_vlan_mac, 2021 bnx2x_execute_vlan_mac, 2022 bnx2x_exeq_get_vlan); 2023 } 2024 } 2025 2026 void bnx2x_init_vlan_mac_obj(struct bnx2x *bp, 2027 struct bnx2x_vlan_mac_obj *vlan_mac_obj, 2028 u8 cl_id, u32 cid, u8 func_id, void *rdata, 2029 dma_addr_t rdata_mapping, int state, 2030 unsigned long *pstate, bnx2x_obj_type type, 2031 struct bnx2x_credit_pool_obj *macs_pool, 2032 struct bnx2x_credit_pool_obj *vlans_pool) 2033 { 2034 union bnx2x_qable_obj *qable_obj = 2035 (union bnx2x_qable_obj *)vlan_mac_obj; 2036 2037 bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata, 2038 rdata_mapping, state, pstate, type, 2039 macs_pool, vlans_pool); 2040 2041 /* CAM pool handling */ 2042 vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac; 2043 vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac; 2044 /* 2045 * CAM offset is relevant for 57710 and 57711 chips only which have a 2046 * single CAM for both MACs and VLAN-MAC pairs. So the offset 2047 * will be taken from MACs' pool object only. 2048 */ 2049 vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac; 2050 vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac; 2051 2052 if (CHIP_IS_E1(bp)) { 2053 BNX2X_ERR("Do not support chips others than E2\n"); 2054 BUG(); 2055 } else if (CHIP_IS_E1H(bp)) { 2056 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h; 2057 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del; 2058 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add; 2059 vlan_mac_obj->check_move = bnx2x_check_move_always_err; 2060 vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC; 2061 2062 /* Exe Queue */ 2063 bnx2x_exe_queue_init(bp, 2064 &vlan_mac_obj->exe_queue, 1, qable_obj, 2065 bnx2x_validate_vlan_mac, 2066 bnx2x_remove_vlan_mac, 2067 bnx2x_optimize_vlan_mac, 2068 bnx2x_execute_vlan_mac, 2069 bnx2x_exeq_get_vlan_mac); 2070 } else { 2071 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2; 2072 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del; 2073 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add; 2074 vlan_mac_obj->check_move = bnx2x_check_move; 2075 vlan_mac_obj->ramrod_cmd = 2076 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; 2077 2078 /* Exe Queue */ 2079 bnx2x_exe_queue_init(bp, 2080 &vlan_mac_obj->exe_queue, 2081 CLASSIFY_RULES_COUNT, 2082 qable_obj, bnx2x_validate_vlan_mac, 2083 bnx2x_remove_vlan_mac, 2084 bnx2x_optimize_vlan_mac, 2085 bnx2x_execute_vlan_mac, 2086 bnx2x_exeq_get_vlan_mac); 2087 } 2088 2089 } 2090 2091 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ 2092 static inline void __storm_memset_mac_filters(struct bnx2x *bp, 2093 struct tstorm_eth_mac_filter_config *mac_filters, 2094 u16 pf_id) 2095 { 2096 size_t size = sizeof(struct tstorm_eth_mac_filter_config); 2097 2098 u32 addr = BAR_TSTRORM_INTMEM + 2099 TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id); 2100 2101 __storm_memset_struct(bp, addr, size, (u32 *)mac_filters); 2102 } 2103 2104 static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp, 2105 struct bnx2x_rx_mode_ramrod_params *p) 2106 { 2107 /* update the bp MAC filter structure */ 2108 u32 mask = (1 << p->cl_id); 2109 2110 struct tstorm_eth_mac_filter_config *mac_filters = 2111 (struct tstorm_eth_mac_filter_config *)p->rdata; 2112 2113 /* initial seeting is drop-all */ 2114 u8 drop_all_ucast = 1, drop_all_mcast = 1; 2115 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0; 2116 u8 unmatched_unicast = 0; 2117 2118 /* In e1x there we only take into account rx acceot flag since tx switching 2119 * isn't enabled. */ 2120 if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags)) 2121 /* accept matched ucast */ 2122 drop_all_ucast = 0; 2123 2124 if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags)) 2125 /* accept matched mcast */ 2126 drop_all_mcast = 0; 2127 2128 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) { 2129 /* accept all mcast */ 2130 drop_all_ucast = 0; 2131 accp_all_ucast = 1; 2132 } 2133 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) { 2134 /* accept all mcast */ 2135 drop_all_mcast = 0; 2136 accp_all_mcast = 1; 2137 } 2138 if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags)) 2139 /* accept (all) bcast */ 2140 accp_all_bcast = 1; 2141 if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags)) 2142 /* accept unmatched unicasts */ 2143 unmatched_unicast = 1; 2144 2145 mac_filters->ucast_drop_all = drop_all_ucast ? 2146 mac_filters->ucast_drop_all | mask : 2147 mac_filters->ucast_drop_all & ~mask; 2148 2149 mac_filters->mcast_drop_all = drop_all_mcast ? 2150 mac_filters->mcast_drop_all | mask : 2151 mac_filters->mcast_drop_all & ~mask; 2152 2153 mac_filters->ucast_accept_all = accp_all_ucast ? 2154 mac_filters->ucast_accept_all | mask : 2155 mac_filters->ucast_accept_all & ~mask; 2156 2157 mac_filters->mcast_accept_all = accp_all_mcast ? 2158 mac_filters->mcast_accept_all | mask : 2159 mac_filters->mcast_accept_all & ~mask; 2160 2161 mac_filters->bcast_accept_all = accp_all_bcast ? 2162 mac_filters->bcast_accept_all | mask : 2163 mac_filters->bcast_accept_all & ~mask; 2164 2165 mac_filters->unmatched_unicast = unmatched_unicast ? 2166 mac_filters->unmatched_unicast | mask : 2167 mac_filters->unmatched_unicast & ~mask; 2168 2169 DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n" 2170 "accp_mcast 0x%x\naccp_bcast 0x%x\n", 2171 mac_filters->ucast_drop_all, mac_filters->mcast_drop_all, 2172 mac_filters->ucast_accept_all, mac_filters->mcast_accept_all, 2173 mac_filters->bcast_accept_all); 2174 2175 /* write the MAC filter structure*/ 2176 __storm_memset_mac_filters(bp, mac_filters, p->func_id); 2177 2178 /* The operation is completed */ 2179 clear_bit(p->state, p->pstate); 2180 smp_mb__after_clear_bit(); 2181 2182 return 0; 2183 } 2184 2185 /* Setup ramrod data */ 2186 static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid, 2187 struct eth_classify_header *hdr, 2188 u8 rule_cnt) 2189 { 2190 hdr->echo = cid; 2191 hdr->rule_cnt = rule_cnt; 2192 } 2193 2194 static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp, 2195 unsigned long accept_flags, 2196 struct eth_filter_rules_cmd *cmd, 2197 bool clear_accept_all) 2198 { 2199 u16 state; 2200 2201 /* start with 'drop-all' */ 2202 state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL | 2203 ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; 2204 2205 if (accept_flags) { 2206 if (test_bit(BNX2X_ACCEPT_UNICAST, &accept_flags)) 2207 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; 2208 2209 if (test_bit(BNX2X_ACCEPT_MULTICAST, &accept_flags)) 2210 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; 2211 2212 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept_flags)) { 2213 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; 2214 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL; 2215 } 2216 2217 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags)) { 2218 state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL; 2219 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; 2220 } 2221 if (test_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags)) 2222 state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL; 2223 2224 if (test_bit(BNX2X_ACCEPT_UNMATCHED, &accept_flags)) { 2225 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; 2226 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED; 2227 } 2228 if (test_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags)) 2229 state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN; 2230 } 2231 2232 /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */ 2233 if (clear_accept_all) { 2234 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL; 2235 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL; 2236 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL; 2237 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED; 2238 } 2239 2240 cmd->state = cpu_to_le16(state); 2241 2242 } 2243 2244 static int bnx2x_set_rx_mode_e2(struct bnx2x *bp, 2245 struct bnx2x_rx_mode_ramrod_params *p) 2246 { 2247 struct eth_filter_rules_ramrod_data *data = p->rdata; 2248 int rc; 2249 u8 rule_idx = 0; 2250 2251 /* Reset the ramrod data buffer */ 2252 memset(data, 0, sizeof(*data)); 2253 2254 /* Setup ramrod data */ 2255 2256 /* Tx (internal switching) */ 2257 if (test_bit(RAMROD_TX, &p->ramrod_flags)) { 2258 data->rules[rule_idx].client_id = p->cl_id; 2259 data->rules[rule_idx].func_id = p->func_id; 2260 2261 data->rules[rule_idx].cmd_general_data = 2262 ETH_FILTER_RULES_CMD_TX_CMD; 2263 2264 bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags, 2265 &(data->rules[rule_idx++]), false); 2266 } 2267 2268 /* Rx */ 2269 if (test_bit(RAMROD_RX, &p->ramrod_flags)) { 2270 data->rules[rule_idx].client_id = p->cl_id; 2271 data->rules[rule_idx].func_id = p->func_id; 2272 2273 data->rules[rule_idx].cmd_general_data = 2274 ETH_FILTER_RULES_CMD_RX_CMD; 2275 2276 bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags, 2277 &(data->rules[rule_idx++]), false); 2278 } 2279 2280 2281 /* 2282 * If FCoE Queue configuration has been requested configure the Rx and 2283 * internal switching modes for this queue in separate rules. 2284 * 2285 * FCoE queue shell never be set to ACCEPT_ALL packets of any sort: 2286 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED. 2287 */ 2288 if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) { 2289 /* Tx (internal switching) */ 2290 if (test_bit(RAMROD_TX, &p->ramrod_flags)) { 2291 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id); 2292 data->rules[rule_idx].func_id = p->func_id; 2293 2294 data->rules[rule_idx].cmd_general_data = 2295 ETH_FILTER_RULES_CMD_TX_CMD; 2296 2297 bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags, 2298 &(data->rules[rule_idx++]), 2299 true); 2300 } 2301 2302 /* Rx */ 2303 if (test_bit(RAMROD_RX, &p->ramrod_flags)) { 2304 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id); 2305 data->rules[rule_idx].func_id = p->func_id; 2306 2307 data->rules[rule_idx].cmd_general_data = 2308 ETH_FILTER_RULES_CMD_RX_CMD; 2309 2310 bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags, 2311 &(data->rules[rule_idx++]), 2312 true); 2313 } 2314 } 2315 2316 /* 2317 * Set the ramrod header (most importantly - number of rules to 2318 * configure). 2319 */ 2320 bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx); 2321 2322 DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n", 2323 data->header.rule_cnt, p->rx_accept_flags, 2324 p->tx_accept_flags); 2325 2326 /* 2327 * No need for an explicit memory barrier here as long we would 2328 * need to ensure the ordering of writing to the SPQ element 2329 * and updating of the SPQ producer which involves a memory 2330 * read and we will have to put a full memory barrier there 2331 * (inside bnx2x_sp_post()). 2332 */ 2333 2334 /* Send a ramrod */ 2335 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid, 2336 U64_HI(p->rdata_mapping), 2337 U64_LO(p->rdata_mapping), 2338 ETH_CONNECTION_TYPE); 2339 if (rc) 2340 return rc; 2341 2342 /* Ramrod completion is pending */ 2343 return 1; 2344 } 2345 2346 static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp, 2347 struct bnx2x_rx_mode_ramrod_params *p) 2348 { 2349 return bnx2x_state_wait(bp, p->state, p->pstate); 2350 } 2351 2352 static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp, 2353 struct bnx2x_rx_mode_ramrod_params *p) 2354 { 2355 /* Do nothing */ 2356 return 0; 2357 } 2358 2359 int bnx2x_config_rx_mode(struct bnx2x *bp, 2360 struct bnx2x_rx_mode_ramrod_params *p) 2361 { 2362 int rc; 2363 2364 /* Configure the new classification in the chip */ 2365 rc = p->rx_mode_obj->config_rx_mode(bp, p); 2366 if (rc < 0) 2367 return rc; 2368 2369 /* Wait for a ramrod completion if was requested */ 2370 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) { 2371 rc = p->rx_mode_obj->wait_comp(bp, p); 2372 if (rc) 2373 return rc; 2374 } 2375 2376 return rc; 2377 } 2378 2379 void bnx2x_init_rx_mode_obj(struct bnx2x *bp, 2380 struct bnx2x_rx_mode_obj *o) 2381 { 2382 if (CHIP_IS_E1x(bp)) { 2383 o->wait_comp = bnx2x_empty_rx_mode_wait; 2384 o->config_rx_mode = bnx2x_set_rx_mode_e1x; 2385 } else { 2386 o->wait_comp = bnx2x_wait_rx_mode_comp_e2; 2387 o->config_rx_mode = bnx2x_set_rx_mode_e2; 2388 } 2389 } 2390 2391 /********************* Multicast verbs: SET, CLEAR ****************************/ 2392 static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac) 2393 { 2394 return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff; 2395 } 2396 2397 struct bnx2x_mcast_mac_elem { 2398 struct list_head link; 2399 u8 mac[ETH_ALEN]; 2400 u8 pad[2]; /* For a natural alignment of the following buffer */ 2401 }; 2402 2403 struct bnx2x_pending_mcast_cmd { 2404 struct list_head link; 2405 int type; /* BNX2X_MCAST_CMD_X */ 2406 union { 2407 struct list_head macs_head; 2408 u32 macs_num; /* Needed for DEL command */ 2409 int next_bin; /* Needed for RESTORE flow with aprox match */ 2410 } data; 2411 2412 bool done; /* set to true, when the command has been handled, 2413 * practically used in 57712 handling only, where one pending 2414 * command may be handled in a few operations. As long as for 2415 * other chips every operation handling is completed in a 2416 * single ramrod, there is no need to utilize this field. 2417 */ 2418 }; 2419 2420 static int bnx2x_mcast_wait(struct bnx2x *bp, 2421 struct bnx2x_mcast_obj *o) 2422 { 2423 if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) || 2424 o->raw.wait_comp(bp, &o->raw)) 2425 return -EBUSY; 2426 2427 return 0; 2428 } 2429 2430 static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp, 2431 struct bnx2x_mcast_obj *o, 2432 struct bnx2x_mcast_ramrod_params *p, 2433 int cmd) 2434 { 2435 int total_sz; 2436 struct bnx2x_pending_mcast_cmd *new_cmd; 2437 struct bnx2x_mcast_mac_elem *cur_mac = NULL; 2438 struct bnx2x_mcast_list_elem *pos; 2439 int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ? 2440 p->mcast_list_len : 0); 2441 2442 /* If the command is empty ("handle pending commands only"), break */ 2443 if (!p->mcast_list_len) 2444 return 0; 2445 2446 total_sz = sizeof(*new_cmd) + 2447 macs_list_len * sizeof(struct bnx2x_mcast_mac_elem); 2448 2449 /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */ 2450 new_cmd = kzalloc(total_sz, GFP_ATOMIC); 2451 2452 if (!new_cmd) 2453 return -ENOMEM; 2454 2455 DP(BNX2X_MSG_SP, "About to enqueue a new %d command. macs_list_len=%d\n", 2456 cmd, macs_list_len); 2457 2458 INIT_LIST_HEAD(&new_cmd->data.macs_head); 2459 2460 new_cmd->type = cmd; 2461 new_cmd->done = false; 2462 2463 switch (cmd) { 2464 case BNX2X_MCAST_CMD_ADD: 2465 cur_mac = (struct bnx2x_mcast_mac_elem *) 2466 ((u8 *)new_cmd + sizeof(*new_cmd)); 2467 2468 /* Push the MACs of the current command into the pendig command 2469 * MACs list: FIFO 2470 */ 2471 list_for_each_entry(pos, &p->mcast_list, link) { 2472 memcpy(cur_mac->mac, pos->mac, ETH_ALEN); 2473 list_add_tail(&cur_mac->link, &new_cmd->data.macs_head); 2474 cur_mac++; 2475 } 2476 2477 break; 2478 2479 case BNX2X_MCAST_CMD_DEL: 2480 new_cmd->data.macs_num = p->mcast_list_len; 2481 break; 2482 2483 case BNX2X_MCAST_CMD_RESTORE: 2484 new_cmd->data.next_bin = 0; 2485 break; 2486 2487 default: 2488 kfree(new_cmd); 2489 BNX2X_ERR("Unknown command: %d\n", cmd); 2490 return -EINVAL; 2491 } 2492 2493 /* Push the new pending command to the tail of the pending list: FIFO */ 2494 list_add_tail(&new_cmd->link, &o->pending_cmds_head); 2495 2496 o->set_sched(o); 2497 2498 return 1; 2499 } 2500 2501 /** 2502 * bnx2x_mcast_get_next_bin - get the next set bin (index) 2503 * 2504 * @o: 2505 * @last: index to start looking from (including) 2506 * 2507 * returns the next found (set) bin or a negative value if none is found. 2508 */ 2509 static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last) 2510 { 2511 int i, j, inner_start = last % BIT_VEC64_ELEM_SZ; 2512 2513 for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) { 2514 if (o->registry.aprox_match.vec[i]) 2515 for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) { 2516 int cur_bit = j + BIT_VEC64_ELEM_SZ * i; 2517 if (BIT_VEC64_TEST_BIT(o->registry.aprox_match. 2518 vec, cur_bit)) { 2519 return cur_bit; 2520 } 2521 } 2522 inner_start = 0; 2523 } 2524 2525 /* None found */ 2526 return -1; 2527 } 2528 2529 /** 2530 * bnx2x_mcast_clear_first_bin - find the first set bin and clear it 2531 * 2532 * @o: 2533 * 2534 * returns the index of the found bin or -1 if none is found 2535 */ 2536 static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o) 2537 { 2538 int cur_bit = bnx2x_mcast_get_next_bin(o, 0); 2539 2540 if (cur_bit >= 0) 2541 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit); 2542 2543 return cur_bit; 2544 } 2545 2546 static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o) 2547 { 2548 struct bnx2x_raw_obj *raw = &o->raw; 2549 u8 rx_tx_flag = 0; 2550 2551 if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) || 2552 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX)) 2553 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD; 2554 2555 if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) || 2556 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX)) 2557 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD; 2558 2559 return rx_tx_flag; 2560 } 2561 2562 static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp, 2563 struct bnx2x_mcast_obj *o, int idx, 2564 union bnx2x_mcast_config_data *cfg_data, 2565 int cmd) 2566 { 2567 struct bnx2x_raw_obj *r = &o->raw; 2568 struct eth_multicast_rules_ramrod_data *data = 2569 (struct eth_multicast_rules_ramrod_data *)(r->rdata); 2570 u8 func_id = r->func_id; 2571 u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o); 2572 int bin; 2573 2574 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) 2575 rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD; 2576 2577 data->rules[idx].cmd_general_data |= rx_tx_add_flag; 2578 2579 /* Get a bin and update a bins' vector */ 2580 switch (cmd) { 2581 case BNX2X_MCAST_CMD_ADD: 2582 bin = bnx2x_mcast_bin_from_mac(cfg_data->mac); 2583 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin); 2584 break; 2585 2586 case BNX2X_MCAST_CMD_DEL: 2587 /* If there were no more bins to clear 2588 * (bnx2x_mcast_clear_first_bin() returns -1) then we would 2589 * clear any (0xff) bin. 2590 * See bnx2x_mcast_validate_e2() for explanation when it may 2591 * happen. 2592 */ 2593 bin = bnx2x_mcast_clear_first_bin(o); 2594 break; 2595 2596 case BNX2X_MCAST_CMD_RESTORE: 2597 bin = cfg_data->bin; 2598 break; 2599 2600 default: 2601 BNX2X_ERR("Unknown command: %d\n", cmd); 2602 return; 2603 } 2604 2605 DP(BNX2X_MSG_SP, "%s bin %d\n", 2606 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ? 2607 "Setting" : "Clearing"), bin); 2608 2609 data->rules[idx].bin_id = (u8)bin; 2610 data->rules[idx].func_id = func_id; 2611 data->rules[idx].engine_id = o->engine_id; 2612 } 2613 2614 /** 2615 * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry 2616 * 2617 * @bp: device handle 2618 * @o: 2619 * @start_bin: index in the registry to start from (including) 2620 * @rdata_idx: index in the ramrod data to start from 2621 * 2622 * returns last handled bin index or -1 if all bins have been handled 2623 */ 2624 static inline int bnx2x_mcast_handle_restore_cmd_e2( 2625 struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_bin, 2626 int *rdata_idx) 2627 { 2628 int cur_bin, cnt = *rdata_idx; 2629 union bnx2x_mcast_config_data cfg_data = {0}; 2630 2631 /* go through the registry and configure the bins from it */ 2632 for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0; 2633 cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) { 2634 2635 cfg_data.bin = (u8)cur_bin; 2636 o->set_one_rule(bp, o, cnt, &cfg_data, 2637 BNX2X_MCAST_CMD_RESTORE); 2638 2639 cnt++; 2640 2641 DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin); 2642 2643 /* Break if we reached the maximum number 2644 * of rules. 2645 */ 2646 if (cnt >= o->max_cmd_len) 2647 break; 2648 } 2649 2650 *rdata_idx = cnt; 2651 2652 return cur_bin; 2653 } 2654 2655 static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp, 2656 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos, 2657 int *line_idx) 2658 { 2659 struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n; 2660 int cnt = *line_idx; 2661 union bnx2x_mcast_config_data cfg_data = {0}; 2662 2663 list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head, 2664 link) { 2665 2666 cfg_data.mac = &pmac_pos->mac[0]; 2667 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type); 2668 2669 cnt++; 2670 2671 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n", 2672 pmac_pos->mac); 2673 2674 list_del(&pmac_pos->link); 2675 2676 /* Break if we reached the maximum number 2677 * of rules. 2678 */ 2679 if (cnt >= o->max_cmd_len) 2680 break; 2681 } 2682 2683 *line_idx = cnt; 2684 2685 /* if no more MACs to configure - we are done */ 2686 if (list_empty(&cmd_pos->data.macs_head)) 2687 cmd_pos->done = true; 2688 } 2689 2690 static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp, 2691 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos, 2692 int *line_idx) 2693 { 2694 int cnt = *line_idx; 2695 2696 while (cmd_pos->data.macs_num) { 2697 o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type); 2698 2699 cnt++; 2700 2701 cmd_pos->data.macs_num--; 2702 2703 DP(BNX2X_MSG_SP, "Deleting MAC. %d left,cnt is %d\n", 2704 cmd_pos->data.macs_num, cnt); 2705 2706 /* Break if we reached the maximum 2707 * number of rules. 2708 */ 2709 if (cnt >= o->max_cmd_len) 2710 break; 2711 } 2712 2713 *line_idx = cnt; 2714 2715 /* If we cleared all bins - we are done */ 2716 if (!cmd_pos->data.macs_num) 2717 cmd_pos->done = true; 2718 } 2719 2720 static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp, 2721 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos, 2722 int *line_idx) 2723 { 2724 cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin, 2725 line_idx); 2726 2727 if (cmd_pos->data.next_bin < 0) 2728 /* If o->set_restore returned -1 we are done */ 2729 cmd_pos->done = true; 2730 else 2731 /* Start from the next bin next time */ 2732 cmd_pos->data.next_bin++; 2733 } 2734 2735 static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp, 2736 struct bnx2x_mcast_ramrod_params *p) 2737 { 2738 struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n; 2739 int cnt = 0; 2740 struct bnx2x_mcast_obj *o = p->mcast_obj; 2741 2742 list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head, 2743 link) { 2744 switch (cmd_pos->type) { 2745 case BNX2X_MCAST_CMD_ADD: 2746 bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt); 2747 break; 2748 2749 case BNX2X_MCAST_CMD_DEL: 2750 bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt); 2751 break; 2752 2753 case BNX2X_MCAST_CMD_RESTORE: 2754 bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos, 2755 &cnt); 2756 break; 2757 2758 default: 2759 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type); 2760 return -EINVAL; 2761 } 2762 2763 /* If the command has been completed - remove it from the list 2764 * and free the memory 2765 */ 2766 if (cmd_pos->done) { 2767 list_del(&cmd_pos->link); 2768 kfree(cmd_pos); 2769 } 2770 2771 /* Break if we reached the maximum number of rules */ 2772 if (cnt >= o->max_cmd_len) 2773 break; 2774 } 2775 2776 return cnt; 2777 } 2778 2779 static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp, 2780 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p, 2781 int *line_idx) 2782 { 2783 struct bnx2x_mcast_list_elem *mlist_pos; 2784 union bnx2x_mcast_config_data cfg_data = {0}; 2785 int cnt = *line_idx; 2786 2787 list_for_each_entry(mlist_pos, &p->mcast_list, link) { 2788 cfg_data.mac = mlist_pos->mac; 2789 o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD); 2790 2791 cnt++; 2792 2793 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n", 2794 mlist_pos->mac); 2795 } 2796 2797 *line_idx = cnt; 2798 } 2799 2800 static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp, 2801 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p, 2802 int *line_idx) 2803 { 2804 int cnt = *line_idx, i; 2805 2806 for (i = 0; i < p->mcast_list_len; i++) { 2807 o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL); 2808 2809 cnt++; 2810 2811 DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n", 2812 p->mcast_list_len - i - 1); 2813 } 2814 2815 *line_idx = cnt; 2816 } 2817 2818 /** 2819 * bnx2x_mcast_handle_current_cmd - 2820 * 2821 * @bp: device handle 2822 * @p: 2823 * @cmd: 2824 * @start_cnt: first line in the ramrod data that may be used 2825 * 2826 * This function is called iff there is enough place for the current command in 2827 * the ramrod data. 2828 * Returns number of lines filled in the ramrod data in total. 2829 */ 2830 static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp, 2831 struct bnx2x_mcast_ramrod_params *p, int cmd, 2832 int start_cnt) 2833 { 2834 struct bnx2x_mcast_obj *o = p->mcast_obj; 2835 int cnt = start_cnt; 2836 2837 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len); 2838 2839 switch (cmd) { 2840 case BNX2X_MCAST_CMD_ADD: 2841 bnx2x_mcast_hdl_add(bp, o, p, &cnt); 2842 break; 2843 2844 case BNX2X_MCAST_CMD_DEL: 2845 bnx2x_mcast_hdl_del(bp, o, p, &cnt); 2846 break; 2847 2848 case BNX2X_MCAST_CMD_RESTORE: 2849 o->hdl_restore(bp, o, 0, &cnt); 2850 break; 2851 2852 default: 2853 BNX2X_ERR("Unknown command: %d\n", cmd); 2854 return -EINVAL; 2855 } 2856 2857 /* The current command has been handled */ 2858 p->mcast_list_len = 0; 2859 2860 return cnt; 2861 } 2862 2863 static int bnx2x_mcast_validate_e2(struct bnx2x *bp, 2864 struct bnx2x_mcast_ramrod_params *p, 2865 int cmd) 2866 { 2867 struct bnx2x_mcast_obj *o = p->mcast_obj; 2868 int reg_sz = o->get_registry_size(o); 2869 2870 switch (cmd) { 2871 /* DEL command deletes all currently configured MACs */ 2872 case BNX2X_MCAST_CMD_DEL: 2873 o->set_registry_size(o, 0); 2874 /* Don't break */ 2875 2876 /* RESTORE command will restore the entire multicast configuration */ 2877 case BNX2X_MCAST_CMD_RESTORE: 2878 /* Here we set the approximate amount of work to do, which in 2879 * fact may be only less as some MACs in postponed ADD 2880 * command(s) scheduled before this command may fall into 2881 * the same bin and the actual number of bins set in the 2882 * registry would be less than we estimated here. See 2883 * bnx2x_mcast_set_one_rule_e2() for further details. 2884 */ 2885 p->mcast_list_len = reg_sz; 2886 break; 2887 2888 case BNX2X_MCAST_CMD_ADD: 2889 case BNX2X_MCAST_CMD_CONT: 2890 /* Here we assume that all new MACs will fall into new bins. 2891 * However we will correct the real registry size after we 2892 * handle all pending commands. 2893 */ 2894 o->set_registry_size(o, reg_sz + p->mcast_list_len); 2895 break; 2896 2897 default: 2898 BNX2X_ERR("Unknown command: %d\n", cmd); 2899 return -EINVAL; 2900 2901 } 2902 2903 /* Increase the total number of MACs pending to be configured */ 2904 o->total_pending_num += p->mcast_list_len; 2905 2906 return 0; 2907 } 2908 2909 static void bnx2x_mcast_revert_e2(struct bnx2x *bp, 2910 struct bnx2x_mcast_ramrod_params *p, 2911 int old_num_bins) 2912 { 2913 struct bnx2x_mcast_obj *o = p->mcast_obj; 2914 2915 o->set_registry_size(o, old_num_bins); 2916 o->total_pending_num -= p->mcast_list_len; 2917 } 2918 2919 /** 2920 * bnx2x_mcast_set_rdata_hdr_e2 - sets a header values 2921 * 2922 * @bp: device handle 2923 * @p: 2924 * @len: number of rules to handle 2925 */ 2926 static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp, 2927 struct bnx2x_mcast_ramrod_params *p, 2928 u8 len) 2929 { 2930 struct bnx2x_raw_obj *r = &p->mcast_obj->raw; 2931 struct eth_multicast_rules_ramrod_data *data = 2932 (struct eth_multicast_rules_ramrod_data *)(r->rdata); 2933 2934 data->header.echo = ((r->cid & BNX2X_SWCID_MASK) | 2935 (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT)); 2936 data->header.rule_cnt = len; 2937 } 2938 2939 /** 2940 * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins 2941 * 2942 * @bp: device handle 2943 * @o: 2944 * 2945 * Recalculate the actual number of set bins in the registry using Brian 2946 * Kernighan's algorithm: it's execution complexity is as a number of set bins. 2947 * 2948 * returns 0 for the compliance with bnx2x_mcast_refresh_registry_e1(). 2949 */ 2950 static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp, 2951 struct bnx2x_mcast_obj *o) 2952 { 2953 int i, cnt = 0; 2954 u64 elem; 2955 2956 for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) { 2957 elem = o->registry.aprox_match.vec[i]; 2958 for (; elem; cnt++) 2959 elem &= elem - 1; 2960 } 2961 2962 o->set_registry_size(o, cnt); 2963 2964 return 0; 2965 } 2966 2967 static int bnx2x_mcast_setup_e2(struct bnx2x *bp, 2968 struct bnx2x_mcast_ramrod_params *p, 2969 int cmd) 2970 { 2971 struct bnx2x_raw_obj *raw = &p->mcast_obj->raw; 2972 struct bnx2x_mcast_obj *o = p->mcast_obj; 2973 struct eth_multicast_rules_ramrod_data *data = 2974 (struct eth_multicast_rules_ramrod_data *)(raw->rdata); 2975 int cnt = 0, rc; 2976 2977 /* Reset the ramrod data buffer */ 2978 memset(data, 0, sizeof(*data)); 2979 2980 cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p); 2981 2982 /* If there are no more pending commands - clear SCHEDULED state */ 2983 if (list_empty(&o->pending_cmds_head)) 2984 o->clear_sched(o); 2985 2986 /* The below may be true iff there was enough room in ramrod 2987 * data for all pending commands and for the current 2988 * command. Otherwise the current command would have been added 2989 * to the pending commands and p->mcast_list_len would have been 2990 * zeroed. 2991 */ 2992 if (p->mcast_list_len > 0) 2993 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt); 2994 2995 /* We've pulled out some MACs - update the total number of 2996 * outstanding. 2997 */ 2998 o->total_pending_num -= cnt; 2999 3000 /* send a ramrod */ 3001 WARN_ON(o->total_pending_num < 0); 3002 WARN_ON(cnt > o->max_cmd_len); 3003 3004 bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt); 3005 3006 /* Update a registry size if there are no more pending operations. 3007 * 3008 * We don't want to change the value of the registry size if there are 3009 * pending operations because we want it to always be equal to the 3010 * exact or the approximate number (see bnx2x_mcast_validate_e2()) of 3011 * set bins after the last requested operation in order to properly 3012 * evaluate the size of the next DEL/RESTORE operation. 3013 * 3014 * Note that we update the registry itself during command(s) handling 3015 * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we 3016 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but 3017 * with a limited amount of update commands (per MAC/bin) and we don't 3018 * know in this scope what the actual state of bins configuration is 3019 * going to be after this ramrod. 3020 */ 3021 if (!o->total_pending_num) 3022 bnx2x_mcast_refresh_registry_e2(bp, o); 3023 3024 /* 3025 * If CLEAR_ONLY was requested - don't send a ramrod and clear 3026 * RAMROD_PENDING status immediately. 3027 */ 3028 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { 3029 raw->clear_pending(raw); 3030 return 0; 3031 } else { 3032 /* 3033 * No need for an explicit memory barrier here as long we would 3034 * need to ensure the ordering of writing to the SPQ element 3035 * and updating of the SPQ producer which involves a memory 3036 * read and we will have to put a full memory barrier there 3037 * (inside bnx2x_sp_post()). 3038 */ 3039 3040 /* Send a ramrod */ 3041 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES, 3042 raw->cid, U64_HI(raw->rdata_mapping), 3043 U64_LO(raw->rdata_mapping), 3044 ETH_CONNECTION_TYPE); 3045 if (rc) 3046 return rc; 3047 3048 /* Ramrod completion is pending */ 3049 return 1; 3050 } 3051 } 3052 3053 static int bnx2x_mcast_validate_e1h(struct bnx2x *bp, 3054 struct bnx2x_mcast_ramrod_params *p, 3055 int cmd) 3056 { 3057 /* Mark, that there is a work to do */ 3058 if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE)) 3059 p->mcast_list_len = 1; 3060 3061 return 0; 3062 } 3063 3064 static void bnx2x_mcast_revert_e1h(struct bnx2x *bp, 3065 struct bnx2x_mcast_ramrod_params *p, 3066 int old_num_bins) 3067 { 3068 /* Do nothing */ 3069 } 3070 3071 #define BNX2X_57711_SET_MC_FILTER(filter, bit) \ 3072 do { \ 3073 (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \ 3074 } while (0) 3075 3076 static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp, 3077 struct bnx2x_mcast_obj *o, 3078 struct bnx2x_mcast_ramrod_params *p, 3079 u32 *mc_filter) 3080 { 3081 struct bnx2x_mcast_list_elem *mlist_pos; 3082 int bit; 3083 3084 list_for_each_entry(mlist_pos, &p->mcast_list, link) { 3085 bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac); 3086 BNX2X_57711_SET_MC_FILTER(mc_filter, bit); 3087 3088 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n", 3089 mlist_pos->mac, bit); 3090 3091 /* bookkeeping... */ 3092 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, 3093 bit); 3094 } 3095 } 3096 3097 static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp, 3098 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p, 3099 u32 *mc_filter) 3100 { 3101 int bit; 3102 3103 for (bit = bnx2x_mcast_get_next_bin(o, 0); 3104 bit >= 0; 3105 bit = bnx2x_mcast_get_next_bin(o, bit + 1)) { 3106 BNX2X_57711_SET_MC_FILTER(mc_filter, bit); 3107 DP(BNX2X_MSG_SP, "About to set bin %d\n", bit); 3108 } 3109 } 3110 3111 /* On 57711 we write the multicast MACs' aproximate match 3112 * table by directly into the TSTORM's internal RAM. So we don't 3113 * really need to handle any tricks to make it work. 3114 */ 3115 static int bnx2x_mcast_setup_e1h(struct bnx2x *bp, 3116 struct bnx2x_mcast_ramrod_params *p, 3117 int cmd) 3118 { 3119 int i; 3120 struct bnx2x_mcast_obj *o = p->mcast_obj; 3121 struct bnx2x_raw_obj *r = &o->raw; 3122 3123 /* If CLEAR_ONLY has been requested - clear the registry 3124 * and clear a pending bit. 3125 */ 3126 if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { 3127 u32 mc_filter[MC_HASH_SIZE] = {0}; 3128 3129 /* Set the multicast filter bits before writing it into 3130 * the internal memory. 3131 */ 3132 switch (cmd) { 3133 case BNX2X_MCAST_CMD_ADD: 3134 bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter); 3135 break; 3136 3137 case BNX2X_MCAST_CMD_DEL: 3138 DP(BNX2X_MSG_SP, 3139 "Invalidating multicast MACs configuration\n"); 3140 3141 /* clear the registry */ 3142 memset(o->registry.aprox_match.vec, 0, 3143 sizeof(o->registry.aprox_match.vec)); 3144 break; 3145 3146 case BNX2X_MCAST_CMD_RESTORE: 3147 bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter); 3148 break; 3149 3150 default: 3151 BNX2X_ERR("Unknown command: %d\n", cmd); 3152 return -EINVAL; 3153 } 3154 3155 /* Set the mcast filter in the internal memory */ 3156 for (i = 0; i < MC_HASH_SIZE; i++) 3157 REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]); 3158 } else 3159 /* clear the registry */ 3160 memset(o->registry.aprox_match.vec, 0, 3161 sizeof(o->registry.aprox_match.vec)); 3162 3163 /* We are done */ 3164 r->clear_pending(r); 3165 3166 return 0; 3167 } 3168 3169 static int bnx2x_mcast_validate_e1(struct bnx2x *bp, 3170 struct bnx2x_mcast_ramrod_params *p, 3171 int cmd) 3172 { 3173 struct bnx2x_mcast_obj *o = p->mcast_obj; 3174 int reg_sz = o->get_registry_size(o); 3175 3176 switch (cmd) { 3177 /* DEL command deletes all currently configured MACs */ 3178 case BNX2X_MCAST_CMD_DEL: 3179 o->set_registry_size(o, 0); 3180 /* Don't break */ 3181 3182 /* RESTORE command will restore the entire multicast configuration */ 3183 case BNX2X_MCAST_CMD_RESTORE: 3184 p->mcast_list_len = reg_sz; 3185 DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n", 3186 cmd, p->mcast_list_len); 3187 break; 3188 3189 case BNX2X_MCAST_CMD_ADD: 3190 case BNX2X_MCAST_CMD_CONT: 3191 /* Multicast MACs on 57710 are configured as unicast MACs and 3192 * there is only a limited number of CAM entries for that 3193 * matter. 3194 */ 3195 if (p->mcast_list_len > o->max_cmd_len) { 3196 BNX2X_ERR("Can't configure more than %d multicast MACs on 57710\n", 3197 o->max_cmd_len); 3198 return -EINVAL; 3199 } 3200 /* Every configured MAC should be cleared if DEL command is 3201 * called. Only the last ADD command is relevant as long as 3202 * every ADD commands overrides the previous configuration. 3203 */ 3204 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len); 3205 if (p->mcast_list_len > 0) 3206 o->set_registry_size(o, p->mcast_list_len); 3207 3208 break; 3209 3210 default: 3211 BNX2X_ERR("Unknown command: %d\n", cmd); 3212 return -EINVAL; 3213 3214 } 3215 3216 /* We want to ensure that commands are executed one by one for 57710. 3217 * Therefore each none-empty command will consume o->max_cmd_len. 3218 */ 3219 if (p->mcast_list_len) 3220 o->total_pending_num += o->max_cmd_len; 3221 3222 return 0; 3223 } 3224 3225 static void bnx2x_mcast_revert_e1(struct bnx2x *bp, 3226 struct bnx2x_mcast_ramrod_params *p, 3227 int old_num_macs) 3228 { 3229 struct bnx2x_mcast_obj *o = p->mcast_obj; 3230 3231 o->set_registry_size(o, old_num_macs); 3232 3233 /* If current command hasn't been handled yet and we are 3234 * here means that it's meant to be dropped and we have to 3235 * update the number of outstandling MACs accordingly. 3236 */ 3237 if (p->mcast_list_len) 3238 o->total_pending_num -= o->max_cmd_len; 3239 } 3240 3241 static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp, 3242 struct bnx2x_mcast_obj *o, int idx, 3243 union bnx2x_mcast_config_data *cfg_data, 3244 int cmd) 3245 { 3246 struct bnx2x_raw_obj *r = &o->raw; 3247 struct mac_configuration_cmd *data = 3248 (struct mac_configuration_cmd *)(r->rdata); 3249 3250 /* copy mac */ 3251 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) { 3252 bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr, 3253 &data->config_table[idx].middle_mac_addr, 3254 &data->config_table[idx].lsb_mac_addr, 3255 cfg_data->mac); 3256 3257 data->config_table[idx].vlan_id = 0; 3258 data->config_table[idx].pf_id = r->func_id; 3259 data->config_table[idx].clients_bit_vector = 3260 cpu_to_le32(1 << r->cl_id); 3261 3262 SET_FLAG(data->config_table[idx].flags, 3263 MAC_CONFIGURATION_ENTRY_ACTION_TYPE, 3264 T_ETH_MAC_COMMAND_SET); 3265 } 3266 } 3267 3268 /** 3269 * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd 3270 * 3271 * @bp: device handle 3272 * @p: 3273 * @len: number of rules to handle 3274 */ 3275 static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp, 3276 struct bnx2x_mcast_ramrod_params *p, 3277 u8 len) 3278 { 3279 struct bnx2x_raw_obj *r = &p->mcast_obj->raw; 3280 struct mac_configuration_cmd *data = 3281 (struct mac_configuration_cmd *)(r->rdata); 3282 3283 u8 offset = (CHIP_REV_IS_SLOW(bp) ? 3284 BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) : 3285 BNX2X_MAX_MULTICAST*(1 + r->func_id)); 3286 3287 data->hdr.offset = offset; 3288 data->hdr.client_id = 0xff; 3289 data->hdr.echo = ((r->cid & BNX2X_SWCID_MASK) | 3290 (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT)); 3291 data->hdr.length = len; 3292 } 3293 3294 /** 3295 * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710 3296 * 3297 * @bp: device handle 3298 * @o: 3299 * @start_idx: index in the registry to start from 3300 * @rdata_idx: index in the ramrod data to start from 3301 * 3302 * restore command for 57710 is like all other commands - always a stand alone 3303 * command - start_idx and rdata_idx will always be 0. This function will always 3304 * succeed. 3305 * returns -1 to comply with 57712 variant. 3306 */ 3307 static inline int bnx2x_mcast_handle_restore_cmd_e1( 3308 struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_idx, 3309 int *rdata_idx) 3310 { 3311 struct bnx2x_mcast_mac_elem *elem; 3312 int i = 0; 3313 union bnx2x_mcast_config_data cfg_data = {0}; 3314 3315 /* go through the registry and configure the MACs from it. */ 3316 list_for_each_entry(elem, &o->registry.exact_match.macs, link) { 3317 cfg_data.mac = &elem->mac[0]; 3318 o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE); 3319 3320 i++; 3321 3322 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n", 3323 cfg_data.mac); 3324 } 3325 3326 *rdata_idx = i; 3327 3328 return -1; 3329 } 3330 3331 3332 static inline int bnx2x_mcast_handle_pending_cmds_e1( 3333 struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p) 3334 { 3335 struct bnx2x_pending_mcast_cmd *cmd_pos; 3336 struct bnx2x_mcast_mac_elem *pmac_pos; 3337 struct bnx2x_mcast_obj *o = p->mcast_obj; 3338 union bnx2x_mcast_config_data cfg_data = {0}; 3339 int cnt = 0; 3340 3341 3342 /* If nothing to be done - return */ 3343 if (list_empty(&o->pending_cmds_head)) 3344 return 0; 3345 3346 /* Handle the first command */ 3347 cmd_pos = list_first_entry(&o->pending_cmds_head, 3348 struct bnx2x_pending_mcast_cmd, link); 3349 3350 switch (cmd_pos->type) { 3351 case BNX2X_MCAST_CMD_ADD: 3352 list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) { 3353 cfg_data.mac = &pmac_pos->mac[0]; 3354 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type); 3355 3356 cnt++; 3357 3358 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n", 3359 pmac_pos->mac); 3360 } 3361 break; 3362 3363 case BNX2X_MCAST_CMD_DEL: 3364 cnt = cmd_pos->data.macs_num; 3365 DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt); 3366 break; 3367 3368 case BNX2X_MCAST_CMD_RESTORE: 3369 o->hdl_restore(bp, o, 0, &cnt); 3370 break; 3371 3372 default: 3373 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type); 3374 return -EINVAL; 3375 } 3376 3377 list_del(&cmd_pos->link); 3378 kfree(cmd_pos); 3379 3380 return cnt; 3381 } 3382 3383 /** 3384 * bnx2x_get_fw_mac_addr - revert the bnx2x_set_fw_mac_addr(). 3385 * 3386 * @fw_hi: 3387 * @fw_mid: 3388 * @fw_lo: 3389 * @mac: 3390 */ 3391 static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid, 3392 __le16 *fw_lo, u8 *mac) 3393 { 3394 mac[1] = ((u8 *)fw_hi)[0]; 3395 mac[0] = ((u8 *)fw_hi)[1]; 3396 mac[3] = ((u8 *)fw_mid)[0]; 3397 mac[2] = ((u8 *)fw_mid)[1]; 3398 mac[5] = ((u8 *)fw_lo)[0]; 3399 mac[4] = ((u8 *)fw_lo)[1]; 3400 } 3401 3402 /** 3403 * bnx2x_mcast_refresh_registry_e1 - 3404 * 3405 * @bp: device handle 3406 * @cnt: 3407 * 3408 * Check the ramrod data first entry flag to see if it's a DELETE or ADD command 3409 * and update the registry correspondingly: if ADD - allocate a memory and add 3410 * the entries to the registry (list), if DELETE - clear the registry and free 3411 * the memory. 3412 */ 3413 static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp, 3414 struct bnx2x_mcast_obj *o) 3415 { 3416 struct bnx2x_raw_obj *raw = &o->raw; 3417 struct bnx2x_mcast_mac_elem *elem; 3418 struct mac_configuration_cmd *data = 3419 (struct mac_configuration_cmd *)(raw->rdata); 3420 3421 /* If first entry contains a SET bit - the command was ADD, 3422 * otherwise - DEL_ALL 3423 */ 3424 if (GET_FLAG(data->config_table[0].flags, 3425 MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) { 3426 int i, len = data->hdr.length; 3427 3428 /* Break if it was a RESTORE command */ 3429 if (!list_empty(&o->registry.exact_match.macs)) 3430 return 0; 3431 3432 elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC); 3433 if (!elem) { 3434 BNX2X_ERR("Failed to allocate registry memory\n"); 3435 return -ENOMEM; 3436 } 3437 3438 for (i = 0; i < len; i++, elem++) { 3439 bnx2x_get_fw_mac_addr( 3440 &data->config_table[i].msb_mac_addr, 3441 &data->config_table[i].middle_mac_addr, 3442 &data->config_table[i].lsb_mac_addr, 3443 elem->mac); 3444 DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n", 3445 elem->mac); 3446 list_add_tail(&elem->link, 3447 &o->registry.exact_match.macs); 3448 } 3449 } else { 3450 elem = list_first_entry(&o->registry.exact_match.macs, 3451 struct bnx2x_mcast_mac_elem, link); 3452 DP(BNX2X_MSG_SP, "Deleting a registry\n"); 3453 kfree(elem); 3454 INIT_LIST_HEAD(&o->registry.exact_match.macs); 3455 } 3456 3457 return 0; 3458 } 3459 3460 static int bnx2x_mcast_setup_e1(struct bnx2x *bp, 3461 struct bnx2x_mcast_ramrod_params *p, 3462 int cmd) 3463 { 3464 struct bnx2x_mcast_obj *o = p->mcast_obj; 3465 struct bnx2x_raw_obj *raw = &o->raw; 3466 struct mac_configuration_cmd *data = 3467 (struct mac_configuration_cmd *)(raw->rdata); 3468 int cnt = 0, i, rc; 3469 3470 /* Reset the ramrod data buffer */ 3471 memset(data, 0, sizeof(*data)); 3472 3473 /* First set all entries as invalid */ 3474 for (i = 0; i < o->max_cmd_len ; i++) 3475 SET_FLAG(data->config_table[i].flags, 3476 MAC_CONFIGURATION_ENTRY_ACTION_TYPE, 3477 T_ETH_MAC_COMMAND_INVALIDATE); 3478 3479 /* Handle pending commands first */ 3480 cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p); 3481 3482 /* If there are no more pending commands - clear SCHEDULED state */ 3483 if (list_empty(&o->pending_cmds_head)) 3484 o->clear_sched(o); 3485 3486 /* The below may be true iff there were no pending commands */ 3487 if (!cnt) 3488 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0); 3489 3490 /* For 57710 every command has o->max_cmd_len length to ensure that 3491 * commands are done one at a time. 3492 */ 3493 o->total_pending_num -= o->max_cmd_len; 3494 3495 /* send a ramrod */ 3496 3497 WARN_ON(cnt > o->max_cmd_len); 3498 3499 /* Set ramrod header (in particular, a number of entries to update) */ 3500 bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt); 3501 3502 /* update a registry: we need the registry contents to be always up 3503 * to date in order to be able to execute a RESTORE opcode. Here 3504 * we use the fact that for 57710 we sent one command at a time 3505 * hence we may take the registry update out of the command handling 3506 * and do it in a simpler way here. 3507 */ 3508 rc = bnx2x_mcast_refresh_registry_e1(bp, o); 3509 if (rc) 3510 return rc; 3511 3512 /* 3513 * If CLEAR_ONLY was requested - don't send a ramrod and clear 3514 * RAMROD_PENDING status immediately. 3515 */ 3516 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { 3517 raw->clear_pending(raw); 3518 return 0; 3519 } else { 3520 /* 3521 * No need for an explicit memory barrier here as long we would 3522 * need to ensure the ordering of writing to the SPQ element 3523 * and updating of the SPQ producer which involves a memory 3524 * read and we will have to put a full memory barrier there 3525 * (inside bnx2x_sp_post()). 3526 */ 3527 3528 /* Send a ramrod */ 3529 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid, 3530 U64_HI(raw->rdata_mapping), 3531 U64_LO(raw->rdata_mapping), 3532 ETH_CONNECTION_TYPE); 3533 if (rc) 3534 return rc; 3535 3536 /* Ramrod completion is pending */ 3537 return 1; 3538 } 3539 3540 } 3541 3542 static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o) 3543 { 3544 return o->registry.exact_match.num_macs_set; 3545 } 3546 3547 static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o) 3548 { 3549 return o->registry.aprox_match.num_bins_set; 3550 } 3551 3552 static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o, 3553 int n) 3554 { 3555 o->registry.exact_match.num_macs_set = n; 3556 } 3557 3558 static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o, 3559 int n) 3560 { 3561 o->registry.aprox_match.num_bins_set = n; 3562 } 3563 3564 int bnx2x_config_mcast(struct bnx2x *bp, 3565 struct bnx2x_mcast_ramrod_params *p, 3566 int cmd) 3567 { 3568 struct bnx2x_mcast_obj *o = p->mcast_obj; 3569 struct bnx2x_raw_obj *r = &o->raw; 3570 int rc = 0, old_reg_size; 3571 3572 /* This is needed to recover number of currently configured mcast macs 3573 * in case of failure. 3574 */ 3575 old_reg_size = o->get_registry_size(o); 3576 3577 /* Do some calculations and checks */ 3578 rc = o->validate(bp, p, cmd); 3579 if (rc) 3580 return rc; 3581 3582 /* Return if there is no work to do */ 3583 if ((!p->mcast_list_len) && (!o->check_sched(o))) 3584 return 0; 3585 3586 DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n", 3587 o->total_pending_num, p->mcast_list_len, o->max_cmd_len); 3588 3589 /* Enqueue the current command to the pending list if we can't complete 3590 * it in the current iteration 3591 */ 3592 if (r->check_pending(r) || 3593 ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) { 3594 rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd); 3595 if (rc < 0) 3596 goto error_exit1; 3597 3598 /* As long as the current command is in a command list we 3599 * don't need to handle it separately. 3600 */ 3601 p->mcast_list_len = 0; 3602 } 3603 3604 if (!r->check_pending(r)) { 3605 3606 /* Set 'pending' state */ 3607 r->set_pending(r); 3608 3609 /* Configure the new classification in the chip */ 3610 rc = o->config_mcast(bp, p, cmd); 3611 if (rc < 0) 3612 goto error_exit2; 3613 3614 /* Wait for a ramrod completion if was requested */ 3615 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) 3616 rc = o->wait_comp(bp, o); 3617 } 3618 3619 return rc; 3620 3621 error_exit2: 3622 r->clear_pending(r); 3623 3624 error_exit1: 3625 o->revert(bp, p, old_reg_size); 3626 3627 return rc; 3628 } 3629 3630 static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o) 3631 { 3632 smp_mb__before_clear_bit(); 3633 clear_bit(o->sched_state, o->raw.pstate); 3634 smp_mb__after_clear_bit(); 3635 } 3636 3637 static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o) 3638 { 3639 smp_mb__before_clear_bit(); 3640 set_bit(o->sched_state, o->raw.pstate); 3641 smp_mb__after_clear_bit(); 3642 } 3643 3644 static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o) 3645 { 3646 return !!test_bit(o->sched_state, o->raw.pstate); 3647 } 3648 3649 static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o) 3650 { 3651 return o->raw.check_pending(&o->raw) || o->check_sched(o); 3652 } 3653 3654 void bnx2x_init_mcast_obj(struct bnx2x *bp, 3655 struct bnx2x_mcast_obj *mcast_obj, 3656 u8 mcast_cl_id, u32 mcast_cid, u8 func_id, 3657 u8 engine_id, void *rdata, dma_addr_t rdata_mapping, 3658 int state, unsigned long *pstate, bnx2x_obj_type type) 3659 { 3660 memset(mcast_obj, 0, sizeof(*mcast_obj)); 3661 3662 bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id, 3663 rdata, rdata_mapping, state, pstate, type); 3664 3665 mcast_obj->engine_id = engine_id; 3666 3667 INIT_LIST_HEAD(&mcast_obj->pending_cmds_head); 3668 3669 mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED; 3670 mcast_obj->check_sched = bnx2x_mcast_check_sched; 3671 mcast_obj->set_sched = bnx2x_mcast_set_sched; 3672 mcast_obj->clear_sched = bnx2x_mcast_clear_sched; 3673 3674 if (CHIP_IS_E1(bp)) { 3675 mcast_obj->config_mcast = bnx2x_mcast_setup_e1; 3676 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd; 3677 mcast_obj->hdl_restore = 3678 bnx2x_mcast_handle_restore_cmd_e1; 3679 mcast_obj->check_pending = bnx2x_mcast_check_pending; 3680 3681 if (CHIP_REV_IS_SLOW(bp)) 3682 mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI; 3683 else 3684 mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST; 3685 3686 mcast_obj->wait_comp = bnx2x_mcast_wait; 3687 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e1; 3688 mcast_obj->validate = bnx2x_mcast_validate_e1; 3689 mcast_obj->revert = bnx2x_mcast_revert_e1; 3690 mcast_obj->get_registry_size = 3691 bnx2x_mcast_get_registry_size_exact; 3692 mcast_obj->set_registry_size = 3693 bnx2x_mcast_set_registry_size_exact; 3694 3695 /* 57710 is the only chip that uses the exact match for mcast 3696 * at the moment. 3697 */ 3698 INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs); 3699 3700 } else if (CHIP_IS_E1H(bp)) { 3701 mcast_obj->config_mcast = bnx2x_mcast_setup_e1h; 3702 mcast_obj->enqueue_cmd = NULL; 3703 mcast_obj->hdl_restore = NULL; 3704 mcast_obj->check_pending = bnx2x_mcast_check_pending; 3705 3706 /* 57711 doesn't send a ramrod, so it has unlimited credit 3707 * for one command. 3708 */ 3709 mcast_obj->max_cmd_len = -1; 3710 mcast_obj->wait_comp = bnx2x_mcast_wait; 3711 mcast_obj->set_one_rule = NULL; 3712 mcast_obj->validate = bnx2x_mcast_validate_e1h; 3713 mcast_obj->revert = bnx2x_mcast_revert_e1h; 3714 mcast_obj->get_registry_size = 3715 bnx2x_mcast_get_registry_size_aprox; 3716 mcast_obj->set_registry_size = 3717 bnx2x_mcast_set_registry_size_aprox; 3718 } else { 3719 mcast_obj->config_mcast = bnx2x_mcast_setup_e2; 3720 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd; 3721 mcast_obj->hdl_restore = 3722 bnx2x_mcast_handle_restore_cmd_e2; 3723 mcast_obj->check_pending = bnx2x_mcast_check_pending; 3724 /* TODO: There should be a proper HSI define for this number!!! 3725 */ 3726 mcast_obj->max_cmd_len = 16; 3727 mcast_obj->wait_comp = bnx2x_mcast_wait; 3728 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e2; 3729 mcast_obj->validate = bnx2x_mcast_validate_e2; 3730 mcast_obj->revert = bnx2x_mcast_revert_e2; 3731 mcast_obj->get_registry_size = 3732 bnx2x_mcast_get_registry_size_aprox; 3733 mcast_obj->set_registry_size = 3734 bnx2x_mcast_set_registry_size_aprox; 3735 } 3736 } 3737 3738 /*************************** Credit handling **********************************/ 3739 3740 /** 3741 * atomic_add_ifless - add if the result is less than a given value. 3742 * 3743 * @v: pointer of type atomic_t 3744 * @a: the amount to add to v... 3745 * @u: ...if (v + a) is less than u. 3746 * 3747 * returns true if (v + a) was less than u, and false otherwise. 3748 * 3749 */ 3750 static inline bool __atomic_add_ifless(atomic_t *v, int a, int u) 3751 { 3752 int c, old; 3753 3754 c = atomic_read(v); 3755 for (;;) { 3756 if (unlikely(c + a >= u)) 3757 return false; 3758 3759 old = atomic_cmpxchg((v), c, c + a); 3760 if (likely(old == c)) 3761 break; 3762 c = old; 3763 } 3764 3765 return true; 3766 } 3767 3768 /** 3769 * atomic_dec_ifmoe - dec if the result is more or equal than a given value. 3770 * 3771 * @v: pointer of type atomic_t 3772 * @a: the amount to dec from v... 3773 * @u: ...if (v - a) is more or equal than u. 3774 * 3775 * returns true if (v - a) was more or equal than u, and false 3776 * otherwise. 3777 */ 3778 static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u) 3779 { 3780 int c, old; 3781 3782 c = atomic_read(v); 3783 for (;;) { 3784 if (unlikely(c - a < u)) 3785 return false; 3786 3787 old = atomic_cmpxchg((v), c, c - a); 3788 if (likely(old == c)) 3789 break; 3790 c = old; 3791 } 3792 3793 return true; 3794 } 3795 3796 static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt) 3797 { 3798 bool rc; 3799 3800 smp_mb(); 3801 rc = __atomic_dec_ifmoe(&o->credit, cnt, 0); 3802 smp_mb(); 3803 3804 return rc; 3805 } 3806 3807 static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt) 3808 { 3809 bool rc; 3810 3811 smp_mb(); 3812 3813 /* Don't let to refill if credit + cnt > pool_sz */ 3814 rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1); 3815 3816 smp_mb(); 3817 3818 return rc; 3819 } 3820 3821 static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o) 3822 { 3823 int cur_credit; 3824 3825 smp_mb(); 3826 cur_credit = atomic_read(&o->credit); 3827 3828 return cur_credit; 3829 } 3830 3831 static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o, 3832 int cnt) 3833 { 3834 return true; 3835 } 3836 3837 3838 static bool bnx2x_credit_pool_get_entry( 3839 struct bnx2x_credit_pool_obj *o, 3840 int *offset) 3841 { 3842 int idx, vec, i; 3843 3844 *offset = -1; 3845 3846 /* Find "internal cam-offset" then add to base for this object... */ 3847 for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) { 3848 3849 /* Skip the current vector if there are no free entries in it */ 3850 if (!o->pool_mirror[vec]) 3851 continue; 3852 3853 /* If we've got here we are going to find a free entry */ 3854 for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0; 3855 i < BIT_VEC64_ELEM_SZ; idx++, i++) 3856 3857 if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) { 3858 /* Got one!! */ 3859 BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx); 3860 *offset = o->base_pool_offset + idx; 3861 return true; 3862 } 3863 } 3864 3865 return false; 3866 } 3867 3868 static bool bnx2x_credit_pool_put_entry( 3869 struct bnx2x_credit_pool_obj *o, 3870 int offset) 3871 { 3872 if (offset < o->base_pool_offset) 3873 return false; 3874 3875 offset -= o->base_pool_offset; 3876 3877 if (offset >= o->pool_sz) 3878 return false; 3879 3880 /* Return the entry to the pool */ 3881 BIT_VEC64_SET_BIT(o->pool_mirror, offset); 3882 3883 return true; 3884 } 3885 3886 static bool bnx2x_credit_pool_put_entry_always_true( 3887 struct bnx2x_credit_pool_obj *o, 3888 int offset) 3889 { 3890 return true; 3891 } 3892 3893 static bool bnx2x_credit_pool_get_entry_always_true( 3894 struct bnx2x_credit_pool_obj *o, 3895 int *offset) 3896 { 3897 *offset = -1; 3898 return true; 3899 } 3900 /** 3901 * bnx2x_init_credit_pool - initialize credit pool internals. 3902 * 3903 * @p: 3904 * @base: Base entry in the CAM to use. 3905 * @credit: pool size. 3906 * 3907 * If base is negative no CAM entries handling will be performed. 3908 * If credit is negative pool operations will always succeed (unlimited pool). 3909 * 3910 */ 3911 static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p, 3912 int base, int credit) 3913 { 3914 /* Zero the object first */ 3915 memset(p, 0, sizeof(*p)); 3916 3917 /* Set the table to all 1s */ 3918 memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror)); 3919 3920 /* Init a pool as full */ 3921 atomic_set(&p->credit, credit); 3922 3923 /* The total poll size */ 3924 p->pool_sz = credit; 3925 3926 p->base_pool_offset = base; 3927 3928 /* Commit the change */ 3929 smp_mb(); 3930 3931 p->check = bnx2x_credit_pool_check; 3932 3933 /* if pool credit is negative - disable the checks */ 3934 if (credit >= 0) { 3935 p->put = bnx2x_credit_pool_put; 3936 p->get = bnx2x_credit_pool_get; 3937 p->put_entry = bnx2x_credit_pool_put_entry; 3938 p->get_entry = bnx2x_credit_pool_get_entry; 3939 } else { 3940 p->put = bnx2x_credit_pool_always_true; 3941 p->get = bnx2x_credit_pool_always_true; 3942 p->put_entry = bnx2x_credit_pool_put_entry_always_true; 3943 p->get_entry = bnx2x_credit_pool_get_entry_always_true; 3944 } 3945 3946 /* If base is negative - disable entries handling */ 3947 if (base < 0) { 3948 p->put_entry = bnx2x_credit_pool_put_entry_always_true; 3949 p->get_entry = bnx2x_credit_pool_get_entry_always_true; 3950 } 3951 } 3952 3953 void bnx2x_init_mac_credit_pool(struct bnx2x *bp, 3954 struct bnx2x_credit_pool_obj *p, u8 func_id, 3955 u8 func_num) 3956 { 3957 /* TODO: this will be defined in consts as well... */ 3958 #define BNX2X_CAM_SIZE_EMUL 5 3959 3960 int cam_sz; 3961 3962 if (CHIP_IS_E1(bp)) { 3963 /* In E1, Multicast is saved in cam... */ 3964 if (!CHIP_REV_IS_SLOW(bp)) 3965 cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST; 3966 else 3967 cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI; 3968 3969 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz); 3970 3971 } else if (CHIP_IS_E1H(bp)) { 3972 /* CAM credit is equaly divided between all active functions 3973 * on the PORT!. 3974 */ 3975 if ((func_num > 0)) { 3976 if (!CHIP_REV_IS_SLOW(bp)) 3977 cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num)); 3978 else 3979 cam_sz = BNX2X_CAM_SIZE_EMUL; 3980 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz); 3981 } else { 3982 /* this should never happen! Block MAC operations. */ 3983 bnx2x_init_credit_pool(p, 0, 0); 3984 } 3985 3986 } else { 3987 3988 /* 3989 * CAM credit is equaly divided between all active functions 3990 * on the PATH. 3991 */ 3992 if ((func_num > 0)) { 3993 if (!CHIP_REV_IS_SLOW(bp)) 3994 cam_sz = (MAX_MAC_CREDIT_E2 / func_num); 3995 else 3996 cam_sz = BNX2X_CAM_SIZE_EMUL; 3997 3998 /* 3999 * No need for CAM entries handling for 57712 and 4000 * newer. 4001 */ 4002 bnx2x_init_credit_pool(p, -1, cam_sz); 4003 } else { 4004 /* this should never happen! Block MAC operations. */ 4005 bnx2x_init_credit_pool(p, 0, 0); 4006 } 4007 4008 } 4009 } 4010 4011 void bnx2x_init_vlan_credit_pool(struct bnx2x *bp, 4012 struct bnx2x_credit_pool_obj *p, 4013 u8 func_id, 4014 u8 func_num) 4015 { 4016 if (CHIP_IS_E1x(bp)) { 4017 /* 4018 * There is no VLAN credit in HW on 57710 and 57711 only 4019 * MAC / MAC-VLAN can be set 4020 */ 4021 bnx2x_init_credit_pool(p, 0, -1); 4022 } else { 4023 /* 4024 * CAM credit is equaly divided between all active functions 4025 * on the PATH. 4026 */ 4027 if (func_num > 0) { 4028 int credit = MAX_VLAN_CREDIT_E2 / func_num; 4029 bnx2x_init_credit_pool(p, func_id * credit, credit); 4030 } else 4031 /* this should never happen! Block VLAN operations. */ 4032 bnx2x_init_credit_pool(p, 0, 0); 4033 } 4034 } 4035 4036 /****************** RSS Configuration ******************/ 4037 /** 4038 * bnx2x_debug_print_ind_table - prints the indirection table configuration. 4039 * 4040 * @bp: driver hanlde 4041 * @p: pointer to rss configuration 4042 * 4043 * Prints it when NETIF_MSG_IFUP debug level is configured. 4044 */ 4045 static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp, 4046 struct bnx2x_config_rss_params *p) 4047 { 4048 int i; 4049 4050 DP(BNX2X_MSG_SP, "Setting indirection table to:\n"); 4051 DP(BNX2X_MSG_SP, "0x0000: "); 4052 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) { 4053 DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]); 4054 4055 /* Print 4 bytes in a line */ 4056 if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) && 4057 (((i + 1) & 0x3) == 0)) { 4058 DP_CONT(BNX2X_MSG_SP, "\n"); 4059 DP(BNX2X_MSG_SP, "0x%04x: ", i + 1); 4060 } 4061 } 4062 4063 DP_CONT(BNX2X_MSG_SP, "\n"); 4064 } 4065 4066 /** 4067 * bnx2x_setup_rss - configure RSS 4068 * 4069 * @bp: device handle 4070 * @p: rss configuration 4071 * 4072 * sends on UPDATE ramrod for that matter. 4073 */ 4074 static int bnx2x_setup_rss(struct bnx2x *bp, 4075 struct bnx2x_config_rss_params *p) 4076 { 4077 struct bnx2x_rss_config_obj *o = p->rss_obj; 4078 struct bnx2x_raw_obj *r = &o->raw; 4079 struct eth_rss_update_ramrod_data *data = 4080 (struct eth_rss_update_ramrod_data *)(r->rdata); 4081 u8 rss_mode = 0; 4082 int rc; 4083 4084 memset(data, 0, sizeof(*data)); 4085 4086 DP(BNX2X_MSG_SP, "Configuring RSS\n"); 4087 4088 /* Set an echo field */ 4089 data->echo = (r->cid & BNX2X_SWCID_MASK) | 4090 (r->state << BNX2X_SWCID_SHIFT); 4091 4092 /* RSS mode */ 4093 if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags)) 4094 rss_mode = ETH_RSS_MODE_DISABLED; 4095 else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags)) 4096 rss_mode = ETH_RSS_MODE_REGULAR; 4097 4098 data->rss_mode = rss_mode; 4099 4100 DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode); 4101 4102 /* RSS capabilities */ 4103 if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags)) 4104 data->capabilities |= 4105 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY; 4106 4107 if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags)) 4108 data->capabilities |= 4109 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY; 4110 4111 if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags)) 4112 data->capabilities |= 4113 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY; 4114 4115 if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags)) 4116 data->capabilities |= 4117 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY; 4118 4119 if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags)) 4120 data->capabilities |= 4121 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY; 4122 4123 if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags)) 4124 data->capabilities |= 4125 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY; 4126 4127 /* Hashing mask */ 4128 data->rss_result_mask = p->rss_result_mask; 4129 4130 /* RSS engine ID */ 4131 data->rss_engine_id = o->engine_id; 4132 4133 DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id); 4134 4135 /* Indirection table */ 4136 memcpy(data->indirection_table, p->ind_table, 4137 T_ETH_INDIRECTION_TABLE_SIZE); 4138 4139 /* Remember the last configuration */ 4140 memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE); 4141 4142 /* Print the indirection table */ 4143 if (netif_msg_ifup(bp)) 4144 bnx2x_debug_print_ind_table(bp, p); 4145 4146 /* RSS keys */ 4147 if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) { 4148 memcpy(&data->rss_key[0], &p->rss_key[0], 4149 sizeof(data->rss_key)); 4150 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY; 4151 } 4152 4153 /* 4154 * No need for an explicit memory barrier here as long we would 4155 * need to ensure the ordering of writing to the SPQ element 4156 * and updating of the SPQ producer which involves a memory 4157 * read and we will have to put a full memory barrier there 4158 * (inside bnx2x_sp_post()). 4159 */ 4160 4161 /* Send a ramrod */ 4162 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid, 4163 U64_HI(r->rdata_mapping), 4164 U64_LO(r->rdata_mapping), 4165 ETH_CONNECTION_TYPE); 4166 4167 if (rc < 0) 4168 return rc; 4169 4170 return 1; 4171 } 4172 4173 void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj, 4174 u8 *ind_table) 4175 { 4176 memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table)); 4177 } 4178 4179 int bnx2x_config_rss(struct bnx2x *bp, 4180 struct bnx2x_config_rss_params *p) 4181 { 4182 int rc; 4183 struct bnx2x_rss_config_obj *o = p->rss_obj; 4184 struct bnx2x_raw_obj *r = &o->raw; 4185 4186 /* Do nothing if only driver cleanup was requested */ 4187 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) 4188 return 0; 4189 4190 r->set_pending(r); 4191 4192 rc = o->config_rss(bp, p); 4193 if (rc < 0) { 4194 r->clear_pending(r); 4195 return rc; 4196 } 4197 4198 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) 4199 rc = r->wait_comp(bp, r); 4200 4201 return rc; 4202 } 4203 4204 4205 void bnx2x_init_rss_config_obj(struct bnx2x *bp, 4206 struct bnx2x_rss_config_obj *rss_obj, 4207 u8 cl_id, u32 cid, u8 func_id, u8 engine_id, 4208 void *rdata, dma_addr_t rdata_mapping, 4209 int state, unsigned long *pstate, 4210 bnx2x_obj_type type) 4211 { 4212 bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata, 4213 rdata_mapping, state, pstate, type); 4214 4215 rss_obj->engine_id = engine_id; 4216 rss_obj->config_rss = bnx2x_setup_rss; 4217 } 4218 4219 /********************** Queue state object ***********************************/ 4220 4221 /** 4222 * bnx2x_queue_state_change - perform Queue state change transition 4223 * 4224 * @bp: device handle 4225 * @params: parameters to perform the transition 4226 * 4227 * returns 0 in case of successfully completed transition, negative error 4228 * code in case of failure, positive (EBUSY) value if there is a completion 4229 * to that is still pending (possible only if RAMROD_COMP_WAIT is 4230 * not set in params->ramrod_flags for asynchronous commands). 4231 * 4232 */ 4233 int bnx2x_queue_state_change(struct bnx2x *bp, 4234 struct bnx2x_queue_state_params *params) 4235 { 4236 struct bnx2x_queue_sp_obj *o = params->q_obj; 4237 int rc, pending_bit; 4238 unsigned long *pending = &o->pending; 4239 4240 /* Check that the requested transition is legal */ 4241 if (o->check_transition(bp, o, params)) 4242 return -EINVAL; 4243 4244 /* Set "pending" bit */ 4245 pending_bit = o->set_pending(o, params); 4246 4247 /* Don't send a command if only driver cleanup was requested */ 4248 if (test_bit(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) 4249 o->complete_cmd(bp, o, pending_bit); 4250 else { 4251 /* Send a ramrod */ 4252 rc = o->send_cmd(bp, params); 4253 if (rc) { 4254 o->next_state = BNX2X_Q_STATE_MAX; 4255 clear_bit(pending_bit, pending); 4256 smp_mb__after_clear_bit(); 4257 return rc; 4258 } 4259 4260 if (test_bit(RAMROD_COMP_WAIT, ¶ms->ramrod_flags)) { 4261 rc = o->wait_comp(bp, o, pending_bit); 4262 if (rc) 4263 return rc; 4264 4265 return 0; 4266 } 4267 } 4268 4269 return !!test_bit(pending_bit, pending); 4270 } 4271 4272 4273 static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj, 4274 struct bnx2x_queue_state_params *params) 4275 { 4276 enum bnx2x_queue_cmd cmd = params->cmd, bit; 4277 4278 /* ACTIVATE and DEACTIVATE commands are implemented on top of 4279 * UPDATE command. 4280 */ 4281 if ((cmd == BNX2X_Q_CMD_ACTIVATE) || 4282 (cmd == BNX2X_Q_CMD_DEACTIVATE)) 4283 bit = BNX2X_Q_CMD_UPDATE; 4284 else 4285 bit = cmd; 4286 4287 set_bit(bit, &obj->pending); 4288 return bit; 4289 } 4290 4291 static int bnx2x_queue_wait_comp(struct bnx2x *bp, 4292 struct bnx2x_queue_sp_obj *o, 4293 enum bnx2x_queue_cmd cmd) 4294 { 4295 return bnx2x_state_wait(bp, cmd, &o->pending); 4296 } 4297 4298 /** 4299 * bnx2x_queue_comp_cmd - complete the state change command. 4300 * 4301 * @bp: device handle 4302 * @o: 4303 * @cmd: 4304 * 4305 * Checks that the arrived completion is expected. 4306 */ 4307 static int bnx2x_queue_comp_cmd(struct bnx2x *bp, 4308 struct bnx2x_queue_sp_obj *o, 4309 enum bnx2x_queue_cmd cmd) 4310 { 4311 unsigned long cur_pending = o->pending; 4312 4313 if (!test_and_clear_bit(cmd, &cur_pending)) { 4314 BNX2X_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n", 4315 cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], 4316 o->state, cur_pending, o->next_state); 4317 return -EINVAL; 4318 } 4319 4320 if (o->next_tx_only >= o->max_cos) 4321 /* >= becuase tx only must always be smaller than cos since the 4322 * primary connection suports COS 0 4323 */ 4324 BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d", 4325 o->next_tx_only, o->max_cos); 4326 4327 DP(BNX2X_MSG_SP, 4328 "Completing command %d for queue %d, setting state to %d\n", 4329 cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state); 4330 4331 if (o->next_tx_only) /* print num tx-only if any exist */ 4332 DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n", 4333 o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only); 4334 4335 o->state = o->next_state; 4336 o->num_tx_only = o->next_tx_only; 4337 o->next_state = BNX2X_Q_STATE_MAX; 4338 4339 /* It's important that o->state and o->next_state are 4340 * updated before o->pending. 4341 */ 4342 wmb(); 4343 4344 clear_bit(cmd, &o->pending); 4345 smp_mb__after_clear_bit(); 4346 4347 return 0; 4348 } 4349 4350 static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp, 4351 struct bnx2x_queue_state_params *cmd_params, 4352 struct client_init_ramrod_data *data) 4353 { 4354 struct bnx2x_queue_setup_params *params = &cmd_params->params.setup; 4355 4356 /* Rx data */ 4357 4358 /* IPv6 TPA supported for E2 and above only */ 4359 data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, ¶ms->flags) * 4360 CLIENT_INIT_RX_DATA_TPA_EN_IPV6; 4361 } 4362 4363 static void bnx2x_q_fill_init_general_data(struct bnx2x *bp, 4364 struct bnx2x_queue_sp_obj *o, 4365 struct bnx2x_general_setup_params *params, 4366 struct client_init_general_data *gen_data, 4367 unsigned long *flags) 4368 { 4369 gen_data->client_id = o->cl_id; 4370 4371 if (test_bit(BNX2X_Q_FLG_STATS, flags)) { 4372 gen_data->statistics_counter_id = 4373 params->stat_id; 4374 gen_data->statistics_en_flg = 1; 4375 gen_data->statistics_zero_flg = 4376 test_bit(BNX2X_Q_FLG_ZERO_STATS, flags); 4377 } else 4378 gen_data->statistics_counter_id = 4379 DISABLE_STATISTIC_COUNTER_ID_VALUE; 4380 4381 gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags); 4382 gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags); 4383 gen_data->sp_client_id = params->spcl_id; 4384 gen_data->mtu = cpu_to_le16(params->mtu); 4385 gen_data->func_id = o->func_id; 4386 4387 4388 gen_data->cos = params->cos; 4389 4390 gen_data->traffic_type = 4391 test_bit(BNX2X_Q_FLG_FCOE, flags) ? 4392 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW; 4393 4394 DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n", 4395 gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg); 4396 } 4397 4398 static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o, 4399 struct bnx2x_txq_setup_params *params, 4400 struct client_init_tx_data *tx_data, 4401 unsigned long *flags) 4402 { 4403 tx_data->enforce_security_flg = 4404 test_bit(BNX2X_Q_FLG_TX_SEC, flags); 4405 tx_data->default_vlan = 4406 cpu_to_le16(params->default_vlan); 4407 tx_data->default_vlan_flg = 4408 test_bit(BNX2X_Q_FLG_DEF_VLAN, flags); 4409 tx_data->tx_switching_flg = 4410 test_bit(BNX2X_Q_FLG_TX_SWITCH, flags); 4411 tx_data->anti_spoofing_flg = 4412 test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags); 4413 tx_data->force_default_pri_flg = 4414 test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags); 4415 4416 tx_data->tx_status_block_id = params->fw_sb_id; 4417 tx_data->tx_sb_index_number = params->sb_cq_index; 4418 tx_data->tss_leading_client_id = params->tss_leading_cl_id; 4419 4420 tx_data->tx_bd_page_base.lo = 4421 cpu_to_le32(U64_LO(params->dscr_map)); 4422 tx_data->tx_bd_page_base.hi = 4423 cpu_to_le32(U64_HI(params->dscr_map)); 4424 4425 /* Don't configure any Tx switching mode during queue SETUP */ 4426 tx_data->state = 0; 4427 } 4428 4429 static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o, 4430 struct rxq_pause_params *params, 4431 struct client_init_rx_data *rx_data) 4432 { 4433 /* flow control data */ 4434 rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo); 4435 rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi); 4436 rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo); 4437 rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi); 4438 rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo); 4439 rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi); 4440 rx_data->rx_cos_mask = cpu_to_le16(params->pri_map); 4441 } 4442 4443 static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o, 4444 struct bnx2x_rxq_setup_params *params, 4445 struct client_init_rx_data *rx_data, 4446 unsigned long *flags) 4447 { 4448 rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) * 4449 CLIENT_INIT_RX_DATA_TPA_EN_IPV4; 4450 rx_data->tpa_en |= test_bit(BNX2X_Q_FLG_TPA_GRO, flags) * 4451 CLIENT_INIT_RX_DATA_TPA_MODE; 4452 rx_data->vmqueue_mode_en_flg = 0; 4453 4454 rx_data->cache_line_alignment_log_size = 4455 params->cache_line_log; 4456 rx_data->enable_dynamic_hc = 4457 test_bit(BNX2X_Q_FLG_DHC, flags); 4458 rx_data->max_sges_for_packet = params->max_sges_pkt; 4459 rx_data->client_qzone_id = params->cl_qzone_id; 4460 rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz); 4461 4462 /* Always start in DROP_ALL mode */ 4463 rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL | 4464 CLIENT_INIT_RX_DATA_MCAST_DROP_ALL); 4465 4466 /* We don't set drop flags */ 4467 rx_data->drop_ip_cs_err_flg = 0; 4468 rx_data->drop_tcp_cs_err_flg = 0; 4469 rx_data->drop_ttl0_flg = 0; 4470 rx_data->drop_udp_cs_err_flg = 0; 4471 rx_data->inner_vlan_removal_enable_flg = 4472 test_bit(BNX2X_Q_FLG_VLAN, flags); 4473 rx_data->outer_vlan_removal_enable_flg = 4474 test_bit(BNX2X_Q_FLG_OV, flags); 4475 rx_data->status_block_id = params->fw_sb_id; 4476 rx_data->rx_sb_index_number = params->sb_cq_index; 4477 rx_data->max_tpa_queues = params->max_tpa_queues; 4478 rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz); 4479 rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz); 4480 rx_data->bd_page_base.lo = 4481 cpu_to_le32(U64_LO(params->dscr_map)); 4482 rx_data->bd_page_base.hi = 4483 cpu_to_le32(U64_HI(params->dscr_map)); 4484 rx_data->sge_page_base.lo = 4485 cpu_to_le32(U64_LO(params->sge_map)); 4486 rx_data->sge_page_base.hi = 4487 cpu_to_le32(U64_HI(params->sge_map)); 4488 rx_data->cqe_page_base.lo = 4489 cpu_to_le32(U64_LO(params->rcq_map)); 4490 rx_data->cqe_page_base.hi = 4491 cpu_to_le32(U64_HI(params->rcq_map)); 4492 rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags); 4493 4494 if (test_bit(BNX2X_Q_FLG_MCAST, flags)) { 4495 rx_data->approx_mcast_engine_id = params->mcast_engine_id; 4496 rx_data->is_approx_mcast = 1; 4497 } 4498 4499 rx_data->rss_engine_id = params->rss_engine_id; 4500 4501 /* silent vlan removal */ 4502 rx_data->silent_vlan_removal_flg = 4503 test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags); 4504 rx_data->silent_vlan_value = 4505 cpu_to_le16(params->silent_removal_value); 4506 rx_data->silent_vlan_mask = 4507 cpu_to_le16(params->silent_removal_mask); 4508 4509 } 4510 4511 /* initialize the general, tx and rx parts of a queue object */ 4512 static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp, 4513 struct bnx2x_queue_state_params *cmd_params, 4514 struct client_init_ramrod_data *data) 4515 { 4516 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj, 4517 &cmd_params->params.setup.gen_params, 4518 &data->general, 4519 &cmd_params->params.setup.flags); 4520 4521 bnx2x_q_fill_init_tx_data(cmd_params->q_obj, 4522 &cmd_params->params.setup.txq_params, 4523 &data->tx, 4524 &cmd_params->params.setup.flags); 4525 4526 bnx2x_q_fill_init_rx_data(cmd_params->q_obj, 4527 &cmd_params->params.setup.rxq_params, 4528 &data->rx, 4529 &cmd_params->params.setup.flags); 4530 4531 bnx2x_q_fill_init_pause_data(cmd_params->q_obj, 4532 &cmd_params->params.setup.pause_params, 4533 &data->rx); 4534 } 4535 4536 /* initialize the general and tx parts of a tx-only queue object */ 4537 static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp, 4538 struct bnx2x_queue_state_params *cmd_params, 4539 struct tx_queue_init_ramrod_data *data) 4540 { 4541 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj, 4542 &cmd_params->params.tx_only.gen_params, 4543 &data->general, 4544 &cmd_params->params.tx_only.flags); 4545 4546 bnx2x_q_fill_init_tx_data(cmd_params->q_obj, 4547 &cmd_params->params.tx_only.txq_params, 4548 &data->tx, 4549 &cmd_params->params.tx_only.flags); 4550 4551 DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x", 4552 cmd_params->q_obj->cids[0], 4553 data->tx.tx_bd_page_base.lo, 4554 data->tx.tx_bd_page_base.hi); 4555 } 4556 4557 /** 4558 * bnx2x_q_init - init HW/FW queue 4559 * 4560 * @bp: device handle 4561 * @params: 4562 * 4563 * HW/FW initial Queue configuration: 4564 * - HC: Rx and Tx 4565 * - CDU context validation 4566 * 4567 */ 4568 static inline int bnx2x_q_init(struct bnx2x *bp, 4569 struct bnx2x_queue_state_params *params) 4570 { 4571 struct bnx2x_queue_sp_obj *o = params->q_obj; 4572 struct bnx2x_queue_init_params *init = ¶ms->params.init; 4573 u16 hc_usec; 4574 u8 cos; 4575 4576 /* Tx HC configuration */ 4577 if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) && 4578 test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) { 4579 hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0; 4580 4581 bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id, 4582 init->tx.sb_cq_index, 4583 !test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags), 4584 hc_usec); 4585 } 4586 4587 /* Rx HC configuration */ 4588 if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) && 4589 test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) { 4590 hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0; 4591 4592 bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id, 4593 init->rx.sb_cq_index, 4594 !test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags), 4595 hc_usec); 4596 } 4597 4598 /* Set CDU context validation values */ 4599 for (cos = 0; cos < o->max_cos; cos++) { 4600 DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n", 4601 o->cids[cos], cos); 4602 DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]); 4603 bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]); 4604 } 4605 4606 /* As no ramrod is sent, complete the command immediately */ 4607 o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT); 4608 4609 mmiowb(); 4610 smp_mb(); 4611 4612 return 0; 4613 } 4614 4615 static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp, 4616 struct bnx2x_queue_state_params *params) 4617 { 4618 struct bnx2x_queue_sp_obj *o = params->q_obj; 4619 struct client_init_ramrod_data *rdata = 4620 (struct client_init_ramrod_data *)o->rdata; 4621 dma_addr_t data_mapping = o->rdata_mapping; 4622 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP; 4623 4624 /* Clear the ramrod data */ 4625 memset(rdata, 0, sizeof(*rdata)); 4626 4627 /* Fill the ramrod data */ 4628 bnx2x_q_fill_setup_data_cmn(bp, params, rdata); 4629 4630 /* 4631 * No need for an explicit memory barrier here as long we would 4632 * need to ensure the ordering of writing to the SPQ element 4633 * and updating of the SPQ producer which involves a memory 4634 * read and we will have to put a full memory barrier there 4635 * (inside bnx2x_sp_post()). 4636 */ 4637 4638 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX], 4639 U64_HI(data_mapping), 4640 U64_LO(data_mapping), ETH_CONNECTION_TYPE); 4641 } 4642 4643 static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp, 4644 struct bnx2x_queue_state_params *params) 4645 { 4646 struct bnx2x_queue_sp_obj *o = params->q_obj; 4647 struct client_init_ramrod_data *rdata = 4648 (struct client_init_ramrod_data *)o->rdata; 4649 dma_addr_t data_mapping = o->rdata_mapping; 4650 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP; 4651 4652 /* Clear the ramrod data */ 4653 memset(rdata, 0, sizeof(*rdata)); 4654 4655 /* Fill the ramrod data */ 4656 bnx2x_q_fill_setup_data_cmn(bp, params, rdata); 4657 bnx2x_q_fill_setup_data_e2(bp, params, rdata); 4658 4659 /* 4660 * No need for an explicit memory barrier here as long we would 4661 * need to ensure the ordering of writing to the SPQ element 4662 * and updating of the SPQ producer which involves a memory 4663 * read and we will have to put a full memory barrier there 4664 * (inside bnx2x_sp_post()). 4665 */ 4666 4667 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX], 4668 U64_HI(data_mapping), 4669 U64_LO(data_mapping), ETH_CONNECTION_TYPE); 4670 } 4671 4672 static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp, 4673 struct bnx2x_queue_state_params *params) 4674 { 4675 struct bnx2x_queue_sp_obj *o = params->q_obj; 4676 struct tx_queue_init_ramrod_data *rdata = 4677 (struct tx_queue_init_ramrod_data *)o->rdata; 4678 dma_addr_t data_mapping = o->rdata_mapping; 4679 int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP; 4680 struct bnx2x_queue_setup_tx_only_params *tx_only_params = 4681 ¶ms->params.tx_only; 4682 u8 cid_index = tx_only_params->cid_index; 4683 4684 4685 if (cid_index >= o->max_cos) { 4686 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n", 4687 o->cl_id, cid_index); 4688 return -EINVAL; 4689 } 4690 4691 DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n", 4692 tx_only_params->gen_params.cos, 4693 tx_only_params->gen_params.spcl_id); 4694 4695 /* Clear the ramrod data */ 4696 memset(rdata, 0, sizeof(*rdata)); 4697 4698 /* Fill the ramrod data */ 4699 bnx2x_q_fill_setup_tx_only(bp, params, rdata); 4700 4701 DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n", 4702 o->cids[cid_index], rdata->general.client_id, 4703 rdata->general.sp_client_id, rdata->general.cos); 4704 4705 /* 4706 * No need for an explicit memory barrier here as long we would 4707 * need to ensure the ordering of writing to the SPQ element 4708 * and updating of the SPQ producer which involves a memory 4709 * read and we will have to put a full memory barrier there 4710 * (inside bnx2x_sp_post()). 4711 */ 4712 4713 return bnx2x_sp_post(bp, ramrod, o->cids[cid_index], 4714 U64_HI(data_mapping), 4715 U64_LO(data_mapping), ETH_CONNECTION_TYPE); 4716 } 4717 4718 static void bnx2x_q_fill_update_data(struct bnx2x *bp, 4719 struct bnx2x_queue_sp_obj *obj, 4720 struct bnx2x_queue_update_params *params, 4721 struct client_update_ramrod_data *data) 4722 { 4723 /* Client ID of the client to update */ 4724 data->client_id = obj->cl_id; 4725 4726 /* Function ID of the client to update */ 4727 data->func_id = obj->func_id; 4728 4729 /* Default VLAN value */ 4730 data->default_vlan = cpu_to_le16(params->def_vlan); 4731 4732 /* Inner VLAN stripping */ 4733 data->inner_vlan_removal_enable_flg = 4734 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, ¶ms->update_flags); 4735 data->inner_vlan_removal_change_flg = 4736 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG, 4737 ¶ms->update_flags); 4738 4739 /* Outer VLAN sripping */ 4740 data->outer_vlan_removal_enable_flg = 4741 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, ¶ms->update_flags); 4742 data->outer_vlan_removal_change_flg = 4743 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG, 4744 ¶ms->update_flags); 4745 4746 /* Drop packets that have source MAC that doesn't belong to this 4747 * Queue. 4748 */ 4749 data->anti_spoofing_enable_flg = 4750 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, ¶ms->update_flags); 4751 data->anti_spoofing_change_flg = 4752 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, ¶ms->update_flags); 4753 4754 /* Activate/Deactivate */ 4755 data->activate_flg = 4756 test_bit(BNX2X_Q_UPDATE_ACTIVATE, ¶ms->update_flags); 4757 data->activate_change_flg = 4758 test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, ¶ms->update_flags); 4759 4760 /* Enable default VLAN */ 4761 data->default_vlan_enable_flg = 4762 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, ¶ms->update_flags); 4763 data->default_vlan_change_flg = 4764 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, 4765 ¶ms->update_flags); 4766 4767 /* silent vlan removal */ 4768 data->silent_vlan_change_flg = 4769 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, 4770 ¶ms->update_flags); 4771 data->silent_vlan_removal_flg = 4772 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, ¶ms->update_flags); 4773 data->silent_vlan_value = cpu_to_le16(params->silent_removal_value); 4774 data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask); 4775 } 4776 4777 static inline int bnx2x_q_send_update(struct bnx2x *bp, 4778 struct bnx2x_queue_state_params *params) 4779 { 4780 struct bnx2x_queue_sp_obj *o = params->q_obj; 4781 struct client_update_ramrod_data *rdata = 4782 (struct client_update_ramrod_data *)o->rdata; 4783 dma_addr_t data_mapping = o->rdata_mapping; 4784 struct bnx2x_queue_update_params *update_params = 4785 ¶ms->params.update; 4786 u8 cid_index = update_params->cid_index; 4787 4788 if (cid_index >= o->max_cos) { 4789 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n", 4790 o->cl_id, cid_index); 4791 return -EINVAL; 4792 } 4793 4794 4795 /* Clear the ramrod data */ 4796 memset(rdata, 0, sizeof(*rdata)); 4797 4798 /* Fill the ramrod data */ 4799 bnx2x_q_fill_update_data(bp, o, update_params, rdata); 4800 4801 /* 4802 * No need for an explicit memory barrier here as long we would 4803 * need to ensure the ordering of writing to the SPQ element 4804 * and updating of the SPQ producer which involves a memory 4805 * read and we will have to put a full memory barrier there 4806 * (inside bnx2x_sp_post()). 4807 */ 4808 4809 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE, 4810 o->cids[cid_index], U64_HI(data_mapping), 4811 U64_LO(data_mapping), ETH_CONNECTION_TYPE); 4812 } 4813 4814 /** 4815 * bnx2x_q_send_deactivate - send DEACTIVATE command 4816 * 4817 * @bp: device handle 4818 * @params: 4819 * 4820 * implemented using the UPDATE command. 4821 */ 4822 static inline int bnx2x_q_send_deactivate(struct bnx2x *bp, 4823 struct bnx2x_queue_state_params *params) 4824 { 4825 struct bnx2x_queue_update_params *update = ¶ms->params.update; 4826 4827 memset(update, 0, sizeof(*update)); 4828 4829 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags); 4830 4831 return bnx2x_q_send_update(bp, params); 4832 } 4833 4834 /** 4835 * bnx2x_q_send_activate - send ACTIVATE command 4836 * 4837 * @bp: device handle 4838 * @params: 4839 * 4840 * implemented using the UPDATE command. 4841 */ 4842 static inline int bnx2x_q_send_activate(struct bnx2x *bp, 4843 struct bnx2x_queue_state_params *params) 4844 { 4845 struct bnx2x_queue_update_params *update = ¶ms->params.update; 4846 4847 memset(update, 0, sizeof(*update)); 4848 4849 __set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags); 4850 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags); 4851 4852 return bnx2x_q_send_update(bp, params); 4853 } 4854 4855 static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp, 4856 struct bnx2x_queue_state_params *params) 4857 { 4858 /* TODO: Not implemented yet. */ 4859 return -1; 4860 } 4861 4862 static inline int bnx2x_q_send_halt(struct bnx2x *bp, 4863 struct bnx2x_queue_state_params *params) 4864 { 4865 struct bnx2x_queue_sp_obj *o = params->q_obj; 4866 4867 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 4868 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id, 4869 ETH_CONNECTION_TYPE); 4870 } 4871 4872 static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp, 4873 struct bnx2x_queue_state_params *params) 4874 { 4875 struct bnx2x_queue_sp_obj *o = params->q_obj; 4876 u8 cid_idx = params->params.cfc_del.cid_index; 4877 4878 if (cid_idx >= o->max_cos) { 4879 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n", 4880 o->cl_id, cid_idx); 4881 return -EINVAL; 4882 } 4883 4884 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, 4885 o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE); 4886 } 4887 4888 static inline int bnx2x_q_send_terminate(struct bnx2x *bp, 4889 struct bnx2x_queue_state_params *params) 4890 { 4891 struct bnx2x_queue_sp_obj *o = params->q_obj; 4892 u8 cid_index = params->params.terminate.cid_index; 4893 4894 if (cid_index >= o->max_cos) { 4895 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n", 4896 o->cl_id, cid_index); 4897 return -EINVAL; 4898 } 4899 4900 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, 4901 o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE); 4902 } 4903 4904 static inline int bnx2x_q_send_empty(struct bnx2x *bp, 4905 struct bnx2x_queue_state_params *params) 4906 { 4907 struct bnx2x_queue_sp_obj *o = params->q_obj; 4908 4909 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY, 4910 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0, 4911 ETH_CONNECTION_TYPE); 4912 } 4913 4914 static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp, 4915 struct bnx2x_queue_state_params *params) 4916 { 4917 switch (params->cmd) { 4918 case BNX2X_Q_CMD_INIT: 4919 return bnx2x_q_init(bp, params); 4920 case BNX2X_Q_CMD_SETUP_TX_ONLY: 4921 return bnx2x_q_send_setup_tx_only(bp, params); 4922 case BNX2X_Q_CMD_DEACTIVATE: 4923 return bnx2x_q_send_deactivate(bp, params); 4924 case BNX2X_Q_CMD_ACTIVATE: 4925 return bnx2x_q_send_activate(bp, params); 4926 case BNX2X_Q_CMD_UPDATE: 4927 return bnx2x_q_send_update(bp, params); 4928 case BNX2X_Q_CMD_UPDATE_TPA: 4929 return bnx2x_q_send_update_tpa(bp, params); 4930 case BNX2X_Q_CMD_HALT: 4931 return bnx2x_q_send_halt(bp, params); 4932 case BNX2X_Q_CMD_CFC_DEL: 4933 return bnx2x_q_send_cfc_del(bp, params); 4934 case BNX2X_Q_CMD_TERMINATE: 4935 return bnx2x_q_send_terminate(bp, params); 4936 case BNX2X_Q_CMD_EMPTY: 4937 return bnx2x_q_send_empty(bp, params); 4938 default: 4939 BNX2X_ERR("Unknown command: %d\n", params->cmd); 4940 return -EINVAL; 4941 } 4942 } 4943 4944 static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp, 4945 struct bnx2x_queue_state_params *params) 4946 { 4947 switch (params->cmd) { 4948 case BNX2X_Q_CMD_SETUP: 4949 return bnx2x_q_send_setup_e1x(bp, params); 4950 case BNX2X_Q_CMD_INIT: 4951 case BNX2X_Q_CMD_SETUP_TX_ONLY: 4952 case BNX2X_Q_CMD_DEACTIVATE: 4953 case BNX2X_Q_CMD_ACTIVATE: 4954 case BNX2X_Q_CMD_UPDATE: 4955 case BNX2X_Q_CMD_UPDATE_TPA: 4956 case BNX2X_Q_CMD_HALT: 4957 case BNX2X_Q_CMD_CFC_DEL: 4958 case BNX2X_Q_CMD_TERMINATE: 4959 case BNX2X_Q_CMD_EMPTY: 4960 return bnx2x_queue_send_cmd_cmn(bp, params); 4961 default: 4962 BNX2X_ERR("Unknown command: %d\n", params->cmd); 4963 return -EINVAL; 4964 } 4965 } 4966 4967 static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp, 4968 struct bnx2x_queue_state_params *params) 4969 { 4970 switch (params->cmd) { 4971 case BNX2X_Q_CMD_SETUP: 4972 return bnx2x_q_send_setup_e2(bp, params); 4973 case BNX2X_Q_CMD_INIT: 4974 case BNX2X_Q_CMD_SETUP_TX_ONLY: 4975 case BNX2X_Q_CMD_DEACTIVATE: 4976 case BNX2X_Q_CMD_ACTIVATE: 4977 case BNX2X_Q_CMD_UPDATE: 4978 case BNX2X_Q_CMD_UPDATE_TPA: 4979 case BNX2X_Q_CMD_HALT: 4980 case BNX2X_Q_CMD_CFC_DEL: 4981 case BNX2X_Q_CMD_TERMINATE: 4982 case BNX2X_Q_CMD_EMPTY: 4983 return bnx2x_queue_send_cmd_cmn(bp, params); 4984 default: 4985 BNX2X_ERR("Unknown command: %d\n", params->cmd); 4986 return -EINVAL; 4987 } 4988 } 4989 4990 /** 4991 * bnx2x_queue_chk_transition - check state machine of a regular Queue 4992 * 4993 * @bp: device handle 4994 * @o: 4995 * @params: 4996 * 4997 * (not Forwarding) 4998 * It both checks if the requested command is legal in a current 4999 * state and, if it's legal, sets a `next_state' in the object 5000 * that will be used in the completion flow to set the `state' 5001 * of the object. 5002 * 5003 * returns 0 if a requested command is a legal transition, 5004 * -EINVAL otherwise. 5005 */ 5006 static int bnx2x_queue_chk_transition(struct bnx2x *bp, 5007 struct bnx2x_queue_sp_obj *o, 5008 struct bnx2x_queue_state_params *params) 5009 { 5010 enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX; 5011 enum bnx2x_queue_cmd cmd = params->cmd; 5012 struct bnx2x_queue_update_params *update_params = 5013 ¶ms->params.update; 5014 u8 next_tx_only = o->num_tx_only; 5015 5016 /* 5017 * Forget all pending for completion commands if a driver only state 5018 * transition has been requested. 5019 */ 5020 if (test_bit(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) { 5021 o->pending = 0; 5022 o->next_state = BNX2X_Q_STATE_MAX; 5023 } 5024 5025 /* 5026 * Don't allow a next state transition if we are in the middle of 5027 * the previous one. 5028 */ 5029 if (o->pending) 5030 return -EBUSY; 5031 5032 switch (state) { 5033 case BNX2X_Q_STATE_RESET: 5034 if (cmd == BNX2X_Q_CMD_INIT) 5035 next_state = BNX2X_Q_STATE_INITIALIZED; 5036 5037 break; 5038 case BNX2X_Q_STATE_INITIALIZED: 5039 if (cmd == BNX2X_Q_CMD_SETUP) { 5040 if (test_bit(BNX2X_Q_FLG_ACTIVE, 5041 ¶ms->params.setup.flags)) 5042 next_state = BNX2X_Q_STATE_ACTIVE; 5043 else 5044 next_state = BNX2X_Q_STATE_INACTIVE; 5045 } 5046 5047 break; 5048 case BNX2X_Q_STATE_ACTIVE: 5049 if (cmd == BNX2X_Q_CMD_DEACTIVATE) 5050 next_state = BNX2X_Q_STATE_INACTIVE; 5051 5052 else if ((cmd == BNX2X_Q_CMD_EMPTY) || 5053 (cmd == BNX2X_Q_CMD_UPDATE_TPA)) 5054 next_state = BNX2X_Q_STATE_ACTIVE; 5055 5056 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) { 5057 next_state = BNX2X_Q_STATE_MULTI_COS; 5058 next_tx_only = 1; 5059 } 5060 5061 else if (cmd == BNX2X_Q_CMD_HALT) 5062 next_state = BNX2X_Q_STATE_STOPPED; 5063 5064 else if (cmd == BNX2X_Q_CMD_UPDATE) { 5065 /* If "active" state change is requested, update the 5066 * state accordingly. 5067 */ 5068 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, 5069 &update_params->update_flags) && 5070 !test_bit(BNX2X_Q_UPDATE_ACTIVATE, 5071 &update_params->update_flags)) 5072 next_state = BNX2X_Q_STATE_INACTIVE; 5073 else 5074 next_state = BNX2X_Q_STATE_ACTIVE; 5075 } 5076 5077 break; 5078 case BNX2X_Q_STATE_MULTI_COS: 5079 if (cmd == BNX2X_Q_CMD_TERMINATE) 5080 next_state = BNX2X_Q_STATE_MCOS_TERMINATED; 5081 5082 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) { 5083 next_state = BNX2X_Q_STATE_MULTI_COS; 5084 next_tx_only = o->num_tx_only + 1; 5085 } 5086 5087 else if ((cmd == BNX2X_Q_CMD_EMPTY) || 5088 (cmd == BNX2X_Q_CMD_UPDATE_TPA)) 5089 next_state = BNX2X_Q_STATE_MULTI_COS; 5090 5091 else if (cmd == BNX2X_Q_CMD_UPDATE) { 5092 /* If "active" state change is requested, update the 5093 * state accordingly. 5094 */ 5095 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, 5096 &update_params->update_flags) && 5097 !test_bit(BNX2X_Q_UPDATE_ACTIVATE, 5098 &update_params->update_flags)) 5099 next_state = BNX2X_Q_STATE_INACTIVE; 5100 else 5101 next_state = BNX2X_Q_STATE_MULTI_COS; 5102 } 5103 5104 break; 5105 case BNX2X_Q_STATE_MCOS_TERMINATED: 5106 if (cmd == BNX2X_Q_CMD_CFC_DEL) { 5107 next_tx_only = o->num_tx_only - 1; 5108 if (next_tx_only == 0) 5109 next_state = BNX2X_Q_STATE_ACTIVE; 5110 else 5111 next_state = BNX2X_Q_STATE_MULTI_COS; 5112 } 5113 5114 break; 5115 case BNX2X_Q_STATE_INACTIVE: 5116 if (cmd == BNX2X_Q_CMD_ACTIVATE) 5117 next_state = BNX2X_Q_STATE_ACTIVE; 5118 5119 else if ((cmd == BNX2X_Q_CMD_EMPTY) || 5120 (cmd == BNX2X_Q_CMD_UPDATE_TPA)) 5121 next_state = BNX2X_Q_STATE_INACTIVE; 5122 5123 else if (cmd == BNX2X_Q_CMD_HALT) 5124 next_state = BNX2X_Q_STATE_STOPPED; 5125 5126 else if (cmd == BNX2X_Q_CMD_UPDATE) { 5127 /* If "active" state change is requested, update the 5128 * state accordingly. 5129 */ 5130 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, 5131 &update_params->update_flags) && 5132 test_bit(BNX2X_Q_UPDATE_ACTIVATE, 5133 &update_params->update_flags)){ 5134 if (o->num_tx_only == 0) 5135 next_state = BNX2X_Q_STATE_ACTIVE; 5136 else /* tx only queues exist for this queue */ 5137 next_state = BNX2X_Q_STATE_MULTI_COS; 5138 } else 5139 next_state = BNX2X_Q_STATE_INACTIVE; 5140 } 5141 5142 break; 5143 case BNX2X_Q_STATE_STOPPED: 5144 if (cmd == BNX2X_Q_CMD_TERMINATE) 5145 next_state = BNX2X_Q_STATE_TERMINATED; 5146 5147 break; 5148 case BNX2X_Q_STATE_TERMINATED: 5149 if (cmd == BNX2X_Q_CMD_CFC_DEL) 5150 next_state = BNX2X_Q_STATE_RESET; 5151 5152 break; 5153 default: 5154 BNX2X_ERR("Illegal state: %d\n", state); 5155 } 5156 5157 /* Transition is assured */ 5158 if (next_state != BNX2X_Q_STATE_MAX) { 5159 DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n", 5160 state, cmd, next_state); 5161 o->next_state = next_state; 5162 o->next_tx_only = next_tx_only; 5163 return 0; 5164 } 5165 5166 DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd); 5167 5168 return -EINVAL; 5169 } 5170 5171 void bnx2x_init_queue_obj(struct bnx2x *bp, 5172 struct bnx2x_queue_sp_obj *obj, 5173 u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id, 5174 void *rdata, 5175 dma_addr_t rdata_mapping, unsigned long type) 5176 { 5177 memset(obj, 0, sizeof(*obj)); 5178 5179 /* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */ 5180 BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt); 5181 5182 memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt); 5183 obj->max_cos = cid_cnt; 5184 obj->cl_id = cl_id; 5185 obj->func_id = func_id; 5186 obj->rdata = rdata; 5187 obj->rdata_mapping = rdata_mapping; 5188 obj->type = type; 5189 obj->next_state = BNX2X_Q_STATE_MAX; 5190 5191 if (CHIP_IS_E1x(bp)) 5192 obj->send_cmd = bnx2x_queue_send_cmd_e1x; 5193 else 5194 obj->send_cmd = bnx2x_queue_send_cmd_e2; 5195 5196 obj->check_transition = bnx2x_queue_chk_transition; 5197 5198 obj->complete_cmd = bnx2x_queue_comp_cmd; 5199 obj->wait_comp = bnx2x_queue_wait_comp; 5200 obj->set_pending = bnx2x_queue_set_pending; 5201 } 5202 5203 /********************** Function state object *********************************/ 5204 enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp, 5205 struct bnx2x_func_sp_obj *o) 5206 { 5207 /* in the middle of transaction - return INVALID state */ 5208 if (o->pending) 5209 return BNX2X_F_STATE_MAX; 5210 5211 /* 5212 * unsure the order of reading of o->pending and o->state 5213 * o->pending should be read first 5214 */ 5215 rmb(); 5216 5217 return o->state; 5218 } 5219 5220 static int bnx2x_func_wait_comp(struct bnx2x *bp, 5221 struct bnx2x_func_sp_obj *o, 5222 enum bnx2x_func_cmd cmd) 5223 { 5224 return bnx2x_state_wait(bp, cmd, &o->pending); 5225 } 5226 5227 /** 5228 * bnx2x_func_state_change_comp - complete the state machine transition 5229 * 5230 * @bp: device handle 5231 * @o: 5232 * @cmd: 5233 * 5234 * Called on state change transition. Completes the state 5235 * machine transition only - no HW interaction. 5236 */ 5237 static inline int bnx2x_func_state_change_comp(struct bnx2x *bp, 5238 struct bnx2x_func_sp_obj *o, 5239 enum bnx2x_func_cmd cmd) 5240 { 5241 unsigned long cur_pending = o->pending; 5242 5243 if (!test_and_clear_bit(cmd, &cur_pending)) { 5244 BNX2X_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n", 5245 cmd, BP_FUNC(bp), o->state, 5246 cur_pending, o->next_state); 5247 return -EINVAL; 5248 } 5249 5250 DP(BNX2X_MSG_SP, 5251 "Completing command %d for func %d, setting state to %d\n", 5252 cmd, BP_FUNC(bp), o->next_state); 5253 5254 o->state = o->next_state; 5255 o->next_state = BNX2X_F_STATE_MAX; 5256 5257 /* It's important that o->state and o->next_state are 5258 * updated before o->pending. 5259 */ 5260 wmb(); 5261 5262 clear_bit(cmd, &o->pending); 5263 smp_mb__after_clear_bit(); 5264 5265 return 0; 5266 } 5267 5268 /** 5269 * bnx2x_func_comp_cmd - complete the state change command 5270 * 5271 * @bp: device handle 5272 * @o: 5273 * @cmd: 5274 * 5275 * Checks that the arrived completion is expected. 5276 */ 5277 static int bnx2x_func_comp_cmd(struct bnx2x *bp, 5278 struct bnx2x_func_sp_obj *o, 5279 enum bnx2x_func_cmd cmd) 5280 { 5281 /* Complete the state machine part first, check if it's a 5282 * legal completion. 5283 */ 5284 int rc = bnx2x_func_state_change_comp(bp, o, cmd); 5285 return rc; 5286 } 5287 5288 /** 5289 * bnx2x_func_chk_transition - perform function state machine transition 5290 * 5291 * @bp: device handle 5292 * @o: 5293 * @params: 5294 * 5295 * It both checks if the requested command is legal in a current 5296 * state and, if it's legal, sets a `next_state' in the object 5297 * that will be used in the completion flow to set the `state' 5298 * of the object. 5299 * 5300 * returns 0 if a requested command is a legal transition, 5301 * -EINVAL otherwise. 5302 */ 5303 static int bnx2x_func_chk_transition(struct bnx2x *bp, 5304 struct bnx2x_func_sp_obj *o, 5305 struct bnx2x_func_state_params *params) 5306 { 5307 enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX; 5308 enum bnx2x_func_cmd cmd = params->cmd; 5309 5310 /* 5311 * Forget all pending for completion commands if a driver only state 5312 * transition has been requested. 5313 */ 5314 if (test_bit(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) { 5315 o->pending = 0; 5316 o->next_state = BNX2X_F_STATE_MAX; 5317 } 5318 5319 /* 5320 * Don't allow a next state transition if we are in the middle of 5321 * the previous one. 5322 */ 5323 if (o->pending) 5324 return -EBUSY; 5325 5326 switch (state) { 5327 case BNX2X_F_STATE_RESET: 5328 if (cmd == BNX2X_F_CMD_HW_INIT) 5329 next_state = BNX2X_F_STATE_INITIALIZED; 5330 5331 break; 5332 case BNX2X_F_STATE_INITIALIZED: 5333 if (cmd == BNX2X_F_CMD_START) 5334 next_state = BNX2X_F_STATE_STARTED; 5335 5336 else if (cmd == BNX2X_F_CMD_HW_RESET) 5337 next_state = BNX2X_F_STATE_RESET; 5338 5339 break; 5340 case BNX2X_F_STATE_STARTED: 5341 if (cmd == BNX2X_F_CMD_STOP) 5342 next_state = BNX2X_F_STATE_INITIALIZED; 5343 /* afex ramrods can be sent only in started mode, and only 5344 * if not pending for function_stop ramrod completion 5345 * for these events - next state remained STARTED. 5346 */ 5347 else if ((cmd == BNX2X_F_CMD_AFEX_UPDATE) && 5348 (!test_bit(BNX2X_F_CMD_STOP, &o->pending))) 5349 next_state = BNX2X_F_STATE_STARTED; 5350 5351 else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) && 5352 (!test_bit(BNX2X_F_CMD_STOP, &o->pending))) 5353 next_state = BNX2X_F_STATE_STARTED; 5354 else if (cmd == BNX2X_F_CMD_TX_STOP) 5355 next_state = BNX2X_F_STATE_TX_STOPPED; 5356 5357 break; 5358 case BNX2X_F_STATE_TX_STOPPED: 5359 if (cmd == BNX2X_F_CMD_TX_START) 5360 next_state = BNX2X_F_STATE_STARTED; 5361 5362 break; 5363 default: 5364 BNX2X_ERR("Unknown state: %d\n", state); 5365 } 5366 5367 /* Transition is assured */ 5368 if (next_state != BNX2X_F_STATE_MAX) { 5369 DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n", 5370 state, cmd, next_state); 5371 o->next_state = next_state; 5372 return 0; 5373 } 5374 5375 DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n", 5376 state, cmd); 5377 5378 return -EINVAL; 5379 } 5380 5381 /** 5382 * bnx2x_func_init_func - performs HW init at function stage 5383 * 5384 * @bp: device handle 5385 * @drv: 5386 * 5387 * Init HW when the current phase is 5388 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only 5389 * HW blocks. 5390 */ 5391 static inline int bnx2x_func_init_func(struct bnx2x *bp, 5392 const struct bnx2x_func_sp_drv_ops *drv) 5393 { 5394 return drv->init_hw_func(bp); 5395 } 5396 5397 /** 5398 * bnx2x_func_init_port - performs HW init at port stage 5399 * 5400 * @bp: device handle 5401 * @drv: 5402 * 5403 * Init HW when the current phase is 5404 * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and 5405 * FUNCTION-only HW blocks. 5406 * 5407 */ 5408 static inline int bnx2x_func_init_port(struct bnx2x *bp, 5409 const struct bnx2x_func_sp_drv_ops *drv) 5410 { 5411 int rc = drv->init_hw_port(bp); 5412 if (rc) 5413 return rc; 5414 5415 return bnx2x_func_init_func(bp, drv); 5416 } 5417 5418 /** 5419 * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage 5420 * 5421 * @bp: device handle 5422 * @drv: 5423 * 5424 * Init HW when the current phase is 5425 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP, 5426 * PORT-only and FUNCTION-only HW blocks. 5427 */ 5428 static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp, 5429 const struct bnx2x_func_sp_drv_ops *drv) 5430 { 5431 int rc = drv->init_hw_cmn_chip(bp); 5432 if (rc) 5433 return rc; 5434 5435 return bnx2x_func_init_port(bp, drv); 5436 } 5437 5438 /** 5439 * bnx2x_func_init_cmn - performs HW init at common stage 5440 * 5441 * @bp: device handle 5442 * @drv: 5443 * 5444 * Init HW when the current phase is 5445 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON, 5446 * PORT-only and FUNCTION-only HW blocks. 5447 */ 5448 static inline int bnx2x_func_init_cmn(struct bnx2x *bp, 5449 const struct bnx2x_func_sp_drv_ops *drv) 5450 { 5451 int rc = drv->init_hw_cmn(bp); 5452 if (rc) 5453 return rc; 5454 5455 return bnx2x_func_init_port(bp, drv); 5456 } 5457 5458 static int bnx2x_func_hw_init(struct bnx2x *bp, 5459 struct bnx2x_func_state_params *params) 5460 { 5461 u32 load_code = params->params.hw_init.load_phase; 5462 struct bnx2x_func_sp_obj *o = params->f_obj; 5463 const struct bnx2x_func_sp_drv_ops *drv = o->drv; 5464 int rc = 0; 5465 5466 DP(BNX2X_MSG_SP, "function %d load_code %x\n", 5467 BP_ABS_FUNC(bp), load_code); 5468 5469 /* Prepare buffers for unzipping the FW */ 5470 rc = drv->gunzip_init(bp); 5471 if (rc) 5472 return rc; 5473 5474 /* Prepare FW */ 5475 rc = drv->init_fw(bp); 5476 if (rc) { 5477 BNX2X_ERR("Error loading firmware\n"); 5478 goto init_err; 5479 } 5480 5481 /* Handle the beginning of COMMON_XXX pases separatelly... */ 5482 switch (load_code) { 5483 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: 5484 rc = bnx2x_func_init_cmn_chip(bp, drv); 5485 if (rc) 5486 goto init_err; 5487 5488 break; 5489 case FW_MSG_CODE_DRV_LOAD_COMMON: 5490 rc = bnx2x_func_init_cmn(bp, drv); 5491 if (rc) 5492 goto init_err; 5493 5494 break; 5495 case FW_MSG_CODE_DRV_LOAD_PORT: 5496 rc = bnx2x_func_init_port(bp, drv); 5497 if (rc) 5498 goto init_err; 5499 5500 break; 5501 case FW_MSG_CODE_DRV_LOAD_FUNCTION: 5502 rc = bnx2x_func_init_func(bp, drv); 5503 if (rc) 5504 goto init_err; 5505 5506 break; 5507 default: 5508 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code); 5509 rc = -EINVAL; 5510 } 5511 5512 init_err: 5513 drv->gunzip_end(bp); 5514 5515 /* In case of success, complete the comand immediatelly: no ramrods 5516 * have been sent. 5517 */ 5518 if (!rc) 5519 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT); 5520 5521 return rc; 5522 } 5523 5524 /** 5525 * bnx2x_func_reset_func - reset HW at function stage 5526 * 5527 * @bp: device handle 5528 * @drv: 5529 * 5530 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only 5531 * FUNCTION-only HW blocks. 5532 */ 5533 static inline void bnx2x_func_reset_func(struct bnx2x *bp, 5534 const struct bnx2x_func_sp_drv_ops *drv) 5535 { 5536 drv->reset_hw_func(bp); 5537 } 5538 5539 /** 5540 * bnx2x_func_reset_port - reser HW at port stage 5541 * 5542 * @bp: device handle 5543 * @drv: 5544 * 5545 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset 5546 * FUNCTION-only and PORT-only HW blocks. 5547 * 5548 * !!!IMPORTANT!!! 5549 * 5550 * It's important to call reset_port before reset_func() as the last thing 5551 * reset_func does is pf_disable() thus disabling PGLUE_B, which 5552 * makes impossible any DMAE transactions. 5553 */ 5554 static inline void bnx2x_func_reset_port(struct bnx2x *bp, 5555 const struct bnx2x_func_sp_drv_ops *drv) 5556 { 5557 drv->reset_hw_port(bp); 5558 bnx2x_func_reset_func(bp, drv); 5559 } 5560 5561 /** 5562 * bnx2x_func_reset_cmn - reser HW at common stage 5563 * 5564 * @bp: device handle 5565 * @drv: 5566 * 5567 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and 5568 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON, 5569 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks. 5570 */ 5571 static inline void bnx2x_func_reset_cmn(struct bnx2x *bp, 5572 const struct bnx2x_func_sp_drv_ops *drv) 5573 { 5574 bnx2x_func_reset_port(bp, drv); 5575 drv->reset_hw_cmn(bp); 5576 } 5577 5578 5579 static inline int bnx2x_func_hw_reset(struct bnx2x *bp, 5580 struct bnx2x_func_state_params *params) 5581 { 5582 u32 reset_phase = params->params.hw_reset.reset_phase; 5583 struct bnx2x_func_sp_obj *o = params->f_obj; 5584 const struct bnx2x_func_sp_drv_ops *drv = o->drv; 5585 5586 DP(BNX2X_MSG_SP, "function %d reset_phase %x\n", BP_ABS_FUNC(bp), 5587 reset_phase); 5588 5589 switch (reset_phase) { 5590 case FW_MSG_CODE_DRV_UNLOAD_COMMON: 5591 bnx2x_func_reset_cmn(bp, drv); 5592 break; 5593 case FW_MSG_CODE_DRV_UNLOAD_PORT: 5594 bnx2x_func_reset_port(bp, drv); 5595 break; 5596 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION: 5597 bnx2x_func_reset_func(bp, drv); 5598 break; 5599 default: 5600 BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n", 5601 reset_phase); 5602 break; 5603 } 5604 5605 /* Complete the comand immediatelly: no ramrods have been sent. */ 5606 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET); 5607 5608 return 0; 5609 } 5610 5611 static inline int bnx2x_func_send_start(struct bnx2x *bp, 5612 struct bnx2x_func_state_params *params) 5613 { 5614 struct bnx2x_func_sp_obj *o = params->f_obj; 5615 struct function_start_data *rdata = 5616 (struct function_start_data *)o->rdata; 5617 dma_addr_t data_mapping = o->rdata_mapping; 5618 struct bnx2x_func_start_params *start_params = ¶ms->params.start; 5619 5620 memset(rdata, 0, sizeof(*rdata)); 5621 5622 /* Fill the ramrod data with provided parameters */ 5623 rdata->function_mode = cpu_to_le16(start_params->mf_mode); 5624 rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag); 5625 rdata->path_id = BP_PATH(bp); 5626 rdata->network_cos_mode = start_params->network_cos_mode; 5627 5628 /* 5629 * No need for an explicit memory barrier here as long we would 5630 * need to ensure the ordering of writing to the SPQ element 5631 * and updating of the SPQ producer which involves a memory 5632 * read and we will have to put a full memory barrier there 5633 * (inside bnx2x_sp_post()). 5634 */ 5635 5636 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 5637 U64_HI(data_mapping), 5638 U64_LO(data_mapping), NONE_CONNECTION_TYPE); 5639 } 5640 5641 static inline int bnx2x_func_send_afex_update(struct bnx2x *bp, 5642 struct bnx2x_func_state_params *params) 5643 { 5644 struct bnx2x_func_sp_obj *o = params->f_obj; 5645 struct function_update_data *rdata = 5646 (struct function_update_data *)o->afex_rdata; 5647 dma_addr_t data_mapping = o->afex_rdata_mapping; 5648 struct bnx2x_func_afex_update_params *afex_update_params = 5649 ¶ms->params.afex_update; 5650 5651 memset(rdata, 0, sizeof(*rdata)); 5652 5653 /* Fill the ramrod data with provided parameters */ 5654 rdata->vif_id_change_flg = 1; 5655 rdata->vif_id = cpu_to_le16(afex_update_params->vif_id); 5656 rdata->afex_default_vlan_change_flg = 1; 5657 rdata->afex_default_vlan = 5658 cpu_to_le16(afex_update_params->afex_default_vlan); 5659 rdata->allowed_priorities_change_flg = 1; 5660 rdata->allowed_priorities = afex_update_params->allowed_priorities; 5661 5662 /* No need for an explicit memory barrier here as long we would 5663 * need to ensure the ordering of writing to the SPQ element 5664 * and updating of the SPQ producer which involves a memory 5665 * read and we will have to put a full memory barrier there 5666 * (inside bnx2x_sp_post()). 5667 */ 5668 DP(BNX2X_MSG_SP, 5669 "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n", 5670 rdata->vif_id, 5671 rdata->afex_default_vlan, rdata->allowed_priorities); 5672 5673 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0, 5674 U64_HI(data_mapping), 5675 U64_LO(data_mapping), NONE_CONNECTION_TYPE); 5676 } 5677 5678 static 5679 inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp, 5680 struct bnx2x_func_state_params *params) 5681 { 5682 struct bnx2x_func_sp_obj *o = params->f_obj; 5683 struct afex_vif_list_ramrod_data *rdata = 5684 (struct afex_vif_list_ramrod_data *)o->afex_rdata; 5685 struct bnx2x_func_afex_viflists_params *afex_viflist_params = 5686 ¶ms->params.afex_viflists; 5687 u64 *p_rdata = (u64 *)rdata; 5688 5689 memset(rdata, 0, sizeof(*rdata)); 5690 5691 /* Fill the ramrod data with provided parameters */ 5692 rdata->vif_list_index = afex_viflist_params->vif_list_index; 5693 rdata->func_bit_map = afex_viflist_params->func_bit_map; 5694 rdata->afex_vif_list_command = 5695 afex_viflist_params->afex_vif_list_command; 5696 rdata->func_to_clear = afex_viflist_params->func_to_clear; 5697 5698 /* send in echo type of sub command */ 5699 rdata->echo = afex_viflist_params->afex_vif_list_command; 5700 5701 /* No need for an explicit memory barrier here as long we would 5702 * need to ensure the ordering of writing to the SPQ element 5703 * and updating of the SPQ producer which involves a memory 5704 * read and we will have to put a full memory barrier there 5705 * (inside bnx2x_sp_post()). 5706 */ 5707 5708 DP(BNX2X_MSG_SP, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n", 5709 rdata->afex_vif_list_command, rdata->vif_list_index, 5710 rdata->func_bit_map, rdata->func_to_clear); 5711 5712 /* this ramrod sends data directly and not through DMA mapping */ 5713 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0, 5714 U64_HI(*p_rdata), U64_LO(*p_rdata), 5715 NONE_CONNECTION_TYPE); 5716 } 5717 5718 static inline int bnx2x_func_send_stop(struct bnx2x *bp, 5719 struct bnx2x_func_state_params *params) 5720 { 5721 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 5722 NONE_CONNECTION_TYPE); 5723 } 5724 5725 static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp, 5726 struct bnx2x_func_state_params *params) 5727 { 5728 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0, 5729 NONE_CONNECTION_TYPE); 5730 } 5731 static inline int bnx2x_func_send_tx_start(struct bnx2x *bp, 5732 struct bnx2x_func_state_params *params) 5733 { 5734 struct bnx2x_func_sp_obj *o = params->f_obj; 5735 struct flow_control_configuration *rdata = 5736 (struct flow_control_configuration *)o->rdata; 5737 dma_addr_t data_mapping = o->rdata_mapping; 5738 struct bnx2x_func_tx_start_params *tx_start_params = 5739 ¶ms->params.tx_start; 5740 int i; 5741 5742 memset(rdata, 0, sizeof(*rdata)); 5743 5744 rdata->dcb_enabled = tx_start_params->dcb_enabled; 5745 rdata->dcb_version = tx_start_params->dcb_version; 5746 rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en; 5747 5748 for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++) 5749 rdata->traffic_type_to_priority_cos[i] = 5750 tx_start_params->traffic_type_to_priority_cos[i]; 5751 5752 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0, 5753 U64_HI(data_mapping), 5754 U64_LO(data_mapping), NONE_CONNECTION_TYPE); 5755 } 5756 5757 static int bnx2x_func_send_cmd(struct bnx2x *bp, 5758 struct bnx2x_func_state_params *params) 5759 { 5760 switch (params->cmd) { 5761 case BNX2X_F_CMD_HW_INIT: 5762 return bnx2x_func_hw_init(bp, params); 5763 case BNX2X_F_CMD_START: 5764 return bnx2x_func_send_start(bp, params); 5765 case BNX2X_F_CMD_STOP: 5766 return bnx2x_func_send_stop(bp, params); 5767 case BNX2X_F_CMD_HW_RESET: 5768 return bnx2x_func_hw_reset(bp, params); 5769 case BNX2X_F_CMD_AFEX_UPDATE: 5770 return bnx2x_func_send_afex_update(bp, params); 5771 case BNX2X_F_CMD_AFEX_VIFLISTS: 5772 return bnx2x_func_send_afex_viflists(bp, params); 5773 case BNX2X_F_CMD_TX_STOP: 5774 return bnx2x_func_send_tx_stop(bp, params); 5775 case BNX2X_F_CMD_TX_START: 5776 return bnx2x_func_send_tx_start(bp, params); 5777 default: 5778 BNX2X_ERR("Unknown command: %d\n", params->cmd); 5779 return -EINVAL; 5780 } 5781 } 5782 5783 void bnx2x_init_func_obj(struct bnx2x *bp, 5784 struct bnx2x_func_sp_obj *obj, 5785 void *rdata, dma_addr_t rdata_mapping, 5786 void *afex_rdata, dma_addr_t afex_rdata_mapping, 5787 struct bnx2x_func_sp_drv_ops *drv_iface) 5788 { 5789 memset(obj, 0, sizeof(*obj)); 5790 5791 mutex_init(&obj->one_pending_mutex); 5792 5793 obj->rdata = rdata; 5794 obj->rdata_mapping = rdata_mapping; 5795 obj->afex_rdata = afex_rdata; 5796 obj->afex_rdata_mapping = afex_rdata_mapping; 5797 obj->send_cmd = bnx2x_func_send_cmd; 5798 obj->check_transition = bnx2x_func_chk_transition; 5799 obj->complete_cmd = bnx2x_func_comp_cmd; 5800 obj->wait_comp = bnx2x_func_wait_comp; 5801 5802 obj->drv = drv_iface; 5803 } 5804 5805 /** 5806 * bnx2x_func_state_change - perform Function state change transition 5807 * 5808 * @bp: device handle 5809 * @params: parameters to perform the transaction 5810 * 5811 * returns 0 in case of successfully completed transition, 5812 * negative error code in case of failure, positive 5813 * (EBUSY) value if there is a completion to that is 5814 * still pending (possible only if RAMROD_COMP_WAIT is 5815 * not set in params->ramrod_flags for asynchronous 5816 * commands). 5817 */ 5818 int bnx2x_func_state_change(struct bnx2x *bp, 5819 struct bnx2x_func_state_params *params) 5820 { 5821 struct bnx2x_func_sp_obj *o = params->f_obj; 5822 int rc; 5823 enum bnx2x_func_cmd cmd = params->cmd; 5824 unsigned long *pending = &o->pending; 5825 5826 mutex_lock(&o->one_pending_mutex); 5827 5828 /* Check that the requested transition is legal */ 5829 if (o->check_transition(bp, o, params)) { 5830 mutex_unlock(&o->one_pending_mutex); 5831 return -EINVAL; 5832 } 5833 5834 /* Set "pending" bit */ 5835 set_bit(cmd, pending); 5836 5837 /* Don't send a command if only driver cleanup was requested */ 5838 if (test_bit(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) { 5839 bnx2x_func_state_change_comp(bp, o, cmd); 5840 mutex_unlock(&o->one_pending_mutex); 5841 } else { 5842 /* Send a ramrod */ 5843 rc = o->send_cmd(bp, params); 5844 5845 mutex_unlock(&o->one_pending_mutex); 5846 5847 if (rc) { 5848 o->next_state = BNX2X_F_STATE_MAX; 5849 clear_bit(cmd, pending); 5850 smp_mb__after_clear_bit(); 5851 return rc; 5852 } 5853 5854 if (test_bit(RAMROD_COMP_WAIT, ¶ms->ramrod_flags)) { 5855 rc = o->wait_comp(bp, o, cmd); 5856 if (rc) 5857 return rc; 5858 5859 return 0; 5860 } 5861 } 5862 5863 return !!test_bit(cmd, pending); 5864 } 5865