/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (C) 2013 Emulex
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Emulex Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Contact Information:
 * freebsd-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

/* $FreeBSD$ */

#include "oce_if.h"

/*****************************************************
 * local queue functions
 *****************************************************/

static struct oce_wq *oce_wq_init(POCE_SOFTC sc,
    uint32_t q_len, uint32_t wq_type);
static int oce_wq_create(struct oce_wq *wq, struct oce_eq *eq);
static void oce_wq_free(struct oce_wq *wq);
static void oce_wq_del(struct oce_wq *wq);
static struct oce_rq *oce_rq_init(POCE_SOFTC sc,
    uint32_t q_len,
    uint32_t frag_size,
    uint32_t mtu, uint32_t rss);
static int oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq);
static void oce_rq_free(struct oce_rq *rq);
static void oce_rq_del(struct oce_rq *rq);
static struct oce_eq *oce_eq_create(POCE_SOFTC sc,
    uint32_t q_len,
    uint32_t item_size,
    uint32_t eq_delay,
    uint32_t vector);
static void oce_eq_del(struct oce_eq *eq);
static struct oce_mq *oce_mq_create(POCE_SOFTC sc,
    struct oce_eq *eq, uint32_t q_len);
static void oce_mq_free(struct oce_mq *mq);
static int oce_destroy_q(POCE_SOFTC sc, struct oce_mbx *mbx,
    size_t req_size, enum qtype qtype, int version);
struct oce_cq *oce_cq_create(POCE_SOFTC sc,
    struct oce_eq *eq,
    uint32_t q_len,
    uint32_t item_size,
    uint32_t sol_event,
    uint32_t is_eventable,
    uint32_t nodelay, uint32_t ncoalesce);
static void oce_cq_del(POCE_SOFTC sc, struct oce_cq *cq);
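
/*
 * Queue hierarchy note (from the creation order below): every WQ, RQ and
 * MQ owns a completion queue, and each CQ is attached to one of the
 * per-vector event queues, so bring-up runs EQ -> CQ -> WQ/RQ/MQ and
 * teardown runs in the reverse order.
 */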

/**
 * @brief Create and initialize all the queues on the board
 * @param sc software handle to the device
 * @returns 0 if successful, or error
 **/
int
oce_queue_init_all(POCE_SOFTC sc)
{
	int rc = 0, i, vector;
	struct oce_wq *wq;
	struct oce_rq *rq;
	struct oce_aic_obj *aic;

	/* alloc TX/RX queues */
	for_all_wq_queues(sc, wq, i) {
		sc->wq[i] = oce_wq_init(sc, sc->tx_ring_size,
		    NIC_WQ_TYPE_STANDARD);
		if (!sc->wq[i])
			goto error;
	}

	for_all_rq_queues(sc, rq, i) {
		sc->rq[i] = oce_rq_init(sc, sc->rx_ring_size, sc->rq_frag_size,
		    OCE_MAX_JUMBO_FRAME_SIZE,
		    (i == 0) ? 0 : is_rss_enabled(sc));
		if (!sc->rq[i])
			goto error;
	}

	/* Create network interface on card */
	if (oce_create_nw_interface(sc))
		goto error;

	/* create all of the event queues */
	for (vector = 0; vector < sc->intr_count; vector++) {
		/* setup aic defaults for each event queue */
		aic = &sc->aic_obj[vector];
		aic->max_eqd = OCE_MAX_EQD;
		aic->min_eqd = OCE_MIN_EQD;
		aic->et_eqd = OCE_MIN_EQD;
		aic->enable = TRUE;

		sc->eq[vector] = oce_eq_create(sc,
		    sc->enable_hwlro ? EQ_LEN_2048 : EQ_LEN_1024,
		    EQE_SIZE_4, 0, vector);

		if (!sc->eq[vector])
			goto error;
	}

	/* create Tx, Rx and mcc queues */
	for_all_wq_queues(sc, wq, i) {
		rc = oce_wq_create(wq, sc->eq[i]);
		if (rc)
			goto error;
		wq->queue_index = i;
		TASK_INIT(&wq->txtask, 1, oce_tx_task, wq);
	}

	for_all_rq_queues(sc, rq, i) {
		/* the default RQ (i == 0) shares EQ 0; RSS RQ i uses EQ i-1 */
		rc = oce_rq_create(rq, sc->if_id,
		    sc->eq[(i == 0) ? 0 : (i - 1)]);
		if (rc)
			goto error;
		rq->queue_index = i;
	}

	sc->mq = oce_mq_create(sc, sc->eq[0], 64);
	if (!sc->mq)
		goto error;

	return rc;

error:
	oce_queue_release_all(sc);
	return 1;
}
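
/*
 * Each oce_*_del() call below also tears down the CQ owned by that
 * queue; the EQs are deleted last, once no CQ references them.
 */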

/**
 * @brief Releases all the queues created, including their CQs
 * @param sc software handle to the device
 */
void
oce_queue_release_all(POCE_SOFTC sc)
{
	int i = 0;
	struct oce_wq *wq;
	struct oce_rq *rq;
	struct oce_eq *eq;

	/* before deleting lro queues, we have to disable hwlro */
	if (sc->enable_hwlro)
		oce_mbox_nic_set_iface_lro_config(sc, 0);

	for_all_rq_queues(sc, rq, i) {
		if (rq) {
			oce_rq_del(sc->rq[i]);
			oce_rq_free(sc->rq[i]);
		}
	}

	for_all_wq_queues(sc, wq, i) {
		if (wq) {
			oce_wq_del(sc->wq[i]);
			oce_wq_free(sc->wq[i]);
		}
	}

	if (sc->mq)
		oce_mq_free(sc->mq);

	for_all_evnt_queues(sc, eq, i) {
		if (eq)
			oce_eq_del(sc->eq[i]);
	}
}

/**
 * @brief Function to create a WQ for NIC Tx
 * @param sc software handle to the device
 * @param q_len number of entries in the queue
 * @param wq_type work queue type
 * @returns the pointer to the WQ created or NULL on failure
 */
static struct oce_wq *
oce_wq_init(POCE_SOFTC sc, uint32_t q_len, uint32_t wq_type)
{
	struct oce_wq *wq;
	int rc = 0, i;

	/* q_len must be min 256 and max 2k */
	if (q_len < 256 || q_len > 2048) {
		device_printf(sc->dev,
		    "Invalid q length. Must be "
		    "[256, 2048]: 0x%x\n", q_len);
		return NULL;
	}

	/* allocate wq */
	wq = malloc(sizeof(struct oce_wq), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!wq)
		return NULL;

	/* Set the wq config */
	wq->cfg.q_len = q_len;
	wq->cfg.wq_type = (uint8_t) wq_type;
	wq->cfg.eqd = OCE_DEFAULT_WQ_EQD;
	wq->cfg.nbufs = 2 * wq->cfg.q_len;
	wq->cfg.nhdl = 2 * wq->cfg.q_len;

	wq->parent = (void *)sc;

	rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
	    1, 0,
	    BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    OCE_MAX_TX_SIZE,
	    OCE_MAX_TX_ELEMENTS,
	    PAGE_SIZE, 0, NULL, NULL, &wq->tag);
	if (rc)
		goto free_wq;

	for (i = 0; i < OCE_WQ_PACKET_ARRAY_SIZE; i++) {
		rc = bus_dmamap_create(wq->tag, 0, &wq->pckts[i].map);
		if (rc)
			goto free_wq;
	}

	wq->ring = oce_create_ring_buffer(sc, q_len, NIC_WQE_SIZE);
	if (!wq->ring)
		goto free_wq;

	LOCK_CREATE(&wq->tx_lock, "TX_lock");
	LOCK_CREATE(&wq->tx_compl_lock, "WQ_HANDLER_LOCK");

	/* Allocate buf ring for multiqueue */
	wq->br = buf_ring_alloc(4096, M_DEVBUF,
	    M_WAITOK, &wq->tx_lock.mutex);
	if (!wq->br)
		goto free_wq;
	return wq;

free_wq:
	device_printf(sc->dev, "Create WQ failed\n");
	oce_wq_free(wq);
	return NULL;
}
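
/*
 * Note that oce_wq_init() above only allocates host-side resources
 * (DMA tag and maps, ring memory, buf_ring); the firmware work queue
 * itself is created later by oce_wq_create(), once the EQs exist.
 */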

/**
 * @brief Frees the work queue
 * @param wq pointer to work queue to free
 */
static void
oce_wq_free(struct oce_wq *wq)
{
	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
	int i;

	taskqueue_drain(taskqueue_swi, &wq->txtask);

	if (wq->ring != NULL) {
		oce_destroy_ring_buffer(sc, wq->ring);
		wq->ring = NULL;
	}

	for (i = 0; i < OCE_WQ_PACKET_ARRAY_SIZE; i++) {
		if (wq->pckts[i].map != NULL) {
			bus_dmamap_unload(wq->tag, wq->pckts[i].map);
			bus_dmamap_destroy(wq->tag, wq->pckts[i].map);
			wq->pckts[i].map = NULL;
		}
	}

	if (wq->tag != NULL)
		bus_dma_tag_destroy(wq->tag);
	if (wq->br != NULL)
		buf_ring_free(wq->br, M_DEVBUF);

	LOCK_DESTROY(&wq->tx_lock);
	LOCK_DESTROY(&wq->tx_compl_lock);
	free(wq, M_DEVBUF);
}

/**
 * @brief Create a work queue
 * @param wq pointer to work queue
 * @param eq pointer to associated event queue
 * @returns 0 on success, ENXIO or a mailbox error otherwise
 */
static int
oce_wq_create(struct oce_wq *wq, struct oce_eq *eq)
{
	POCE_SOFTC sc = wq->parent;
	struct oce_cq *cq;
	int rc = 0;

	/* create the CQ */
	cq = oce_cq_create(sc,
	    eq,
	    CQ_LEN_1024,
	    sizeof(struct oce_nic_tx_cqe), 0, 1, 0, 3);
	if (!cq)
		return ENXIO;

	wq->cq = cq;

	rc = oce_mbox_create_wq(wq);
	if (rc)
		goto error;

	wq->qstate = QCREATED;
	wq->wq_free = wq->cfg.q_len;
	wq->ring->cidx = 0;
	wq->ring->pidx = 0;

	eq->cq[eq->cq_valid] = cq;
	eq->cq_valid++;
	cq->cb_arg = wq;
	cq->cq_handler = oce_wq_handler;

	return 0;

error:
	device_printf(sc->dev, "WQ create failed\n");
	oce_wq_del(wq);
	return rc;
}

/**
 * @brief Delete a work queue
 * @param wq pointer to work queue
 */
static void
oce_wq_del(struct oce_wq *wq)
{
	struct oce_mbx mbx;
	struct mbx_delete_nic_wq *fwcmd;
	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;

	if (wq->qstate == QCREATED) {
		bzero(&mbx, sizeof(struct oce_mbx));
		/* now fill the command */
		fwcmd = (struct mbx_delete_nic_wq *)&mbx.payload;
		fwcmd->params.req.wq_id = wq->wq_id;
		(void)oce_destroy_q(sc, &mbx,
		    sizeof(struct mbx_delete_nic_wq), QTYPE_WQ, 0);
		wq->qstate = QDELETED;
	}

	if (wq->cq != NULL) {
		oce_cq_del(sc, wq->cq);
		wq->cq = NULL;
	}
}
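
/*
 * RQs follow the same two-step pattern as WQs: oce_rq_init() below only
 * allocates host resources, while the firmware RQ is created later from
 * oce_start_rx() (see the qstate handling in oce_rq_create()).
 */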

/**
 * @brief function to allocate receive queue resources
 * @param sc software handle to the device
 * @param q_len length of receive queue
 * @param frag_size size of a receive queue fragment
 * @param mtu maximum transmission unit
 * @param rss is-rss-queue flag
 * @returns the pointer to the RQ created or NULL on failure
 */
static struct oce_rq *
oce_rq_init(POCE_SOFTC sc,
    uint32_t q_len,
    uint32_t frag_size,
    uint32_t mtu, uint32_t rss)
{
	struct oce_rq *rq;
	int rc = 0, i;

	if (OCE_LOG2(frag_size) <= 0)
		return NULL;

	if ((q_len == 0) || (q_len > 1024))
		return NULL;

	/* allocate the rq */
	rq = malloc(sizeof(struct oce_rq), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!rq)
		return NULL;

	rq->cfg.q_len = q_len;
	rq->cfg.frag_size = frag_size;
	rq->cfg.mtu = mtu;
	rq->cfg.eqd = 0;
	rq->lro_pkts_queued = 0;
	rq->cfg.is_rss_queue = rss;
	rq->pending = 0;

	rq->parent = (void *)sc;

	rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
	    1, 0,
	    BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    oce_rq_buf_size,
	    1, oce_rq_buf_size, 0, NULL, NULL, &rq->tag);
	if (rc)
		goto free_rq;

	for (i = 0; i < OCE_RQ_PACKET_ARRAY_SIZE; i++) {
		rc = bus_dmamap_create(rq->tag, 0, &rq->pckts[i].map);
		if (rc)
			goto free_rq;
	}

	/* create the ring buffer */
	rq->ring = oce_create_ring_buffer(sc, q_len,
	    sizeof(struct oce_nic_rqe));
	if (!rq->ring)
		goto free_rq;

	LOCK_CREATE(&rq->rx_lock, "RX_lock");

	return rq;

free_rq:
	device_printf(sc->dev, "Create RQ failed\n");
	oce_rq_free(rq);
	return NULL;
}

/**
 * @brief Free a receive queue
 * @param rq pointer to receive queue
 */
static void
oce_rq_free(struct oce_rq *rq)
{
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	int i = 0;

	if (rq->ring != NULL) {
		oce_destroy_ring_buffer(sc, rq->ring);
		rq->ring = NULL;
	}
	for (i = 0; i < OCE_RQ_PACKET_ARRAY_SIZE; i++) {
		if (rq->pckts[i].map != NULL) {
			bus_dmamap_unload(rq->tag, rq->pckts[i].map);
			bus_dmamap_destroy(rq->tag, rq->pckts[i].map);
			rq->pckts[i].map = NULL;
		}
		if (rq->pckts[i].mbuf) {
			m_free(rq->pckts[i].mbuf);
			rq->pckts[i].mbuf = NULL;
		}
	}

	if (rq->tag != NULL)
		bus_dma_tag_destroy(rq->tag);

	LOCK_DESTROY(&rq->rx_lock);
	free(rq, M_DEVBUF);
}

/**
 * @brief Create a receive queue
 * @param rq receive queue
 * @param if_id interface identifier index
 * @param eq pointer to event queue
 * @returns 0 on success, ENXIO on CQ creation failure
 */
static int
oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq)
{
	POCE_SOFTC sc = rq->parent;
	struct oce_cq *cq;

	cq = oce_cq_create(sc, eq,
	    sc->enable_hwlro ? CQ_LEN_2048 : CQ_LEN_1024,
	    sizeof(struct oce_nic_rx_cqe), 0, 1, 0, 3);
	if (!cq)
		return ENXIO;

	rq->cq = cq;
	rq->cfg.if_id = if_id;

	/* Don't create the RQ here; it is created in if_activate */
	rq->qstate = 0;
	rq->ring->cidx = 0;
	rq->ring->pidx = 0;
	eq->cq[eq->cq_valid] = cq;
	eq->cq_valid++;
	cq->cb_arg = rq;
	cq->cq_handler = oce_rq_handler;

	return 0;
}

/**
 * @brief Delete a receive queue
 * @param rq receive queue
 */
static void
oce_rq_del(struct oce_rq *rq)
{
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	struct oce_mbx mbx;
	struct mbx_delete_nic_rq *fwcmd;
	struct mbx_delete_nic_rq_v1 *fwcmd1;

	if (rq->qstate == QCREATED) {
		bzero(&mbx, sizeof(mbx));
		if (!rq->islro) {
			fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
			fwcmd->params.req.rq_id = rq->rq_id;
			(void)oce_destroy_q(sc, &mbx,
			    sizeof(struct mbx_delete_nic_rq), QTYPE_RQ, 0);
		} else {
			/* LRO RQs are deleted with the v1 command */
			fwcmd1 = (struct mbx_delete_nic_rq_v1 *)&mbx.payload;
			fwcmd1->params.req.rq_id = rq->rq_id;
			fwcmd1->params.req.rq_flags =
			    (NIC_RQ_FLAGS_RSS | NIC_RQ_FLAGS_LRO);
			(void)oce_destroy_q(sc, &mbx,
			    sizeof(struct mbx_delete_nic_rq_v1), QTYPE_RQ, 1);
		}
		rq->qstate = QDELETED;
	}

	if (rq->cq != NULL) {
		oce_cq_del(sc, rq->cq);
		rq->cq = NULL;
	}
}
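
/*
 * eq_id is seeded with 0xffff in oce_eq_create() so that oce_eq_del()
 * can tell whether the firmware EQ was actually created before issuing
 * a destroy command for it.
 */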

/**
 * @brief function to create an event queue
 * @param sc software handle to the device
 * @param q_len length of event queue
 * @param item_size size of an event queue item
 * @param eq_delay event queue delay
 * @param vector interrupt vector the event queue services
 * @retval eq success, pointer to event queue
 * @retval NULL failure
 */
static struct oce_eq *
oce_eq_create(POCE_SOFTC sc, uint32_t q_len,
    uint32_t item_size,
    uint32_t eq_delay,
    uint32_t vector)
{
	struct oce_eq *eq;
	int rc = 0;

	/* allocate an eq */
	eq = malloc(sizeof(struct oce_eq), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (eq == NULL)
		return NULL;

	eq->parent = (void *)sc;
	eq->eq_id = 0xffff;
	eq->ring = oce_create_ring_buffer(sc, q_len, item_size);
	if (!eq->ring)
		goto free_eq;

	eq->eq_cfg.q_len = q_len;
	eq->eq_cfg.item_size = item_size;
	eq->eq_cfg.cur_eqd = (uint8_t) eq_delay;

	rc = oce_mbox_create_eq(eq);
	if (rc)
		goto free_eq;

	sc->intrs[sc->neqs++].eq = eq;

	return eq;

free_eq:
	oce_eq_del(eq);
	return NULL;
}

/**
 * @brief Function to delete an event queue
 * @param eq pointer to an event queue
 */
static void
oce_eq_del(struct oce_eq *eq)
{
	struct oce_mbx mbx;
	struct mbx_destroy_common_eq *fwcmd;
	POCE_SOFTC sc = (POCE_SOFTC) eq->parent;

	if (eq->eq_id != 0xffff) {
		bzero(&mbx, sizeof(mbx));
		fwcmd = (struct mbx_destroy_common_eq *)&mbx.payload;
		fwcmd->params.req.id = eq->eq_id;
		(void)oce_destroy_q(sc, &mbx,
		    sizeof(struct mbx_destroy_common_eq), QTYPE_EQ, 0);
	}

	if (eq->ring != NULL) {
		oce_destroy_ring_buffer(sc, eq->ring);
		eq->ring = NULL;
	}

	free(eq, M_DEVBUF);
}
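
/*
 * The MQ also acts as the async event queue: on XE201 the async CQ id
 * and event bitmap are programmed explicitly through the v1 context,
 * while older chips subscribe to all async events via the v0 context.
 */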

/**
 * @brief Function to create an MQ
 * @param sc software handle to the device
 * @param eq the EQ to associate with the MQ for event notification
 * @param q_len the number of entries to create in the MQ
 * @returns pointer to the created MQ, or NULL on failure
 */
static struct oce_mq *
oce_mq_create(POCE_SOFTC sc, struct oce_eq *eq, uint32_t q_len)
{
	struct oce_mbx mbx;
	struct mbx_create_common_mq_ex *fwcmd = NULL;
	struct oce_mq *mq = NULL;
	int rc = 0;
	struct oce_cq *cq;
	oce_mq_ext_ctx_t *ctx;
	uint32_t num_pages;
	uint32_t page_size;
	int version;

	cq = oce_cq_create(sc, eq, CQ_LEN_256,
	    sizeof(struct oce_mq_cqe), 1, 1, 0, 0);
	if (!cq)
		return NULL;

	/* allocate the mq */
	mq = malloc(sizeof(struct oce_mq), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!mq) {
		oce_cq_del(sc, cq);
		goto error;
	}

	mq->parent = sc;

	mq->ring = oce_create_ring_buffer(sc, q_len, sizeof(struct oce_mbx));
	if (!mq->ring)
		goto error;

	bzero(&mbx, sizeof(struct oce_mbx));

	version = IS_XE201(sc) ? OCE_MBX_VER_V1 : OCE_MBX_VER_V0;
	fwcmd = (struct mbx_create_common_mq_ex *)&mbx.payload;
	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
	    MBX_SUBSYSTEM_COMMON,
	    OPCODE_COMMON_CREATE_MQ_EXT,
	    MBX_TIMEOUT_SEC,
	    sizeof(struct mbx_create_common_mq_ex),
	    version);

	num_pages = oce_page_list(mq->ring, &fwcmd->params.req.pages[0]);
	page_size = mq->ring->num_items * mq->ring->item_size;

	ctx = &fwcmd->params.req.context;

	if (IS_XE201(sc)) {
		ctx->v1.num_pages = num_pages;
		ctx->v1.ring_size = OCE_LOG2(q_len) + 1;
		ctx->v1.cq_id = cq->cq_id;
		ctx->v1.valid = 1;
		ctx->v1.async_cq_id = cq->cq_id;
		ctx->v1.async_cq_valid = 1;
		/* Subscribe to Link State and Group 5 Events (bits 1 & 5 set) */
		ctx->v1.async_evt_bitmap |= LE_32(0x00000022);
		ctx->v1.async_evt_bitmap |= LE_32(1 << ASYNC_EVENT_CODE_DEBUG);
		ctx->v1.async_evt_bitmap |=
		    LE_32(1 << ASYNC_EVENT_CODE_SLIPORT);
	} else {
		ctx->v0.num_pages = num_pages;
		ctx->v0.cq_id = cq->cq_id;
		ctx->v0.ring_size = OCE_LOG2(q_len) + 1;
		ctx->v0.valid = 1;
		/* Subscribe to all async events */
		ctx->v0.async_evt_bitmap = 0xffffffff;
	}

	mbx.u0.s.embedded = 1;
	mbx.payload_length = sizeof(struct mbx_create_common_mq_ex);
	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);

	rc = oce_mbox_post(sc, &mbx, NULL);
	if (!rc)
		rc = fwcmd->hdr.u0.rsp.status;
	if (rc) {
		device_printf(sc->dev, "%s failed - cmd status: %d\n",
		    __FUNCTION__, rc);
		goto error;
	}
	mq->mq_id = LE_16(fwcmd->params.rsp.mq_id);
	mq->cq = cq;
	eq->cq[eq->cq_valid] = cq;
	eq->cq_valid++;
	mq->cq->eq = eq;
	mq->cfg.q_len = (uint8_t) q_len;
	mq->cfg.eqd = 0;
	mq->qstate = QCREATED;

	mq->cq->cb_arg = mq;
	mq->cq->cq_handler = oce_mq_handler;

	return mq;

error:
	device_printf(sc->dev, "MQ create failed\n");
	oce_mq_free(mq);
	return NULL;
}

/**
 * @brief Function to free a mailbox queue
 * @param mq pointer to a mailbox queue
 */
static void
oce_mq_free(struct oce_mq *mq)
{
	POCE_SOFTC sc;
	struct oce_mbx mbx;
	struct mbx_destroy_common_mq *fwcmd;

	if (!mq)
		return;
	/* only dereference mq after the NULL check above */
	sc = (POCE_SOFTC) mq->parent;

	if (mq->ring != NULL) {
		oce_destroy_ring_buffer(sc, mq->ring);
		mq->ring = NULL;
		if (mq->qstate == QCREATED) {
			bzero(&mbx, sizeof(struct oce_mbx));
			fwcmd = (struct mbx_destroy_common_mq *)&mbx.payload;
			fwcmd->params.req.id = mq->mq_id;
			(void)oce_destroy_q(sc, &mbx,
			    sizeof(struct mbx_destroy_common_mq),
			    QTYPE_MQ, 0);
		}
		mq->qstate = QDELETED;
	}

	if (mq->cq != NULL) {
		oce_cq_del(sc, mq->cq);
		mq->cq = NULL;
	}

	free(mq, M_DEVBUF);
}
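
/*
 * oce_destroy_q() below reuses the caller's mailbox, which already
 * carries the queue id in its payload; only the request header,
 * opcode/subsystem and byte swapping are filled in here before posting.
 */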

/**
 * @brief Function to delete a EQ, CQ, MQ, WQ or RQ
 * @param sc software handle to the device
 * @param mbx mailbox command to send to the fw to delete the queue
 *	(mbx contains the queue information to delete)
 * @param req_size the size of the mbx payload dependent on the qtype
 * @param qtype the type of queue i.e. EQ, CQ, MQ, WQ or RQ
 * @param version version of the mailbox command
 * @returns 0 on success, failure otherwise
 */
static int
oce_destroy_q(POCE_SOFTC sc, struct oce_mbx *mbx, size_t req_size,
    enum qtype qtype, int version)
{
	struct mbx_hdr *hdr = (struct mbx_hdr *)&mbx->payload;
	int opcode;
	int subsys;
	int rc = 0;

	switch (qtype) {
	case QTYPE_EQ:
		opcode = OPCODE_COMMON_DESTROY_EQ;
		subsys = MBX_SUBSYSTEM_COMMON;
		break;
	case QTYPE_CQ:
		opcode = OPCODE_COMMON_DESTROY_CQ;
		subsys = MBX_SUBSYSTEM_COMMON;
		break;
	case QTYPE_MQ:
		opcode = OPCODE_COMMON_DESTROY_MQ;
		subsys = MBX_SUBSYSTEM_COMMON;
		break;
	case QTYPE_WQ:
		opcode = NIC_DELETE_WQ;
		subsys = MBX_SUBSYSTEM_NIC;
		break;
	case QTYPE_RQ:
		opcode = NIC_DELETE_RQ;
		subsys = MBX_SUBSYSTEM_NIC;
		break;
	default:
		return EINVAL;
	}

	mbx_common_req_hdr_init(hdr, 0, 0, subsys,
	    opcode, MBX_TIMEOUT_SEC, req_size,
	    version);

	mbx->u0.s.embedded = 1;
	mbx->payload_length = (uint32_t) req_size;
	DW_SWAP(u32ptr(mbx), mbx->payload_length + OCE_BMBX_RHDR_SZ);

	rc = oce_mbox_post(sc, mbx, NULL);
	if (!rc)
		rc = hdr->u0.rsp.status;
	if (rc)
		device_printf(sc->dev, "%s failed - cmd status: %d\n",
		    __FUNCTION__, rc);
	return rc;
}

/**
 * @brief Function to create a completion queue
 * @param sc software handle to the device
 * @param eq optional eq to be associated with the cq
 * @param q_len length of completion queue
 * @param item_size size of completion queue items
 * @param sol_event command context event
 * @param is_eventable set if the CQ reports completions through an EQ
 * @param nodelay no delay flag
 * @param ncoalesce coalescing parameter passed through to the firmware
 * @returns pointer to the cq created, NULL on failure
 */
struct oce_cq *
oce_cq_create(POCE_SOFTC sc, struct oce_eq *eq,
    uint32_t q_len,
    uint32_t item_size,
    uint32_t sol_event,
    uint32_t is_eventable,
    uint32_t nodelay, uint32_t ncoalesce)
{
	struct oce_cq *cq = NULL;
	int rc = 0;

	cq = malloc(sizeof(struct oce_cq), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!cq)
		return NULL;

	cq->ring = oce_create_ring_buffer(sc, q_len, item_size);
	if (!cq->ring)
		goto error;

	cq->parent = sc;
	cq->eq = eq;
	cq->cq_cfg.q_len = q_len;
	cq->cq_cfg.item_size = item_size;
	cq->cq_cfg.nodelay = (uint8_t) nodelay;

	rc = oce_mbox_cq_create(cq, ncoalesce, is_eventable);
	if (rc)
		goto error;

	sc->cq[sc->ncqs++] = cq;

	return cq;

error:
	device_printf(sc->dev, "CQ create failed\n");
	oce_cq_del(sc, cq);
	return NULL;
}

/**
 * @brief Deletes the completion queue
 * @param sc software handle to the device
 * @param cq pointer to a completion queue
 */
static void
oce_cq_del(POCE_SOFTC sc, struct oce_cq *cq)
{
	struct oce_mbx mbx;
	struct mbx_destroy_common_cq *fwcmd;

	if (cq->ring != NULL) {
		bzero(&mbx, sizeof(struct oce_mbx));
		/* now fill the command */
		fwcmd = (struct mbx_destroy_common_cq *)&mbx.payload;
		fwcmd->params.req.id = cq->cq_id;
		(void)oce_destroy_q(sc, &mbx,
		    sizeof(struct mbx_destroy_common_cq), QTYPE_CQ, 0);
		/* now destroy the ring */
		oce_destroy_ring_buffer(sc, cq->ring);
		cq->ring = NULL;
	}

	free(cq, M_DEVBUF);
}
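
/*
 * oce_start_rq() posts all but one of the RQEs (or 960 buffers in the
 * hardware-LRO configuration) and then arms the RQ's CQ so receive
 * completions start flowing.
 */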

/**
 * @brief Start a receive queue
 * @param rq pointer to a receive queue
 */
int
oce_start_rq(struct oce_rq *rq)
{
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	int rc;

	if (sc->enable_hwlro)
		rc = oce_alloc_rx_bufs(rq, 960);
	else
		rc = oce_alloc_rx_bufs(rq, rq->cfg.q_len - 1);

	if (rc == 0)
		oce_arm_cq(rq->parent, rq->cq->cq_id, 0, TRUE);

	return rc;
}

/**
 * @brief Start a work queue
 * @param wq pointer to a work queue
 */
int
oce_start_wq(struct oce_wq *wq)
{
	oce_arm_cq(wq->parent, wq->cq->cq_id, 0, TRUE);
	return 0;
}

/**
 * @brief Start a mailbox queue
 * @param mq pointer to a mailbox queue
 */
int
oce_start_mq(struct oce_mq *mq)
{
	oce_arm_cq(mq->parent, mq->cq->cq_id, 0, TRUE);
	return 0;
}

/**
 * @brief Function to arm an EQ so that it can generate events
 * @param sc software handle to the device
 * @param qid id of the EQ returned by the fw at the time of creation
 * @param npopped number of EQEs popped (credited back to the controller)
 * @param rearm rearm bit enable/disable
 * @param clearint bit to clear the interrupt condition because of which
 *	EQEs are generated
 */
void
oce_arm_eq(POCE_SOFTC sc,
    int16_t qid, int npopped, uint32_t rearm, uint32_t clearint)
{
	eq_db_t eq_db = { 0 };

	eq_db.bits.rearm = rearm;
	eq_db.bits.event = 1;
	eq_db.bits.num_popped = npopped;
	eq_db.bits.clrint = clearint;
	eq_db.bits.qid = qid;
	OCE_WRITE_REG32(sc, db, PD_EQ_DB, eq_db.dw0);
}

/**
 * @brief Function to arm a CQ with CQEs
 * @param sc software handle to the device
 * @param qid id of the CQ returned by the fw at the time of creation
 * @param npopped number of CQEs popped (credited back to the controller)
 * @param rearm rearm bit enable/disable
 */
void
oce_arm_cq(POCE_SOFTC sc, int16_t qid, int npopped, uint32_t rearm)
{
	cq_db_t cq_db = { 0 };

	cq_db.bits.rearm = rearm;
	cq_db.bits.num_popped = npopped;
	cq_db.bits.event = 0;
	cq_db.bits.qid = qid;
	OCE_WRITE_REG32(sc, db, PD_CQ_DB, cq_db.dw0);
}
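
/*
 * The oce_drain_*() helpers below are used while stopping the device:
 * they consume any stale entries left on a queue's ring (the MQ variant
 * is still a stub) and credit them back through the doorbell without
 * re-arming the queue.
 */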

/**
 * @brief function to clean up an event queue during stop
 * @param eq pointer to event queue structure
 */
void
oce_drain_eq(struct oce_eq *eq)
{
	struct oce_eqe *eqe;
	uint16_t num_eqe = 0;
	POCE_SOFTC sc = eq->parent;

	do {
		eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
		if (eqe->evnt == 0)
			break;
		eqe->evnt = 0;
		bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
		    BUS_DMASYNC_POSTWRITE);
		num_eqe++;
		RING_GET(eq->ring, 1);
	} while (TRUE);

	oce_arm_eq(sc, eq->eq_id, num_eqe, FALSE, TRUE);
}

/**
 * @brief function to drain a work queue's CQ during stop
 * @param wq pointer to the WQ whose CQ is drained
 */
void
oce_drain_wq_cq(struct oce_wq *wq)
{
	POCE_SOFTC sc = wq->parent;
	struct oce_cq *cq = wq->cq;
	struct oce_nic_tx_cqe *cqe;
	int num_cqes = 0;

	bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map,
	    BUS_DMASYNC_POSTWRITE);

	do {
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
		    struct oce_nic_tx_cqe);
		if (cqe->u0.dw[3] == 0)
			break;
		cqe->u0.dw[3] = 0;
		bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map,
		    BUS_DMASYNC_POSTWRITE);
		RING_GET(cq->ring, 1);
		num_cqes++;
	} while (TRUE);

	oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
}

/**
 * @brief function to drain the MQ's CQ; currently a stub
 * @param arg pointer to the MQ to drain
 */
void
oce_drain_mq_cq(void *arg)
{
	/* TODO: additional code. */
	return;
}

/**
 * @brief function to drain a receive queue's CQ
 * @param rq pointer to the RQ whose CQ is drained
 */
void
oce_drain_rq_cq(struct oce_rq *rq)
{
	struct oce_nic_rx_cqe *cqe;
	uint16_t num_cqe = 0;
	struct oce_cq *cq;
	POCE_SOFTC sc;

	sc = rq->parent;
	cq = rq->cq;
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
	/* dequeue till you reach an invalid cqe */
	while (RQ_CQE_VALID(cqe)) {
		RQ_CQE_INVALIDATE(cqe);
		RING_GET(cq->ring, 1);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
		    struct oce_nic_rx_cqe);
		num_cqe++;
	}
	oce_arm_cq(sc, cq->cq_id, num_cqe, FALSE);

	return;
}

/**
 * @brief Free receive buffers posted to the hardware but not consumed
 * @param rq pointer to receive queue
 */
void
oce_free_posted_rxbuf(struct oce_rq *rq)
{
	struct oce_packet_desc *pd;

	while (rq->pending) {
		pd = &rq->pckts[rq->ring->cidx];
		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(rq->tag, pd->map);
		if (pd->mbuf != NULL) {
			m_freem(pd->mbuf);
			pd->mbuf = NULL;
		}

		RING_GET(rq->ring, 1);
		rq->pending--;
	}
}

/**
 * @brief Drain an LRO RQ's CQ until the flush completion is seen
 * @param rq pointer to receive queue
 */
void
oce_rx_cq_clean_hwlro(struct oce_rq *rq)
{
	struct oce_cq *cq = rq->cq;
	POCE_SOFTC sc = rq->parent;
	struct nic_hwlro_singleton_cqe *cqe;
	struct nic_hwlro_cqe_part2 *cqe2;
	int flush_wait = 0;
	int flush_compl = 0;
	int num_frags = 0;

	for (;;) {
		bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map,
		    BUS_DMASYNC_POSTWRITE);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
		    struct nic_hwlro_singleton_cqe);
		if (cqe->valid) {
			if (cqe->cqe_type == 0) { /* singleton cqe */
				/* we should not get singleton cqe after
				 * cqe1 on same rq */
				if (rq->cqe_firstpart != NULL) {
					device_printf(sc->dev,
					    "Got singleton cqe after cqe1\n");
					goto exit_rx_cq_clean_hwlro;
				}
				num_frags = cqe->pkt_size / rq->cfg.frag_size;
				if (cqe->pkt_size % rq->cfg.frag_size)
					num_frags++;
				oce_discard_rx_comp(rq, num_frags);
				/* Check if CQE is flush completion */
				if (!cqe->pkt_size)
					flush_compl = 1;
				cqe->valid = 0;
				RING_GET(cq->ring, 1);
			} else if (cqe->cqe_type == 0x1) { /* first part */
				/* we should not get cqe1 after cqe1 on
				 * same rq */
				if (rq->cqe_firstpart != NULL) {
					device_printf(sc->dev,
					    "Got cqe1 after cqe1\n");
					goto exit_rx_cq_clean_hwlro;
				}
				rq->cqe_firstpart =
				    (struct nic_hwlro_cqe_part1 *)cqe;
				RING_GET(cq->ring, 1);
			} else if (cqe->cqe_type == 0x2) { /* second part */
				cqe2 = (struct nic_hwlro_cqe_part2 *)cqe;
				/* We should not get cqe2 without cqe1 */
				if (rq->cqe_firstpart == NULL) {
					device_printf(sc->dev,
					    "Got cqe2 without cqe1\n");
					goto exit_rx_cq_clean_hwlro;
				}
				num_frags = cqe2->coalesced_size /
				    rq->cfg.frag_size;
				if (cqe2->coalesced_size % rq->cfg.frag_size)
					num_frags++;

				/* Flush completion will always come in
				 * singleton CQE */
				oce_discard_rx_comp(rq, num_frags);

				rq->cqe_firstpart->valid = 0;
				cqe2->valid = 0;
				rq->cqe_firstpart = NULL;
				RING_GET(cq->ring, 1);
			}
			oce_arm_cq(sc, cq->cq_id, 1, FALSE);
			if (flush_compl)
				break;
		} else {
			if (flush_wait++ > 100) {
				device_printf(sc->dev,
				    "did not receive hwlro flush compl\n");
				break;
			}
			oce_arm_cq(sc, cq->cq_id, 0, TRUE);
			DELAY(1000);
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	oce_arm_cq(sc, cq->cq_id, 0, FALSE);

exit_rx_cq_clean_hwlro:
	return;
}

/**
 * @brief Drain an RQ's CQ until the flush completion is seen
 * @param rq pointer to receive queue
 */
void
oce_rx_cq_clean(struct oce_rq *rq)
{
	struct oce_nic_rx_cqe *cqe;
	struct oce_cq *cq;
	POCE_SOFTC sc;
	int flush_wait = 0;
	int flush_compl = 0;

	sc = rq->parent;
	cq = rq->cq;

	for (;;) {
		bus_dmamap_sync(cq->ring->dma.tag,
		    cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
		    struct oce_nic_rx_cqe);
		if (RQ_CQE_VALID(cqe)) {
			DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));
			oce_discard_rx_comp(rq, cqe->u0.s.num_fragments);
			/* Check if CQE is flush completion */
			if ((cqe->u0.s.num_fragments == 0) &&
			    (cqe->u0.s.pkt_size == 0) &&
			    (cqe->u0.s.error == 0))
				flush_compl = 1;

			RQ_CQE_INVALIDATE(cqe);
			RING_GET(cq->ring, 1);
#if defined(INET6) || defined(INET)
			if (IF_LRO_ENABLED(sc))
				oce_rx_flush_lro(rq);
#endif
			oce_arm_cq(sc, cq->cq_id, 1, FALSE);
			if (flush_compl)
				break;
		} else {
			if (flush_wait++ > 100) {
				device_printf(sc->dev,
				    "did not receive flush compl\n");
				break;
			}
			oce_arm_cq(sc, cq->cq_id, 0, TRUE);
			DELAY(1000);
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	oce_arm_cq(sc, cq->cq_id, 0, FALSE);
}
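
/*
 * oce_stop_rx() deletes each firmware RQ and then polls its CQ for the
 * flush completion, so every buffer the hardware still owned is
 * accounted for before oce_free_posted_rxbuf() reclaims it.
 */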

/**
 * @brief Delete all firmware RQs and reclaim posted receive buffers
 * @param sc software handle to the device
 */
void
oce_stop_rx(POCE_SOFTC sc)
{
	struct oce_mbx mbx;
	struct mbx_delete_nic_rq *fwcmd;
	struct mbx_delete_nic_rq_v1 *fwcmd1;
	struct oce_rq *rq;
	int i = 0;

	/* before deleting disable hwlro */
	if (sc->enable_hwlro)
		oce_mbox_nic_set_iface_lro_config(sc, 0);

	for_all_rq_queues(sc, rq, i) {
		if (rq->qstate == QCREATED) {
			/* Delete rxq in firmware */
			LOCK(&rq->rx_lock);

			bzero(&mbx, sizeof(mbx));
			if (!rq->islro) {
				fwcmd = (struct mbx_delete_nic_rq *)
				    &mbx.payload;
				fwcmd->params.req.rq_id = rq->rq_id;
				(void)oce_destroy_q(sc, &mbx,
				    sizeof(struct mbx_delete_nic_rq),
				    QTYPE_RQ, 0);
			} else {
				fwcmd1 = (struct mbx_delete_nic_rq_v1 *)
				    &mbx.payload;
				fwcmd1->params.req.rq_id = rq->rq_id;
				fwcmd1->params.req.rq_flags =
				    (NIC_RQ_FLAGS_RSS | NIC_RQ_FLAGS_LRO);

				(void)oce_destroy_q(sc, &mbx,
				    sizeof(struct mbx_delete_nic_rq_v1),
				    QTYPE_RQ, 1);
			}
			rq->qstate = QDELETED;

			DELAY(1000);

			if (!rq->islro)
				oce_rx_cq_clean(rq);
			else
				oce_rx_cq_clean_hwlro(rq);

			/* Free posted RX buffers that are not used */
			oce_free_posted_rxbuf(rq);
			UNLOCK(&rq->rx_lock);
		}
	}
}

/**
 * @brief Create the firmware RQs and re-enable LRO/RSS as configured
 * @param sc software handle to the device
 * @returns 0 on success, error otherwise
 */
int
oce_start_rx(POCE_SOFTC sc)
{
	struct oce_rq *rq;
	int rc = 0, i;

	for_all_rq_queues(sc, rq, i) {
		if (rq->qstate == QCREATED)
			continue;
		if ((i == 0) || (!sc->enable_hwlro)) {
			rc = oce_mbox_create_rq(rq);
			if (rc)
				goto error;
			rq->islro = 0;
		} else {
			rc = oce_mbox_create_rq_v2(rq);
			if (rc)
				goto error;
			rq->islro = 1;
		}
		/* reset queue pointers */
		rq->qstate = QCREATED;
		rq->pending = 0;
		rq->ring->cidx = 0;
		rq->ring->pidx = 0;
	}

	if (sc->enable_hwlro) {
		rc = oce_mbox_nic_set_iface_lro_config(sc, 1);
		if (rc)
			goto error;
	}

	DELAY(1);

	/* RSS config */
	if (is_rss_enabled(sc)) {
		rc = oce_config_nic_rss(sc, (uint8_t) sc->if_id, RSS_ENABLE);
		if (rc)
			goto error;
	}

	DELAY(1);
	return rc;
error:
	device_printf(sc->dev, "Start RX failed\n");
	return rc;
}