/*-
 * Copyright (C) 2013 Emulex
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Emulex Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Contact Information:
 * freebsd-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

/* $FreeBSD$ */

#include "oce_if.h"

/*****************************************************
 * local queue functions
 *****************************************************/

static struct oce_wq *oce_wq_init(POCE_SOFTC sc,
				  uint32_t q_len, uint32_t wq_type);
static int oce_wq_create(struct oce_wq *wq, struct oce_eq *eq);
static void oce_wq_free(struct oce_wq *wq);
static void oce_wq_del(struct oce_wq *wq);
static struct oce_rq *oce_rq_init(POCE_SOFTC sc,
				  uint32_t q_len,
				  uint32_t frag_size,
				  uint32_t mtu, uint32_t rss);
static int oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq);
static void oce_rq_free(struct oce_rq *rq);
static void oce_rq_del(struct oce_rq *rq);
static struct oce_eq *oce_eq_create(POCE_SOFTC sc,
				    uint32_t q_len,
				    uint32_t item_size,
				    uint32_t eq_delay,
				    uint32_t vector);
static void oce_eq_del(struct oce_eq *eq);
static struct oce_mq *oce_mq_create(POCE_SOFTC sc,
				    struct oce_eq *eq, uint32_t q_len);
static void oce_mq_free(struct oce_mq *mq);
static int oce_destroy_q(POCE_SOFTC sc, struct oce_mbx *mbx,
			 size_t req_size, enum qtype qtype);
struct oce_cq *oce_cq_create(POCE_SOFTC sc,
			     struct oce_eq *eq,
			     uint32_t q_len,
			     uint32_t item_size,
			     uint32_t sol_event,
			     uint32_t is_eventable,
			     uint32_t nodelay, uint32_t ncoalesce);
static void oce_cq_del(POCE_SOFTC sc, struct oce_cq *cq);


/**
 * @brief Create and initialize all the queues on the board
 * @param sc software handle to the device
 * @returns 0 if successful, or error
 **/
int
oce_queue_init_all(POCE_SOFTC sc)
{
	int rc = 0, i, vector;
	struct oce_wq *wq;
	struct oce_rq *rq;
	struct oce_aic_obj *aic;

	/* alloc TX/RX queues */
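	/*
	 * oce_wq_init()/oce_rq_init() below only allocate host-side state
	 * (DMA tags, maps and ring memory); the corresponding firmware
	 * queue objects are created later in this function and, for the
	 * RQs, not until oce_start_rx() is called.
	 */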
	for_all_wq_queues(sc, wq, i) {
		sc->wq[i] = oce_wq_init(sc, sc->tx_ring_size,
					NIC_WQ_TYPE_STANDARD);
		if (!sc->wq[i])
			goto error;

	}

	for_all_rq_queues(sc, rq, i) {
		sc->rq[i] = oce_rq_init(sc, sc->rx_ring_size, sc->rq_frag_size,
					OCE_MAX_JUMBO_FRAME_SIZE,
					(i == 0) ? 0 : is_rss_enabled(sc));
		if (!sc->rq[i])
			goto error;
	}

	/* Create network interface on card */
	if (oce_create_nw_interface(sc))
		goto error;

	/* create all of the event queues */
	for (vector = 0; vector < sc->intr_count; vector++) {
		/* setup aic defaults for each event queue */
		aic = &sc->aic_obj[vector];
		aic->max_eqd = OCE_MAX_EQD;
		aic->min_eqd = OCE_MIN_EQD;
		aic->et_eqd = OCE_MIN_EQD;
		aic->enable = TRUE;

		sc->eq[vector] = oce_eq_create(sc, EQ_LEN_1024, EQE_SIZE_4,
						0, vector);
		if (!sc->eq[vector])
			goto error;
	}

	/* create Tx, Rx and mcc queues */
	for_all_wq_queues(sc, wq, i) {
		rc = oce_wq_create(wq, sc->eq[i]);
		if (rc)
			goto error;
		wq->queue_index = i;
		TASK_INIT(&wq->txtask, 1, oce_tx_task, wq);
	}

	for_all_rq_queues(sc, rq, i) {
		rc = oce_rq_create(rq, sc->if_id,
				   sc->eq[(i == 0) ? 0 : (i - 1)]);
		if (rc)
			goto error;
		rq->queue_index = i;
	}

	sc->mq = oce_mq_create(sc, sc->eq[0], 64);
	if (!sc->mq)
		goto error;

	return rc;

error:
	oce_queue_release_all(sc);
	return 1;
}


/**
 * @brief Releases all the queues created
 * @param sc software handle to the device
 */
void
oce_queue_release_all(POCE_SOFTC sc)
{
	int i = 0;
	struct oce_wq *wq;
	struct oce_rq *rq;
	struct oce_eq *eq;

	for_all_rq_queues(sc, rq, i) {
		if (rq) {
			oce_rq_del(sc->rq[i]);
			oce_rq_free(sc->rq[i]);
		}
	}

	for_all_wq_queues(sc, wq, i) {
		if (wq) {
			oce_wq_del(sc->wq[i]);
			oce_wq_free(sc->wq[i]);
		}
	}

	if (sc->mq)
		oce_mq_free(sc->mq);

	for_all_evnt_queues(sc, eq, i) {
		if (eq)
			oce_eq_del(sc->eq[i]);
	}
}


/**
 * @brief Function to create a WQ for NIC Tx
 * @param sc software handle to the device
 * @param q_len number of entries in the queue
 * @param wq_type work queue type
 * @returns the pointer to the WQ created or NULL on failure
 */
static struct
oce_wq *oce_wq_init(POCE_SOFTC sc, uint32_t q_len, uint32_t wq_type)
{
	struct oce_wq *wq;
	int rc = 0, i;

	/* q_len must be min 256 and max 2k */
	if (q_len < 256 || q_len > 2048) {
		device_printf(sc->dev,
		    "Invalid q length. Must be "
		    "[256, 2048]: 0x%x\n", q_len);
		return NULL;
	}

	/* allocate wq */
	wq = malloc(sizeof(struct oce_wq), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!wq)
		return NULL;

	/* Set the wq config */
	wq->cfg.q_len = q_len;
	wq->cfg.wq_type = (uint8_t) wq_type;
	wq->cfg.eqd = OCE_DEFAULT_WQ_EQD;
	wq->cfg.nbufs = 2 * wq->cfg.q_len;
	wq->cfg.nhdl = 2 * wq->cfg.q_len;

	wq->parent = (void *)sc;

	rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
				1, 0,
				BUS_SPACE_MAXADDR,
				BUS_SPACE_MAXADDR,
				NULL, NULL,
				OCE_MAX_TX_SIZE,
				OCE_MAX_TX_ELEMENTS,
				PAGE_SIZE, 0, NULL, NULL, &wq->tag);

	if (rc)
		goto free_wq;

	for (i = 0; i < OCE_WQ_PACKET_ARRAY_SIZE; i++) {
		rc = bus_dmamap_create(wq->tag, 0, &wq->pckts[i].map);
		if (rc)
			goto free_wq;
	}

	wq->ring = oce_create_ring_buffer(sc, q_len, NIC_WQE_SIZE);
	if (!wq->ring)
		goto free_wq;

	LOCK_CREATE(&wq->tx_lock, "TX_lock");

#if __FreeBSD_version >= 800000
	/* Allocate buf ring for multiqueue */
	wq->br = buf_ring_alloc(4096, M_DEVBUF,
				M_WAITOK, &wq->tx_lock.mutex);
	if (!wq->br)
		goto free_wq;
#endif
	return wq;

free_wq:
	device_printf(sc->dev, "Create WQ failed\n");
	oce_wq_free(wq);
	return NULL;
}


/**
 * @brief Frees the work queue
 * @param wq pointer to work queue to free
 */
static void
oce_wq_free(struct oce_wq *wq)
{
	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
	int i;

	taskqueue_drain(taskqueue_swi, &wq->txtask);

	if (wq->ring != NULL) {
		oce_destroy_ring_buffer(sc, wq->ring);
		wq->ring = NULL;
	}

	for (i = 0; i < OCE_WQ_PACKET_ARRAY_SIZE; i++) {
		if (wq->pckts[i].map != NULL) {
			bus_dmamap_unload(wq->tag, wq->pckts[i].map);
			bus_dmamap_destroy(wq->tag, wq->pckts[i].map);
			wq->pckts[i].map = NULL;
		}
	}

	if (wq->tag != NULL)
		bus_dma_tag_destroy(wq->tag);
	if (wq->br != NULL)
		buf_ring_free(wq->br, M_DEVBUF);

	LOCK_DESTROY(&wq->tx_lock);
	free(wq, M_DEVBUF);
}


/**
 * @brief Create a work queue
 * @param wq pointer to work queue
 * @param eq pointer to associated event queue
 */
static int
oce_wq_create(struct oce_wq *wq, struct oce_eq *eq)
{
	POCE_SOFTC sc = wq->parent;
	struct oce_cq *cq;
	int rc = 0;

	/* create the CQ */
	cq = oce_cq_create(sc,
			   eq,
			   CQ_LEN_1024,
			   sizeof(struct oce_nic_tx_cqe), 0, 1, 0, 3);
	if (!cq)
		return ENXIO;

	wq->cq = cq;

	rc = oce_mbox_create_wq(wq);
	if (rc)
		goto error;

	wq->qstate = QCREATED;
	wq->wq_free = wq->cfg.q_len;
	wq->ring->cidx = 0;
	wq->ring->pidx = 0;

	eq->cq[eq->cq_valid] = cq;
	eq->cq_valid++;
	cq->cb_arg = wq;
	cq->cq_handler = oce_wq_handler;

	return 0;

error:
	device_printf(sc->dev, "WQ create failed\n");
	oce_wq_del(wq);
	return rc;
}


/**
 * @brief Delete a work queue
 * @param wq pointer to work queue
 */
static void
oce_wq_del(struct oce_wq *wq)
{
	struct oce_mbx mbx;
	struct mbx_delete_nic_wq *fwcmd;
	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;

	if (wq->qstate == QCREATED) {
		bzero(&mbx, sizeof(struct oce_mbx));
		/* now fill the command */
		fwcmd = (struct mbx_delete_nic_wq *)&mbx.payload;
		fwcmd->params.req.wq_id = wq->wq_id;
		(void)oce_destroy_q(sc, &mbx,
		    sizeof(struct mbx_delete_nic_wq), QTYPE_WQ);
		wq->qstate = QDELETED;
	}

	if (wq->cq != NULL) {
		oce_cq_del(sc, wq->cq);
		wq->cq = NULL;
	}
}


/**
 * @brief function to allocate receive queue resources
 * @param sc software handle to the device
 * @param q_len length of receive queue
 * @param frag_size size of a receive queue fragment
 * @param mtu maximum transmission unit
 * @param rss is-rss-queue flag
 * @returns the pointer to the RQ created or NULL on failure
 */
static struct
oce_rq *oce_rq_init(POCE_SOFTC sc,
		    uint32_t q_len,
		    uint32_t frag_size,
		    uint32_t mtu, uint32_t rss)
{
	struct oce_rq *rq;
	int rc = 0, i;

	if (OCE_LOG2(frag_size) <= 0)
		return NULL;

	if ((q_len == 0) || (q_len > 1024))
		return NULL;

	/* allocate the rq */
	rq = malloc(sizeof(struct oce_rq), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!rq)
		return NULL;

	rq->cfg.q_len = q_len;
	rq->cfg.frag_size = frag_size;
	rq->cfg.mtu = mtu;
	rq->cfg.eqd = 0;
	rq->lro_pkts_queued = 0;
	rq->cfg.is_rss_queue = rss;
	rq->packets_in = 0;
	rq->packets_out = 0;
	rq->pending = 0;

	rq->parent = (void *)sc;

	rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
				1, 0,
				BUS_SPACE_MAXADDR,
				BUS_SPACE_MAXADDR,
				NULL, NULL,
				OCE_MAX_RX_SIZE,
				1, PAGE_SIZE, 0, NULL, NULL, &rq->tag);

	if (rc)
		goto free_rq;

	for (i = 0; i < OCE_RQ_PACKET_ARRAY_SIZE; i++) {
		rc = bus_dmamap_create(rq->tag, 0, &rq->pckts[i].map);
		if (rc)
			goto free_rq;
	}

	/* create the ring buffer */
	rq->ring = oce_create_ring_buffer(sc, q_len,
					  sizeof(struct oce_nic_rqe));
	if (!rq->ring)
		goto free_rq;

	LOCK_CREATE(&rq->rx_lock, "RX_lock");

	return rq;

free_rq:
	device_printf(sc->dev, "Create RQ failed\n");
	oce_rq_free(rq);
	return NULL;
}


/**
 * @brief Free a receive queue
 * @param rq pointer to receive queue
 */
static void
oce_rq_free(struct oce_rq *rq)
{
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	int i = 0;

	if (rq->ring != NULL) {
		oce_destroy_ring_buffer(sc, rq->ring);
		rq->ring = NULL;
	}
	for (i = 0; i < OCE_RQ_PACKET_ARRAY_SIZE; i++) {
		if (rq->pckts[i].map != NULL) {
			bus_dmamap_unload(rq->tag, rq->pckts[i].map);
			bus_dmamap_destroy(rq->tag, rq->pckts[i].map);
			rq->pckts[i].map = NULL;
		}
		if (rq->pckts[i].mbuf) {
			m_free(rq->pckts[i].mbuf);
			rq->pckts[i].mbuf = NULL;
		}
	}

	if (rq->tag != NULL)
		bus_dma_tag_destroy(rq->tag);

	LOCK_DESTROY(&rq->rx_lock);
	free(rq, M_DEVBUF);
}


/**
 * @brief Create a receive queue
 * @param rq receive queue
 * @param if_id interface identifier index
 * @param eq pointer to event queue
 */
static int
oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq)
{
	POCE_SOFTC sc = rq->parent;
	struct oce_cq *cq;

	cq = oce_cq_create(sc,
			   eq,
			   CQ_LEN_1024,
			   sizeof(struct oce_nic_rx_cqe), 0, 1, 0, 3);
	if (!cq)
		return ENXIO;

	rq->cq = cq;
	rq->cfg.if_id = if_id;

	/* Don't create RQ here. Create in if_activate */
	rq->qstate = 0;
	rq->ring->cidx = 0;
	rq->ring->pidx = 0;
	eq->cq[eq->cq_valid] = cq;
	eq->cq_valid++;
	cq->cb_arg = rq;
	cq->cq_handler = oce_rq_handler;

	return 0;

}


/**
 * @brief Delete a receive queue
 * @param rq receive queue
 */
static void
oce_rq_del(struct oce_rq *rq)
{
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	struct oce_mbx mbx;
	struct mbx_delete_nic_rq *fwcmd;

	if (rq->qstate == QCREATED) {
		bzero(&mbx, sizeof(mbx));

		fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
		fwcmd->params.req.rq_id = rq->rq_id;
		(void)oce_destroy_q(sc, &mbx,
		    sizeof(struct mbx_delete_nic_rq), QTYPE_RQ);
		rq->qstate = QDELETED;
	}

	if (rq->cq != NULL) {
		oce_cq_del(sc, rq->cq);
		rq->cq = NULL;
	}
}


/**
 * @brief function to create an event queue
 * @param sc software handle to the device
 * @param q_len length of event queue
 * @param item_size size of an event queue item
 * @param eq_delay event queue delay
 * @retval eq success, pointer to event queue
 * @retval NULL failure
 */
static struct
oce_eq *oce_eq_create(POCE_SOFTC sc, uint32_t q_len,
		      uint32_t item_size,
		      uint32_t eq_delay,
		      uint32_t vector)
{
	struct oce_eq *eq;
	int rc = 0;

	/* allocate an eq */
	eq = malloc(sizeof(struct oce_eq), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (eq == NULL)
		return NULL;

	eq->parent = (void *)sc;
	eq->eq_id = 0xffff;
	eq->ring = oce_create_ring_buffer(sc, q_len, item_size);
	if (!eq->ring)
		goto free_eq;

	eq->eq_cfg.q_len = q_len;
	eq->eq_cfg.item_size = item_size;
	eq->eq_cfg.cur_eqd = (uint8_t) eq_delay;

	rc = oce_mbox_create_eq(eq);
	if (rc)
		goto free_eq;

	sc->intrs[sc->neqs++].eq = eq;

	return eq;

free_eq:
	oce_eq_del(eq);
	return NULL;
}


/**
 * @brief Function to delete an event queue
 * @param eq pointer to an event queue
 */
static void
oce_eq_del(struct oce_eq *eq)
{
	struct oce_mbx mbx;
	struct mbx_destroy_common_eq *fwcmd;
	POCE_SOFTC sc = (POCE_SOFTC) eq->parent;

	if (eq->eq_id != 0xffff) {
		bzero(&mbx, sizeof(mbx));
		fwcmd = (struct mbx_destroy_common_eq *)&mbx.payload;
		fwcmd->params.req.id = eq->eq_id;
		(void)oce_destroy_q(sc, &mbx,
		    sizeof(struct mbx_destroy_common_eq), QTYPE_EQ);
	}

	if (eq->ring != NULL) {
		oce_destroy_ring_buffer(sc, eq->ring);
		eq->ring = NULL;
	}

	free(eq, M_DEVBUF);

}


/**
 * @brief Function to create an MQ
 * @param sc software handle to the device
 * @param eq the EQ to associate with the MQ for event notification
 * @param q_len the number of entries to create in the MQ
 * @returns pointer to the created MQ, or NULL on failure
 */
static struct oce_mq *
oce_mq_create(POCE_SOFTC sc, struct oce_eq *eq, uint32_t q_len)
{
	struct oce_mbx mbx;
	struct mbx_create_common_mq_ex *fwcmd = NULL;
	struct oce_mq *mq = NULL;
	int rc = 0;
	struct oce_cq *cq;
	oce_mq_ext_ctx_t *ctx;
	uint32_t num_pages;
	uint32_t page_size;
	int version;

	cq = oce_cq_create(sc, eq, CQ_LEN_256,
			   sizeof(struct oce_mq_cqe), 1, 1, 0, 0);
	if (!cq)
		return NULL;

	/* allocate the mq */
	mq = malloc(sizeof(struct oce_mq), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!mq) {
		oce_cq_del(sc, cq);
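		/*
		 * The CQ is released right above because mq is still NULL
		 * here, so the error path below cannot reach it through
		 * oce_mq_free().
		 */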
		goto error;
	}

	mq->parent = sc;

	mq->ring = oce_create_ring_buffer(sc, q_len, sizeof(struct oce_mbx));
	if (!mq->ring)
		goto error;

	bzero(&mbx, sizeof(struct oce_mbx));

	IS_XE201(sc) ? (version = OCE_MBX_VER_V1) : (version = OCE_MBX_VER_V0);
	fwcmd = (struct mbx_create_common_mq_ex *)&mbx.payload;
	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
				MBX_SUBSYSTEM_COMMON,
				OPCODE_COMMON_CREATE_MQ_EXT,
				MBX_TIMEOUT_SEC,
				sizeof(struct mbx_create_common_mq_ex),
				version);

	num_pages = oce_page_list(mq->ring, &fwcmd->params.req.pages[0]);
	page_size = mq->ring->num_items * mq->ring->item_size;

	ctx = &fwcmd->params.req.context;

	if (IS_XE201(sc)) {
		ctx->v1.num_pages = num_pages;
		ctx->v1.ring_size = OCE_LOG2(q_len) + 1;
		ctx->v1.cq_id = cq->cq_id;
		ctx->v1.valid = 1;
		ctx->v1.async_cq_id = cq->cq_id;
		ctx->v1.async_cq_valid = 1;
		/* Subscribe to Link State and Group 5 Events (bits 1 & 5 set) */
		ctx->v1.async_evt_bitmap |= LE_32(0x00000022);
		ctx->v1.async_evt_bitmap |= LE_32(1 << ASYNC_EVENT_CODE_DEBUG);
		ctx->v1.async_evt_bitmap |=
			LE_32(1 << ASYNC_EVENT_CODE_SLIPORT);
	}
	else {
		ctx->v0.num_pages = num_pages;
		ctx->v0.cq_id = cq->cq_id;
		ctx->v0.ring_size = OCE_LOG2(q_len) + 1;
		ctx->v0.valid = 1;
		/* Subscribe to Link State and Group 5 Events (bits 1 & 5 set) */
		ctx->v0.async_evt_bitmap = 0xffffffff;
	}

	mbx.u0.s.embedded = 1;
	mbx.payload_length = sizeof(struct mbx_create_common_mq_ex);
	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);

	rc = oce_mbox_post(sc, &mbx, NULL);
	if (!rc)
		rc = fwcmd->hdr.u0.rsp.status;
	if (rc) {
		device_printf(sc->dev, "%s failed - cmd status: %d\n",
			      __FUNCTION__, rc);
		goto error;
	}
	mq->mq_id = LE_16(fwcmd->params.rsp.mq_id);
	mq->cq = cq;
	eq->cq[eq->cq_valid] = cq;
	eq->cq_valid++;
	mq->cq->eq = eq;
	mq->cfg.q_len = (uint8_t) q_len;
	mq->cfg.eqd = 0;
	mq->qstate = QCREATED;

	mq->cq->cb_arg = mq;
	mq->cq->cq_handler = oce_mq_handler;

	return mq;

error:
	device_printf(sc->dev, "MQ create failed\n");
	oce_mq_free(mq);
	mq = NULL;
	return mq;
}


/**
 * @brief Function to free a mailbox queue
 * @param mq pointer to a mailbox queue
 */
static void
oce_mq_free(struct oce_mq *mq)
{
	POCE_SOFTC sc;
	struct oce_mbx mbx;
	struct mbx_destroy_common_mq *fwcmd;

	if (!mq)
		return;

	sc = (POCE_SOFTC) mq->parent;

	if (mq->ring != NULL) {
		oce_destroy_ring_buffer(sc, mq->ring);
		mq->ring = NULL;
		if (mq->qstate == QCREATED) {
			bzero(&mbx, sizeof (struct oce_mbx));
			fwcmd = (struct mbx_destroy_common_mq *)&mbx.payload;
			fwcmd->params.req.id = mq->mq_id;
			(void) oce_destroy_q(sc, &mbx,
			    sizeof (struct mbx_destroy_common_mq),
			    QTYPE_MQ);
		}
		mq->qstate = QDELETED;
	}

	if (mq->cq != NULL) {
		oce_cq_del(sc, mq->cq);
		mq->cq = NULL;
	}

	free(mq, M_DEVBUF);
	mq = NULL;
}


/**
 * @brief Function to delete an EQ, CQ, MQ, WQ or RQ
 * @param sc software handle to the device
 * @param mbx mailbox command to send to the fw to delete the queue
 *            (mbx contains the queue information to delete)
 * @param req_size the size of the mbx payload dependent on the qtype
 * @param qtype the type of queue, i.e. EQ, CQ, MQ, WQ or RQ
 * @returns 0 on success, failure otherwise
 */
static int
oce_destroy_q(POCE_SOFTC sc, struct oce_mbx *mbx, size_t req_size,
	      enum qtype qtype)
{
	struct mbx_hdr *hdr = (struct mbx_hdr *)&mbx->payload;
	int opcode;
	int subsys;
	int rc = 0;

	switch (qtype) {
	case QTYPE_EQ:
		opcode = OPCODE_COMMON_DESTROY_EQ;
		subsys = MBX_SUBSYSTEM_COMMON;
		break;
	case QTYPE_CQ:
		opcode = OPCODE_COMMON_DESTROY_CQ;
		subsys = MBX_SUBSYSTEM_COMMON;
		break;
	case QTYPE_MQ:
		opcode = OPCODE_COMMON_DESTROY_MQ;
		subsys = MBX_SUBSYSTEM_COMMON;
		break;
	case QTYPE_WQ:
		opcode = NIC_DELETE_WQ;
		subsys = MBX_SUBSYSTEM_NIC;
		break;
	case QTYPE_RQ:
		opcode = NIC_DELETE_RQ;
		subsys = MBX_SUBSYSTEM_NIC;
		break;
	default:
		return EINVAL;
	}

	mbx_common_req_hdr_init(hdr, 0, 0, subsys,
				opcode, MBX_TIMEOUT_SEC, req_size,
				OCE_MBX_VER_V0);

	mbx->u0.s.embedded = 1;
	mbx->payload_length = (uint32_t) req_size;
	DW_SWAP(u32ptr(mbx), mbx->payload_length + OCE_BMBX_RHDR_SZ);

	rc = oce_mbox_post(sc, mbx, NULL);
	if (!rc)
		rc = hdr->u0.rsp.status;
	if (rc)
		device_printf(sc->dev, "%s failed - cmd status: %d\n",
			      __FUNCTION__, rc);
	return rc;
}


/**
 * @brief Function to create a completion queue
 * @param sc software handle to the device
 * @param eq optional eq to be associated with the cq
 * @param q_len length of completion queue
 * @param item_size size of completion queue items
 * @param sol_event command context event
 * @param is_eventable eventable flag (CQ reports events to its EQ)
 * @param nodelay no delay flag
 * @param ncoalesce CQE coalescing count
 * @returns pointer to the cq created, NULL on failure
 */
struct oce_cq *
oce_cq_create(POCE_SOFTC sc, struct oce_eq *eq,
	      uint32_t q_len,
	      uint32_t item_size,
	      uint32_t sol_event,
	      uint32_t is_eventable,
	      uint32_t nodelay, uint32_t ncoalesce)
{
	struct oce_cq *cq = NULL;
	int rc = 0;

	cq = malloc(sizeof(struct oce_cq), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!cq)
		return NULL;

	cq->ring = oce_create_ring_buffer(sc, q_len, item_size);
	if (!cq->ring)
		goto error;

	cq->parent = sc;
	cq->eq = eq;
	cq->cq_cfg.q_len = q_len;
	cq->cq_cfg.item_size = item_size;
	cq->cq_cfg.nodelay = (uint8_t) nodelay;

	rc = oce_mbox_cq_create(cq, ncoalesce, is_eventable);
	if (rc)
		goto error;

	sc->cq[sc->ncqs++] = cq;

	return cq;

error:
	device_printf(sc->dev, "CQ create failed\n");
	oce_cq_del(sc, cq);
	return NULL;
}


/**
 * @brief Deletes the completion queue
 * @param sc software handle to the device
 * @param cq pointer to a completion queue
 */
static void
oce_cq_del(POCE_SOFTC sc, struct oce_cq *cq)
{
	struct oce_mbx mbx;
	struct mbx_destroy_common_cq *fwcmd;

	if (cq->ring != NULL) {

		bzero(&mbx, sizeof(struct oce_mbx));
		/* now fill the command */
		fwcmd = (struct mbx_destroy_common_cq *)&mbx.payload;
		fwcmd->params.req.id = cq->cq_id;
		(void)oce_destroy_q(sc, &mbx,
		    sizeof(struct mbx_destroy_common_cq), QTYPE_CQ);
		/* NOW destroy the ring */
		oce_destroy_ring_buffer(sc, cq->ring);
		cq->ring = NULL;
	}

	free(cq, M_DEVBUF);
	cq = NULL;
}


/**
 * @brief Start a receive queue
 * @param rq pointer to a receive queue
 */
int
oce_start_rq(struct oce_rq *rq)
{
	int rc;

	rc = oce_alloc_rx_bufs(rq, rq->cfg.q_len);

	if (rc == 0)
		oce_arm_cq(rq->parent, rq->cq->cq_id, 0, TRUE);
	return rc;
}


/**
 * @brief Start a work queue
 * @param wq pointer to a work queue
 */
int
oce_start_wq(struct oce_wq *wq)
{
	oce_arm_cq(wq->parent, wq->cq->cq_id, 0, TRUE);
	return 0;
}


/**
 * @brief Start a mailbox queue
 * @param mq pointer to a mailbox queue
 */
int
oce_start_mq(struct oce_mq *mq)
{
	oce_arm_cq(mq->parent, mq->cq->cq_id, 0, TRUE);
	return 0;
}


/**
 * @brief Function to arm an EQ so that it can generate events
 * @param sc software handle to the device
 * @param qid id of the EQ returned by the fw at the time of creation
 * @param npopped number of EQEs to arm
 * @param rearm rearm bit enable/disable
 * @param clearint bit to clear the interrupt condition because of which
 *		   EQEs are generated
 */
void
oce_arm_eq(POCE_SOFTC sc,
	   int16_t qid, int npopped, uint32_t rearm, uint32_t clearint)
{
	eq_db_t eq_db = { 0 };

	eq_db.bits.rearm = rearm;
	eq_db.bits.event = 1;
	eq_db.bits.num_popped = npopped;
	eq_db.bits.clrint = clearint;
	eq_db.bits.qid = qid;
	OCE_WRITE_REG32(sc, db, PD_EQ_DB, eq_db.dw0);

}


/**
 * @brief Function to arm a CQ with CQEs
 * @param sc software handle to the device
 * @param qid id of the CQ returned by the fw at the time of creation
 * @param npopped number of CQEs to arm
 * @param rearm rearm bit enable/disable
 */
void oce_arm_cq(POCE_SOFTC sc, int16_t qid, int npopped, uint32_t rearm)
{
	cq_db_t cq_db = { 0 };

	cq_db.bits.rearm = rearm;
	cq_db.bits.num_popped = npopped;
	cq_db.bits.event = 0;
	cq_db.bits.qid = qid;
	OCE_WRITE_REG32(sc, db, PD_CQ_DB, cq_db.dw0);

}


/*
 * @brief function to clean up the EQs used during stop
 * @param eq pointer to event queue structure
 * @returns the number of EQs processed
 */
void
oce_drain_eq(struct oce_eq *eq)
{

	struct oce_eqe *eqe;
	uint16_t num_eqe = 0;
	POCE_SOFTC sc = eq->parent;

	do {
		eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
		if (eqe->evnt == 0)
			break;
		eqe->evnt = 0;
		bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
				BUS_DMASYNC_POSTWRITE);
		num_eqe++;
		RING_GET(eq->ring, 1);

	} while (TRUE);

	oce_arm_eq(sc, eq->eq_id, num_eqe, FALSE, TRUE);

}


void
oce_drain_wq_cq(struct oce_wq *wq)
{
	POCE_SOFTC sc = wq->parent;
	struct oce_cq *cq = wq->cq;
	struct oce_nic_tx_cqe *cqe;
	int num_cqes = 0;

	bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map,
			BUS_DMASYNC_POSTWRITE);

	do {
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
		if (cqe->u0.dw[3] == 0)
			break;
		cqe->u0.dw[3] = 0;
		bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map,
				BUS_DMASYNC_POSTWRITE);
		RING_GET(cq->ring, 1);
		num_cqes++;

	} while (TRUE);

	oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);

}


/*
 * @brief function to drain an MCQ and process its CQEs
 * @param dev software handle to the device
 * @param cq pointer to the cq to drain
 * @returns the number of CQEs processed
 */
void
oce_drain_mq_cq(void *arg)
{
	/* TODO: additional code. */
	return;
}


/**
 * @brief function to process a Receive queue
 * @param rq pointer to the RQ to drain
 * @return number of cqes processed
 */
void
oce_drain_rq_cq(struct oce_rq *rq)
{
	struct oce_nic_rx_cqe *cqe;
	uint16_t num_cqe = 0;
	struct oce_cq *cq;
	POCE_SOFTC sc;

	sc = rq->parent;
	cq = rq->cq;
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
	/* dequeue till you reach an invalid cqe */
	while (RQ_CQE_VALID(cqe)) {
		RQ_CQE_INVALIDATE(cqe);
		RING_GET(cq->ring, 1);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
						struct oce_nic_rx_cqe);
		num_cqe++;
	}
	oce_arm_cq(sc, cq->cq_id, num_cqe, FALSE);

	return;
}


void
oce_free_posted_rxbuf(struct oce_rq *rq)
{
	struct oce_packet_desc *pd;

	while (rq->pending) {

		pd = &rq->pckts[rq->packets_out];
		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(rq->tag, pd->map);
		if (pd->mbuf != NULL) {
			m_freem(pd->mbuf);
			pd->mbuf = NULL;
		}

		if ((rq->packets_out + 1) == OCE_RQ_PACKET_ARRAY_SIZE)
			rq->packets_out = 0;
		else
			rq->packets_out++;

		rq->pending--;
	}

}

void
oce_stop_rx(POCE_SOFTC sc)
{
	struct oce_mbx mbx;
	struct mbx_delete_nic_rq *fwcmd;
	struct oce_rq *rq;
	int i = 0;

	for_all_rq_queues(sc, rq, i) {
		if (rq->qstate == QCREATED) {
			/* Delete rxq in firmware */

			bzero(&mbx, sizeof(mbx));
			fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
			fwcmd->params.req.rq_id = rq->rq_id;

			(void)oce_destroy_q(sc, &mbx,
			    sizeof(struct mbx_delete_nic_rq), QTYPE_RQ);

			rq->qstate = QDELETED;

			DELAY(1);

			/* Free posted RX buffers that are not used */
			oce_free_posted_rxbuf(rq);

		}
	}
}


int
oce_start_rx(POCE_SOFTC sc)
{
	struct oce_rq *rq;
	int rc = 0, i;

	for_all_rq_queues(sc, rq, i) {
		if (rq->qstate == QCREATED)
			continue;
		rc = oce_mbox_create_rq(rq);
		if (rc)
			goto error;
		/* reset queue pointers */
		rq->qstate = QCREATED;
		rq->pending = 0;
		rq->ring->cidx = 0;
		rq->ring->pidx = 0;
		rq->packets_in = 0;
		rq->packets_out = 0;
	}

	DELAY(1);

	/* RSS config */
	if (is_rss_enabled(sc)) {
		rc = oce_config_nic_rss(sc, (uint8_t) sc->if_id, RSS_ENABLE);
		if (rc)
			goto error;

	}

	return rc;
error:
	device_printf(sc->dev, "Start RX failed\n");
	return rc;

}
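
/*
 * Illustrative bring-up/tear-down order for the routines in this file.
 * This is a sketch only; the actual sequencing is driven by the attach,
 * ioctl and detach paths elsewhere in the driver (assumed to live in
 * oce_if.c), not by this file.
 *
 *	oce_queue_init_all(sc);		allocate WQs, RQs, EQ/CQ pairs and MQ
 *	oce_start_wq(wq), oce_start_mq(mq);	arm the TX and MCC CQs
 *	oce_start_rx(sc);		create RQs in firmware, enable RSS
 *	oce_start_rq(rq);		post receive buffers, arm the RX CQ
 *	...
 *	oce_stop_rx(sc);		delete RQs, reclaim posted mbufs
 *	oce_queue_release_all(sc);	destroy all queues and free resources
 */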