/*-
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#include <sys/types.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/sysctl.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_vlan_var.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>

#include "common/common.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "common/t4_msg.h"
#include "common/t4fw_interface.h"

struct fl_buf_info {
	int size;
	int type;
	uma_zone_t zone;
};

/* Filled up by t4_sge_modload */
static struct fl_buf_info fl_buf_info[FL_BUF_SIZES];

#define FL_BUF_SIZE(x)	(fl_buf_info[x].size)
#define FL_BUF_TYPE(x)	(fl_buf_info[x].type)
#define FL_BUF_ZONE(x)	(fl_buf_info[x].zone)

enum {
	FL_PKTSHIFT = 2
};

#define FL_ALIGN	min(CACHE_LINE_SIZE, 32)
#if CACHE_LINE_SIZE > 64
#define SPG_LEN		128
#else
#define SPG_LEN		64
#endif

/* Used to track coalesced tx work request */
struct txpkts {
	uint64_t *flitp;	/* ptr to flit where next pkt should start */
	uint8_t npkt;		/* # of packets in this work request */
	uint8_t nflits;		/* # of flits used by this work request */
	uint16_t plen;		/* total payload (sum of all packets) */
};
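/*
 * Note: a "flit", as counted in the nflits fields here and below, is one
 * 8-byte unit of a work request; a tx descriptor is accounted as 8 flits
 * (see the howmany(nflits, 8) calculations later in this file).
 */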
/* A packet's SGL.  This + m_pkthdr has all info needed for tx */
struct sgl {
	int nsegs;		/* # of segments in the SGL, 0 means imm. tx */
	int nflits;		/* # of flits needed for the SGL */
	bus_dma_segment_t seg[TX_SGL_SEGS];
};

static inline void init_iq(struct sge_iq *, struct adapter *, int, int, int,
    int, iq_intr_handler_t *, char *);
static inline void init_fl(struct sge_fl *, int, char *);
static inline void init_eq(struct sge_eq *, int, char *);
static int alloc_ring(struct adapter *, size_t, bus_dma_tag_t *, bus_dmamap_t *,
    bus_addr_t *, void **);
static int free_ring(struct adapter *, bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
    void *);
static int alloc_iq_fl(struct port_info *, struct sge_iq *, struct sge_fl *,
    int);
static int free_iq_fl(struct port_info *, struct sge_iq *, struct sge_fl *);
static int alloc_iq(struct sge_iq *, int);
static int free_iq(struct sge_iq *);
static int alloc_rxq(struct port_info *, struct sge_rxq *, int, int);
static int free_rxq(struct port_info *, struct sge_rxq *);
static int alloc_ctrlq(struct adapter *, struct sge_ctrlq *, int);
static int free_ctrlq(struct adapter *, struct sge_ctrlq *);
static int alloc_txq(struct port_info *, struct sge_txq *, int);
static int free_txq(struct port_info *, struct sge_txq *);
static void oneseg_dma_callback(void *, bus_dma_segment_t *, int, int);
static inline bool is_new_response(const struct sge_iq *, struct rsp_ctrl **);
static inline void iq_next(struct sge_iq *);
static inline void ring_fl_db(struct adapter *, struct sge_fl *);
static void refill_fl(struct sge_fl *, int);
static int alloc_fl_sdesc(struct sge_fl *);
static void free_fl_sdesc(struct sge_fl *);
static int alloc_tx_maps(struct sge_txq *);
static void free_tx_maps(struct sge_txq *);
static void set_fl_tag_idx(struct sge_fl *, int);

static int get_pkt_sgl(struct sge_txq *, struct mbuf **, struct sgl *, int);
static int free_pkt_sgl(struct sge_txq *, struct sgl *);
static int write_txpkt_wr(struct port_info *, struct sge_txq *, struct mbuf *,
    struct sgl *);
static int add_to_txpkts(struct port_info *, struct sge_txq *, struct txpkts *,
    struct mbuf *, struct sgl *);
static void write_txpkts_wr(struct sge_txq *, struct txpkts *);
static inline void write_ulp_cpl_sgl(struct port_info *, struct sge_txq *,
    struct txpkts *, struct mbuf *, struct sgl *);
static int write_sgl_to_txd(struct sge_eq *, struct sgl *, caddr_t *);
static inline void copy_to_txd(struct sge_eq *, caddr_t, caddr_t *, int);
static inline void ring_eq_db(struct adapter *, struct sge_eq *);
static inline int reclaimable(struct sge_eq *);
static int reclaim_tx_descs(struct sge_txq *, int, int);
static void write_eqflush_wr(struct sge_eq *);
static __be64 get_flit(bus_dma_segment_t *, int, int);
static int handle_sge_egr_update(struct adapter *,
    const struct cpl_sge_egr_update *);

static int ctrl_tx(struct adapter *, struct sge_ctrlq *, struct mbuf *);

/*
 * Called on MOD_LOAD and fills up fl_buf_info[].
 */
void
t4_sge_modload(void)
{
	int i;
	int bufsize[FL_BUF_SIZES] = {
		MCLBYTES,
#if MJUMPAGESIZE != MCLBYTES
		MJUMPAGESIZE,
#endif
		MJUM9BYTES,
		MJUM16BYTES
	};

	for (i = 0; i < FL_BUF_SIZES; i++) {
		FL_BUF_SIZE(i) = bufsize[i];
		FL_BUF_TYPE(i) = m_gettype(bufsize[i]);
		FL_BUF_ZONE(i) = m_getzone(bufsize[i]);
	}
}

/**
 * t4_sge_init - initialize SGE
 * @sc: the adapter
 *
 * Performs SGE initialization needed every time after a chip reset.
 * We do not initialize any of the queues here, instead the driver
 * top-level must request them individually.
 */
void
t4_sge_init(struct adapter *sc)
{
	struct sge *s = &sc->sge;
	int i;

	t4_set_reg_field(sc, A_SGE_CONTROL, V_PKTSHIFT(M_PKTSHIFT) |
	    V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
	    F_EGRSTATUSPAGESIZE,
	    V_INGPADBOUNDARY(ilog2(FL_ALIGN) - 5) |
	    V_PKTSHIFT(FL_PKTSHIFT) |
	    F_RXPKTCPLMODE |
	    V_EGRSTATUSPAGESIZE(SPG_LEN == 128));
	t4_set_reg_field(sc, A_SGE_HOST_PAGE_SIZE,
	    V_HOSTPAGESIZEPF0(M_HOSTPAGESIZEPF0),
	    V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10));

	for (i = 0; i < FL_BUF_SIZES; i++) {
		t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE0 + (4 * i),
		    FL_BUF_SIZE(i));
	}

	t4_write_reg(sc, A_SGE_INGRESS_RX_THRESHOLD,
	    V_THRESHOLD_0(s->counter_val[0]) |
	    V_THRESHOLD_1(s->counter_val[1]) |
	    V_THRESHOLD_2(s->counter_val[2]) |
	    V_THRESHOLD_3(s->counter_val[3]));

	t4_write_reg(sc, A_SGE_TIMER_VALUE_0_AND_1,
	    V_TIMERVALUE0(us_to_core_ticks(sc, s->timer_val[0])) |
	    V_TIMERVALUE1(us_to_core_ticks(sc, s->timer_val[1])));
	t4_write_reg(sc, A_SGE_TIMER_VALUE_2_AND_3,
	    V_TIMERVALUE2(us_to_core_ticks(sc, s->timer_val[2])) |
	    V_TIMERVALUE3(us_to_core_ticks(sc, s->timer_val[3])));
	t4_write_reg(sc, A_SGE_TIMER_VALUE_4_AND_5,
	    V_TIMERVALUE4(us_to_core_ticks(sc, s->timer_val[4])) |
	    V_TIMERVALUE5(us_to_core_ticks(sc, s->timer_val[5])));
}

int
t4_create_dma_tag(struct adapter *sc)
{
	int rc;

	rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE,
	    BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL,
	    NULL, &sc->dmat);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to create main DMA tag: %d\n", rc);
	}

	return (rc);
}

int
t4_destroy_dma_tag(struct adapter *sc)
{
	if (sc->dmat)
		bus_dma_tag_destroy(sc->dmat);

	return (0);
}

/*
 * Allocate and initialize the firmware event queue, control queues, and the
 * forwarded interrupt queues (if any).  The adapter owns all these queues as
 * they are not associated with any particular port.
 *
 * Returns errno on failure.  Resources allocated up to that point may still be
 * allocated.  Caller is responsible for cleanup in case this function fails.
 */
int
t4_setup_adapter_queues(struct adapter *sc)
{
	int i, rc;
	struct sge_iq *iq, *fwq;
	struct sge_ctrlq *ctrlq;
	iq_intr_handler_t *handler;
	char name[16];

	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);

	if (sysctl_ctx_init(&sc->ctx) == 0) {
		struct sysctl_oid *oid = device_get_sysctl_tree(sc->dev);
		struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);

		sc->oid_ctrlq = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO,
		    "ctrlq", CTLFLAG_RD, NULL, "ctrl queues");
	}

	fwq = &sc->sge.fwq;
	if (sc->flags & INTR_FWD) {
		iq = &sc->sge.fiq[0];

		/*
		 * Forwarded interrupt queues - allocate 1 if there's only 1
		 * vector available, one less than the number of vectors
		 * otherwise (the first vector is reserved for the error
		 * interrupt in that case).
		 */
		i = sc->intr_count > 1 ? 1 : 0;
		for (; i < sc->intr_count; i++, iq++) {

			snprintf(name, sizeof(name), "%s fiq%d",
			    device_get_nameunit(sc->dev), i);
			init_iq(iq, sc, 0, 0, (sc->sge.nrxq + 1) * 2, 16, NULL,
			    name);

			rc = alloc_iq(iq, i);
			if (rc != 0) {
				device_printf(sc->dev,
				    "failed to create fwd intr queue %d: %d\n",
				    i, rc);
				return (rc);
			}
		}

		handler = t4_evt_rx;
		i = 0;	/* forward fwq's interrupt to the first fiq */
	} else {
		handler = NULL;
		i = 1;	/* fwq should use vector 1 (0 is used by error) */
	}

	snprintf(name, sizeof(name), "%s fwq", device_get_nameunit(sc->dev));
	init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE, FW_IQ_ESIZE, handler, name);
	rc = alloc_iq(fwq, i);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to create firmware event queue: %d\n", rc);

		return (rc);
	}

	/*
	 * Control queues - one per hardware channel.
	 */
	ctrlq = &sc->sge.ctrlq[0];
	for (i = 0; i < NCHAN; i++, ctrlq++) {
		snprintf(name, sizeof(name), "%s ctrlq%d",
		    device_get_nameunit(sc->dev), i);
		init_eq(&ctrlq->eq, CTRL_EQ_QSIZE, name);

		rc = alloc_ctrlq(sc, ctrlq, i);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to create control queue %d: %d\n", i, rc);
			return (rc);
		}
	}

	return (rc);
}

/*
 * Idempotent
 */
int
t4_teardown_adapter_queues(struct adapter *sc)
{
	int i;
	struct sge_iq *iq;

	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);

	/* Do this before freeing the queues */
	if (sc->oid_ctrlq) {
		sysctl_ctx_free(&sc->ctx);
		sc->oid_ctrlq = NULL;
	}

	for (i = 0; i < NCHAN; i++)
		free_ctrlq(sc, &sc->sge.ctrlq[i]);

	iq = &sc->sge.fwq;
	free_iq(iq);
	if (sc->flags & INTR_FWD) {
		for (i = 0; i < NFIQ(sc); i++) {
			iq = &sc->sge.fiq[i];
			free_iq(iq);
		}
	}

	return (0);
}

int
t4_setup_eth_queues(struct port_info *pi)
{
	int rc = 0, i, intr_idx;
	struct sge_rxq *rxq;
	struct sge_txq *txq;
	char name[16];
	struct adapter *sc = pi->adapter;

	if (sysctl_ctx_init(&pi->ctx) == 0) {
		struct sysctl_oid *oid = device_get_sysctl_tree(pi->dev);
		struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);

		pi->oid_rxq = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO,
		    "rxq", CTLFLAG_RD, NULL, "rx queues");
		pi->oid_txq = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO,
		    "txq", CTLFLAG_RD, NULL, "tx queues");
	}
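	/*
	 * Interrupt vector layout when interrupts are not forwarded: vector 0
	 * is the error interrupt, vector 1 the firmware event queue, and the
	 * rx queues start at vector 2 (hence the "+ 2" below).  With INTR_FWD
	 * the rx queues share the forwarded interrupt queues instead.
	 */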
	for_each_rxq(pi, i, rxq) {

		snprintf(name, sizeof(name), "%s rxq%d-iq",
		    device_get_nameunit(pi->dev), i);
		init_iq(&rxq->iq, sc, pi->tmr_idx, pi->pktc_idx,
		    pi->qsize_rxq, RX_IQ_ESIZE,
		    sc->flags & INTR_FWD ? t4_eth_rx : NULL, name);

		snprintf(name, sizeof(name), "%s rxq%d-fl",
		    device_get_nameunit(pi->dev), i);
		init_fl(&rxq->fl, pi->qsize_rxq / 8, name);

		if (sc->flags & INTR_FWD)
			intr_idx = (pi->first_rxq + i) % NFIQ(sc);
		else
			intr_idx = pi->first_rxq + i + 2;

		rc = alloc_rxq(pi, rxq, intr_idx, i);
		if (rc != 0)
			goto done;

		intr_idx++;
	}

	for_each_txq(pi, i, txq) {

		snprintf(name, sizeof(name), "%s txq%d",
		    device_get_nameunit(pi->dev), i);
		init_eq(&txq->eq, pi->qsize_txq, name);

		rc = alloc_txq(pi, txq, i);
		if (rc != 0)
			goto done;
	}

done:
	if (rc)
		t4_teardown_eth_queues(pi);

	return (rc);
}

/*
 * Idempotent
 */
int
t4_teardown_eth_queues(struct port_info *pi)
{
	int i;
	struct sge_rxq *rxq;
	struct sge_txq *txq;

	/* Do this before freeing the queues */
	if (pi->oid_txq || pi->oid_rxq) {
		sysctl_ctx_free(&pi->ctx);
		pi->oid_txq = pi->oid_rxq = NULL;
	}

	for_each_txq(pi, i, txq) {
		free_txq(pi, txq);
	}

	for_each_rxq(pi, i, rxq) {
		free_rxq(pi, rxq);
	}

	return (0);
}

/* Deals with errors and forwarded interrupts */
void
t4_intr_all(void *arg)
{
	struct adapter *sc = arg;

	t4_intr_err(arg);
	t4_intr_fwd(&sc->sge.fiq[0]);
}

/* Deals with forwarded interrupts on the given ingress queue */
void
t4_intr_fwd(void *arg)
{
	struct sge_iq *iq = arg, *q;
	struct adapter *sc = iq->adapter;
	struct rsp_ctrl *ctrl;
	int ndesc_pending = 0, ndesc_total = 0;
	int qid;

	if (!atomic_cmpset_32(&iq->state, IQS_IDLE, IQS_BUSY))
		return;

	while (is_new_response(iq, &ctrl)) {

		rmb();

		/* Only interrupt muxing expected on this queue */
		KASSERT(G_RSPD_TYPE(ctrl->u.type_gen) == X_RSPD_TYPE_INTR,
		    ("unexpected event on forwarded interrupt queue: %x",
		    G_RSPD_TYPE(ctrl->u.type_gen)));

		qid = ntohl(ctrl->pldbuflen_qid) - sc->sge.iq_start;
		q = sc->sge.iqmap[qid];

		q->handler(q);

		ndesc_total++;
		if (++ndesc_pending >= iq->qsize / 4) {
			t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
			    V_CIDXINC(ndesc_pending) |
			    V_INGRESSQID(iq->cntxt_id) |
			    V_SEINTARM(
				V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
			ndesc_pending = 0;
		}

		iq_next(iq);
	}

	if (ndesc_total > 0) {
		t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
		    V_CIDXINC(ndesc_pending) | V_INGRESSQID((u32)iq->cntxt_id) |
		    V_SEINTARM(iq->intr_params));
	}

	atomic_cmpset_32(&iq->state, IQS_BUSY, IQS_IDLE);
}

/* Deals with error interrupts */
void
t4_intr_err(void *arg)
{
	struct adapter *sc = arg;

	if (sc->intr_type == INTR_INTX)
		t4_write_reg(sc, MYPF_REG(A_PCIE_PF_CLI), 0);

	t4_slow_intr_handler(sc);
}

/* Deals with the firmware event queue */
void
t4_intr_evt(void *arg)
{
	struct sge_iq *iq = arg;

	if (!atomic_cmpset_32(&iq->state, IQS_IDLE, IQS_BUSY))
		return;

	t4_evt_rx(arg);

	atomic_cmpset_32(&iq->state, IQS_BUSY, IQS_IDLE);
}

void
t4_intr_data(void *arg)
{
	struct sge_iq *iq = arg;

	if (!atomic_cmpset_32(&iq->state, IQS_IDLE, IQS_BUSY))
		return;

	t4_eth_rx(arg);

	atomic_cmpset_32(&iq->state, IQS_BUSY, IQS_IDLE);
}
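/*
 * Handler for the firmware event queue.  Depending on the interrupt setup it
 * is invoked either directly from t4_intr_evt or through a forwarded
 * interrupt queue (see t4_setup_adapter_queues).
 */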
void
t4_evt_rx(void *arg)
{
	struct sge_iq *iq = arg;
	struct adapter *sc = iq->adapter;
	struct rsp_ctrl *ctrl;
	const struct rss_header *rss;
	int ndesc_pending = 0, ndesc_total = 0;

	KASSERT(iq == &sc->sge.fwq, ("%s: unexpected ingress queue", __func__));

	while (is_new_response(iq, &ctrl)) {

		rmb();

		rss = (const void *)iq->cdesc;

		/* Should only get CPL on this queue */
		KASSERT(G_RSPD_TYPE(ctrl->u.type_gen) == X_RSPD_TYPE_CPL,
		    ("%s: unexpected type %d", __func__,
		    G_RSPD_TYPE(ctrl->u.type_gen)));

		switch (rss->opcode) {
		case CPL_FW4_MSG:
		case CPL_FW6_MSG: {
			const struct cpl_fw6_msg *cpl;

			cpl = (const void *)(rss + 1);
			if (cpl->type == FW6_TYPE_CMD_RPL)
				t4_handle_fw_rpl(sc, cpl->data);

			break;
		}
		case CPL_SGE_EGR_UPDATE:
			handle_sge_egr_update(sc, (const void *)(rss + 1));
			break;

		default:
			device_printf(sc->dev,
			    "can't handle CPL opcode %d.", rss->opcode);
		}

		ndesc_total++;
		if (++ndesc_pending >= iq->qsize / 4) {
			t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
			    V_CIDXINC(ndesc_pending) |
			    V_INGRESSQID(iq->cntxt_id) |
			    V_SEINTARM(
				V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
			ndesc_pending = 0;
		}
		iq_next(iq);
	}

	if (ndesc_total > 0) {
		t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
		    V_CIDXINC(ndesc_pending) | V_INGRESSQID(iq->cntxt_id) |
		    V_SEINTARM(iq->intr_params));
	}
}

void
t4_eth_rx(void *arg)
{
	struct sge_rxq *rxq = arg;
	struct sge_iq *iq = arg;
	struct adapter *sc = iq->adapter;
	struct rsp_ctrl *ctrl;
	struct ifnet *ifp = rxq->ifp;
	struct sge_fl *fl = &rxq->fl;
	struct fl_sdesc *sd = &fl->sdesc[fl->cidx], *sd_next;
	const struct rss_header *rss;
	const struct cpl_rx_pkt *cpl;
	uint32_t len;
	int ndescs = 0, i;
	struct mbuf *m0, *m;
#ifdef INET
	struct lro_ctrl *lro = &rxq->lro;
	struct lro_entry *l;
#endif

	prefetch(sd->m);
	prefetch(sd->cl);

	iq->intr_next = iq->intr_params;
	while (is_new_response(iq, &ctrl)) {

		rmb();

		rss = (const void *)iq->cdesc;
		i = G_RSPD_TYPE(ctrl->u.type_gen);

		if (__predict_false(i == X_RSPD_TYPE_CPL)) {

			/* Can't be anything except an egress update */
			KASSERT(rss->opcode == CPL_SGE_EGR_UPDATE,
			    ("%s: unexpected CPL %x", __func__, rss->opcode));

			handle_sge_egr_update(sc, (const void *)(rss + 1));
			goto nextdesc;
		}
		KASSERT(i == X_RSPD_TYPE_FLBUF && rss->opcode == CPL_RX_PKT,
		    ("%s: unexpected CPL %x rsp %d", __func__, rss->opcode, i));

		sd_next = sd + 1;
		if (__predict_false(fl->cidx + 1 == fl->cap))
			sd_next = fl->sdesc;
		prefetch(sd_next->m);
		prefetch(sd_next->cl);

		cpl = (const void *)(rss + 1);

		m0 = sd->m;
		sd->m = NULL;	/* consumed */

		len = be32toh(ctrl->pldbuflen_qid);
		if (__predict_false((len & F_RSPD_NEWBUF) == 0))
			panic("%s: cannot handle packed frames", __func__);
		len = G_RSPD_LEN(len);

		bus_dmamap_sync(fl->tag[sd->tag_idx], sd->map,
		    BUS_DMASYNC_POSTREAD);

		m_init(m0, NULL, 0, M_NOWAIT, MT_DATA, M_PKTHDR);
		if (len < MINCLSIZE) {
			/* copy data to mbuf, buffer will be recycled */
			bcopy(sd->cl, mtod(m0, caddr_t), len);
			m0->m_len = len;
		} else {
			bus_dmamap_unload(fl->tag[sd->tag_idx], sd->map);
			m_cljset(m0, sd->cl, FL_BUF_TYPE(sd->tag_idx));
			sd->cl = NULL;	/* consumed */
			m0->m_len = min(len, FL_BUF_SIZE(sd->tag_idx));
		}
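		/*
		 * The SGE was programmed (in t4_sge_init) to write each frame
		 * FL_PKTSHIFT bytes into the buffer; strip that padding here.
		 * The usual reason for the 2 byte shift is to 32-bit align the
		 * IP header that follows the 14 byte Ethernet header.
		 */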
		len -= FL_PKTSHIFT;
		m0->m_len -= FL_PKTSHIFT;
		m0->m_data += FL_PKTSHIFT;

		m0->m_pkthdr.len = len;
		m0->m_pkthdr.rcvif = ifp;
		m0->m_flags |= M_FLOWID;
		m0->m_pkthdr.flowid = rss->hash_val;

		if (cpl->csum_calc && !cpl->err_vec &&
		    ifp->if_capenable & IFCAP_RXCSUM) {
			m0->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED |
			    CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
			if (cpl->ip_frag)
				m0->m_pkthdr.csum_data = be16toh(cpl->csum);
			else
				m0->m_pkthdr.csum_data = 0xffff;
			rxq->rxcsum++;
		}

		if (cpl->vlan_ex) {
			m0->m_pkthdr.ether_vtag = be16toh(cpl->vlan);
			m0->m_flags |= M_VLANTAG;
			rxq->vlan_extraction++;
		}

		i = 1;	/* # of fl sdesc used */
		sd = sd_next;
		if (__predict_false(++fl->cidx == fl->cap))
			fl->cidx = 0;

		len -= m0->m_len;
		m = m0;
		while (len) {
			i++;

			sd_next = sd + 1;
			if (__predict_false(fl->cidx + 1 == fl->cap))
				sd_next = fl->sdesc;
			prefetch(sd_next->m);
			prefetch(sd_next->cl);

			m->m_next = sd->m;
			sd->m = NULL;	/* consumed */
			m = m->m_next;

			bus_dmamap_sync(fl->tag[sd->tag_idx], sd->map,
			    BUS_DMASYNC_POSTREAD);

			m_init(m, NULL, 0, M_NOWAIT, MT_DATA, 0);
			if (len <= MLEN) {
				bcopy(sd->cl, mtod(m, caddr_t), len);
				m->m_len = len;
			} else {
				bus_dmamap_unload(fl->tag[sd->tag_idx],
				    sd->map);
				m_cljset(m, sd->cl, FL_BUF_TYPE(sd->tag_idx));
				sd->cl = NULL;	/* consumed */
				m->m_len = min(len, FL_BUF_SIZE(sd->tag_idx));
			}

			sd = sd_next;
			if (__predict_false(++fl->cidx == fl->cap))
				fl->cidx = 0;

			len -= m->m_len;
		}

#ifdef INET
		if (cpl->l2info & htobe32(F_RXF_LRO) &&
		    rxq->flags & RXQ_LRO_ENABLED &&
		    tcp_lro_rx(lro, m0, 0) == 0) {
			/* queued for LRO */
		} else
#endif
		ifp->if_input(ifp, m0);

		FL_LOCK(fl);
		fl->needed += i;
		if (fl->needed >= 32)
			refill_fl(fl, 64);
		if (fl->pending >= 32)
			ring_fl_db(sc, fl);
		FL_UNLOCK(fl);

nextdesc:	ndescs++;
		iq_next(iq);

		if (ndescs > 32) {
			t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
			    V_CIDXINC(ndescs) |
			    V_INGRESSQID((u32)iq->cntxt_id) |
			    V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
			ndescs = 0;
		}
	}

#ifdef INET
	while (!SLIST_EMPTY(&lro->lro_active)) {
		l = SLIST_FIRST(&lro->lro_active);
		SLIST_REMOVE_HEAD(&lro->lro_active, next);
		tcp_lro_flush(lro, l);
	}
#endif

	t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_CIDXINC(ndescs) |
	    V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_next));

	FL_LOCK(fl);
	if (fl->needed >= 32)
		refill_fl(fl, 128);
	if (fl->pending >= 8)
		ring_fl_db(sc, fl);
	FL_UNLOCK(fl);
}

int
t4_mgmt_tx(struct adapter *sc, struct mbuf *m)
{
	return ctrl_tx(sc, &sc->sge.ctrlq[0], m);
}

/* Per-packet header in a coalesced tx WR, before the SGL starts (in flits) */
#define TXPKTS_PKT_HDR ((\
    sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + \
    sizeof(struct cpl_tx_pkt_core) \
    ) / 8)

/* Header of a coalesced tx WR, before SGL of first packet (in flits) */
#define TXPKTS_WR_HDR (\
    sizeof(struct fw_eth_tx_pkts_wr) / 8 + \
    TXPKTS_PKT_HDR)

/* Header of a tx WR, before SGL of first packet (in flits) */
#define TXPKT_WR_HDR ((\
    sizeof(struct fw_eth_tx_pkt_wr) + \
    sizeof(struct cpl_tx_pkt_core) \
    ) / 8 )
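/*
 * Assuming the usual 16 byte fw_eth_tx_pkt_wr and 16 byte cpl_tx_pkt_core
 * (see t4fw_interface.h and t4_msg.h for the authoritative definitions),
 * TXPKT_WR_HDR works out to 4 flits, i.e. half of a hardware descriptor.
 */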
/* Header of a tx LSO WR, before SGL of first packet (in flits) */
#define TXPKT_LSO_WR_HDR ((\
    sizeof(struct fw_eth_tx_pkt_wr) + \
    sizeof(struct cpl_tx_pkt_lso) + \
    sizeof(struct cpl_tx_pkt_core) \
    ) / 8 )

int
t4_eth_tx(struct ifnet *ifp, struct sge_txq *txq, struct mbuf *m)
{
	struct port_info *pi = (void *)ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct sge_eq *eq = &txq->eq;
	struct buf_ring *br = txq->br;
	struct mbuf *next;
	int rc, coalescing, can_reclaim;
	struct txpkts txpkts;
	struct sgl sgl;

	TXQ_LOCK_ASSERT_OWNED(txq);
	KASSERT(m, ("%s: called with nothing to do.", __func__));

	prefetch(&eq->desc[eq->pidx]);
	prefetch(&txq->sdesc[eq->pidx]);

	txpkts.npkt = 0;	/* indicates there's nothing in txpkts */
	coalescing = 0;

	if (eq->avail < 8)
		reclaim_tx_descs(txq, 0, 8);

	for (; m; m = next ? next : drbr_dequeue(ifp, br)) {

		if (eq->avail < 8)
			break;

		next = m->m_nextpkt;
		m->m_nextpkt = NULL;

		if (next || buf_ring_peek(br))
			coalescing = 1;

		rc = get_pkt_sgl(txq, &m, &sgl, coalescing);
		if (rc != 0) {
			if (rc == ENOMEM) {

				/* Short of resources, suspend tx */

				m->m_nextpkt = next;
				break;
			}

			/*
			 * Unrecoverable error for this packet, throw it away
			 * and move on to the next.  get_pkt_sgl may already
			 * have freed m (it will be NULL in that case and the
			 * m_freem here is still safe).
			 */

			m_freem(m);
			continue;
		}

		if (coalescing &&
		    add_to_txpkts(pi, txq, &txpkts, m, &sgl) == 0) {

			/* Successfully absorbed into txpkts */

			write_ulp_cpl_sgl(pi, txq, &txpkts, m, &sgl);
			goto doorbell;
		}

		/*
		 * We weren't coalescing to begin with, or current frame could
		 * not be coalesced (add_to_txpkts flushes txpkts if a frame
		 * given to it can't be coalesced).  Either way there should be
		 * nothing in txpkts.
		 */
		KASSERT(txpkts.npkt == 0,
		    ("%s: txpkts not empty: %d", __func__, txpkts.npkt));

		/* We're sending out individual packets now */
		coalescing = 0;

		if (eq->avail < 8)
			reclaim_tx_descs(txq, 0, 8);
		rc = write_txpkt_wr(pi, txq, m, &sgl);
		if (rc != 0) {

			/* Short of hardware descriptors, suspend tx */

			/*
			 * This is an unlikely but expensive failure.  We've
			 * done all the hard work (DMA mappings etc.) and now we
			 * can't send out the packet.  What's worse, we have to
			 * spend even more time freeing up everything in sgl.
			 */
			txq->no_desc++;
			free_pkt_sgl(txq, &sgl);

			m->m_nextpkt = next;
			break;
		}

		ETHER_BPF_MTAP(ifp, m);
		if (sgl.nsegs == 0)
			m_freem(m);

doorbell:
		/* Fewer and fewer doorbells as the queue fills up */
		if (eq->pending >= (1 << (fls(eq->qsize - eq->avail) / 2)))
			ring_eq_db(sc, eq);

		can_reclaim = reclaimable(eq);
		if (can_reclaim >= 32)
			reclaim_tx_descs(txq, can_reclaim, 32);
	}

	if (txpkts.npkt > 0)
		write_txpkts_wr(txq, &txpkts);

	/*
	 * m not NULL means there was an error but we haven't thrown it away.
	 * This can happen when we're short of tx descriptors (no_desc) or maybe
	 * even DMA maps (no_dmamap).  Either way, a credit flush and reclaim
	 * will get things going again.
	 *
	 * If eq->avail is already 0 we know a credit flush was requested in the
	 * WR that reduced it to 0 so we don't need another flush (we don't have
	 * any descriptor for a flush WR anyway, duh).
	 */
	if (m && eq->avail > 0 && !(eq->flags & EQ_CRFLUSHED)) {
		struct tx_sdesc *txsd = &txq->sdesc[eq->pidx];

		txsd->desc_used = 1;
		txsd->credits = 0;
		write_eqflush_wr(eq);
	}
	txq->m = m;

	if (eq->pending)
		ring_eq_db(sc, eq);

	can_reclaim = reclaimable(eq);
	if (can_reclaim >= 32)
		reclaim_tx_descs(txq, can_reclaim, 128);

	return (0);
}

void
t4_update_fl_bufsize(struct ifnet *ifp)
{
	struct port_info *pi = ifp->if_softc;
	struct sge_rxq *rxq;
	struct sge_fl *fl;
	int i;

	for_each_rxq(pi, i, rxq) {
		fl = &rxq->fl;

		FL_LOCK(fl);
		set_fl_tag_idx(fl, ifp->if_mtu);
		FL_UNLOCK(fl);
	}
}

/*
 * A non-NULL handler indicates this iq will not receive direct interrupts, the
 * handler will be invoked by a forwarded interrupt queue.
 */
static inline void
init_iq(struct sge_iq *iq, struct adapter *sc, int tmr_idx, int pktc_idx,
    int qsize, int esize, iq_intr_handler_t *handler, char *name)
{
	KASSERT(tmr_idx >= 0 && tmr_idx < SGE_NTIMERS,
	    ("%s: bad tmr_idx %d", __func__, tmr_idx));
	KASSERT(pktc_idx < SGE_NCOUNTERS,	/* -ve is ok, means don't use */
	    ("%s: bad pktc_idx %d", __func__, pktc_idx));

	iq->flags = 0;
	iq->adapter = sc;
	iq->intr_params = V_QINTR_TIMER_IDX(tmr_idx) |
	    V_QINTR_CNT_EN(pktc_idx >= 0);
	iq->intr_pktc_idx = pktc_idx;
	iq->qsize = roundup(qsize, 16);		/* See FW_IQ_CMD/iqsize */
	iq->esize = max(esize, 16);		/* See FW_IQ_CMD/iqesize */
	iq->handler = handler;
	strlcpy(iq->lockname, name, sizeof(iq->lockname));
}

static inline void
init_fl(struct sge_fl *fl, int qsize, char *name)
{
	fl->qsize = qsize;
	strlcpy(fl->lockname, name, sizeof(fl->lockname));
}

static inline void
init_eq(struct sge_eq *eq, int qsize, char *name)
{
	eq->qsize = qsize;
	strlcpy(eq->lockname, name, sizeof(eq->lockname));
}

static int
alloc_ring(struct adapter *sc, size_t len, bus_dma_tag_t *tag,
    bus_dmamap_t *map, bus_addr_t *pa, void **va)
{
	int rc;

	rc = bus_dma_tag_create(sc->dmat, 512, 0, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR, NULL, NULL, len, 1, len, 0, NULL, NULL, tag);
	if (rc != 0) {
		device_printf(sc->dev, "cannot allocate DMA tag: %d\n", rc);
		goto done;
	}

	rc = bus_dmamem_alloc(*tag, va,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, map);
	if (rc != 0) {
		device_printf(sc->dev, "cannot allocate DMA memory: %d\n", rc);
		goto done;
	}

	rc = bus_dmamap_load(*tag, *map, *va, len, oneseg_dma_callback, pa, 0);
	if (rc != 0) {
		device_printf(sc->dev, "cannot load DMA map: %d\n", rc);
		goto done;
	}
done:
	if (rc)
		free_ring(sc, *tag, *map, *pa, *va);

	return (rc);
}

static int
free_ring(struct adapter *sc, bus_dma_tag_t tag, bus_dmamap_t map,
    bus_addr_t pa, void *va)
{
	if (pa)
		bus_dmamap_unload(tag, map);
	if (va)
		bus_dmamem_free(tag, va, map);
	if (tag)
		bus_dma_tag_destroy(tag);

	return (0);
}

/*
 * Allocates the ring for an ingress queue and an optional freelist.  If the
 * freelist is specified it will be allocated and then associated with the
 * ingress queue.
 *
 * Returns errno on failure.  Resources allocated up to that point may still be
 * allocated.
 * Caller is responsible for cleanup in case this function fails.
 *
 * If the ingress queue will take interrupts directly (iq->handler == NULL) then
 * the intr_idx specifies the vector, starting from 0.  Otherwise it specifies
 * the index of the queue to which its interrupts will be forwarded.
 */
static int
alloc_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl,
    int intr_idx)
{
	int rc, i, cntxt_id;
	size_t len;
	struct fw_iq_cmd c;
	struct adapter *sc = iq->adapter;
	__be32 v = 0;

	/* The adapter queues are nominally allocated in port[0]'s name */
	if (pi == NULL)
		pi = sc->port[0];

	len = iq->qsize * iq->esize;
	rc = alloc_ring(sc, len, &iq->desc_tag, &iq->desc_map, &iq->ba,
	    (void **)&iq->desc);
	if (rc != 0)
		return (rc);

	bzero(&c, sizeof(c));
	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) |
	    V_FW_IQ_CMD_VFN(0));

	c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
	    FW_LEN16(c));

	/* Special handling for firmware event queue */
	if (iq == &sc->sge.fwq)
		v |= F_FW_IQ_CMD_IQASYNCH;

	if (iq->handler) {
		KASSERT(intr_idx < NFIQ(sc),
		    ("%s: invalid indirect intr_idx %d", __func__, intr_idx));
		v |= F_FW_IQ_CMD_IQANDST;
		v |= V_FW_IQ_CMD_IQANDSTINDEX(sc->sge.fiq[intr_idx].abs_id);
	} else {
		KASSERT(intr_idx < sc->intr_count,
		    ("%s: invalid direct intr_idx %d", __func__, intr_idx));
		v |= V_FW_IQ_CMD_IQANDSTINDEX(intr_idx);
	}

	c.type_to_iqandstindex = htobe32(v |
	    V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
	    V_FW_IQ_CMD_VIID(pi->viid) |
	    V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
	c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
	    F_FW_IQ_CMD_IQGTSMODE |
	    V_FW_IQ_CMD_IQINTCNTTHRESH(iq->intr_pktc_idx) |
	    V_FW_IQ_CMD_IQESIZE(ilog2(iq->esize) - 4));
	c.iqsize = htobe16(iq->qsize);
	c.iqaddr = htobe64(iq->ba);

	if (fl) {
		mtx_init(&fl->fl_lock, fl->lockname, NULL, MTX_DEF);

		for (i = 0; i < FL_BUF_SIZES; i++) {

			/*
			 * A freelist buffer must be 16 byte aligned as the SGE
			 * uses the low 4 bits of the bus addr to figure out the
			 * buffer size.
			 */
			rc = bus_dma_tag_create(sc->dmat, 16, 0,
			    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
			    FL_BUF_SIZE(i), 1, FL_BUF_SIZE(i), BUS_DMA_ALLOCNOW,
			    NULL, NULL, &fl->tag[i]);
			if (rc != 0) {
				device_printf(sc->dev,
				    "failed to create fl DMA tag[%d]: %d\n",
				    i, rc);
				return (rc);
			}
		}
		len = fl->qsize * RX_FL_ESIZE;
		rc = alloc_ring(sc, len, &fl->desc_tag, &fl->desc_map,
		    &fl->ba, (void **)&fl->desc);
		if (rc)
			return (rc);

		/* Allocate space for one software descriptor per buffer. */
		fl->cap = (fl->qsize - SPG_LEN / RX_FL_ESIZE) * 8;
		FL_LOCK(fl);
		set_fl_tag_idx(fl, pi->ifp->if_mtu);
		rc = alloc_fl_sdesc(fl);
		FL_UNLOCK(fl);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to setup fl software descriptors: %d\n",
			    rc);
			return (rc);
		}
		fl->needed = fl->cap - 1; /* one less to avoid cidx = pidx */

		c.iqns_to_fl0congen =
		    htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE));
		c.fl0dcaen_to_fl0cidxfthresh =
		    htobe16(V_FW_IQ_CMD_FL0FBMIN(X_FETCHBURSTMIN_64B) |
		    V_FW_IQ_CMD_FL0FBMAX(X_FETCHBURSTMAX_512B));
		c.fl0size = htobe16(fl->qsize);
		c.fl0addr = htobe64(fl->ba);
	}

	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to create ingress queue: %d\n", rc);
		return (rc);
	}

	iq->cdesc = iq->desc;
	iq->cidx = 0;
	iq->gen = 1;
	iq->intr_next = iq->intr_params;
	iq->cntxt_id = be16toh(c.iqid);
	iq->abs_id = be16toh(c.physiqid);
	iq->flags |= (IQ_ALLOCATED | IQ_STARTED);

	cntxt_id = iq->cntxt_id - sc->sge.iq_start;
	KASSERT(cntxt_id < sc->sge.niq,
	    ("%s: iq->cntxt_id (%d) more than the max (%d)", __func__,
	    cntxt_id, sc->sge.niq - 1));
	sc->sge.iqmap[cntxt_id] = iq;

	if (fl) {
		fl->cntxt_id = be16toh(c.fl0id);
		fl->pidx = fl->cidx = 0;

		cntxt_id = fl->cntxt_id - sc->sge.eq_start;
		KASSERT(cntxt_id < sc->sge.neq,
		    ("%s: fl->cntxt_id (%d) more than the max (%d)", __func__,
		    cntxt_id, sc->sge.neq - 1));
		sc->sge.eqmap[cntxt_id] = (void *)fl;

		FL_LOCK(fl);
		refill_fl(fl, -1);
		if (fl->pending >= 8)
			ring_fl_db(sc, fl);
		FL_UNLOCK(fl);
	}

	/* Enable IQ interrupts */
	atomic_store_rel_32(&iq->state, IQS_IDLE);
	t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_SEINTARM(iq->intr_params) |
	    V_INGRESSQID(iq->cntxt_id));

	return (0);
}

/*
 * This can be called with the iq/fl in any state - fully allocated and
 * functional, partially allocated, even all-zeroed out.
 */
static int
free_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl)
{
	int i, rc;
	struct adapter *sc = iq->adapter;
	device_t dev;

	if (sc == NULL)
		return (0);	/* nothing to do */

	dev = pi ? pi->dev : sc->dev;

	if (iq->flags & IQ_STARTED) {
		rc = -t4_iq_start_stop(sc, sc->mbox, 0, sc->pf, 0,
		    iq->cntxt_id, fl ? fl->cntxt_id : 0xffff, 0xffff);
		if (rc != 0) {
			device_printf(dev,
			    "failed to stop queue %p: %d\n", iq, rc);
			return (rc);
		}
		iq->flags &= ~IQ_STARTED;

		/* Synchronize with the interrupt handler */
		while (!atomic_cmpset_32(&iq->state, IQS_IDLE, IQS_DISABLED))
			pause("iqfree", hz / 1000);
	}

	if (iq->flags & IQ_ALLOCATED) {

		rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0,
		    FW_IQ_TYPE_FL_INT_CAP, iq->cntxt_id,
		    fl ? fl->cntxt_id : 0xffff, 0xffff);
		if (rc != 0) {
			device_printf(dev,
			    "failed to free queue %p: %d\n", iq, rc);
			return (rc);
		}
		iq->flags &= ~IQ_ALLOCATED;
	}

	free_ring(sc, iq->desc_tag, iq->desc_map, iq->ba, iq->desc);

	bzero(iq, sizeof(*iq));

	if (fl) {
		free_ring(sc, fl->desc_tag, fl->desc_map, fl->ba,
		    fl->desc);

		if (fl->sdesc) {
			FL_LOCK(fl);
			free_fl_sdesc(fl);
			FL_UNLOCK(fl);
		}

		if (mtx_initialized(&fl->fl_lock))
			mtx_destroy(&fl->fl_lock);

		for (i = 0; i < FL_BUF_SIZES; i++) {
			if (fl->tag[i])
				bus_dma_tag_destroy(fl->tag[i]);
		}

		bzero(fl, sizeof(*fl));
	}

	return (0);
}

static int
alloc_iq(struct sge_iq *iq, int intr_idx)
{
	return alloc_iq_fl(NULL, iq, NULL, intr_idx);
}

static int
free_iq(struct sge_iq *iq)
{
	return free_iq_fl(NULL, iq, NULL);
}

static int
alloc_rxq(struct port_info *pi, struct sge_rxq *rxq, int intr_idx, int idx)
{
	int rc;
	struct sysctl_oid *oid;
	struct sysctl_oid_list *children;
	char name[16];

	rc = alloc_iq_fl(pi, &rxq->iq, &rxq->fl, intr_idx);
	if (rc != 0)
		return (rc);

#ifdef INET
	rc = tcp_lro_init(&rxq->lro);
	if (rc != 0)
		return (rc);
	rxq->lro.ifp = pi->ifp;	/* also indicates LRO init'ed */

	if (pi->ifp->if_capenable & IFCAP_LRO)
		rxq->flags |= RXQ_LRO_ENABLED;
#endif
	rxq->ifp = pi->ifp;

	children = SYSCTL_CHILDREN(pi->oid_rxq);

	snprintf(name, sizeof(name), "%d", idx);
	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
	    NULL, "rx queue");
	children = SYSCTL_CHILDREN(oid);

#ifdef INET
	SYSCTL_ADD_INT(&pi->ctx, children, OID_AUTO, "lro_queued", CTLFLAG_RD,
	    &rxq->lro.lro_queued, 0, NULL);
	SYSCTL_ADD_INT(&pi->ctx, children, OID_AUTO, "lro_flushed", CTLFLAG_RD,
	    &rxq->lro.lro_flushed, 0, NULL);
#endif
	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "rxcsum", CTLFLAG_RD,
	    &rxq->rxcsum, "# of times hardware assisted with checksum");
	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "vlan_extraction",
	    CTLFLAG_RD, &rxq->vlan_extraction,
	    "# of times hardware extracted 802.1Q tag");

	return (rc);
}

static int
free_rxq(struct port_info *pi, struct sge_rxq *rxq)
{
	int rc;

#ifdef INET
	if (rxq->lro.ifp) {
		tcp_lro_free(&rxq->lro);
		rxq->lro.ifp = NULL;
	}
#endif

	rc = free_iq_fl(pi, &rxq->iq, &rxq->fl);
	if (rc == 0)
		bzero(rxq, sizeof(*rxq));

	return (rc);
}

static int
alloc_ctrlq(struct adapter *sc, struct sge_ctrlq *ctrlq, int idx)
{
	int rc, cntxt_id;
	size_t len;
	struct fw_eq_ctrl_cmd c;
	struct sge_eq *eq = &ctrlq->eq;
	char name[16];
	struct sysctl_oid *oid;
	struct sysctl_oid_list *children;

	mtx_init(&eq->eq_lock, eq->lockname, NULL, MTX_DEF);

	len = eq->qsize * CTRL_EQ_ESIZE;
	rc = alloc_ring(sc, len, &eq->desc_tag, &eq->desc_map,
	    &eq->ba, (void **)&eq->desc);
	if (rc)
		return (rc);

	eq->cap = eq->qsize - SPG_LEN / CTRL_EQ_ESIZE;
	eq->spg = (void *)&eq->desc[eq->cap];
	eq->avail = eq->cap - 1;	/* one less to avoid cidx = pidx */
	eq->iqid = sc->sge.fwq.cntxt_id;

	bzero(&c, sizeof(c));

	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(sc->pf) |
	    V_FW_EQ_CTRL_CMD_VFN(0));
	c.alloc_to_len16 = htobe32(F_FW_EQ_CTRL_CMD_ALLOC |
	    F_FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
	c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(eq->iqid)); /* XXX */
	c.physeqid_pkd = htobe32(0);
	c.fetchszm_to_iqid =
	    htobe32(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
	    V_FW_EQ_CTRL_CMD_PCIECHN(idx) |
	    V_FW_EQ_CTRL_CMD_IQID(eq->iqid));
	c.dcaen_to_eqsize =
	    htobe32(V_FW_EQ_CTRL_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
	    V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
	    V_FW_EQ_CTRL_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) |
	    V_FW_EQ_CTRL_CMD_EQSIZE(eq->qsize));
	c.eqaddr = htobe64(eq->ba);

	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to create control queue %d: %d\n", idx, rc);
		return (rc);
	}

	eq->pidx = eq->cidx = 0;
	eq->cntxt_id = G_FW_EQ_CTRL_CMD_EQID(be32toh(c.cmpliqid_eqid));
	eq->flags |= (EQ_ALLOCATED | EQ_STARTED);

	cntxt_id = eq->cntxt_id - sc->sge.eq_start;
	KASSERT(cntxt_id < sc->sge.neq,
	    ("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
	    cntxt_id, sc->sge.neq - 1));
	sc->sge.eqmap[cntxt_id] = eq;

	children = SYSCTL_CHILDREN(sc->oid_ctrlq);

	snprintf(name, sizeof(name), "%d", idx);
	oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, name, CTLFLAG_RD,
	    NULL, "ctrl queue");
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_UQUAD(&sc->ctx, children, OID_AUTO, "total_wrs", CTLFLAG_RD,
	    &ctrlq->total_wrs, "total # of work requests");
	SYSCTL_ADD_UINT(&sc->ctx, children, OID_AUTO, "no_desc", CTLFLAG_RD,
	    &ctrlq->no_desc, 0,
	    "# of times ctrlq ran out of hardware descriptors");
	SYSCTL_ADD_UINT(&sc->ctx, children, OID_AUTO, "too_long", CTLFLAG_RD,
	    &ctrlq->too_long, 0, "# of oversized work requests");

	return (rc);
}

static int
free_ctrlq(struct adapter *sc, struct sge_ctrlq *ctrlq)
{
	int rc;
	struct sge_eq *eq = &ctrlq->eq;

	if (eq->flags & (EQ_ALLOCATED | EQ_STARTED)) {
		rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, eq->cntxt_id);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to free ctrl queue %p: %d\n", eq, rc);
			return (rc);
		}
		eq->flags &= ~(EQ_ALLOCATED | EQ_STARTED);
	}

	free_ring(sc, eq->desc_tag, eq->desc_map, eq->ba, eq->desc);

	if (mtx_initialized(&eq->eq_lock))
		mtx_destroy(&eq->eq_lock);

	bzero(ctrlq, sizeof(*ctrlq));
	return (0);
}

static int
alloc_txq(struct port_info *pi, struct sge_txq *txq, int idx)
{
	int rc, cntxt_id;
	size_t len;
	struct adapter *sc = pi->adapter;
	struct fw_eq_eth_cmd c;
	struct sge_eq *eq = &txq->eq;
	char name[16];
	struct sysctl_oid *oid;
	struct sysctl_oid_list *children;

	txq->ifp = pi->ifp;
	TASK_INIT(&txq->resume_tx, 0, cxgbe_txq_start, txq);

	mtx_init(&eq->eq_lock, eq->lockname, NULL, MTX_DEF);

	len = eq->qsize * TX_EQ_ESIZE;
	rc = alloc_ring(sc, len, &eq->desc_tag, &eq->desc_map,
	    &eq->ba, (void **)&eq->desc);
	if (rc)
		return (rc);

	eq->cap = eq->qsize - SPG_LEN / TX_EQ_ESIZE;
	eq->spg = (void *)&eq->desc[eq->cap];
	eq->avail = eq->cap - 1;	/* one less to avoid cidx = pidx */
	txq->sdesc = malloc(eq->cap * sizeof(struct tx_sdesc), M_CXGBE,
	    M_ZERO | M_WAITOK);
	txq->br = buf_ring_alloc(eq->qsize, M_CXGBE, M_WAITOK, &eq->eq_lock);
	eq->iqid = sc->sge.rxq[pi->first_rxq].iq.cntxt_id;

	rc = bus_dma_tag_create(sc->dmat, 1, 0, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR, NULL, NULL, 64 * 1024, TX_SGL_SEGS,
	    BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL, NULL, &txq->tx_tag);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to create tx DMA tag: %d\n", rc);
		return (rc);
	}

	rc = alloc_tx_maps(txq);
	if (rc != 0) {
		device_printf(sc->dev, "failed to setup tx DMA maps: %d\n", rc);
		return (rc);
	}

	bzero(&c, sizeof(c));

	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
	    V_FW_EQ_ETH_CMD_VFN(0));
	c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC |
	    F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
	c.viid_pkd = htobe32(V_FW_EQ_ETH_CMD_VIID(pi->viid));
	c.fetchszm_to_iqid =
	    htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
	    V_FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) |
	    V_FW_EQ_ETH_CMD_IQID(eq->iqid));
	c.dcaen_to_eqsize = htobe32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
	    V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
	    V_FW_EQ_ETH_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) |
	    V_FW_EQ_ETH_CMD_EQSIZE(eq->qsize));
	c.eqaddr = htobe64(eq->ba);

	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
	if (rc != 0) {
		device_printf(pi->dev,
		    "failed to create egress queue: %d\n", rc);
		return (rc);
	}

	eq->pidx = eq->cidx = 0;
	eq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd));
	eq->flags |= (EQ_ALLOCATED | EQ_STARTED);

	cntxt_id = eq->cntxt_id - sc->sge.eq_start;
	KASSERT(cntxt_id < sc->sge.neq,
	    ("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
	    cntxt_id, sc->sge.neq - 1));
	sc->sge.eqmap[cntxt_id] = eq;

	children = SYSCTL_CHILDREN(pi->oid_txq);

	snprintf(name, sizeof(name), "%d", idx);
	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
	    NULL, "tx queue");
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txcsum", CTLFLAG_RD,
	    &txq->txcsum, "# of times hardware assisted with checksum");
	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "vlan_insertion",
	    CTLFLAG_RD, &txq->vlan_insertion,
	    "# of times hardware inserted 802.1Q tag");
	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "tso_wrs", CTLFLAG_RD,
	    &txq->tso_wrs, "# of IPv4 TSO work requests");
	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "imm_wrs", CTLFLAG_RD,
	    &txq->imm_wrs, "# of work requests with immediate data");
	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "sgl_wrs", CTLFLAG_RD,
	    &txq->sgl_wrs, "# of work requests with direct SGL");
	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkt_wrs", CTLFLAG_RD,
	    &txq->txpkt_wrs, "# of txpkt work requests (one pkt/WR)");
	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkts_wrs", CTLFLAG_RD,
	    &txq->txpkts_wrs, "# of txpkts work requests (multiple pkts/WR)");
	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkts_pkts", CTLFLAG_RD,
	    &txq->txpkts_pkts, "# of frames tx'd using txpkts work requests");

	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "no_dmamap", CTLFLAG_RD,
	    &txq->no_dmamap, 0, "# of times txq ran out of DMA maps");
	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "no_desc", CTLFLAG_RD,
	    &txq->no_desc, 0, "# of times txq ran out of hardware descriptors");
	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "egr_update", CTLFLAG_RD,
	    &txq->egr_update, 0, "egress update notifications from the SGE");

	return (rc);
}

static int
free_txq(struct port_info *pi, struct sge_txq *txq)
{
	int rc;
	struct adapter *sc = pi->adapter;
	struct sge_eq *eq = &txq->eq;

	if (eq->flags & (EQ_ALLOCATED | EQ_STARTED)) {

		/*
		 * Wait for the response to a credit flush if there's one
		 * pending.  Clearing the flag tells handle_sge_egr_update or
		 * cxgbe_txq_start (depending on how far the response has made
		 * it) that they should ignore the response and wake up free_txq
		 * instead.
		 *
		 * The interface has been marked down by the time we get here
		 * (both IFF_UP and IFF_DRV_RUNNING cleared).  qflush has
		 * emptied the tx buf_rings and we know nothing new is being
		 * queued for tx so we don't have to worry about a new credit
		 * flush request.
		 */
		TXQ_LOCK(txq);
		if (eq->flags & EQ_CRFLUSHED) {
			eq->flags &= ~EQ_CRFLUSHED;
			msleep(txq, &eq->eq_lock, 0, "crflush", 0);
		}
		TXQ_UNLOCK(txq);

		rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, eq->cntxt_id);
		if (rc != 0) {
			device_printf(pi->dev,
			    "failed to free egress queue %p: %d\n", eq, rc);
			return (rc);
		}
		eq->flags &= ~(EQ_ALLOCATED | EQ_STARTED);
	}

	free_ring(sc, eq->desc_tag, eq->desc_map, eq->ba, eq->desc);

	free(txq->sdesc, M_CXGBE);

	if (txq->maps)
		free_tx_maps(txq);

	buf_ring_free(txq->br, M_CXGBE);

	if (txq->tx_tag)
		bus_dma_tag_destroy(txq->tx_tag);

	if (mtx_initialized(&eq->eq_lock))
		mtx_destroy(&eq->eq_lock);

	bzero(txq, sizeof(*txq));
	return (0);
}

static void
oneseg_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *ba = arg;

	KASSERT(nseg == 1,
	    ("%s meant for single segment mappings only.", __func__));

	*ba = error ? 0 : segs->ds_addr;
}
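/*
 * A response is "new" when its generation bit matches iq->gen; iq_next flips
 * iq->gen every time the consumer index wraps around the ring, so stale
 * entries left over from the previous pass fail this test.
 */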
static inline bool
is_new_response(const struct sge_iq *iq, struct rsp_ctrl **ctrl)
{
	*ctrl = (void *)((uintptr_t)iq->cdesc +
	    (iq->esize - sizeof(struct rsp_ctrl)));

	return (((*ctrl)->u.type_gen >> S_RSPD_GEN) == iq->gen);
}

static inline void
iq_next(struct sge_iq *iq)
{
	iq->cdesc = (void *) ((uintptr_t)iq->cdesc + iq->esize);
	if (__predict_false(++iq->cidx == iq->qsize - 1)) {
		iq->cidx = 0;
		iq->gen ^= 1;
		iq->cdesc = iq->desc;
	}
}

static inline void
ring_fl_db(struct adapter *sc, struct sge_fl *fl)
{
	int ndesc = fl->pending / 8;

	/* Caller responsible for ensuring there's something useful to do */
	KASSERT(ndesc > 0, ("%s called with no useful work to do.", __func__));

	wmb();

	t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL), F_DBPRIO |
	    V_QID(fl->cntxt_id) | V_PIDX(ndesc));

	fl->pending &= 7;
}

static void
refill_fl(struct sge_fl *fl, int nbufs)
{
	__be64 *d = &fl->desc[fl->pidx];
	struct fl_sdesc *sd = &fl->sdesc[fl->pidx];
	bus_dma_tag_t tag;
	bus_addr_t pa;
	caddr_t cl;
	int rc;

	FL_LOCK_ASSERT_OWNED(fl);

	if (nbufs < 0 || nbufs > fl->needed)
		nbufs = fl->needed;

	while (nbufs--) {

		if (sd->cl != NULL) {

			/*
			 * This happens when a frame small enough to fit
			 * entirely in an mbuf was received in cl last time.
			 * We'd held on to cl and can reuse it now.  Note that
			 * we reuse a cluster of the old size if fl->tag_idx is
			 * no longer the same as sd->tag_idx.
			 */

			KASSERT(*d == sd->ba_tag,
			    ("%s: recycling problem at pidx %d",
			    __func__, fl->pidx));

			d++;
			goto recycled;
		}

		if (fl->tag_idx != sd->tag_idx) {
			bus_dmamap_t map;
			bus_dma_tag_t newtag = fl->tag[fl->tag_idx];
			bus_dma_tag_t oldtag = fl->tag[sd->tag_idx];

			/*
			 * An MTU change can get us here.  Discard the old map
			 * which was created with the old tag, but only if
			 * we're able to get a new one.
			 */
			rc = bus_dmamap_create(newtag, 0, &map);
			if (rc == 0) {
				bus_dmamap_destroy(oldtag, sd->map);
				sd->map = map;
				sd->tag_idx = fl->tag_idx;
			}
		}

		tag = fl->tag[sd->tag_idx];

		cl = m_cljget(NULL, M_NOWAIT, FL_BUF_SIZE(sd->tag_idx));
		if (cl == NULL)
			break;

		rc = bus_dmamap_load(tag, sd->map, cl, FL_BUF_SIZE(sd->tag_idx),
		    oneseg_dma_callback, &pa, 0);
		if (rc != 0 || pa == 0) {
			fl->dmamap_failed++;
			uma_zfree(FL_BUF_ZONE(sd->tag_idx), cl);
			break;
		}

		sd->cl = cl;
		*d++ = htobe64(pa | sd->tag_idx);

#ifdef INVARIANTS
		sd->ba_tag = htobe64(pa | sd->tag_idx);
#endif

recycled:
		/* sd->m is never recycled, should always be NULL */
		KASSERT(sd->m == NULL, ("%s: stray mbuf", __func__));

		sd->m = m_gethdr(M_NOWAIT, MT_NOINIT);
		if (sd->m == NULL)
			break;

		fl->pending++;
		fl->needed--;
		sd++;
		if (++fl->pidx == fl->cap) {
			fl->pidx = 0;
			sd = fl->sdesc;
			d = fl->desc;
		}
	}
}

static int
alloc_fl_sdesc(struct sge_fl *fl)
{
	struct fl_sdesc *sd;
	bus_dma_tag_t tag;
	int i, rc;

	FL_LOCK_ASSERT_OWNED(fl);

	fl->sdesc = malloc(fl->cap * sizeof(struct fl_sdesc), M_CXGBE,
	    M_ZERO | M_WAITOK);

	tag = fl->tag[fl->tag_idx];
	sd = fl->sdesc;
	for (i = 0; i < fl->cap; i++, sd++) {

		sd->tag_idx = fl->tag_idx;
		rc = bus_dmamap_create(tag, 0, &sd->map);
		if (rc != 0)
			goto failed;
	}

	return (0);
failed:
	while (--i >= 0) {
		sd--;
		bus_dmamap_destroy(tag, sd->map);
		if (sd->m) {
			m_init(sd->m, NULL, 0, M_NOWAIT, MT_DATA, 0);
			m_free(sd->m);
			sd->m = NULL;
		}
	}
	KASSERT(sd == fl->sdesc, ("%s: EDOOFUS", __func__));

	free(fl->sdesc, M_CXGBE);
	fl->sdesc = NULL;

	return (rc);
}

static void
free_fl_sdesc(struct sge_fl *fl)
{
	struct fl_sdesc *sd;
	int i;

	FL_LOCK_ASSERT_OWNED(fl);

	sd = fl->sdesc;
	for (i = 0; i < fl->cap; i++, sd++) {

		if (sd->m) {
			m_init(sd->m, NULL, 0, M_NOWAIT, MT_DATA, 0);
			m_free(sd->m);
			sd->m = NULL;
		}

		if (sd->cl) {
			bus_dmamap_unload(fl->tag[sd->tag_idx], sd->map);
			uma_zfree(FL_BUF_ZONE(sd->tag_idx), sd->cl);
			sd->cl = NULL;
		}

		bus_dmamap_destroy(fl->tag[sd->tag_idx], sd->map);
	}

	free(fl->sdesc, M_CXGBE);
	fl->sdesc = NULL;
}

static int
alloc_tx_maps(struct sge_txq *txq)
{
	struct tx_map *txm;
	int i, rc, count;

	/*
	 * We can stuff ~10 frames in an 8-descriptor txpkts WR (8 is the SGE
	 * limit for any WR).  txq->no_dmamap events shouldn't occur if maps is
	 * sized for the worst case.
	 */
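	/* For example, a 1024 entry eq gets 1024 * 10 / 8 = 1280 DMA maps. */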
	count = txq->eq.qsize * 10 / 8;
	txq->map_total = txq->map_avail = count;
	txq->map_cidx = txq->map_pidx = 0;

	txq->maps = malloc(count * sizeof(struct tx_map), M_CXGBE,
	    M_ZERO | M_WAITOK);

	txm = txq->maps;
	for (i = 0; i < count; i++, txm++) {
		rc = bus_dmamap_create(txq->tx_tag, 0, &txm->map);
		if (rc != 0)
			goto failed;
	}

	return (0);
failed:
	while (--i >= 0) {
		txm--;
		bus_dmamap_destroy(txq->tx_tag, txm->map);
	}
	KASSERT(txm == txq->maps, ("%s: EDOOFUS", __func__));

	free(txq->maps, M_CXGBE);
	txq->maps = NULL;

	return (rc);
}

static void
free_tx_maps(struct sge_txq *txq)
{
	struct tx_map *txm;
	int i;

	txm = txq->maps;
	for (i = 0; i < txq->map_total; i++, txm++) {

		if (txm->m) {
			bus_dmamap_unload(txq->tx_tag, txm->map);
			m_freem(txm->m);
			txm->m = NULL;
		}

		bus_dmamap_destroy(txq->tx_tag, txm->map);
	}

	free(txq->maps, M_CXGBE);
	txq->maps = NULL;
}

/*
 * We'll do immediate data tx for non-TSO, but only when not coalescing.  We're
 * willing to use up to 2 hardware descriptors which means a maximum of 96
 * bytes of immediate data.
 */
#define IMM_LEN ( \
    2 * TX_EQ_ESIZE \
    - sizeof(struct fw_eth_tx_pkt_wr) \
    - sizeof(struct cpl_tx_pkt_core))

/*
 * Returns non-zero on failure, no need to cleanup anything in that case.
 *
 * Note 1: We always try to defrag the mbuf if required and return EFBIG only
 * if the resulting chain still won't fit in a tx descriptor.
 *
 * Note 2: We'll pullup the mbuf chain if TSO is requested and the first mbuf
 * does not have the TCP header in it.
 */
static int
get_pkt_sgl(struct sge_txq *txq, struct mbuf **fp, struct sgl *sgl,
    int sgl_only)
{
	struct mbuf *m = *fp;
	struct tx_map *txm;
	int rc, defragged = 0, n;

	TXQ_LOCK_ASSERT_OWNED(txq);

	if (m->m_pkthdr.tso_segsz)
		sgl_only = 1;	/* Do not allow immediate data with LSO */

start:	sgl->nsegs = 0;

	if (m->m_pkthdr.len <= IMM_LEN && !sgl_only)
		return (0);	/* nsegs = 0 tells caller to use imm. tx */

	if (txq->map_avail == 0) {
		txq->no_dmamap++;
		return (ENOMEM);
	}
	txm = &txq->maps[txq->map_pidx];

	if (m->m_pkthdr.tso_segsz && m->m_len < 50) {
		*fp = m_pullup(m, 50);
		m = *fp;
		if (m == NULL)
			return (ENOBUFS);
	}

	rc = bus_dmamap_load_mbuf_sg(txq->tx_tag, txm->map, m, sgl->seg,
	    &sgl->nsegs, BUS_DMA_NOWAIT);
	if (rc == EFBIG && defragged == 0) {
		m = m_defrag(m, M_DONTWAIT);
		if (m == NULL)
			return (EFBIG);

		defragged = 1;
		*fp = m;
		goto start;
	}
	if (rc != 0)
		return (rc);

	txm->m = m;
	txq->map_avail--;
	if (++txq->map_pidx == txq->map_total)
		txq->map_pidx = 0;

	KASSERT(sgl->nsegs > 0 && sgl->nsegs <= TX_SGL_SEGS,
	    ("%s: bad DMA mapping (%d segments)", __func__, sgl->nsegs));

	/*
	 * Store the # of flits required to hold this frame's SGL in nflits.  An
	 * SGL has a (ULPTX header + len0, addr0) tuple optionally followed by
	 * multiple (len0 + len1, addr0, addr1) tuples.  If addr1 is not used
	 * then len1 must be set to 0.
	 */
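	/*
	 * For example: nsegs = 1 -> 2 flits, 2 -> 4, 3 -> 5, 4 -> 7 (two flits
	 * for the first segment, then three for every additional pair of
	 * segments and two for an odd one left over).
	 */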
2014 */ 2015 n = sgl->nsegs - 1; 2016 sgl->nflits = (3 * n) / 2 + (n & 1) + 2; 2017 2018 return (0); 2019 } 2020 2021 2022 /* 2023 * Releases all the txq resources used up in the specified sgl. 2024 */ 2025 static int 2026 free_pkt_sgl(struct sge_txq *txq, struct sgl *sgl) 2027 { 2028 struct tx_map *txm; 2029 2030 TXQ_LOCK_ASSERT_OWNED(txq); 2031 2032 if (sgl->nsegs == 0) 2033 return (0); /* didn't use any map */ 2034 2035 /* 1 pkt uses exactly 1 map, back it out */ 2036 2037 txq->map_avail++; 2038 if (txq->map_pidx > 0) 2039 txq->map_pidx--; 2040 else 2041 txq->map_pidx = txq->map_total - 1; 2042 2043 txm = &txq->maps[txq->map_pidx]; 2044 bus_dmamap_unload(txq->tx_tag, txm->map); 2045 txm->m = NULL; 2046 2047 return (0); 2048 } 2049 2050 static int 2051 write_txpkt_wr(struct port_info *pi, struct sge_txq *txq, struct mbuf *m, 2052 struct sgl *sgl) 2053 { 2054 struct sge_eq *eq = &txq->eq; 2055 struct fw_eth_tx_pkt_wr *wr; 2056 struct cpl_tx_pkt_core *cpl; 2057 uint32_t ctrl; /* used in many unrelated places */ 2058 uint64_t ctrl1; 2059 int nflits, ndesc, pktlen; 2060 struct tx_sdesc *txsd; 2061 caddr_t dst; 2062 2063 TXQ_LOCK_ASSERT_OWNED(txq); 2064 2065 pktlen = m->m_pkthdr.len; 2066 2067 /* 2068 * Do we have enough flits to send this frame out? 2069 */ 2070 ctrl = sizeof(struct cpl_tx_pkt_core); 2071 if (m->m_pkthdr.tso_segsz) { 2072 nflits = TXPKT_LSO_WR_HDR; 2073 ctrl += sizeof(struct cpl_tx_pkt_lso); 2074 } else 2075 nflits = TXPKT_WR_HDR; 2076 if (sgl->nsegs > 0) 2077 nflits += sgl->nflits; 2078 else { 2079 nflits += howmany(pktlen, 8); 2080 ctrl += pktlen; 2081 } 2082 ndesc = howmany(nflits, 8); 2083 if (ndesc > eq->avail) 2084 return (ENOMEM); 2085 2086 /* Firmware work request header */ 2087 wr = (void *)&eq->desc[eq->pidx]; 2088 wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) | 2089 V_FW_WR_IMMDLEN(ctrl)); 2090 ctrl = V_FW_WR_LEN16(howmany(nflits, 2)); 2091 if (eq->avail == ndesc && !(eq->flags & EQ_CRFLUSHED)) { 2092 ctrl |= F_FW_WR_EQUEQ | F_FW_WR_EQUIQ; 2093 eq->flags |= EQ_CRFLUSHED; 2094 } 2095 2096 wr->equiq_to_len16 = htobe32(ctrl); 2097 wr->r3 = 0; 2098 2099 if (m->m_pkthdr.tso_segsz) { 2100 struct cpl_tx_pkt_lso *lso = (void *)(wr + 1); 2101 struct ether_header *eh; 2102 struct ip *ip; 2103 struct tcphdr *tcp; 2104 2105 ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | F_LSO_FIRST_SLICE | 2106 F_LSO_LAST_SLICE; 2107 2108 eh = mtod(m, struct ether_header *); 2109 if (eh->ether_type == htons(ETHERTYPE_VLAN)) { 2110 ctrl |= V_LSO_ETHHDR_LEN(1); 2111 ip = (void *)((struct ether_vlan_header *)eh + 1); 2112 } else 2113 ip = (void *)(eh + 1); 2114 2115 tcp = (void *)((uintptr_t)ip + ip->ip_hl * 4); 2116 ctrl |= V_LSO_IPHDR_LEN(ip->ip_hl) | 2117 V_LSO_TCPHDR_LEN(tcp->th_off); 2118 2119 lso->lso_ctrl = htobe32(ctrl); 2120 lso->ipid_ofst = htobe16(0); 2121 lso->mss = htobe16(m->m_pkthdr.tso_segsz); 2122 lso->seqno_offset = htobe32(0); 2123 lso->len = htobe32(pktlen); 2124 2125 cpl = (void *)(lso + 1); 2126 2127 txq->tso_wrs++; 2128 } else 2129 cpl = (void *)(wr + 1); 2130 2131 /* Checksum offload */ 2132 ctrl1 = 0; 2133 if (!(m->m_pkthdr.csum_flags & CSUM_IP)) 2134 ctrl1 |= F_TXPKT_IPCSUM_DIS; 2135 if (!(m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))) 2136 ctrl1 |= F_TXPKT_L4CSUM_DIS; 2137 if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP)) 2138 txq->txcsum++; /* some hardware assistance provided */ 2139 2140 /* VLAN tag insertion */ 2141 if (m->m_flags & M_VLANTAG) { 2142 ctrl1 |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->m_pkthdr.ether_vtag); 2143 txq->vlan_insertion++; 2144 } 
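
	/*
	 * Note that F_TXPKT_IPCSUM_DIS and F_TXPKT_L4CSUM_DIS above are
	 * "disable" bits: they are set only when the stack did not request the
	 * corresponding offload.  ctrl1, together with any VLAN tag, is handed
	 * to the hardware via cpl->ctrl1 just below.
	 */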
2145 2146 /* CPL header */ 2147 cpl->ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) | 2148 V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(pi->adapter->pf)); 2149 cpl->pack = 0; 2150 cpl->len = htobe16(pktlen); 2151 cpl->ctrl1 = htobe64(ctrl1); 2152 2153 /* Software descriptor */ 2154 txsd = &txq->sdesc[eq->pidx]; 2155 txsd->desc_used = ndesc; 2156 2157 eq->pending += ndesc; 2158 eq->avail -= ndesc; 2159 eq->pidx += ndesc; 2160 if (eq->pidx >= eq->cap) 2161 eq->pidx -= eq->cap; 2162 2163 /* SGL */ 2164 dst = (void *)(cpl + 1); 2165 if (sgl->nsegs > 0) { 2166 txsd->credits = 1; 2167 txq->sgl_wrs++; 2168 write_sgl_to_txd(eq, sgl, &dst); 2169 } else { 2170 txsd->credits = 0; 2171 txq->imm_wrs++; 2172 for (; m; m = m->m_next) { 2173 copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len); 2174 #ifdef INVARIANTS 2175 pktlen -= m->m_len; 2176 #endif 2177 } 2178 #ifdef INVARIANTS 2179 KASSERT(pktlen == 0, ("%s: %d bytes left.", __func__, pktlen)); 2180 #endif 2181 2182 } 2183 2184 txq->txpkt_wrs++; 2185 return (0); 2186 } 2187 2188 /* 2189 * Returns 0 to indicate that m has been accepted into a coalesced tx work 2190 * request. It has either been folded into txpkts or txpkts was flushed and m 2191 * has started a new coalesced work request (as the first frame in a fresh 2192 * txpkts). 2193 * 2194 * Returns non-zero to indicate a failure - caller is responsible for 2195 * transmitting m, if there was anything in txpkts it has been flushed. 2196 */ 2197 static int 2198 add_to_txpkts(struct port_info *pi, struct sge_txq *txq, struct txpkts *txpkts, 2199 struct mbuf *m, struct sgl *sgl) 2200 { 2201 struct sge_eq *eq = &txq->eq; 2202 int can_coalesce; 2203 struct tx_sdesc *txsd; 2204 int flits; 2205 2206 TXQ_LOCK_ASSERT_OWNED(txq); 2207 2208 if (txpkts->npkt > 0) { 2209 flits = TXPKTS_PKT_HDR + sgl->nflits; 2210 can_coalesce = m->m_pkthdr.tso_segsz == 0 && 2211 txpkts->nflits + flits <= TX_WR_FLITS && 2212 txpkts->nflits + flits <= eq->avail * 8 && 2213 txpkts->plen + m->m_pkthdr.len < 65536; 2214 2215 if (can_coalesce) { 2216 txpkts->npkt++; 2217 txpkts->nflits += flits; 2218 txpkts->plen += m->m_pkthdr.len; 2219 2220 txsd = &txq->sdesc[eq->pidx]; 2221 txsd->credits++; 2222 2223 return (0); 2224 } 2225 2226 /* 2227 * Couldn't coalesce m into txpkts. The first order of business 2228 * is to send txpkts on its way. Then we'll revisit m. 2229 */ 2230 write_txpkts_wr(txq, txpkts); 2231 } 2232 2233 /* 2234 * Check if we can start a new coalesced tx work request with m as 2235 * the first packet in it. 2236 */ 2237 2238 KASSERT(txpkts->npkt == 0, ("%s: txpkts not empty", __func__)); 2239 2240 flits = TXPKTS_WR_HDR + sgl->nflits; 2241 can_coalesce = m->m_pkthdr.tso_segsz == 0 && 2242 flits <= eq->avail * 8 && flits <= TX_WR_FLITS; 2243 2244 if (can_coalesce == 0) 2245 return (EINVAL); 2246 2247 /* 2248 * Start a fresh coalesced tx WR with m as the first frame in it. 2249 */ 2250 txpkts->npkt = 1; 2251 txpkts->nflits = flits; 2252 txpkts->flitp = &eq->desc[eq->pidx].flit[2]; 2253 txpkts->plen = m->m_pkthdr.len; 2254 2255 txsd = &txq->sdesc[eq->pidx]; 2256 txsd->credits = 1; 2257 2258 return (0); 2259 } 2260 2261 /* 2262 * Note that write_txpkts_wr can never run out of hardware descriptors (but 2263 * write_txpkt_wr can). add_to_txpkts ensures that a frame is accepted for 2264 * coalescing only if sufficient hardware descriptors are available. 
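 *
 * The add_to_txpkts check is done in flits: a tx descriptor is TX_EQ_ESIZE
 * (64) bytes, i.e. 8 flits of 8 bytes each, so keeping the total flit count
 * within eq->avail * 8 there guarantees that the howmany(nflits, 8)
 * descriptors computed here are available.  With eq->avail = 3, for example,
 * a coalesced WR is allowed to grow to at most 24 flits (3 descriptors).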
2265 */ 2266 static void 2267 write_txpkts_wr(struct sge_txq *txq, struct txpkts *txpkts) 2268 { 2269 struct sge_eq *eq = &txq->eq; 2270 struct fw_eth_tx_pkts_wr *wr; 2271 struct tx_sdesc *txsd; 2272 uint32_t ctrl; 2273 int ndesc; 2274 2275 TXQ_LOCK_ASSERT_OWNED(txq); 2276 2277 ndesc = howmany(txpkts->nflits, 8); 2278 2279 wr = (void *)&eq->desc[eq->pidx]; 2280 wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR) | 2281 V_FW_WR_IMMDLEN(0)); /* immdlen does not matter in this WR */ 2282 ctrl = V_FW_WR_LEN16(howmany(txpkts->nflits, 2)); 2283 if (eq->avail == ndesc && !(eq->flags & EQ_CRFLUSHED)) { 2284 ctrl |= F_FW_WR_EQUEQ | F_FW_WR_EQUIQ; 2285 eq->flags |= EQ_CRFLUSHED; 2286 } 2287 wr->equiq_to_len16 = htobe32(ctrl); 2288 wr->plen = htobe16(txpkts->plen); 2289 wr->npkt = txpkts->npkt; 2290 wr->r3 = wr->r4 = 0; 2291 2292 /* Everything else already written */ 2293 2294 txsd = &txq->sdesc[eq->pidx]; 2295 txsd->desc_used = ndesc; 2296 2297 KASSERT(eq->avail >= ndesc, ("%s: out of descriptors", __func__)); 2298 2299 eq->pending += ndesc; 2300 eq->avail -= ndesc; 2301 eq->pidx += ndesc; 2302 if (eq->pidx >= eq->cap) 2303 eq->pidx -= eq->cap; 2304 2305 txq->txpkts_pkts += txpkts->npkt; 2306 txq->txpkts_wrs++; 2307 txpkts->npkt = 0; /* emptied */ 2308 } 2309 2310 static inline void 2311 write_ulp_cpl_sgl(struct port_info *pi, struct sge_txq *txq, 2312 struct txpkts *txpkts, struct mbuf *m, struct sgl *sgl) 2313 { 2314 struct ulp_txpkt *ulpmc; 2315 struct ulptx_idata *ulpsc; 2316 struct cpl_tx_pkt_core *cpl; 2317 struct sge_eq *eq = &txq->eq; 2318 uintptr_t flitp, start, end; 2319 uint64_t ctrl; 2320 caddr_t dst; 2321 2322 KASSERT(txpkts->npkt > 0, ("%s: txpkts is empty", __func__)); 2323 2324 start = (uintptr_t)eq->desc; 2325 end = (uintptr_t)eq->spg; 2326 2327 /* Checksum offload */ 2328 ctrl = 0; 2329 if (!(m->m_pkthdr.csum_flags & CSUM_IP)) 2330 ctrl |= F_TXPKT_IPCSUM_DIS; 2331 if (!(m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))) 2332 ctrl |= F_TXPKT_L4CSUM_DIS; 2333 if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP)) 2334 txq->txcsum++; /* some hardware assistance provided */ 2335 2336 /* VLAN tag insertion */ 2337 if (m->m_flags & M_VLANTAG) { 2338 ctrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->m_pkthdr.ether_vtag); 2339 txq->vlan_insertion++; 2340 } 2341 2342 /* 2343 * The previous packet's SGL must have ended at a 16 byte boundary (this 2344 * is required by the firmware/hardware). It follows that flitp cannot 2345 * wrap around between the ULPTX master command and ULPTX subcommand (8 2346 * bytes each), and that it can not wrap around in the middle of the 2347 * cpl_tx_pkt_core either. 
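	 *
	 * Spelled out: starting from a 16 byte boundary, the 8 byte ulp_txpkt
	 * plus the 8 byte ulptx_idata fill exactly one 16 byte unit and the 16
	 * byte cpl_tx_pkt_core fills the next, while the only place a wrap can
	 * occur is at eq->spg (itself 16 byte aligned).  Hence the only two
	 * wrap checks needed are the ones below.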
2348 */ 2349 flitp = (uintptr_t)txpkts->flitp; 2350 KASSERT((flitp & 0xf) == 0, 2351 ("%s: last SGL did not end at 16 byte boundary: %p", 2352 __func__, txpkts->flitp)); 2353 2354 /* ULP master command */ 2355 ulpmc = (void *)flitp; 2356 ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0) | 2357 V_ULP_TXPKT_FID(eq->iqid)); 2358 ulpmc->len = htonl(howmany(sizeof(*ulpmc) + sizeof(*ulpsc) + 2359 sizeof(*cpl) + 8 * sgl->nflits, 16)); 2360 2361 /* ULP subcommand */ 2362 ulpsc = (void *)(ulpmc + 1); 2363 ulpsc->cmd_more = htobe32(V_ULPTX_CMD((u32)ULP_TX_SC_IMM) | 2364 F_ULP_TX_SC_MORE); 2365 ulpsc->len = htobe32(sizeof(struct cpl_tx_pkt_core)); 2366 2367 flitp += sizeof(*ulpmc) + sizeof(*ulpsc); 2368 if (flitp == end) 2369 flitp = start; 2370 2371 /* CPL_TX_PKT */ 2372 cpl = (void *)flitp; 2373 cpl->ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) | 2374 V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(pi->adapter->pf)); 2375 cpl->pack = 0; 2376 cpl->len = htobe16(m->m_pkthdr.len); 2377 cpl->ctrl1 = htobe64(ctrl); 2378 2379 flitp += sizeof(*cpl); 2380 if (flitp == end) 2381 flitp = start; 2382 2383 /* SGL for this frame */ 2384 dst = (caddr_t)flitp; 2385 txpkts->nflits += write_sgl_to_txd(eq, sgl, &dst); 2386 txpkts->flitp = (void *)dst; 2387 2388 KASSERT(((uintptr_t)dst & 0xf) == 0, 2389 ("%s: SGL ends at %p (not a 16 byte boundary)", __func__, dst)); 2390 } 2391 2392 /* 2393 * If the SGL ends on an address that is not 16 byte aligned, this function will 2394 * add a 0 filled flit at the end. It returns 1 in that case. 2395 */ 2396 static int 2397 write_sgl_to_txd(struct sge_eq *eq, struct sgl *sgl, caddr_t *to) 2398 { 2399 __be64 *flitp, *end; 2400 struct ulptx_sgl *usgl; 2401 bus_dma_segment_t *seg; 2402 int i, padded; 2403 2404 KASSERT(sgl->nsegs > 0 && sgl->nflits > 0, 2405 ("%s: bad SGL - nsegs=%d, nflits=%d", 2406 __func__, sgl->nsegs, sgl->nflits)); 2407 2408 KASSERT(((uintptr_t)(*to) & 0xf) == 0, 2409 ("%s: SGL must start at a 16 byte boundary: %p", __func__, *to)); 2410 2411 flitp = (__be64 *)(*to); 2412 end = flitp + sgl->nflits; 2413 seg = &sgl->seg[0]; 2414 usgl = (void *)flitp; 2415 2416 /* 2417 * We start at a 16 byte boundary somewhere inside the tx descriptor 2418 * ring, so we're at least 16 bytes away from the status page. There is 2419 * no chance of a wrap around in the middle of usgl (which is 16 bytes). 
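	 *
	 * The first two flits (ULPTX header + len0, then addr0) are therefore
	 * written unconditionally.  The remaining flits either fit before the
	 * status page in their entirety (first branch below) or are emitted
	 * one flit at a time with an explicit wrap back to eq->desc.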
2420 */ 2421 2422 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | 2423 V_ULPTX_NSGE(sgl->nsegs)); 2424 usgl->len0 = htobe32(seg->ds_len); 2425 usgl->addr0 = htobe64(seg->ds_addr); 2426 seg++; 2427 2428 if ((uintptr_t)end <= (uintptr_t)eq->spg) { 2429 2430 /* Won't wrap around at all */ 2431 2432 for (i = 0; i < sgl->nsegs - 1; i++, seg++) { 2433 usgl->sge[i / 2].len[i & 1] = htobe32(seg->ds_len); 2434 usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ds_addr); 2435 } 2436 if (i & 1) 2437 usgl->sge[i / 2].len[1] = htobe32(0); 2438 } else { 2439 2440 /* Will wrap somewhere in the rest of the SGL */ 2441 2442 /* 2 flits already written, write the rest flit by flit */ 2443 flitp = (void *)(usgl + 1); 2444 for (i = 0; i < sgl->nflits - 2; i++) { 2445 if ((uintptr_t)flitp == (uintptr_t)eq->spg) 2446 flitp = (void *)eq->desc; 2447 *flitp++ = get_flit(seg, sgl->nsegs - 1, i); 2448 } 2449 end = flitp; 2450 } 2451 2452 if ((uintptr_t)end & 0xf) { 2453 *(uint64_t *)end = 0; 2454 end++; 2455 padded = 1; 2456 } else 2457 padded = 0; 2458 2459 if ((uintptr_t)end == (uintptr_t)eq->spg) 2460 *to = (void *)eq->desc; 2461 else 2462 *to = (void *)end; 2463 2464 return (padded); 2465 } 2466 2467 static inline void 2468 copy_to_txd(struct sge_eq *eq, caddr_t from, caddr_t *to, int len) 2469 { 2470 if ((uintptr_t)(*to) + len <= (uintptr_t)eq->spg) { 2471 bcopy(from, *to, len); 2472 (*to) += len; 2473 } else { 2474 int portion = (uintptr_t)eq->spg - (uintptr_t)(*to); 2475 2476 bcopy(from, *to, portion); 2477 from += portion; 2478 portion = len - portion; /* remaining */ 2479 bcopy(from, (void *)eq->desc, portion); 2480 (*to) = (caddr_t)eq->desc + portion; 2481 } 2482 } 2483 2484 static inline void 2485 ring_eq_db(struct adapter *sc, struct sge_eq *eq) 2486 { 2487 wmb(); 2488 t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL), 2489 V_QID(eq->cntxt_id) | V_PIDX(eq->pending)); 2490 eq->pending = 0; 2491 } 2492 2493 static inline int 2494 reclaimable(struct sge_eq *eq) 2495 { 2496 unsigned int cidx; 2497 2498 cidx = eq->spg->cidx; /* stable snapshot */ 2499 cidx = be16_to_cpu(cidx); 2500 2501 if (cidx >= eq->cidx) 2502 return (cidx - eq->cidx); 2503 else 2504 return (cidx + eq->cap - eq->cidx); 2505 } 2506 2507 /* 2508 * There are "can_reclaim" tx descriptors ready to be reclaimed. Reclaim as 2509 * many as possible but stop when there are around "n" mbufs to free. 2510 * 2511 * The actual number reclaimed is provided as the return value. 2512 */ 2513 static int 2514 reclaim_tx_descs(struct sge_txq *txq, int can_reclaim, int n) 2515 { 2516 struct tx_sdesc *txsd; 2517 struct tx_map *txm; 2518 unsigned int reclaimed, maps; 2519 struct sge_eq *eq = &txq->eq; 2520 2521 EQ_LOCK_ASSERT_OWNED(eq); 2522 2523 if (can_reclaim == 0) 2524 can_reclaim = reclaimable(eq); 2525 2526 maps = reclaimed = 0; 2527 while (can_reclaim && maps < n) { 2528 int ndesc; 2529 2530 txsd = &txq->sdesc[eq->cidx]; 2531 ndesc = txsd->desc_used; 2532 2533 /* Firmware doesn't return "partial" credits. 
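		 * The cidx updates in the status page always cover whole work
		 * requests, which is why this loop can advance eq->cidx by
		 * txsd->desc_used at a time without ever splitting a WR.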
*/ 2534 KASSERT(can_reclaim >= ndesc, 2535 ("%s: unexpected number of credits: %d, %d", 2536 __func__, can_reclaim, ndesc)); 2537 2538 maps += txsd->credits; 2539 2540 reclaimed += ndesc; 2541 can_reclaim -= ndesc; 2542 2543 eq->cidx += ndesc; 2544 if (__predict_false(eq->cidx >= eq->cap)) 2545 eq->cidx -= eq->cap; 2546 } 2547 2548 txm = &txq->maps[txq->map_cidx]; 2549 if (maps) 2550 prefetch(txm->m); 2551 2552 eq->avail += reclaimed; 2553 KASSERT(eq->avail < eq->cap, /* avail tops out at (cap - 1) */ 2554 ("%s: too many descriptors available", __func__)); 2555 2556 txq->map_avail += maps; 2557 KASSERT(txq->map_avail <= txq->map_total, 2558 ("%s: too many maps available", __func__)); 2559 2560 while (maps--) { 2561 struct tx_map *next; 2562 2563 next = txm + 1; 2564 if (__predict_false(txq->map_cidx + 1 == txq->map_total)) 2565 next = txq->maps; 2566 prefetch(next->m); 2567 2568 bus_dmamap_unload(txq->tx_tag, txm->map); 2569 m_freem(txm->m); 2570 txm->m = NULL; 2571 2572 txm = next; 2573 if (__predict_false(++txq->map_cidx == txq->map_total)) 2574 txq->map_cidx = 0; 2575 } 2576 2577 return (reclaimed); 2578 } 2579 2580 static void 2581 write_eqflush_wr(struct sge_eq *eq) 2582 { 2583 struct fw_eq_flush_wr *wr; 2584 2585 EQ_LOCK_ASSERT_OWNED(eq); 2586 KASSERT(eq->avail > 0, ("%s: no descriptors left.", __func__)); 2587 2588 wr = (void *)&eq->desc[eq->pidx]; 2589 bzero(wr, sizeof(*wr)); 2590 wr->opcode = FW_EQ_FLUSH_WR; 2591 wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(sizeof(*wr) / 16) | 2592 F_FW_WR_EQUEQ | F_FW_WR_EQUIQ); 2593 2594 eq->flags |= EQ_CRFLUSHED; 2595 eq->pending++; 2596 eq->avail--; 2597 if (++eq->pidx == eq->cap) 2598 eq->pidx = 0; 2599 } 2600 2601 static __be64 2602 get_flit(bus_dma_segment_t *sgl, int nsegs, int idx) 2603 { 2604 int i = (idx / 3) * 2; 2605 2606 switch (idx % 3) { 2607 case 0: { 2608 __be64 rc; 2609 2610 rc = htobe32(sgl[i].ds_len); 2611 if (i + 1 < nsegs) 2612 rc |= (uint64_t)htobe32(sgl[i + 1].ds_len) << 32; 2613 2614 return (rc); 2615 } 2616 case 1: 2617 return htobe64(sgl[i].ds_addr); 2618 case 2: 2619 return htobe64(sgl[i + 1].ds_addr); 2620 } 2621 2622 return (0); 2623 } 2624 2625 static void 2626 set_fl_tag_idx(struct sge_fl *fl, int mtu) 2627 { 2628 int i; 2629 2630 FL_LOCK_ASSERT_OWNED(fl); 2631 2632 for (i = 0; i < FL_BUF_SIZES - 1; i++) { 2633 if (FL_BUF_SIZE(i) >= (mtu + FL_PKTSHIFT)) 2634 break; 2635 } 2636 2637 fl->tag_idx = i; 2638 } 2639 2640 static int 2641 handle_sge_egr_update(struct adapter *sc, const struct cpl_sge_egr_update *cpl) 2642 { 2643 unsigned int qid = G_EGR_QID(ntohl(cpl->opcode_qid)); 2644 struct sge *s = &sc->sge; 2645 struct sge_txq *txq; 2646 struct port_info *pi; 2647 2648 txq = (void *)s->eqmap[qid - s->eq_start]; 2649 TXQ_LOCK(txq); 2650 if (txq->eq.flags & EQ_CRFLUSHED) { 2651 pi = txq->ifp->if_softc; 2652 taskqueue_enqueue(pi->tq, &txq->resume_tx); 2653 txq->egr_update++; 2654 } else 2655 wakeup_one(txq); /* txq is going away, wakeup free_txq */ 2656 TXQ_UNLOCK(txq); 2657 2658 return (0); 2659 } 2660 2661 /* 2662 * m0 is freed on successful transmission. 
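 * On failure the caller keeps ownership of m0: EMSGSIZE means the work
 * request exceeds SGE_MAX_WR_LEN and can never be sent, while EAGAIN means
 * the control queue is temporarily out of descriptors and the request may be
 * retried once some are reclaimed.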
2663 */ 2664 static int 2665 ctrl_tx(struct adapter *sc, struct sge_ctrlq *ctrlq, struct mbuf *m0) 2666 { 2667 struct sge_eq *eq = &ctrlq->eq; 2668 int rc = 0, ndesc; 2669 int can_reclaim; 2670 caddr_t dst; 2671 struct mbuf *m; 2672 2673 M_ASSERTPKTHDR(m0); 2674 2675 if (m0->m_pkthdr.len > SGE_MAX_WR_LEN) { 2676 ctrlq->too_long++; 2677 return (EMSGSIZE); 2678 } 2679 ndesc = howmany(m0->m_pkthdr.len, CTRL_EQ_ESIZE); 2680 2681 EQ_LOCK(eq); 2682 2683 can_reclaim = reclaimable(eq); 2684 eq->cidx += can_reclaim; 2685 eq->avail += can_reclaim; 2686 if (__predict_false(eq->cidx >= eq->cap)) 2687 eq->cidx -= eq->cap; 2688 2689 if (eq->avail < ndesc) { 2690 rc = EAGAIN; 2691 ctrlq->no_desc++; 2692 goto failed; 2693 } 2694 2695 dst = (void *)&eq->desc[eq->pidx]; 2696 for (m = m0; m; m = m->m_next) 2697 copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len); 2698 2699 eq->pidx += ndesc; 2700 if (__predict_false(eq->pidx >= eq->cap)) 2701 eq->pidx -= eq->cap; 2702 2703 eq->pending += ndesc; 2704 ctrlq->total_wrs++; 2705 ring_eq_db(sc, eq); 2706 failed: 2707 EQ_UNLOCK(eq); 2708 if (rc == 0) 2709 m_freem(m0); 2710 2711 return (rc); 2712 } 2713
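
/*
 * Illustrative sketch only (kept under #if 0, not compiled into the driver):
 * the descriptor accounting that ctrl_tx performs above, written out as a
 * standalone helper.  The example value in the comment assumes a 64 byte
 * control descriptor (CTRL_EQ_ESIZE); adjust if the actual size differs.
 */
#if 0
static int
example_ctrl_wr_cost(int wr_len, int avail)
{
	int ndesc;

	if (wr_len > SGE_MAX_WR_LEN)
		return (EMSGSIZE);	/* will never fit, don't retry */

	ndesc = howmany(wr_len, CTRL_EQ_ESIZE);	/* e.g. 100 bytes -> 2 */
	if (avail < ndesc)
		return (EAGAIN);	/* out of descriptors, retry later */

	return (0);			/* wr_len fits in ndesc descriptors */
}
#endif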