1 /************************************************************************** 2 3 Copyright (c) 2007-2009, Chelsio Inc. 4 All rights reserved. 5 6 Redistribution and use in source and binary forms, with or without 7 modification, are permitted provided that the following conditions are met: 8 9 1. Redistributions of source code must retain the above copyright notice, 10 this list of conditions and the following disclaimer. 11 12 2. Neither the name of the Chelsio Corporation nor the names of its 13 contributors may be used to endorse or promote products derived from 14 this software without specific prior written permission. 15 16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 26 POSSIBILITY OF SUCH DAMAGE. 27 28 ***************************************************************************/ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 #include "opt_inet6.h" 34 #include "opt_inet.h" 35 36 #include <sys/param.h> 37 #include <sys/systm.h> 38 #include <sys/kernel.h> 39 #include <sys/module.h> 40 #include <sys/bus.h> 41 #include <sys/conf.h> 42 #include <machine/bus.h> 43 #include <machine/resource.h> 44 #include <sys/bus_dma.h> 45 #include <sys/rman.h> 46 #include <sys/queue.h> 47 #include <sys/sysctl.h> 48 #include <sys/taskqueue.h> 49 50 #include <sys/proc.h> 51 #include <sys/sbuf.h> 52 #include <sys/sched.h> 53 #include <sys/smp.h> 54 #include <sys/systm.h> 55 #include <sys/syslog.h> 56 #include <sys/socket.h> 57 #include <sys/sglist.h> 58 59 #include <net/if.h> 60 #include <net/if_var.h> 61 #include <net/bpf.h> 62 #include <net/ethernet.h> 63 #include <net/if_vlan_var.h> 64 65 #include <netinet/in_systm.h> 66 #include <netinet/in.h> 67 #include <netinet/ip.h> 68 #include <netinet/ip6.h> 69 #include <netinet/tcp.h> 70 71 #include <dev/pci/pcireg.h> 72 #include <dev/pci/pcivar.h> 73 74 #include <vm/vm.h> 75 #include <vm/pmap.h> 76 77 #include <cxgb_include.h> 78 #include <sys/mvec.h> 79 80 int txq_fills = 0; 81 int multiq_tx_enable = 1; 82 83 #ifdef TCP_OFFLOAD 84 CTASSERT(NUM_CPL_HANDLERS >= NUM_CPL_CMDS); 85 #endif 86 87 extern struct sysctl_oid_list sysctl__hw_cxgb_children; 88 int cxgb_txq_buf_ring_size = TX_ETH_Q_SIZE; 89 SYSCTL_INT(_hw_cxgb, OID_AUTO, txq_mr_size, CTLFLAG_RDTUN, &cxgb_txq_buf_ring_size, 0, 90 "size of per-queue mbuf ring"); 91 92 static int cxgb_tx_coalesce_force = 0; 93 SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_force, CTLFLAG_RWTUN, 94 &cxgb_tx_coalesce_force, 0, 95 "coalesce small packets into a single work request regardless of ring state"); 96 97 #define COALESCE_START_DEFAULT TX_ETH_Q_SIZE>>1 98 #define COALESCE_START_MAX (TX_ETH_Q_SIZE-(TX_ETH_Q_SIZE>>3)) 99 #define COALESCE_STOP_DEFAULT TX_ETH_Q_SIZE>>2 100 #define COALESCE_STOP_MIN TX_ETH_Q_SIZE>>5 101 #define TX_RECLAIM_DEFAULT TX_ETH_Q_SIZE>>5 102 #define TX_RECLAIM_MAX TX_ETH_Q_SIZE>>2 103 #define 
TX_RECLAIM_MIN TX_ETH_Q_SIZE>>6 104 105 106 static int cxgb_tx_coalesce_enable_start = COALESCE_START_DEFAULT; 107 SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_enable_start, CTLFLAG_RWTUN, 108 &cxgb_tx_coalesce_enable_start, 0, 109 "coalesce enable threshold"); 110 static int cxgb_tx_coalesce_enable_stop = COALESCE_STOP_DEFAULT; 111 SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_enable_stop, CTLFLAG_RWTUN, 112 &cxgb_tx_coalesce_enable_stop, 0, 113 "coalesce disable threshold"); 114 static int cxgb_tx_reclaim_threshold = TX_RECLAIM_DEFAULT; 115 SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_reclaim_threshold, CTLFLAG_RWTUN, 116 &cxgb_tx_reclaim_threshold, 0, 117 "tx cleaning minimum threshold"); 118 119 /* 120 * XXX don't re-enable this until TOE stops assuming 121 * we have an m_ext 122 */ 123 static int recycle_enable = 0; 124 125 extern int cxgb_use_16k_clusters; 126 extern int nmbjumbop; 127 extern int nmbjumbo9; 128 extern int nmbjumbo16; 129 130 #define USE_GTS 0 131 132 #define SGE_RX_SM_BUF_SIZE 1536 133 #define SGE_RX_DROP_THRES 16 134 #define SGE_RX_COPY_THRES 128 135 136 /* 137 * Period of the Tx buffer reclaim timer. This timer does not need to run 138 * frequently as Tx buffers are usually reclaimed by new Tx packets. 139 */ 140 #define TX_RECLAIM_PERIOD (hz >> 1) 141 142 /* 143 * Values for sge_txq.flags 144 */ 145 enum { 146 TXQ_RUNNING = 1 << 0, /* fetch engine is running */ 147 TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */ 148 }; 149 150 struct tx_desc { 151 uint64_t flit[TX_DESC_FLITS]; 152 } __packed; 153 154 struct rx_desc { 155 uint32_t addr_lo; 156 uint32_t len_gen; 157 uint32_t gen2; 158 uint32_t addr_hi; 159 } __packed; 160 161 struct rsp_desc { /* response queue descriptor */ 162 struct rss_header rss_hdr; 163 uint32_t flags; 164 uint32_t len_cq; 165 uint8_t imm_data[47]; 166 uint8_t intr_gen; 167 } __packed; 168 169 #define RX_SW_DESC_MAP_CREATED (1 << 0) 170 #define TX_SW_DESC_MAP_CREATED (1 << 1) 171 #define RX_SW_DESC_INUSE (1 << 3) 172 #define TX_SW_DESC_MAPPED (1 << 4) 173 174 #define RSPQ_NSOP_NEOP G_RSPD_SOP_EOP(0) 175 #define RSPQ_EOP G_RSPD_SOP_EOP(F_RSPD_EOP) 176 #define RSPQ_SOP G_RSPD_SOP_EOP(F_RSPD_SOP) 177 #define RSPQ_SOP_EOP G_RSPD_SOP_EOP(F_RSPD_SOP|F_RSPD_EOP) 178 179 struct tx_sw_desc { /* SW state per Tx descriptor */ 180 struct mbuf *m; 181 bus_dmamap_t map; 182 int flags; 183 }; 184 185 struct rx_sw_desc { /* SW state per Rx descriptor */ 186 caddr_t rxsd_cl; 187 struct mbuf *m; 188 bus_dmamap_t map; 189 int flags; 190 }; 191 192 struct txq_state { 193 unsigned int compl; 194 unsigned int gen; 195 unsigned int pidx; 196 }; 197 198 struct refill_fl_cb_arg { 199 int error; 200 bus_dma_segment_t seg; 201 int nseg; 202 }; 203 204 205 /* 206 * Maps a number of flits to the number of Tx descriptors that can hold them. 207 * The formula is 208 * 209 * desc = 1 + (flits - 2) / (WR_FLITS - 1). 210 * 211 * HW allows up to 4 descriptors to be combined into a WR. 
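 *
 * Worked example (illustrative, derived from the formula above): a 2-flit
 * request maps to 1 descriptor, and each additional (WR_FLITS - 1) flits
 * costs one more descriptor, so flits = WR_FLITS + 1 maps to 2 descriptors,
 * up to the 4-descriptor limit that the table below encodes.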
 */
static uint8_t flit_desc_map[] = {
	0,
#if SGE_NUM_GENBITS == 1
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
#elif SGE_NUM_GENBITS == 2
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
#else
# error "SGE_NUM_GENBITS must be 1 or 2"
#endif
};

#define	TXQ_LOCK_ASSERT(qs)	mtx_assert(&(qs)->lock, MA_OWNED)
#define	TXQ_TRYLOCK(qs)		mtx_trylock(&(qs)->lock)
#define	TXQ_LOCK(qs)		mtx_lock(&(qs)->lock)
#define	TXQ_UNLOCK(qs)		mtx_unlock(&(qs)->lock)
#define	TXQ_RING_EMPTY(qs)	drbr_empty((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
#define	TXQ_RING_NEEDS_ENQUEUE(qs)	\
	drbr_needs_enqueue((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
#define	TXQ_RING_FLUSH(qs)	drbr_flush((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
#define	TXQ_RING_DEQUEUE_COND(qs, func, arg)	\
	drbr_dequeue_cond((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr, func, arg)
#define	TXQ_RING_DEQUEUE(qs) \
	drbr_dequeue((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)

int cxgb_debug = 0;

static void sge_timer_cb(void *arg);
static void sge_timer_reclaim(void *arg, int ncount);
static void sge_txq_reclaim_handler(void *arg, int ncount);
static void cxgb_start_locked(struct sge_qset *qs);

/*
 * XXX need to cope with bursty scheduling by looking at a wider
 * window than we do now when determining the need for coalescing.
 */
static __inline uint64_t
check_pkt_coalesce(struct sge_qset *qs)
{
	struct adapter *sc;
	struct sge_txq *txq;
	uint8_t *fill;

	if (__predict_false(cxgb_tx_coalesce_force))
		return (1);
	txq = &qs->txq[TXQ_ETH];
	sc = qs->port->adapter;
	fill = &sc->tunq_fill[qs->idx];

	if (cxgb_tx_coalesce_enable_start > COALESCE_START_MAX)
		cxgb_tx_coalesce_enable_start = COALESCE_START_MAX;
	if (cxgb_tx_coalesce_enable_stop < COALESCE_STOP_MIN)
		cxgb_tx_coalesce_enable_stop = COALESCE_STOP_MIN;
	/*
	 * Once the hardware transmit queue fills past the
	 * tx_coalesce_enable_start threshold we mark the queue as
	 * coalescing; we drop back out of coalescing when it falls below
	 * tx_coalesce_enable_stop and there are no packets enqueued.
	 * This provides some degree of hysteresis.
	 */
	if (*fill != 0 && (txq->in_use <= cxgb_tx_coalesce_enable_stop) &&
	    TXQ_RING_EMPTY(qs) && (qs->coalescing == 0))
		*fill = 0;
	else if (*fill == 0 && (txq->in_use >= cxgb_tx_coalesce_enable_start))
		*fill = 1;

	return (sc->tunq_coalesce);
}

#ifdef __LP64__
static void
set_wr_hdr(struct work_request_hdr *wrp, uint32_t wr_hi, uint32_t wr_lo)
{
	uint64_t wr_hilo;
#if _BYTE_ORDER == _LITTLE_ENDIAN
	wr_hilo = wr_hi;
	wr_hilo |= (((uint64_t)wr_lo)<<32);
#else
	wr_hilo = wr_lo;
	wr_hilo |= (((uint64_t)wr_hi)<<32);
#endif
	wrp->wrh_hilo = wr_hilo;
}
#else
static void
set_wr_hdr(struct work_request_hdr *wrp, uint32_t wr_hi, uint32_t wr_lo)
{

	wrp->wrh_hi = wr_hi;
	wmb();
	wrp->wrh_lo = wr_lo;
}
#endif

struct coalesce_info {
	int count;
	int nbytes;
};

static int
coalesce_check(struct mbuf *m, void *arg)
{
	struct coalesce_info *ci = arg;
	int *count = &ci->count;
	int *nbytes =
&ci->nbytes; 323 324 if ((*nbytes == 0) || ((*nbytes + m->m_len <= 10500) && 325 (*count < 7) && (m->m_next == NULL))) { 326 *count += 1; 327 *nbytes += m->m_len; 328 return (1); 329 } 330 return (0); 331 } 332 333 static struct mbuf * 334 cxgb_dequeue(struct sge_qset *qs) 335 { 336 struct mbuf *m, *m_head, *m_tail; 337 struct coalesce_info ci; 338 339 340 if (check_pkt_coalesce(qs) == 0) 341 return TXQ_RING_DEQUEUE(qs); 342 343 m_head = m_tail = NULL; 344 ci.count = ci.nbytes = 0; 345 do { 346 m = TXQ_RING_DEQUEUE_COND(qs, coalesce_check, &ci); 347 if (m_head == NULL) { 348 m_tail = m_head = m; 349 } else if (m != NULL) { 350 m_tail->m_nextpkt = m; 351 m_tail = m; 352 } 353 } while (m != NULL); 354 if (ci.count > 7) 355 panic("trying to coalesce %d packets in to one WR", ci.count); 356 return (m_head); 357 } 358 359 /** 360 * reclaim_completed_tx - reclaims completed Tx descriptors 361 * @adapter: the adapter 362 * @q: the Tx queue to reclaim completed descriptors from 363 * 364 * Reclaims Tx descriptors that the SGE has indicated it has processed, 365 * and frees the associated buffers if possible. Called with the Tx 366 * queue's lock held. 367 */ 368 static __inline int 369 reclaim_completed_tx(struct sge_qset *qs, int reclaim_min, int queue) 370 { 371 struct sge_txq *q = &qs->txq[queue]; 372 int reclaim = desc_reclaimable(q); 373 374 if ((cxgb_tx_reclaim_threshold > TX_RECLAIM_MAX) || 375 (cxgb_tx_reclaim_threshold < TX_RECLAIM_MIN)) 376 cxgb_tx_reclaim_threshold = TX_RECLAIM_DEFAULT; 377 378 if (reclaim < reclaim_min) 379 return (0); 380 381 mtx_assert(&qs->lock, MA_OWNED); 382 if (reclaim > 0) { 383 t3_free_tx_desc(qs, reclaim, queue); 384 q->cleaned += reclaim; 385 q->in_use -= reclaim; 386 } 387 if (isset(&qs->txq_stopped, TXQ_ETH)) 388 clrbit(&qs->txq_stopped, TXQ_ETH); 389 390 return (reclaim); 391 } 392 393 /** 394 * should_restart_tx - are there enough resources to restart a Tx queue? 395 * @q: the Tx queue 396 * 397 * Checks if there are enough descriptors to restart a suspended Tx queue. 398 */ 399 static __inline int 400 should_restart_tx(const struct sge_txq *q) 401 { 402 unsigned int r = q->processed - q->cleaned; 403 404 return q->in_use - r < (q->size >> 1); 405 } 406 407 /** 408 * t3_sge_init - initialize SGE 409 * @adap: the adapter 410 * @p: the SGE parameters 411 * 412 * Performs SGE initialization needed every time after a chip reset. 413 * We do not initialize any of the queue sets here, instead the driver 414 * top-level must request those individually. We also do not enable DMA 415 * here, that should be done after the queues have been set up. 416 */ 417 void 418 t3_sge_init(adapter_t *adap, struct sge_params *p) 419 { 420 u_int ctrl, ups; 421 422 ups = 0; /* = ffs(pci_resource_len(adap->pdev, 2) >> 12); */ 423 424 ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL | 425 F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN | 426 V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS | 427 V_USERSPACESIZE(ups ? 
ups - 1 : 0) | F_ISCSICOALESCING; 428 #if SGE_NUM_GENBITS == 1 429 ctrl |= F_EGRGENCTRL; 430 #endif 431 if (adap->params.rev > 0) { 432 if (!(adap->flags & (USING_MSIX | USING_MSI))) 433 ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ; 434 } 435 t3_write_reg(adap, A_SG_CONTROL, ctrl); 436 t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) | 437 V_LORCQDRBTHRSH(512)); 438 t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10); 439 t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) | 440 V_TIMEOUT(200 * core_ticks_per_usec(adap))); 441 t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH, 442 adap->params.rev < T3_REV_C ? 1000 : 500); 443 t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256); 444 t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000); 445 t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256); 446 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff)); 447 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024); 448 } 449 450 451 /** 452 * sgl_len - calculates the size of an SGL of the given capacity 453 * @n: the number of SGL entries 454 * 455 * Calculates the number of flits needed for a scatter/gather list that 456 * can hold the given number of entries. 457 */ 458 static __inline unsigned int 459 sgl_len(unsigned int n) 460 { 461 return ((3 * n) / 2 + (n & 1)); 462 } 463 464 /** 465 * get_imm_packet - return the next ingress packet buffer from a response 466 * @resp: the response descriptor containing the packet data 467 * 468 * Return a packet containing the immediate data of the given response. 469 */ 470 static int 471 get_imm_packet(adapter_t *sc, const struct rsp_desc *resp, struct mbuf *m) 472 { 473 474 if (resp->rss_hdr.opcode == CPL_RX_DATA) { 475 const struct cpl_rx_data *cpl = (const void *)&resp->imm_data[0]; 476 m->m_len = sizeof(*cpl) + ntohs(cpl->len); 477 } else if (resp->rss_hdr.opcode == CPL_RX_PKT) { 478 const struct cpl_rx_pkt *cpl = (const void *)&resp->imm_data[0]; 479 m->m_len = sizeof(*cpl) + ntohs(cpl->len); 480 } else 481 m->m_len = IMMED_PKT_SIZE; 482 m->m_ext.ext_buf = NULL; 483 m->m_ext.ext_type = 0; 484 memcpy(mtod(m, uint8_t *), resp->imm_data, m->m_len); 485 return (0); 486 } 487 488 static __inline u_int 489 flits_to_desc(u_int n) 490 { 491 return (flit_desc_map[n]); 492 } 493 494 #define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \ 495 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \ 496 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \ 497 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \ 498 F_HIRCQPARITYERROR) 499 #define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR) 500 #define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \ 501 F_RSPQDISABLED) 502 503 /** 504 * t3_sge_err_intr_handler - SGE async event interrupt handler 505 * @adapter: the adapter 506 * 507 * Interrupt handler for SGE asynchronous (non-data) events. 
508 */ 509 void 510 t3_sge_err_intr_handler(adapter_t *adapter) 511 { 512 unsigned int v, status; 513 514 status = t3_read_reg(adapter, A_SG_INT_CAUSE); 515 if (status & SGE_PARERR) 516 CH_ALERT(adapter, "SGE parity error (0x%x)\n", 517 status & SGE_PARERR); 518 if (status & SGE_FRAMINGERR) 519 CH_ALERT(adapter, "SGE framing error (0x%x)\n", 520 status & SGE_FRAMINGERR); 521 if (status & F_RSPQCREDITOVERFOW) 522 CH_ALERT(adapter, "SGE response queue credit overflow\n"); 523 524 if (status & F_RSPQDISABLED) { 525 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS); 526 527 CH_ALERT(adapter, 528 "packet delivered to disabled response queue (0x%x)\n", 529 (v >> S_RSPQ0DISABLED) & 0xff); 530 } 531 532 t3_write_reg(adapter, A_SG_INT_CAUSE, status); 533 if (status & SGE_FATALERR) 534 t3_fatal_err(adapter); 535 } 536 537 void 538 t3_sge_prep(adapter_t *adap, struct sge_params *p) 539 { 540 int i, nqsets, fl_q_size, jumbo_q_size, use_16k, jumbo_buf_size; 541 542 nqsets = min(SGE_QSETS / adap->params.nports, mp_ncpus); 543 nqsets *= adap->params.nports; 544 545 fl_q_size = min(nmbclusters/(3*nqsets), FL_Q_SIZE); 546 547 while (!powerof2(fl_q_size)) 548 fl_q_size--; 549 550 use_16k = cxgb_use_16k_clusters != -1 ? cxgb_use_16k_clusters : 551 is_offload(adap); 552 553 #if __FreeBSD_version >= 700111 554 if (use_16k) { 555 jumbo_q_size = min(nmbjumbo16/(3*nqsets), JUMBO_Q_SIZE); 556 jumbo_buf_size = MJUM16BYTES; 557 } else { 558 jumbo_q_size = min(nmbjumbo9/(3*nqsets), JUMBO_Q_SIZE); 559 jumbo_buf_size = MJUM9BYTES; 560 } 561 #else 562 jumbo_q_size = min(nmbjumbop/(3*nqsets), JUMBO_Q_SIZE); 563 jumbo_buf_size = MJUMPAGESIZE; 564 #endif 565 while (!powerof2(jumbo_q_size)) 566 jumbo_q_size--; 567 568 if (fl_q_size < (FL_Q_SIZE / 4) || jumbo_q_size < (JUMBO_Q_SIZE / 2)) 569 device_printf(adap->dev, 570 "Insufficient clusters and/or jumbo buffers.\n"); 571 572 p->max_pkt_size = jumbo_buf_size - sizeof(struct cpl_rx_data); 573 574 for (i = 0; i < SGE_QSETS; ++i) { 575 struct qset_params *q = p->qset + i; 576 577 if (adap->params.nports > 2) { 578 q->coalesce_usecs = 50; 579 } else { 580 #ifdef INVARIANTS 581 q->coalesce_usecs = 10; 582 #else 583 q->coalesce_usecs = 5; 584 #endif 585 } 586 q->polling = 0; 587 q->rspq_size = RSPQ_Q_SIZE; 588 q->fl_size = fl_q_size; 589 q->jumbo_size = jumbo_q_size; 590 q->jumbo_buf_size = jumbo_buf_size; 591 q->txq_size[TXQ_ETH] = TX_ETH_Q_SIZE; 592 q->txq_size[TXQ_OFLD] = is_offload(adap) ? TX_OFLD_Q_SIZE : 16; 593 q->txq_size[TXQ_CTRL] = TX_CTRL_Q_SIZE; 594 q->cong_thres = 0; 595 } 596 } 597 598 int 599 t3_sge_alloc(adapter_t *sc) 600 { 601 602 /* The parent tag. */ 603 if (bus_dma_tag_create( bus_get_dma_tag(sc->dev),/* PCI parent */ 604 1, 0, /* algnmnt, boundary */ 605 BUS_SPACE_MAXADDR, /* lowaddr */ 606 BUS_SPACE_MAXADDR, /* highaddr */ 607 NULL, NULL, /* filter, filterarg */ 608 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ 609 BUS_SPACE_UNRESTRICTED, /* nsegments */ 610 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 611 0, /* flags */ 612 NULL, NULL, /* lock, lockarg */ 613 &sc->parent_dmat)) { 614 device_printf(sc->dev, "Cannot allocate parent DMA tag\n"); 615 return (ENOMEM); 616 } 617 618 /* 619 * DMA tag for normal sized RX frames 620 */ 621 if (bus_dma_tag_create(sc->parent_dmat, MCLBYTES, 0, BUS_SPACE_MAXADDR, 622 BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, 623 MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->rx_dmat)) { 624 device_printf(sc->dev, "Cannot allocate RX DMA tag\n"); 625 return (ENOMEM); 626 } 627 628 /* 629 * DMA tag for jumbo sized RX frames. 
630 */ 631 if (bus_dma_tag_create(sc->parent_dmat, MJUM16BYTES, 0, BUS_SPACE_MAXADDR, 632 BUS_SPACE_MAXADDR, NULL, NULL, MJUM16BYTES, 1, MJUM16BYTES, 633 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->rx_jumbo_dmat)) { 634 device_printf(sc->dev, "Cannot allocate RX jumbo DMA tag\n"); 635 return (ENOMEM); 636 } 637 638 /* 639 * DMA tag for TX frames. 640 */ 641 if (bus_dma_tag_create(sc->parent_dmat, 1, 0, BUS_SPACE_MAXADDR, 642 BUS_SPACE_MAXADDR, NULL, NULL, TX_MAX_SIZE, TX_MAX_SEGS, 643 TX_MAX_SIZE, BUS_DMA_ALLOCNOW, 644 NULL, NULL, &sc->tx_dmat)) { 645 device_printf(sc->dev, "Cannot allocate TX DMA tag\n"); 646 return (ENOMEM); 647 } 648 649 return (0); 650 } 651 652 int 653 t3_sge_free(struct adapter * sc) 654 { 655 656 if (sc->tx_dmat != NULL) 657 bus_dma_tag_destroy(sc->tx_dmat); 658 659 if (sc->rx_jumbo_dmat != NULL) 660 bus_dma_tag_destroy(sc->rx_jumbo_dmat); 661 662 if (sc->rx_dmat != NULL) 663 bus_dma_tag_destroy(sc->rx_dmat); 664 665 if (sc->parent_dmat != NULL) 666 bus_dma_tag_destroy(sc->parent_dmat); 667 668 return (0); 669 } 670 671 void 672 t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p) 673 { 674 675 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U); 676 qs->rspq.polling = 0 /* p->polling */; 677 } 678 679 #if !defined(__i386__) && !defined(__amd64__) 680 static void 681 refill_fl_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 682 { 683 struct refill_fl_cb_arg *cb_arg = arg; 684 685 cb_arg->error = error; 686 cb_arg->seg = segs[0]; 687 cb_arg->nseg = nseg; 688 689 } 690 #endif 691 /** 692 * refill_fl - refill an SGE free-buffer list 693 * @sc: the controller softc 694 * @q: the free-list to refill 695 * @n: the number of new buffers to allocate 696 * 697 * (Re)populate an SGE free-buffer list with up to @n new packet buffers. 698 * The caller must assure that @n does not exceed the queue's capacity. 699 */ 700 static void 701 refill_fl(adapter_t *sc, struct sge_fl *q, int n) 702 { 703 struct rx_sw_desc *sd = &q->sdesc[q->pidx]; 704 struct rx_desc *d = &q->desc[q->pidx]; 705 struct refill_fl_cb_arg cb_arg; 706 struct mbuf *m; 707 caddr_t cl; 708 int err; 709 710 cb_arg.error = 0; 711 while (n--) { 712 /* 713 * We allocate an uninitialized mbuf + cluster, mbuf is 714 * initialized after rx. 
715 */ 716 if (q->zone == zone_pack) { 717 if ((m = m_getcl(M_NOWAIT, MT_NOINIT, M_PKTHDR)) == NULL) 718 break; 719 cl = m->m_ext.ext_buf; 720 } else { 721 if ((cl = m_cljget(NULL, M_NOWAIT, q->buf_size)) == NULL) 722 break; 723 if ((m = m_gethdr(M_NOWAIT, MT_NOINIT)) == NULL) { 724 uma_zfree(q->zone, cl); 725 break; 726 } 727 } 728 if ((sd->flags & RX_SW_DESC_MAP_CREATED) == 0) { 729 if ((err = bus_dmamap_create(q->entry_tag, 0, &sd->map))) { 730 log(LOG_WARNING, "bus_dmamap_create failed %d\n", err); 731 uma_zfree(q->zone, cl); 732 goto done; 733 } 734 sd->flags |= RX_SW_DESC_MAP_CREATED; 735 } 736 #if !defined(__i386__) && !defined(__amd64__) 737 err = bus_dmamap_load(q->entry_tag, sd->map, 738 cl, q->buf_size, refill_fl_cb, &cb_arg, 0); 739 740 if (err != 0 || cb_arg.error) { 741 if (q->zone == zone_pack) 742 uma_zfree(q->zone, cl); 743 m_free(m); 744 goto done; 745 } 746 #else 747 cb_arg.seg.ds_addr = pmap_kextract((vm_offset_t)cl); 748 #endif 749 sd->flags |= RX_SW_DESC_INUSE; 750 sd->rxsd_cl = cl; 751 sd->m = m; 752 d->addr_lo = htobe32(cb_arg.seg.ds_addr & 0xffffffff); 753 d->addr_hi = htobe32(((uint64_t)cb_arg.seg.ds_addr >>32) & 0xffffffff); 754 d->len_gen = htobe32(V_FLD_GEN1(q->gen)); 755 d->gen2 = htobe32(V_FLD_GEN2(q->gen)); 756 757 d++; 758 sd++; 759 760 if (++q->pidx == q->size) { 761 q->pidx = 0; 762 q->gen ^= 1; 763 sd = q->sdesc; 764 d = q->desc; 765 } 766 q->credits++; 767 q->db_pending++; 768 } 769 770 done: 771 if (q->db_pending >= 32) { 772 q->db_pending = 0; 773 t3_write_reg(sc, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id)); 774 } 775 } 776 777 778 /** 779 * free_rx_bufs - free the Rx buffers on an SGE free list 780 * @sc: the controle softc 781 * @q: the SGE free list to clean up 782 * 783 * Release the buffers on an SGE free-buffer Rx queue. HW fetching from 784 * this queue should be stopped before calling this function. 785 */ 786 static void 787 free_rx_bufs(adapter_t *sc, struct sge_fl *q) 788 { 789 u_int cidx = q->cidx; 790 791 while (q->credits--) { 792 struct rx_sw_desc *d = &q->sdesc[cidx]; 793 794 if (d->flags & RX_SW_DESC_INUSE) { 795 bus_dmamap_unload(q->entry_tag, d->map); 796 bus_dmamap_destroy(q->entry_tag, d->map); 797 if (q->zone == zone_pack) { 798 m_init(d->m, zone_pack, MCLBYTES, 799 M_NOWAIT, MT_DATA, M_EXT); 800 uma_zfree(zone_pack, d->m); 801 } else { 802 m_init(d->m, zone_mbuf, MLEN, 803 M_NOWAIT, MT_DATA, 0); 804 uma_zfree(zone_mbuf, d->m); 805 uma_zfree(q->zone, d->rxsd_cl); 806 } 807 } 808 809 d->rxsd_cl = NULL; 810 d->m = NULL; 811 if (++cidx == q->size) 812 cidx = 0; 813 } 814 } 815 816 static __inline void 817 __refill_fl(adapter_t *adap, struct sge_fl *fl) 818 { 819 refill_fl(adap, fl, min(16U, fl->size - fl->credits)); 820 } 821 822 static __inline void 823 __refill_fl_lt(adapter_t *adap, struct sge_fl *fl, int max) 824 { 825 uint32_t reclaimable = fl->size - fl->credits; 826 827 if (reclaimable > 0) 828 refill_fl(adap, fl, min(max, reclaimable)); 829 } 830 831 /** 832 * recycle_rx_buf - recycle a receive buffer 833 * @adapter: the adapter 834 * @q: the SGE free list 835 * @idx: index of buffer to recycle 836 * 837 * Recycles the specified buffer on the given free list by adding it at 838 * the next available slot on the list. 
 */
static void
recycle_rx_buf(adapter_t *adap, struct sge_fl *q, unsigned int idx)
{
	struct rx_desc *from = &q->desc[idx];
	struct rx_desc *to   = &q->desc[q->pidx];

	q->sdesc[q->pidx] = q->sdesc[idx];
	to->addr_lo = from->addr_lo;	// already big endian
	to->addr_hi = from->addr_hi;	// likewise
	wmb();	/* necessary ? */
	to->len_gen = htobe32(V_FLD_GEN1(q->gen));
	to->gen2 = htobe32(V_FLD_GEN2(q->gen));
	q->credits++;

	if (++q->pidx == q->size) {
		q->pidx = 0;
		q->gen ^= 1;
	}
	t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
}

static void
alloc_ring_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	uint32_t *addr;

	addr = arg;
	*addr = segs[0].ds_addr;
}

static int
alloc_ring(adapter_t *sc, size_t nelem, size_t elem_size, size_t sw_size,
    bus_addr_t *phys, void *desc, void *sdesc, bus_dma_tag_t *tag,
    bus_dmamap_t *map, bus_dma_tag_t parent_entry_tag, bus_dma_tag_t *entry_tag)
{
	size_t len = nelem * elem_size;
	void *s = NULL;
	void *p = NULL;
	int err;

	if ((err = bus_dma_tag_create(sc->parent_dmat, PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, len, 1,
	    len, 0, NULL, NULL, tag)) != 0) {
		device_printf(sc->dev, "Cannot allocate descriptor tag\n");
		return (ENOMEM);
	}

	if ((err = bus_dmamem_alloc(*tag, (void **)&p, BUS_DMA_NOWAIT,
	    map)) != 0) {
		device_printf(sc->dev, "Cannot allocate descriptor memory\n");
		return (ENOMEM);
	}

	bus_dmamap_load(*tag, *map, p, len, alloc_ring_cb, phys, 0);
	bzero(p, len);
	*(void **)desc = p;

	if (sw_size) {
		len = nelem * sw_size;
		s = malloc(len, M_DEVBUF, M_WAITOK|M_ZERO);
		*(void **)sdesc = s;
	}
	if (parent_entry_tag == NULL)
		return (0);

	if ((err = bus_dma_tag_create(parent_entry_tag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL, TX_MAX_SIZE, TX_MAX_SEGS,
	    TX_MAX_SIZE, BUS_DMA_ALLOCNOW,
	    NULL, NULL, entry_tag)) != 0) {
		device_printf(sc->dev, "Cannot allocate descriptor entry tag\n");
		return (ENOMEM);
	}
	return (0);
}

static void
sge_slow_intr_handler(void *arg, int ncount)
{
	adapter_t *sc = arg;

	t3_slow_intr_handler(sc);
	t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
	(void) t3_read_reg(sc, A_PL_INT_ENABLE0);
}

/**
 * sge_timer_cb - perform periodic maintenance of an SGE qset
 * @data: the SGE queue set to maintain
 *
 * Runs periodically from a timer to perform maintenance of an SGE queue
 * set. It performs the following tasks:
 *
 * a) Cleans up any completed Tx descriptors that may still be pending.
 * Normal descriptor cleanup happens when new packets are added to a Tx
 * queue so this timer is relatively infrequent and does any cleanup only
 * if the Tx queue has not seen any new packets in a while. We make a
 * best effort attempt to reclaim descriptors, in that we don't wait
 * around if we cannot get a queue's lock (which most likely is because
 * someone else is queueing new packets and so will also handle the clean
 * up). Since control queues use immediate data exclusively we don't
 * bother cleaning them up here.
 *
 * b) Replenishes Rx queues that have run out due to memory shortage.
 * Normally new Rx buffers are added when existing ones are consumed but
 * when out of memory a queue can become empty.
We try to add only a few 947 * buffers here, the queue will be replenished fully as these new buffers 948 * are used up if memory shortage has subsided. 949 * 950 * c) Return coalesced response queue credits in case a response queue is 951 * starved. 952 * 953 * d) Ring doorbells for T304 tunnel queues since we have seen doorbell 954 * fifo overflows and the FW doesn't implement any recovery scheme yet. 955 */ 956 static void 957 sge_timer_cb(void *arg) 958 { 959 adapter_t *sc = arg; 960 if ((sc->flags & USING_MSIX) == 0) { 961 962 struct port_info *pi; 963 struct sge_qset *qs; 964 struct sge_txq *txq; 965 int i, j; 966 int reclaim_ofl, refill_rx; 967 968 if (sc->open_device_map == 0) 969 return; 970 971 for (i = 0; i < sc->params.nports; i++) { 972 pi = &sc->port[i]; 973 for (j = 0; j < pi->nqsets; j++) { 974 qs = &sc->sge.qs[pi->first_qset + j]; 975 txq = &qs->txq[0]; 976 reclaim_ofl = txq[TXQ_OFLD].processed - txq[TXQ_OFLD].cleaned; 977 refill_rx = ((qs->fl[0].credits < qs->fl[0].size) || 978 (qs->fl[1].credits < qs->fl[1].size)); 979 if (reclaim_ofl || refill_rx) { 980 taskqueue_enqueue(sc->tq, &pi->timer_reclaim_task); 981 break; 982 } 983 } 984 } 985 } 986 987 if (sc->params.nports > 2) { 988 int i; 989 990 for_each_port(sc, i) { 991 struct port_info *pi = &sc->port[i]; 992 993 t3_write_reg(sc, A_SG_KDOORBELL, 994 F_SELEGRCNTX | 995 (FW_TUNNEL_SGEEC_START + pi->first_qset)); 996 } 997 } 998 if (((sc->flags & USING_MSIX) == 0 || sc->params.nports > 2) && 999 sc->open_device_map != 0) 1000 callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc); 1001 } 1002 1003 /* 1004 * This is meant to be a catch-all function to keep sge state private 1005 * to sge.c 1006 * 1007 */ 1008 int 1009 t3_sge_init_adapter(adapter_t *sc) 1010 { 1011 callout_init(&sc->sge_timer_ch, CALLOUT_MPSAFE); 1012 callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc); 1013 TASK_INIT(&sc->slow_intr_task, 0, sge_slow_intr_handler, sc); 1014 return (0); 1015 } 1016 1017 int 1018 t3_sge_reset_adapter(adapter_t *sc) 1019 { 1020 callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc); 1021 return (0); 1022 } 1023 1024 int 1025 t3_sge_init_port(struct port_info *pi) 1026 { 1027 TASK_INIT(&pi->timer_reclaim_task, 0, sge_timer_reclaim, pi); 1028 return (0); 1029 } 1030 1031 /** 1032 * refill_rspq - replenish an SGE response queue 1033 * @adapter: the adapter 1034 * @q: the response queue to replenish 1035 * @credits: how many new responses to make available 1036 * 1037 * Replenishes a response queue by making the supplied number of responses 1038 * available to HW. 1039 */ 1040 static __inline void 1041 refill_rspq(adapter_t *sc, const struct sge_rspq *q, u_int credits) 1042 { 1043 1044 /* mbufs are allocated on demand when a rspq entry is processed. 
*/ 1045 t3_write_reg(sc, A_SG_RSPQ_CREDIT_RETURN, 1046 V_RSPQ(q->cntxt_id) | V_CREDITS(credits)); 1047 } 1048 1049 static void 1050 sge_txq_reclaim_handler(void *arg, int ncount) 1051 { 1052 struct sge_qset *qs = arg; 1053 int i; 1054 1055 for (i = 0; i < 3; i++) 1056 reclaim_completed_tx(qs, 16, i); 1057 } 1058 1059 static void 1060 sge_timer_reclaim(void *arg, int ncount) 1061 { 1062 struct port_info *pi = arg; 1063 int i, nqsets = pi->nqsets; 1064 adapter_t *sc = pi->adapter; 1065 struct sge_qset *qs; 1066 struct mtx *lock; 1067 1068 KASSERT((sc->flags & USING_MSIX) == 0, 1069 ("can't call timer reclaim for msi-x")); 1070 1071 for (i = 0; i < nqsets; i++) { 1072 qs = &sc->sge.qs[pi->first_qset + i]; 1073 1074 reclaim_completed_tx(qs, 16, TXQ_OFLD); 1075 lock = (sc->flags & USING_MSIX) ? &qs->rspq.lock : 1076 &sc->sge.qs[0].rspq.lock; 1077 1078 if (mtx_trylock(lock)) { 1079 /* XXX currently assume that we are *NOT* polling */ 1080 uint32_t status = t3_read_reg(sc, A_SG_RSPQ_FL_STATUS); 1081 1082 if (qs->fl[0].credits < qs->fl[0].size - 16) 1083 __refill_fl(sc, &qs->fl[0]); 1084 if (qs->fl[1].credits < qs->fl[1].size - 16) 1085 __refill_fl(sc, &qs->fl[1]); 1086 1087 if (status & (1 << qs->rspq.cntxt_id)) { 1088 if (qs->rspq.credits) { 1089 refill_rspq(sc, &qs->rspq, 1); 1090 qs->rspq.credits--; 1091 t3_write_reg(sc, A_SG_RSPQ_FL_STATUS, 1092 1 << qs->rspq.cntxt_id); 1093 } 1094 } 1095 mtx_unlock(lock); 1096 } 1097 } 1098 } 1099 1100 /** 1101 * init_qset_cntxt - initialize an SGE queue set context info 1102 * @qs: the queue set 1103 * @id: the queue set id 1104 * 1105 * Initializes the TIDs and context ids for the queues of a queue set. 1106 */ 1107 static void 1108 init_qset_cntxt(struct sge_qset *qs, u_int id) 1109 { 1110 1111 qs->rspq.cntxt_id = id; 1112 qs->fl[0].cntxt_id = 2 * id; 1113 qs->fl[1].cntxt_id = 2 * id + 1; 1114 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id; 1115 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id; 1116 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id; 1117 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id; 1118 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id; 1119 1120 mbufq_init(&qs->txq[TXQ_ETH].sendq); 1121 mbufq_init(&qs->txq[TXQ_OFLD].sendq); 1122 mbufq_init(&qs->txq[TXQ_CTRL].sendq); 1123 } 1124 1125 1126 static void 1127 txq_prod(struct sge_txq *txq, unsigned int ndesc, struct txq_state *txqs) 1128 { 1129 txq->in_use += ndesc; 1130 /* 1131 * XXX we don't handle stopping of queue 1132 * presumably start handles this when we bump against the end 1133 */ 1134 txqs->gen = txq->gen; 1135 txq->unacked += ndesc; 1136 txqs->compl = (txq->unacked & 32) << (S_WR_COMPL - 5); 1137 txq->unacked &= 31; 1138 txqs->pidx = txq->pidx; 1139 txq->pidx += ndesc; 1140 #ifdef INVARIANTS 1141 if (((txqs->pidx > txq->cidx) && 1142 (txq->pidx < txqs->pidx) && 1143 (txq->pidx >= txq->cidx)) || 1144 ((txqs->pidx < txq->cidx) && 1145 (txq->pidx >= txq-> cidx)) || 1146 ((txqs->pidx < txq->cidx) && 1147 (txq->cidx < txqs->pidx))) 1148 panic("txqs->pidx=%d txq->pidx=%d txq->cidx=%d", 1149 txqs->pidx, txq->pidx, txq->cidx); 1150 #endif 1151 if (txq->pidx >= txq->size) { 1152 txq->pidx -= txq->size; 1153 txq->gen ^= 1; 1154 } 1155 1156 } 1157 1158 /** 1159 * calc_tx_descs - calculate the number of Tx descriptors for a packet 1160 * @m: the packet mbufs 1161 * @nsegs: the number of segments 1162 * 1163 * Returns the number of Tx descriptors needed for the given Ethernet 1164 * packet. Ethernet packets require addition of WR and CPL headers. 
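 *
 * Illustrative example based on the code below: a packet of at most
 * PIO_LEN bytes is written as immediate data and always needs one
 * descriptor; a larger non-TSO packet with 3 DMA segments needs
 * sgl_len(3) + 2 = 7 flits, which flits_to_desc() maps to a single
 * descriptor.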
1165 */ 1166 static __inline unsigned int 1167 calc_tx_descs(const struct mbuf *m, int nsegs) 1168 { 1169 unsigned int flits; 1170 1171 if (m->m_pkthdr.len <= PIO_LEN) 1172 return 1; 1173 1174 flits = sgl_len(nsegs) + 2; 1175 if (m->m_pkthdr.csum_flags & CSUM_TSO) 1176 flits++; 1177 1178 return flits_to_desc(flits); 1179 } 1180 1181 /** 1182 * make_sgl - populate a scatter/gather list for a packet 1183 * @sgp: the SGL to populate 1184 * @segs: the packet dma segments 1185 * @nsegs: the number of segments 1186 * 1187 * Generates a scatter/gather list for the buffers that make up a packet 1188 * and returns the SGL size in 8-byte words. The caller must size the SGL 1189 * appropriately. 1190 */ 1191 static __inline void 1192 make_sgl(struct sg_ent *sgp, bus_dma_segment_t *segs, int nsegs) 1193 { 1194 int i, idx; 1195 1196 for (idx = 0, i = 0; i < nsegs; i++) { 1197 /* 1198 * firmware doesn't like empty segments 1199 */ 1200 if (segs[i].ds_len == 0) 1201 continue; 1202 if (i && idx == 0) 1203 ++sgp; 1204 1205 sgp->len[idx] = htobe32(segs[i].ds_len); 1206 sgp->addr[idx] = htobe64(segs[i].ds_addr); 1207 idx ^= 1; 1208 } 1209 1210 if (idx) { 1211 sgp->len[idx] = 0; 1212 sgp->addr[idx] = 0; 1213 } 1214 } 1215 1216 /** 1217 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell 1218 * @adap: the adapter 1219 * @q: the Tx queue 1220 * 1221 * Ring the doorbell if a Tx queue is asleep. There is a natural race, 1222 * where the HW is going to sleep just after we checked, however, 1223 * then the interrupt handler will detect the outstanding TX packet 1224 * and ring the doorbell for us. 1225 * 1226 * When GTS is disabled we unconditionally ring the doorbell. 1227 */ 1228 static __inline void 1229 check_ring_tx_db(adapter_t *adap, struct sge_txq *q, int mustring) 1230 { 1231 #if USE_GTS 1232 clear_bit(TXQ_LAST_PKT_DB, &q->flags); 1233 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) { 1234 set_bit(TXQ_LAST_PKT_DB, &q->flags); 1235 #ifdef T3_TRACE 1236 T3_TRACE1(adap->tb[q->cntxt_id & 7], "doorbell Tx, cntxt %d", 1237 q->cntxt_id); 1238 #endif 1239 t3_write_reg(adap, A_SG_KDOORBELL, 1240 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); 1241 } 1242 #else 1243 if (mustring || ++q->db_pending >= 32) { 1244 wmb(); /* write descriptors before telling HW */ 1245 t3_write_reg(adap, A_SG_KDOORBELL, 1246 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); 1247 q->db_pending = 0; 1248 } 1249 #endif 1250 } 1251 1252 static __inline void 1253 wr_gen2(struct tx_desc *d, unsigned int gen) 1254 { 1255 #if SGE_NUM_GENBITS == 2 1256 d->flit[TX_DESC_FLITS - 1] = htobe64(gen); 1257 #endif 1258 } 1259 1260 /** 1261 * write_wr_hdr_sgl - write a WR header and, optionally, SGL 1262 * @ndesc: number of Tx descriptors spanned by the SGL 1263 * @txd: first Tx descriptor to be written 1264 * @txqs: txq state (generation and producer index) 1265 * @txq: the SGE Tx queue 1266 * @sgl: the SGL 1267 * @flits: number of flits to the start of the SGL in the first descriptor 1268 * @sgl_flits: the SGL size in flits 1269 * @wr_hi: top 32 bits of WR header based on WR type (big endian) 1270 * @wr_lo: low 32 bits of WR header based on WR type (big endian) 1271 * 1272 * Write a work request header and an associated SGL. If the SGL is 1273 * small enough to fit into one Tx descriptor it has already been written 1274 * and we just need to write the WR header. Otherwise we distribute the 1275 * SGL across the number of descriptors it spans. 
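 *
 * In the multi-descriptor case (see the loop below) the first descriptor
 * carries up to WR_FLITS - @flits SGL flits after the header, and every
 * continuation descriptor re-writes a one-flit WR header of its own, so
 * each continuation carries at most WR_FLITS - 1 additional SGL flits.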
1276 */ 1277 static void 1278 write_wr_hdr_sgl(unsigned int ndesc, struct tx_desc *txd, struct txq_state *txqs, 1279 const struct sge_txq *txq, const struct sg_ent *sgl, unsigned int flits, 1280 unsigned int sgl_flits, unsigned int wr_hi, unsigned int wr_lo) 1281 { 1282 1283 struct work_request_hdr *wrp = (struct work_request_hdr *)txd; 1284 struct tx_sw_desc *txsd = &txq->sdesc[txqs->pidx]; 1285 1286 if (__predict_true(ndesc == 1)) { 1287 set_wr_hdr(wrp, htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) | 1288 V_WR_SGLSFLT(flits)) | wr_hi, 1289 htonl(V_WR_LEN(flits + sgl_flits) | V_WR_GEN(txqs->gen)) | 1290 wr_lo); 1291 1292 wr_gen2(txd, txqs->gen); 1293 1294 } else { 1295 unsigned int ogen = txqs->gen; 1296 const uint64_t *fp = (const uint64_t *)sgl; 1297 struct work_request_hdr *wp = wrp; 1298 1299 wrp->wrh_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) | 1300 V_WR_SGLSFLT(flits)) | wr_hi; 1301 1302 while (sgl_flits) { 1303 unsigned int avail = WR_FLITS - flits; 1304 1305 if (avail > sgl_flits) 1306 avail = sgl_flits; 1307 memcpy(&txd->flit[flits], fp, avail * sizeof(*fp)); 1308 sgl_flits -= avail; 1309 ndesc--; 1310 if (!sgl_flits) 1311 break; 1312 1313 fp += avail; 1314 txd++; 1315 txsd++; 1316 if (++txqs->pidx == txq->size) { 1317 txqs->pidx = 0; 1318 txqs->gen ^= 1; 1319 txd = txq->desc; 1320 txsd = txq->sdesc; 1321 } 1322 1323 /* 1324 * when the head of the mbuf chain 1325 * is freed all clusters will be freed 1326 * with it 1327 */ 1328 wrp = (struct work_request_hdr *)txd; 1329 wrp->wrh_hi = htonl(V_WR_DATATYPE(1) | 1330 V_WR_SGLSFLT(1)) | wr_hi; 1331 wrp->wrh_lo = htonl(V_WR_LEN(min(WR_FLITS, 1332 sgl_flits + 1)) | 1333 V_WR_GEN(txqs->gen)) | wr_lo; 1334 wr_gen2(txd, txqs->gen); 1335 flits = 1; 1336 } 1337 wrp->wrh_hi |= htonl(F_WR_EOP); 1338 wmb(); 1339 wp->wrh_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo; 1340 wr_gen2((struct tx_desc *)wp, ogen); 1341 } 1342 } 1343 1344 /* sizeof(*eh) + sizeof(*ip) + sizeof(*tcp) */ 1345 #define TCPPKTHDRSIZE (ETHER_HDR_LEN + 20 + 20) 1346 1347 #define GET_VTAG(cntrl, m) \ 1348 do { \ 1349 if ((m)->m_flags & M_VLANTAG) \ 1350 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN((m)->m_pkthdr.ether_vtag); \ 1351 } while (0) 1352 1353 static int 1354 t3_encap(struct sge_qset *qs, struct mbuf **m) 1355 { 1356 adapter_t *sc; 1357 struct mbuf *m0; 1358 struct sge_txq *txq; 1359 struct txq_state txqs; 1360 struct port_info *pi; 1361 unsigned int ndesc, flits, cntrl, mlen; 1362 int err, nsegs, tso_info = 0; 1363 1364 struct work_request_hdr *wrp; 1365 struct tx_sw_desc *txsd; 1366 struct sg_ent *sgp, *sgl; 1367 uint32_t wr_hi, wr_lo, sgl_flits; 1368 bus_dma_segment_t segs[TX_MAX_SEGS]; 1369 1370 struct tx_desc *txd; 1371 1372 pi = qs->port; 1373 sc = pi->adapter; 1374 txq = &qs->txq[TXQ_ETH]; 1375 txd = &txq->desc[txq->pidx]; 1376 txsd = &txq->sdesc[txq->pidx]; 1377 sgl = txq->txq_sgl; 1378 1379 prefetch(txd); 1380 m0 = *m; 1381 1382 mtx_assert(&qs->lock, MA_OWNED); 1383 cntrl = V_TXPKT_INTF(pi->txpkt_intf); 1384 KASSERT(m0->m_flags & M_PKTHDR, ("not packet header\n")); 1385 1386 if (m0->m_nextpkt == NULL && m0->m_next != NULL && 1387 m0->m_pkthdr.csum_flags & (CSUM_TSO)) 1388 tso_info = V_LSO_MSS(m0->m_pkthdr.tso_segsz); 1389 1390 if (m0->m_nextpkt != NULL) { 1391 busdma_map_sg_vec(txq->entry_tag, txsd->map, m0, segs, &nsegs); 1392 ndesc = 1; 1393 mlen = 0; 1394 } else { 1395 if ((err = busdma_map_sg_collapse(txq->entry_tag, txsd->map, 1396 &m0, segs, &nsegs))) { 1397 if (cxgb_debug) 1398 printf("failed ... 
err=%d\n", err); 1399 return (err); 1400 } 1401 mlen = m0->m_pkthdr.len; 1402 ndesc = calc_tx_descs(m0, nsegs); 1403 } 1404 txq_prod(txq, ndesc, &txqs); 1405 1406 KASSERT(m0->m_pkthdr.len, ("empty packet nsegs=%d", nsegs)); 1407 txsd->m = m0; 1408 1409 if (m0->m_nextpkt != NULL) { 1410 struct cpl_tx_pkt_batch *cpl_batch = (struct cpl_tx_pkt_batch *)txd; 1411 int i, fidx; 1412 1413 if (nsegs > 7) 1414 panic("trying to coalesce %d packets in to one WR", nsegs); 1415 txq->txq_coalesced += nsegs; 1416 wrp = (struct work_request_hdr *)txd; 1417 flits = nsegs*2 + 1; 1418 1419 for (fidx = 1, i = 0; i < nsegs; i++, fidx += 2) { 1420 struct cpl_tx_pkt_batch_entry *cbe; 1421 uint64_t flit; 1422 uint32_t *hflit = (uint32_t *)&flit; 1423 int cflags = m0->m_pkthdr.csum_flags; 1424 1425 cntrl = V_TXPKT_INTF(pi->txpkt_intf); 1426 GET_VTAG(cntrl, m0); 1427 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT); 1428 if (__predict_false(!(cflags & CSUM_IP))) 1429 cntrl |= F_TXPKT_IPCSUM_DIS; 1430 if (__predict_false(!(cflags & (CSUM_TCP | CSUM_UDP | 1431 CSUM_UDP_IPV6 | CSUM_TCP_IPV6)))) 1432 cntrl |= F_TXPKT_L4CSUM_DIS; 1433 1434 hflit[0] = htonl(cntrl); 1435 hflit[1] = htonl(segs[i].ds_len | 0x80000000); 1436 flit |= htobe64(1 << 24); 1437 cbe = &cpl_batch->pkt_entry[i]; 1438 cbe->cntrl = hflit[0]; 1439 cbe->len = hflit[1]; 1440 cbe->addr = htobe64(segs[i].ds_addr); 1441 } 1442 1443 wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) | 1444 V_WR_SGLSFLT(flits)) | 1445 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | txqs.compl); 1446 wr_lo = htonl(V_WR_LEN(flits) | 1447 V_WR_GEN(txqs.gen)) | htonl(V_WR_TID(txq->token)); 1448 set_wr_hdr(wrp, wr_hi, wr_lo); 1449 wmb(); 1450 ETHER_BPF_MTAP(pi->ifp, m0); 1451 wr_gen2(txd, txqs.gen); 1452 check_ring_tx_db(sc, txq, 0); 1453 return (0); 1454 } else if (tso_info) { 1455 uint16_t eth_type; 1456 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)txd; 1457 struct ether_header *eh; 1458 void *l3hdr; 1459 struct tcphdr *tcp; 1460 1461 txd->flit[2] = 0; 1462 GET_VTAG(cntrl, m0); 1463 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO); 1464 hdr->cntrl = htonl(cntrl); 1465 hdr->len = htonl(mlen | 0x80000000); 1466 1467 if (__predict_false(mlen < TCPPKTHDRSIZE)) { 1468 printf("mbuf=%p,len=%d,tso_segsz=%d,csum_flags=%b,flags=%#x", 1469 m0, mlen, m0->m_pkthdr.tso_segsz, 1470 (int)m0->m_pkthdr.csum_flags, CSUM_BITS, m0->m_flags); 1471 panic("tx tso packet too small"); 1472 } 1473 1474 /* Make sure that ether, ip, tcp headers are all in m0 */ 1475 if (__predict_false(m0->m_len < TCPPKTHDRSIZE)) { 1476 m0 = m_pullup(m0, TCPPKTHDRSIZE); 1477 if (__predict_false(m0 == NULL)) { 1478 /* XXX panic probably an overreaction */ 1479 panic("couldn't fit header into mbuf"); 1480 } 1481 } 1482 1483 eh = mtod(m0, struct ether_header *); 1484 eth_type = eh->ether_type; 1485 if (eth_type == htons(ETHERTYPE_VLAN)) { 1486 struct ether_vlan_header *evh = (void *)eh; 1487 1488 tso_info |= V_LSO_ETH_TYPE(CPL_ETH_II_VLAN); 1489 l3hdr = evh + 1; 1490 eth_type = evh->evl_proto; 1491 } else { 1492 tso_info |= V_LSO_ETH_TYPE(CPL_ETH_II); 1493 l3hdr = eh + 1; 1494 } 1495 1496 if (eth_type == htons(ETHERTYPE_IP)) { 1497 struct ip *ip = l3hdr; 1498 1499 tso_info |= V_LSO_IPHDR_WORDS(ip->ip_hl); 1500 tcp = (struct tcphdr *)(ip + 1); 1501 } else if (eth_type == htons(ETHERTYPE_IPV6)) { 1502 struct ip6_hdr *ip6 = l3hdr; 1503 1504 KASSERT(ip6->ip6_nxt == IPPROTO_TCP, 1505 ("%s: CSUM_TSO with ip6_nxt %d", 1506 __func__, ip6->ip6_nxt)); 1507 1508 tso_info |= F_LSO_IPV6; 1509 tso_info |= V_LSO_IPHDR_WORDS(sizeof(*ip6) >> 2); 1510 tcp = 
(struct tcphdr *)(ip6 + 1); 1511 } else 1512 panic("%s: CSUM_TSO but neither ip nor ip6", __func__); 1513 1514 tso_info |= V_LSO_TCPHDR_WORDS(tcp->th_off); 1515 hdr->lso_info = htonl(tso_info); 1516 1517 if (__predict_false(mlen <= PIO_LEN)) { 1518 /* 1519 * pkt not undersized but fits in PIO_LEN 1520 * Indicates a TSO bug at the higher levels. 1521 */ 1522 txsd->m = NULL; 1523 m_copydata(m0, 0, mlen, (caddr_t)&txd->flit[3]); 1524 flits = (mlen + 7) / 8 + 3; 1525 wr_hi = htonl(V_WR_BCNTLFLT(mlen & 7) | 1526 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | 1527 F_WR_SOP | F_WR_EOP | txqs.compl); 1528 wr_lo = htonl(V_WR_LEN(flits) | 1529 V_WR_GEN(txqs.gen) | V_WR_TID(txq->token)); 1530 set_wr_hdr(&hdr->wr, wr_hi, wr_lo); 1531 wmb(); 1532 ETHER_BPF_MTAP(pi->ifp, m0); 1533 wr_gen2(txd, txqs.gen); 1534 check_ring_tx_db(sc, txq, 0); 1535 m_freem(m0); 1536 return (0); 1537 } 1538 flits = 3; 1539 } else { 1540 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)txd; 1541 1542 GET_VTAG(cntrl, m0); 1543 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT); 1544 if (__predict_false(!(m0->m_pkthdr.csum_flags & CSUM_IP))) 1545 cntrl |= F_TXPKT_IPCSUM_DIS; 1546 if (__predict_false(!(m0->m_pkthdr.csum_flags & (CSUM_TCP | 1547 CSUM_UDP | CSUM_UDP_IPV6 | CSUM_TCP_IPV6)))) 1548 cntrl |= F_TXPKT_L4CSUM_DIS; 1549 cpl->cntrl = htonl(cntrl); 1550 cpl->len = htonl(mlen | 0x80000000); 1551 1552 if (mlen <= PIO_LEN) { 1553 txsd->m = NULL; 1554 m_copydata(m0, 0, mlen, (caddr_t)&txd->flit[2]); 1555 flits = (mlen + 7) / 8 + 2; 1556 1557 wr_hi = htonl(V_WR_BCNTLFLT(mlen & 7) | 1558 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | 1559 F_WR_SOP | F_WR_EOP | txqs.compl); 1560 wr_lo = htonl(V_WR_LEN(flits) | 1561 V_WR_GEN(txqs.gen) | V_WR_TID(txq->token)); 1562 set_wr_hdr(&cpl->wr, wr_hi, wr_lo); 1563 wmb(); 1564 ETHER_BPF_MTAP(pi->ifp, m0); 1565 wr_gen2(txd, txqs.gen); 1566 check_ring_tx_db(sc, txq, 0); 1567 m_freem(m0); 1568 return (0); 1569 } 1570 flits = 2; 1571 } 1572 wrp = (struct work_request_hdr *)txd; 1573 sgp = (ndesc == 1) ? 
(struct sg_ent *)&txd->flit[flits] : sgl; 1574 make_sgl(sgp, segs, nsegs); 1575 1576 sgl_flits = sgl_len(nsegs); 1577 1578 ETHER_BPF_MTAP(pi->ifp, m0); 1579 1580 KASSERT(ndesc <= 4, ("ndesc too large %d", ndesc)); 1581 wr_hi = htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | txqs.compl); 1582 wr_lo = htonl(V_WR_TID(txq->token)); 1583 write_wr_hdr_sgl(ndesc, txd, &txqs, txq, sgl, flits, 1584 sgl_flits, wr_hi, wr_lo); 1585 check_ring_tx_db(sc, txq, 0); 1586 1587 return (0); 1588 } 1589 1590 void 1591 cxgb_tx_watchdog(void *arg) 1592 { 1593 struct sge_qset *qs = arg; 1594 struct sge_txq *txq = &qs->txq[TXQ_ETH]; 1595 1596 if (qs->coalescing != 0 && 1597 (txq->in_use <= cxgb_tx_coalesce_enable_stop) && 1598 TXQ_RING_EMPTY(qs)) 1599 qs->coalescing = 0; 1600 else if (qs->coalescing == 0 && 1601 (txq->in_use >= cxgb_tx_coalesce_enable_start)) 1602 qs->coalescing = 1; 1603 if (TXQ_TRYLOCK(qs)) { 1604 qs->qs_flags |= QS_FLUSHING; 1605 cxgb_start_locked(qs); 1606 qs->qs_flags &= ~QS_FLUSHING; 1607 TXQ_UNLOCK(qs); 1608 } 1609 if (qs->port->ifp->if_drv_flags & IFF_DRV_RUNNING) 1610 callout_reset_on(&txq->txq_watchdog, hz/4, cxgb_tx_watchdog, 1611 qs, txq->txq_watchdog.c_cpu); 1612 } 1613 1614 static void 1615 cxgb_tx_timeout(void *arg) 1616 { 1617 struct sge_qset *qs = arg; 1618 struct sge_txq *txq = &qs->txq[TXQ_ETH]; 1619 1620 if (qs->coalescing == 0 && (txq->in_use >= (txq->size>>3))) 1621 qs->coalescing = 1; 1622 if (TXQ_TRYLOCK(qs)) { 1623 qs->qs_flags |= QS_TIMEOUT; 1624 cxgb_start_locked(qs); 1625 qs->qs_flags &= ~QS_TIMEOUT; 1626 TXQ_UNLOCK(qs); 1627 } 1628 } 1629 1630 static void 1631 cxgb_start_locked(struct sge_qset *qs) 1632 { 1633 struct mbuf *m_head = NULL; 1634 struct sge_txq *txq = &qs->txq[TXQ_ETH]; 1635 struct port_info *pi = qs->port; 1636 struct ifnet *ifp = pi->ifp; 1637 1638 if (qs->qs_flags & (QS_FLUSHING|QS_TIMEOUT)) 1639 reclaim_completed_tx(qs, 0, TXQ_ETH); 1640 1641 if (!pi->link_config.link_ok) { 1642 TXQ_RING_FLUSH(qs); 1643 return; 1644 } 1645 TXQ_LOCK_ASSERT(qs); 1646 while (!TXQ_RING_EMPTY(qs) && (ifp->if_drv_flags & IFF_DRV_RUNNING) && 1647 pi->link_config.link_ok) { 1648 reclaim_completed_tx(qs, cxgb_tx_reclaim_threshold, TXQ_ETH); 1649 1650 if (txq->size - txq->in_use <= TX_MAX_DESC) 1651 break; 1652 1653 if ((m_head = cxgb_dequeue(qs)) == NULL) 1654 break; 1655 /* 1656 * Encapsulation can modify our pointer, and or make it 1657 * NULL on failure. In that event, we can't requeue. 
1658 */ 1659 if (t3_encap(qs, &m_head) || m_head == NULL) 1660 break; 1661 1662 m_head = NULL; 1663 } 1664 1665 if (txq->db_pending) 1666 check_ring_tx_db(pi->adapter, txq, 1); 1667 1668 if (!TXQ_RING_EMPTY(qs) && callout_pending(&txq->txq_timer) == 0 && 1669 pi->link_config.link_ok) 1670 callout_reset_on(&txq->txq_timer, 1, cxgb_tx_timeout, 1671 qs, txq->txq_timer.c_cpu); 1672 if (m_head != NULL) 1673 m_freem(m_head); 1674 } 1675 1676 static int 1677 cxgb_transmit_locked(struct ifnet *ifp, struct sge_qset *qs, struct mbuf *m) 1678 { 1679 struct port_info *pi = qs->port; 1680 struct sge_txq *txq = &qs->txq[TXQ_ETH]; 1681 struct buf_ring *br = txq->txq_mr; 1682 int error, avail; 1683 1684 avail = txq->size - txq->in_use; 1685 TXQ_LOCK_ASSERT(qs); 1686 1687 /* 1688 * We can only do a direct transmit if the following are true: 1689 * - we aren't coalescing (ring < 3/4 full) 1690 * - the link is up -- checked in caller 1691 * - there are no packets enqueued already 1692 * - there is space in hardware transmit queue 1693 */ 1694 if (check_pkt_coalesce(qs) == 0 && 1695 !TXQ_RING_NEEDS_ENQUEUE(qs) && avail > TX_MAX_DESC) { 1696 if (t3_encap(qs, &m)) { 1697 if (m != NULL && 1698 (error = drbr_enqueue(ifp, br, m)) != 0) 1699 return (error); 1700 } else { 1701 if (txq->db_pending) 1702 check_ring_tx_db(pi->adapter, txq, 1); 1703 1704 /* 1705 * We've bypassed the buf ring so we need to update 1706 * the stats directly 1707 */ 1708 txq->txq_direct_packets++; 1709 txq->txq_direct_bytes += m->m_pkthdr.len; 1710 } 1711 } else if ((error = drbr_enqueue(ifp, br, m)) != 0) 1712 return (error); 1713 1714 reclaim_completed_tx(qs, cxgb_tx_reclaim_threshold, TXQ_ETH); 1715 if (!TXQ_RING_EMPTY(qs) && pi->link_config.link_ok && 1716 (!check_pkt_coalesce(qs) || (drbr_inuse(ifp, br) >= 7))) 1717 cxgb_start_locked(qs); 1718 else if (!TXQ_RING_EMPTY(qs) && !callout_pending(&txq->txq_timer)) 1719 callout_reset_on(&txq->txq_timer, 1, cxgb_tx_timeout, 1720 qs, txq->txq_timer.c_cpu); 1721 return (0); 1722 } 1723 1724 int 1725 cxgb_transmit(struct ifnet *ifp, struct mbuf *m) 1726 { 1727 struct sge_qset *qs; 1728 struct port_info *pi = ifp->if_softc; 1729 int error, qidx = pi->first_qset; 1730 1731 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 1732 ||(!pi->link_config.link_ok)) { 1733 m_freem(m); 1734 return (0); 1735 } 1736 1737 if (m->m_flags & M_FLOWID) 1738 qidx = (m->m_pkthdr.flowid % pi->nqsets) + pi->first_qset; 1739 1740 qs = &pi->adapter->sge.qs[qidx]; 1741 1742 if (TXQ_TRYLOCK(qs)) { 1743 /* XXX running */ 1744 error = cxgb_transmit_locked(ifp, qs, m); 1745 TXQ_UNLOCK(qs); 1746 } else 1747 error = drbr_enqueue(ifp, qs->txq[TXQ_ETH].txq_mr, m); 1748 return (error); 1749 } 1750 1751 void 1752 cxgb_qflush(struct ifnet *ifp) 1753 { 1754 /* 1755 * flush any enqueued mbufs in the buf_rings 1756 * and in the transmit queues 1757 * no-op for now 1758 */ 1759 return; 1760 } 1761 1762 /** 1763 * write_imm - write a packet into a Tx descriptor as immediate data 1764 * @d: the Tx descriptor to write 1765 * @m: the packet 1766 * @len: the length of packet data to write as immediate data 1767 * @gen: the generation bit value to write 1768 * 1769 * Writes a packet as immediate data into a Tx descriptor. The packet 1770 * contains a work request at its beginning. We must write the packet 1771 * carefully so the SGE doesn't read accidentally before it's written in 1772 * its entirety. 
1773 */ 1774 static __inline void 1775 write_imm(struct tx_desc *d, caddr_t src, 1776 unsigned int len, unsigned int gen) 1777 { 1778 struct work_request_hdr *from = (struct work_request_hdr *)src; 1779 struct work_request_hdr *to = (struct work_request_hdr *)d; 1780 uint32_t wr_hi, wr_lo; 1781 1782 KASSERT(len <= WR_LEN && len >= sizeof(*from), 1783 ("%s: invalid len %d", __func__, len)); 1784 1785 memcpy(&to[1], &from[1], len - sizeof(*from)); 1786 wr_hi = from->wrh_hi | htonl(F_WR_SOP | F_WR_EOP | 1787 V_WR_BCNTLFLT(len & 7)); 1788 wr_lo = from->wrh_lo | htonl(V_WR_GEN(gen) | V_WR_LEN((len + 7) / 8)); 1789 set_wr_hdr(to, wr_hi, wr_lo); 1790 wmb(); 1791 wr_gen2(d, gen); 1792 } 1793 1794 /** 1795 * check_desc_avail - check descriptor availability on a send queue 1796 * @adap: the adapter 1797 * @q: the TX queue 1798 * @m: the packet needing the descriptors 1799 * @ndesc: the number of Tx descriptors needed 1800 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL) 1801 * 1802 * Checks if the requested number of Tx descriptors is available on an 1803 * SGE send queue. If the queue is already suspended or not enough 1804 * descriptors are available the packet is queued for later transmission. 1805 * Must be called with the Tx queue locked. 1806 * 1807 * Returns 0 if enough descriptors are available, 1 if there aren't 1808 * enough descriptors and the packet has been queued, and 2 if the caller 1809 * needs to retry because there weren't enough descriptors at the 1810 * beginning of the call but some freed up in the mean time. 1811 */ 1812 static __inline int 1813 check_desc_avail(adapter_t *adap, struct sge_txq *q, 1814 struct mbuf *m, unsigned int ndesc, 1815 unsigned int qid) 1816 { 1817 /* 1818 * XXX We currently only use this for checking the control queue 1819 * the control queue is only used for binding qsets which happens 1820 * at init time so we are guaranteed enough descriptors 1821 */ 1822 if (__predict_false(!mbufq_empty(&q->sendq))) { 1823 addq_exit: mbufq_tail(&q->sendq, m); 1824 return 1; 1825 } 1826 if (__predict_false(q->size - q->in_use < ndesc)) { 1827 1828 struct sge_qset *qs = txq_to_qset(q, qid); 1829 1830 setbit(&qs->txq_stopped, qid); 1831 if (should_restart_tx(q) && 1832 test_and_clear_bit(qid, &qs->txq_stopped)) 1833 return 2; 1834 1835 q->stops++; 1836 goto addq_exit; 1837 } 1838 return 0; 1839 } 1840 1841 1842 /** 1843 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs 1844 * @q: the SGE control Tx queue 1845 * 1846 * This is a variant of reclaim_completed_tx() that is used for Tx queues 1847 * that send only immediate data (presently just the control queues) and 1848 * thus do not have any mbufs 1849 */ 1850 static __inline void 1851 reclaim_completed_tx_imm(struct sge_txq *q) 1852 { 1853 unsigned int reclaim = q->processed - q->cleaned; 1854 1855 q->in_use -= reclaim; 1856 q->cleaned += reclaim; 1857 } 1858 1859 /** 1860 * ctrl_xmit - send a packet through an SGE control Tx queue 1861 * @adap: the adapter 1862 * @q: the control queue 1863 * @m: the packet 1864 * 1865 * Send a packet through an SGE control Tx queue. Packets sent through 1866 * a control queue must fit entirely as immediate data in a single Tx 1867 * descriptor and have no page fragments. 
1868 */ 1869 static int 1870 ctrl_xmit(adapter_t *adap, struct sge_qset *qs, struct mbuf *m) 1871 { 1872 int ret; 1873 struct work_request_hdr *wrp = mtod(m, struct work_request_hdr *); 1874 struct sge_txq *q = &qs->txq[TXQ_CTRL]; 1875 1876 KASSERT(m->m_len <= WR_LEN, ("%s: bad tx data", __func__)); 1877 1878 wrp->wrh_hi |= htonl(F_WR_SOP | F_WR_EOP); 1879 wrp->wrh_lo = htonl(V_WR_TID(q->token)); 1880 1881 TXQ_LOCK(qs); 1882 again: reclaim_completed_tx_imm(q); 1883 1884 ret = check_desc_avail(adap, q, m, 1, TXQ_CTRL); 1885 if (__predict_false(ret)) { 1886 if (ret == 1) { 1887 TXQ_UNLOCK(qs); 1888 return (ENOSPC); 1889 } 1890 goto again; 1891 } 1892 write_imm(&q->desc[q->pidx], m->m_data, m->m_len, q->gen); 1893 1894 q->in_use++; 1895 if (++q->pidx >= q->size) { 1896 q->pidx = 0; 1897 q->gen ^= 1; 1898 } 1899 TXQ_UNLOCK(qs); 1900 wmb(); 1901 t3_write_reg(adap, A_SG_KDOORBELL, 1902 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); 1903 1904 m_free(m); 1905 return (0); 1906 } 1907 1908 1909 /** 1910 * restart_ctrlq - restart a suspended control queue 1911 * @qs: the queue set cotaining the control queue 1912 * 1913 * Resumes transmission on a suspended Tx control queue. 1914 */ 1915 static void 1916 restart_ctrlq(void *data, int npending) 1917 { 1918 struct mbuf *m; 1919 struct sge_qset *qs = (struct sge_qset *)data; 1920 struct sge_txq *q = &qs->txq[TXQ_CTRL]; 1921 adapter_t *adap = qs->port->adapter; 1922 1923 TXQ_LOCK(qs); 1924 again: reclaim_completed_tx_imm(q); 1925 1926 while (q->in_use < q->size && 1927 (m = mbufq_dequeue(&q->sendq)) != NULL) { 1928 1929 write_imm(&q->desc[q->pidx], m->m_data, m->m_len, q->gen); 1930 m_free(m); 1931 1932 if (++q->pidx >= q->size) { 1933 q->pidx = 0; 1934 q->gen ^= 1; 1935 } 1936 q->in_use++; 1937 } 1938 if (!mbufq_empty(&q->sendq)) { 1939 setbit(&qs->txq_stopped, TXQ_CTRL); 1940 1941 if (should_restart_tx(q) && 1942 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) 1943 goto again; 1944 q->stops++; 1945 } 1946 TXQ_UNLOCK(qs); 1947 t3_write_reg(adap, A_SG_KDOORBELL, 1948 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); 1949 } 1950 1951 1952 /* 1953 * Send a management message through control queue 0 1954 */ 1955 int 1956 t3_mgmt_tx(struct adapter *adap, struct mbuf *m) 1957 { 1958 return ctrl_xmit(adap, &adap->sge.qs[0], m); 1959 } 1960 1961 /** 1962 * free_qset - free the resources of an SGE queue set 1963 * @sc: the controller owning the queue set 1964 * @q: the queue set 1965 * 1966 * Release the HW and SW resources associated with an SGE queue set, such 1967 * as HW contexts, packet buffers, and descriptor rings. Traffic to the 1968 * queue set must be quiesced prior to calling this. 
1969 */ 1970 static void 1971 t3_free_qset(adapter_t *sc, struct sge_qset *q) 1972 { 1973 int i; 1974 1975 reclaim_completed_tx(q, 0, TXQ_ETH); 1976 if (q->txq[TXQ_ETH].txq_mr != NULL) 1977 buf_ring_free(q->txq[TXQ_ETH].txq_mr, M_DEVBUF); 1978 if (q->txq[TXQ_ETH].txq_ifq != NULL) { 1979 ifq_delete(q->txq[TXQ_ETH].txq_ifq); 1980 free(q->txq[TXQ_ETH].txq_ifq, M_DEVBUF); 1981 } 1982 1983 for (i = 0; i < SGE_RXQ_PER_SET; ++i) { 1984 if (q->fl[i].desc) { 1985 mtx_lock_spin(&sc->sge.reg_lock); 1986 t3_sge_disable_fl(sc, q->fl[i].cntxt_id); 1987 mtx_unlock_spin(&sc->sge.reg_lock); 1988 bus_dmamap_unload(q->fl[i].desc_tag, q->fl[i].desc_map); 1989 bus_dmamem_free(q->fl[i].desc_tag, q->fl[i].desc, 1990 q->fl[i].desc_map); 1991 bus_dma_tag_destroy(q->fl[i].desc_tag); 1992 bus_dma_tag_destroy(q->fl[i].entry_tag); 1993 } 1994 if (q->fl[i].sdesc) { 1995 free_rx_bufs(sc, &q->fl[i]); 1996 free(q->fl[i].sdesc, M_DEVBUF); 1997 } 1998 } 1999 2000 mtx_unlock(&q->lock); 2001 MTX_DESTROY(&q->lock); 2002 for (i = 0; i < SGE_TXQ_PER_SET; i++) { 2003 if (q->txq[i].desc) { 2004 mtx_lock_spin(&sc->sge.reg_lock); 2005 t3_sge_enable_ecntxt(sc, q->txq[i].cntxt_id, 0); 2006 mtx_unlock_spin(&sc->sge.reg_lock); 2007 bus_dmamap_unload(q->txq[i].desc_tag, 2008 q->txq[i].desc_map); 2009 bus_dmamem_free(q->txq[i].desc_tag, q->txq[i].desc, 2010 q->txq[i].desc_map); 2011 bus_dma_tag_destroy(q->txq[i].desc_tag); 2012 bus_dma_tag_destroy(q->txq[i].entry_tag); 2013 } 2014 if (q->txq[i].sdesc) { 2015 free(q->txq[i].sdesc, M_DEVBUF); 2016 } 2017 } 2018 2019 if (q->rspq.desc) { 2020 mtx_lock_spin(&sc->sge.reg_lock); 2021 t3_sge_disable_rspcntxt(sc, q->rspq.cntxt_id); 2022 mtx_unlock_spin(&sc->sge.reg_lock); 2023 2024 bus_dmamap_unload(q->rspq.desc_tag, q->rspq.desc_map); 2025 bus_dmamem_free(q->rspq.desc_tag, q->rspq.desc, 2026 q->rspq.desc_map); 2027 bus_dma_tag_destroy(q->rspq.desc_tag); 2028 MTX_DESTROY(&q->rspq.lock); 2029 } 2030 2031 #if defined(INET6) || defined(INET) 2032 tcp_lro_free(&q->lro.ctrl); 2033 #endif 2034 2035 bzero(q, sizeof(*q)); 2036 } 2037 2038 /** 2039 * t3_free_sge_resources - free SGE resources 2040 * @sc: the adapter softc 2041 * 2042 * Frees resources used by the SGE queue sets. 2043 */ 2044 void 2045 t3_free_sge_resources(adapter_t *sc, int nqsets) 2046 { 2047 int i; 2048 2049 for (i = 0; i < nqsets; ++i) { 2050 TXQ_LOCK(&sc->sge.qs[i]); 2051 t3_free_qset(sc, &sc->sge.qs[i]); 2052 } 2053 } 2054 2055 /** 2056 * t3_sge_start - enable SGE 2057 * @sc: the controller softc 2058 * 2059 * Enables the SGE for DMAs. This is the last step in starting packet 2060 * transfers. 2061 */ 2062 void 2063 t3_sge_start(adapter_t *sc) 2064 { 2065 t3_set_reg_field(sc, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE); 2066 } 2067 2068 /** 2069 * t3_sge_stop - disable SGE operation 2070 * @sc: the adapter 2071 * 2072 * Disables the DMA engine. This can be called in emeregencies (e.g., 2073 * from error interrupts) or from normal process context. In the latter 2074 * case it also disables any pending queue restart tasklets. Note that 2075 * if it is called in interrupt context it cannot disable the restart 2076 * tasklets as it cannot wait, however the tasklets will have no effect 2077 * since the doorbells are disabled and the driver will call this again 2078 * later from process context, at which time the tasklets will be stopped 2079 * if they are still running. 
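 *
 * Sketch of the process-context tasklet drain described above (the
 * driver currently carries this under "#ifdef notyet" below):
 *
 *	for (i = 0; i < nqsets; ++i) {
 *		struct sge_qset *qs = &sc->sge.qs[i];
 *
 *		taskqueue_drain(sc->tq, &qs->txq[TXQ_OFLD].qresume_task);
 *		taskqueue_drain(sc->tq, &qs->txq[TXQ_CTRL].qresume_task);
 *	}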
2080 */ 2081 void 2082 t3_sge_stop(adapter_t *sc) 2083 { 2084 int i, nqsets; 2085 2086 t3_set_reg_field(sc, A_SG_CONTROL, F_GLOBALENABLE, 0); 2087 2088 if (sc->tq == NULL) 2089 return; 2090 2091 for (nqsets = i = 0; i < (sc)->params.nports; i++) 2092 nqsets += sc->port[i].nqsets; 2093 #ifdef notyet 2094 /* 2095 * 2096 * XXX 2097 */ 2098 for (i = 0; i < nqsets; ++i) { 2099 struct sge_qset *qs = &sc->sge.qs[i]; 2100 2101 taskqueue_drain(sc->tq, &qs->txq[TXQ_OFLD].qresume_task); 2102 taskqueue_drain(sc->tq, &qs->txq[TXQ_CTRL].qresume_task); 2103 } 2104 #endif 2105 } 2106 2107 /** 2108 * t3_free_tx_desc - reclaims Tx descriptors and their buffers 2109 * @adapter: the adapter 2110 * @q: the Tx queue to reclaim descriptors from 2111 * @reclaimable: the number of descriptors to reclaim 2112 * @m_vec_size: maximum number of buffers to reclaim 2113 * @desc_reclaimed: returns the number of descriptors reclaimed 2114 * 2115 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated 2116 * Tx buffers. Called with the Tx queue lock held. 2117 * 2118 * Returns number of buffers of reclaimed 2119 */ 2120 void 2121 t3_free_tx_desc(struct sge_qset *qs, int reclaimable, int queue) 2122 { 2123 struct tx_sw_desc *txsd; 2124 unsigned int cidx, mask; 2125 struct sge_txq *q = &qs->txq[queue]; 2126 2127 #ifdef T3_TRACE 2128 T3_TRACE2(sc->tb[q->cntxt_id & 7], 2129 "reclaiming %u Tx descriptors at cidx %u", reclaimable, cidx); 2130 #endif 2131 cidx = q->cidx; 2132 mask = q->size - 1; 2133 txsd = &q->sdesc[cidx]; 2134 2135 mtx_assert(&qs->lock, MA_OWNED); 2136 while (reclaimable--) { 2137 prefetch(q->sdesc[(cidx + 1) & mask].m); 2138 prefetch(q->sdesc[(cidx + 2) & mask].m); 2139 2140 if (txsd->m != NULL) { 2141 if (txsd->flags & TX_SW_DESC_MAPPED) { 2142 bus_dmamap_unload(q->entry_tag, txsd->map); 2143 txsd->flags &= ~TX_SW_DESC_MAPPED; 2144 } 2145 m_freem_list(txsd->m); 2146 txsd->m = NULL; 2147 } else 2148 q->txq_skipped++; 2149 2150 ++txsd; 2151 if (++cidx == q->size) { 2152 cidx = 0; 2153 txsd = q->sdesc; 2154 } 2155 } 2156 q->cidx = cidx; 2157 2158 } 2159 2160 /** 2161 * is_new_response - check if a response is newly written 2162 * @r: the response descriptor 2163 * @q: the response queue 2164 * 2165 * Returns true if a response descriptor contains a yet unprocessed 2166 * response. 2167 */ 2168 static __inline int 2169 is_new_response(const struct rsp_desc *r, 2170 const struct sge_rspq *q) 2171 { 2172 return (r->intr_gen & F_RSPD_GEN2) == q->gen; 2173 } 2174 2175 #define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS) 2176 #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \ 2177 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \ 2178 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \ 2179 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR)) 2180 2181 /* How long to delay the next interrupt in case of memory shortage, in 0.1us. */ 2182 #define NOMEM_INTR_DELAY 2500 2183 2184 #ifdef TCP_OFFLOAD 2185 /** 2186 * write_ofld_wr - write an offload work request 2187 * @adap: the adapter 2188 * @m: the packet to send 2189 * @q: the Tx queue 2190 * @pidx: index of the first Tx descriptor to write 2191 * @gen: the generation value to use 2192 * @ndesc: number of descriptors the packet will occupy 2193 * 2194 * Write an offload work request to send the supplied packet. The packet 2195 * data already carry the work request with most fields populated. 
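 *
 * Header parsing sketch (condensed from the code below; "m" is the
 * offload mbuf handed in by the TOE):
 *
 *	struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
 *	unsigned int ndesc = G_HDR_NDESC(oh->flags);
 *	void *wr = (void *)(oh + 1);
 *	int wrlen = m->m_len - sizeof(*oh);
 *
 * If F_HDR_SGL is clear the WR is written as immediate data; otherwise
 * the payload is described by the sglist at oh->sgl.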
2196 */ 2197 static void 2198 write_ofld_wr(adapter_t *adap, struct mbuf *m, struct sge_txq *q, 2199 unsigned int pidx, unsigned int gen, unsigned int ndesc) 2200 { 2201 unsigned int sgl_flits, flits; 2202 int i, idx, nsegs, wrlen; 2203 struct work_request_hdr *from; 2204 struct sg_ent *sgp, t3sgl[TX_MAX_SEGS / 2 + 1]; 2205 struct tx_desc *d = &q->desc[pidx]; 2206 struct txq_state txqs; 2207 struct sglist_seg *segs; 2208 struct ofld_hdr *oh = mtod(m, struct ofld_hdr *); 2209 struct sglist *sgl; 2210 2211 from = (void *)(oh + 1); /* Start of WR within mbuf */ 2212 wrlen = m->m_len - sizeof(*oh); 2213 2214 if (!(oh->flags & F_HDR_SGL)) { 2215 write_imm(d, (caddr_t)from, wrlen, gen); 2216 2217 /* 2218 * mbuf with "real" immediate tx data will be enqueue_wr'd by 2219 * t3_push_frames and freed in wr_ack. Others, like those sent 2220 * down by close_conn, t3_send_reset, etc. should be freed here. 2221 */ 2222 if (!(oh->flags & F_HDR_DF)) 2223 m_free(m); 2224 return; 2225 } 2226 2227 memcpy(&d->flit[1], &from[1], wrlen - sizeof(*from)); 2228 2229 sgl = oh->sgl; 2230 flits = wrlen / 8; 2231 sgp = (ndesc == 1) ? (struct sg_ent *)&d->flit[flits] : t3sgl; 2232 2233 nsegs = sgl->sg_nseg; 2234 segs = sgl->sg_segs; 2235 for (idx = 0, i = 0; i < nsegs; i++) { 2236 KASSERT(segs[i].ss_len, ("%s: 0 len in sgl", __func__)); 2237 if (i && idx == 0) 2238 ++sgp; 2239 sgp->len[idx] = htobe32(segs[i].ss_len); 2240 sgp->addr[idx] = htobe64(segs[i].ss_paddr); 2241 idx ^= 1; 2242 } 2243 if (idx) { 2244 sgp->len[idx] = 0; 2245 sgp->addr[idx] = 0; 2246 } 2247 2248 sgl_flits = sgl_len(nsegs); 2249 txqs.gen = gen; 2250 txqs.pidx = pidx; 2251 txqs.compl = 0; 2252 2253 write_wr_hdr_sgl(ndesc, d, &txqs, q, t3sgl, flits, sgl_flits, 2254 from->wrh_hi, from->wrh_lo); 2255 } 2256 2257 /** 2258 * ofld_xmit - send a packet through an offload queue 2259 * @adap: the adapter 2260 * @q: the Tx offload queue 2261 * @m: the packet 2262 * 2263 * Send an offload packet through an SGE offload queue. 2264 */ 2265 static int 2266 ofld_xmit(adapter_t *adap, struct sge_qset *qs, struct mbuf *m) 2267 { 2268 int ret; 2269 unsigned int ndesc; 2270 unsigned int pidx, gen; 2271 struct sge_txq *q = &qs->txq[TXQ_OFLD]; 2272 struct ofld_hdr *oh = mtod(m, struct ofld_hdr *); 2273 2274 ndesc = G_HDR_NDESC(oh->flags); 2275 2276 TXQ_LOCK(qs); 2277 again: reclaim_completed_tx(qs, 16, TXQ_OFLD); 2278 ret = check_desc_avail(adap, q, m, ndesc, TXQ_OFLD); 2279 if (__predict_false(ret)) { 2280 if (ret == 1) { 2281 TXQ_UNLOCK(qs); 2282 return (EINTR); 2283 } 2284 goto again; 2285 } 2286 2287 gen = q->gen; 2288 q->in_use += ndesc; 2289 pidx = q->pidx; 2290 q->pidx += ndesc; 2291 if (q->pidx >= q->size) { 2292 q->pidx -= q->size; 2293 q->gen ^= 1; 2294 } 2295 2296 write_ofld_wr(adap, m, q, pidx, gen, ndesc); 2297 check_ring_tx_db(adap, q, 1); 2298 TXQ_UNLOCK(qs); 2299 2300 return (0); 2301 } 2302 2303 /** 2304 * restart_offloadq - restart a suspended offload queue 2305 * @qs: the queue set cotaining the offload queue 2306 * 2307 * Resumes transmission on a suspended Tx offload queue. 
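 *
 * Suspend/resume handshake, condensed from check_desc_avail() and
 * restart_tx() in this file (shown here for orientation only):
 *
 *	producer, out of descriptors:
 *		setbit(&qs->txq_stopped, TXQ_OFLD);
 *		q->stops++;
 *
 *	completion path, credits returned:
 *		if (should_restart_tx(q) &&
 *		    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
 *			taskqueue_enqueue(sc->tq, &q->qresume_task);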
2308 */ 2309 static void 2310 restart_offloadq(void *data, int npending) 2311 { 2312 struct mbuf *m; 2313 struct sge_qset *qs = data; 2314 struct sge_txq *q = &qs->txq[TXQ_OFLD]; 2315 adapter_t *adap = qs->port->adapter; 2316 int cleaned; 2317 2318 TXQ_LOCK(qs); 2319 again: cleaned = reclaim_completed_tx(qs, 16, TXQ_OFLD); 2320 2321 while ((m = mbufq_peek(&q->sendq)) != NULL) { 2322 unsigned int gen, pidx; 2323 struct ofld_hdr *oh = mtod(m, struct ofld_hdr *); 2324 unsigned int ndesc = G_HDR_NDESC(oh->flags); 2325 2326 if (__predict_false(q->size - q->in_use < ndesc)) { 2327 setbit(&qs->txq_stopped, TXQ_OFLD); 2328 if (should_restart_tx(q) && 2329 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) 2330 goto again; 2331 q->stops++; 2332 break; 2333 } 2334 2335 gen = q->gen; 2336 q->in_use += ndesc; 2337 pidx = q->pidx; 2338 q->pidx += ndesc; 2339 if (q->pidx >= q->size) { 2340 q->pidx -= q->size; 2341 q->gen ^= 1; 2342 } 2343 2344 (void)mbufq_dequeue(&q->sendq); 2345 TXQ_UNLOCK(qs); 2346 write_ofld_wr(adap, m, q, pidx, gen, ndesc); 2347 TXQ_LOCK(qs); 2348 } 2349 #if USE_GTS 2350 set_bit(TXQ_RUNNING, &q->flags); 2351 set_bit(TXQ_LAST_PKT_DB, &q->flags); 2352 #endif 2353 TXQ_UNLOCK(qs); 2354 wmb(); 2355 t3_write_reg(adap, A_SG_KDOORBELL, 2356 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); 2357 } 2358 2359 /** 2360 * t3_offload_tx - send an offload packet 2361 * @m: the packet 2362 * 2363 * Sends an offload packet. We use the packet priority to select the 2364 * appropriate Tx queue as follows: bit 0 indicates whether the packet 2365 * should be sent as regular or control, bits 1-3 select the queue set. 2366 */ 2367 int 2368 t3_offload_tx(struct adapter *sc, struct mbuf *m) 2369 { 2370 struct ofld_hdr *oh = mtod(m, struct ofld_hdr *); 2371 struct sge_qset *qs = &sc->sge.qs[G_HDR_QSET(oh->flags)]; 2372 2373 if (oh->flags & F_HDR_CTRL) { 2374 m_adj(m, sizeof (*oh)); /* trim ofld_hdr off */ 2375 return (ctrl_xmit(sc, qs, m)); 2376 } else 2377 return (ofld_xmit(sc, qs, m)); 2378 } 2379 #endif 2380 2381 static void 2382 restart_tx(struct sge_qset *qs) 2383 { 2384 struct adapter *sc = qs->port->adapter; 2385 2386 if (isset(&qs->txq_stopped, TXQ_OFLD) && 2387 should_restart_tx(&qs->txq[TXQ_OFLD]) && 2388 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) { 2389 qs->txq[TXQ_OFLD].restarts++; 2390 taskqueue_enqueue(sc->tq, &qs->txq[TXQ_OFLD].qresume_task); 2391 } 2392 2393 if (isset(&qs->txq_stopped, TXQ_CTRL) && 2394 should_restart_tx(&qs->txq[TXQ_CTRL]) && 2395 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) { 2396 qs->txq[TXQ_CTRL].restarts++; 2397 taskqueue_enqueue(sc->tq, &qs->txq[TXQ_CTRL].qresume_task); 2398 } 2399 } 2400 2401 /** 2402 * t3_sge_alloc_qset - initialize an SGE queue set 2403 * @sc: the controller softc 2404 * @id: the queue set id 2405 * @nports: how many Ethernet ports will be using this queue set 2406 * @irq_vec_idx: the IRQ vector index for response queue interrupts 2407 * @p: configuration parameters for this queue set 2408 * @ntxq: number of Tx queues for the queue set 2409 * @pi: port info for queue set 2410 * 2411 * Allocate resources and initialize an SGE queue set. A queue set 2412 * comprises a response queue, two Rx free-buffer queues, and up to 3 2413 * Tx queues. The Tx queues are assigned roles in the order Ethernet 2414 * queue, offload queue, and control queue. 
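 *
 * Resulting queue set layout (summary of the allocations below):
 *
 *	qs->rspq            response queue, p->rspq_size entries
 *	qs->fl[0]           free list 0, MCLBYTES buffers, p->fl_size entries
 *	qs->fl[1]           free list 1, p->jumbo_buf_size buffers,
 *	                    p->jumbo_size entries
 *	qs->txq[TXQ_ETH]    Ethernet Tx queue
 *	qs->txq[TXQ_OFLD]   offload Tx queue, HW context only if ntxq > 1
 *	qs->txq[TXQ_CTRL]   control Tx queue, HW context only if ntxq > 2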
2415 */ 2416 int 2417 t3_sge_alloc_qset(adapter_t *sc, u_int id, int nports, int irq_vec_idx, 2418 const struct qset_params *p, int ntxq, struct port_info *pi) 2419 { 2420 struct sge_qset *q = &sc->sge.qs[id]; 2421 int i, ret = 0; 2422 2423 MTX_INIT(&q->lock, q->namebuf, NULL, MTX_DEF); 2424 q->port = pi; 2425 q->adap = sc; 2426 2427 if ((q->txq[TXQ_ETH].txq_mr = buf_ring_alloc(cxgb_txq_buf_ring_size, 2428 M_DEVBUF, M_WAITOK, &q->lock)) == NULL) { 2429 device_printf(sc->dev, "failed to allocate mbuf ring\n"); 2430 goto err; 2431 } 2432 if ((q->txq[TXQ_ETH].txq_ifq = malloc(sizeof(struct ifaltq), M_DEVBUF, 2433 M_NOWAIT | M_ZERO)) == NULL) { 2434 device_printf(sc->dev, "failed to allocate ifq\n"); 2435 goto err; 2436 } 2437 ifq_init(q->txq[TXQ_ETH].txq_ifq, pi->ifp); 2438 callout_init(&q->txq[TXQ_ETH].txq_timer, 1); 2439 callout_init(&q->txq[TXQ_ETH].txq_watchdog, 1); 2440 q->txq[TXQ_ETH].txq_timer.c_cpu = id % mp_ncpus; 2441 q->txq[TXQ_ETH].txq_watchdog.c_cpu = id % mp_ncpus; 2442 2443 init_qset_cntxt(q, id); 2444 q->idx = id; 2445 if ((ret = alloc_ring(sc, p->fl_size, sizeof(struct rx_desc), 2446 sizeof(struct rx_sw_desc), &q->fl[0].phys_addr, 2447 &q->fl[0].desc, &q->fl[0].sdesc, 2448 &q->fl[0].desc_tag, &q->fl[0].desc_map, 2449 sc->rx_dmat, &q->fl[0].entry_tag)) != 0) { 2450 printf("error %d from alloc ring fl0\n", ret); 2451 goto err; 2452 } 2453 2454 if ((ret = alloc_ring(sc, p->jumbo_size, sizeof(struct rx_desc), 2455 sizeof(struct rx_sw_desc), &q->fl[1].phys_addr, 2456 &q->fl[1].desc, &q->fl[1].sdesc, 2457 &q->fl[1].desc_tag, &q->fl[1].desc_map, 2458 sc->rx_jumbo_dmat, &q->fl[1].entry_tag)) != 0) { 2459 printf("error %d from alloc ring fl1\n", ret); 2460 goto err; 2461 } 2462 2463 if ((ret = alloc_ring(sc, p->rspq_size, sizeof(struct rsp_desc), 0, 2464 &q->rspq.phys_addr, &q->rspq.desc, NULL, 2465 &q->rspq.desc_tag, &q->rspq.desc_map, 2466 NULL, NULL)) != 0) { 2467 printf("error %d from alloc ring rspq\n", ret); 2468 goto err; 2469 } 2470 2471 snprintf(q->rspq.lockbuf, RSPQ_NAME_LEN, "t3 rspq lock %d:%d", 2472 device_get_unit(sc->dev), irq_vec_idx); 2473 MTX_INIT(&q->rspq.lock, q->rspq.lockbuf, NULL, MTX_DEF); 2474 2475 for (i = 0; i < ntxq; ++i) { 2476 size_t sz = i == TXQ_CTRL ? 
0 : sizeof(struct tx_sw_desc); 2477 2478 if ((ret = alloc_ring(sc, p->txq_size[i], 2479 sizeof(struct tx_desc), sz, 2480 &q->txq[i].phys_addr, &q->txq[i].desc, 2481 &q->txq[i].sdesc, &q->txq[i].desc_tag, 2482 &q->txq[i].desc_map, 2483 sc->tx_dmat, &q->txq[i].entry_tag)) != 0) { 2484 printf("error %d from alloc ring tx %i\n", ret, i); 2485 goto err; 2486 } 2487 mbufq_init(&q->txq[i].sendq); 2488 q->txq[i].gen = 1; 2489 q->txq[i].size = p->txq_size[i]; 2490 } 2491 2492 #ifdef TCP_OFFLOAD 2493 TASK_INIT(&q->txq[TXQ_OFLD].qresume_task, 0, restart_offloadq, q); 2494 #endif 2495 TASK_INIT(&q->txq[TXQ_CTRL].qresume_task, 0, restart_ctrlq, q); 2496 TASK_INIT(&q->txq[TXQ_ETH].qreclaim_task, 0, sge_txq_reclaim_handler, q); 2497 TASK_INIT(&q->txq[TXQ_OFLD].qreclaim_task, 0, sge_txq_reclaim_handler, q); 2498 2499 q->fl[0].gen = q->fl[1].gen = 1; 2500 q->fl[0].size = p->fl_size; 2501 q->fl[1].size = p->jumbo_size; 2502 2503 q->rspq.gen = 1; 2504 q->rspq.cidx = 0; 2505 q->rspq.size = p->rspq_size; 2506 2507 q->txq[TXQ_ETH].stop_thres = nports * 2508 flits_to_desc(sgl_len(TX_MAX_SEGS + 1) + 3); 2509 2510 q->fl[0].buf_size = MCLBYTES; 2511 q->fl[0].zone = zone_pack; 2512 q->fl[0].type = EXT_PACKET; 2513 2514 if (p->jumbo_buf_size == MJUM16BYTES) { 2515 q->fl[1].zone = zone_jumbo16; 2516 q->fl[1].type = EXT_JUMBO16; 2517 } else if (p->jumbo_buf_size == MJUM9BYTES) { 2518 q->fl[1].zone = zone_jumbo9; 2519 q->fl[1].type = EXT_JUMBO9; 2520 } else if (p->jumbo_buf_size == MJUMPAGESIZE) { 2521 q->fl[1].zone = zone_jumbop; 2522 q->fl[1].type = EXT_JUMBOP; 2523 } else { 2524 KASSERT(0, ("can't deal with jumbo_buf_size %d.", p->jumbo_buf_size)); 2525 ret = EDOOFUS; 2526 goto err; 2527 } 2528 q->fl[1].buf_size = p->jumbo_buf_size; 2529 2530 /* Allocate and setup the lro_ctrl structure */ 2531 q->lro.enabled = !!(pi->ifp->if_capenable & IFCAP_LRO); 2532 #if defined(INET6) || defined(INET) 2533 ret = tcp_lro_init(&q->lro.ctrl); 2534 if (ret) { 2535 printf("error %d from tcp_lro_init\n", ret); 2536 goto err; 2537 } 2538 #endif 2539 q->lro.ctrl.ifp = pi->ifp; 2540 2541 mtx_lock_spin(&sc->sge.reg_lock); 2542 ret = -t3_sge_init_rspcntxt(sc, q->rspq.cntxt_id, irq_vec_idx, 2543 q->rspq.phys_addr, q->rspq.size, 2544 q->fl[0].buf_size, 1, 0); 2545 if (ret) { 2546 printf("error %d from t3_sge_init_rspcntxt\n", ret); 2547 goto err_unlock; 2548 } 2549 2550 for (i = 0; i < SGE_RXQ_PER_SET; ++i) { 2551 ret = -t3_sge_init_flcntxt(sc, q->fl[i].cntxt_id, 0, 2552 q->fl[i].phys_addr, q->fl[i].size, 2553 q->fl[i].buf_size, p->cong_thres, 1, 2554 0); 2555 if (ret) { 2556 printf("error %d from t3_sge_init_flcntxt for index i=%d\n", ret, i); 2557 goto err_unlock; 2558 } 2559 } 2560 2561 ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_ETH].cntxt_id, USE_GTS, 2562 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr, 2563 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token, 2564 1, 0); 2565 if (ret) { 2566 printf("error %d from t3_sge_init_ecntxt\n", ret); 2567 goto err_unlock; 2568 } 2569 2570 if (ntxq > 1) { 2571 ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_OFLD].cntxt_id, 2572 USE_GTS, SGE_CNTXT_OFLD, id, 2573 q->txq[TXQ_OFLD].phys_addr, 2574 q->txq[TXQ_OFLD].size, 0, 1, 0); 2575 if (ret) { 2576 printf("error %d from t3_sge_init_ecntxt\n", ret); 2577 goto err_unlock; 2578 } 2579 } 2580 2581 if (ntxq > 2) { 2582 ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_CTRL].cntxt_id, 0, 2583 SGE_CNTXT_CTRL, id, 2584 q->txq[TXQ_CTRL].phys_addr, 2585 q->txq[TXQ_CTRL].size, 2586 q->txq[TXQ_CTRL].token, 1, 0); 2587 if (ret) { 2588 printf("error %d from t3_sge_init_ecntxt\n", ret); 
2589 goto err_unlock; 2590 } 2591 } 2592 2593 mtx_unlock_spin(&sc->sge.reg_lock); 2594 t3_update_qset_coalesce(q, p); 2595 2596 refill_fl(sc, &q->fl[0], q->fl[0].size); 2597 refill_fl(sc, &q->fl[1], q->fl[1].size); 2598 refill_rspq(sc, &q->rspq, q->rspq.size - 1); 2599 2600 t3_write_reg(sc, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) | 2601 V_NEWTIMER(q->rspq.holdoff_tmr)); 2602 2603 return (0); 2604 2605 err_unlock: 2606 mtx_unlock_spin(&sc->sge.reg_lock); 2607 err: 2608 TXQ_LOCK(q); 2609 t3_free_qset(sc, q); 2610 2611 return (ret); 2612 } 2613 2614 /* 2615 * Remove CPL_RX_PKT headers from the mbuf and reduce it to a regular mbuf with 2616 * ethernet data. Hardware assistance with various checksums and any vlan tag 2617 * will also be taken into account here. 2618 */ 2619 void 2620 t3_rx_eth(struct adapter *adap, struct mbuf *m, int ethpad) 2621 { 2622 struct cpl_rx_pkt *cpl = (struct cpl_rx_pkt *)(mtod(m, uint8_t *) + ethpad); 2623 struct port_info *pi = &adap->port[adap->rxpkt_map[cpl->iff]]; 2624 struct ifnet *ifp = pi->ifp; 2625 2626 if (cpl->vlan_valid) { 2627 m->m_pkthdr.ether_vtag = ntohs(cpl->vlan); 2628 m->m_flags |= M_VLANTAG; 2629 } 2630 2631 m->m_pkthdr.rcvif = ifp; 2632 /* 2633 * adjust after conversion to mbuf chain 2634 */ 2635 m->m_pkthdr.len -= (sizeof(*cpl) + ethpad); 2636 m->m_len -= (sizeof(*cpl) + ethpad); 2637 m->m_data += (sizeof(*cpl) + ethpad); 2638 2639 if (!cpl->fragment && cpl->csum_valid && cpl->csum == 0xffff) { 2640 struct ether_header *eh = mtod(m, void *); 2641 uint16_t eh_type; 2642 2643 if (eh->ether_type == htons(ETHERTYPE_VLAN)) { 2644 struct ether_vlan_header *evh = mtod(m, void *); 2645 2646 eh_type = evh->evl_proto; 2647 } else 2648 eh_type = eh->ether_type; 2649 2650 if (ifp->if_capenable & IFCAP_RXCSUM && 2651 eh_type == htons(ETHERTYPE_IP)) { 2652 m->m_pkthdr.csum_flags = (CSUM_IP_CHECKED | 2653 CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR); 2654 m->m_pkthdr.csum_data = 0xffff; 2655 } else if (ifp->if_capenable & IFCAP_RXCSUM_IPV6 && 2656 eh_type == htons(ETHERTYPE_IPV6)) { 2657 m->m_pkthdr.csum_flags = (CSUM_DATA_VALID_IPV6 | 2658 CSUM_PSEUDO_HDR); 2659 m->m_pkthdr.csum_data = 0xffff; 2660 } 2661 } 2662 } 2663 2664 /** 2665 * get_packet - return the next ingress packet buffer from a free list 2666 * @adap: the adapter that received the packet 2667 * @drop_thres: # of remaining buffers before we start dropping packets 2668 * @qs: the qset that the SGE free list holding the packet belongs to 2669 * @mh: the mbuf header, contains a pointer to the head and tail of the mbuf chain 2670 * @r: response descriptor 2671 * 2672 * Get the next packet from a free list and complete setup of the 2673 * sk_buff. If the packet is small we make a copy and recycle the 2674 * original buffer, otherwise we use the original buffer itself. If a 2675 * positive drop threshold is supplied packets are dropped and their 2676 * buffers recycled if (a) the number of remaining buffers is under the 2677 * threshold and the packet is too big to copy, or (b) the packet should 2678 * be copied but there is no memory for the copy. 2679 */ 2680 static int 2681 get_packet(adapter_t *adap, unsigned int drop_thres, struct sge_qset *qs, 2682 struct t3_mbuf_hdr *mh, struct rsp_desc *r) 2683 { 2684 2685 unsigned int len_cq = ntohl(r->len_cq); 2686 struct sge_fl *fl = (len_cq & F_RSPD_FLQ) ? 
&qs->fl[1] : &qs->fl[0]; 2687 int mask, cidx = fl->cidx; 2688 struct rx_sw_desc *sd = &fl->sdesc[cidx]; 2689 uint32_t len = G_RSPD_LEN(len_cq); 2690 uint32_t flags = M_EXT; 2691 uint8_t sopeop = G_RSPD_SOP_EOP(ntohl(r->flags)); 2692 caddr_t cl; 2693 struct mbuf *m; 2694 int ret = 0; 2695 2696 mask = fl->size - 1; 2697 prefetch(fl->sdesc[(cidx + 1) & mask].m); 2698 prefetch(fl->sdesc[(cidx + 2) & mask].m); 2699 prefetch(fl->sdesc[(cidx + 1) & mask].rxsd_cl); 2700 prefetch(fl->sdesc[(cidx + 2) & mask].rxsd_cl); 2701 2702 fl->credits--; 2703 bus_dmamap_sync(fl->entry_tag, sd->map, BUS_DMASYNC_POSTREAD); 2704 2705 if (recycle_enable && len <= SGE_RX_COPY_THRES && 2706 sopeop == RSPQ_SOP_EOP) { 2707 if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL) 2708 goto skip_recycle; 2709 cl = mtod(m, void *); 2710 memcpy(cl, sd->rxsd_cl, len); 2711 recycle_rx_buf(adap, fl, fl->cidx); 2712 m->m_pkthdr.len = m->m_len = len; 2713 m->m_flags = 0; 2714 mh->mh_head = mh->mh_tail = m; 2715 ret = 1; 2716 goto done; 2717 } else { 2718 skip_recycle: 2719 bus_dmamap_unload(fl->entry_tag, sd->map); 2720 cl = sd->rxsd_cl; 2721 m = sd->m; 2722 2723 if ((sopeop == RSPQ_SOP_EOP) || 2724 (sopeop == RSPQ_SOP)) 2725 flags |= M_PKTHDR; 2726 m_init(m, fl->zone, fl->buf_size, M_NOWAIT, MT_DATA, flags); 2727 if (fl->zone == zone_pack) { 2728 /* 2729 * restore clobbered data pointer 2730 */ 2731 m->m_data = m->m_ext.ext_buf; 2732 } else { 2733 m_cljset(m, cl, fl->type); 2734 } 2735 m->m_len = len; 2736 } 2737 switch(sopeop) { 2738 case RSPQ_SOP_EOP: 2739 ret = 1; 2740 /* FALLTHROUGH */ 2741 case RSPQ_SOP: 2742 mh->mh_head = mh->mh_tail = m; 2743 m->m_pkthdr.len = len; 2744 break; 2745 case RSPQ_EOP: 2746 ret = 1; 2747 /* FALLTHROUGH */ 2748 case RSPQ_NSOP_NEOP: 2749 if (mh->mh_tail == NULL) { 2750 log(LOG_ERR, "discarding intermediate descriptor entry\n"); 2751 m_freem(m); 2752 break; 2753 } 2754 mh->mh_tail->m_next = m; 2755 mh->mh_tail = m; 2756 mh->mh_head->m_pkthdr.len += len; 2757 break; 2758 } 2759 if (cxgb_debug) 2760 printf("len=%d pktlen=%d\n", m->m_len, m->m_pkthdr.len); 2761 done: 2762 if (++fl->cidx == fl->size) 2763 fl->cidx = 0; 2764 2765 return (ret); 2766 } 2767 2768 /** 2769 * handle_rsp_cntrl_info - handles control information in a response 2770 * @qs: the queue set corresponding to the response 2771 * @flags: the response control flags 2772 * 2773 * Handles the control information of an SGE response, such as GTS 2774 * indications and completion credits for the queue set's Tx queues. 2775 * HW coalesces credits, we don't do any extra SW coalescing. 
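 *
 * Credit accounting sketch (this is what the body below does; shown to
 * make the flag-to-queue mapping explicit):
 *
 *	qs->txq[TXQ_ETH].processed  += G_RSPD_TXQ0_CR(flags);
 *	qs->txq[TXQ_CTRL].processed += G_RSPD_TXQ2_CR(flags);
 *	qs->txq[TXQ_OFLD].processed += G_RSPD_TXQ1_CR(flags);
 *
 * The descriptors themselves are reclaimed later, under the Tx queue
 * lock, by the transmit and reclaim paths.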
2776 */ 2777 static __inline void 2778 handle_rsp_cntrl_info(struct sge_qset *qs, uint32_t flags) 2779 { 2780 unsigned int credits; 2781 2782 #if USE_GTS 2783 if (flags & F_RSPD_TXQ0_GTS) 2784 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags); 2785 #endif 2786 credits = G_RSPD_TXQ0_CR(flags); 2787 if (credits) 2788 qs->txq[TXQ_ETH].processed += credits; 2789 2790 credits = G_RSPD_TXQ2_CR(flags); 2791 if (credits) 2792 qs->txq[TXQ_CTRL].processed += credits; 2793 2794 # if USE_GTS 2795 if (flags & F_RSPD_TXQ1_GTS) 2796 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags); 2797 # endif 2798 credits = G_RSPD_TXQ1_CR(flags); 2799 if (credits) 2800 qs->txq[TXQ_OFLD].processed += credits; 2801 2802 } 2803 2804 static void 2805 check_ring_db(adapter_t *adap, struct sge_qset *qs, 2806 unsigned int sleeping) 2807 { 2808 ; 2809 } 2810 2811 /** 2812 * process_responses - process responses from an SGE response queue 2813 * @adap: the adapter 2814 * @qs: the queue set to which the response queue belongs 2815 * @budget: how many responses can be processed in this round 2816 * 2817 * Process responses from an SGE response queue up to the supplied budget. 2818 * Responses include received packets as well as credits and other events 2819 * for the queues that belong to the response queue's queue set. 2820 * A negative budget is effectively unlimited. 2821 * 2822 * Additionally choose the interrupt holdoff time for the next interrupt 2823 * on this queue. If the system is under memory shortage use a fairly 2824 * long delay to help recovery. 2825 */ 2826 static int 2827 process_responses(adapter_t *adap, struct sge_qset *qs, int budget) 2828 { 2829 struct sge_rspq *rspq = &qs->rspq; 2830 struct rsp_desc *r = &rspq->desc[rspq->cidx]; 2831 int budget_left = budget; 2832 unsigned int sleeping = 0; 2833 #if defined(INET6) || defined(INET) 2834 int lro_enabled = qs->lro.enabled; 2835 int skip_lro; 2836 struct lro_ctrl *lro_ctrl = &qs->lro.ctrl; 2837 #endif 2838 struct t3_mbuf_hdr *mh = &rspq->rspq_mh; 2839 #ifdef DEBUG 2840 static int last_holdoff = 0; 2841 if (cxgb_debug && rspq->holdoff_tmr != last_holdoff) { 2842 printf("next_holdoff=%d\n", rspq->holdoff_tmr); 2843 last_holdoff = rspq->holdoff_tmr; 2844 } 2845 #endif 2846 rspq->next_holdoff = rspq->holdoff_tmr; 2847 2848 while (__predict_true(budget_left && is_new_response(r, rspq))) { 2849 int eth, eop = 0, ethpad = 0; 2850 uint32_t flags = ntohl(r->flags); 2851 uint32_t rss_hash = be32toh(r->rss_hdr.rss_hash_val); 2852 uint8_t opcode = r->rss_hdr.opcode; 2853 2854 eth = (opcode == CPL_RX_PKT); 2855 2856 if (__predict_false(flags & F_RSPD_ASYNC_NOTIF)) { 2857 struct mbuf *m; 2858 2859 if (cxgb_debug) 2860 printf("async notification\n"); 2861 2862 if (mh->mh_head == NULL) { 2863 mh->mh_head = m_gethdr(M_NOWAIT, MT_DATA); 2864 m = mh->mh_head; 2865 } else { 2866 m = m_gethdr(M_NOWAIT, MT_DATA); 2867 } 2868 if (m == NULL) 2869 goto no_mem; 2870 2871 memcpy(mtod(m, char *), r, AN_PKT_SIZE); 2872 m->m_len = m->m_pkthdr.len = AN_PKT_SIZE; 2873 *mtod(m, char *) = CPL_ASYNC_NOTIF; 2874 opcode = CPL_ASYNC_NOTIF; 2875 eop = 1; 2876 rspq->async_notif++; 2877 goto skip; 2878 } else if (flags & F_RSPD_IMM_DATA_VALID) { 2879 struct mbuf *m = m_gethdr(M_NOWAIT, MT_DATA); 2880 2881 if (m == NULL) { 2882 no_mem: 2883 rspq->next_holdoff = NOMEM_INTR_DELAY; 2884 budget_left--; 2885 break; 2886 } 2887 if (mh->mh_head == NULL) 2888 mh->mh_head = m; 2889 else 2890 mh->mh_tail->m_next = m; 2891 mh->mh_tail = m; 2892 2893 get_imm_packet(adap, r, m); 2894 mh->mh_head->m_pkthdr.len += 
m->m_len; 2895 eop = 1; 2896 rspq->imm_data++; 2897 } else if (r->len_cq) { 2898 int drop_thresh = eth ? SGE_RX_DROP_THRES : 0; 2899 2900 eop = get_packet(adap, drop_thresh, qs, mh, r); 2901 if (eop) { 2902 if (r->rss_hdr.hash_type && !adap->timestamp) 2903 mh->mh_head->m_flags |= M_FLOWID; 2904 mh->mh_head->m_pkthdr.flowid = rss_hash; 2905 } 2906 2907 ethpad = 2; 2908 } else { 2909 rspq->pure_rsps++; 2910 } 2911 skip: 2912 if (flags & RSPD_CTRL_MASK) { 2913 sleeping |= flags & RSPD_GTS_MASK; 2914 handle_rsp_cntrl_info(qs, flags); 2915 } 2916 2917 if (!eth && eop) { 2918 rspq->offload_pkts++; 2919 #ifdef TCP_OFFLOAD 2920 adap->cpl_handler[opcode](qs, r, mh->mh_head); 2921 #else 2922 m_freem(mh->mh_head); 2923 #endif 2924 mh->mh_head = NULL; 2925 } else if (eth && eop) { 2926 struct mbuf *m = mh->mh_head; 2927 2928 t3_rx_eth(adap, m, ethpad); 2929 2930 /* 2931 * The T304 sends incoming packets on any qset. If LRO 2932 * is also enabled, we could end up sending packet up 2933 * lro_ctrl->ifp's input. That is incorrect. 2934 * 2935 * The mbuf's rcvif was derived from the cpl header and 2936 * is accurate. Skip LRO and just use that. 2937 */ 2938 #if defined(INET6) || defined(INET) 2939 skip_lro = __predict_false(qs->port->ifp != m->m_pkthdr.rcvif); 2940 2941 if (lro_enabled && lro_ctrl->lro_cnt && !skip_lro 2942 && (tcp_lro_rx(lro_ctrl, m, 0) == 0) 2943 ) { 2944 /* successfully queue'd for LRO */ 2945 } else 2946 #endif 2947 { 2948 /* 2949 * LRO not enabled, packet unsuitable for LRO, 2950 * or unable to queue. Pass it up right now in 2951 * either case. 2952 */ 2953 struct ifnet *ifp = m->m_pkthdr.rcvif; 2954 (*ifp->if_input)(ifp, m); 2955 } 2956 mh->mh_head = NULL; 2957 2958 } 2959 2960 r++; 2961 if (__predict_false(++rspq->cidx == rspq->size)) { 2962 rspq->cidx = 0; 2963 rspq->gen ^= 1; 2964 r = rspq->desc; 2965 } 2966 2967 if (++rspq->credits >= 64) { 2968 refill_rspq(adap, rspq, rspq->credits); 2969 rspq->credits = 0; 2970 } 2971 __refill_fl_lt(adap, &qs->fl[0], 32); 2972 __refill_fl_lt(adap, &qs->fl[1], 32); 2973 --budget_left; 2974 } 2975 2976 #if defined(INET6) || defined(INET) 2977 /* Flush LRO */ 2978 while (!SLIST_EMPTY(&lro_ctrl->lro_active)) { 2979 struct lro_entry *queued = SLIST_FIRST(&lro_ctrl->lro_active); 2980 SLIST_REMOVE_HEAD(&lro_ctrl->lro_active, next); 2981 tcp_lro_flush(lro_ctrl, queued); 2982 } 2983 #endif 2984 2985 if (sleeping) 2986 check_ring_db(adap, qs, sleeping); 2987 2988 mb(); /* commit Tx queue processed updates */ 2989 if (__predict_false(qs->txq_stopped > 1)) 2990 restart_tx(qs); 2991 2992 __refill_fl_lt(adap, &qs->fl[0], 512); 2993 __refill_fl_lt(adap, &qs->fl[1], 512); 2994 budget -= budget_left; 2995 return (budget); 2996 } 2997 2998 /* 2999 * A helper function that processes responses and issues GTS. 3000 */ 3001 static __inline int 3002 process_responses_gts(adapter_t *adap, struct sge_rspq *rq) 3003 { 3004 int work; 3005 static int last_holdoff = 0; 3006 3007 work = process_responses(adap, rspq_to_qset(rq), -1); 3008 3009 if (cxgb_debug && (rq->next_holdoff != last_holdoff)) { 3010 printf("next_holdoff=%d\n", rq->next_holdoff); 3011 last_holdoff = rq->next_holdoff; 3012 } 3013 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) | 3014 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx)); 3015 3016 return (work); 3017 } 3018 3019 3020 /* 3021 * Interrupt handler for legacy INTx interrupts for T3B-based cards. 3022 * Handles data events from SGE response queues as well as error and other 3023 * async events as they all use the same interrupt pin. 
We use one SGE 3024 * response queue per port in this mode and protect all response queues with 3025 * queue 0's lock. 3026 */ 3027 void 3028 t3b_intr(void *data) 3029 { 3030 uint32_t i, map; 3031 adapter_t *adap = data; 3032 struct sge_rspq *q0 = &adap->sge.qs[0].rspq; 3033 3034 t3_write_reg(adap, A_PL_CLI, 0); 3035 map = t3_read_reg(adap, A_SG_DATA_INTR); 3036 3037 if (!map) 3038 return; 3039 3040 if (__predict_false(map & F_ERRINTR)) { 3041 t3_write_reg(adap, A_PL_INT_ENABLE0, 0); 3042 (void) t3_read_reg(adap, A_PL_INT_ENABLE0); 3043 taskqueue_enqueue(adap->tq, &adap->slow_intr_task); 3044 } 3045 3046 mtx_lock(&q0->lock); 3047 for_each_port(adap, i) 3048 if (map & (1 << i)) 3049 process_responses_gts(adap, &adap->sge.qs[i].rspq); 3050 mtx_unlock(&q0->lock); 3051 } 3052 3053 /* 3054 * The MSI interrupt handler. This needs to handle data events from SGE 3055 * response queues as well as error and other async events as they all use 3056 * the same MSI vector. We use one SGE response queue per port in this mode 3057 * and protect all response queues with queue 0's lock. 3058 */ 3059 void 3060 t3_intr_msi(void *data) 3061 { 3062 adapter_t *adap = data; 3063 struct sge_rspq *q0 = &adap->sge.qs[0].rspq; 3064 int i, new_packets = 0; 3065 3066 mtx_lock(&q0->lock); 3067 3068 for_each_port(adap, i) 3069 if (process_responses_gts(adap, &adap->sge.qs[i].rspq)) 3070 new_packets = 1; 3071 mtx_unlock(&q0->lock); 3072 if (new_packets == 0) { 3073 t3_write_reg(adap, A_PL_INT_ENABLE0, 0); 3074 (void) t3_read_reg(adap, A_PL_INT_ENABLE0); 3075 taskqueue_enqueue(adap->tq, &adap->slow_intr_task); 3076 } 3077 } 3078 3079 void 3080 t3_intr_msix(void *data) 3081 { 3082 struct sge_qset *qs = data; 3083 adapter_t *adap = qs->port->adapter; 3084 struct sge_rspq *rspq = &qs->rspq; 3085 3086 if (process_responses_gts(adap, rspq) == 0) 3087 rspq->unhandled_irqs++; 3088 } 3089 3090 #define QDUMP_SBUF_SIZE 32 * 400 3091 static int 3092 t3_dump_rspq(SYSCTL_HANDLER_ARGS) 3093 { 3094 struct sge_rspq *rspq; 3095 struct sge_qset *qs; 3096 int i, err, dump_end, idx; 3097 struct sbuf *sb; 3098 struct rsp_desc *rspd; 3099 uint32_t data[4]; 3100 3101 rspq = arg1; 3102 qs = rspq_to_qset(rspq); 3103 if (rspq->rspq_dump_count == 0) 3104 return (0); 3105 if (rspq->rspq_dump_count > RSPQ_Q_SIZE) { 3106 log(LOG_WARNING, 3107 "dump count is too large %d\n", rspq->rspq_dump_count); 3108 rspq->rspq_dump_count = 0; 3109 return (EINVAL); 3110 } 3111 if (rspq->rspq_dump_start > (RSPQ_Q_SIZE-1)) { 3112 log(LOG_WARNING, 3113 "dump start of %d is greater than queue size\n", 3114 rspq->rspq_dump_start); 3115 rspq->rspq_dump_start = 0; 3116 return (EINVAL); 3117 } 3118 err = t3_sge_read_rspq(qs->port->adapter, rspq->cntxt_id, data); 3119 if (err) 3120 return (err); 3121 err = sysctl_wire_old_buffer(req, 0); 3122 if (err) 3123 return (err); 3124 sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req); 3125 3126 sbuf_printf(sb, " \n index=%u size=%u MSI-X/RspQ=%u intr enable=%u intr armed=%u\n", 3127 (data[0] & 0xffff), data[0] >> 16, ((data[2] >> 20) & 0x3f), 3128 ((data[2] >> 26) & 1), ((data[2] >> 27) & 1)); 3129 sbuf_printf(sb, " generation=%u CQ mode=%u FL threshold=%u\n", 3130 ((data[2] >> 28) & 1), ((data[2] >> 31) & 1), data[3]); 3131 3132 sbuf_printf(sb, " start=%d -> end=%d\n", rspq->rspq_dump_start, 3133 (rspq->rspq_dump_start + rspq->rspq_dump_count) & (RSPQ_Q_SIZE-1)); 3134 3135 dump_end = rspq->rspq_dump_start + rspq->rspq_dump_count; 3136 for (i = rspq->rspq_dump_start; i < dump_end; i++) { 3137 idx = i & (RSPQ_Q_SIZE-1); 3138 
3139 rspd = &rspq->desc[idx]; 3140 sbuf_printf(sb, "\tidx=%04d opcode=%02x cpu_idx=%x hash_type=%x cq_idx=%x\n", 3141 idx, rspd->rss_hdr.opcode, rspd->rss_hdr.cpu_idx, 3142 rspd->rss_hdr.hash_type, be16toh(rspd->rss_hdr.cq_idx)); 3143 sbuf_printf(sb, "\trss_hash_val=%x flags=%08x len_cq=%x intr_gen=%x\n", 3144 rspd->rss_hdr.rss_hash_val, be32toh(rspd->flags), 3145 be32toh(rspd->len_cq), rspd->intr_gen); 3146 } 3147 3148 err = sbuf_finish(sb); 3149 /* Output a trailing NUL. */ 3150 if (err == 0) 3151 err = SYSCTL_OUT(req, "", 1); 3152 sbuf_delete(sb); 3153 return (err); 3154 } 3155 3156 static int 3157 t3_dump_txq_eth(SYSCTL_HANDLER_ARGS) 3158 { 3159 struct sge_txq *txq; 3160 struct sge_qset *qs; 3161 int i, j, err, dump_end; 3162 struct sbuf *sb; 3163 struct tx_desc *txd; 3164 uint32_t *WR, wr_hi, wr_lo, gen; 3165 uint32_t data[4]; 3166 3167 txq = arg1; 3168 qs = txq_to_qset(txq, TXQ_ETH); 3169 if (txq->txq_dump_count == 0) { 3170 return (0); 3171 } 3172 if (txq->txq_dump_count > TX_ETH_Q_SIZE) { 3173 log(LOG_WARNING, 3174 "dump count is too large %d\n", txq->txq_dump_count); 3175 txq->txq_dump_count = 1; 3176 return (EINVAL); 3177 } 3178 if (txq->txq_dump_start > (TX_ETH_Q_SIZE-1)) { 3179 log(LOG_WARNING, 3180 "dump start of %d is greater than queue size\n", 3181 txq->txq_dump_start); 3182 txq->txq_dump_start = 0; 3183 return (EINVAL); 3184 } 3185 err = t3_sge_read_ecntxt(qs->port->adapter, qs->rspq.cntxt_id, data); 3186 if (err) 3187 return (err); 3188 err = sysctl_wire_old_buffer(req, 0); 3189 if (err) 3190 return (err); 3191 sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req); 3192 3193 sbuf_printf(sb, " \n credits=%u GTS=%u index=%u size=%u rspq#=%u cmdq#=%u\n", 3194 (data[0] & 0x7fff), ((data[0] >> 15) & 1), (data[0] >> 16), 3195 (data[1] & 0xffff), ((data[3] >> 4) & 7), ((data[3] >> 7) & 1)); 3196 sbuf_printf(sb, " TUN=%u TOE=%u generation%u uP token=%u valid=%u\n", 3197 ((data[3] >> 8) & 1), ((data[3] >> 9) & 1), ((data[3] >> 10) & 1), 3198 ((data[3] >> 11) & 0xfffff), ((data[3] >> 31) & 1)); 3199 sbuf_printf(sb, " qid=%d start=%d -> end=%d\n", qs->idx, 3200 txq->txq_dump_start, 3201 (txq->txq_dump_start + txq->txq_dump_count) & (TX_ETH_Q_SIZE-1)); 3202 3203 dump_end = txq->txq_dump_start + txq->txq_dump_count; 3204 for (i = txq->txq_dump_start; i < dump_end; i++) { 3205 txd = &txq->desc[i & (TX_ETH_Q_SIZE-1)]; 3206 WR = (uint32_t *)txd->flit; 3207 wr_hi = ntohl(WR[0]); 3208 wr_lo = ntohl(WR[1]); 3209 gen = G_WR_GEN(wr_lo); 3210 3211 sbuf_printf(sb," wr_hi %08x wr_lo %08x gen %d\n", 3212 wr_hi, wr_lo, gen); 3213 for (j = 2; j < 30; j += 4) 3214 sbuf_printf(sb, "\t%08x %08x %08x %08x \n", 3215 WR[j], WR[j + 1], WR[j + 2], WR[j + 3]); 3216 3217 } 3218 err = sbuf_finish(sb); 3219 /* Output a trailing NUL. 
*/ 3220 if (err == 0) 3221 err = SYSCTL_OUT(req, "", 1); 3222 sbuf_delete(sb); 3223 return (err); 3224 } 3225 3226 static int 3227 t3_dump_txq_ctrl(SYSCTL_HANDLER_ARGS) 3228 { 3229 struct sge_txq *txq; 3230 struct sge_qset *qs; 3231 int i, j, err, dump_end; 3232 struct sbuf *sb; 3233 struct tx_desc *txd; 3234 uint32_t *WR, wr_hi, wr_lo, gen; 3235 3236 txq = arg1; 3237 qs = txq_to_qset(txq, TXQ_CTRL); 3238 if (txq->txq_dump_count == 0) { 3239 return (0); 3240 } 3241 if (txq->txq_dump_count > 256) { 3242 log(LOG_WARNING, 3243 "dump count is too large %d\n", txq->txq_dump_count); 3244 txq->txq_dump_count = 1; 3245 return (EINVAL); 3246 } 3247 if (txq->txq_dump_start > 255) { 3248 log(LOG_WARNING, 3249 "dump start of %d is greater than queue size\n", 3250 txq->txq_dump_start); 3251 txq->txq_dump_start = 0; 3252 return (EINVAL); 3253 } 3254 3255 err = sysctl_wire_old_buffer(req, 0); 3256 if (err != 0) 3257 return (err); 3258 sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req); 3259 sbuf_printf(sb, " qid=%d start=%d -> end=%d\n", qs->idx, 3260 txq->txq_dump_start, 3261 (txq->txq_dump_start + txq->txq_dump_count) & 255); 3262 3263 dump_end = txq->txq_dump_start + txq->txq_dump_count; 3264 for (i = txq->txq_dump_start; i < dump_end; i++) { 3265 txd = &txq->desc[i & (255)]; 3266 WR = (uint32_t *)txd->flit; 3267 wr_hi = ntohl(WR[0]); 3268 wr_lo = ntohl(WR[1]); 3269 gen = G_WR_GEN(wr_lo); 3270 3271 sbuf_printf(sb," wr_hi %08x wr_lo %08x gen %d\n", 3272 wr_hi, wr_lo, gen); 3273 for (j = 2; j < 30; j += 4) 3274 sbuf_printf(sb, "\t%08x %08x %08x %08x \n", 3275 WR[j], WR[j + 1], WR[j + 2], WR[j + 3]); 3276 3277 } 3278 err = sbuf_finish(sb); 3279 /* Output a trailing NUL. */ 3280 if (err == 0) 3281 err = SYSCTL_OUT(req, "", 1); 3282 sbuf_delete(sb); 3283 return (err); 3284 } 3285 3286 static int 3287 t3_set_coalesce_usecs(SYSCTL_HANDLER_ARGS) 3288 { 3289 adapter_t *sc = arg1; 3290 struct qset_params *qsp = &sc->params.sge.qset[0]; 3291 int coalesce_usecs; 3292 struct sge_qset *qs; 3293 int i, j, err, nqsets = 0; 3294 struct mtx *lock; 3295 3296 if ((sc->flags & FULL_INIT_DONE) == 0) 3297 return (ENXIO); 3298 3299 coalesce_usecs = qsp->coalesce_usecs; 3300 err = sysctl_handle_int(oidp, &coalesce_usecs, arg2, req); 3301 3302 if (err != 0) { 3303 return (err); 3304 } 3305 if (coalesce_usecs == qsp->coalesce_usecs) 3306 return (0); 3307 3308 for (i = 0; i < sc->params.nports; i++) 3309 for (j = 0; j < sc->port[i].nqsets; j++) 3310 nqsets++; 3311 3312 coalesce_usecs = max(1, coalesce_usecs); 3313 3314 for (i = 0; i < nqsets; i++) { 3315 qs = &sc->sge.qs[i]; 3316 qsp = &sc->params.sge.qset[i]; 3317 qsp->coalesce_usecs = coalesce_usecs; 3318 3319 lock = (sc->flags & USING_MSIX) ? &qs->rspq.lock : 3320 &sc->sge.qs[0].rspq.lock; 3321 3322 mtx_lock(lock); 3323 t3_update_qset_coalesce(qs, qsp); 3324 t3_write_reg(sc, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) | 3325 V_NEWTIMER(qs->rspq.holdoff_tmr)); 3326 mtx_unlock(lock); 3327 } 3328 3329 return (0); 3330 } 3331 3332 static int 3333 t3_pkt_timestamp(SYSCTL_HANDLER_ARGS) 3334 { 3335 adapter_t *sc = arg1; 3336 int rc, timestamp; 3337 3338 if ((sc->flags & FULL_INIT_DONE) == 0) 3339 return (ENXIO); 3340 3341 timestamp = sc->timestamp; 3342 rc = sysctl_handle_int(oidp, ×tamp, arg2, req); 3343 3344 if (rc != 0) 3345 return (rc); 3346 3347 if (timestamp != sc->timestamp) { 3348 t3_set_reg_field(sc, A_TP_PC_CONFIG2, F_ENABLERXPKTTMSTPRSS, 3349 timestamp ? 
F_ENABLERXPKTTMSTPRSS : 0); 3350 sc->timestamp = timestamp; 3351 } 3352 3353 return (0); 3354 } 3355 3356 void 3357 t3_add_attach_sysctls(adapter_t *sc) 3358 { 3359 struct sysctl_ctx_list *ctx; 3360 struct sysctl_oid_list *children; 3361 3362 ctx = device_get_sysctl_ctx(sc->dev); 3363 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); 3364 3365 /* random information */ 3366 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, 3367 "firmware_version", 3368 CTLFLAG_RD, sc->fw_version, 3369 0, "firmware version"); 3370 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 3371 "hw_revision", 3372 CTLFLAG_RD, &sc->params.rev, 3373 0, "chip model"); 3374 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, 3375 "port_types", 3376 CTLFLAG_RD, sc->port_types, 3377 0, "type of ports"); 3378 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 3379 "enable_debug", 3380 CTLFLAG_RW, &cxgb_debug, 3381 0, "enable verbose debugging output"); 3382 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tunq_coalesce", 3383 CTLFLAG_RD, &sc->tunq_coalesce, 3384 "#tunneled packets freed"); 3385 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 3386 "txq_overrun", 3387 CTLFLAG_RD, &txq_fills, 3388 0, "#times txq overrun"); 3389 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 3390 "core_clock", 3391 CTLFLAG_RD, &sc->params.vpd.cclk, 3392 0, "core clock frequency (in KHz)"); 3393 } 3394 3395 3396 static const char *rspq_name = "rspq"; 3397 static const char *txq_names[] = 3398 { 3399 "txq_eth", 3400 "txq_ofld", 3401 "txq_ctrl" 3402 }; 3403 3404 static int 3405 sysctl_handle_macstat(SYSCTL_HANDLER_ARGS) 3406 { 3407 struct port_info *p = arg1; 3408 uint64_t *parg; 3409 3410 if (!p) 3411 return (EINVAL); 3412 3413 cxgb_refresh_stats(p); 3414 parg = (uint64_t *) ((uint8_t *)&p->mac.stats + arg2); 3415 3416 return (sysctl_handle_64(oidp, parg, 0, req)); 3417 } 3418 3419 void 3420 t3_add_configured_sysctls(adapter_t *sc) 3421 { 3422 struct sysctl_ctx_list *ctx; 3423 struct sysctl_oid_list *children; 3424 int i, j; 3425 3426 ctx = device_get_sysctl_ctx(sc->dev); 3427 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); 3428 3429 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 3430 "intr_coal", 3431 CTLTYPE_INT|CTLFLAG_RW, sc, 3432 0, t3_set_coalesce_usecs, 3433 "I", "interrupt coalescing timer (us)"); 3434 3435 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 3436 "pkt_timestamp", 3437 CTLTYPE_INT | CTLFLAG_RW, sc, 3438 0, t3_pkt_timestamp, 3439 "I", "provide packet timestamp instead of connection hash"); 3440 3441 for (i = 0; i < sc->params.nports; i++) { 3442 struct port_info *pi = &sc->port[i]; 3443 struct sysctl_oid *poid; 3444 struct sysctl_oid_list *poidlist; 3445 struct mac_stats *mstats = &pi->mac.stats; 3446 3447 snprintf(pi->namebuf, PORT_NAME_LEN, "port%d", i); 3448 poid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, 3449 pi->namebuf, CTLFLAG_RD, NULL, "port statistics"); 3450 poidlist = SYSCTL_CHILDREN(poid); 3451 SYSCTL_ADD_UINT(ctx, poidlist, OID_AUTO, 3452 "nqsets", CTLFLAG_RD, &pi->nqsets, 3453 0, "#queue sets"); 3454 3455 for (j = 0; j < pi->nqsets; j++) { 3456 struct sge_qset *qs = &sc->sge.qs[pi->first_qset + j]; 3457 struct sysctl_oid *qspoid, *rspqpoid, *txqpoid, 3458 *ctrlqpoid, *lropoid; 3459 struct sysctl_oid_list *qspoidlist, *rspqpoidlist, 3460 *txqpoidlist, *ctrlqpoidlist, 3461 *lropoidlist; 3462 struct sge_txq *txq = &qs->txq[TXQ_ETH]; 3463 3464 snprintf(qs->namebuf, QS_NAME_LEN, "qs%d", j); 3465 3466 qspoid = SYSCTL_ADD_NODE(ctx, poidlist, OID_AUTO, 3467 qs->namebuf, CTLFLAG_RD, NULL, "qset statistics"); 3468 qspoidlist = SYSCTL_CHILDREN(qspoid); 3469 3470 
SYSCTL_ADD_UINT(ctx, qspoidlist, OID_AUTO, "fl0_empty", 3471 CTLFLAG_RD, &qs->fl[0].empty, 0, 3472 "freelist #0 empty"); 3473 SYSCTL_ADD_UINT(ctx, qspoidlist, OID_AUTO, "fl1_empty", 3474 CTLFLAG_RD, &qs->fl[1].empty, 0, 3475 "freelist #1 empty"); 3476 3477 rspqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO, 3478 rspq_name, CTLFLAG_RD, NULL, "rspq statistics"); 3479 rspqpoidlist = SYSCTL_CHILDREN(rspqpoid); 3480 3481 txqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO, 3482 txq_names[0], CTLFLAG_RD, NULL, "txq statistics"); 3483 txqpoidlist = SYSCTL_CHILDREN(txqpoid); 3484 3485 ctrlqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO, 3486 txq_names[2], CTLFLAG_RD, NULL, "ctrlq statistics"); 3487 ctrlqpoidlist = SYSCTL_CHILDREN(ctrlqpoid); 3488 3489 lropoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO, 3490 "lro_stats", CTLFLAG_RD, NULL, "LRO statistics"); 3491 lropoidlist = SYSCTL_CHILDREN(lropoid); 3492 3493 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "size", 3494 CTLFLAG_RD, &qs->rspq.size, 3495 0, "#entries in response queue"); 3496 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "cidx", 3497 CTLFLAG_RD, &qs->rspq.cidx, 3498 0, "consumer index"); 3499 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "credits", 3500 CTLFLAG_RD, &qs->rspq.credits, 3501 0, "#credits"); 3502 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "starved", 3503 CTLFLAG_RD, &qs->rspq.starved, 3504 0, "#times starved"); 3505 SYSCTL_ADD_UAUTO(ctx, rspqpoidlist, OID_AUTO, "phys_addr", 3506 CTLFLAG_RD, &qs->rspq.phys_addr, 3507 "physical_address_of the queue"); 3508 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "dump_start", 3509 CTLFLAG_RW, &qs->rspq.rspq_dump_start, 3510 0, "start rspq dump entry"); 3511 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "dump_count", 3512 CTLFLAG_RW, &qs->rspq.rspq_dump_count, 3513 0, "#rspq entries to dump"); 3514 SYSCTL_ADD_PROC(ctx, rspqpoidlist, OID_AUTO, "qdump", 3515 CTLTYPE_STRING | CTLFLAG_RD, &qs->rspq, 3516 0, t3_dump_rspq, "A", "dump of the response queue"); 3517 3518 SYSCTL_ADD_UQUAD(ctx, txqpoidlist, OID_AUTO, "dropped", 3519 CTLFLAG_RD, &qs->txq[TXQ_ETH].txq_mr->br_drops, 3520 "#tunneled packets dropped"); 3521 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "sendqlen", 3522 CTLFLAG_RD, &qs->txq[TXQ_ETH].sendq.qlen, 3523 0, "#tunneled packets waiting to be sent"); 3524 #if 0 3525 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "queue_pidx", 3526 CTLFLAG_RD, (uint32_t *)(uintptr_t)&qs->txq[TXQ_ETH].txq_mr.br_prod, 3527 0, "#tunneled packets queue producer index"); 3528 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "queue_cidx", 3529 CTLFLAG_RD, (uint32_t *)(uintptr_t)&qs->txq[TXQ_ETH].txq_mr.br_cons, 3530 0, "#tunneled packets queue consumer index"); 3531 #endif 3532 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "processed", 3533 CTLFLAG_RD, &qs->txq[TXQ_ETH].processed, 3534 0, "#tunneled packets processed by the card"); 3535 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "cleaned", 3536 CTLFLAG_RD, &txq->cleaned, 3537 0, "#tunneled packets cleaned"); 3538 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "in_use", 3539 CTLFLAG_RD, &txq->in_use, 3540 0, "#tunneled packet slots in use"); 3541 SYSCTL_ADD_UQUAD(ctx, txqpoidlist, OID_AUTO, "frees", 3542 CTLFLAG_RD, &txq->txq_frees, 3543 "#tunneled packets freed"); 3544 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "skipped", 3545 CTLFLAG_RD, &txq->txq_skipped, 3546 0, "#tunneled packet descriptors skipped"); 3547 SYSCTL_ADD_UQUAD(ctx, txqpoidlist, OID_AUTO, "coalesced", 3548 CTLFLAG_RD, &txq->txq_coalesced, 3549 "#tunneled packets coalesced"); 3550 
SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "enqueued", 3551 CTLFLAG_RD, &txq->txq_enqueued, 3552 0, "#tunneled packets enqueued to hardware"); 3553 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "stopped_flags", 3554 CTLFLAG_RD, &qs->txq_stopped, 3555 0, "tx queues stopped"); 3556 SYSCTL_ADD_UAUTO(ctx, txqpoidlist, OID_AUTO, "phys_addr", 3557 CTLFLAG_RD, &txq->phys_addr, 3558 "physical_address_of the queue"); 3559 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "qgen", 3560 CTLFLAG_RW, &qs->txq[TXQ_ETH].gen, 3561 0, "txq generation"); 3562 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "hw_cidx", 3563 CTLFLAG_RD, &txq->cidx, 3564 0, "hardware queue cidx"); 3565 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "hw_pidx", 3566 CTLFLAG_RD, &txq->pidx, 3567 0, "hardware queue pidx"); 3568 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "dump_start", 3569 CTLFLAG_RW, &qs->txq[TXQ_ETH].txq_dump_start, 3570 0, "txq start idx for dump"); 3571 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "dump_count", 3572 CTLFLAG_RW, &qs->txq[TXQ_ETH].txq_dump_count, 3573 0, "txq #entries to dump"); 3574 SYSCTL_ADD_PROC(ctx, txqpoidlist, OID_AUTO, "qdump", 3575 CTLTYPE_STRING | CTLFLAG_RD, &qs->txq[TXQ_ETH], 3576 0, t3_dump_txq_eth, "A", "dump of the transmit queue"); 3577 3578 SYSCTL_ADD_UINT(ctx, ctrlqpoidlist, OID_AUTO, "dump_start", 3579 CTLFLAG_RW, &qs->txq[TXQ_CTRL].txq_dump_start, 3580 0, "ctrlq start idx for dump"); 3581 SYSCTL_ADD_UINT(ctx, ctrlqpoidlist, OID_AUTO, "dump_count", 3582 CTLFLAG_RW, &qs->txq[TXQ_CTRL].txq_dump_count, 3583 0, "ctrl #entries to dump"); 3584 SYSCTL_ADD_PROC(ctx, ctrlqpoidlist, OID_AUTO, "qdump", 3585 CTLTYPE_STRING | CTLFLAG_RD, &qs->txq[TXQ_CTRL], 3586 0, t3_dump_txq_ctrl, "A", "dump of the transmit queue"); 3587 3588 SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO, "lro_queued", 3589 CTLFLAG_RD, &qs->lro.ctrl.lro_queued, 0, NULL); 3590 SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO, "lro_flushed", 3591 CTLFLAG_RD, &qs->lro.ctrl.lro_flushed, 0, NULL); 3592 SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO, "lro_bad_csum", 3593 CTLFLAG_RD, &qs->lro.ctrl.lro_bad_csum, 0, NULL); 3594 SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO, "lro_cnt", 3595 CTLFLAG_RD, &qs->lro.ctrl.lro_cnt, 0, NULL); 3596 } 3597 3598 /* Now add a node for mac stats. */ 3599 poid = SYSCTL_ADD_NODE(ctx, poidlist, OID_AUTO, "mac_stats", 3600 CTLFLAG_RD, NULL, "MAC statistics"); 3601 poidlist = SYSCTL_CHILDREN(poid); 3602 3603 /* 3604 * We (ab)use the length argument (arg2) to pass on the offset 3605 * of the data that we are interested in. This is only required 3606 * for the quad counters that are updated from the hardware (we 3607 * make sure that we return the latest value). 3608 * sysctl_handle_macstat first updates *all* the counters from 3609 * the hardware, and then returns the latest value of the 3610 * requested counter. Best would be to update only the 3611 * requested counter from hardware, but t3_mac_update_stats() 3612 * hides all the register details and we don't want to dive into 3613 * all that here. 
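 *
 * Round-trip sketch; both halves already exist in this file and are
 * shown together only to make the arg2-as-offset trick explicit:
 *
 *	registration (CXGB_SYSCTL_ADD_QUAD below, expanded for tx_octets):
 *		SYSCTL_ADD_OID(ctx, poidlist, OID_AUTO, "tx_octets",
 *		    (CTLTYPE_U64 | CTLFLAG_RD), pi,
 *		    offsetof(struct mac_stats, tx_octets),
 *		    sysctl_handle_macstat, "QU", 0);
 *
 *	read side (sysctl_handle_macstat above):
 *		cxgb_refresh_stats(p);
 *		parg = (uint64_t *)((uint8_t *)&p->mac.stats + arg2);
 *		return (sysctl_handle_64(oidp, parg, 0, req));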
3614 */ 3615 #define CXGB_SYSCTL_ADD_QUAD(a) SYSCTL_ADD_OID(ctx, poidlist, OID_AUTO, #a, \ 3616 (CTLTYPE_U64 | CTLFLAG_RD), pi, offsetof(struct mac_stats, a), \ 3617 sysctl_handle_macstat, "QU", 0) 3618 CXGB_SYSCTL_ADD_QUAD(tx_octets); 3619 CXGB_SYSCTL_ADD_QUAD(tx_octets_bad); 3620 CXGB_SYSCTL_ADD_QUAD(tx_frames); 3621 CXGB_SYSCTL_ADD_QUAD(tx_mcast_frames); 3622 CXGB_SYSCTL_ADD_QUAD(tx_bcast_frames); 3623 CXGB_SYSCTL_ADD_QUAD(tx_pause); 3624 CXGB_SYSCTL_ADD_QUAD(tx_deferred); 3625 CXGB_SYSCTL_ADD_QUAD(tx_late_collisions); 3626 CXGB_SYSCTL_ADD_QUAD(tx_total_collisions); 3627 CXGB_SYSCTL_ADD_QUAD(tx_excess_collisions); 3628 CXGB_SYSCTL_ADD_QUAD(tx_underrun); 3629 CXGB_SYSCTL_ADD_QUAD(tx_len_errs); 3630 CXGB_SYSCTL_ADD_QUAD(tx_mac_internal_errs); 3631 CXGB_SYSCTL_ADD_QUAD(tx_excess_deferral); 3632 CXGB_SYSCTL_ADD_QUAD(tx_fcs_errs); 3633 CXGB_SYSCTL_ADD_QUAD(tx_frames_64); 3634 CXGB_SYSCTL_ADD_QUAD(tx_frames_65_127); 3635 CXGB_SYSCTL_ADD_QUAD(tx_frames_128_255); 3636 CXGB_SYSCTL_ADD_QUAD(tx_frames_256_511); 3637 CXGB_SYSCTL_ADD_QUAD(tx_frames_512_1023); 3638 CXGB_SYSCTL_ADD_QUAD(tx_frames_1024_1518); 3639 CXGB_SYSCTL_ADD_QUAD(tx_frames_1519_max); 3640 CXGB_SYSCTL_ADD_QUAD(rx_octets); 3641 CXGB_SYSCTL_ADD_QUAD(rx_octets_bad); 3642 CXGB_SYSCTL_ADD_QUAD(rx_frames); 3643 CXGB_SYSCTL_ADD_QUAD(rx_mcast_frames); 3644 CXGB_SYSCTL_ADD_QUAD(rx_bcast_frames); 3645 CXGB_SYSCTL_ADD_QUAD(rx_pause); 3646 CXGB_SYSCTL_ADD_QUAD(rx_fcs_errs); 3647 CXGB_SYSCTL_ADD_QUAD(rx_align_errs); 3648 CXGB_SYSCTL_ADD_QUAD(rx_symbol_errs); 3649 CXGB_SYSCTL_ADD_QUAD(rx_data_errs); 3650 CXGB_SYSCTL_ADD_QUAD(rx_sequence_errs); 3651 CXGB_SYSCTL_ADD_QUAD(rx_runt); 3652 CXGB_SYSCTL_ADD_QUAD(rx_jabber); 3653 CXGB_SYSCTL_ADD_QUAD(rx_short); 3654 CXGB_SYSCTL_ADD_QUAD(rx_too_long); 3655 CXGB_SYSCTL_ADD_QUAD(rx_mac_internal_errs); 3656 CXGB_SYSCTL_ADD_QUAD(rx_cong_drops); 3657 CXGB_SYSCTL_ADD_QUAD(rx_frames_64); 3658 CXGB_SYSCTL_ADD_QUAD(rx_frames_65_127); 3659 CXGB_SYSCTL_ADD_QUAD(rx_frames_128_255); 3660 CXGB_SYSCTL_ADD_QUAD(rx_frames_256_511); 3661 CXGB_SYSCTL_ADD_QUAD(rx_frames_512_1023); 3662 CXGB_SYSCTL_ADD_QUAD(rx_frames_1024_1518); 3663 CXGB_SYSCTL_ADD_QUAD(rx_frames_1519_max); 3664 #undef CXGB_SYSCTL_ADD_QUAD 3665 3666 #define CXGB_SYSCTL_ADD_ULONG(a) SYSCTL_ADD_ULONG(ctx, poidlist, OID_AUTO, #a, \ 3667 CTLFLAG_RD, &mstats->a, 0) 3668 CXGB_SYSCTL_ADD_ULONG(tx_fifo_parity_err); 3669 CXGB_SYSCTL_ADD_ULONG(rx_fifo_parity_err); 3670 CXGB_SYSCTL_ADD_ULONG(tx_fifo_urun); 3671 CXGB_SYSCTL_ADD_ULONG(rx_fifo_ovfl); 3672 CXGB_SYSCTL_ADD_ULONG(serdes_signal_loss); 3673 CXGB_SYSCTL_ADD_ULONG(xaui_pcs_ctc_err); 3674 CXGB_SYSCTL_ADD_ULONG(xaui_pcs_align_change); 3675 CXGB_SYSCTL_ADD_ULONG(num_toggled); 3676 CXGB_SYSCTL_ADD_ULONG(num_resets); 3677 CXGB_SYSCTL_ADD_ULONG(link_faults); 3678 #undef CXGB_SYSCTL_ADD_ULONG 3679 } 3680 } 3681 3682 /** 3683 * t3_get_desc - dump an SGE descriptor for debugging purposes 3684 * @qs: the queue set 3685 * @qnum: identifies the specific queue (0..2: Tx, 3:response, 4..5: Rx) 3686 * @idx: the descriptor index in the queue 3687 * @data: where to dump the descriptor contents 3688 * 3689 * Dumps the contents of a HW descriptor of an SGE queue. Returns the 3690 * size of the descriptor. 
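 *
 * Hypothetical debugging caller (illustration only; "qs" and "idx" are
 * whatever the debugger is inspecting; qnum 0 selects the Ethernet Tx
 * queue):
 *
 *	unsigned char buf[sizeof(struct tx_desc)];
 *
 *	if (t3_get_desc(qs, 0, idx, buf) == sizeof(struct tx_desc))
 *		hexdump(buf, sizeof(struct tx_desc), NULL, 0);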
3691 */ 3692 int 3693 t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx, 3694 unsigned char *data) 3695 { 3696 if (qnum >= 6) 3697 return (EINVAL); 3698 3699 if (qnum < 3) { 3700 if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size) 3701 return (EINVAL); 3702 memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc)); 3703 return sizeof(struct tx_desc); 3704 } 3705 3706 if (qnum == 3) { 3707 if (!qs->rspq.desc || idx >= qs->rspq.size) 3708 return (EINVAL); 3709 memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc)); 3710 return sizeof(struct rsp_desc); 3711 } 3712 3713 qnum -= 4; 3714 if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size) 3715 return (EINVAL); 3716 memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc)); 3717 return sizeof(struct rx_desc); 3718 } 3719