1 /*- 2 * Copyright (c) 2011 Chelsio Communications, Inc. 3 * All rights reserved. 4 * Written by: Navdeep Parhar <np@FreeBSD.org> 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28 #include <sys/cdefs.h> 29 __FBSDID("$FreeBSD$"); 30 31 #include "opt_inet.h" 32 #include "opt_inet6.h" 33 34 #include <sys/types.h> 35 #include <sys/eventhandler.h> 36 #include <sys/mbuf.h> 37 #include <sys/socket.h> 38 #include <sys/kernel.h> 39 #include <sys/kdb.h> 40 #include <sys/malloc.h> 41 #include <sys/queue.h> 42 #include <sys/sbuf.h> 43 #include <sys/taskqueue.h> 44 #include <sys/time.h> 45 #include <sys/sysctl.h> 46 #include <sys/smp.h> 47 #include <net/bpf.h> 48 #include <net/ethernet.h> 49 #include <net/if.h> 50 #include <net/if_vlan_var.h> 51 #include <netinet/in.h> 52 #include <netinet/ip.h> 53 #include <netinet/ip6.h> 54 #include <netinet/tcp.h> 55 #include <machine/md_var.h> 56 #include <vm/vm.h> 57 #include <vm/pmap.h> 58 59 #include "common/common.h" 60 #include "common/t4_regs.h" 61 #include "common/t4_regs_values.h" 62 #include "common/t4_msg.h" 63 64 #ifdef T4_PKT_TIMESTAMP 65 #define RX_COPY_THRESHOLD (MINCLSIZE - 8) 66 #else 67 #define RX_COPY_THRESHOLD MINCLSIZE 68 #endif 69 70 /* 71 * Ethernet frames are DMA'd at this byte offset into the freelist buffer. 72 * 0-7 are valid values. 73 */ 74 static int fl_pktshift = 2; 75 TUNABLE_INT("hw.cxgbe.fl_pktshift", &fl_pktshift); 76 77 /* 78 * Pad ethernet payload up to this boundary. 79 * -1: driver should figure out a good value. 80 * 0: disable padding. 81 * Any power of 2 from 32 to 4096 (both inclusive) is also a valid value. 82 */ 83 static int fl_pad = -1; 84 TUNABLE_INT("hw.cxgbe.fl_pad", &fl_pad); 85 86 /* 87 * Status page length. 88 * -1: driver should figure out a good value. 89 * 64 or 128 are the only other valid values. 90 */ 91 static int spg_len = -1; 92 TUNABLE_INT("hw.cxgbe.spg_len", &spg_len); 93 94 /* 95 * Congestion drops. 96 * -1: no congestion feedback (not recommended). 97 * 0: backpressure the channel instead of dropping packets right away. 98 * 1: no backpressure, drop packets for the congested queue immediately. 
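 *
 * Like the other hw.cxgbe.* knobs above, this is a loader tunable: it is
 * read from the kernel environment when the driver is loaded, so it must be
 * set before attach, e.g. in /boot/loader.conf (illustrative, defaults
 * shown):
 *
 *   hw.cxgbe.fl_pktshift=2
 *   hw.cxgbe.cong_drop=0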
99 */ 100 static int cong_drop = 0; 101 TUNABLE_INT("hw.cxgbe.cong_drop", &cong_drop); 102 103 /* 104 * Deliver multiple frames in the same free list buffer if they fit. 105 * -1: let the driver decide whether to enable buffer packing or not. 106 * 0: disable buffer packing. 107 * 1: enable buffer packing. 108 */ 109 static int buffer_packing = -1; 110 TUNABLE_INT("hw.cxgbe.buffer_packing", &buffer_packing); 111 112 /* 113 * Start next frame in a packed buffer at this boundary. 114 * -1: driver should figure out a good value. 115 * T4: 116 * --- 117 * if fl_pad != 0 118 * value specified here will be overridden by fl_pad. 119 * else 120 * power of 2 from 32 to 4096 (both inclusive) is a valid value here. 121 * T5: 122 * --- 123 * 16, or a power of 2 from 64 to 4096 (both inclusive) is a valid value. 124 */ 125 static int fl_pack = -1; 126 static int t4_fl_pack; 127 static int t5_fl_pack; 128 TUNABLE_INT("hw.cxgbe.fl_pack", &fl_pack); 129 130 /* 131 * Allow the driver to create mbuf(s) in a cluster allocated for rx. 132 * 0: never; always allocate mbufs from the zone_mbuf UMA zone. 133 * 1: ok to create mbuf(s) within a cluster if there is room. 134 */ 135 static int allow_mbufs_in_cluster = 1; 136 TUNABLE_INT("hw.cxgbe.allow_mbufs_in_cluster", &allow_mbufs_in_cluster); 137 138 /* 139 * Largest rx cluster size that the driver is allowed to allocate. 140 */ 141 static int largest_rx_cluster = MJUM16BYTES; 142 TUNABLE_INT("hw.cxgbe.largest_rx_cluster", &largest_rx_cluster); 143 144 /* 145 * Size of cluster allocation that's most likely to succeed. The driver will 146 * fall back to this size if it fails to allocate clusters larger than this. 147 */ 148 static int safest_rx_cluster = PAGE_SIZE; 149 TUNABLE_INT("hw.cxgbe.safest_rx_cluster", &safest_rx_cluster); 150 151 /* Used to track coalesced tx work request */ 152 struct txpkts { 153 uint64_t *flitp; /* ptr to flit where next pkt should start */ 154 uint8_t npkt; /* # of packets in this work request */ 155 uint8_t nflits; /* # of flits used by this work request */ 156 uint16_t plen; /* total payload (sum of all packets) */ 157 }; 158 159 /* A packet's SGL. This + m_pkthdr has all info needed for tx */ 160 struct sgl { 161 int nsegs; /* # of segments in the SGL, 0 means imm. 
tx */ 162 int nflits; /* # of flits needed for the SGL */ 163 bus_dma_segment_t seg[TX_SGL_SEGS]; 164 }; 165 166 static int service_iq(struct sge_iq *, int); 167 static struct mbuf *get_fl_payload(struct adapter *, struct sge_fl *, uint32_t, 168 int *); 169 static int t4_eth_rx(struct sge_iq *, const struct rss_header *, struct mbuf *); 170 static inline void init_iq(struct sge_iq *, struct adapter *, int, int, int, 171 int); 172 static inline void init_fl(struct adapter *, struct sge_fl *, int, int, int, 173 char *); 174 static inline void init_eq(struct sge_eq *, int, int, uint8_t, uint16_t, 175 char *); 176 static int alloc_ring(struct adapter *, size_t, bus_dma_tag_t *, bus_dmamap_t *, 177 bus_addr_t *, void **); 178 static int free_ring(struct adapter *, bus_dma_tag_t, bus_dmamap_t, bus_addr_t, 179 void *); 180 static int alloc_iq_fl(struct port_info *, struct sge_iq *, struct sge_fl *, 181 int, int); 182 static int free_iq_fl(struct port_info *, struct sge_iq *, struct sge_fl *); 183 static void add_fl_sysctls(struct sysctl_ctx_list *, struct sysctl_oid *, 184 struct sge_fl *); 185 static int alloc_fwq(struct adapter *); 186 static int free_fwq(struct adapter *); 187 static int alloc_mgmtq(struct adapter *); 188 static int free_mgmtq(struct adapter *); 189 static int alloc_rxq(struct port_info *, struct sge_rxq *, int, int, 190 struct sysctl_oid *); 191 static int free_rxq(struct port_info *, struct sge_rxq *); 192 #ifdef TCP_OFFLOAD 193 static int alloc_ofld_rxq(struct port_info *, struct sge_ofld_rxq *, int, int, 194 struct sysctl_oid *); 195 static int free_ofld_rxq(struct port_info *, struct sge_ofld_rxq *); 196 #endif 197 static int ctrl_eq_alloc(struct adapter *, struct sge_eq *); 198 static int eth_eq_alloc(struct adapter *, struct port_info *, struct sge_eq *); 199 #ifdef TCP_OFFLOAD 200 static int ofld_eq_alloc(struct adapter *, struct port_info *, struct sge_eq *); 201 #endif 202 static int alloc_eq(struct adapter *, struct port_info *, struct sge_eq *); 203 static int free_eq(struct adapter *, struct sge_eq *); 204 static int alloc_wrq(struct adapter *, struct port_info *, struct sge_wrq *, 205 struct sysctl_oid *); 206 static int free_wrq(struct adapter *, struct sge_wrq *); 207 static int alloc_txq(struct port_info *, struct sge_txq *, int, 208 struct sysctl_oid *); 209 static int free_txq(struct port_info *, struct sge_txq *); 210 static void oneseg_dma_callback(void *, bus_dma_segment_t *, int, int); 211 static inline bool is_new_response(const struct sge_iq *, struct rsp_ctrl **); 212 static inline void iq_next(struct sge_iq *); 213 static inline void ring_fl_db(struct adapter *, struct sge_fl *); 214 static int refill_fl(struct adapter *, struct sge_fl *, int); 215 static void refill_sfl(void *); 216 static int alloc_fl_sdesc(struct sge_fl *); 217 static void free_fl_sdesc(struct adapter *, struct sge_fl *); 218 static void find_best_refill_source(struct adapter *, struct sge_fl *, int); 219 static void find_safe_refill_source(struct adapter *, struct sge_fl *); 220 static void add_fl_to_sfl(struct adapter *, struct sge_fl *); 221 222 static int get_pkt_sgl(struct sge_txq *, struct mbuf **, struct sgl *, int); 223 static int free_pkt_sgl(struct sge_txq *, struct sgl *); 224 static int write_txpkt_wr(struct port_info *, struct sge_txq *, struct mbuf *, 225 struct sgl *); 226 static int add_to_txpkts(struct port_info *, struct sge_txq *, struct txpkts *, 227 struct mbuf *, struct sgl *); 228 static void write_txpkts_wr(struct sge_txq *, struct txpkts *); 229 static 
inline void write_ulp_cpl_sgl(struct port_info *, struct sge_txq *, 230 struct txpkts *, struct mbuf *, struct sgl *); 231 static int write_sgl_to_txd(struct sge_eq *, struct sgl *, caddr_t *); 232 static inline void copy_to_txd(struct sge_eq *, caddr_t, caddr_t *, int); 233 static inline void ring_eq_db(struct adapter *, struct sge_eq *); 234 static inline int reclaimable(struct sge_eq *); 235 static int reclaim_tx_descs(struct sge_txq *, int, int); 236 static void write_eqflush_wr(struct sge_eq *); 237 static __be64 get_flit(bus_dma_segment_t *, int, int); 238 static int handle_sge_egr_update(struct sge_iq *, const struct rss_header *, 239 struct mbuf *); 240 static int handle_fw_msg(struct sge_iq *, const struct rss_header *, 241 struct mbuf *); 242 243 static int sysctl_uint16(SYSCTL_HANDLER_ARGS); 244 static int sysctl_bufsizes(SYSCTL_HANDLER_ARGS); 245 246 /* 247 * Called on MOD_LOAD. Validates and calculates the SGE tunables. 248 */ 249 void 250 t4_sge_modload(void) 251 { 252 int pad; 253 254 /* set pad to a reasonable powerof2 between 16 and 4096 (inclusive) */ 255 #if defined(__i386__) || defined(__amd64__) 256 pad = max(cpu_clflush_line_size, 16); 257 #else 258 pad = max(CACHE_LINE_SIZE, 16); 259 #endif 260 pad = min(pad, 4096); 261 262 if (fl_pktshift < 0 || fl_pktshift > 7) { 263 printf("Invalid hw.cxgbe.fl_pktshift value (%d)," 264 " using 2 instead.\n", fl_pktshift); 265 fl_pktshift = 2; 266 } 267 268 if (fl_pad != 0 && 269 (fl_pad < 32 || fl_pad > 4096 || !powerof2(fl_pad))) { 270 271 if (fl_pad != -1) { 272 printf("Invalid hw.cxgbe.fl_pad value (%d)," 273 " using %d instead.\n", fl_pad, max(pad, 32)); 274 } 275 fl_pad = max(pad, 32); 276 } 277 278 /* 279 * T4 has the same pad and pack boundary. If a pad boundary is set, 280 * pack boundary must be set to the same value. Otherwise take the 281 * specified value or auto-calculate something reasonable. 282 */ 283 if (fl_pad) 284 t4_fl_pack = fl_pad; 285 else if (fl_pack < 32 || fl_pack > 4096 || !powerof2(fl_pack)) 286 t4_fl_pack = max(pad, 32); 287 else 288 t4_fl_pack = fl_pack; 289 290 /* T5's pack boundary is independent of the pad boundary. */ 291 if (fl_pack < 16 || fl_pack == 32 || fl_pack > 4096 || 292 !powerof2(fl_pack)) 293 t5_fl_pack = max(pad, CACHE_LINE_SIZE); 294 else 295 t5_fl_pack = fl_pack; 296 297 if (spg_len != 64 && spg_len != 128) { 298 int len; 299 300 #if defined(__i386__) || defined(__amd64__) 301 len = cpu_clflush_line_size > 64 ? 128 : 64; 302 #else 303 len = 64; 304 #endif 305 if (spg_len != -1) { 306 printf("Invalid hw.cxgbe.spg_len value (%d)," 307 " using %d instead.\n", spg_len, len); 308 } 309 spg_len = len; 310 } 311 312 if (cong_drop < -1 || cong_drop > 1) { 313 printf("Invalid hw.cxgbe.cong_drop value (%d)," 314 " using 0 instead.\n", cong_drop); 315 cong_drop = 0; 316 } 317 } 318 319 void 320 t4_init_sge_cpl_handlers(struct adapter *sc) 321 { 322 323 t4_register_cpl_handler(sc, CPL_FW4_MSG, handle_fw_msg); 324 t4_register_cpl_handler(sc, CPL_FW6_MSG, handle_fw_msg); 325 t4_register_cpl_handler(sc, CPL_SGE_EGR_UPDATE, handle_sge_egr_update); 326 t4_register_cpl_handler(sc, CPL_RX_PKT, t4_eth_rx); 327 t4_register_fw_msg_handler(sc, FW6_TYPE_CMD_RPL, t4_handle_fw_rpl); 328 } 329 330 /* 331 * adap->params.vpd.cclk must be set up before this is called. 
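 *
 * The interrupt holdoff timers programmed below are specified in core clock
 * ticks, so cclk (the core clock in kHz) is needed to convert the
 * intr_timer[] microsecond values and to compute timer_max.  Rough numbers,
 * assuming a 16-bit TIMERVALUE field and a 250 MHz core clock
 * (cclk = 250000): timer_max = 0xffff * 1000 / 250000, about 262us, and a
 * 100us setting becomes us_to_core_ticks(sc, 100) = 100 * 250000 / 1000 =
 * 25000 ticks.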
332 */ 333 void 334 t4_tweak_chip_settings(struct adapter *sc) 335 { 336 int i; 337 uint32_t v, m; 338 int intr_timer[SGE_NTIMERS] = {1, 5, 10, 50, 100, 200}; 339 int timer_max = M_TIMERVALUE0 * 1000 / sc->params.vpd.cclk; 340 int intr_pktcount[SGE_NCOUNTERS] = {1, 8, 16, 32}; /* 63 max */ 341 uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE); 342 static int sge_flbuf_sizes[] = { 343 MCLBYTES, 344 #if MJUMPAGESIZE != MCLBYTES 345 MJUMPAGESIZE, 346 MJUMPAGESIZE - CL_METADATA_SIZE, 347 MJUMPAGESIZE - 2 * MSIZE - CL_METADATA_SIZE, 348 #endif 349 MJUM9BYTES, 350 MJUM16BYTES, 351 MCLBYTES - MSIZE - CL_METADATA_SIZE, 352 MJUM9BYTES - CL_METADATA_SIZE, 353 MJUM16BYTES - CL_METADATA_SIZE, 354 }; 355 356 KASSERT(sc->flags & MASTER_PF, 357 ("%s: trying to change chip settings when not master.", __func__)); 358 359 m = V_PKTSHIFT(M_PKTSHIFT) | F_RXPKTCPLMODE | F_EGRSTATUSPAGESIZE; 360 v = V_PKTSHIFT(fl_pktshift) | F_RXPKTCPLMODE | 361 V_EGRSTATUSPAGESIZE(spg_len == 128); 362 if (is_t4(sc) && (fl_pad || buffer_packing)) { 363 /* t4_fl_pack has the correct value even when fl_pad = 0 */ 364 m |= V_INGPADBOUNDARY(M_INGPADBOUNDARY); 365 v |= V_INGPADBOUNDARY(ilog2(t4_fl_pack) - 5); 366 } else if (is_t5(sc) && fl_pad) { 367 m |= V_INGPADBOUNDARY(M_INGPADBOUNDARY); 368 v |= V_INGPADBOUNDARY(ilog2(fl_pad) - 5); 369 } 370 t4_set_reg_field(sc, A_SGE_CONTROL, m, v); 371 372 if (is_t5(sc) && buffer_packing) { 373 m = V_INGPACKBOUNDARY(M_INGPACKBOUNDARY); 374 if (t5_fl_pack == 16) 375 v = V_INGPACKBOUNDARY(0); 376 else 377 v = V_INGPACKBOUNDARY(ilog2(t5_fl_pack) - 5); 378 t4_set_reg_field(sc, A_SGE_CONTROL2, m, v); 379 } 380 381 v = V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10) | 382 V_HOSTPAGESIZEPF1(PAGE_SHIFT - 10) | 383 V_HOSTPAGESIZEPF2(PAGE_SHIFT - 10) | 384 V_HOSTPAGESIZEPF3(PAGE_SHIFT - 10) | 385 V_HOSTPAGESIZEPF4(PAGE_SHIFT - 10) | 386 V_HOSTPAGESIZEPF5(PAGE_SHIFT - 10) | 387 V_HOSTPAGESIZEPF6(PAGE_SHIFT - 10) | 388 V_HOSTPAGESIZEPF7(PAGE_SHIFT - 10); 389 t4_write_reg(sc, A_SGE_HOST_PAGE_SIZE, v); 390 391 KASSERT(nitems(sge_flbuf_sizes) <= SGE_FLBUF_SIZES, 392 ("%s: hw buffer size table too big", __func__)); 393 for (i = 0; i < min(nitems(sge_flbuf_sizes), SGE_FLBUF_SIZES); i++) { 394 t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE0 + (4 * i), 395 sge_flbuf_sizes[i]); 396 } 397 398 v = V_THRESHOLD_0(intr_pktcount[0]) | V_THRESHOLD_1(intr_pktcount[1]) | 399 V_THRESHOLD_2(intr_pktcount[2]) | V_THRESHOLD_3(intr_pktcount[3]); 400 t4_write_reg(sc, A_SGE_INGRESS_RX_THRESHOLD, v); 401 402 KASSERT(intr_timer[0] <= timer_max, 403 ("%s: not a single usable timer (%d, %d)", __func__, intr_timer[0], 404 timer_max)); 405 for (i = 1; i < nitems(intr_timer); i++) { 406 KASSERT(intr_timer[i] >= intr_timer[i - 1], 407 ("%s: timers not listed in increasing order (%d)", 408 __func__, i)); 409 410 while (intr_timer[i] > timer_max) { 411 if (i == nitems(intr_timer) - 1) { 412 intr_timer[i] = timer_max; 413 break; 414 } 415 intr_timer[i] += intr_timer[i - 1]; 416 intr_timer[i] /= 2; 417 } 418 } 419 420 v = V_TIMERVALUE0(us_to_core_ticks(sc, intr_timer[0])) | 421 V_TIMERVALUE1(us_to_core_ticks(sc, intr_timer[1])); 422 t4_write_reg(sc, A_SGE_TIMER_VALUE_0_AND_1, v); 423 v = V_TIMERVALUE2(us_to_core_ticks(sc, intr_timer[2])) | 424 V_TIMERVALUE3(us_to_core_ticks(sc, intr_timer[3])); 425 t4_write_reg(sc, A_SGE_TIMER_VALUE_2_AND_3, v); 426 v = V_TIMERVALUE4(us_to_core_ticks(sc, intr_timer[4])) | 427 V_TIMERVALUE5(us_to_core_ticks(sc, intr_timer[5])); 428 t4_write_reg(sc, A_SGE_TIMER_VALUE_4_AND_5, v); 429 430 if (cong_drop == 0) { 431 m = 
F_TUNNELCNGDROP0 | F_TUNNELCNGDROP1 | F_TUNNELCNGDROP2 | 432 F_TUNNELCNGDROP3; 433 t4_set_reg_field(sc, A_TP_PARA_REG3, m, 0); 434 } 435 436 /* 4K, 16K, 64K, 256K DDP "page sizes" */ 437 v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6); 438 t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, v); 439 440 m = v = F_TDDPTAGTCB; 441 t4_set_reg_field(sc, A_ULP_RX_CTL, m, v); 442 443 m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET | 444 F_RESETDDPOFFSET; 445 v = V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET; 446 t4_set_reg_field(sc, A_TP_PARA_REG5, m, v); 447 } 448 449 /* 450 * SGE wants the buffer to be at least 64B and then a multiple of the pad 451 * boundary or 16, whichever is greater. 452 */ 453 static inline int 454 hwsz_ok(int hwsz) 455 { 456 int mask = max(fl_pad, 16) - 1; 457 458 return (hwsz >= 64 && (hwsz & mask) == 0); 459 } 460 461 /* 462 * XXX: driver really should be able to deal with unexpected settings. 463 */ 464 int 465 t4_read_chip_settings(struct adapter *sc) 466 { 467 struct sge *s = &sc->sge; 468 int i, j, n, rc = 0; 469 uint32_t m, v, r; 470 uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE); 471 static int sw_buf_sizes[] = { /* Sorted by size */ 472 MCLBYTES, 473 #if MJUMPAGESIZE != MCLBYTES 474 MJUMPAGESIZE, 475 #endif 476 MJUM9BYTES, 477 MJUM16BYTES 478 }; 479 struct sw_zone_info *swz, *safe_swz; 480 struct hw_buf_info *hwb; 481 482 m = V_PKTSHIFT(M_PKTSHIFT) | F_RXPKTCPLMODE | F_EGRSTATUSPAGESIZE; 483 v = V_PKTSHIFT(fl_pktshift) | F_RXPKTCPLMODE | 484 V_EGRSTATUSPAGESIZE(spg_len == 128); 485 if (is_t4(sc) && (fl_pad || buffer_packing)) { 486 m |= V_INGPADBOUNDARY(M_INGPADBOUNDARY); 487 v |= V_INGPADBOUNDARY(ilog2(t4_fl_pack) - 5); 488 } else if (is_t5(sc) && fl_pad) { 489 m |= V_INGPADBOUNDARY(M_INGPADBOUNDARY); 490 v |= V_INGPADBOUNDARY(ilog2(fl_pad) - 5); 491 } 492 r = t4_read_reg(sc, A_SGE_CONTROL); 493 if ((r & m) != v) { 494 device_printf(sc->dev, "invalid SGE_CONTROL(0x%x)\n", r); 495 rc = EINVAL; 496 } 497 498 if (is_t5(sc) && buffer_packing) { 499 m = V_INGPACKBOUNDARY(M_INGPACKBOUNDARY); 500 if (t5_fl_pack == 16) 501 v = V_INGPACKBOUNDARY(0); 502 else 503 v = V_INGPACKBOUNDARY(ilog2(t5_fl_pack) - 5); 504 r = t4_read_reg(sc, A_SGE_CONTROL2); 505 if ((r & m) != v) { 506 device_printf(sc->dev, 507 "invalid SGE_CONTROL2(0x%x)\n", r); 508 rc = EINVAL; 509 } 510 } 511 s->pack_boundary = is_t4(sc) ? t4_fl_pack : t5_fl_pack; 512 513 v = V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10) | 514 V_HOSTPAGESIZEPF1(PAGE_SHIFT - 10) | 515 V_HOSTPAGESIZEPF2(PAGE_SHIFT - 10) | 516 V_HOSTPAGESIZEPF3(PAGE_SHIFT - 10) | 517 V_HOSTPAGESIZEPF4(PAGE_SHIFT - 10) | 518 V_HOSTPAGESIZEPF5(PAGE_SHIFT - 10) | 519 V_HOSTPAGESIZEPF6(PAGE_SHIFT - 10) | 520 V_HOSTPAGESIZEPF7(PAGE_SHIFT - 10); 521 r = t4_read_reg(sc, A_SGE_HOST_PAGE_SIZE); 522 if (r != v) { 523 device_printf(sc->dev, "invalid SGE_HOST_PAGE_SIZE(0x%x)\n", r); 524 rc = EINVAL; 525 } 526 527 /* Filter out unusable hw buffer sizes entirely (mark with -2). */ 528 hwb = &s->hw_buf_info[0]; 529 for (i = 0; i < nitems(s->hw_buf_info); i++, hwb++) { 530 r = t4_read_reg(sc, A_SGE_FL_BUFFER_SIZE0 + (4 * i)); 531 hwb->size = r; 532 hwb->zidx = hwsz_ok(r) ? -1 : -2; 533 hwb->next = -1; 534 } 535 536 /* 537 * Create a sorted list in decreasing order of hw buffer sizes (and so 538 * increasing order of spare area) for each software zone. 
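 *
 * Illustration (hypothetical, 4K page, all sizes passing hwsz_ok()): the
 * MJUMPAGESIZE zone would end up with the chain 4096 -> 4096 -
 * CL_METADATA_SIZE -> 4096 - 2 * MSIZE - CL_METADATA_SIZE, linked through
 * hw_buf_info[].next with head_hwidx/tail_hwidx marking the ends.  The
 * difference between swz->size and the hw buffer size is the spare area
 * that holds the cluster metadata and any inline mbufs.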
539 */ 540 n = 0; /* no usable buffer size to begin with */ 541 swz = &s->sw_zone_info[0]; 542 safe_swz = NULL; 543 for (i = 0; i < SW_ZONE_SIZES; i++, swz++) { 544 int8_t head = -1, tail = -1; 545 546 swz->size = sw_buf_sizes[i]; 547 swz->zone = m_getzone(swz->size); 548 swz->type = m_gettype(swz->size); 549 550 if (swz->size == safest_rx_cluster) 551 safe_swz = swz; 552 553 hwb = &s->hw_buf_info[0]; 554 for (j = 0; j < SGE_FLBUF_SIZES; j++, hwb++) { 555 if (hwb->zidx != -1 || hwb->size > swz->size) 556 continue; 557 hwb->zidx = i; 558 if (head == -1) 559 head = tail = j; 560 else if (hwb->size < s->hw_buf_info[tail].size) { 561 s->hw_buf_info[tail].next = j; 562 tail = j; 563 } else { 564 int8_t *cur; 565 struct hw_buf_info *t; 566 567 for (cur = &head; *cur != -1; cur = &t->next) { 568 t = &s->hw_buf_info[*cur]; 569 if (hwb->size == t->size) { 570 hwb->zidx = -2; 571 break; 572 } 573 if (hwb->size > t->size) { 574 hwb->next = *cur; 575 *cur = j; 576 break; 577 } 578 } 579 } 580 } 581 swz->head_hwidx = head; 582 swz->tail_hwidx = tail; 583 584 if (tail != -1) { 585 n++; 586 if (swz->size - s->hw_buf_info[tail].size >= 587 CL_METADATA_SIZE) 588 sc->flags |= BUF_PACKING_OK; 589 } 590 } 591 if (n == 0) { 592 device_printf(sc->dev, "no usable SGE FL buffer size.\n"); 593 rc = EINVAL; 594 } 595 596 s->safe_hwidx1 = -1; 597 s->safe_hwidx2 = -1; 598 if (safe_swz != NULL) { 599 s->safe_hwidx1 = safe_swz->head_hwidx; 600 for (i = safe_swz->head_hwidx; i != -1; i = hwb->next) { 601 int spare; 602 603 hwb = &s->hw_buf_info[i]; 604 spare = safe_swz->size - hwb->size; 605 if (spare < CL_METADATA_SIZE) 606 continue; 607 if (s->safe_hwidx2 == -1 || 608 spare == CL_METADATA_SIZE + MSIZE) 609 s->safe_hwidx2 = i; 610 if (spare >= CL_METADATA_SIZE + MSIZE) 611 break; 612 } 613 } 614 615 r = t4_read_reg(sc, A_SGE_INGRESS_RX_THRESHOLD); 616 s->counter_val[0] = G_THRESHOLD_0(r); 617 s->counter_val[1] = G_THRESHOLD_1(r); 618 s->counter_val[2] = G_THRESHOLD_2(r); 619 s->counter_val[3] = G_THRESHOLD_3(r); 620 621 r = t4_read_reg(sc, A_SGE_TIMER_VALUE_0_AND_1); 622 s->timer_val[0] = G_TIMERVALUE0(r) / core_ticks_per_usec(sc); 623 s->timer_val[1] = G_TIMERVALUE1(r) / core_ticks_per_usec(sc); 624 r = t4_read_reg(sc, A_SGE_TIMER_VALUE_2_AND_3); 625 s->timer_val[2] = G_TIMERVALUE2(r) / core_ticks_per_usec(sc); 626 s->timer_val[3] = G_TIMERVALUE3(r) / core_ticks_per_usec(sc); 627 r = t4_read_reg(sc, A_SGE_TIMER_VALUE_4_AND_5); 628 s->timer_val[4] = G_TIMERVALUE4(r) / core_ticks_per_usec(sc); 629 s->timer_val[5] = G_TIMERVALUE5(r) / core_ticks_per_usec(sc); 630 631 if (cong_drop == 0) { 632 m = F_TUNNELCNGDROP0 | F_TUNNELCNGDROP1 | F_TUNNELCNGDROP2 | 633 F_TUNNELCNGDROP3; 634 r = t4_read_reg(sc, A_TP_PARA_REG3); 635 if (r & m) { 636 device_printf(sc->dev, 637 "invalid TP_PARA_REG3(0x%x)\n", r); 638 rc = EINVAL; 639 } 640 } 641 642 v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6); 643 r = t4_read_reg(sc, A_ULP_RX_TDDP_PSZ); 644 if (r != v) { 645 device_printf(sc->dev, "invalid ULP_RX_TDDP_PSZ(0x%x)\n", r); 646 rc = EINVAL; 647 } 648 649 m = v = F_TDDPTAGTCB; 650 r = t4_read_reg(sc, A_ULP_RX_CTL); 651 if ((r & m) != v) { 652 device_printf(sc->dev, "invalid ULP_RX_CTL(0x%x)\n", r); 653 rc = EINVAL; 654 } 655 656 m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET | 657 F_RESETDDPOFFSET; 658 v = V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET; 659 r = t4_read_reg(sc, A_TP_PARA_REG5); 660 if ((r & m) != v) { 661 device_printf(sc->dev, "invalid TP_PARA_REG5(0x%x)\n", r); 662 rc = EINVAL; 663 } 664 665 r = 
t4_read_reg(sc, A_SGE_CONM_CTRL); 666 s->fl_starve_threshold = G_EGRTHRESHOLD(r) * 2 + 1; 667 if (is_t4(sc)) 668 s->fl_starve_threshold2 = s->fl_starve_threshold; 669 else 670 s->fl_starve_threshold2 = G_EGRTHRESHOLDPACKING(r) * 2 + 1; 671 672 /* egress queues: log2 of # of doorbells per BAR2 page */ 673 r = t4_read_reg(sc, A_SGE_EGRESS_QUEUES_PER_PAGE_PF); 674 r >>= S_QUEUESPERPAGEPF0 + 675 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * sc->pf; 676 s->eq_s_qpp = r & M_QUEUESPERPAGEPF0; 677 678 /* ingress queues: log2 of # of doorbells per BAR2 page */ 679 r = t4_read_reg(sc, A_SGE_INGRESS_QUEUES_PER_PAGE_PF); 680 r >>= S_QUEUESPERPAGEPF0 + 681 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * sc->pf; 682 s->iq_s_qpp = r & M_QUEUESPERPAGEPF0; 683 684 t4_init_tp_params(sc); 685 686 t4_read_mtu_tbl(sc, sc->params.mtus, NULL); 687 t4_load_mtus(sc, sc->params.mtus, sc->params.a_wnd, sc->params.b_wnd); 688 689 return (rc); 690 } 691 692 int 693 t4_create_dma_tag(struct adapter *sc) 694 { 695 int rc; 696 697 rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0, 698 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, 699 BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL, 700 NULL, &sc->dmat); 701 if (rc != 0) { 702 device_printf(sc->dev, 703 "failed to create main DMA tag: %d\n", rc); 704 } 705 706 return (rc); 707 } 708 709 static inline int 710 enable_buffer_packing(struct adapter *sc) 711 { 712 713 if (sc->flags & BUF_PACKING_OK && 714 ((is_t5(sc) && buffer_packing) || /* 1 or -1 both ok for T5 */ 715 (is_t4(sc) && buffer_packing == 1))) 716 return (1); 717 return (0); 718 } 719 720 void 721 t4_sge_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx, 722 struct sysctl_oid_list *children) 723 { 724 725 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "buffer_sizes", 726 CTLTYPE_STRING | CTLFLAG_RD, &sc->sge, 0, sysctl_bufsizes, "A", 727 "freelist buffer sizes"); 728 729 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pktshift", CTLFLAG_RD, 730 NULL, fl_pktshift, "payload DMA offset in rx buffer (bytes)"); 731 732 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pad", CTLFLAG_RD, 733 NULL, fl_pad, "payload pad boundary (bytes)"); 734 735 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "spg_len", CTLFLAG_RD, 736 NULL, spg_len, "status page size (bytes)"); 737 738 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cong_drop", CTLFLAG_RD, 739 NULL, cong_drop, "congestion drop setting"); 740 741 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "buffer_packing", CTLFLAG_RD, 742 NULL, enable_buffer_packing(sc), 743 "pack multiple frames in one fl buffer"); 744 745 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pack", CTLFLAG_RD, 746 NULL, sc->sge.pack_boundary, "payload pack boundary (bytes)"); 747 } 748 749 int 750 t4_destroy_dma_tag(struct adapter *sc) 751 { 752 if (sc->dmat) 753 bus_dma_tag_destroy(sc->dmat); 754 755 return (0); 756 } 757 758 /* 759 * Allocate and initialize the firmware event queue and the management queue. 760 * 761 * Returns errno on failure. Resources allocated up to that point may still be 762 * allocated. Caller is responsible for cleanup in case this function fails. 763 */ 764 int 765 t4_setup_adapter_queues(struct adapter *sc) 766 { 767 int rc; 768 769 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 770 771 sysctl_ctx_init(&sc->ctx); 772 sc->flags |= ADAP_SYSCTL_CTX; 773 774 /* 775 * Firmware event queue 776 */ 777 rc = alloc_fwq(sc); 778 if (rc != 0) 779 return (rc); 780 781 /* 782 * Management queue. This is just a control queue that uses the fwq as 783 * its associated iq. 
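 *
 * Both the fwq and the mgmtq are adapter-wide (one of each per adapter);
 * the per-port rx, tx, and control queues are created separately in
 * t4_setup_port_queues().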
784 */ 785 rc = alloc_mgmtq(sc); 786 787 return (rc); 788 } 789 790 /* 791 * Idempotent 792 */ 793 int 794 t4_teardown_adapter_queues(struct adapter *sc) 795 { 796 797 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 798 799 /* Do this before freeing the queue */ 800 if (sc->flags & ADAP_SYSCTL_CTX) { 801 sysctl_ctx_free(&sc->ctx); 802 sc->flags &= ~ADAP_SYSCTL_CTX; 803 } 804 805 free_mgmtq(sc); 806 free_fwq(sc); 807 808 return (0); 809 } 810 811 static inline int 812 first_vector(struct port_info *pi) 813 { 814 struct adapter *sc = pi->adapter; 815 int rc = T4_EXTRA_INTR, i; 816 817 if (sc->intr_count == 1) 818 return (0); 819 820 for_each_port(sc, i) { 821 struct port_info *p = sc->port[i]; 822 823 if (i == pi->port_id) 824 break; 825 826 #ifdef TCP_OFFLOAD 827 if (sc->flags & INTR_DIRECT) 828 rc += p->nrxq + p->nofldrxq; 829 else 830 rc += max(p->nrxq, p->nofldrxq); 831 #else 832 /* 833 * Not compiled with offload support and intr_count > 1. Only 834 * NIC queues exist and they'd better be taking direct 835 * interrupts. 836 */ 837 KASSERT(sc->flags & INTR_DIRECT, 838 ("%s: intr_count %d, !INTR_DIRECT", __func__, 839 sc->intr_count)); 840 841 rc += p->nrxq; 842 #endif 843 } 844 845 return (rc); 846 } 847 848 /* 849 * Given an arbitrary "index," come up with an iq that can be used by other 850 * queues (of this port) for interrupt forwarding, SGE egress updates, etc. 851 * The iq returned is guaranteed to be something that takes direct interrupts. 852 */ 853 static struct sge_iq * 854 port_intr_iq(struct port_info *pi, int idx) 855 { 856 struct adapter *sc = pi->adapter; 857 struct sge *s = &sc->sge; 858 struct sge_iq *iq = NULL; 859 860 if (sc->intr_count == 1) 861 return (&sc->sge.fwq); 862 863 #ifdef TCP_OFFLOAD 864 if (sc->flags & INTR_DIRECT) { 865 idx %= pi->nrxq + pi->nofldrxq; 866 867 if (idx >= pi->nrxq) { 868 idx -= pi->nrxq; 869 iq = &s->ofld_rxq[pi->first_ofld_rxq + idx].iq; 870 } else 871 iq = &s->rxq[pi->first_rxq + idx].iq; 872 873 } else { 874 idx %= max(pi->nrxq, pi->nofldrxq); 875 876 if (pi->nrxq >= pi->nofldrxq) 877 iq = &s->rxq[pi->first_rxq + idx].iq; 878 else 879 iq = &s->ofld_rxq[pi->first_ofld_rxq + idx].iq; 880 } 881 #else 882 /* 883 * Not compiled with offload support and intr_count > 1. Only NIC 884 * queues exist and they'd better be taking direct interrupts. 885 */ 886 KASSERT(sc->flags & INTR_DIRECT, 887 ("%s: intr_count %d, !INTR_DIRECT", __func__, sc->intr_count)); 888 889 idx %= pi->nrxq; 890 iq = &s->rxq[pi->first_rxq + idx].iq; 891 #endif 892 893 KASSERT(iq->flags & IQ_INTR, ("%s: EDOOFUS", __func__)); 894 return (iq); 895 } 896 897 /* Maximum payload that can be delivered with a single iq descriptor */ 898 static inline int 899 mtu_to_max_payload(struct adapter *sc, int mtu, const int toe) 900 { 901 int payload; 902 903 #ifdef TCP_OFFLOAD 904 if (toe) { 905 payload = sc->tt.rx_coalesce ? 
906 G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2)) : mtu; 907 } else { 908 #endif 909 /* large enough even when hw VLAN extraction is disabled */ 910 payload = fl_pktshift + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + 911 mtu; 912 #ifdef TCP_OFFLOAD 913 } 914 #endif 915 payload = roundup2(payload, fl_pad); 916 917 return (payload); 918 } 919 920 int 921 t4_setup_port_queues(struct port_info *pi) 922 { 923 int rc = 0, i, j, intr_idx, iqid; 924 struct sge_rxq *rxq; 925 struct sge_txq *txq; 926 struct sge_wrq *ctrlq; 927 #ifdef TCP_OFFLOAD 928 struct sge_ofld_rxq *ofld_rxq; 929 struct sge_wrq *ofld_txq; 930 struct sysctl_oid *oid2 = NULL; 931 #endif 932 char name[16]; 933 struct adapter *sc = pi->adapter; 934 struct ifnet *ifp = pi->ifp; 935 struct sysctl_oid *oid = device_get_sysctl_tree(pi->dev); 936 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 937 int maxp, pack, mtu = ifp->if_mtu; 938 939 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "rxq", CTLFLAG_RD, 940 NULL, "rx queues"); 941 942 #ifdef TCP_OFFLOAD 943 if (is_offload(sc)) { 944 oid2 = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "ofld_rxq", 945 CTLFLAG_RD, NULL, 946 "rx queues for offloaded TCP connections"); 947 } 948 #endif 949 950 /* Interrupt vector to start from (when using multiple vectors) */ 951 intr_idx = first_vector(pi); 952 953 /* 954 * First pass over all rx queues (NIC and TOE): 955 * a) initialize iq and fl 956 * b) allocate queue iff it will take direct interrupts. 957 */ 958 maxp = mtu_to_max_payload(sc, mtu, 0); 959 pack = enable_buffer_packing(sc); 960 for_each_rxq(pi, i, rxq) { 961 962 init_iq(&rxq->iq, sc, pi->tmr_idx, pi->pktc_idx, pi->qsize_rxq, 963 RX_IQ_ESIZE); 964 965 snprintf(name, sizeof(name), "%s rxq%d-fl", 966 device_get_nameunit(pi->dev), i); 967 init_fl(sc, &rxq->fl, pi->qsize_rxq / 8, maxp, pack, name); 968 969 if (sc->flags & INTR_DIRECT 970 #ifdef TCP_OFFLOAD 971 || (sc->intr_count > 1 && pi->nrxq >= pi->nofldrxq) 972 #endif 973 ) { 974 rxq->iq.flags |= IQ_INTR; 975 rc = alloc_rxq(pi, rxq, intr_idx, i, oid); 976 if (rc != 0) 977 goto done; 978 intr_idx++; 979 } 980 } 981 982 #ifdef TCP_OFFLOAD 983 maxp = mtu_to_max_payload(sc, mtu, 1); 984 for_each_ofld_rxq(pi, i, ofld_rxq) { 985 986 init_iq(&ofld_rxq->iq, sc, pi->tmr_idx, pi->pktc_idx, 987 pi->qsize_rxq, RX_IQ_ESIZE); 988 989 snprintf(name, sizeof(name), "%s ofld_rxq%d-fl", 990 device_get_nameunit(pi->dev), i); 991 init_fl(sc, &ofld_rxq->fl, pi->qsize_rxq / 8, maxp, pack, name); 992 993 if (sc->flags & INTR_DIRECT || 994 (sc->intr_count > 1 && pi->nofldrxq > pi->nrxq)) { 995 ofld_rxq->iq.flags |= IQ_INTR; 996 rc = alloc_ofld_rxq(pi, ofld_rxq, intr_idx, i, oid2); 997 if (rc != 0) 998 goto done; 999 intr_idx++; 1000 } 1001 } 1002 #endif 1003 1004 /* 1005 * Second pass over all rx queues (NIC and TOE). The queues forwarding 1006 * their interrupts are allocated now. 1007 */ 1008 j = 0; 1009 for_each_rxq(pi, i, rxq) { 1010 if (rxq->iq.flags & IQ_INTR) 1011 continue; 1012 1013 intr_idx = port_intr_iq(pi, j)->abs_id; 1014 1015 rc = alloc_rxq(pi, rxq, intr_idx, i, oid); 1016 if (rc != 0) 1017 goto done; 1018 j++; 1019 } 1020 1021 #ifdef TCP_OFFLOAD 1022 for_each_ofld_rxq(pi, i, ofld_rxq) { 1023 if (ofld_rxq->iq.flags & IQ_INTR) 1024 continue; 1025 1026 intr_idx = port_intr_iq(pi, j)->abs_id; 1027 1028 rc = alloc_ofld_rxq(pi, ofld_rxq, intr_idx, i, oid2); 1029 if (rc != 0) 1030 goto done; 1031 j++; 1032 } 1033 #endif 1034 1035 /* 1036 * Now the tx queues. Only one pass needed. 
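 *
 * Each txq's eq is tied (via its iqid) to one of this port's
 * interrupt-capable ingress queues, picked round-robin by port_intr_iq(),
 * so that egress updates for the txq arrive on an iq that takes direct
 * interrupts.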
1037 */ 1038 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "txq", CTLFLAG_RD, 1039 NULL, "tx queues"); 1040 j = 0; 1041 for_each_txq(pi, i, txq) { 1042 uint16_t iqid; 1043 1044 iqid = port_intr_iq(pi, j)->cntxt_id; 1045 1046 snprintf(name, sizeof(name), "%s txq%d", 1047 device_get_nameunit(pi->dev), i); 1048 init_eq(&txq->eq, EQ_ETH, pi->qsize_txq, pi->tx_chan, iqid, 1049 name); 1050 1051 rc = alloc_txq(pi, txq, i, oid); 1052 if (rc != 0) 1053 goto done; 1054 j++; 1055 } 1056 1057 #ifdef TCP_OFFLOAD 1058 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "ofld_txq", 1059 CTLFLAG_RD, NULL, "tx queues for offloaded TCP connections"); 1060 for_each_ofld_txq(pi, i, ofld_txq) { 1061 uint16_t iqid; 1062 1063 iqid = port_intr_iq(pi, j)->cntxt_id; 1064 1065 snprintf(name, sizeof(name), "%s ofld_txq%d", 1066 device_get_nameunit(pi->dev), i); 1067 init_eq(&ofld_txq->eq, EQ_OFLD, pi->qsize_txq, pi->tx_chan, 1068 iqid, name); 1069 1070 snprintf(name, sizeof(name), "%d", i); 1071 oid2 = SYSCTL_ADD_NODE(&pi->ctx, SYSCTL_CHILDREN(oid), OID_AUTO, 1072 name, CTLFLAG_RD, NULL, "offload tx queue"); 1073 1074 rc = alloc_wrq(sc, pi, ofld_txq, oid2); 1075 if (rc != 0) 1076 goto done; 1077 j++; 1078 } 1079 #endif 1080 1081 /* 1082 * Finally, the control queue. 1083 */ 1084 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "ctrlq", CTLFLAG_RD, 1085 NULL, "ctrl queue"); 1086 ctrlq = &sc->sge.ctrlq[pi->port_id]; 1087 iqid = port_intr_iq(pi, 0)->cntxt_id; 1088 snprintf(name, sizeof(name), "%s ctrlq", device_get_nameunit(pi->dev)); 1089 init_eq(&ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, pi->tx_chan, iqid, name); 1090 rc = alloc_wrq(sc, pi, ctrlq, oid); 1091 1092 done: 1093 if (rc) 1094 t4_teardown_port_queues(pi); 1095 1096 return (rc); 1097 } 1098 1099 /* 1100 * Idempotent 1101 */ 1102 int 1103 t4_teardown_port_queues(struct port_info *pi) 1104 { 1105 int i; 1106 struct adapter *sc = pi->adapter; 1107 struct sge_rxq *rxq; 1108 struct sge_txq *txq; 1109 #ifdef TCP_OFFLOAD 1110 struct sge_ofld_rxq *ofld_rxq; 1111 struct sge_wrq *ofld_txq; 1112 #endif 1113 1114 /* Do this before freeing the queues */ 1115 if (pi->flags & PORT_SYSCTL_CTX) { 1116 sysctl_ctx_free(&pi->ctx); 1117 pi->flags &= ~PORT_SYSCTL_CTX; 1118 } 1119 1120 /* 1121 * Take down all the tx queues first, as they reference the rx queues 1122 * (for egress updates, etc.). 1123 */ 1124 1125 free_wrq(sc, &sc->sge.ctrlq[pi->port_id]); 1126 1127 for_each_txq(pi, i, txq) { 1128 free_txq(pi, txq); 1129 } 1130 1131 #ifdef TCP_OFFLOAD 1132 for_each_ofld_txq(pi, i, ofld_txq) { 1133 free_wrq(sc, ofld_txq); 1134 } 1135 #endif 1136 1137 /* 1138 * Then take down the rx queues that forward their interrupts, as they 1139 * reference other rx queues. 1140 */ 1141 1142 for_each_rxq(pi, i, rxq) { 1143 if ((rxq->iq.flags & IQ_INTR) == 0) 1144 free_rxq(pi, rxq); 1145 } 1146 1147 #ifdef TCP_OFFLOAD 1148 for_each_ofld_rxq(pi, i, ofld_rxq) { 1149 if ((ofld_rxq->iq.flags & IQ_INTR) == 0) 1150 free_ofld_rxq(pi, ofld_rxq); 1151 } 1152 #endif 1153 1154 /* 1155 * Then take down the rx queues that take direct interrupts. 1156 */ 1157 1158 for_each_rxq(pi, i, rxq) { 1159 if (rxq->iq.flags & IQ_INTR) 1160 free_rxq(pi, rxq); 1161 } 1162 1163 #ifdef TCP_OFFLOAD 1164 for_each_ofld_rxq(pi, i, ofld_rxq) { 1165 if (ofld_rxq->iq.flags & IQ_INTR) 1166 free_ofld_rxq(pi, ofld_rxq); 1167 } 1168 #endif 1169 1170 return (0); 1171 } 1172 1173 /* 1174 * Deals with errors and the firmware event queue. All data rx queues forward 1175 * their interrupt to the firmware event queue. 
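 * This handler is used when the adapter runs with a single interrupt
 * vector.  With multiple vectors the first T4_EXTRA_INTR vectors are
 * reserved for adapter-wide sources (see first_vector() below) and the
 * error, event, and per-queue interrupts are dispatched to t4_intr_err(),
 * t4_intr_evt(), and t4_intr() instead.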
1176 */ 1177 void 1178 t4_intr_all(void *arg) 1179 { 1180 struct adapter *sc = arg; 1181 struct sge_iq *fwq = &sc->sge.fwq; 1182 1183 t4_intr_err(arg); 1184 if (atomic_cmpset_int(&fwq->state, IQS_IDLE, IQS_BUSY)) { 1185 service_iq(fwq, 0); 1186 atomic_cmpset_int(&fwq->state, IQS_BUSY, IQS_IDLE); 1187 } 1188 } 1189 1190 /* Deals with error interrupts */ 1191 void 1192 t4_intr_err(void *arg) 1193 { 1194 struct adapter *sc = arg; 1195 1196 t4_write_reg(sc, MYPF_REG(A_PCIE_PF_CLI), 0); 1197 t4_slow_intr_handler(sc); 1198 } 1199 1200 void 1201 t4_intr_evt(void *arg) 1202 { 1203 struct sge_iq *iq = arg; 1204 1205 if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) { 1206 service_iq(iq, 0); 1207 atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE); 1208 } 1209 } 1210 1211 void 1212 t4_intr(void *arg) 1213 { 1214 struct sge_iq *iq = arg; 1215 1216 if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) { 1217 service_iq(iq, 0); 1218 atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE); 1219 } 1220 } 1221 1222 /* 1223 * Deals with anything and everything on the given ingress queue. 1224 */ 1225 static int 1226 service_iq(struct sge_iq *iq, int budget) 1227 { 1228 struct sge_iq *q; 1229 struct sge_rxq *rxq = iq_to_rxq(iq); /* Use iff iq is part of rxq */ 1230 struct sge_fl *fl = &rxq->fl; /* Use iff IQ_HAS_FL */ 1231 struct adapter *sc = iq->adapter; 1232 struct rsp_ctrl *ctrl; 1233 const struct rss_header *rss; 1234 int ndescs = 0, limit, fl_bufs_used = 0; 1235 int rsp_type; 1236 uint32_t lq; 1237 struct mbuf *m0; 1238 STAILQ_HEAD(, sge_iq) iql = STAILQ_HEAD_INITIALIZER(iql); 1239 #if defined(INET) || defined(INET6) 1240 const struct timeval lro_timeout = {0, sc->lro_timeout}; 1241 #endif 1242 1243 limit = budget ? budget : iq->qsize / 8; 1244 1245 KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq)); 1246 1247 /* 1248 * We always come back and check the descriptor ring for new indirect 1249 * interrupts and other responses after running a single handler. 1250 */ 1251 for (;;) { 1252 while (is_new_response(iq, &ctrl)) { 1253 1254 rmb(); 1255 1256 m0 = NULL; 1257 rsp_type = G_RSPD_TYPE(ctrl->u.type_gen); 1258 lq = be32toh(ctrl->pldbuflen_qid); 1259 rss = (const void *)iq->cdesc; 1260 1261 switch (rsp_type) { 1262 case X_RSPD_TYPE_FLBUF: 1263 1264 KASSERT(iq->flags & IQ_HAS_FL, 1265 ("%s: data for an iq (%p) with no freelist", 1266 __func__, iq)); 1267 1268 m0 = get_fl_payload(sc, fl, lq, &fl_bufs_used); 1269 if (__predict_false(m0 == NULL)) 1270 goto process_iql; 1271 #ifdef T4_PKT_TIMESTAMP 1272 /* 1273 * 60 bit timestamp for the payload is 1274 * *(uint64_t *)m0->m_pktdat. Note that it is 1275 * in the leading free-space in the mbuf. The 1276 * kernel can clobber it during a pullup, 1277 * m_copymdata, etc. You need to make sure that 1278 * the mbuf reaches you unmolested if you care 1279 * about the timestamp. 1280 */ 1281 *(uint64_t *)m0->m_pktdat = 1282 be64toh(ctrl->u.last_flit) & 1283 0xfffffffffffffff; 1284 #endif 1285 1286 /* fall through */ 1287 1288 case X_RSPD_TYPE_CPL: 1289 KASSERT(rss->opcode < NUM_CPL_CMDS, 1290 ("%s: bad opcode %02x.", __func__, 1291 rss->opcode)); 1292 sc->cpl_handler[rss->opcode](iq, rss, m0); 1293 break; 1294 1295 case X_RSPD_TYPE_INTR: 1296 1297 /* 1298 * Interrupts should be forwarded only to queues 1299 * that are not forwarding their interrupts. 1300 * This means service_iq can recurse but only 1 1301 * level deep. 
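 * (Hence the KASSERT below: the nested service_iq() call is always made
 * with a non-zero budget, and a queue that forwards its interrupts should
 * never see a forwarded-interrupt entry of its own.)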
1302 */ 1303 KASSERT(budget == 0, 1304 ("%s: budget %u, rsp_type %u", __func__, 1305 budget, rsp_type)); 1306 1307 /* 1308 * There are 1K interrupt-capable queues (qids 0 1309 * through 1023). A response type indicating a 1310 * forwarded interrupt with a qid >= 1K is an 1311 * iWARP async notification. 1312 */ 1313 if (lq >= 1024) { 1314 sc->an_handler(iq, ctrl); 1315 break; 1316 } 1317 1318 q = sc->sge.iqmap[lq - sc->sge.iq_start]; 1319 if (atomic_cmpset_int(&q->state, IQS_IDLE, 1320 IQS_BUSY)) { 1321 if (service_iq(q, q->qsize / 8) == 0) { 1322 atomic_cmpset_int(&q->state, 1323 IQS_BUSY, IQS_IDLE); 1324 } else { 1325 STAILQ_INSERT_TAIL(&iql, q, 1326 link); 1327 } 1328 } 1329 break; 1330 1331 default: 1332 KASSERT(0, 1333 ("%s: illegal response type %d on iq %p", 1334 __func__, rsp_type, iq)); 1335 log(LOG_ERR, 1336 "%s: illegal response type %d on iq %p", 1337 device_get_nameunit(sc->dev), rsp_type, iq); 1338 break; 1339 } 1340 1341 if (fl_bufs_used >= 16) { 1342 FL_LOCK(fl); 1343 fl->needed += fl_bufs_used; 1344 refill_fl(sc, fl, 32); 1345 FL_UNLOCK(fl); 1346 fl_bufs_used = 0; 1347 } 1348 1349 iq_next(iq); 1350 if (++ndescs == limit) { 1351 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), 1352 V_CIDXINC(ndescs) | 1353 V_INGRESSQID(iq->cntxt_id) | 1354 V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX))); 1355 ndescs = 0; 1356 1357 #if defined(INET) || defined(INET6) 1358 if (iq->flags & IQ_LRO_ENABLED && 1359 sc->lro_timeout != 0) { 1360 tcp_lro_flush_inactive(&rxq->lro, 1361 &lro_timeout); 1362 } 1363 #endif 1364 1365 if (budget) 1366 return (EINPROGRESS); 1367 } 1368 } 1369 1370 process_iql: 1371 if (STAILQ_EMPTY(&iql)) 1372 break; 1373 1374 /* 1375 * Process the head only, and send it to the back of the list if 1376 * it's still not done. 1377 */ 1378 q = STAILQ_FIRST(&iql); 1379 STAILQ_REMOVE_HEAD(&iql, link); 1380 if (service_iq(q, q->qsize / 8) == 0) 1381 atomic_cmpset_int(&q->state, IQS_BUSY, IQS_IDLE); 1382 else 1383 STAILQ_INSERT_TAIL(&iql, q, link); 1384 } 1385 1386 #if defined(INET) || defined(INET6) 1387 if (iq->flags & IQ_LRO_ENABLED) { 1388 struct lro_ctrl *lro = &rxq->lro; 1389 struct lro_entry *l; 1390 1391 while (!SLIST_EMPTY(&lro->lro_active)) { 1392 l = SLIST_FIRST(&lro->lro_active); 1393 SLIST_REMOVE_HEAD(&lro->lro_active, next); 1394 tcp_lro_flush(lro, l); 1395 } 1396 } 1397 #endif 1398 1399 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_CIDXINC(ndescs) | 1400 V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params)); 1401 1402 if (iq->flags & IQ_HAS_FL) { 1403 int starved; 1404 1405 FL_LOCK(fl); 1406 fl->needed += fl_bufs_used; 1407 starved = refill_fl(sc, fl, 64); 1408 FL_UNLOCK(fl); 1409 if (__predict_false(starved != 0)) 1410 add_fl_to_sfl(sc, fl); 1411 } 1412 1413 return (0); 1414 } 1415 1416 static inline int 1417 cl_has_metadata(struct sge_fl *fl, struct cluster_layout *cll) 1418 { 1419 int rc = fl->flags & FL_BUF_PACKING || cll->region1 > 0; 1420 1421 if (rc) 1422 MPASS(cll->region3 >= CL_METADATA_SIZE); 1423 1424 return (rc); 1425 } 1426 1427 static inline struct cluster_metadata * 1428 cl_metadata(struct adapter *sc, struct sge_fl *fl, struct cluster_layout *cll, 1429 caddr_t cl) 1430 { 1431 1432 if (cl_has_metadata(fl, cll)) { 1433 struct sw_zone_info *swz = &sc->sge.sw_zone_info[cll->zidx]; 1434 1435 return ((struct cluster_metadata *)(cl + swz->size) - 1); 1436 } 1437 return (NULL); 1438 } 1439 1440 static int 1441 rxb_free(struct mbuf *m, void *arg1, void *arg2) 1442 { 1443 uma_zone_t zone = arg1; 1444 caddr_t cl = arg2; 1445 1446 uma_zfree(zone, cl); 1447 
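	/*
	 * This is the ext_free callback installed by m_extaddref() in
	 * get_scatter_segment(); it runs when the last mbuf referencing the
	 * cluster is freed and returns the cluster to its UMA zone.
	 */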
1448 return (EXT_FREE_OK); 1449 } 1450 1451 /* 1452 * The mbuf returned by this function could be allocated from zone_mbuf or 1453 * constructed in spare room in the cluster. 1454 * 1455 * The mbuf carries the payload in one of these ways 1456 * a) frame inside the mbuf (mbuf from zone_mbuf) 1457 * b) m_cljset (for clusters without metadata) zone_mbuf 1458 * c) m_extaddref (cluster with metadata) inline mbuf 1459 * d) m_extaddref (cluster with metadata) zone_mbuf 1460 */ 1461 static struct mbuf * 1462 get_scatter_segment(struct adapter *sc, struct sge_fl *fl, int total, int flags) 1463 { 1464 struct mbuf *m; 1465 struct fl_sdesc *sd = &fl->sdesc[fl->cidx]; 1466 struct cluster_layout *cll = &sd->cll; 1467 struct sw_zone_info *swz = &sc->sge.sw_zone_info[cll->zidx]; 1468 struct hw_buf_info *hwb = &sc->sge.hw_buf_info[cll->hwidx]; 1469 struct cluster_metadata *clm = cl_metadata(sc, fl, cll, sd->cl); 1470 int len, padded_len; 1471 caddr_t payload; 1472 1473 len = min(total, hwb->size - fl->rx_offset); 1474 padded_len = roundup2(len, fl_pad); 1475 payload = sd->cl + cll->region1 + fl->rx_offset; 1476 1477 if (sc->sc_do_rxcopy && len < RX_COPY_THRESHOLD) { 1478 1479 /* 1480 * Copy payload into a freshly allocated mbuf. 1481 */ 1482 1483 m = flags & M_PKTHDR ? 1484 m_gethdr(M_NOWAIT, MT_DATA) : m_get(M_NOWAIT, MT_DATA); 1485 if (m == NULL) 1486 return (NULL); 1487 fl->mbuf_allocated++; 1488 #ifdef T4_PKT_TIMESTAMP 1489 /* Leave room for a timestamp */ 1490 m->m_data += 8; 1491 #endif 1492 /* copy data to mbuf */ 1493 bcopy(payload, mtod(m, caddr_t), len); 1494 1495 } else if (sd->nmbuf * MSIZE < cll->region1) { 1496 1497 /* 1498 * There's spare room in the cluster for an mbuf. Create one 1499 * and associate it with the payload that's in the cluster too. 1500 */ 1501 1502 MPASS(clm != NULL); 1503 m = (struct mbuf *)(sd->cl + sd->nmbuf * MSIZE); 1504 /* No bzero required */ 1505 if (m_init(m, NULL, 0, M_NOWAIT, MT_DATA, flags | M_NOFREE)) 1506 return (NULL); 1507 fl->mbuf_inlined++; 1508 m_extaddref(m, payload, padded_len, &clm->refcount, rxb_free, 1509 swz->zone, sd->cl); 1510 sd->nmbuf++; 1511 1512 } else { 1513 1514 /* 1515 * Grab an mbuf from zone_mbuf and associate it with the 1516 * payload in the cluster. 1517 */ 1518 1519 m = flags & M_PKTHDR ? 1520 m_gethdr(M_NOWAIT, MT_DATA) : m_get(M_NOWAIT, MT_DATA); 1521 if (m == NULL) 1522 return (NULL); 1523 fl->mbuf_allocated++; 1524 if (clm != NULL) 1525 m_extaddref(m, payload, padded_len, &clm->refcount, 1526 rxb_free, swz->zone, sd->cl); 1527 else { 1528 m_cljset(m, sd->cl, swz->type); 1529 sd->cl = NULL; /* consumed, not a recycle candidate */ 1530 } 1531 } 1532 if (flags & M_PKTHDR) 1533 m->m_pkthdr.len = total; 1534 m->m_len = len; 1535 1536 if (fl->flags & FL_BUF_PACKING) { 1537 fl->rx_offset += roundup2(padded_len, sc->sge.pack_boundary); 1538 MPASS(fl->rx_offset <= hwb->size); 1539 if (fl->rx_offset < hwb->size) 1540 return (m); /* without advancing the cidx */ 1541 } 1542 1543 if (__predict_false(++fl->cidx == fl->cap)) 1544 fl->cidx = 0; 1545 fl->rx_offset = 0; 1546 1547 return (m); 1548 } 1549 1550 static struct mbuf * 1551 get_fl_payload(struct adapter *sc, struct sge_fl *fl, uint32_t len_newbuf, 1552 int *fl_bufs_used) 1553 { 1554 struct mbuf *m0, *m, **pnext; 1555 u_int nbuf, len; 1556 1557 /* 1558 * No assertion for the fl lock because we don't need it. This routine 1559 * is called only from the rx interrupt handler and it only updates 1560 * fl->cidx. 
(Contrast that with fl->pidx/fl->needed which could be 1561 * updated in the rx interrupt handler or the starvation helper routine. 1562 * That's why code that manipulates fl->pidx/fl->needed needs the fl 1563 * lock but this routine does not). 1564 */ 1565 1566 nbuf = 0; 1567 len = G_RSPD_LEN(len_newbuf); 1568 if (__predict_false(fl->m0 != NULL)) { 1569 MPASS(len == fl->m0->m_pkthdr.len); 1570 MPASS(fl->remaining < len); 1571 1572 m0 = fl->m0; 1573 pnext = fl->pnext; 1574 len = fl->remaining; 1575 fl->m0 = NULL; 1576 goto get_segment; 1577 } 1578 1579 if (fl->rx_offset > 0 && len_newbuf & F_RSPD_NEWBUF) { 1580 nbuf++; 1581 fl->rx_offset = 0; 1582 if (__predict_false(++fl->cidx == fl->cap)) 1583 fl->cidx = 0; 1584 } 1585 1586 /* 1587 * Payload starts at rx_offset in the current hw buffer. Its length is 1588 * 'len' and it may span multiple hw buffers. 1589 */ 1590 1591 m0 = get_scatter_segment(sc, fl, len, M_PKTHDR); 1592 len -= m0->m_len; 1593 pnext = &m0->m_next; 1594 while (len > 0) { 1595 nbuf++; 1596 get_segment: 1597 MPASS(fl->rx_offset == 0); 1598 m = get_scatter_segment(sc, fl, len, 0); 1599 if (m == NULL) { 1600 fl->m0 = m0; 1601 fl->pnext = pnext; 1602 fl->remaining = len; 1603 return (NULL); 1604 } 1605 *pnext = m; 1606 pnext = &m->m_next; 1607 len -= m->m_len; 1608 } 1609 *pnext = NULL; 1610 if (fl->rx_offset == 0) 1611 nbuf++; 1612 1613 (*fl_bufs_used) += nbuf; 1614 return (m0); 1615 } 1616 1617 static int 1618 t4_eth_rx(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m0) 1619 { 1620 struct sge_rxq *rxq = iq_to_rxq(iq); 1621 struct ifnet *ifp = rxq->ifp; 1622 const struct cpl_rx_pkt *cpl = (const void *)(rss + 1); 1623 #if defined(INET) || defined(INET6) 1624 struct lro_ctrl *lro = &rxq->lro; 1625 #endif 1626 1627 KASSERT(m0 != NULL, ("%s: no payload with opcode %02x", __func__, 1628 rss->opcode)); 1629 1630 m0->m_pkthdr.len -= fl_pktshift; 1631 m0->m_len -= fl_pktshift; 1632 m0->m_data += fl_pktshift; 1633 1634 m0->m_pkthdr.rcvif = ifp; 1635 m0->m_flags |= M_FLOWID; 1636 m0->m_pkthdr.flowid = be32toh(rss->hash_val); 1637 1638 if (cpl->csum_calc && !cpl->err_vec) { 1639 if (ifp->if_capenable & IFCAP_RXCSUM && 1640 cpl->l2info & htobe32(F_RXF_IP)) { 1641 m0->m_pkthdr.csum_flags = (CSUM_IP_CHECKED | 1642 CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR); 1643 rxq->rxcsum++; 1644 } else if (ifp->if_capenable & IFCAP_RXCSUM_IPV6 && 1645 cpl->l2info & htobe32(F_RXF_IP6)) { 1646 m0->m_pkthdr.csum_flags = (CSUM_DATA_VALID_IPV6 | 1647 CSUM_PSEUDO_HDR); 1648 rxq->rxcsum++; 1649 } 1650 1651 if (__predict_false(cpl->ip_frag)) 1652 m0->m_pkthdr.csum_data = be16toh(cpl->csum); 1653 else 1654 m0->m_pkthdr.csum_data = 0xffff; 1655 } 1656 1657 if (cpl->vlan_ex) { 1658 m0->m_pkthdr.ether_vtag = be16toh(cpl->vlan); 1659 m0->m_flags |= M_VLANTAG; 1660 rxq->vlan_extraction++; 1661 } 1662 1663 #if defined(INET) || defined(INET6) 1664 if (cpl->l2info & htobe32(F_RXF_LRO) && 1665 iq->flags & IQ_LRO_ENABLED && 1666 tcp_lro_rx(lro, m0, 0) == 0) { 1667 /* queued for LRO */ 1668 } else 1669 #endif 1670 ifp->if_input(ifp, m0); 1671 1672 return (0); 1673 } 1674 1675 /* 1676 * Doesn't fail. Holds on to work requests it can't send right away. 
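 *
 * Typical caller pattern (sketch only; alloc_wrqe, wrtod, and t4_wrq_tx are
 * the wrqe helpers declared in the driver's adapter.h):
 *
 *	wr = alloc_wrqe(len, wrq);
 *	if (wr == NULL)
 *		return (ENOMEM);
 *	... build the work request at wrtod(wr) ...
 *	t4_wrq_tx(sc, wr);	(takes the wrq lock and calls this function)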
1677 */ 1678 void 1679 t4_wrq_tx_locked(struct adapter *sc, struct sge_wrq *wrq, struct wrqe *wr) 1680 { 1681 struct sge_eq *eq = &wrq->eq; 1682 int can_reclaim; 1683 caddr_t dst; 1684 1685 TXQ_LOCK_ASSERT_OWNED(wrq); 1686 #ifdef TCP_OFFLOAD 1687 KASSERT((eq->flags & EQ_TYPEMASK) == EQ_OFLD || 1688 (eq->flags & EQ_TYPEMASK) == EQ_CTRL, 1689 ("%s: eq type %d", __func__, eq->flags & EQ_TYPEMASK)); 1690 #else 1691 KASSERT((eq->flags & EQ_TYPEMASK) == EQ_CTRL, 1692 ("%s: eq type %d", __func__, eq->flags & EQ_TYPEMASK)); 1693 #endif 1694 1695 if (__predict_true(wr != NULL)) 1696 STAILQ_INSERT_TAIL(&wrq->wr_list, wr, link); 1697 1698 can_reclaim = reclaimable(eq); 1699 if (__predict_false(eq->flags & EQ_STALLED)) { 1700 if (can_reclaim < tx_resume_threshold(eq)) 1701 return; 1702 eq->flags &= ~EQ_STALLED; 1703 eq->unstalled++; 1704 } 1705 eq->cidx += can_reclaim; 1706 eq->avail += can_reclaim; 1707 if (__predict_false(eq->cidx >= eq->cap)) 1708 eq->cidx -= eq->cap; 1709 1710 while ((wr = STAILQ_FIRST(&wrq->wr_list)) != NULL) { 1711 int ndesc; 1712 1713 if (__predict_false(wr->wr_len < 0 || 1714 wr->wr_len > SGE_MAX_WR_LEN || (wr->wr_len & 0x7))) { 1715 1716 #ifdef INVARIANTS 1717 panic("%s: work request with length %d", __func__, 1718 wr->wr_len); 1719 #endif 1720 #ifdef KDB 1721 kdb_backtrace(); 1722 #endif 1723 log(LOG_ERR, "%s: %s work request with length %d", 1724 device_get_nameunit(sc->dev), __func__, wr->wr_len); 1725 STAILQ_REMOVE_HEAD(&wrq->wr_list, link); 1726 free_wrqe(wr); 1727 continue; 1728 } 1729 1730 ndesc = howmany(wr->wr_len, EQ_ESIZE); 1731 if (eq->avail < ndesc) { 1732 wrq->no_desc++; 1733 break; 1734 } 1735 1736 dst = (void *)&eq->desc[eq->pidx]; 1737 copy_to_txd(eq, wrtod(wr), &dst, wr->wr_len); 1738 1739 eq->pidx += ndesc; 1740 eq->avail -= ndesc; 1741 if (__predict_false(eq->pidx >= eq->cap)) 1742 eq->pidx -= eq->cap; 1743 1744 eq->pending += ndesc; 1745 if (eq->pending >= 8) 1746 ring_eq_db(sc, eq); 1747 1748 wrq->tx_wrs++; 1749 STAILQ_REMOVE_HEAD(&wrq->wr_list, link); 1750 free_wrqe(wr); 1751 1752 if (eq->avail < 8) { 1753 can_reclaim = reclaimable(eq); 1754 eq->cidx += can_reclaim; 1755 eq->avail += can_reclaim; 1756 if (__predict_false(eq->cidx >= eq->cap)) 1757 eq->cidx -= eq->cap; 1758 } 1759 } 1760 1761 if (eq->pending) 1762 ring_eq_db(sc, eq); 1763 1764 if (wr != NULL) { 1765 eq->flags |= EQ_STALLED; 1766 if (callout_pending(&eq->tx_callout) == 0) 1767 callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq); 1768 } 1769 } 1770 1771 /* Per-packet header in a coalesced tx WR, before the SGL starts (in flits) */ 1772 #define TXPKTS_PKT_HDR ((\ 1773 sizeof(struct ulp_txpkt) + \ 1774 sizeof(struct ulptx_idata) + \ 1775 sizeof(struct cpl_tx_pkt_core) \ 1776 ) / 8) 1777 1778 /* Header of a coalesced tx WR, before SGL of first packet (in flits) */ 1779 #define TXPKTS_WR_HDR (\ 1780 sizeof(struct fw_eth_tx_pkts_wr) / 8 + \ 1781 TXPKTS_PKT_HDR) 1782 1783 /* Header of a tx WR, before SGL of first packet (in flits) */ 1784 #define TXPKT_WR_HDR ((\ 1785 sizeof(struct fw_eth_tx_pkt_wr) + \ 1786 sizeof(struct cpl_tx_pkt_core) \ 1787 ) / 8 ) 1788 1789 /* Header of a tx LSO WR, before SGL of first packet (in flits) */ 1790 #define TXPKT_LSO_WR_HDR ((\ 1791 sizeof(struct fw_eth_tx_pkt_wr) + \ 1792 sizeof(struct cpl_tx_pkt_lso_core) + \ 1793 sizeof(struct cpl_tx_pkt_core) \ 1794 ) / 8 ) 1795 1796 int 1797 t4_eth_tx(struct ifnet *ifp, struct sge_txq *txq, struct mbuf *m) 1798 { 1799 struct port_info *pi = (void *)ifp->if_softc; 1800 struct adapter *sc = pi->adapter; 1801 struct sge_eq 
*eq = &txq->eq; 1802 struct buf_ring *br = txq->br; 1803 struct mbuf *next; 1804 int rc, coalescing, can_reclaim; 1805 struct txpkts txpkts; 1806 struct sgl sgl; 1807 1808 TXQ_LOCK_ASSERT_OWNED(txq); 1809 KASSERT(m, ("%s: called with nothing to do.", __func__)); 1810 KASSERT((eq->flags & EQ_TYPEMASK) == EQ_ETH, 1811 ("%s: eq type %d", __func__, eq->flags & EQ_TYPEMASK)); 1812 1813 prefetch(&eq->desc[eq->pidx]); 1814 prefetch(&txq->sdesc[eq->pidx]); 1815 1816 txpkts.npkt = 0;/* indicates there's nothing in txpkts */ 1817 coalescing = 0; 1818 1819 can_reclaim = reclaimable(eq); 1820 if (__predict_false(eq->flags & EQ_STALLED)) { 1821 if (can_reclaim < tx_resume_threshold(eq)) { 1822 txq->m = m; 1823 return (0); 1824 } 1825 eq->flags &= ~EQ_STALLED; 1826 eq->unstalled++; 1827 } 1828 1829 if (__predict_false(eq->flags & EQ_DOOMED)) { 1830 m_freem(m); 1831 while ((m = buf_ring_dequeue_sc(txq->br)) != NULL) 1832 m_freem(m); 1833 return (ENETDOWN); 1834 } 1835 1836 if (eq->avail < 8 && can_reclaim) 1837 reclaim_tx_descs(txq, can_reclaim, 32); 1838 1839 for (; m; m = next ? next : drbr_dequeue(ifp, br)) { 1840 1841 if (eq->avail < 8) 1842 break; 1843 1844 next = m->m_nextpkt; 1845 m->m_nextpkt = NULL; 1846 1847 if (next || buf_ring_peek(br)) 1848 coalescing = 1; 1849 1850 rc = get_pkt_sgl(txq, &m, &sgl, coalescing); 1851 if (rc != 0) { 1852 if (rc == ENOMEM) { 1853 1854 /* Short of resources, suspend tx */ 1855 1856 m->m_nextpkt = next; 1857 break; 1858 } 1859 1860 /* 1861 * Unrecoverable error for this packet, throw it away 1862 * and move on to the next. get_pkt_sgl may already 1863 * have freed m (it will be NULL in that case and the 1864 * m_freem here is still safe). 1865 */ 1866 1867 m_freem(m); 1868 continue; 1869 } 1870 1871 if (coalescing && 1872 add_to_txpkts(pi, txq, &txpkts, m, &sgl) == 0) { 1873 1874 /* Successfully absorbed into txpkts */ 1875 1876 write_ulp_cpl_sgl(pi, txq, &txpkts, m, &sgl); 1877 goto doorbell; 1878 } 1879 1880 /* 1881 * We weren't coalescing to begin with, or current frame could 1882 * not be coalesced (add_to_txpkts flushes txpkts if a frame 1883 * given to it can't be coalesced). Either way there should be 1884 * nothing in txpkts. 1885 */ 1886 KASSERT(txpkts.npkt == 0, 1887 ("%s: txpkts not empty: %d", __func__, txpkts.npkt)); 1888 1889 /* We're sending out individual packets now */ 1890 coalescing = 0; 1891 1892 if (eq->avail < 8) 1893 reclaim_tx_descs(txq, 0, 8); 1894 rc = write_txpkt_wr(pi, txq, m, &sgl); 1895 if (rc != 0) { 1896 1897 /* Short of hardware descriptors, suspend tx */ 1898 1899 /* 1900 * This is an unlikely but expensive failure. We've 1901 * done all the hard work (DMA mappings etc.) and now we 1902 * can't send out the packet. What's worse, we have to 1903 * spend even more time freeing up everything in sgl. 1904 */ 1905 txq->no_desc++; 1906 free_pkt_sgl(txq, &sgl); 1907 1908 m->m_nextpkt = next; 1909 break; 1910 } 1911 1912 ETHER_BPF_MTAP(ifp, m); 1913 if (sgl.nsegs == 0) 1914 m_freem(m); 1915 doorbell: 1916 if (eq->pending >= 8) 1917 ring_eq_db(sc, eq); 1918 1919 can_reclaim = reclaimable(eq); 1920 if (can_reclaim >= 32) 1921 reclaim_tx_descs(txq, can_reclaim, 64); 1922 } 1923 1924 if (txpkts.npkt > 0) 1925 write_txpkts_wr(txq, &txpkts); 1926 1927 /* 1928 * m not NULL means there was an error but we haven't thrown it away. 1929 * This can happen when we're short of tx descriptors (no_desc) or maybe 1930 * even DMA maps (no_dmamap). Either way, a credit flush and reclaim 1931 * will get things going again. 
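 *
 * The flush is requested by write_eqflush_wr() below, which queues a
 * FW_EQ_FLUSH_WR on the eq; the resulting SGE egress update is delivered to
 * handle_sge_egr_update() (registered above for CPL_SGE_EGR_UPDATE) and
 * gets tx going again.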
1932 */ 1933 if (m && !(eq->flags & EQ_CRFLUSHED)) { 1934 struct tx_sdesc *txsd = &txq->sdesc[eq->pidx]; 1935 1936 /* 1937 * If EQ_CRFLUSHED is not set then we know we have at least one 1938 * available descriptor because any WR that reduces eq->avail to 1939 * 0 also sets EQ_CRFLUSHED. 1940 */ 1941 KASSERT(eq->avail > 0, ("%s: no space for eqflush.", __func__)); 1942 1943 txsd->desc_used = 1; 1944 txsd->credits = 0; 1945 write_eqflush_wr(eq); 1946 } 1947 txq->m = m; 1948 1949 if (eq->pending) 1950 ring_eq_db(sc, eq); 1951 1952 reclaim_tx_descs(txq, 0, 128); 1953 1954 if (eq->flags & EQ_STALLED && callout_pending(&eq->tx_callout) == 0) 1955 callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq); 1956 1957 return (0); 1958 } 1959 1960 void 1961 t4_update_fl_bufsize(struct ifnet *ifp) 1962 { 1963 struct port_info *pi = ifp->if_softc; 1964 struct adapter *sc = pi->adapter; 1965 struct sge_rxq *rxq; 1966 #ifdef TCP_OFFLOAD 1967 struct sge_ofld_rxq *ofld_rxq; 1968 #endif 1969 struct sge_fl *fl; 1970 int i, maxp, mtu = ifp->if_mtu; 1971 1972 maxp = mtu_to_max_payload(sc, mtu, 0); 1973 for_each_rxq(pi, i, rxq) { 1974 fl = &rxq->fl; 1975 1976 FL_LOCK(fl); 1977 find_best_refill_source(sc, fl, maxp); 1978 FL_UNLOCK(fl); 1979 } 1980 #ifdef TCP_OFFLOAD 1981 maxp = mtu_to_max_payload(sc, mtu, 1); 1982 for_each_ofld_rxq(pi, i, ofld_rxq) { 1983 fl = &ofld_rxq->fl; 1984 1985 FL_LOCK(fl); 1986 find_best_refill_source(sc, fl, maxp); 1987 FL_UNLOCK(fl); 1988 } 1989 #endif 1990 } 1991 1992 int 1993 can_resume_tx(struct sge_eq *eq) 1994 { 1995 return (reclaimable(eq) >= tx_resume_threshold(eq)); 1996 } 1997 1998 static inline void 1999 init_iq(struct sge_iq *iq, struct adapter *sc, int tmr_idx, int pktc_idx, 2000 int qsize, int esize) 2001 { 2002 KASSERT(tmr_idx >= 0 && tmr_idx < SGE_NTIMERS, 2003 ("%s: bad tmr_idx %d", __func__, tmr_idx)); 2004 KASSERT(pktc_idx < SGE_NCOUNTERS, /* -ve is ok, means don't use */ 2005 ("%s: bad pktc_idx %d", __func__, pktc_idx)); 2006 2007 iq->flags = 0; 2008 iq->adapter = sc; 2009 iq->intr_params = V_QINTR_TIMER_IDX(tmr_idx); 2010 iq->intr_pktc_idx = SGE_NCOUNTERS - 1; 2011 if (pktc_idx >= 0) { 2012 iq->intr_params |= F_QINTR_CNT_EN; 2013 iq->intr_pktc_idx = pktc_idx; 2014 } 2015 iq->qsize = roundup2(qsize, 16); /* See FW_IQ_CMD/iqsize */ 2016 iq->esize = max(esize, 16); /* See FW_IQ_CMD/iqesize */ 2017 } 2018 2019 static inline void 2020 init_fl(struct adapter *sc, struct sge_fl *fl, int qsize, int maxp, int pack, 2021 char *name) 2022 { 2023 2024 fl->qsize = qsize; 2025 strlcpy(fl->lockname, name, sizeof(fl->lockname)); 2026 if (pack) 2027 fl->flags |= FL_BUF_PACKING; 2028 find_best_refill_source(sc, fl, maxp); 2029 find_safe_refill_source(sc, fl); 2030 } 2031 2032 static inline void 2033 init_eq(struct sge_eq *eq, int eqtype, int qsize, uint8_t tx_chan, 2034 uint16_t iqid, char *name) 2035 { 2036 KASSERT(tx_chan < NCHAN, ("%s: bad tx channel %d", __func__, tx_chan)); 2037 KASSERT(eqtype <= EQ_TYPEMASK, ("%s: bad qtype %d", __func__, eqtype)); 2038 2039 eq->flags = eqtype & EQ_TYPEMASK; 2040 eq->tx_chan = tx_chan; 2041 eq->iqid = iqid; 2042 eq->qsize = qsize; 2043 strlcpy(eq->lockname, name, sizeof(eq->lockname)); 2044 2045 TASK_INIT(&eq->tx_task, 0, t4_tx_task, eq); 2046 callout_init(&eq->tx_callout, CALLOUT_MPSAFE); 2047 } 2048 2049 static int 2050 alloc_ring(struct adapter *sc, size_t len, bus_dma_tag_t *tag, 2051 bus_dmamap_t *map, bus_addr_t *pa, void **va) 2052 { 2053 int rc; 2054 2055 rc = bus_dma_tag_create(sc->dmat, 512, 0, BUS_SPACE_MAXADDR, 2056 BUS_SPACE_MAXADDR, 
NULL, NULL, len, 1, len, 0, NULL, NULL, tag); 2057 if (rc != 0) { 2058 device_printf(sc->dev, "cannot allocate DMA tag: %d\n", rc); 2059 goto done; 2060 } 2061 2062 rc = bus_dmamem_alloc(*tag, va, 2063 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, map); 2064 if (rc != 0) { 2065 device_printf(sc->dev, "cannot allocate DMA memory: %d\n", rc); 2066 goto done; 2067 } 2068 2069 rc = bus_dmamap_load(*tag, *map, *va, len, oneseg_dma_callback, pa, 0); 2070 if (rc != 0) { 2071 device_printf(sc->dev, "cannot load DMA map: %d\n", rc); 2072 goto done; 2073 } 2074 done: 2075 if (rc) 2076 free_ring(sc, *tag, *map, *pa, *va); 2077 2078 return (rc); 2079 } 2080 2081 static int 2082 free_ring(struct adapter *sc, bus_dma_tag_t tag, bus_dmamap_t map, 2083 bus_addr_t pa, void *va) 2084 { 2085 if (pa) 2086 bus_dmamap_unload(tag, map); 2087 if (va) 2088 bus_dmamem_free(tag, va, map); 2089 if (tag) 2090 bus_dma_tag_destroy(tag); 2091 2092 return (0); 2093 } 2094 2095 /* 2096 * Allocates the ring for an ingress queue and an optional freelist. If the 2097 * freelist is specified it will be allocated and then associated with the 2098 * ingress queue. 2099 * 2100 * Returns errno on failure. Resources allocated up to that point may still be 2101 * allocated. Caller is responsible for cleanup in case this function fails. 2102 * 2103 * If the ingress queue will take interrupts directly (iq->flags & IQ_INTR) then 2104 * the intr_idx specifies the vector, starting from 0. Otherwise it specifies 2105 * the abs_id of the ingress queue to which its interrupts should be forwarded. 2106 */ 2107 static int 2108 alloc_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl, 2109 int intr_idx, int cong) 2110 { 2111 int rc, i, cntxt_id; 2112 size_t len; 2113 struct fw_iq_cmd c; 2114 struct adapter *sc = iq->adapter; 2115 __be32 v = 0; 2116 2117 len = iq->qsize * iq->esize; 2118 rc = alloc_ring(sc, len, &iq->desc_tag, &iq->desc_map, &iq->ba, 2119 (void **)&iq->desc); 2120 if (rc != 0) 2121 return (rc); 2122 2123 bzero(&c, sizeof(c)); 2124 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | 2125 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) | 2126 V_FW_IQ_CMD_VFN(0)); 2127 2128 c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART | 2129 FW_LEN16(c)); 2130 2131 /* Special handling for firmware event queue */ 2132 if (iq == &sc->sge.fwq) 2133 v |= F_FW_IQ_CMD_IQASYNCH; 2134 2135 if (iq->flags & IQ_INTR) { 2136 KASSERT(intr_idx < sc->intr_count, 2137 ("%s: invalid direct intr_idx %d", __func__, intr_idx)); 2138 } else 2139 v |= F_FW_IQ_CMD_IQANDST; 2140 v |= V_FW_IQ_CMD_IQANDSTINDEX(intr_idx); 2141 2142 c.type_to_iqandstindex = htobe32(v | 2143 V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | 2144 V_FW_IQ_CMD_VIID(pi->viid) | 2145 V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT)); 2146 c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) | 2147 F_FW_IQ_CMD_IQGTSMODE | 2148 V_FW_IQ_CMD_IQINTCNTTHRESH(iq->intr_pktc_idx) | 2149 V_FW_IQ_CMD_IQESIZE(ilog2(iq->esize) - 4)); 2150 c.iqsize = htobe16(iq->qsize); 2151 c.iqaddr = htobe64(iq->ba); 2152 if (cong >= 0) 2153 c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN); 2154 2155 if (fl) { 2156 mtx_init(&fl->fl_lock, fl->lockname, NULL, MTX_DEF); 2157 2158 len = fl->qsize * RX_FL_ESIZE; 2159 rc = alloc_ring(sc, len, &fl->desc_tag, &fl->desc_map, 2160 &fl->ba, (void **)&fl->desc); 2161 if (rc) 2162 return (rc); 2163 2164 /* Allocate space for one software descriptor per buffer. 
*/ 2165 fl->cap = (fl->qsize - spg_len / RX_FL_ESIZE) * 8; 2166 rc = alloc_fl_sdesc(fl); 2167 if (rc != 0) { 2168 device_printf(sc->dev, 2169 "failed to setup fl software descriptors: %d\n", 2170 rc); 2171 return (rc); 2172 } 2173 fl->needed = fl->cap; 2174 fl->lowat = fl->flags & FL_BUF_PACKING ? 2175 roundup2(sc->sge.fl_starve_threshold2, 8) : 2176 roundup2(sc->sge.fl_starve_threshold, 8); 2177 2178 c.iqns_to_fl0congen |= 2179 htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) | 2180 F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO | 2181 (fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0) | 2182 (fl->flags & FL_BUF_PACKING ? F_FW_IQ_CMD_FL0PACKEN : 2183 0)); 2184 if (cong >= 0) { 2185 c.iqns_to_fl0congen |= 2186 htobe32(V_FW_IQ_CMD_FL0CNGCHMAP(cong) | 2187 F_FW_IQ_CMD_FL0CONGCIF | 2188 F_FW_IQ_CMD_FL0CONGEN); 2189 } 2190 c.fl0dcaen_to_fl0cidxfthresh = 2191 htobe16(V_FW_IQ_CMD_FL0FBMIN(X_FETCHBURSTMIN_64B) | 2192 V_FW_IQ_CMD_FL0FBMAX(X_FETCHBURSTMAX_512B)); 2193 c.fl0size = htobe16(fl->qsize); 2194 c.fl0addr = htobe64(fl->ba); 2195 } 2196 2197 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 2198 if (rc != 0) { 2199 device_printf(sc->dev, 2200 "failed to create ingress queue: %d\n", rc); 2201 return (rc); 2202 } 2203 2204 iq->cdesc = iq->desc; 2205 iq->cidx = 0; 2206 iq->gen = 1; 2207 iq->intr_next = iq->intr_params; 2208 iq->cntxt_id = be16toh(c.iqid); 2209 iq->abs_id = be16toh(c.physiqid); 2210 iq->flags |= IQ_ALLOCATED; 2211 2212 cntxt_id = iq->cntxt_id - sc->sge.iq_start; 2213 if (cntxt_id >= sc->sge.niq) { 2214 panic ("%s: iq->cntxt_id (%d) more than the max (%d)", __func__, 2215 cntxt_id, sc->sge.niq - 1); 2216 } 2217 sc->sge.iqmap[cntxt_id] = iq; 2218 2219 if (fl) { 2220 fl->cntxt_id = be16toh(c.fl0id); 2221 fl->pidx = fl->cidx = 0; 2222 2223 cntxt_id = fl->cntxt_id - sc->sge.eq_start; 2224 if (cntxt_id >= sc->sge.neq) { 2225 panic("%s: fl->cntxt_id (%d) more than the max (%d)", 2226 __func__, cntxt_id, sc->sge.neq - 1); 2227 } 2228 sc->sge.eqmap[cntxt_id] = (void *)fl; 2229 2230 FL_LOCK(fl); 2231 /* Enough to make sure the SGE doesn't think it's starved */ 2232 refill_fl(sc, fl, fl->lowat); 2233 FL_UNLOCK(fl); 2234 2235 iq->flags |= IQ_HAS_FL; 2236 } 2237 2238 if (is_t5(sc) && cong >= 0) { 2239 uint32_t param, val; 2240 2241 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | 2242 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) | 2243 V_FW_PARAMS_PARAM_YZ(iq->cntxt_id); 2244 if (cong == 0) 2245 val = 1 << 19; 2246 else { 2247 val = 2 << 19; 2248 for (i = 0; i < 4; i++) { 2249 if (cong & (1 << i)) 2250 val |= 1 << (i << 2); 2251 } 2252 } 2253 2254 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); 2255 if (rc != 0) { 2256 /* report error but carry on */ 2257 device_printf(sc->dev, 2258 "failed to set congestion manager context for " 2259 "ingress queue %d: %d\n", iq->cntxt_id, rc); 2260 } 2261 } 2262 2263 /* Enable IQ interrupts */ 2264 atomic_store_rel_int(&iq->state, IQS_IDLE); 2265 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_SEINTARM(iq->intr_params) | 2266 V_INGRESSQID(iq->cntxt_id)); 2267 2268 return (0); 2269 } 2270 2271 static int 2272 free_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl) 2273 { 2274 int rc; 2275 struct adapter *sc = iq->adapter; 2276 device_t dev; 2277 2278 if (sc == NULL) 2279 return (0); /* nothing to do */ 2280 2281 dev = pi ? pi->dev : sc->dev; 2282 2283 if (iq->flags & IQ_ALLOCATED) { 2284 rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0, 2285 FW_IQ_TYPE_FL_INT_CAP, iq->cntxt_id, 2286 fl ?
fl->cntxt_id : 0xffff, 0xffff); 2287 if (rc != 0) { 2288 device_printf(dev, 2289 "failed to free queue %p: %d\n", iq, rc); 2290 return (rc); 2291 } 2292 iq->flags &= ~IQ_ALLOCATED; 2293 } 2294 2295 free_ring(sc, iq->desc_tag, iq->desc_map, iq->ba, iq->desc); 2296 2297 bzero(iq, sizeof(*iq)); 2298 2299 if (fl) { 2300 free_ring(sc, fl->desc_tag, fl->desc_map, fl->ba, 2301 fl->desc); 2302 2303 if (fl->sdesc) 2304 free_fl_sdesc(sc, fl); 2305 2306 if (mtx_initialized(&fl->fl_lock)) 2307 mtx_destroy(&fl->fl_lock); 2308 2309 bzero(fl, sizeof(*fl)); 2310 } 2311 2312 return (0); 2313 } 2314 2315 static void 2316 add_fl_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *oid, 2317 struct sge_fl *fl) 2318 { 2319 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 2320 2321 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fl", CTLFLAG_RD, NULL, 2322 "freelist"); 2323 children = SYSCTL_CHILDREN(oid); 2324 2325 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id", 2326 CTLTYPE_INT | CTLFLAG_RD, &fl->cntxt_id, 0, sysctl_uint16, "I", 2327 "SGE context id of the freelist"); 2328 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, &fl->cidx, 2329 0, "consumer index"); 2330 if (fl->flags & FL_BUF_PACKING) { 2331 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_offset", 2332 CTLFLAG_RD, &fl->rx_offset, 0, "packing rx offset"); 2333 } 2334 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, &fl->pidx, 2335 0, "producer index"); 2336 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "mbuf_allocated", 2337 CTLFLAG_RD, &fl->mbuf_allocated, "# of mbuf allocated"); 2338 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "mbuf_inlined", 2339 CTLFLAG_RD, &fl->mbuf_inlined, "# of mbuf inlined in clusters"); 2340 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_allocated", 2341 CTLFLAG_RD, &fl->cl_allocated, "# of clusters allocated"); 2342 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_recycled", 2343 CTLFLAG_RD, &fl->cl_recycled, "# of clusters recycled"); 2344 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_fast_recycled", 2345 CTLFLAG_RD, &fl->cl_fast_recycled, "# of clusters recycled (fast)"); 2346 } 2347 2348 static int 2349 alloc_fwq(struct adapter *sc) 2350 { 2351 int rc, intr_idx; 2352 struct sge_iq *fwq = &sc->sge.fwq; 2353 struct sysctl_oid *oid = device_get_sysctl_tree(sc->dev); 2354 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 2355 2356 init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE, FW_IQ_ESIZE); 2357 fwq->flags |= IQ_INTR; /* always */ 2358 intr_idx = sc->intr_count > 1 ? 
1 : 0; 2359 rc = alloc_iq_fl(sc->port[0], fwq, NULL, intr_idx, -1); 2360 if (rc != 0) { 2361 device_printf(sc->dev, 2362 "failed to create firmware event queue: %d\n", rc); 2363 return (rc); 2364 } 2365 2366 oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "fwq", CTLFLAG_RD, 2367 NULL, "firmware event queue"); 2368 children = SYSCTL_CHILDREN(oid); 2369 2370 SYSCTL_ADD_PROC(&sc->ctx, children, OID_AUTO, "abs_id", 2371 CTLTYPE_INT | CTLFLAG_RD, &fwq->abs_id, 0, sysctl_uint16, "I", 2372 "absolute id of the queue"); 2373 SYSCTL_ADD_PROC(&sc->ctx, children, OID_AUTO, "cntxt_id", 2374 CTLTYPE_INT | CTLFLAG_RD, &fwq->cntxt_id, 0, sysctl_uint16, "I", 2375 "SGE context id of the queue"); 2376 SYSCTL_ADD_PROC(&sc->ctx, children, OID_AUTO, "cidx", 2377 CTLTYPE_INT | CTLFLAG_RD, &fwq->cidx, 0, sysctl_uint16, "I", 2378 "consumer index"); 2379 2380 return (0); 2381 } 2382 2383 static int 2384 free_fwq(struct adapter *sc) 2385 { 2386 return free_iq_fl(NULL, &sc->sge.fwq, NULL); 2387 } 2388 2389 static int 2390 alloc_mgmtq(struct adapter *sc) 2391 { 2392 int rc; 2393 struct sge_wrq *mgmtq = &sc->sge.mgmtq; 2394 char name[16]; 2395 struct sysctl_oid *oid = device_get_sysctl_tree(sc->dev); 2396 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 2397 2398 oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "mgmtq", CTLFLAG_RD, 2399 NULL, "management queue"); 2400 2401 snprintf(name, sizeof(name), "%s mgmtq", device_get_nameunit(sc->dev)); 2402 init_eq(&mgmtq->eq, EQ_CTRL, CTRL_EQ_QSIZE, sc->port[0]->tx_chan, 2403 sc->sge.fwq.cntxt_id, name); 2404 rc = alloc_wrq(sc, NULL, mgmtq, oid); 2405 if (rc != 0) { 2406 device_printf(sc->dev, 2407 "failed to create management queue: %d\n", rc); 2408 return (rc); 2409 } 2410 2411 return (0); 2412 } 2413 2414 static int 2415 free_mgmtq(struct adapter *sc) 2416 { 2417 2418 return free_wrq(sc, &sc->sge.mgmtq); 2419 } 2420 2421 static inline int 2422 tnl_cong(struct port_info *pi) 2423 { 2424 2425 if (cong_drop == -1) 2426 return (-1); 2427 else if (cong_drop == 1) 2428 return (0); 2429 else 2430 return (pi->rx_chan_map); 2431 } 2432 2433 static int 2434 alloc_rxq(struct port_info *pi, struct sge_rxq *rxq, int intr_idx, int idx, 2435 struct sysctl_oid *oid) 2436 { 2437 int rc; 2438 struct sysctl_oid_list *children; 2439 char name[16]; 2440 2441 rc = alloc_iq_fl(pi, &rxq->iq, &rxq->fl, intr_idx, tnl_cong(pi)); 2442 if (rc != 0) 2443 return (rc); 2444 2445 FL_LOCK(&rxq->fl); 2446 refill_fl(pi->adapter, &rxq->fl, rxq->fl.needed / 8); 2447 FL_UNLOCK(&rxq->fl); 2448 2449 #if defined(INET) || defined(INET6) 2450 rc = tcp_lro_init(&rxq->lro); 2451 if (rc != 0) 2452 return (rc); 2453 rxq->lro.ifp = pi->ifp; /* also indicates LRO init'ed */ 2454 2455 if (pi->ifp->if_capenable & IFCAP_LRO) 2456 rxq->iq.flags |= IQ_LRO_ENABLED; 2457 #endif 2458 rxq->ifp = pi->ifp; 2459 2460 children = SYSCTL_CHILDREN(oid); 2461 2462 snprintf(name, sizeof(name), "%d", idx); 2463 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD, 2464 NULL, "rx queue"); 2465 children = SYSCTL_CHILDREN(oid); 2466 2467 SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "abs_id", 2468 CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.abs_id, 0, sysctl_uint16, "I", 2469 "absolute id of the queue"); 2470 SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cntxt_id", 2471 CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.cntxt_id, 0, sysctl_uint16, "I", 2472 "SGE context id of the queue"); 2473 SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cidx", 2474 CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.cidx, 0, sysctl_uint16, "I", 2475 "consumer index"); 
2476 #if defined(INET) || defined(INET6) 2477 SYSCTL_ADD_INT(&pi->ctx, children, OID_AUTO, "lro_queued", CTLFLAG_RD, 2478 &rxq->lro.lro_queued, 0, NULL); 2479 SYSCTL_ADD_INT(&pi->ctx, children, OID_AUTO, "lro_flushed", CTLFLAG_RD, 2480 &rxq->lro.lro_flushed, 0, NULL); 2481 #endif 2482 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "rxcsum", CTLFLAG_RD, 2483 &rxq->rxcsum, "# of times hardware assisted with checksum"); 2484 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "vlan_extraction", 2485 CTLFLAG_RD, &rxq->vlan_extraction, 2486 "# of times hardware extracted 802.1Q tag"); 2487 2488 add_fl_sysctls(&pi->ctx, oid, &rxq->fl); 2489 2490 return (rc); 2491 } 2492 2493 static int 2494 free_rxq(struct port_info *pi, struct sge_rxq *rxq) 2495 { 2496 int rc; 2497 2498 #if defined(INET) || defined(INET6) 2499 if (rxq->lro.ifp) { 2500 tcp_lro_free(&rxq->lro); 2501 rxq->lro.ifp = NULL; 2502 } 2503 #endif 2504 2505 rc = free_iq_fl(pi, &rxq->iq, &rxq->fl); 2506 if (rc == 0) 2507 bzero(rxq, sizeof(*rxq)); 2508 2509 return (rc); 2510 } 2511 2512 #ifdef TCP_OFFLOAD 2513 static int 2514 alloc_ofld_rxq(struct port_info *pi, struct sge_ofld_rxq *ofld_rxq, 2515 int intr_idx, int idx, struct sysctl_oid *oid) 2516 { 2517 int rc; 2518 struct sysctl_oid_list *children; 2519 char name[16]; 2520 2521 rc = alloc_iq_fl(pi, &ofld_rxq->iq, &ofld_rxq->fl, intr_idx, 2522 pi->rx_chan_map); 2523 if (rc != 0) 2524 return (rc); 2525 2526 children = SYSCTL_CHILDREN(oid); 2527 2528 snprintf(name, sizeof(name), "%d", idx); 2529 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD, 2530 NULL, "rx queue"); 2531 children = SYSCTL_CHILDREN(oid); 2532 2533 SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "abs_id", 2534 CTLTYPE_INT | CTLFLAG_RD, &ofld_rxq->iq.abs_id, 0, sysctl_uint16, 2535 "I", "absolute id of the queue"); 2536 SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cntxt_id", 2537 CTLTYPE_INT | CTLFLAG_RD, &ofld_rxq->iq.cntxt_id, 0, sysctl_uint16, 2538 "I", "SGE context id of the queue"); 2539 SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cidx", 2540 CTLTYPE_INT | CTLFLAG_RD, &ofld_rxq->iq.cidx, 0, sysctl_uint16, "I", 2541 "consumer index"); 2542 2543 add_fl_sysctls(&pi->ctx, oid, &ofld_rxq->fl); 2544 2545 return (rc); 2546 } 2547 2548 static int 2549 free_ofld_rxq(struct port_info *pi, struct sge_ofld_rxq *ofld_rxq) 2550 { 2551 int rc; 2552 2553 rc = free_iq_fl(pi, &ofld_rxq->iq, &ofld_rxq->fl); 2554 if (rc == 0) 2555 bzero(ofld_rxq, sizeof(*ofld_rxq)); 2556 2557 return (rc); 2558 } 2559 #endif 2560 2561 static int 2562 ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq) 2563 { 2564 int rc, cntxt_id; 2565 struct fw_eq_ctrl_cmd c; 2566 2567 bzero(&c, sizeof(c)); 2568 2569 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST | 2570 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(sc->pf) | 2571 V_FW_EQ_CTRL_CMD_VFN(0)); 2572 c.alloc_to_len16 = htobe32(F_FW_EQ_CTRL_CMD_ALLOC | 2573 F_FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c)); 2574 c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(eq->iqid)); /* XXX */ 2575 c.physeqid_pkd = htobe32(0); 2576 c.fetchszm_to_iqid = 2577 htobe32(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) | 2578 V_FW_EQ_CTRL_CMD_PCIECHN(eq->tx_chan) | 2579 F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(eq->iqid)); 2580 c.dcaen_to_eqsize = 2581 htobe32(V_FW_EQ_CTRL_CMD_FBMIN(X_FETCHBURSTMIN_64B) | 2582 V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) | 2583 V_FW_EQ_CTRL_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) | 2584 V_FW_EQ_CTRL_CMD_EQSIZE(eq->qsize)); 2585 c.eqaddr = 
htobe64(eq->ba); 2586 2587 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 2588 if (rc != 0) { 2589 device_printf(sc->dev, 2590 "failed to create control queue %d: %d\n", eq->tx_chan, rc); 2591 return (rc); 2592 } 2593 eq->flags |= EQ_ALLOCATED; 2594 2595 eq->cntxt_id = G_FW_EQ_CTRL_CMD_EQID(be32toh(c.cmpliqid_eqid)); 2596 cntxt_id = eq->cntxt_id - sc->sge.eq_start; 2597 if (cntxt_id >= sc->sge.neq) 2598 panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, 2599 cntxt_id, sc->sge.neq - 1); 2600 sc->sge.eqmap[cntxt_id] = eq; 2601 2602 return (rc); 2603 } 2604 2605 static int 2606 eth_eq_alloc(struct adapter *sc, struct port_info *pi, struct sge_eq *eq) 2607 { 2608 int rc, cntxt_id; 2609 struct fw_eq_eth_cmd c; 2610 2611 bzero(&c, sizeof(c)); 2612 2613 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST | 2614 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) | 2615 V_FW_EQ_ETH_CMD_VFN(0)); 2616 c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC | 2617 F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c)); 2618 c.viid_pkd = htobe32(V_FW_EQ_ETH_CMD_VIID(pi->viid)); 2619 c.fetchszm_to_iqid = 2620 htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) | 2621 V_FW_EQ_ETH_CMD_PCIECHN(eq->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO | 2622 V_FW_EQ_ETH_CMD_IQID(eq->iqid)); 2623 c.dcaen_to_eqsize = htobe32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) | 2624 V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) | 2625 V_FW_EQ_ETH_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) | 2626 V_FW_EQ_ETH_CMD_EQSIZE(eq->qsize)); 2627 c.eqaddr = htobe64(eq->ba); 2628 2629 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 2630 if (rc != 0) { 2631 device_printf(pi->dev, 2632 "failed to create Ethernet egress queue: %d\n", rc); 2633 return (rc); 2634 } 2635 eq->flags |= EQ_ALLOCATED; 2636 2637 eq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd)); 2638 cntxt_id = eq->cntxt_id - sc->sge.eq_start; 2639 if (cntxt_id >= sc->sge.neq) 2640 panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, 2641 cntxt_id, sc->sge.neq - 1); 2642 sc->sge.eqmap[cntxt_id] = eq; 2643 2644 return (rc); 2645 } 2646 2647 #ifdef TCP_OFFLOAD 2648 static int 2649 ofld_eq_alloc(struct adapter *sc, struct port_info *pi, struct sge_eq *eq) 2650 { 2651 int rc, cntxt_id; 2652 struct fw_eq_ofld_cmd c; 2653 2654 bzero(&c, sizeof(c)); 2655 2656 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST | 2657 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(sc->pf) | 2658 V_FW_EQ_OFLD_CMD_VFN(0)); 2659 c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_ALLOC | 2660 F_FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c)); 2661 c.fetchszm_to_iqid = 2662 htonl(V_FW_EQ_OFLD_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) | 2663 V_FW_EQ_OFLD_CMD_PCIECHN(eq->tx_chan) | 2664 F_FW_EQ_OFLD_CMD_FETCHRO | V_FW_EQ_OFLD_CMD_IQID(eq->iqid)); 2665 c.dcaen_to_eqsize = 2666 htobe32(V_FW_EQ_OFLD_CMD_FBMIN(X_FETCHBURSTMIN_64B) | 2667 V_FW_EQ_OFLD_CMD_FBMAX(X_FETCHBURSTMAX_512B) | 2668 V_FW_EQ_OFLD_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) | 2669 V_FW_EQ_OFLD_CMD_EQSIZE(eq->qsize)); 2670 c.eqaddr = htobe64(eq->ba); 2671 2672 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 2673 if (rc != 0) { 2674 device_printf(pi->dev, 2675 "failed to create egress queue for TCP offload: %d\n", rc); 2676 return (rc); 2677 } 2678 eq->flags |= EQ_ALLOCATED; 2679 2680 eq->cntxt_id = G_FW_EQ_OFLD_CMD_EQID(be32toh(c.eqid_pkd)); 2681 cntxt_id = eq->cntxt_id - sc->sge.eq_start; 2682 if (cntxt_id >= sc->sge.neq) 2683 panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, 2684 cntxt_id, sc->sge.neq 
- 1); 2685 sc->sge.eqmap[cntxt_id] = eq; 2686 2687 return (rc); 2688 } 2689 #endif 2690 2691 static int 2692 alloc_eq(struct adapter *sc, struct port_info *pi, struct sge_eq *eq) 2693 { 2694 int rc; 2695 size_t len; 2696 2697 mtx_init(&eq->eq_lock, eq->lockname, NULL, MTX_DEF); 2698 2699 len = eq->qsize * EQ_ESIZE; 2700 rc = alloc_ring(sc, len, &eq->desc_tag, &eq->desc_map, 2701 &eq->ba, (void **)&eq->desc); 2702 if (rc) 2703 return (rc); 2704 2705 eq->cap = eq->qsize - spg_len / EQ_ESIZE; 2706 eq->spg = (void *)&eq->desc[eq->cap]; 2707 eq->avail = eq->cap - 1; /* one less to avoid cidx = pidx */ 2708 eq->pidx = eq->cidx = 0; 2709 eq->doorbells = sc->doorbells; 2710 2711 switch (eq->flags & EQ_TYPEMASK) { 2712 case EQ_CTRL: 2713 rc = ctrl_eq_alloc(sc, eq); 2714 break; 2715 2716 case EQ_ETH: 2717 rc = eth_eq_alloc(sc, pi, eq); 2718 break; 2719 2720 #ifdef TCP_OFFLOAD 2721 case EQ_OFLD: 2722 rc = ofld_eq_alloc(sc, pi, eq); 2723 break; 2724 #endif 2725 2726 default: 2727 panic("%s: invalid eq type %d.", __func__, 2728 eq->flags & EQ_TYPEMASK); 2729 } 2730 if (rc != 0) { 2731 device_printf(sc->dev, 2732 "failed to allocate egress queue(%d): %d", 2733 eq->flags & EQ_TYPEMASK, rc); 2734 } 2735 2736 eq->tx_callout.c_cpu = eq->cntxt_id % mp_ncpus; 2737 2738 if (isset(&eq->doorbells, DOORBELL_UDB) || 2739 isset(&eq->doorbells, DOORBELL_UDBWC) || 2740 isset(&eq->doorbells, DOORBELL_WCWR)) { 2741 uint32_t s_qpp = sc->sge.eq_s_qpp; 2742 uint32_t mask = (1 << s_qpp) - 1; 2743 volatile uint8_t *udb; 2744 2745 udb = sc->udbs_base + UDBS_DB_OFFSET; 2746 udb += (eq->cntxt_id >> s_qpp) << PAGE_SHIFT; /* pg offset */ 2747 eq->udb_qid = eq->cntxt_id & mask; /* id in page */ 2748 if (eq->udb_qid > PAGE_SIZE / UDBS_SEG_SIZE) 2749 clrbit(&eq->doorbells, DOORBELL_WCWR); 2750 else { 2751 udb += eq->udb_qid << UDBS_SEG_SHIFT; /* seg offset */ 2752 eq->udb_qid = 0; 2753 } 2754 eq->udb = (volatile void *)udb; 2755 } 2756 2757 return (rc); 2758 } 2759 2760 static int 2761 free_eq(struct adapter *sc, struct sge_eq *eq) 2762 { 2763 int rc; 2764 2765 if (eq->flags & EQ_ALLOCATED) { 2766 switch (eq->flags & EQ_TYPEMASK) { 2767 case EQ_CTRL: 2768 rc = -t4_ctrl_eq_free(sc, sc->mbox, sc->pf, 0, 2769 eq->cntxt_id); 2770 break; 2771 2772 case EQ_ETH: 2773 rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, 2774 eq->cntxt_id); 2775 break; 2776 2777 #ifdef TCP_OFFLOAD 2778 case EQ_OFLD: 2779 rc = -t4_ofld_eq_free(sc, sc->mbox, sc->pf, 0, 2780 eq->cntxt_id); 2781 break; 2782 #endif 2783 2784 default: 2785 panic("%s: invalid eq type %d.", __func__, 2786 eq->flags & EQ_TYPEMASK); 2787 } 2788 if (rc != 0) { 2789 device_printf(sc->dev, 2790 "failed to free egress queue (%d): %d\n", 2791 eq->flags & EQ_TYPEMASK, rc); 2792 return (rc); 2793 } 2794 eq->flags &= ~EQ_ALLOCATED; 2795 } 2796 2797 free_ring(sc, eq->desc_tag, eq->desc_map, eq->ba, eq->desc); 2798 2799 if (mtx_initialized(&eq->eq_lock)) 2800 mtx_destroy(&eq->eq_lock); 2801 2802 bzero(eq, sizeof(*eq)); 2803 return (0); 2804 } 2805 2806 static int 2807 alloc_wrq(struct adapter *sc, struct port_info *pi, struct sge_wrq *wrq, 2808 struct sysctl_oid *oid) 2809 { 2810 int rc; 2811 struct sysctl_ctx_list *ctx = pi ? 
&pi->ctx : &sc->ctx; 2812 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 2813 2814 rc = alloc_eq(sc, pi, &wrq->eq); 2815 if (rc) 2816 return (rc); 2817 2818 wrq->adapter = sc; 2819 STAILQ_INIT(&wrq->wr_list); 2820 2821 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 2822 &wrq->eq.cntxt_id, 0, "SGE context id of the queue"); 2823 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx", 2824 CTLTYPE_INT | CTLFLAG_RD, &wrq->eq.cidx, 0, sysctl_uint16, "I", 2825 "consumer index"); 2826 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pidx", 2827 CTLTYPE_INT | CTLFLAG_RD, &wrq->eq.pidx, 0, sysctl_uint16, "I", 2828 "producer index"); 2829 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs", CTLFLAG_RD, 2830 &wrq->tx_wrs, "# of work requests"); 2831 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "no_desc", CTLFLAG_RD, 2832 &wrq->no_desc, 0, 2833 "# of times queue ran out of hardware descriptors"); 2834 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "unstalled", CTLFLAG_RD, 2835 &wrq->eq.unstalled, 0, "# of times queue recovered after stall"); 2836 2837 return (rc); 2838 } 2839 2840 static int 2841 free_wrq(struct adapter *sc, struct sge_wrq *wrq) 2842 { 2843 int rc; 2844 2845 rc = free_eq(sc, &wrq->eq); 2846 if (rc) 2847 return (rc); 2848 2849 bzero(wrq, sizeof(*wrq)); 2850 return (0); 2851 } 2852 2853 static int 2854 alloc_txq(struct port_info *pi, struct sge_txq *txq, int idx, 2855 struct sysctl_oid *oid) 2856 { 2857 int rc; 2858 struct adapter *sc = pi->adapter; 2859 struct sge_eq *eq = &txq->eq; 2860 char name[16]; 2861 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 2862 2863 rc = alloc_eq(sc, pi, eq); 2864 if (rc) 2865 return (rc); 2866 2867 txq->ifp = pi->ifp; 2868 2869 txq->sdesc = malloc(eq->cap * sizeof(struct tx_sdesc), M_CXGBE, 2870 M_ZERO | M_WAITOK); 2871 txq->br = buf_ring_alloc(eq->qsize, M_CXGBE, M_WAITOK, &eq->eq_lock); 2872 2873 rc = bus_dma_tag_create(sc->dmat, 1, 0, BUS_SPACE_MAXADDR, 2874 BUS_SPACE_MAXADDR, NULL, NULL, 64 * 1024, TX_SGL_SEGS, 2875 BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL, NULL, &txq->tx_tag); 2876 if (rc != 0) { 2877 device_printf(sc->dev, 2878 "failed to create tx DMA tag: %d\n", rc); 2879 return (rc); 2880 } 2881 2882 /* 2883 * We can stuff ~10 frames in an 8-descriptor txpkts WR (8 is the SGE 2884 * limit for any WR). txq->no_dmamap events shouldn't occur if maps is 2885 * sized for the worst case. 
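 * For example, a 1024 entry eq gets 1024 * 10 / 8 = 1280 maps here.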
2886 */ 2887 rc = t4_alloc_tx_maps(&txq->txmaps, txq->tx_tag, eq->qsize * 10 / 8, 2888 M_WAITOK); 2889 if (rc != 0) { 2890 device_printf(sc->dev, "failed to setup tx DMA maps: %d\n", rc); 2891 return (rc); 2892 } 2893 2894 snprintf(name, sizeof(name), "%d", idx); 2895 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD, 2896 NULL, "tx queue"); 2897 children = SYSCTL_CHILDREN(oid); 2898 2899 SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 2900 &eq->cntxt_id, 0, "SGE context id of the queue"); 2901 SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cidx", 2902 CTLTYPE_INT | CTLFLAG_RD, &eq->cidx, 0, sysctl_uint16, "I", 2903 "consumer index"); 2904 SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "pidx", 2905 CTLTYPE_INT | CTLFLAG_RD, &eq->pidx, 0, sysctl_uint16, "I", 2906 "producer index"); 2907 2908 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txcsum", CTLFLAG_RD, 2909 &txq->txcsum, "# of times hardware assisted with checksum"); 2910 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "vlan_insertion", 2911 CTLFLAG_RD, &txq->vlan_insertion, 2912 "# of times hardware inserted 802.1Q tag"); 2913 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "tso_wrs", CTLFLAG_RD, 2914 &txq->tso_wrs, "# of TSO work requests"); 2915 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "imm_wrs", CTLFLAG_RD, 2916 &txq->imm_wrs, "# of work requests with immediate data"); 2917 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "sgl_wrs", CTLFLAG_RD, 2918 &txq->sgl_wrs, "# of work requests with direct SGL"); 2919 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkt_wrs", CTLFLAG_RD, 2920 &txq->txpkt_wrs, "# of txpkt work requests (one pkt/WR)"); 2921 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkts_wrs", CTLFLAG_RD, 2922 &txq->txpkts_wrs, "# of txpkts work requests (multiple pkts/WR)"); 2923 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkts_pkts", CTLFLAG_RD, 2924 &txq->txpkts_pkts, "# of frames tx'd using txpkts work requests"); 2925 2926 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "br_drops", CTLFLAG_RD, 2927 &txq->br->br_drops, "# of drops in the buf_ring for this queue"); 2928 SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "no_dmamap", CTLFLAG_RD, 2929 &txq->no_dmamap, 0, "# of times txq ran out of DMA maps"); 2930 SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "no_desc", CTLFLAG_RD, 2931 &txq->no_desc, 0, "# of times txq ran out of hardware descriptors"); 2932 SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "egr_update", CTLFLAG_RD, 2933 &eq->egr_update, 0, "egress update notifications from the SGE"); 2934 SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "unstalled", CTLFLAG_RD, 2935 &eq->unstalled, 0, "# of times txq recovered after stall"); 2936 2937 return (rc); 2938 } 2939 2940 static int 2941 free_txq(struct port_info *pi, struct sge_txq *txq) 2942 { 2943 int rc; 2944 struct adapter *sc = pi->adapter; 2945 struct sge_eq *eq = &txq->eq; 2946 2947 rc = free_eq(sc, eq); 2948 if (rc) 2949 return (rc); 2950 2951 free(txq->sdesc, M_CXGBE); 2952 2953 if (txq->txmaps.maps) 2954 t4_free_tx_maps(&txq->txmaps, txq->tx_tag); 2955 2956 buf_ring_free(txq->br, M_CXGBE); 2957 2958 if (txq->tx_tag) 2959 bus_dma_tag_destroy(txq->tx_tag); 2960 2961 bzero(txq, sizeof(*txq)); 2962 return (0); 2963 } 2964 2965 static void 2966 oneseg_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error) 2967 { 2968 bus_addr_t *ba = arg; 2969 2970 KASSERT(nseg == 1, 2971 ("%s meant for single segment mappings only.", __func__)); 2972 2973 *ba = error ? 
0 : segs->ds_addr; 2974 } 2975 2976 static inline bool 2977 is_new_response(const struct sge_iq *iq, struct rsp_ctrl **ctrl) 2978 { 2979 *ctrl = (void *)((uintptr_t)iq->cdesc + 2980 (iq->esize - sizeof(struct rsp_ctrl))); 2981 2982 return (((*ctrl)->u.type_gen >> S_RSPD_GEN) == iq->gen); 2983 } 2984 2985 static inline void 2986 iq_next(struct sge_iq *iq) 2987 { 2988 iq->cdesc = (void *) ((uintptr_t)iq->cdesc + iq->esize); 2989 if (__predict_false(++iq->cidx == iq->qsize - 1)) { 2990 iq->cidx = 0; 2991 iq->gen ^= 1; 2992 iq->cdesc = iq->desc; 2993 } 2994 } 2995 2996 #define FL_HW_IDX(x) ((x) >> 3) 2997 static inline void 2998 ring_fl_db(struct adapter *sc, struct sge_fl *fl) 2999 { 3000 int ndesc = fl->pending / 8; 3001 uint32_t v; 3002 3003 if (FL_HW_IDX(fl->pidx) == FL_HW_IDX(fl->cidx)) 3004 ndesc--; /* hold back one credit */ 3005 3006 if (ndesc <= 0) 3007 return; /* nothing to do */ 3008 3009 v = F_DBPRIO | V_QID(fl->cntxt_id) | V_PIDX(ndesc); 3010 if (is_t5(sc)) 3011 v |= F_DBTYPE; 3012 3013 wmb(); 3014 3015 t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL), v); 3016 fl->pending -= ndesc * 8; 3017 } 3018 3019 /* 3020 * Fill up the freelist by upto nbufs and maybe ring its doorbell. 3021 * 3022 * Returns non-zero to indicate that it should be added to the list of starving 3023 * freelists. 3024 */ 3025 static int 3026 refill_fl(struct adapter *sc, struct sge_fl *fl, int nbufs) 3027 { 3028 __be64 *d = &fl->desc[fl->pidx]; 3029 struct fl_sdesc *sd = &fl->sdesc[fl->pidx]; 3030 uintptr_t pa; 3031 caddr_t cl; 3032 struct cluster_layout *cll = &fl->cll_def; /* default layout */ 3033 struct sw_zone_info *swz = &sc->sge.sw_zone_info[cll->zidx]; 3034 struct cluster_metadata *clm; 3035 3036 FL_LOCK_ASSERT_OWNED(fl); 3037 3038 if (nbufs > fl->needed) 3039 nbufs = fl->needed; 3040 nbufs -= (fl->pidx + nbufs) % 8; 3041 3042 while (nbufs--) { 3043 3044 if (sd->cl != NULL) { 3045 3046 if (sd->nmbuf == 0) { 3047 /* 3048 * Fast recycle without involving any atomics on 3049 * the cluster's metadata (if the cluster has 3050 * metadata). This happens when all frames 3051 * received in the cluster were small enough to 3052 * fit within a single mbuf each. 3053 */ 3054 fl->cl_fast_recycled++; 3055 goto recycled_fast; 3056 } 3057 3058 /* 3059 * Cluster is guaranteed to have metadata. Clusters 3060 * without metadata always take the fast recycle path 3061 * when they're recycled. 
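 * Here sd->nmbuf != 0, so one or more mbufs still point into this
 * cluster; the atomic refcount decrement below decides whether ours was
 * the last reference (recycle the cluster in place) or not (give up the
 * reference and allocate a fresh cluster).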
3062 */ 3063 clm = cl_metadata(sc, fl, &sd->cll, sd->cl); 3064 MPASS(clm != NULL); 3065 3066 if (atomic_fetchadd_int(&clm->refcount, -1) == 1) { 3067 fl->cl_recycled++; 3068 goto recycled; 3069 } 3070 sd->cl = NULL; /* gave up my reference */ 3071 } 3072 MPASS(sd->cl == NULL); 3073 alloc: 3074 cl = uma_zalloc(swz->zone, M_NOWAIT); 3075 if (__predict_false(cl == NULL)) { 3076 if (cll == &fl->cll_alt || fl->cll_alt.zidx == -1 || 3077 fl->cll_def.zidx == fl->cll_alt.zidx) 3078 break; 3079 3080 /* fall back to the safe zone */ 3081 cll = &fl->cll_alt; 3082 swz = &sc->sge.sw_zone_info[cll->zidx]; 3083 goto alloc; 3084 } 3085 fl->cl_allocated++; 3086 3087 pa = pmap_kextract((vm_offset_t)cl); 3088 pa += cll->region1; 3089 sd->cl = cl; 3090 sd->cll = *cll; 3091 *d = htobe64(pa | cll->hwidx); 3092 clm = cl_metadata(sc, fl, cll, cl); 3093 if (clm != NULL) { 3094 recycled: 3095 #ifdef INVARIANTS 3096 clm->sd = sd; 3097 #endif 3098 clm->refcount = 1; 3099 } 3100 sd->nmbuf = 0; 3101 recycled_fast: 3102 fl->pending++; 3103 fl->needed--; 3104 d++; 3105 sd++; 3106 if (__predict_false(++fl->pidx == fl->cap)) { 3107 fl->pidx = 0; 3108 sd = fl->sdesc; 3109 d = fl->desc; 3110 } 3111 } 3112 3113 if (fl->pending >= 8) 3114 ring_fl_db(sc, fl); 3115 3116 return (FL_RUNNING_LOW(fl) && !(fl->flags & FL_STARVING)); 3117 } 3118 3119 /* 3120 * Attempt to refill all starving freelists. 3121 */ 3122 static void 3123 refill_sfl(void *arg) 3124 { 3125 struct adapter *sc = arg; 3126 struct sge_fl *fl, *fl_temp; 3127 3128 mtx_lock(&sc->sfl_lock); 3129 TAILQ_FOREACH_SAFE(fl, &sc->sfl, link, fl_temp) { 3130 FL_LOCK(fl); 3131 refill_fl(sc, fl, 64); 3132 if (FL_NOT_RUNNING_LOW(fl) || fl->flags & FL_DOOMED) { 3133 TAILQ_REMOVE(&sc->sfl, fl, link); 3134 fl->flags &= ~FL_STARVING; 3135 } 3136 FL_UNLOCK(fl); 3137 } 3138 3139 if (!TAILQ_EMPTY(&sc->sfl)) 3140 callout_schedule(&sc->sfl_callout, hz / 5); 3141 mtx_unlock(&sc->sfl_lock); 3142 } 3143 3144 static int 3145 alloc_fl_sdesc(struct sge_fl *fl) 3146 { 3147 3148 fl->sdesc = malloc(fl->cap * sizeof(struct fl_sdesc), M_CXGBE, 3149 M_ZERO | M_WAITOK); 3150 3151 return (0); 3152 } 3153 3154 static void 3155 free_fl_sdesc(struct adapter *sc, struct sge_fl *fl) 3156 { 3157 struct fl_sdesc *sd; 3158 struct cluster_metadata *clm; 3159 struct cluster_layout *cll; 3160 int i; 3161 3162 sd = fl->sdesc; 3163 for (i = 0; i < fl->cap; i++, sd++) { 3164 if (sd->cl == NULL) 3165 continue; 3166 3167 cll = &sd->cll; 3168 clm = cl_metadata(sc, fl, cll, sd->cl); 3169 if (sd->nmbuf == 0 || 3170 (clm && atomic_fetchadd_int(&clm->refcount, -1) == 1)) { 3171 uma_zfree(sc->sge.sw_zone_info[cll->zidx].zone, sd->cl); 3172 } 3173 sd->cl = NULL; 3174 } 3175 3176 free(fl->sdesc, M_CXGBE); 3177 fl->sdesc = NULL; 3178 } 3179 3180 int 3181 t4_alloc_tx_maps(struct tx_maps *txmaps, bus_dma_tag_t tx_tag, int count, 3182 int flags) 3183 { 3184 struct tx_map *txm; 3185 int i, rc; 3186 3187 txmaps->map_total = txmaps->map_avail = count; 3188 txmaps->map_cidx = txmaps->map_pidx = 0; 3189 3190 txmaps->maps = malloc(count * sizeof(struct tx_map), M_CXGBE, 3191 M_ZERO | flags); 3192 3193 txm = txmaps->maps; 3194 for (i = 0; i < count; i++, txm++) { 3195 rc = bus_dmamap_create(tx_tag, 0, &txm->map); 3196 if (rc != 0) 3197 goto failed; 3198 } 3199 3200 return (0); 3201 failed: 3202 while (--i >= 0) { 3203 txm--; 3204 bus_dmamap_destroy(tx_tag, txm->map); 3205 } 3206 KASSERT(txm == txmaps->maps, ("%s: EDOOFUS", __func__)); 3207 3208 free(txmaps->maps, M_CXGBE); 3209 txmaps->maps = NULL; 3210 3211 return (rc); 3212 } 3213 
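/*
 * The maps allocated above form a simple ring: get_pkt_sgl takes the map at
 * map_pidx when it loads an mbuf chain, free_pkt_sgl backs that one map out
 * again if the packet has to be abandoned, and reclaim_tx_descs unloads maps
 * in order at map_cidx once the hardware is done with the descriptors.
 * map_avail is checked before every use, so the ring cannot overrun.  For
 * example, with map_total = 4 and everything idle, two loads move map_pidx
 * from 0 to 2 and map_avail from 4 to 2; a later reclaim unloads both maps
 * at map_cidx and map_avail returns to 4.
 */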
3214 void 3215 t4_free_tx_maps(struct tx_maps *txmaps, bus_dma_tag_t tx_tag) 3216 { 3217 struct tx_map *txm; 3218 int i; 3219 3220 txm = txmaps->maps; 3221 for (i = 0; i < txmaps->map_total; i++, txm++) { 3222 3223 if (txm->m) { 3224 bus_dmamap_unload(tx_tag, txm->map); 3225 m_freem(txm->m); 3226 txm->m = NULL; 3227 } 3228 3229 bus_dmamap_destroy(tx_tag, txm->map); 3230 } 3231 3232 free(txmaps->maps, M_CXGBE); 3233 txmaps->maps = NULL; 3234 } 3235 3236 /* 3237 * We'll do immediate data tx for non-TSO, but only when not coalescing. We're 3238 * willing to use upto 2 hardware descriptors which means a maximum of 96 bytes 3239 * of immediate data. 3240 */ 3241 #define IMM_LEN ( \ 3242 2 * EQ_ESIZE \ 3243 - sizeof(struct fw_eth_tx_pkt_wr) \ 3244 - sizeof(struct cpl_tx_pkt_core)) 3245 3246 /* 3247 * Returns non-zero on failure, no need to cleanup anything in that case. 3248 * 3249 * Note 1: We always try to defrag the mbuf if required and return EFBIG only 3250 * if the resulting chain still won't fit in a tx descriptor. 3251 * 3252 * Note 2: We'll pullup the mbuf chain if TSO is requested and the first mbuf 3253 * does not have the TCP header in it. 3254 */ 3255 static int 3256 get_pkt_sgl(struct sge_txq *txq, struct mbuf **fp, struct sgl *sgl, 3257 int sgl_only) 3258 { 3259 struct mbuf *m = *fp; 3260 struct tx_maps *txmaps; 3261 struct tx_map *txm; 3262 int rc, defragged = 0, n; 3263 3264 TXQ_LOCK_ASSERT_OWNED(txq); 3265 3266 if (m->m_pkthdr.tso_segsz) 3267 sgl_only = 1; /* Do not allow immediate data with LSO */ 3268 3269 start: sgl->nsegs = 0; 3270 3271 if (m->m_pkthdr.len <= IMM_LEN && !sgl_only) 3272 return (0); /* nsegs = 0 tells caller to use imm. tx */ 3273 3274 txmaps = &txq->txmaps; 3275 if (txmaps->map_avail == 0) { 3276 txq->no_dmamap++; 3277 return (ENOMEM); 3278 } 3279 txm = &txmaps->maps[txmaps->map_pidx]; 3280 3281 if (m->m_pkthdr.tso_segsz && m->m_len < 50) { 3282 *fp = m_pullup(m, 50); 3283 m = *fp; 3284 if (m == NULL) 3285 return (ENOBUFS); 3286 } 3287 3288 rc = bus_dmamap_load_mbuf_sg(txq->tx_tag, txm->map, m, sgl->seg, 3289 &sgl->nsegs, BUS_DMA_NOWAIT); 3290 if (rc == EFBIG && defragged == 0) { 3291 m = m_defrag(m, M_NOWAIT); 3292 if (m == NULL) 3293 return (EFBIG); 3294 3295 defragged = 1; 3296 *fp = m; 3297 goto start; 3298 } 3299 if (rc != 0) 3300 return (rc); 3301 3302 txm->m = m; 3303 txmaps->map_avail--; 3304 if (++txmaps->map_pidx == txmaps->map_total) 3305 txmaps->map_pidx = 0; 3306 3307 KASSERT(sgl->nsegs > 0 && sgl->nsegs <= TX_SGL_SEGS, 3308 ("%s: bad DMA mapping (%d segments)", __func__, sgl->nsegs)); 3309 3310 /* 3311 * Store the # of flits required to hold this frame's SGL in nflits. An 3312 * SGL has a (ULPTX header + len0, addr0) tuple optionally followed by 3313 * multiple (len0 + len1, addr0, addr1) tuples. If addr1 is not used 3314 * then len1 must be set to 0. 3315 */ 3316 n = sgl->nsegs - 1; 3317 sgl->nflits = (3 * n) / 2 + (n & 1) + 2; 3318 3319 return (0); 3320 } 3321 3322 3323 /* 3324 * Releases all the txq resources used up in the specified sgl. 
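 * In practice that is exactly one DMA map: the producer index is backed up
 * by one slot and that map is unloaded.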
3325 */ 3326 static int 3327 free_pkt_sgl(struct sge_txq *txq, struct sgl *sgl) 3328 { 3329 struct tx_maps *txmaps; 3330 struct tx_map *txm; 3331 3332 TXQ_LOCK_ASSERT_OWNED(txq); 3333 3334 if (sgl->nsegs == 0) 3335 return (0); /* didn't use any map */ 3336 3337 txmaps = &txq->txmaps; 3338 3339 /* 1 pkt uses exactly 1 map, back it out */ 3340 3341 txmaps->map_avail++; 3342 if (txmaps->map_pidx > 0) 3343 txmaps->map_pidx--; 3344 else 3345 txmaps->map_pidx = txmaps->map_total - 1; 3346 3347 txm = &txmaps->maps[txmaps->map_pidx]; 3348 bus_dmamap_unload(txq->tx_tag, txm->map); 3349 txm->m = NULL; 3350 3351 return (0); 3352 } 3353 3354 static int 3355 write_txpkt_wr(struct port_info *pi, struct sge_txq *txq, struct mbuf *m, 3356 struct sgl *sgl) 3357 { 3358 struct sge_eq *eq = &txq->eq; 3359 struct fw_eth_tx_pkt_wr *wr; 3360 struct cpl_tx_pkt_core *cpl; 3361 uint32_t ctrl; /* used in many unrelated places */ 3362 uint64_t ctrl1; 3363 int nflits, ndesc, pktlen; 3364 struct tx_sdesc *txsd; 3365 caddr_t dst; 3366 3367 TXQ_LOCK_ASSERT_OWNED(txq); 3368 3369 pktlen = m->m_pkthdr.len; 3370 3371 /* 3372 * Do we have enough flits to send this frame out? 3373 */ 3374 ctrl = sizeof(struct cpl_tx_pkt_core); 3375 if (m->m_pkthdr.tso_segsz) { 3376 nflits = TXPKT_LSO_WR_HDR; 3377 ctrl += sizeof(struct cpl_tx_pkt_lso_core); 3378 } else 3379 nflits = TXPKT_WR_HDR; 3380 if (sgl->nsegs > 0) 3381 nflits += sgl->nflits; 3382 else { 3383 nflits += howmany(pktlen, 8); 3384 ctrl += pktlen; 3385 } 3386 ndesc = howmany(nflits, 8); 3387 if (ndesc > eq->avail) 3388 return (ENOMEM); 3389 3390 /* Firmware work request header */ 3391 wr = (void *)&eq->desc[eq->pidx]; 3392 wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) | 3393 V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl)); 3394 ctrl = V_FW_WR_LEN16(howmany(nflits, 2)); 3395 if (eq->avail == ndesc) { 3396 if (!(eq->flags & EQ_CRFLUSHED)) { 3397 ctrl |= F_FW_WR_EQUEQ | F_FW_WR_EQUIQ; 3398 eq->flags |= EQ_CRFLUSHED; 3399 } 3400 eq->flags |= EQ_STALLED; 3401 } 3402 3403 wr->equiq_to_len16 = htobe32(ctrl); 3404 wr->r3 = 0; 3405 3406 if (m->m_pkthdr.tso_segsz) { 3407 struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); 3408 struct ether_header *eh; 3409 void *l3hdr; 3410 #if defined(INET) || defined(INET6) 3411 struct tcphdr *tcp; 3412 #endif 3413 uint16_t eh_type; 3414 3415 ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | F_LSO_FIRST_SLICE | 3416 F_LSO_LAST_SLICE; 3417 3418 eh = mtod(m, struct ether_header *); 3419 eh_type = ntohs(eh->ether_type); 3420 if (eh_type == ETHERTYPE_VLAN) { 3421 struct ether_vlan_header *evh = (void *)eh; 3422 3423 ctrl |= V_LSO_ETHHDR_LEN(1); 3424 l3hdr = evh + 1; 3425 eh_type = ntohs(evh->evl_proto); 3426 } else 3427 l3hdr = eh + 1; 3428 3429 switch (eh_type) { 3430 #ifdef INET6 3431 case ETHERTYPE_IPV6: 3432 { 3433 struct ip6_hdr *ip6 = l3hdr; 3434 3435 /* 3436 * XXX-BZ For now we do not pretend to support 3437 * IPv6 extension headers. 
3438 */ 3439 KASSERT(ip6->ip6_nxt == IPPROTO_TCP, ("%s: CSUM_TSO " 3440 "with ip6_nxt != TCP: %u", __func__, ip6->ip6_nxt)); 3441 tcp = (struct tcphdr *)(ip6 + 1); 3442 ctrl |= F_LSO_IPV6; 3443 ctrl |= V_LSO_IPHDR_LEN(sizeof(*ip6) >> 2) | 3444 V_LSO_TCPHDR_LEN(tcp->th_off); 3445 break; 3446 } 3447 #endif 3448 #ifdef INET 3449 case ETHERTYPE_IP: 3450 { 3451 struct ip *ip = l3hdr; 3452 3453 tcp = (void *)((uintptr_t)ip + ip->ip_hl * 4); 3454 ctrl |= V_LSO_IPHDR_LEN(ip->ip_hl) | 3455 V_LSO_TCPHDR_LEN(tcp->th_off); 3456 break; 3457 } 3458 #endif 3459 default: 3460 panic("%s: CSUM_TSO but no supported IP version " 3461 "(0x%04x)", __func__, eh_type); 3462 } 3463 3464 lso->lso_ctrl = htobe32(ctrl); 3465 lso->ipid_ofst = htobe16(0); 3466 lso->mss = htobe16(m->m_pkthdr.tso_segsz); 3467 lso->seqno_offset = htobe32(0); 3468 lso->len = htobe32(pktlen); 3469 3470 cpl = (void *)(lso + 1); 3471 3472 txq->tso_wrs++; 3473 } else 3474 cpl = (void *)(wr + 1); 3475 3476 /* Checksum offload */ 3477 ctrl1 = 0; 3478 if (!(m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO))) 3479 ctrl1 |= F_TXPKT_IPCSUM_DIS; 3480 if (!(m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 | 3481 CSUM_TCP_IPV6 | CSUM_TSO))) 3482 ctrl1 |= F_TXPKT_L4CSUM_DIS; 3483 if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | 3484 CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) 3485 txq->txcsum++; /* some hardware assistance provided */ 3486 3487 /* VLAN tag insertion */ 3488 if (m->m_flags & M_VLANTAG) { 3489 ctrl1 |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->m_pkthdr.ether_vtag); 3490 txq->vlan_insertion++; 3491 } 3492 3493 /* CPL header */ 3494 cpl->ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) | 3495 V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(pi->adapter->pf)); 3496 cpl->pack = 0; 3497 cpl->len = htobe16(pktlen); 3498 cpl->ctrl1 = htobe64(ctrl1); 3499 3500 /* Software descriptor */ 3501 txsd = &txq->sdesc[eq->pidx]; 3502 txsd->desc_used = ndesc; 3503 3504 eq->pending += ndesc; 3505 eq->avail -= ndesc; 3506 eq->pidx += ndesc; 3507 if (eq->pidx >= eq->cap) 3508 eq->pidx -= eq->cap; 3509 3510 /* SGL */ 3511 dst = (void *)(cpl + 1); 3512 if (sgl->nsegs > 0) { 3513 txsd->credits = 1; 3514 txq->sgl_wrs++; 3515 write_sgl_to_txd(eq, sgl, &dst); 3516 } else { 3517 txsd->credits = 0; 3518 txq->imm_wrs++; 3519 for (; m; m = m->m_next) { 3520 copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len); 3521 #ifdef INVARIANTS 3522 pktlen -= m->m_len; 3523 #endif 3524 } 3525 #ifdef INVARIANTS 3526 KASSERT(pktlen == 0, ("%s: %d bytes left.", __func__, pktlen)); 3527 #endif 3528 3529 } 3530 3531 txq->txpkt_wrs++; 3532 return (0); 3533 } 3534 3535 /* 3536 * Returns 0 to indicate that m has been accepted into a coalesced tx work 3537 * request. It has either been folded into txpkts or txpkts was flushed and m 3538 * has started a new coalesced work request (as the first frame in a fresh 3539 * txpkts). 3540 * 3541 * Returns non-zero to indicate a failure - caller is responsible for 3542 * transmitting m, if there was anything in txpkts it has been flushed. 
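 *
 * A frame is only folded in if it is not TSO, the combined work request
 * still fits within TX_WR_FLITS and within the descriptors currently
 * available, and the total payload stays below 64KB.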
3543 */ 3544 static int 3545 add_to_txpkts(struct port_info *pi, struct sge_txq *txq, struct txpkts *txpkts, 3546 struct mbuf *m, struct sgl *sgl) 3547 { 3548 struct sge_eq *eq = &txq->eq; 3549 int can_coalesce; 3550 struct tx_sdesc *txsd; 3551 int flits; 3552 3553 TXQ_LOCK_ASSERT_OWNED(txq); 3554 3555 KASSERT(sgl->nsegs, ("%s: can't coalesce imm data", __func__)); 3556 3557 if (txpkts->npkt > 0) { 3558 flits = TXPKTS_PKT_HDR + sgl->nflits; 3559 can_coalesce = m->m_pkthdr.tso_segsz == 0 && 3560 txpkts->nflits + flits <= TX_WR_FLITS && 3561 txpkts->nflits + flits <= eq->avail * 8 && 3562 txpkts->plen + m->m_pkthdr.len < 65536; 3563 3564 if (can_coalesce) { 3565 txpkts->npkt++; 3566 txpkts->nflits += flits; 3567 txpkts->plen += m->m_pkthdr.len; 3568 3569 txsd = &txq->sdesc[eq->pidx]; 3570 txsd->credits++; 3571 3572 return (0); 3573 } 3574 3575 /* 3576 * Couldn't coalesce m into txpkts. The first order of business 3577 * is to send txpkts on its way. Then we'll revisit m. 3578 */ 3579 write_txpkts_wr(txq, txpkts); 3580 } 3581 3582 /* 3583 * Check if we can start a new coalesced tx work request with m as 3584 * the first packet in it. 3585 */ 3586 3587 KASSERT(txpkts->npkt == 0, ("%s: txpkts not empty", __func__)); 3588 3589 flits = TXPKTS_WR_HDR + sgl->nflits; 3590 can_coalesce = m->m_pkthdr.tso_segsz == 0 && 3591 flits <= eq->avail * 8 && flits <= TX_WR_FLITS; 3592 3593 if (can_coalesce == 0) 3594 return (EINVAL); 3595 3596 /* 3597 * Start a fresh coalesced tx WR with m as the first frame in it. 3598 */ 3599 txpkts->npkt = 1; 3600 txpkts->nflits = flits; 3601 txpkts->flitp = &eq->desc[eq->pidx].flit[2]; 3602 txpkts->plen = m->m_pkthdr.len; 3603 3604 txsd = &txq->sdesc[eq->pidx]; 3605 txsd->credits = 1; 3606 3607 return (0); 3608 } 3609 3610 /* 3611 * Note that write_txpkts_wr can never run out of hardware descriptors (but 3612 * write_txpkt_wr can). add_to_txpkts ensures that a frame is accepted for 3613 * coalescing only if sufficient hardware descriptors are available. 
3614 */ 3615 static void 3616 write_txpkts_wr(struct sge_txq *txq, struct txpkts *txpkts) 3617 { 3618 struct sge_eq *eq = &txq->eq; 3619 struct fw_eth_tx_pkts_wr *wr; 3620 struct tx_sdesc *txsd; 3621 uint32_t ctrl; 3622 int ndesc; 3623 3624 TXQ_LOCK_ASSERT_OWNED(txq); 3625 3626 ndesc = howmany(txpkts->nflits, 8); 3627 3628 wr = (void *)&eq->desc[eq->pidx]; 3629 wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)); 3630 ctrl = V_FW_WR_LEN16(howmany(txpkts->nflits, 2)); 3631 if (eq->avail == ndesc) { 3632 if (!(eq->flags & EQ_CRFLUSHED)) { 3633 ctrl |= F_FW_WR_EQUEQ | F_FW_WR_EQUIQ; 3634 eq->flags |= EQ_CRFLUSHED; 3635 } 3636 eq->flags |= EQ_STALLED; 3637 } 3638 wr->equiq_to_len16 = htobe32(ctrl); 3639 wr->plen = htobe16(txpkts->plen); 3640 wr->npkt = txpkts->npkt; 3641 wr->r3 = wr->type = 0; 3642 3643 /* Everything else already written */ 3644 3645 txsd = &txq->sdesc[eq->pidx]; 3646 txsd->desc_used = ndesc; 3647 3648 KASSERT(eq->avail >= ndesc, ("%s: out of descriptors", __func__)); 3649 3650 eq->pending += ndesc; 3651 eq->avail -= ndesc; 3652 eq->pidx += ndesc; 3653 if (eq->pidx >= eq->cap) 3654 eq->pidx -= eq->cap; 3655 3656 txq->txpkts_pkts += txpkts->npkt; 3657 txq->txpkts_wrs++; 3658 txpkts->npkt = 0; /* emptied */ 3659 } 3660 3661 static inline void 3662 write_ulp_cpl_sgl(struct port_info *pi, struct sge_txq *txq, 3663 struct txpkts *txpkts, struct mbuf *m, struct sgl *sgl) 3664 { 3665 struct ulp_txpkt *ulpmc; 3666 struct ulptx_idata *ulpsc; 3667 struct cpl_tx_pkt_core *cpl; 3668 struct sge_eq *eq = &txq->eq; 3669 uintptr_t flitp, start, end; 3670 uint64_t ctrl; 3671 caddr_t dst; 3672 3673 KASSERT(txpkts->npkt > 0, ("%s: txpkts is empty", __func__)); 3674 3675 start = (uintptr_t)eq->desc; 3676 end = (uintptr_t)eq->spg; 3677 3678 /* Checksum offload */ 3679 ctrl = 0; 3680 if (!(m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO))) 3681 ctrl |= F_TXPKT_IPCSUM_DIS; 3682 if (!(m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 | 3683 CSUM_TCP_IPV6 | CSUM_TSO))) 3684 ctrl |= F_TXPKT_L4CSUM_DIS; 3685 if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | 3686 CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) 3687 txq->txcsum++; /* some hardware assistance provided */ 3688 3689 /* VLAN tag insertion */ 3690 if (m->m_flags & M_VLANTAG) { 3691 ctrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->m_pkthdr.ether_vtag); 3692 txq->vlan_insertion++; 3693 } 3694 3695 /* 3696 * The previous packet's SGL must have ended at a 16 byte boundary (this 3697 * is required by the firmware/hardware). It follows that flitp cannot 3698 * wrap around between the ULPTX master command and ULPTX subcommand (8 3699 * bytes each), and that it can not wrap around in the middle of the 3700 * cpl_tx_pkt_core either. 
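 * (Both ULPTX headers are 8 bytes and cpl_tx_pkt_core is 16 bytes, and
 * everything here stays 16 byte aligned, so within this function a wrap can
 * only happen at the two explicit checks below; the SGL handles its own
 * wrap inside write_sgl_to_txd.)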
3701 */ 3702 flitp = (uintptr_t)txpkts->flitp; 3703 KASSERT((flitp & 0xf) == 0, 3704 ("%s: last SGL did not end at 16 byte boundary: %p", 3705 __func__, txpkts->flitp)); 3706 3707 /* ULP master command */ 3708 ulpmc = (void *)flitp; 3709 ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0) | 3710 V_ULP_TXPKT_FID(eq->iqid)); 3711 ulpmc->len = htonl(howmany(sizeof(*ulpmc) + sizeof(*ulpsc) + 3712 sizeof(*cpl) + 8 * sgl->nflits, 16)); 3713 3714 /* ULP subcommand */ 3715 ulpsc = (void *)(ulpmc + 1); 3716 ulpsc->cmd_more = htobe32(V_ULPTX_CMD((u32)ULP_TX_SC_IMM) | 3717 F_ULP_TX_SC_MORE); 3718 ulpsc->len = htobe32(sizeof(struct cpl_tx_pkt_core)); 3719 3720 flitp += sizeof(*ulpmc) + sizeof(*ulpsc); 3721 if (flitp == end) 3722 flitp = start; 3723 3724 /* CPL_TX_PKT */ 3725 cpl = (void *)flitp; 3726 cpl->ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) | 3727 V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(pi->adapter->pf)); 3728 cpl->pack = 0; 3729 cpl->len = htobe16(m->m_pkthdr.len); 3730 cpl->ctrl1 = htobe64(ctrl); 3731 3732 flitp += sizeof(*cpl); 3733 if (flitp == end) 3734 flitp = start; 3735 3736 /* SGL for this frame */ 3737 dst = (caddr_t)flitp; 3738 txpkts->nflits += write_sgl_to_txd(eq, sgl, &dst); 3739 txpkts->flitp = (void *)dst; 3740 3741 KASSERT(((uintptr_t)dst & 0xf) == 0, 3742 ("%s: SGL ends at %p (not a 16 byte boundary)", __func__, dst)); 3743 } 3744 3745 /* 3746 * If the SGL ends on an address that is not 16 byte aligned, this function will 3747 * add a 0 filled flit at the end. It returns 1 in that case. 3748 */ 3749 static int 3750 write_sgl_to_txd(struct sge_eq *eq, struct sgl *sgl, caddr_t *to) 3751 { 3752 __be64 *flitp, *end; 3753 struct ulptx_sgl *usgl; 3754 bus_dma_segment_t *seg; 3755 int i, padded; 3756 3757 KASSERT(sgl->nsegs > 0 && sgl->nflits > 0, 3758 ("%s: bad SGL - nsegs=%d, nflits=%d", 3759 __func__, sgl->nsegs, sgl->nflits)); 3760 3761 KASSERT(((uintptr_t)(*to) & 0xf) == 0, 3762 ("%s: SGL must start at a 16 byte boundary: %p", __func__, *to)); 3763 3764 flitp = (__be64 *)(*to); 3765 end = flitp + sgl->nflits; 3766 seg = &sgl->seg[0]; 3767 usgl = (void *)flitp; 3768 3769 /* 3770 * We start at a 16 byte boundary somewhere inside the tx descriptor 3771 * ring, so we're at least 16 bytes away from the status page. There is 3772 * no chance of a wrap around in the middle of usgl (which is 16 bytes). 
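 * With nsegs segments the SGL occupies (3 * (nsegs - 1)) / 2 +
 * ((nsegs - 1) & 1) + 2 flits (see get_pkt_sgl): 2 flits for a single
 * segment, 5 flits for three segments, and so on.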
3773 */ 3774 3775 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | 3776 V_ULPTX_NSGE(sgl->nsegs)); 3777 usgl->len0 = htobe32(seg->ds_len); 3778 usgl->addr0 = htobe64(seg->ds_addr); 3779 seg++; 3780 3781 if ((uintptr_t)end <= (uintptr_t)eq->spg) { 3782 3783 /* Won't wrap around at all */ 3784 3785 for (i = 0; i < sgl->nsegs - 1; i++, seg++) { 3786 usgl->sge[i / 2].len[i & 1] = htobe32(seg->ds_len); 3787 usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ds_addr); 3788 } 3789 if (i & 1) 3790 usgl->sge[i / 2].len[1] = htobe32(0); 3791 } else { 3792 3793 /* Will wrap somewhere in the rest of the SGL */ 3794 3795 /* 2 flits already written, write the rest flit by flit */ 3796 flitp = (void *)(usgl + 1); 3797 for (i = 0; i < sgl->nflits - 2; i++) { 3798 if ((uintptr_t)flitp == (uintptr_t)eq->spg) 3799 flitp = (void *)eq->desc; 3800 *flitp++ = get_flit(seg, sgl->nsegs - 1, i); 3801 } 3802 end = flitp; 3803 } 3804 3805 if ((uintptr_t)end & 0xf) { 3806 *(uint64_t *)end = 0; 3807 end++; 3808 padded = 1; 3809 } else 3810 padded = 0; 3811 3812 if ((uintptr_t)end == (uintptr_t)eq->spg) 3813 *to = (void *)eq->desc; 3814 else 3815 *to = (void *)end; 3816 3817 return (padded); 3818 } 3819 3820 static inline void 3821 copy_to_txd(struct sge_eq *eq, caddr_t from, caddr_t *to, int len) 3822 { 3823 if (__predict_true((uintptr_t)(*to) + len <= (uintptr_t)eq->spg)) { 3824 bcopy(from, *to, len); 3825 (*to) += len; 3826 } else { 3827 int portion = (uintptr_t)eq->spg - (uintptr_t)(*to); 3828 3829 bcopy(from, *to, portion); 3830 from += portion; 3831 portion = len - portion; /* remaining */ 3832 bcopy(from, (void *)eq->desc, portion); 3833 (*to) = (caddr_t)eq->desc + portion; 3834 } 3835 } 3836 3837 static inline void 3838 ring_eq_db(struct adapter *sc, struct sge_eq *eq) 3839 { 3840 u_int db, pending; 3841 3842 db = eq->doorbells; 3843 pending = eq->pending; 3844 if (pending > 1) 3845 clrbit(&db, DOORBELL_WCWR); 3846 eq->pending = 0; 3847 wmb(); 3848 3849 switch (ffs(db) - 1) { 3850 case DOORBELL_UDB: 3851 *eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(pending)); 3852 return; 3853 3854 case DOORBELL_WCWR: { 3855 volatile uint64_t *dst, *src; 3856 int i; 3857 3858 /* 3859 * Queues whose 128B doorbell segment fits in the page do not 3860 * use relative qid (udb_qid is always 0). Only queues with 3861 * doorbell segments can do WCWR. 3862 */ 3863 KASSERT(eq->udb_qid == 0 && pending == 1, 3864 ("%s: inappropriate doorbell (0x%x, %d, %d) for eq %p", 3865 __func__, eq->doorbells, pending, eq->pidx, eq)); 3866 3867 dst = (volatile void *)((uintptr_t)eq->udb + UDBS_WR_OFFSET - 3868 UDBS_DB_OFFSET); 3869 i = eq->pidx ? eq->pidx - 1 : eq->cap - 1; 3870 src = (void *)&eq->desc[i]; 3871 while (src != (void *)&eq->desc[i + 1]) 3872 *dst++ = *src++; 3873 wmb(); 3874 return; 3875 } 3876 3877 case DOORBELL_UDBWC: 3878 *eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(pending)); 3879 wmb(); 3880 return; 3881 3882 case DOORBELL_KDB: 3883 t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL), 3884 V_QID(eq->cntxt_id) | V_PIDX(pending)); 3885 return; 3886 } 3887 } 3888 3889 static inline int 3890 reclaimable(struct sge_eq *eq) 3891 { 3892 unsigned int cidx; 3893 3894 cidx = eq->spg->cidx; /* stable snapshot */ 3895 cidx = be16toh(cidx); 3896 3897 if (cidx >= eq->cidx) 3898 return (cidx - eq->cidx); 3899 else 3900 return (cidx + eq->cap - eq->cidx); 3901 } 3902 3903 /* 3904 * There are "can_reclaim" tx descriptors ready to be reclaimed. Reclaim as 3905 * many as possible but stop when there are around "n" mbufs to free. 

/*
 * There are "can_reclaim" tx descriptors ready to be reclaimed.  Reclaim as
 * many as possible but stop when there are around "n" mbufs to free.
 *
 * The actual number reclaimed is provided as the return value.
 */
static int
reclaim_tx_descs(struct sge_txq *txq, int can_reclaim, int n)
{
	struct tx_sdesc *txsd;
	struct tx_maps *txmaps;
	struct tx_map *txm;
	unsigned int reclaimed, maps;
	struct sge_eq *eq = &txq->eq;

	TXQ_LOCK_ASSERT_OWNED(txq);

	if (can_reclaim == 0)
		can_reclaim = reclaimable(eq);

	maps = reclaimed = 0;
	while (can_reclaim && maps < n) {
		int ndesc;

		txsd = &txq->sdesc[eq->cidx];
		ndesc = txsd->desc_used;

		/* Firmware doesn't return "partial" credits. */
		KASSERT(can_reclaim >= ndesc,
		    ("%s: unexpected number of credits: %d, %d",
		    __func__, can_reclaim, ndesc));

		maps += txsd->credits;

		reclaimed += ndesc;
		can_reclaim -= ndesc;

		eq->cidx += ndesc;
		if (__predict_false(eq->cidx >= eq->cap))
			eq->cidx -= eq->cap;
	}

	txmaps = &txq->txmaps;
	txm = &txmaps->maps[txmaps->map_cidx];
	if (maps)
		prefetch(txm->m);

	eq->avail += reclaimed;
	KASSERT(eq->avail < eq->cap,	/* avail tops out at (cap - 1) */
	    ("%s: too many descriptors available", __func__));

	txmaps->map_avail += maps;
	KASSERT(txmaps->map_avail <= txmaps->map_total,
	    ("%s: too many maps available", __func__));

	while (maps--) {
		struct tx_map *next;

		next = txm + 1;
		if (__predict_false(txmaps->map_cidx + 1 == txmaps->map_total))
			next = txmaps->maps;
		prefetch(next->m);

		bus_dmamap_unload(txq->tx_tag, txm->map);
		m_freem(txm->m);
		txm->m = NULL;

		txm = next;
		if (__predict_false(++txmaps->map_cidx == txmaps->map_total))
			txmaps->map_cidx = 0;
	}

	return (reclaimed);
}

static void
write_eqflush_wr(struct sge_eq *eq)
{
	struct fw_eq_flush_wr *wr;

	EQ_LOCK_ASSERT_OWNED(eq);
	KASSERT(eq->avail > 0, ("%s: no descriptors left.", __func__));
	KASSERT(!(eq->flags & EQ_CRFLUSHED), ("%s: flushed already", __func__));

	wr = (void *)&eq->desc[eq->pidx];
	bzero(wr, sizeof(*wr));
	wr->opcode = FW_EQ_FLUSH_WR;
	wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(sizeof(*wr) / 16) |
	    F_FW_WR_EQUEQ | F_FW_WR_EQUIQ);

	eq->flags |= (EQ_CRFLUSHED | EQ_STALLED);
	eq->pending++;
	eq->avail--;
	if (++eq->pidx == eq->cap)
		eq->pidx = 0;
}

static __be64
get_flit(bus_dma_segment_t *sgl, int nsegs, int idx)
{
	int i = (idx / 3) * 2;

	switch (idx % 3) {
	case 0: {
		__be64 rc;

		rc = htobe32(sgl[i].ds_len);
		if (i + 1 < nsegs)
			rc |= (uint64_t)htobe32(sgl[i + 1].ds_len) << 32;

		return (rc);
	}
	case 1:
		return htobe64(sgl[i].ds_addr);
	case 2:
		return htobe64(sgl[i + 1].ds_addr);
	}

	return (0);
}
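
#if 0	/* not compiled: editor's illustrative sketch, not part of the driver */
/*
 * The flit sequence that get_flit() produces for the wrap-around path in
 * write_sgl_to_txd.  With the first two flits (ulptx_sgl header and addr0)
 * already written, the remaining segments s1, s2, s3, ... are emitted as:
 *
 *	idx 0: { len(s1), len(s2) }	(one flit, two 32-bit lengths)
 *	idx 1: addr(s1)
 *	idx 2: addr(s2)
 *	idx 3: { len(s3), len(s4) }
 *	idx 4: addr(s3)
 *	idx 5: addr(s4)
 *	...
 *
 * i.e. i = (idx / 3) * 2 picks the segment pair and idx % 3 picks the length
 * flit or one of the two address flits, mirroring struct ulptx_sge_pair.
 * The loop below only illustrates the indexing; "segs" is a made-up array
 * holding the segments that follow the first one.
 */
static void
get_flit_example(void)
{
	bus_dma_segment_t segs[3];	/* segments 2..4 of a 4-segment frame */
	__be64 flit;
	int idx;

	/* 3 remaining segments need 5 flits: len pair, 2 addrs, len, addr. */
	for (idx = 0; idx < 5; idx++)
		flit = get_flit(segs, 3, idx);
	(void)flit;
}
#endif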

static void
find_best_refill_source(struct adapter *sc, struct sge_fl *fl, int maxp)
{
	int8_t zidx, hwidx, idx;
	uint16_t region1, region3;
	int spare, spare_needed, n;
	struct sw_zone_info *swz;
	struct hw_buf_info *hwb, *hwb_list = &sc->sge.hw_buf_info[0];

	/*
	 * Buffer Packing: Look for PAGE_SIZE or larger zone which has a bufsize
	 * large enough for the max payload and cluster metadata.  Otherwise
	 * settle for the largest bufsize that leaves enough room in the cluster
	 * for metadata.
	 *
	 * Without buffer packing: Look for the smallest zone which has a
	 * bufsize large enough for the max payload.  Settle for the largest
	 * bufsize available if there's nothing big enough for max payload.
	 */
	spare_needed = fl->flags & FL_BUF_PACKING ? CL_METADATA_SIZE : 0;
	swz = &sc->sge.sw_zone_info[0];
	hwidx = -1;
	for (zidx = 0; zidx < SW_ZONE_SIZES; zidx++, swz++) {
		if (swz->size > largest_rx_cluster) {
			if (__predict_true(hwidx != -1))
				break;

			/*
			 * This is a misconfiguration.  largest_rx_cluster is
			 * preventing us from finding a refill source.  See
			 * dev.t5nex.<n>.buffer_sizes to figure out why.
			 */
			device_printf(sc->dev, "largest_rx_cluster=%u leaves no"
			    " refill source for fl %p (dma %u).  Ignored.\n",
			    largest_rx_cluster, fl, maxp);
		}
		for (idx = swz->head_hwidx; idx != -1; idx = hwb->next) {
			hwb = &hwb_list[idx];
			spare = swz->size - hwb->size;
			if (spare < spare_needed)
				continue;

			hwidx = idx;		/* best option so far */
			if (hwb->size >= maxp) {

				if ((fl->flags & FL_BUF_PACKING) == 0)
					goto done; /* stop looking (not packing) */

				if (swz->size >= safest_rx_cluster)
					goto done; /* stop looking (packing) */
			}
			break;	/* keep looking, next zone */
		}
	}
done:
	/* A usable hwidx has been located. */
	MPASS(hwidx != -1);
	hwb = &hwb_list[hwidx];
	zidx = hwb->zidx;
	swz = &sc->sge.sw_zone_info[zidx];
	region1 = 0;
	region3 = swz->size - hwb->size;

	/*
	 * Stay within this zone and see if there is a better match when mbuf
	 * inlining is allowed.  Remember that the hwidx's are sorted in
	 * decreasing order of size (so in increasing order of spare area).
	 */
	for (idx = hwidx; idx != -1; idx = hwb->next) {
		hwb = &hwb_list[idx];
		spare = swz->size - hwb->size;

		if (allow_mbufs_in_cluster == 0 || hwb->size < maxp)
			break;
		if (spare < CL_METADATA_SIZE + MSIZE)
			continue;
		n = (spare - CL_METADATA_SIZE) / MSIZE;
		if (n > howmany(hwb->size, maxp))
			break;

		hwidx = idx;
		if (fl->flags & FL_BUF_PACKING) {
			region1 = n * MSIZE;
			region3 = spare - region1;
		} else {
			region1 = MSIZE;
			region3 = spare - region1;
			break;
		}
	}

	KASSERT(zidx >= 0 && zidx < SW_ZONE_SIZES,
	    ("%s: bad zone %d for fl %p, maxp %d", __func__, zidx, fl, maxp));
	KASSERT(hwidx >= 0 && hwidx < SGE_FLBUF_SIZES,
	    ("%s: bad hwidx %d for fl %p, maxp %d", __func__, hwidx, fl, maxp));
	KASSERT(region1 + sc->sge.hw_buf_info[hwidx].size + region3 ==
	    sc->sge.sw_zone_info[zidx].size,
	    ("%s: bad buffer layout for fl %p, maxp %d. "
		"cl %d; r1 %d, payload %d, r3 %d", __func__, fl, maxp,
		sc->sge.sw_zone_info[zidx].size, region1,
		sc->sge.hw_buf_info[hwidx].size, region3));
	if (fl->flags & FL_BUF_PACKING || region1 > 0) {
		KASSERT(region3 >= CL_METADATA_SIZE,
		    ("%s: no room for metadata.  fl %p, maxp %d; "
			"cl %d; r1 %d, payload %d, r3 %d", __func__, fl, maxp,
			sc->sge.sw_zone_info[zidx].size, region1,
			sc->sge.hw_buf_info[hwidx].size, region3));
		KASSERT(region1 % MSIZE == 0,
		    ("%s: bad mbuf region for fl %p, maxp %d. "
			"cl %d; r1 %d, payload %d, r3 %d", __func__, fl, maxp,
			sc->sge.sw_zone_info[zidx].size, region1,
			sc->sge.hw_buf_info[hwidx].size, region3));
	}

	fl->cll_def.zidx = zidx;
	fl->cll_def.hwidx = hwidx;
	fl->cll_def.region1 = region1;
	fl->cll_def.region3 = region3;
}
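
#if 0	/* not compiled: editor's illustrative sketch, not part of the driver */
/*
 * The cluster layout that find_best_refill_source() settles on, drawn out.
 * A cluster from the chosen software zone is carved into three regions:
 *
 *	|<---------------------- swz->size ---------------------->|
 *	| region1 (inline mbufs) | payload (hwb->size) |  region3  |
 *	                                                 (metadata)
 *
 * region1 is always a multiple of MSIZE and the three regions partition the
 * cluster exactly; the KASSERTs above enforce this.  The same invariant,
 * restated against the layout stored in the freelist ("check_layout" is not
 * a driver function):
 */
static void
check_layout(struct adapter *sc, struct sge_fl *fl)
{

	MPASS(fl->cll_def.region1 % MSIZE == 0);
	MPASS(fl->cll_def.region1 +
	    sc->sge.hw_buf_info[fl->cll_def.hwidx].size +
	    fl->cll_def.region3 == sc->sge.sw_zone_info[fl->cll_def.zidx].size);
}
#endif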
" 4133 "cl %d; r1 %d, payload %d, r3 %d", __func__, fl, maxp, 4134 sc->sge.sw_zone_info[zidx].size, region1, 4135 sc->sge.hw_buf_info[hwidx].size, region3)); 4136 } 4137 4138 fl->cll_def.zidx = zidx; 4139 fl->cll_def.hwidx = hwidx; 4140 fl->cll_def.region1 = region1; 4141 fl->cll_def.region3 = region3; 4142 } 4143 4144 static void 4145 find_safe_refill_source(struct adapter *sc, struct sge_fl *fl) 4146 { 4147 struct sge *s = &sc->sge; 4148 struct hw_buf_info *hwb; 4149 struct sw_zone_info *swz; 4150 int spare; 4151 int8_t hwidx; 4152 4153 if (fl->flags & FL_BUF_PACKING) 4154 hwidx = s->safe_hwidx2; /* with room for metadata */ 4155 else if (allow_mbufs_in_cluster && s->safe_hwidx2 != -1) { 4156 hwidx = s->safe_hwidx2; 4157 hwb = &s->hw_buf_info[hwidx]; 4158 swz = &s->sw_zone_info[hwb->zidx]; 4159 spare = swz->size - hwb->size; 4160 4161 /* no good if there isn't room for an mbuf as well */ 4162 if (spare < CL_METADATA_SIZE + MSIZE) 4163 hwidx = s->safe_hwidx1; 4164 } else 4165 hwidx = s->safe_hwidx1; 4166 4167 if (hwidx == -1) { 4168 /* No fallback source */ 4169 fl->cll_alt.hwidx = -1; 4170 fl->cll_alt.zidx = -1; 4171 4172 return; 4173 } 4174 4175 hwb = &s->hw_buf_info[hwidx]; 4176 swz = &s->sw_zone_info[hwb->zidx]; 4177 spare = swz->size - hwb->size; 4178 fl->cll_alt.hwidx = hwidx; 4179 fl->cll_alt.zidx = hwb->zidx; 4180 if (allow_mbufs_in_cluster) 4181 fl->cll_alt.region1 = ((spare - CL_METADATA_SIZE) / MSIZE) * MSIZE; 4182 else 4183 fl->cll_alt.region1 = 0; 4184 fl->cll_alt.region3 = spare - fl->cll_alt.region1; 4185 } 4186 4187 static void 4188 add_fl_to_sfl(struct adapter *sc, struct sge_fl *fl) 4189 { 4190 mtx_lock(&sc->sfl_lock); 4191 FL_LOCK(fl); 4192 if ((fl->flags & FL_DOOMED) == 0) { 4193 fl->flags |= FL_STARVING; 4194 TAILQ_INSERT_TAIL(&sc->sfl, fl, link); 4195 callout_reset(&sc->sfl_callout, hz / 5, refill_sfl, sc); 4196 } 4197 FL_UNLOCK(fl); 4198 mtx_unlock(&sc->sfl_lock); 4199 } 4200 4201 static int 4202 handle_sge_egr_update(struct sge_iq *iq, const struct rss_header *rss, 4203 struct mbuf *m) 4204 { 4205 const struct cpl_sge_egr_update *cpl = (const void *)(rss + 1); 4206 unsigned int qid = G_EGR_QID(ntohl(cpl->opcode_qid)); 4207 struct adapter *sc = iq->adapter; 4208 struct sge *s = &sc->sge; 4209 struct sge_eq *eq; 4210 4211 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__, 4212 rss->opcode)); 4213 4214 eq = s->eqmap[qid - s->eq_start]; 4215 EQ_LOCK(eq); 4216 KASSERT(eq->flags & EQ_CRFLUSHED, 4217 ("%s: unsolicited egress update", __func__)); 4218 eq->flags &= ~EQ_CRFLUSHED; 4219 eq->egr_update++; 4220 4221 if (__predict_false(eq->flags & EQ_DOOMED)) 4222 wakeup_one(eq); 4223 else if (eq->flags & EQ_STALLED && can_resume_tx(eq)) 4224 taskqueue_enqueue(sc->tq[eq->tx_chan], &eq->tx_task); 4225 EQ_UNLOCK(eq); 4226 4227 return (0); 4228 } 4229 4230 /* handle_fw_msg works for both fw4_msg and fw6_msg because this is valid */ 4231 CTASSERT(offsetof(struct cpl_fw4_msg, data) == \ 4232 offsetof(struct cpl_fw6_msg, data)); 4233 4234 static int 4235 handle_fw_msg(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 4236 { 4237 struct adapter *sc = iq->adapter; 4238 const struct cpl_fw6_msg *cpl = (const void *)(rss + 1); 4239 4240 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__, 4241 rss->opcode)); 4242 4243 if (cpl->type == FW_TYPE_RSSCPL || cpl->type == FW6_TYPE_RSSCPL) { 4244 const struct rss_header *rss2; 4245 4246 rss2 = (const struct rss_header *)&cpl->data[0]; 4247 return (sc->cpl_handler[rss2->opcode](iq, rss2, m)); 4248 } 4249 4250 

static int
sysctl_uint16(SYSCTL_HANDLER_ARGS)
{
	uint16_t *id = arg1;
	int i = *id;

	return sysctl_handle_int(oidp, &i, 0, req);
}

static int
sysctl_bufsizes(SYSCTL_HANDLER_ARGS)
{
	struct sge *s = arg1;
	struct hw_buf_info *hwb = &s->hw_buf_info[0];
	struct sw_zone_info *swz = &s->sw_zone_info[0];
	int i, rc;
	struct sbuf sb;
	char c;

	sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
	for (i = 0; i < SGE_FLBUF_SIZES; i++, hwb++) {
		if (hwb->zidx >= 0 && swz[hwb->zidx].size <= largest_rx_cluster)
			c = '*';
		else
			c = '\0';

		sbuf_printf(&sb, "%u%c ", hwb->size, c);
	}
	sbuf_trim(&sb);
	sbuf_finish(&sb);
	rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
	sbuf_delete(&sb);
	return (rc);
}
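
#if 0	/* not compiled: editor's note, values below are made up */
/*
 * Illustrative output of sysctl_bufsizes.  One entry is printed per hardware
 * SGE buffer size, and a '*' is appended to sizes backed by a software zone
 * no larger than hw.cxgbe.largest_rx_cluster, e.g.
 *
 *	# sysctl dev.t5nex.0.buffer_sizes
 *	dev.t5nex.0.buffer_sizes: 2048* 4096* 9216* 16384*
 *
 * A size without the '*' is known to the hardware but will not be used to
 * refill freelists given the current tunables.
 */
#endif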