1 /*- 2 * Copyright (c) 2011 Chelsio Communications, Inc. 3 * All rights reserved. 4 * Written by: Navdeep Parhar <np@FreeBSD.org> 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28 #include <sys/cdefs.h> 29 __FBSDID("$FreeBSD$"); 30 31 #include "opt_inet.h" 32 #include "opt_inet6.h" 33 34 #include <sys/types.h> 35 #include <sys/eventhandler.h> 36 #include <sys/mbuf.h> 37 #include <sys/socket.h> 38 #include <sys/kernel.h> 39 #include <sys/kdb.h> 40 #include <sys/malloc.h> 41 #include <sys/queue.h> 42 #include <sys/sbuf.h> 43 #include <sys/taskqueue.h> 44 #include <sys/time.h> 45 #include <sys/sysctl.h> 46 #include <sys/smp.h> 47 #include <sys/counter.h> 48 #include <net/bpf.h> 49 #include <net/ethernet.h> 50 #include <net/if.h> 51 #include <net/if_vlan_var.h> 52 #include <netinet/in.h> 53 #include <netinet/ip.h> 54 #include <netinet/ip6.h> 55 #include <netinet/tcp.h> 56 #include <machine/md_var.h> 57 #include <vm/vm.h> 58 #include <vm/pmap.h> 59 #ifdef DEV_NETMAP 60 #include <machine/bus.h> 61 #include <sys/selinfo.h> 62 #include <net/if_var.h> 63 #include <net/netmap.h> 64 #include <dev/netmap/netmap_kern.h> 65 #endif 66 67 #include "common/common.h" 68 #include "common/t4_regs.h" 69 #include "common/t4_regs_values.h" 70 #include "common/t4_msg.h" 71 72 #ifdef T4_PKT_TIMESTAMP 73 #define RX_COPY_THRESHOLD (MINCLSIZE - 8) 74 #else 75 #define RX_COPY_THRESHOLD MINCLSIZE 76 #endif 77 78 /* 79 * Ethernet frames are DMA'd at this byte offset into the freelist buffer. 80 * 0-7 are valid values. 81 */ 82 int fl_pktshift = 2; 83 TUNABLE_INT("hw.cxgbe.fl_pktshift", &fl_pktshift); 84 85 /* 86 * Pad ethernet payload up to this boundary. 87 * -1: driver should figure out a good value. 88 * 0: disable padding. 89 * Any power of 2 from 32 to 4096 (both inclusive) is also a valid value. 90 */ 91 int fl_pad = -1; 92 TUNABLE_INT("hw.cxgbe.fl_pad", &fl_pad); 93 94 /* 95 * Status page length. 96 * -1: driver should figure out a good value. 97 * 64 or 128 are the only other valid values. 98 */ 99 int spg_len = -1; 100 TUNABLE_INT("hw.cxgbe.spg_len", &spg_len); 101 102 /* 103 * Congestion drops. 104 * -1: no congestion feedback (not recommended). 105 * 0: backpressure the channel instead of dropping packets right away. 
106 * 1: no backpressure, drop packets for the congested queue immediately. 107 */ 108 static int cong_drop = 0; 109 TUNABLE_INT("hw.cxgbe.cong_drop", &cong_drop); 110 111 /* 112 * Deliver multiple frames in the same free list buffer if they fit. 113 * -1: let the driver decide whether to enable buffer packing or not. 114 * 0: disable buffer packing. 115 * 1: enable buffer packing. 116 */ 117 static int buffer_packing = -1; 118 TUNABLE_INT("hw.cxgbe.buffer_packing", &buffer_packing); 119 120 /* 121 * Start next frame in a packed buffer at this boundary. 122 * -1: driver should figure out a good value. 123 * T4: driver will ignore this and use the same value as fl_pad above. 124 * T5: 16, or a power of 2 from 64 to 4096 (both inclusive) is a valid value. 125 */ 126 static int fl_pack = -1; 127 TUNABLE_INT("hw.cxgbe.fl_pack", &fl_pack); 128 129 /* 130 * Allow the driver to create mbuf(s) in a cluster allocated for rx. 131 * 0: never; always allocate mbufs from the zone_mbuf UMA zone. 132 * 1: ok to create mbuf(s) within a cluster if there is room. 133 */ 134 static int allow_mbufs_in_cluster = 1; 135 TUNABLE_INT("hw.cxgbe.allow_mbufs_in_cluster", &allow_mbufs_in_cluster); 136 137 /* 138 * Largest rx cluster size that the driver is allowed to allocate. 139 */ 140 static int largest_rx_cluster = MJUM16BYTES; 141 TUNABLE_INT("hw.cxgbe.largest_rx_cluster", &largest_rx_cluster); 142 143 /* 144 * Size of cluster allocation that's most likely to succeed. The driver will 145 * fall back to this size if it fails to allocate clusters larger than this. 146 */ 147 static int safest_rx_cluster = PAGE_SIZE; 148 TUNABLE_INT("hw.cxgbe.safest_rx_cluster", &safest_rx_cluster); 149 150 /* Used to track coalesced tx work request */ 151 struct txpkts { 152 uint64_t *flitp; /* ptr to flit where next pkt should start */ 153 uint8_t npkt; /* # of packets in this work request */ 154 uint8_t nflits; /* # of flits used by this work request */ 155 uint16_t plen; /* total payload (sum of all packets) */ 156 }; 157 158 /* A packet's SGL. This + m_pkthdr has all info needed for tx */ 159 struct sgl { 160 int nsegs; /* # of segments in the SGL, 0 means imm. 
tx */ 161 int nflits; /* # of flits needed for the SGL */ 162 bus_dma_segment_t seg[TX_SGL_SEGS]; 163 }; 164 165 static int service_iq(struct sge_iq *, int); 166 static struct mbuf *get_fl_payload(struct adapter *, struct sge_fl *, uint32_t); 167 static int t4_eth_rx(struct sge_iq *, const struct rss_header *, struct mbuf *); 168 static inline void init_iq(struct sge_iq *, struct adapter *, int, int, int); 169 static inline void init_fl(struct adapter *, struct sge_fl *, int, int, char *); 170 static inline void init_eq(struct sge_eq *, int, int, uint8_t, uint16_t, 171 char *); 172 static int alloc_ring(struct adapter *, size_t, bus_dma_tag_t *, bus_dmamap_t *, 173 bus_addr_t *, void **); 174 static int free_ring(struct adapter *, bus_dma_tag_t, bus_dmamap_t, bus_addr_t, 175 void *); 176 static int alloc_iq_fl(struct port_info *, struct sge_iq *, struct sge_fl *, 177 int, int); 178 static int free_iq_fl(struct port_info *, struct sge_iq *, struct sge_fl *); 179 static void add_fl_sysctls(struct sysctl_ctx_list *, struct sysctl_oid *, 180 struct sge_fl *); 181 static int alloc_fwq(struct adapter *); 182 static int free_fwq(struct adapter *); 183 static int alloc_mgmtq(struct adapter *); 184 static int free_mgmtq(struct adapter *); 185 static int alloc_rxq(struct port_info *, struct sge_rxq *, int, int, 186 struct sysctl_oid *); 187 static int free_rxq(struct port_info *, struct sge_rxq *); 188 #ifdef TCP_OFFLOAD 189 static int alloc_ofld_rxq(struct port_info *, struct sge_ofld_rxq *, int, int, 190 struct sysctl_oid *); 191 static int free_ofld_rxq(struct port_info *, struct sge_ofld_rxq *); 192 #endif 193 #ifdef DEV_NETMAP 194 static int alloc_nm_rxq(struct port_info *, struct sge_nm_rxq *, int, int, 195 struct sysctl_oid *); 196 static int free_nm_rxq(struct port_info *, struct sge_nm_rxq *); 197 static int alloc_nm_txq(struct port_info *, struct sge_nm_txq *, int, int, 198 struct sysctl_oid *); 199 static int free_nm_txq(struct port_info *, struct sge_nm_txq *); 200 #endif 201 static int ctrl_eq_alloc(struct adapter *, struct sge_eq *); 202 static int eth_eq_alloc(struct adapter *, struct port_info *, struct sge_eq *); 203 #ifdef TCP_OFFLOAD 204 static int ofld_eq_alloc(struct adapter *, struct port_info *, struct sge_eq *); 205 #endif 206 static int alloc_eq(struct adapter *, struct port_info *, struct sge_eq *); 207 static int free_eq(struct adapter *, struct sge_eq *); 208 static int alloc_wrq(struct adapter *, struct port_info *, struct sge_wrq *, 209 struct sysctl_oid *); 210 static int free_wrq(struct adapter *, struct sge_wrq *); 211 static int alloc_txq(struct port_info *, struct sge_txq *, int, 212 struct sysctl_oid *); 213 static int free_txq(struct port_info *, struct sge_txq *); 214 static void oneseg_dma_callback(void *, bus_dma_segment_t *, int, int); 215 static inline void ring_fl_db(struct adapter *, struct sge_fl *); 216 static int refill_fl(struct adapter *, struct sge_fl *, int); 217 static void refill_sfl(void *); 218 static int alloc_fl_sdesc(struct sge_fl *); 219 static void free_fl_sdesc(struct adapter *, struct sge_fl *); 220 static void find_best_refill_source(struct adapter *, struct sge_fl *, int); 221 static void find_safe_refill_source(struct adapter *, struct sge_fl *); 222 static void add_fl_to_sfl(struct adapter *, struct sge_fl *); 223 224 static int get_pkt_sgl(struct sge_txq *, struct mbuf **, struct sgl *, int); 225 static int free_pkt_sgl(struct sge_txq *, struct sgl *); 226 static int write_txpkt_wr(struct port_info *, struct sge_txq *, struct mbuf *, 
227 struct sgl *); 228 static int add_to_txpkts(struct port_info *, struct sge_txq *, struct txpkts *, 229 struct mbuf *, struct sgl *); 230 static void write_txpkts_wr(struct sge_txq *, struct txpkts *); 231 static inline void write_ulp_cpl_sgl(struct port_info *, struct sge_txq *, 232 struct txpkts *, struct mbuf *, struct sgl *); 233 static int write_sgl_to_txd(struct sge_eq *, struct sgl *, caddr_t *); 234 static inline void copy_to_txd(struct sge_eq *, caddr_t, caddr_t *, int); 235 static inline void ring_eq_db(struct adapter *, struct sge_eq *); 236 static inline int reclaimable(struct sge_eq *); 237 static int reclaim_tx_descs(struct sge_txq *, int, int); 238 static void write_eqflush_wr(struct sge_eq *); 239 static __be64 get_flit(bus_dma_segment_t *, int, int); 240 static int handle_sge_egr_update(struct sge_iq *, const struct rss_header *, 241 struct mbuf *); 242 static int handle_fw_msg(struct sge_iq *, const struct rss_header *, 243 struct mbuf *); 244 245 static int sysctl_uint16(SYSCTL_HANDLER_ARGS); 246 static int sysctl_bufsizes(SYSCTL_HANDLER_ARGS); 247 248 static counter_u64_t extfree_refs; 249 static counter_u64_t extfree_rels; 250 251 /* 252 * Called on MOD_LOAD. Validates and calculates the SGE tunables. 253 */ 254 void 255 t4_sge_modload(void) 256 { 257 258 if (fl_pktshift < 0 || fl_pktshift > 7) { 259 printf("Invalid hw.cxgbe.fl_pktshift value (%d)," 260 " using 2 instead.\n", fl_pktshift); 261 fl_pktshift = 2; 262 } 263 264 if (spg_len != 64 && spg_len != 128) { 265 int len; 266 267 #if defined(__i386__) || defined(__amd64__) 268 len = cpu_clflush_line_size > 64 ? 128 : 64; 269 #else 270 len = 64; 271 #endif 272 if (spg_len != -1) { 273 printf("Invalid hw.cxgbe.spg_len value (%d)," 274 " using %d instead.\n", spg_len, len); 275 } 276 spg_len = len; 277 } 278 279 if (cong_drop < -1 || cong_drop > 1) { 280 printf("Invalid hw.cxgbe.cong_drop value (%d)," 281 " using 0 instead.\n", cong_drop); 282 cong_drop = 0; 283 } 284 285 extfree_refs = counter_u64_alloc(M_WAITOK); 286 extfree_rels = counter_u64_alloc(M_WAITOK); 287 counter_u64_zero(extfree_refs); 288 counter_u64_zero(extfree_rels); 289 } 290 291 void 292 t4_sge_modunload(void) 293 { 294 295 counter_u64_free(extfree_refs); 296 counter_u64_free(extfree_rels); 297 } 298 299 uint64_t 300 t4_sge_extfree_refs(void) 301 { 302 uint64_t refs, rels; 303 304 rels = counter_u64_fetch(extfree_rels); 305 refs = counter_u64_fetch(extfree_refs); 306 307 return (refs - rels); 308 } 309 310 void 311 t4_init_sge_cpl_handlers(struct adapter *sc) 312 { 313 314 t4_register_cpl_handler(sc, CPL_FW4_MSG, handle_fw_msg); 315 t4_register_cpl_handler(sc, CPL_FW6_MSG, handle_fw_msg); 316 t4_register_cpl_handler(sc, CPL_SGE_EGR_UPDATE, handle_sge_egr_update); 317 t4_register_cpl_handler(sc, CPL_RX_PKT, t4_eth_rx); 318 t4_register_fw_msg_handler(sc, FW6_TYPE_CMD_RPL, t4_handle_fw_rpl); 319 } 320 321 static inline void 322 setup_pad_and_pack_boundaries(struct adapter *sc) 323 { 324 uint32_t v, m; 325 int pad, pack; 326 327 pad = fl_pad; 328 if (fl_pad < 32 || fl_pad > 4096 || !powerof2(fl_pad)) { 329 /* 330 * If there is any chance that we might use buffer packing and 331 * the chip is a T4, then pick 64 as the pad/pack boundary. Set 332 * it to 32 in all other cases. 333 */ 334 pad = is_t4(sc) && buffer_packing ? 64 : 32; 335 336 /* 337 * For fl_pad = 0 we'll still write a reasonable value to the 338 * register but all the freelists will opt out of padding. 
339 * We'll complain here only if the user tried to set it to a 340 * value greater than 0 that was invalid. 341 */ 342 if (fl_pad > 0) { 343 device_printf(sc->dev, "Invalid hw.cxgbe.fl_pad value" 344 " (%d), using %d instead.\n", fl_pad, pad); 345 } 346 } 347 m = V_INGPADBOUNDARY(M_INGPADBOUNDARY); 348 v = V_INGPADBOUNDARY(ilog2(pad) - 5); 349 t4_set_reg_field(sc, A_SGE_CONTROL, m, v); 350 351 if (is_t4(sc)) { 352 if (fl_pack != -1 && fl_pack != pad) { 353 /* Complain but carry on. */ 354 device_printf(sc->dev, "hw.cxgbe.fl_pack (%d) ignored," 355 " using %d instead.\n", fl_pack, pad); 356 } 357 return; 358 } 359 360 pack = fl_pack; 361 if (fl_pack < 16 || fl_pack == 32 || fl_pack > 4096 || 362 !powerof2(fl_pack)) { 363 pack = max(sc->params.pci.mps, CACHE_LINE_SIZE); 364 MPASS(powerof2(pack)); 365 if (pack < 16) 366 pack = 16; 367 if (pack == 32) 368 pack = 64; 369 if (pack > 4096) 370 pack = 4096; 371 if (fl_pack != -1) { 372 device_printf(sc->dev, "Invalid hw.cxgbe.fl_pack value" 373 " (%d), using %d instead.\n", fl_pack, pack); 374 } 375 } 376 m = V_INGPACKBOUNDARY(M_INGPACKBOUNDARY); 377 if (pack == 16) 378 v = V_INGPACKBOUNDARY(0); 379 else 380 v = V_INGPACKBOUNDARY(ilog2(pack) - 5); 381 382 MPASS(!is_t4(sc)); /* T4 doesn't have SGE_CONTROL2 */ 383 t4_set_reg_field(sc, A_SGE_CONTROL2, m, v); 384 } 385 386 /* 387 * adap->params.vpd.cclk must be set up before this is called. 388 */ 389 void 390 t4_tweak_chip_settings(struct adapter *sc) 391 { 392 int i; 393 uint32_t v, m; 394 int intr_timer[SGE_NTIMERS] = {1, 5, 10, 50, 100, 200}; 395 int timer_max = M_TIMERVALUE0 * 1000 / sc->params.vpd.cclk; 396 int intr_pktcount[SGE_NCOUNTERS] = {1, 8, 16, 32}; /* 63 max */ 397 uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE); 398 static int sge_flbuf_sizes[] = { 399 MCLBYTES, 400 #if MJUMPAGESIZE != MCLBYTES 401 MJUMPAGESIZE, 402 MJUMPAGESIZE - CL_METADATA_SIZE, 403 MJUMPAGESIZE - 2 * MSIZE - CL_METADATA_SIZE, 404 #endif 405 MJUM9BYTES, 406 MJUM16BYTES, 407 MCLBYTES - MSIZE - CL_METADATA_SIZE, 408 MJUM9BYTES - CL_METADATA_SIZE, 409 MJUM16BYTES - CL_METADATA_SIZE, 410 }; 411 412 KASSERT(sc->flags & MASTER_PF, 413 ("%s: trying to change chip settings when not master.", __func__)); 414 415 m = V_PKTSHIFT(M_PKTSHIFT) | F_RXPKTCPLMODE | F_EGRSTATUSPAGESIZE; 416 v = V_PKTSHIFT(fl_pktshift) | F_RXPKTCPLMODE | 417 V_EGRSTATUSPAGESIZE(spg_len == 128); 418 t4_set_reg_field(sc, A_SGE_CONTROL, m, v); 419 420 setup_pad_and_pack_boundaries(sc); 421 422 v = V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10) | 423 V_HOSTPAGESIZEPF1(PAGE_SHIFT - 10) | 424 V_HOSTPAGESIZEPF2(PAGE_SHIFT - 10) | 425 V_HOSTPAGESIZEPF3(PAGE_SHIFT - 10) | 426 V_HOSTPAGESIZEPF4(PAGE_SHIFT - 10) | 427 V_HOSTPAGESIZEPF5(PAGE_SHIFT - 10) | 428 V_HOSTPAGESIZEPF6(PAGE_SHIFT - 10) | 429 V_HOSTPAGESIZEPF7(PAGE_SHIFT - 10); 430 t4_write_reg(sc, A_SGE_HOST_PAGE_SIZE, v); 431 432 KASSERT(nitems(sge_flbuf_sizes) <= SGE_FLBUF_SIZES, 433 ("%s: hw buffer size table too big", __func__)); 434 for (i = 0; i < min(nitems(sge_flbuf_sizes), SGE_FLBUF_SIZES); i++) { 435 t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE0 + (4 * i), 436 sge_flbuf_sizes[i]); 437 } 438 439 v = V_THRESHOLD_0(intr_pktcount[0]) | V_THRESHOLD_1(intr_pktcount[1]) | 440 V_THRESHOLD_2(intr_pktcount[2]) | V_THRESHOLD_3(intr_pktcount[3]); 441 t4_write_reg(sc, A_SGE_INGRESS_RX_THRESHOLD, v); 442 443 KASSERT(intr_timer[0] <= timer_max, 444 ("%s: not a single usable timer (%d, %d)", __func__, intr_timer[0], 445 timer_max)); 446 for (i = 1; i < nitems(intr_timer); i++) { 447 KASSERT(intr_timer[i] >= 
intr_timer[i - 1], 448 ("%s: timers not listed in increasing order (%d)", 449 __func__, i)); 450 451 while (intr_timer[i] > timer_max) { 452 if (i == nitems(intr_timer) - 1) { 453 intr_timer[i] = timer_max; 454 break; 455 } 456 intr_timer[i] += intr_timer[i - 1]; 457 intr_timer[i] /= 2; 458 } 459 } 460 461 v = V_TIMERVALUE0(us_to_core_ticks(sc, intr_timer[0])) | 462 V_TIMERVALUE1(us_to_core_ticks(sc, intr_timer[1])); 463 t4_write_reg(sc, A_SGE_TIMER_VALUE_0_AND_1, v); 464 v = V_TIMERVALUE2(us_to_core_ticks(sc, intr_timer[2])) | 465 V_TIMERVALUE3(us_to_core_ticks(sc, intr_timer[3])); 466 t4_write_reg(sc, A_SGE_TIMER_VALUE_2_AND_3, v); 467 v = V_TIMERVALUE4(us_to_core_ticks(sc, intr_timer[4])) | 468 V_TIMERVALUE5(us_to_core_ticks(sc, intr_timer[5])); 469 t4_write_reg(sc, A_SGE_TIMER_VALUE_4_AND_5, v); 470 471 if (cong_drop == 0) { 472 m = F_TUNNELCNGDROP0 | F_TUNNELCNGDROP1 | F_TUNNELCNGDROP2 | 473 F_TUNNELCNGDROP3; 474 t4_set_reg_field(sc, A_TP_PARA_REG3, m, 0); 475 } 476 477 /* 4K, 16K, 64K, 256K DDP "page sizes" */ 478 v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6); 479 t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, v); 480 481 m = v = F_TDDPTAGTCB; 482 t4_set_reg_field(sc, A_ULP_RX_CTL, m, v); 483 484 m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET | 485 F_RESETDDPOFFSET; 486 v = V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET; 487 t4_set_reg_field(sc, A_TP_PARA_REG5, m, v); 488 } 489 490 /* 491 * SGE wants the buffer to be at least 64B and then a multiple of 16. If 492 * padding is in use the buffer's start and end need to be aligned to the pad 493 * boundary as well. We'll just make sure that the size is a multiple of the 494 * boundary here; it is up to the buffer allocation code to make sure the start 495 * of the buffer is aligned as well. 496 */ 497 static inline int 498 hwsz_ok(struct adapter *sc, int hwsz) 499 { 500 int mask = fl_pad ? sc->sge.pad_boundary - 1 : 16 - 1; 501 502 return (hwsz >= 64 && (hwsz & mask) == 0); 503 } 504 505 /* 506 * XXX: driver really should be able to deal with unexpected settings.
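 * As an illustration of the decode below: the pad and pack boundaries are
 * read back as 1 << (G_INGPADBOUNDARY(r) + 5) and
 * 1 << (G_INGPACKBOUNDARY(r) + 5), the inverse of the ilog2(x) - 5 encoding
 * used when they were programmed in setup_pad_and_pack_boundaries, so a
 * field value of 0 means 32B and 7 means 4096B (with INGPACKBOUNDARY == 0
 * being the special 16B case for the pack boundary on T5+).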
507 */ 508 int 509 t4_read_chip_settings(struct adapter *sc) 510 { 511 struct sge *s = &sc->sge; 512 int i, j, n, rc = 0; 513 uint32_t m, v, r; 514 uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE); 515 static int sw_buf_sizes[] = { /* Sorted by size */ 516 MCLBYTES, 517 #if MJUMPAGESIZE != MCLBYTES 518 MJUMPAGESIZE, 519 #endif 520 MJUM9BYTES, 521 MJUM16BYTES 522 }; 523 struct sw_zone_info *swz, *safe_swz; 524 struct hw_buf_info *hwb; 525 526 m = V_PKTSHIFT(M_PKTSHIFT) | F_RXPKTCPLMODE | F_EGRSTATUSPAGESIZE; 527 v = V_PKTSHIFT(fl_pktshift) | F_RXPKTCPLMODE | 528 V_EGRSTATUSPAGESIZE(spg_len == 128); 529 r = t4_read_reg(sc, A_SGE_CONTROL); 530 if ((r & m) != v) { 531 device_printf(sc->dev, "invalid SGE_CONTROL(0x%x)\n", r); 532 rc = EINVAL; 533 } 534 s->pad_boundary = 1 << (G_INGPADBOUNDARY(r) + 5); 535 536 if (is_t4(sc)) 537 s->pack_boundary = s->pad_boundary; 538 else { 539 r = t4_read_reg(sc, A_SGE_CONTROL2); 540 if (G_INGPACKBOUNDARY(r) == 0) 541 s->pack_boundary = 16; 542 else 543 s->pack_boundary = 1 << (G_INGPACKBOUNDARY(r) + 5); 544 } 545 546 v = V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10) | 547 V_HOSTPAGESIZEPF1(PAGE_SHIFT - 10) | 548 V_HOSTPAGESIZEPF2(PAGE_SHIFT - 10) | 549 V_HOSTPAGESIZEPF3(PAGE_SHIFT - 10) | 550 V_HOSTPAGESIZEPF4(PAGE_SHIFT - 10) | 551 V_HOSTPAGESIZEPF5(PAGE_SHIFT - 10) | 552 V_HOSTPAGESIZEPF6(PAGE_SHIFT - 10) | 553 V_HOSTPAGESIZEPF7(PAGE_SHIFT - 10); 554 r = t4_read_reg(sc, A_SGE_HOST_PAGE_SIZE); 555 if (r != v) { 556 device_printf(sc->dev, "invalid SGE_HOST_PAGE_SIZE(0x%x)\n", r); 557 rc = EINVAL; 558 } 559 560 /* Filter out unusable hw buffer sizes entirely (mark with -2). */ 561 hwb = &s->hw_buf_info[0]; 562 for (i = 0; i < nitems(s->hw_buf_info); i++, hwb++) { 563 r = t4_read_reg(sc, A_SGE_FL_BUFFER_SIZE0 + (4 * i)); 564 hwb->size = r; 565 hwb->zidx = hwsz_ok(sc, r) ? -1 : -2; 566 hwb->next = -1; 567 } 568 569 /* 570 * Create a sorted list in decreasing order of hw buffer sizes (and so 571 * increasing order of spare area) for each software zone. 572 * 573 * If padding is enabled then the start and end of the buffer must align 574 * to the pad boundary; if packing is enabled then they must align with 575 * the pack boundary as well. Allocations from the cluster zones are 576 * aligned to min(size, 4K), so the buffer starts at that alignment and 577 * ends at hwb->size alignment. If mbuf inlining is allowed the 578 * starting alignment will be reduced to MSIZE and the driver will 579 * exercise appropriate caution when deciding on the best buffer layout 580 * to use. 
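 * As a rough illustration (assuming MJUMPAGESIZE == PAGE_SIZE == 4K): the 4K
 * cluster zone can be paired with hw buffer sizes such as MJUMPAGESIZE and
 * MJUMPAGESIZE - CL_METADATA_SIZE from the table programmed in
 * t4_tweak_chip_settings; the loop below links them largest first, and the
 * smaller entries leave spare room in the cluster for metadata and, when
 * allow_mbufs_in_cluster permits it, inlined mbufs.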
581 */ 582 n = 0; /* no usable buffer size to begin with */ 583 swz = &s->sw_zone_info[0]; 584 safe_swz = NULL; 585 for (i = 0; i < SW_ZONE_SIZES; i++, swz++) { 586 int8_t head = -1, tail = -1; 587 588 swz->size = sw_buf_sizes[i]; 589 swz->zone = m_getzone(swz->size); 590 swz->type = m_gettype(swz->size); 591 592 if (swz->size < PAGE_SIZE) { 593 MPASS(powerof2(swz->size)); 594 if (fl_pad && (swz->size % sc->sge.pad_boundary != 0)) 595 continue; 596 } 597 598 if (swz->size == safest_rx_cluster) 599 safe_swz = swz; 600 601 hwb = &s->hw_buf_info[0]; 602 for (j = 0; j < SGE_FLBUF_SIZES; j++, hwb++) { 603 if (hwb->zidx != -1 || hwb->size > swz->size) 604 continue; 605 #ifdef INVARIANTS 606 if (fl_pad) 607 MPASS(hwb->size % sc->sge.pad_boundary == 0); 608 #endif 609 hwb->zidx = i; 610 if (head == -1) 611 head = tail = j; 612 else if (hwb->size < s->hw_buf_info[tail].size) { 613 s->hw_buf_info[tail].next = j; 614 tail = j; 615 } else { 616 int8_t *cur; 617 struct hw_buf_info *t; 618 619 for (cur = &head; *cur != -1; cur = &t->next) { 620 t = &s->hw_buf_info[*cur]; 621 if (hwb->size == t->size) { 622 hwb->zidx = -2; 623 break; 624 } 625 if (hwb->size > t->size) { 626 hwb->next = *cur; 627 *cur = j; 628 break; 629 } 630 } 631 } 632 } 633 swz->head_hwidx = head; 634 swz->tail_hwidx = tail; 635 636 if (tail != -1) { 637 n++; 638 if (swz->size - s->hw_buf_info[tail].size >= 639 CL_METADATA_SIZE) 640 sc->flags |= BUF_PACKING_OK; 641 } 642 } 643 if (n == 0) { 644 device_printf(sc->dev, "no usable SGE FL buffer size.\n"); 645 rc = EINVAL; 646 } 647 648 s->safe_hwidx1 = -1; 649 s->safe_hwidx2 = -1; 650 if (safe_swz != NULL) { 651 s->safe_hwidx1 = safe_swz->head_hwidx; 652 for (i = safe_swz->head_hwidx; i != -1; i = hwb->next) { 653 int spare; 654 655 hwb = &s->hw_buf_info[i]; 656 #ifdef INVARIANTS 657 if (fl_pad) 658 MPASS(hwb->size % sc->sge.pad_boundary == 0); 659 #endif 660 spare = safe_swz->size - hwb->size; 661 if (spare >= CL_METADATA_SIZE) { 662 s->safe_hwidx2 = i; 663 break; 664 } 665 } 666 } 667 668 r = t4_read_reg(sc, A_SGE_INGRESS_RX_THRESHOLD); 669 s->counter_val[0] = G_THRESHOLD_0(r); 670 s->counter_val[1] = G_THRESHOLD_1(r); 671 s->counter_val[2] = G_THRESHOLD_2(r); 672 s->counter_val[3] = G_THRESHOLD_3(r); 673 674 r = t4_read_reg(sc, A_SGE_TIMER_VALUE_0_AND_1); 675 s->timer_val[0] = G_TIMERVALUE0(r) / core_ticks_per_usec(sc); 676 s->timer_val[1] = G_TIMERVALUE1(r) / core_ticks_per_usec(sc); 677 r = t4_read_reg(sc, A_SGE_TIMER_VALUE_2_AND_3); 678 s->timer_val[2] = G_TIMERVALUE2(r) / core_ticks_per_usec(sc); 679 s->timer_val[3] = G_TIMERVALUE3(r) / core_ticks_per_usec(sc); 680 r = t4_read_reg(sc, A_SGE_TIMER_VALUE_4_AND_5); 681 s->timer_val[4] = G_TIMERVALUE4(r) / core_ticks_per_usec(sc); 682 s->timer_val[5] = G_TIMERVALUE5(r) / core_ticks_per_usec(sc); 683 684 if (cong_drop == 0) { 685 m = F_TUNNELCNGDROP0 | F_TUNNELCNGDROP1 | F_TUNNELCNGDROP2 | 686 F_TUNNELCNGDROP3; 687 r = t4_read_reg(sc, A_TP_PARA_REG3); 688 if (r & m) { 689 device_printf(sc->dev, 690 "invalid TP_PARA_REG3(0x%x)\n", r); 691 rc = EINVAL; 692 } 693 } 694 695 v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6); 696 r = t4_read_reg(sc, A_ULP_RX_TDDP_PSZ); 697 if (r != v) { 698 device_printf(sc->dev, "invalid ULP_RX_TDDP_PSZ(0x%x)\n", r); 699 rc = EINVAL; 700 } 701 702 m = v = F_TDDPTAGTCB; 703 r = t4_read_reg(sc, A_ULP_RX_CTL); 704 if ((r & m) != v) { 705 device_printf(sc->dev, "invalid ULP_RX_CTL(0x%x)\n", r); 706 rc = EINVAL; 707 } 708 709 m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET | 710 F_RESETDDPOFFSET; 711 v = 
V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET; 712 r = t4_read_reg(sc, A_TP_PARA_REG5); 713 if ((r & m) != v) { 714 device_printf(sc->dev, "invalid TP_PARA_REG5(0x%x)\n", r); 715 rc = EINVAL; 716 } 717 718 r = t4_read_reg(sc, A_SGE_CONM_CTRL); 719 s->fl_starve_threshold = G_EGRTHRESHOLD(r) * 2 + 1; 720 if (is_t4(sc)) 721 s->fl_starve_threshold2 = s->fl_starve_threshold; 722 else 723 s->fl_starve_threshold2 = G_EGRTHRESHOLDPACKING(r) * 2 + 1; 724 725 /* egress queues: log2 of # of doorbells per BAR2 page */ 726 r = t4_read_reg(sc, A_SGE_EGRESS_QUEUES_PER_PAGE_PF); 727 r >>= S_QUEUESPERPAGEPF0 + 728 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * sc->pf; 729 s->eq_s_qpp = r & M_QUEUESPERPAGEPF0; 730 731 /* ingress queues: log2 of # of doorbells per BAR2 page */ 732 r = t4_read_reg(sc, A_SGE_INGRESS_QUEUES_PER_PAGE_PF); 733 r >>= S_QUEUESPERPAGEPF0 + 734 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * sc->pf; 735 s->iq_s_qpp = r & M_QUEUESPERPAGEPF0; 736 737 t4_init_tp_params(sc); 738 739 t4_read_mtu_tbl(sc, sc->params.mtus, NULL); 740 t4_load_mtus(sc, sc->params.mtus, sc->params.a_wnd, sc->params.b_wnd); 741 742 return (rc); 743 } 744 745 int 746 t4_create_dma_tag(struct adapter *sc) 747 { 748 int rc; 749 750 rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0, 751 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, 752 BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL, 753 NULL, &sc->dmat); 754 if (rc != 0) { 755 device_printf(sc->dev, 756 "failed to create main DMA tag: %d\n", rc); 757 } 758 759 return (rc); 760 } 761 762 void 763 t4_sge_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx, 764 struct sysctl_oid_list *children) 765 { 766 767 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "buffer_sizes", 768 CTLTYPE_STRING | CTLFLAG_RD, &sc->sge, 0, sysctl_bufsizes, "A", 769 "freelist buffer sizes"); 770 771 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pktshift", CTLFLAG_RD, 772 NULL, fl_pktshift, "payload DMA offset in rx buffer (bytes)"); 773 774 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pad", CTLFLAG_RD, 775 NULL, sc->sge.pad_boundary, "payload pad boundary (bytes)"); 776 777 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "spg_len", CTLFLAG_RD, 778 NULL, spg_len, "status page size (bytes)"); 779 780 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cong_drop", CTLFLAG_RD, 781 NULL, cong_drop, "congestion drop setting"); 782 783 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pack", CTLFLAG_RD, 784 NULL, sc->sge.pack_boundary, "payload pack boundary (bytes)"); 785 } 786 787 int 788 t4_destroy_dma_tag(struct adapter *sc) 789 { 790 if (sc->dmat) 791 bus_dma_tag_destroy(sc->dmat); 792 793 return (0); 794 } 795 796 /* 797 * Allocate and initialize the firmware event queue and the management queue. 798 * 799 * Returns errno on failure. Resources allocated up to that point may still be 800 * allocated. Caller is responsible for cleanup in case this function fails. 801 */ 802 int 803 t4_setup_adapter_queues(struct adapter *sc) 804 { 805 int rc; 806 807 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 808 809 sysctl_ctx_init(&sc->ctx); 810 sc->flags |= ADAP_SYSCTL_CTX; 811 812 /* 813 * Firmware event queue 814 */ 815 rc = alloc_fwq(sc); 816 if (rc != 0) 817 return (rc); 818 819 /* 820 * Management queue. This is just a control queue that uses the fwq as 821 * its associated iq. 
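 * (Both the fwq and the mgmtq created here are released again in
 * t4_teardown_adapter_queues; a failure from alloc_mgmtq below is simply
 * returned and, as noted above, the caller is expected to do the cleanup.)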
822 */ 823 rc = alloc_mgmtq(sc); 824 825 return (rc); 826 } 827 828 /* 829 * Idempotent 830 */ 831 int 832 t4_teardown_adapter_queues(struct adapter *sc) 833 { 834 835 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 836 837 /* Do this before freeing the queue */ 838 if (sc->flags & ADAP_SYSCTL_CTX) { 839 sysctl_ctx_free(&sc->ctx); 840 sc->flags &= ~ADAP_SYSCTL_CTX; 841 } 842 843 free_mgmtq(sc); 844 free_fwq(sc); 845 846 return (0); 847 } 848 849 static inline int 850 port_intr_count(struct port_info *pi) 851 { 852 int rc = 0; 853 854 if (pi->flags & INTR_RXQ) 855 rc += pi->nrxq; 856 #ifdef TCP_OFFLOAD 857 if (pi->flags & INTR_OFLD_RXQ) 858 rc += pi->nofldrxq; 859 #endif 860 #ifdef DEV_NETMAP 861 if (pi->flags & INTR_NM_RXQ) 862 rc += pi->nnmrxq; 863 #endif 864 return (rc); 865 } 866 867 static inline int 868 first_vector(struct port_info *pi) 869 { 870 struct adapter *sc = pi->adapter; 871 int rc = T4_EXTRA_INTR, i; 872 873 if (sc->intr_count == 1) 874 return (0); 875 876 for_each_port(sc, i) { 877 if (i == pi->port_id) 878 break; 879 880 rc += port_intr_count(sc->port[i]); 881 } 882 883 return (rc); 884 } 885 886 /* 887 * Given an arbitrary "index," come up with an iq that can be used by other 888 * queues (of this port) for interrupt forwarding, SGE egress updates, etc. 889 * The iq returned is guaranteed to be something that takes direct interrupts. 890 */ 891 static struct sge_iq * 892 port_intr_iq(struct port_info *pi, int idx) 893 { 894 struct adapter *sc = pi->adapter; 895 struct sge *s = &sc->sge; 896 struct sge_iq *iq = NULL; 897 int nintr, i; 898 899 if (sc->intr_count == 1) 900 return (&sc->sge.fwq); 901 902 nintr = port_intr_count(pi); 903 KASSERT(nintr != 0, 904 ("%s: pi %p has no exclusive interrupts, total interrupts = %d", 905 __func__, pi, sc->intr_count)); 906 #ifdef DEV_NETMAP 907 /* Exclude netmap queues as they can't take anyone else's interrupts */ 908 if (pi->flags & INTR_NM_RXQ) 909 nintr -= pi->nnmrxq; 910 KASSERT(nintr > 0, 911 ("%s: pi %p has nintr %d after netmap adjustment of %d", __func__, 912 pi, nintr, pi->nnmrxq)); 913 #endif 914 i = idx % nintr; 915 916 if (pi->flags & INTR_RXQ) { 917 if (i < pi->nrxq) { 918 iq = &s->rxq[pi->first_rxq + i].iq; 919 goto done; 920 } 921 i -= pi->nrxq; 922 } 923 #ifdef TCP_OFFLOAD 924 if (pi->flags & INTR_OFLD_RXQ) { 925 if (i < pi->nofldrxq) { 926 iq = &s->ofld_rxq[pi->first_ofld_rxq + i].iq; 927 goto done; 928 } 929 i -= pi->nofldrxq; 930 } 931 #endif 932 panic("%s: pi %p, intr_flags 0x%lx, idx %d, total intr %d\n", __func__, 933 pi, pi->flags & INTR_ALL, idx, nintr); 934 done: 935 MPASS(iq != NULL); 936 KASSERT(iq->flags & IQ_INTR, 937 ("%s: iq %p (port %p, intr_flags 0x%lx, idx %d)", __func__, iq, pi, 938 pi->flags & INTR_ALL, idx)); 939 return (iq); 940 } 941 942 /* Maximum payload that can be delivered with a single iq descriptor */ 943 static inline int 944 mtu_to_max_payload(struct adapter *sc, int mtu, const int toe) 945 { 946 int payload; 947 948 #ifdef TCP_OFFLOAD 949 if (toe) { 950 payload = sc->tt.rx_coalesce ? 
951 G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2)) : mtu; 952 } else { 953 #endif 954 /* large enough even when hw VLAN extraction is disabled */ 955 payload = fl_pktshift + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + 956 mtu; 957 #ifdef TCP_OFFLOAD 958 } 959 #endif 960 961 return (payload); 962 } 963 964 int 965 t4_setup_port_queues(struct port_info *pi) 966 { 967 int rc = 0, i, j, intr_idx, iqid; 968 struct sge_rxq *rxq; 969 struct sge_txq *txq; 970 struct sge_wrq *ctrlq; 971 #ifdef TCP_OFFLOAD 972 struct sge_ofld_rxq *ofld_rxq; 973 struct sge_wrq *ofld_txq; 974 #endif 975 #ifdef DEV_NETMAP 976 struct sge_nm_rxq *nm_rxq; 977 struct sge_nm_txq *nm_txq; 978 #endif 979 char name[16]; 980 struct adapter *sc = pi->adapter; 981 struct ifnet *ifp = pi->ifp; 982 struct sysctl_oid *oid = device_get_sysctl_tree(pi->dev); 983 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 984 int maxp, mtu = ifp->if_mtu; 985 986 /* Interrupt vector to start from (when using multiple vectors) */ 987 intr_idx = first_vector(pi); 988 989 /* 990 * First pass over all NIC and TOE rx queues: 991 * a) initialize iq and fl 992 * b) allocate queue iff it will take direct interrupts. 993 */ 994 maxp = mtu_to_max_payload(sc, mtu, 0); 995 if (pi->flags & INTR_RXQ) { 996 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "rxq", 997 CTLFLAG_RD, NULL, "rx queues"); 998 } 999 for_each_rxq(pi, i, rxq) { 1000 1001 init_iq(&rxq->iq, sc, pi->tmr_idx, pi->pktc_idx, pi->qsize_rxq); 1002 1003 snprintf(name, sizeof(name), "%s rxq%d-fl", 1004 device_get_nameunit(pi->dev), i); 1005 init_fl(sc, &rxq->fl, pi->qsize_rxq / 8, maxp, name); 1006 1007 if (pi->flags & INTR_RXQ) { 1008 rxq->iq.flags |= IQ_INTR; 1009 rc = alloc_rxq(pi, rxq, intr_idx, i, oid); 1010 if (rc != 0) 1011 goto done; 1012 intr_idx++; 1013 } 1014 } 1015 #ifdef TCP_OFFLOAD 1016 maxp = mtu_to_max_payload(sc, mtu, 1); 1017 if (is_offload(sc) && pi->flags & INTR_OFLD_RXQ) { 1018 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "ofld_rxq", 1019 CTLFLAG_RD, NULL, 1020 "rx queues for offloaded TCP connections"); 1021 } 1022 for_each_ofld_rxq(pi, i, ofld_rxq) { 1023 1024 init_iq(&ofld_rxq->iq, sc, pi->tmr_idx, pi->pktc_idx, 1025 pi->qsize_rxq); 1026 1027 snprintf(name, sizeof(name), "%s ofld_rxq%d-fl", 1028 device_get_nameunit(pi->dev), i); 1029 init_fl(sc, &ofld_rxq->fl, pi->qsize_rxq / 8, maxp, name); 1030 1031 if (pi->flags & INTR_OFLD_RXQ) { 1032 ofld_rxq->iq.flags |= IQ_INTR; 1033 rc = alloc_ofld_rxq(pi, ofld_rxq, intr_idx, i, oid); 1034 if (rc != 0) 1035 goto done; 1036 intr_idx++; 1037 } 1038 } 1039 #endif 1040 #ifdef DEV_NETMAP 1041 /* 1042 * We don't have buffers to back the netmap rx queues right now so we 1043 * create the queues in a way that doesn't set off any congestion signal 1044 * in the chip. 1045 */ 1046 if (pi->flags & INTR_NM_RXQ) { 1047 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "nm_rxq", 1048 CTLFLAG_RD, NULL, "rx queues for netmap"); 1049 for_each_nm_rxq(pi, i, nm_rxq) { 1050 rc = alloc_nm_rxq(pi, nm_rxq, intr_idx, i, oid); 1051 if (rc != 0) 1052 goto done; 1053 intr_idx++; 1054 } 1055 } 1056 #endif 1057 1058 /* 1059 * Second pass over all NIC and TOE rx queues. The queues forwarding 1060 * their interrupts are allocated now. 
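 * Each of these queues is pointed at an interrupt-capable iq via
 * port_intr_iq(pi, j)->abs_id, with j advancing per queue so that the
 * forwarded interrupts are spread round-robin across the iqs that take
 * direct interrupts.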
1061 */ 1062 j = 0; 1063 if (!(pi->flags & INTR_RXQ)) { 1064 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "rxq", 1065 CTLFLAG_RD, NULL, "rx queues"); 1066 for_each_rxq(pi, i, rxq) { 1067 MPASS(!(rxq->iq.flags & IQ_INTR)); 1068 1069 intr_idx = port_intr_iq(pi, j)->abs_id; 1070 1071 rc = alloc_rxq(pi, rxq, intr_idx, i, oid); 1072 if (rc != 0) 1073 goto done; 1074 j++; 1075 } 1076 } 1077 #ifdef TCP_OFFLOAD 1078 if (is_offload(sc) && !(pi->flags & INTR_OFLD_RXQ)) { 1079 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "ofld_rxq", 1080 CTLFLAG_RD, NULL, 1081 "rx queues for offloaded TCP connections"); 1082 for_each_ofld_rxq(pi, i, ofld_rxq) { 1083 MPASS(!(ofld_rxq->iq.flags & IQ_INTR)); 1084 1085 intr_idx = port_intr_iq(pi, j)->abs_id; 1086 1087 rc = alloc_ofld_rxq(pi, ofld_rxq, intr_idx, i, oid); 1088 if (rc != 0) 1089 goto done; 1090 j++; 1091 } 1092 } 1093 #endif 1094 #ifdef DEV_NETMAP 1095 if (!(pi->flags & INTR_NM_RXQ)) 1096 CXGBE_UNIMPLEMENTED(__func__); 1097 #endif 1098 1099 /* 1100 * Now the tx queues. Only one pass needed. 1101 */ 1102 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "txq", CTLFLAG_RD, 1103 NULL, "tx queues"); 1104 j = 0; 1105 for_each_txq(pi, i, txq) { 1106 iqid = port_intr_iq(pi, j)->cntxt_id; 1107 snprintf(name, sizeof(name), "%s txq%d", 1108 device_get_nameunit(pi->dev), i); 1109 init_eq(&txq->eq, EQ_ETH, pi->qsize_txq, pi->tx_chan, iqid, 1110 name); 1111 1112 rc = alloc_txq(pi, txq, i, oid); 1113 if (rc != 0) 1114 goto done; 1115 j++; 1116 } 1117 #ifdef TCP_OFFLOAD 1118 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "ofld_txq", 1119 CTLFLAG_RD, NULL, "tx queues for offloaded TCP connections"); 1120 for_each_ofld_txq(pi, i, ofld_txq) { 1121 struct sysctl_oid *oid2; 1122 1123 iqid = port_intr_iq(pi, j)->cntxt_id; 1124 snprintf(name, sizeof(name), "%s ofld_txq%d", 1125 device_get_nameunit(pi->dev), i); 1126 init_eq(&ofld_txq->eq, EQ_OFLD, pi->qsize_txq, pi->tx_chan, 1127 iqid, name); 1128 1129 snprintf(name, sizeof(name), "%d", i); 1130 oid2 = SYSCTL_ADD_NODE(&pi->ctx, SYSCTL_CHILDREN(oid), OID_AUTO, 1131 name, CTLFLAG_RD, NULL, "offload tx queue"); 1132 1133 rc = alloc_wrq(sc, pi, ofld_txq, oid2); 1134 if (rc != 0) 1135 goto done; 1136 j++; 1137 } 1138 #endif 1139 #ifdef DEV_NETMAP 1140 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "nm_txq", 1141 CTLFLAG_RD, NULL, "tx queues for netmap use"); 1142 for_each_nm_txq(pi, i, nm_txq) { 1143 iqid = pi->first_nm_rxq + (j % pi->nnmrxq); 1144 rc = alloc_nm_txq(pi, nm_txq, iqid, i, oid); 1145 if (rc != 0) 1146 goto done; 1147 j++; 1148 } 1149 #endif 1150 1151 /* 1152 * Finally, the control queue. 
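 * There is one control queue per port (sc->sge.ctrlq[pi->port_id]): a work
 * request queue of CTRL_EQ_QSIZE descriptors whose egress updates go to the
 * port's first interrupt-capable iq, port_intr_iq(pi, 0).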
1153 */ 1154 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "ctrlq", CTLFLAG_RD, 1155 NULL, "ctrl queue"); 1156 ctrlq = &sc->sge.ctrlq[pi->port_id]; 1157 iqid = port_intr_iq(pi, 0)->cntxt_id; 1158 snprintf(name, sizeof(name), "%s ctrlq", device_get_nameunit(pi->dev)); 1159 init_eq(&ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, pi->tx_chan, iqid, name); 1160 rc = alloc_wrq(sc, pi, ctrlq, oid); 1161 1162 done: 1163 if (rc) 1164 t4_teardown_port_queues(pi); 1165 1166 return (rc); 1167 } 1168 1169 /* 1170 * Idempotent 1171 */ 1172 int 1173 t4_teardown_port_queues(struct port_info *pi) 1174 { 1175 int i; 1176 struct adapter *sc = pi->adapter; 1177 struct sge_rxq *rxq; 1178 struct sge_txq *txq; 1179 #ifdef TCP_OFFLOAD 1180 struct sge_ofld_rxq *ofld_rxq; 1181 struct sge_wrq *ofld_txq; 1182 #endif 1183 #ifdef DEV_NETMAP 1184 struct sge_nm_rxq *nm_rxq; 1185 struct sge_nm_txq *nm_txq; 1186 #endif 1187 1188 /* Do this before freeing the queues */ 1189 if (pi->flags & PORT_SYSCTL_CTX) { 1190 sysctl_ctx_free(&pi->ctx); 1191 pi->flags &= ~PORT_SYSCTL_CTX; 1192 } 1193 1194 /* 1195 * Take down all the tx queues first, as they reference the rx queues 1196 * (for egress updates, etc.). 1197 */ 1198 1199 free_wrq(sc, &sc->sge.ctrlq[pi->port_id]); 1200 1201 for_each_txq(pi, i, txq) { 1202 free_txq(pi, txq); 1203 } 1204 #ifdef TCP_OFFLOAD 1205 for_each_ofld_txq(pi, i, ofld_txq) { 1206 free_wrq(sc, ofld_txq); 1207 } 1208 #endif 1209 #ifdef DEV_NETMAP 1210 for_each_nm_txq(pi, i, nm_txq) 1211 free_nm_txq(pi, nm_txq); 1212 #endif 1213 1214 /* 1215 * Then take down the rx queues that forward their interrupts, as they 1216 * reference other rx queues. 1217 */ 1218 1219 for_each_rxq(pi, i, rxq) { 1220 if ((rxq->iq.flags & IQ_INTR) == 0) 1221 free_rxq(pi, rxq); 1222 } 1223 #ifdef TCP_OFFLOAD 1224 for_each_ofld_rxq(pi, i, ofld_rxq) { 1225 if ((ofld_rxq->iq.flags & IQ_INTR) == 0) 1226 free_ofld_rxq(pi, ofld_rxq); 1227 } 1228 #endif 1229 #ifdef DEV_NETMAP 1230 for_each_nm_rxq(pi, i, nm_rxq) 1231 free_nm_rxq(pi, nm_rxq); 1232 #endif 1233 1234 /* 1235 * Then take down the rx queues that take direct interrupts. 1236 */ 1237 1238 for_each_rxq(pi, i, rxq) { 1239 if (rxq->iq.flags & IQ_INTR) 1240 free_rxq(pi, rxq); 1241 } 1242 #ifdef TCP_OFFLOAD 1243 for_each_ofld_rxq(pi, i, ofld_rxq) { 1244 if (ofld_rxq->iq.flags & IQ_INTR) 1245 free_ofld_rxq(pi, ofld_rxq); 1246 } 1247 #endif 1248 #ifdef DEV_NETMAP 1249 CXGBE_UNIMPLEMENTED(__func__); 1250 #endif 1251 1252 return (0); 1253 } 1254 1255 /* 1256 * Deals with errors and the firmware event queue. All data rx queues forward 1257 * their interrupt to the firmware event queue. 
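 * The IQS_IDLE -> IQS_BUSY atomic_cmpset in the handlers below ensures that
 * only one thread services a given ingress queue at a time; t4_intr and
 * t4_intr_evt rely on the same state transition for the other iqs.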
1258 */ 1259 void 1260 t4_intr_all(void *arg) 1261 { 1262 struct adapter *sc = arg; 1263 struct sge_iq *fwq = &sc->sge.fwq; 1264 1265 t4_intr_err(arg); 1266 if (atomic_cmpset_int(&fwq->state, IQS_IDLE, IQS_BUSY)) { 1267 service_iq(fwq, 0); 1268 atomic_cmpset_int(&fwq->state, IQS_BUSY, IQS_IDLE); 1269 } 1270 } 1271 1272 /* Deals with error interrupts */ 1273 void 1274 t4_intr_err(void *arg) 1275 { 1276 struct adapter *sc = arg; 1277 1278 t4_write_reg(sc, MYPF_REG(A_PCIE_PF_CLI), 0); 1279 t4_slow_intr_handler(sc); 1280 } 1281 1282 void 1283 t4_intr_evt(void *arg) 1284 { 1285 struct sge_iq *iq = arg; 1286 1287 if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) { 1288 service_iq(iq, 0); 1289 atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE); 1290 } 1291 } 1292 1293 void 1294 t4_intr(void *arg) 1295 { 1296 struct sge_iq *iq = arg; 1297 1298 if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) { 1299 service_iq(iq, 0); 1300 atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE); 1301 } 1302 } 1303 1304 /* 1305 * Deals with anything and everything on the given ingress queue. 1306 */ 1307 static int 1308 service_iq(struct sge_iq *iq, int budget) 1309 { 1310 struct sge_iq *q; 1311 struct sge_rxq *rxq = iq_to_rxq(iq); /* Use iff iq is part of rxq */ 1312 struct sge_fl *fl; /* Use iff IQ_HAS_FL */ 1313 struct adapter *sc = iq->adapter; 1314 struct iq_desc *d = &iq->desc[iq->cidx]; 1315 int ndescs = 0, limit; 1316 int rsp_type, refill; 1317 uint32_t lq; 1318 uint16_t fl_hw_cidx; 1319 struct mbuf *m0; 1320 STAILQ_HEAD(, sge_iq) iql = STAILQ_HEAD_INITIALIZER(iql); 1321 #if defined(INET) || defined(INET6) 1322 const struct timeval lro_timeout = {0, sc->lro_timeout}; 1323 #endif 1324 1325 KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq)); 1326 1327 limit = budget ? budget : iq->qsize / 16; 1328 1329 if (iq->flags & IQ_HAS_FL) { 1330 fl = &rxq->fl; 1331 fl_hw_cidx = fl->hw_cidx; /* stable snapshot */ 1332 } else { 1333 fl = NULL; 1334 fl_hw_cidx = 0; /* to silence gcc warning */ 1335 } 1336 1337 /* 1338 * We always come back and check the descriptor ring for new indirect 1339 * interrupts and other responses after running a single handler. 1340 */ 1341 for (;;) { 1342 while ((d->rsp.u.type_gen & F_RSPD_GEN) == iq->gen) { 1343 1344 rmb(); 1345 1346 refill = 0; 1347 m0 = NULL; 1348 rsp_type = G_RSPD_TYPE(d->rsp.u.type_gen); 1349 lq = be32toh(d->rsp.pldbuflen_qid); 1350 1351 switch (rsp_type) { 1352 case X_RSPD_TYPE_FLBUF: 1353 1354 KASSERT(iq->flags & IQ_HAS_FL, 1355 ("%s: data for an iq (%p) with no freelist", 1356 __func__, iq)); 1357 1358 m0 = get_fl_payload(sc, fl, lq); 1359 if (__predict_false(m0 == NULL)) 1360 goto process_iql; 1361 refill = IDXDIFF(fl->hw_cidx, fl_hw_cidx, fl->sidx) > 2; 1362 #ifdef T4_PKT_TIMESTAMP 1363 /* 1364 * 60 bit timestamp for the payload is 1365 * *(uint64_t *)m0->m_pktdat. Note that it is 1366 * in the leading free-space in the mbuf. The 1367 * kernel can clobber it during a pullup, 1368 * m_copymdata, etc. You need to make sure that 1369 * the mbuf reaches you unmolested if you care 1370 * about the timestamp. 
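 * (The mask below keeps the low 60 bits of the response's last flit; with
 * the consolidated struct iq_desc used by this function the flit would
 * normally be taken from d->rsp.u.last_flit rather than via a separate
 * ctrl pointer.)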
1371 */ 1372 *(uint64_t *)m0->m_pktdat = 1373 be64toh(ctrl->u.last_flit) & 1374 0xfffffffffffffff; 1375 #endif 1376 1377 /* fall through */ 1378 1379 case X_RSPD_TYPE_CPL: 1380 KASSERT(d->rss.opcode < NUM_CPL_CMDS, 1381 ("%s: bad opcode %02x.", __func__, 1382 d->rss.opcode)); 1383 sc->cpl_handler[d->rss.opcode](iq, &d->rss, m0); 1384 break; 1385 1386 case X_RSPD_TYPE_INTR: 1387 1388 /* 1389 * Interrupts should be forwarded only to queues 1390 * that are not forwarding their interrupts. 1391 * This means service_iq can recurse but only 1 1392 * level deep. 1393 */ 1394 KASSERT(budget == 0, 1395 ("%s: budget %u, rsp_type %u", __func__, 1396 budget, rsp_type)); 1397 1398 /* 1399 * There are 1K interrupt-capable queues (qids 0 1400 * through 1023). A response type indicating a 1401 * forwarded interrupt with a qid >= 1K is an 1402 * iWARP async notification. 1403 */ 1404 if (lq >= 1024) { 1405 sc->an_handler(iq, &d->rsp); 1406 break; 1407 } 1408 1409 q = sc->sge.iqmap[lq - sc->sge.iq_start]; 1410 if (atomic_cmpset_int(&q->state, IQS_IDLE, 1411 IQS_BUSY)) { 1412 if (service_iq(q, q->qsize / 16) == 0) { 1413 atomic_cmpset_int(&q->state, 1414 IQS_BUSY, IQS_IDLE); 1415 } else { 1416 STAILQ_INSERT_TAIL(&iql, q, 1417 link); 1418 } 1419 } 1420 break; 1421 1422 default: 1423 KASSERT(0, 1424 ("%s: illegal response type %d on iq %p", 1425 __func__, rsp_type, iq)); 1426 log(LOG_ERR, 1427 "%s: illegal response type %d on iq %p", 1428 device_get_nameunit(sc->dev), rsp_type, iq); 1429 break; 1430 } 1431 1432 d++; 1433 if (__predict_false(++iq->cidx == iq->sidx)) { 1434 iq->cidx = 0; 1435 iq->gen ^= F_RSPD_GEN; 1436 d = &iq->desc[0]; 1437 } 1438 if (__predict_false(++ndescs == limit)) { 1439 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), 1440 V_CIDXINC(ndescs) | 1441 V_INGRESSQID(iq->cntxt_id) | 1442 V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX))); 1443 ndescs = 0; 1444 1445 #if defined(INET) || defined(INET6) 1446 if (iq->flags & IQ_LRO_ENABLED && 1447 sc->lro_timeout != 0) { 1448 tcp_lro_flush_inactive(&rxq->lro, 1449 &lro_timeout); 1450 } 1451 #endif 1452 1453 if (budget) { 1454 if (iq->flags & IQ_HAS_FL) { 1455 FL_LOCK(fl); 1456 refill_fl(sc, fl, 32); 1457 FL_UNLOCK(fl); 1458 } 1459 return (EINPROGRESS); 1460 } 1461 } 1462 if (refill) { 1463 FL_LOCK(fl); 1464 refill_fl(sc, fl, 32); 1465 FL_UNLOCK(fl); 1466 fl_hw_cidx = fl->hw_cidx; 1467 } 1468 } 1469 1470 process_iql: 1471 if (STAILQ_EMPTY(&iql)) 1472 break; 1473 1474 /* 1475 * Process the head only, and send it to the back of the list if 1476 * it's still not done. 
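 * A nonzero budget (q->qsize / 8 here) makes the nested service_iq return
 * EINPROGRESS once that many descriptors have been processed, in which case
 * the queue stays BUSY and is put back at the tail of the list rather than
 * being marked IDLE.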
1477 */ 1478 q = STAILQ_FIRST(&iql); 1479 STAILQ_REMOVE_HEAD(&iql, link); 1480 if (service_iq(q, q->qsize / 8) == 0) 1481 atomic_cmpset_int(&q->state, IQS_BUSY, IQS_IDLE); 1482 else 1483 STAILQ_INSERT_TAIL(&iql, q, link); 1484 } 1485 1486 #if defined(INET) || defined(INET6) 1487 if (iq->flags & IQ_LRO_ENABLED) { 1488 struct lro_ctrl *lro = &rxq->lro; 1489 struct lro_entry *l; 1490 1491 while (!SLIST_EMPTY(&lro->lro_active)) { 1492 l = SLIST_FIRST(&lro->lro_active); 1493 SLIST_REMOVE_HEAD(&lro->lro_active, next); 1494 tcp_lro_flush(lro, l); 1495 } 1496 } 1497 #endif 1498 1499 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_CIDXINC(ndescs) | 1500 V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params)); 1501 1502 if (iq->flags & IQ_HAS_FL) { 1503 int starved; 1504 1505 FL_LOCK(fl); 1506 starved = refill_fl(sc, fl, 64); 1507 FL_UNLOCK(fl); 1508 if (__predict_false(starved != 0)) 1509 add_fl_to_sfl(sc, fl); 1510 } 1511 1512 return (0); 1513 } 1514 1515 static inline int 1516 cl_has_metadata(struct sge_fl *fl, struct cluster_layout *cll) 1517 { 1518 int rc = fl->flags & FL_BUF_PACKING || cll->region1 > 0; 1519 1520 if (rc) 1521 MPASS(cll->region3 >= CL_METADATA_SIZE); 1522 1523 return (rc); 1524 } 1525 1526 static inline struct cluster_metadata * 1527 cl_metadata(struct adapter *sc, struct sge_fl *fl, struct cluster_layout *cll, 1528 caddr_t cl) 1529 { 1530 1531 if (cl_has_metadata(fl, cll)) { 1532 struct sw_zone_info *swz = &sc->sge.sw_zone_info[cll->zidx]; 1533 1534 return ((struct cluster_metadata *)(cl + swz->size) - 1); 1535 } 1536 return (NULL); 1537 } 1538 1539 static void 1540 rxb_free(struct mbuf *m, void *arg1, void *arg2) 1541 { 1542 uma_zone_t zone = arg1; 1543 caddr_t cl = arg2; 1544 1545 uma_zfree(zone, cl); 1546 counter_u64_add(extfree_rels, 1); 1547 } 1548 1549 /* 1550 * The mbuf returned by this function could be allocated from zone_mbuf or 1551 * constructed in spare room in the cluster. 1552 * 1553 * The mbuf carries the payload in one of these ways 1554 * a) frame inside the mbuf (mbuf from zone_mbuf) 1555 * b) m_cljset (for clusters without metadata) zone_mbuf 1556 * c) m_extaddref (cluster with metadata) inline mbuf 1557 * d) m_extaddref (cluster with metadata) zone_mbuf 1558 */ 1559 static struct mbuf * 1560 get_scatter_segment(struct adapter *sc, struct sge_fl *fl, int fr_offset, 1561 int remaining) 1562 { 1563 struct mbuf *m; 1564 struct fl_sdesc *sd = &fl->sdesc[fl->cidx]; 1565 struct cluster_layout *cll = &sd->cll; 1566 struct sw_zone_info *swz = &sc->sge.sw_zone_info[cll->zidx]; 1567 struct hw_buf_info *hwb = &sc->sge.hw_buf_info[cll->hwidx]; 1568 struct cluster_metadata *clm = cl_metadata(sc, fl, cll, sd->cl); 1569 int len, blen; 1570 caddr_t payload; 1571 1572 blen = hwb->size - fl->rx_offset; /* max possible in this buf */ 1573 len = min(remaining, blen); 1574 payload = sd->cl + cll->region1 + fl->rx_offset; 1575 if (fl->flags & FL_BUF_PACKING) { 1576 const u_int l = fr_offset + len; 1577 const u_int pad = roundup2(l, fl->buf_boundary) - l; 1578 1579 if (fl->rx_offset + len + pad < hwb->size) 1580 blen = len + pad; 1581 MPASS(fl->rx_offset + blen <= hwb->size); 1582 } else { 1583 MPASS(fl->rx_offset == 0); /* not packing */ 1584 } 1585 1586 1587 if (sc->sc_do_rxcopy && len < RX_COPY_THRESHOLD) { 1588 1589 /* 1590 * Copy payload into a freshly allocated mbuf. 1591 */ 1592 1593 m = fr_offset == 0 ? 
1594 m_gethdr(M_NOWAIT, MT_DATA) : m_get(M_NOWAIT, MT_DATA); 1595 if (m == NULL) 1596 return (NULL); 1597 fl->mbuf_allocated++; 1598 #ifdef T4_PKT_TIMESTAMP 1599 /* Leave room for a timestamp */ 1600 m->m_data += 8; 1601 #endif 1602 /* copy data to mbuf */ 1603 bcopy(payload, mtod(m, caddr_t), len); 1604 1605 } else if (sd->nmbuf * MSIZE < cll->region1) { 1606 1607 /* 1608 * There's spare room in the cluster for an mbuf. Create one 1609 * and associate it with the payload that's in the cluster. 1610 */ 1611 1612 MPASS(clm != NULL); 1613 m = (struct mbuf *)(sd->cl + sd->nmbuf * MSIZE); 1614 /* No bzero required */ 1615 if (m_init(m, NULL, 0, M_NOWAIT, MT_DATA, 1616 fr_offset == 0 ? M_PKTHDR | M_NOFREE : M_NOFREE)) 1617 return (NULL); 1618 fl->mbuf_inlined++; 1619 m_extaddref(m, payload, blen, &clm->refcount, rxb_free, 1620 swz->zone, sd->cl); 1621 if (sd->nmbuf++ == 0) 1622 counter_u64_add(extfree_refs, 1); 1623 1624 } else { 1625 1626 /* 1627 * Grab an mbuf from zone_mbuf and associate it with the 1628 * payload in the cluster. 1629 */ 1630 1631 m = fr_offset == 0 ? 1632 m_gethdr(M_NOWAIT, MT_DATA) : m_get(M_NOWAIT, MT_DATA); 1633 if (m == NULL) 1634 return (NULL); 1635 fl->mbuf_allocated++; 1636 if (clm != NULL) { 1637 m_extaddref(m, payload, blen, &clm->refcount, 1638 rxb_free, swz->zone, sd->cl); 1639 if (sd->nmbuf++ == 0) 1640 counter_u64_add(extfree_refs, 1); 1641 } else { 1642 m_cljset(m, sd->cl, swz->type); 1643 sd->cl = NULL; /* consumed, not a recycle candidate */ 1644 } 1645 } 1646 if (fr_offset == 0) 1647 m->m_pkthdr.len = remaining; 1648 m->m_len = len; 1649 1650 if (fl->flags & FL_BUF_PACKING) { 1651 fl->rx_offset += blen; 1652 MPASS(fl->rx_offset <= hwb->size); 1653 if (fl->rx_offset < hwb->size) 1654 return (m); /* without advancing the cidx */ 1655 } 1656 1657 if (__predict_false(++fl->cidx % 8 == 0)) { 1658 uint16_t cidx = fl->cidx / 8; 1659 1660 if (__predict_false(cidx == fl->sidx)) 1661 fl->cidx = cidx = 0; 1662 fl->hw_cidx = cidx; 1663 } 1664 fl->rx_offset = 0; 1665 1666 return (m); 1667 } 1668 1669 static struct mbuf * 1670 get_fl_payload(struct adapter *sc, struct sge_fl *fl, uint32_t len_newbuf) 1671 { 1672 struct mbuf *m0, *m, **pnext; 1673 u_int remaining; 1674 const u_int total = G_RSPD_LEN(len_newbuf); 1675 1676 if (__predict_false(fl->flags & FL_BUF_RESUME)) { 1677 M_ASSERTPKTHDR(fl->m0); 1678 MPASS(fl->m0->m_pkthdr.len == total); 1679 MPASS(fl->remaining < total); 1680 1681 m0 = fl->m0; 1682 pnext = fl->pnext; 1683 remaining = fl->remaining; 1684 fl->flags &= ~FL_BUF_RESUME; 1685 goto get_segment; 1686 } 1687 1688 if (fl->rx_offset > 0 && len_newbuf & F_RSPD_NEWBUF) { 1689 fl->rx_offset = 0; 1690 if (__predict_false(++fl->cidx % 8 == 0)) { 1691 uint16_t cidx = fl->cidx / 8; 1692 1693 if (__predict_false(cidx == fl->sidx)) 1694 fl->cidx = cidx = 0; 1695 fl->hw_cidx = cidx; 1696 } 1697 } 1698 1699 /* 1700 * Payload starts at rx_offset in the current hw buffer. Its length is 1701 * 'len' and it may span multiple hw buffers. 
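 * (The total frame length is G_RSPD_LEN(len_newbuf), kept in 'total' here;
 * each call to get_scatter_segment below consumes at most one hw buffer's
 * worth and the loop runs until 'remaining' drops to zero.)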
1702 */ 1703 1704 m0 = get_scatter_segment(sc, fl, 0, total); 1705 if (m0 == NULL) 1706 return (NULL); 1707 remaining = total - m0->m_len; 1708 pnext = &m0->m_next; 1709 while (remaining > 0) { 1710 get_segment: 1711 MPASS(fl->rx_offset == 0); 1712 m = get_scatter_segment(sc, fl, total - remaining, remaining); 1713 if (__predict_false(m == NULL)) { 1714 fl->m0 = m0; 1715 fl->pnext = pnext; 1716 fl->remaining = remaining; 1717 fl->flags |= FL_BUF_RESUME; 1718 return (NULL); 1719 } 1720 *pnext = m; 1721 pnext = &m->m_next; 1722 remaining -= m->m_len; 1723 } 1724 *pnext = NULL; 1725 1726 return (m0); 1727 } 1728 1729 static int 1730 t4_eth_rx(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m0) 1731 { 1732 struct sge_rxq *rxq = iq_to_rxq(iq); 1733 struct ifnet *ifp = rxq->ifp; 1734 const struct cpl_rx_pkt *cpl = (const void *)(rss + 1); 1735 #if defined(INET) || defined(INET6) 1736 struct lro_ctrl *lro = &rxq->lro; 1737 #endif 1738 1739 KASSERT(m0 != NULL, ("%s: no payload with opcode %02x", __func__, 1740 rss->opcode)); 1741 1742 m0->m_pkthdr.len -= fl_pktshift; 1743 m0->m_len -= fl_pktshift; 1744 m0->m_data += fl_pktshift; 1745 1746 m0->m_pkthdr.rcvif = ifp; 1747 M_HASHTYPE_SET(m0, M_HASHTYPE_OPAQUE); 1748 m0->m_pkthdr.flowid = be32toh(rss->hash_val); 1749 1750 if (cpl->csum_calc && !cpl->err_vec) { 1751 if (ifp->if_capenable & IFCAP_RXCSUM && 1752 cpl->l2info & htobe32(F_RXF_IP)) { 1753 m0->m_pkthdr.csum_flags = (CSUM_IP_CHECKED | 1754 CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR); 1755 rxq->rxcsum++; 1756 } else if (ifp->if_capenable & IFCAP_RXCSUM_IPV6 && 1757 cpl->l2info & htobe32(F_RXF_IP6)) { 1758 m0->m_pkthdr.csum_flags = (CSUM_DATA_VALID_IPV6 | 1759 CSUM_PSEUDO_HDR); 1760 rxq->rxcsum++; 1761 } 1762 1763 if (__predict_false(cpl->ip_frag)) 1764 m0->m_pkthdr.csum_data = be16toh(cpl->csum); 1765 else 1766 m0->m_pkthdr.csum_data = 0xffff; 1767 } 1768 1769 if (cpl->vlan_ex) { 1770 m0->m_pkthdr.ether_vtag = be16toh(cpl->vlan); 1771 m0->m_flags |= M_VLANTAG; 1772 rxq->vlan_extraction++; 1773 } 1774 1775 #if defined(INET) || defined(INET6) 1776 if (cpl->l2info & htobe32(F_RXF_LRO) && 1777 iq->flags & IQ_LRO_ENABLED && 1778 tcp_lro_rx(lro, m0, 0) == 0) { 1779 /* queued for LRO */ 1780 } else 1781 #endif 1782 ifp->if_input(ifp, m0); 1783 1784 return (0); 1785 } 1786 1787 /* 1788 * Doesn't fail. Holds on to work requests it can't send right away. 
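 * Roughly: the new wrqe (if any) is appended to wrq->wr_list, completed
 * descriptors are reclaimed, and queued requests are then copied into the
 * ring, each taking howmany(wr_len, EQ_ESIZE) descriptors; the doorbell is
 * rung whenever at least 8 descriptors are pending and once more at the end
 * if anything is still outstanding. Requests that don't fit stay on the
 * list and the eq is marked EQ_STALLED.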
1789 */ 1790 void 1791 t4_wrq_tx_locked(struct adapter *sc, struct sge_wrq *wrq, struct wrqe *wr) 1792 { 1793 struct sge_eq *eq = &wrq->eq; 1794 int can_reclaim; 1795 caddr_t dst; 1796 1797 TXQ_LOCK_ASSERT_OWNED(wrq); 1798 #ifdef TCP_OFFLOAD 1799 KASSERT((eq->flags & EQ_TYPEMASK) == EQ_OFLD || 1800 (eq->flags & EQ_TYPEMASK) == EQ_CTRL, 1801 ("%s: eq type %d", __func__, eq->flags & EQ_TYPEMASK)); 1802 #else 1803 KASSERT((eq->flags & EQ_TYPEMASK) == EQ_CTRL, 1804 ("%s: eq type %d", __func__, eq->flags & EQ_TYPEMASK)); 1805 #endif 1806 1807 if (__predict_true(wr != NULL)) 1808 STAILQ_INSERT_TAIL(&wrq->wr_list, wr, link); 1809 1810 can_reclaim = reclaimable(eq); 1811 if (__predict_false(eq->flags & EQ_STALLED)) { 1812 if (eq->avail + can_reclaim < tx_resume_threshold(eq)) 1813 return; 1814 eq->flags &= ~EQ_STALLED; 1815 eq->unstalled++; 1816 } 1817 eq->cidx += can_reclaim; 1818 eq->avail += can_reclaim; 1819 if (__predict_false(eq->cidx >= eq->cap)) 1820 eq->cidx -= eq->cap; 1821 1822 while ((wr = STAILQ_FIRST(&wrq->wr_list)) != NULL) { 1823 int ndesc; 1824 1825 if (__predict_false(wr->wr_len < 0 || 1826 wr->wr_len > SGE_MAX_WR_LEN || (wr->wr_len & 0x7))) { 1827 1828 #ifdef INVARIANTS 1829 panic("%s: work request with length %d", __func__, 1830 wr->wr_len); 1831 #endif 1832 #ifdef KDB 1833 kdb_backtrace(); 1834 #endif 1835 log(LOG_ERR, "%s: %s work request with length %d", 1836 device_get_nameunit(sc->dev), __func__, wr->wr_len); 1837 STAILQ_REMOVE_HEAD(&wrq->wr_list, link); 1838 free_wrqe(wr); 1839 continue; 1840 } 1841 1842 ndesc = howmany(wr->wr_len, EQ_ESIZE); 1843 if (eq->avail < ndesc) { 1844 wrq->no_desc++; 1845 break; 1846 } 1847 1848 dst = (void *)&eq->desc[eq->pidx]; 1849 copy_to_txd(eq, wrtod(wr), &dst, wr->wr_len); 1850 1851 eq->pidx += ndesc; 1852 eq->avail -= ndesc; 1853 if (__predict_false(eq->pidx >= eq->cap)) 1854 eq->pidx -= eq->cap; 1855 1856 eq->pending += ndesc; 1857 if (eq->pending >= 8) 1858 ring_eq_db(sc, eq); 1859 1860 wrq->tx_wrs++; 1861 STAILQ_REMOVE_HEAD(&wrq->wr_list, link); 1862 free_wrqe(wr); 1863 1864 if (eq->avail < 8) { 1865 can_reclaim = reclaimable(eq); 1866 eq->cidx += can_reclaim; 1867 eq->avail += can_reclaim; 1868 if (__predict_false(eq->cidx >= eq->cap)) 1869 eq->cidx -= eq->cap; 1870 } 1871 } 1872 1873 if (eq->pending) 1874 ring_eq_db(sc, eq); 1875 1876 if (wr != NULL) { 1877 eq->flags |= EQ_STALLED; 1878 if (callout_pending(&eq->tx_callout) == 0) 1879 callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq); 1880 } 1881 } 1882 1883 /* Per-packet header in a coalesced tx WR, before the SGL starts (in flits) */ 1884 #define TXPKTS_PKT_HDR ((\ 1885 sizeof(struct ulp_txpkt) + \ 1886 sizeof(struct ulptx_idata) + \ 1887 sizeof(struct cpl_tx_pkt_core) \ 1888 ) / 8) 1889 1890 /* Header of a coalesced tx WR, before SGL of first packet (in flits) */ 1891 #define TXPKTS_WR_HDR (\ 1892 sizeof(struct fw_eth_tx_pkts_wr) / 8 + \ 1893 TXPKTS_PKT_HDR) 1894 1895 /* Header of a tx WR, before SGL of first packet (in flits) */ 1896 #define TXPKT_WR_HDR ((\ 1897 sizeof(struct fw_eth_tx_pkt_wr) + \ 1898 sizeof(struct cpl_tx_pkt_core) \ 1899 ) / 8 ) 1900 1901 /* Header of a tx LSO WR, before SGL of first packet (in flits) */ 1902 #define TXPKT_LSO_WR_HDR ((\ 1903 sizeof(struct fw_eth_tx_pkt_wr) + \ 1904 sizeof(struct cpl_tx_pkt_lso_core) + \ 1905 sizeof(struct cpl_tx_pkt_core) \ 1906 ) / 8 ) 1907 1908 int 1909 t4_eth_tx(struct ifnet *ifp, struct sge_txq *txq, struct mbuf *m) 1910 { 1911 struct port_info *pi = (void *)ifp->if_softc; 1912 struct adapter *sc = pi->adapter; 1913 
struct sge_eq *eq = &txq->eq; 1914 struct buf_ring *br = txq->br; 1915 struct mbuf *next; 1916 int rc, coalescing, can_reclaim; 1917 struct txpkts txpkts; 1918 struct sgl sgl; 1919 1920 TXQ_LOCK_ASSERT_OWNED(txq); 1921 KASSERT(m, ("%s: called with nothing to do.", __func__)); 1922 KASSERT((eq->flags & EQ_TYPEMASK) == EQ_ETH, 1923 ("%s: eq type %d", __func__, eq->flags & EQ_TYPEMASK)); 1924 1925 prefetch(&eq->desc[eq->pidx]); 1926 prefetch(&txq->sdesc[eq->pidx]); 1927 1928 txpkts.npkt = 0;/* indicates there's nothing in txpkts */ 1929 coalescing = 0; 1930 1931 can_reclaim = reclaimable(eq); 1932 if (__predict_false(eq->flags & EQ_STALLED)) { 1933 if (eq->avail + can_reclaim < tx_resume_threshold(eq)) { 1934 txq->m = m; 1935 return (0); 1936 } 1937 eq->flags &= ~EQ_STALLED; 1938 eq->unstalled++; 1939 } 1940 1941 if (__predict_false(eq->flags & EQ_DOOMED)) { 1942 m_freem(m); 1943 while ((m = buf_ring_dequeue_sc(txq->br)) != NULL) 1944 m_freem(m); 1945 return (ENETDOWN); 1946 } 1947 1948 if (eq->avail < 8 && can_reclaim) 1949 reclaim_tx_descs(txq, can_reclaim, 32); 1950 1951 for (; m; m = next ? next : drbr_dequeue(ifp, br)) { 1952 1953 if (eq->avail < 8) 1954 break; 1955 1956 next = m->m_nextpkt; 1957 m->m_nextpkt = NULL; 1958 1959 if (next || buf_ring_peek(br)) 1960 coalescing = 1; 1961 1962 rc = get_pkt_sgl(txq, &m, &sgl, coalescing); 1963 if (rc != 0) { 1964 if (rc == ENOMEM) { 1965 1966 /* Short of resources, suspend tx */ 1967 1968 m->m_nextpkt = next; 1969 break; 1970 } 1971 1972 /* 1973 * Unrecoverable error for this packet, throw it away 1974 * and move on to the next. get_pkt_sgl may already 1975 * have freed m (it will be NULL in that case and the 1976 * m_freem here is still safe). 1977 */ 1978 1979 m_freem(m); 1980 continue; 1981 } 1982 1983 if (coalescing && 1984 add_to_txpkts(pi, txq, &txpkts, m, &sgl) == 0) { 1985 1986 /* Successfully absorbed into txpkts */ 1987 1988 write_ulp_cpl_sgl(pi, txq, &txpkts, m, &sgl); 1989 goto doorbell; 1990 } 1991 1992 /* 1993 * We weren't coalescing to begin with, or current frame could 1994 * not be coalesced (add_to_txpkts flushes txpkts if a frame 1995 * given to it can't be coalesced). Either way there should be 1996 * nothing in txpkts. 1997 */ 1998 KASSERT(txpkts.npkt == 0, 1999 ("%s: txpkts not empty: %d", __func__, txpkts.npkt)); 2000 2001 /* We're sending out individual packets now */ 2002 coalescing = 0; 2003 2004 if (eq->avail < 8) 2005 reclaim_tx_descs(txq, 0, 8); 2006 rc = write_txpkt_wr(pi, txq, m, &sgl); 2007 if (rc != 0) { 2008 2009 /* Short of hardware descriptors, suspend tx */ 2010 2011 /* 2012 * This is an unlikely but expensive failure. We've 2013 * done all the hard work (DMA mappings etc.) and now we 2014 * can't send out the packet. What's worse, we have to 2015 * spend even more time freeing up everything in sgl. 2016 */ 2017 txq->no_desc++; 2018 free_pkt_sgl(txq, &sgl); 2019 2020 m->m_nextpkt = next; 2021 break; 2022 } 2023 2024 ETHER_BPF_MTAP(ifp, m); 2025 if (sgl.nsegs == 0) 2026 m_freem(m); 2027 doorbell: 2028 if (eq->pending >= 8) 2029 ring_eq_db(sc, eq); 2030 2031 can_reclaim = reclaimable(eq); 2032 if (can_reclaim >= 32) 2033 reclaim_tx_descs(txq, can_reclaim, 64); 2034 } 2035 2036 if (txpkts.npkt > 0) 2037 write_txpkts_wr(txq, &txpkts); 2038 2039 /* 2040 * m not NULL means there was an error but we haven't thrown it away. 2041 * This can happen when we're short of tx descriptors (no_desc) or maybe 2042 * even DMA maps (no_dmamap). Either way, a credit flush and reclaim 2043 * will get things going again. 
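 * (EQ_CRFLUSHED means an earlier WR in this batch already requested such a
 * flush, so there is no need to write another one.)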
2044 */ 2045 if (m && !(eq->flags & EQ_CRFLUSHED)) { 2046 struct tx_sdesc *txsd = &txq->sdesc[eq->pidx]; 2047 2048 /* 2049 * If EQ_CRFLUSHED is not set then we know we have at least one 2050 * available descriptor because any WR that reduces eq->avail to 2051 * 0 also sets EQ_CRFLUSHED. 2052 */ 2053 KASSERT(eq->avail > 0, ("%s: no space for eqflush.", __func__)); 2054 2055 txsd->desc_used = 1; 2056 txsd->credits = 0; 2057 write_eqflush_wr(eq); 2058 } 2059 txq->m = m; 2060 2061 if (eq->pending) 2062 ring_eq_db(sc, eq); 2063 2064 reclaim_tx_descs(txq, 0, 128); 2065 2066 if (eq->flags & EQ_STALLED && callout_pending(&eq->tx_callout) == 0) 2067 callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq); 2068 2069 return (0); 2070 } 2071 2072 void 2073 t4_update_fl_bufsize(struct ifnet *ifp) 2074 { 2075 struct port_info *pi = ifp->if_softc; 2076 struct adapter *sc = pi->adapter; 2077 struct sge_rxq *rxq; 2078 #ifdef TCP_OFFLOAD 2079 struct sge_ofld_rxq *ofld_rxq; 2080 #endif 2081 struct sge_fl *fl; 2082 int i, maxp, mtu = ifp->if_mtu; 2083 2084 maxp = mtu_to_max_payload(sc, mtu, 0); 2085 for_each_rxq(pi, i, rxq) { 2086 fl = &rxq->fl; 2087 2088 FL_LOCK(fl); 2089 find_best_refill_source(sc, fl, maxp); 2090 FL_UNLOCK(fl); 2091 } 2092 #ifdef TCP_OFFLOAD 2093 maxp = mtu_to_max_payload(sc, mtu, 1); 2094 for_each_ofld_rxq(pi, i, ofld_rxq) { 2095 fl = &ofld_rxq->fl; 2096 2097 FL_LOCK(fl); 2098 find_best_refill_source(sc, fl, maxp); 2099 FL_UNLOCK(fl); 2100 } 2101 #endif 2102 } 2103 2104 int 2105 can_resume_tx(struct sge_eq *eq) 2106 { 2107 2108 return (eq->avail + reclaimable(eq) >= tx_resume_threshold(eq)); 2109 } 2110 2111 static inline void 2112 init_iq(struct sge_iq *iq, struct adapter *sc, int tmr_idx, int pktc_idx, 2113 int qsize) 2114 { 2115 2116 KASSERT(tmr_idx >= 0 && tmr_idx < SGE_NTIMERS, 2117 ("%s: bad tmr_idx %d", __func__, tmr_idx)); 2118 KASSERT(pktc_idx < SGE_NCOUNTERS, /* -ve is ok, means don't use */ 2119 ("%s: bad pktc_idx %d", __func__, pktc_idx)); 2120 2121 iq->flags = 0; 2122 iq->adapter = sc; 2123 iq->intr_params = V_QINTR_TIMER_IDX(tmr_idx); 2124 iq->intr_pktc_idx = SGE_NCOUNTERS - 1; 2125 if (pktc_idx >= 0) { 2126 iq->intr_params |= F_QINTR_CNT_EN; 2127 iq->intr_pktc_idx = pktc_idx; 2128 } 2129 iq->qsize = roundup2(qsize, 16); /* See FW_IQ_CMD/iqsize */ 2130 iq->sidx = iq->qsize - spg_len / IQ_ESIZE; 2131 } 2132 2133 static inline void 2134 init_fl(struct adapter *sc, struct sge_fl *fl, int qsize, int maxp, char *name) 2135 { 2136 2137 fl->qsize = qsize; 2138 fl->sidx = qsize - spg_len / EQ_ESIZE; 2139 strlcpy(fl->lockname, name, sizeof(fl->lockname)); 2140 if (sc->flags & BUF_PACKING_OK && 2141 ((!is_t4(sc) && buffer_packing) || /* T5+: enabled unless 0 */ 2142 (is_t4(sc) && buffer_packing == 1)))/* T4: disabled unless 1 */ 2143 fl->flags |= FL_BUF_PACKING; 2144 find_best_refill_source(sc, fl, maxp); 2145 find_safe_refill_source(sc, fl); 2146 } 2147 2148 static inline void 2149 init_eq(struct sge_eq *eq, int eqtype, int qsize, uint8_t tx_chan, 2150 uint16_t iqid, char *name) 2151 { 2152 KASSERT(tx_chan < NCHAN, ("%s: bad tx channel %d", __func__, tx_chan)); 2153 KASSERT(eqtype <= EQ_TYPEMASK, ("%s: bad qtype %d", __func__, eqtype)); 2154 2155 eq->flags = eqtype & EQ_TYPEMASK; 2156 eq->tx_chan = tx_chan; 2157 eq->iqid = iqid; 2158 eq->qsize = qsize; 2159 strlcpy(eq->lockname, name, sizeof(eq->lockname)); 2160 2161 TASK_INIT(&eq->tx_task, 0, t4_tx_task, eq); 2162 callout_init(&eq->tx_callout, CALLOUT_MPSAFE); 2163 } 2164 2165 static int 2166 alloc_ring(struct adapter *sc, size_t len, 
bus_dma_tag_t *tag, 2167 bus_dmamap_t *map, bus_addr_t *pa, void **va) 2168 { 2169 int rc; 2170 2171 rc = bus_dma_tag_create(sc->dmat, 512, 0, BUS_SPACE_MAXADDR, 2172 BUS_SPACE_MAXADDR, NULL, NULL, len, 1, len, 0, NULL, NULL, tag); 2173 if (rc != 0) { 2174 device_printf(sc->dev, "cannot allocate DMA tag: %d\n", rc); 2175 goto done; 2176 } 2177 2178 rc = bus_dmamem_alloc(*tag, va, 2179 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, map); 2180 if (rc != 0) { 2181 device_printf(sc->dev, "cannot allocate DMA memory: %d\n", rc); 2182 goto done; 2183 } 2184 2185 rc = bus_dmamap_load(*tag, *map, *va, len, oneseg_dma_callback, pa, 0); 2186 if (rc != 0) { 2187 device_printf(sc->dev, "cannot load DMA map: %d\n", rc); 2188 goto done; 2189 } 2190 done: 2191 if (rc) 2192 free_ring(sc, *tag, *map, *pa, *va); 2193 2194 return (rc); 2195 } 2196 2197 static int 2198 free_ring(struct adapter *sc, bus_dma_tag_t tag, bus_dmamap_t map, 2199 bus_addr_t pa, void *va) 2200 { 2201 if (pa) 2202 bus_dmamap_unload(tag, map); 2203 if (va) 2204 bus_dmamem_free(tag, va, map); 2205 if (tag) 2206 bus_dma_tag_destroy(tag); 2207 2208 return (0); 2209 } 2210 2211 /* 2212 * Allocates the ring for an ingress queue and an optional freelist. If the 2213 * freelist is specified it will be allocated and then associated with the 2214 * ingress queue. 2215 * 2216 * Returns errno on failure. Resources allocated up to that point may still be 2217 * allocated. Caller is responsible for cleanup in case this function fails. 2218 * 2219 * If the ingress queue will take interrupts directly (iq->flags & IQ_INTR) then 2220 * the intr_idx specifies the vector, starting from 0. Otherwise it specifies 2221 * the abs_id of the ingress queue to which its interrupts should be forwarded. 2222 */ 2223 static int 2224 alloc_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl, 2225 int intr_idx, int cong) 2226 { 2227 int rc, i, cntxt_id; 2228 size_t len; 2229 struct fw_iq_cmd c; 2230 struct adapter *sc = iq->adapter; 2231 __be32 v = 0; 2232 2233 len = iq->qsize * IQ_ESIZE; 2234 rc = alloc_ring(sc, len, &iq->desc_tag, &iq->desc_map, &iq->ba, 2235 (void **)&iq->desc); 2236 if (rc != 0) 2237 return (rc); 2238 2239 bzero(&c, sizeof(c)); 2240 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | 2241 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) | 2242 V_FW_IQ_CMD_VFN(0)); 2243 2244 c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART | 2245 FW_LEN16(c)); 2246 2247 /* Special handling for firmware event queue */ 2248 if (iq == &sc->sge.fwq) 2249 v |= F_FW_IQ_CMD_IQASYNCH; 2250 2251 if (iq->flags & IQ_INTR) { 2252 KASSERT(intr_idx < sc->intr_count, 2253 ("%s: invalid direct intr_idx %d", __func__, intr_idx)); 2254 } else 2255 v |= F_FW_IQ_CMD_IQANDST; 2256 v |= V_FW_IQ_CMD_IQANDSTINDEX(intr_idx); 2257 2258 c.type_to_iqandstindex = htobe32(v | 2259 V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | 2260 V_FW_IQ_CMD_VIID(pi->viid) | 2261 V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT)); 2262 c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) | 2263 F_FW_IQ_CMD_IQGTSMODE | 2264 V_FW_IQ_CMD_IQINTCNTTHRESH(iq->intr_pktc_idx) | 2265 V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4)); 2266 c.iqsize = htobe16(iq->qsize); 2267 c.iqaddr = htobe64(iq->ba); 2268 if (cong >= 0) 2269 c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN); 2270 2271 if (fl) { 2272 mtx_init(&fl->fl_lock, fl->lockname, NULL, MTX_DEF); 2273 2274 len = fl->qsize * EQ_ESIZE; 2275 rc = alloc_ring(sc, len, &fl->desc_tag, &fl->desc_map, 2276 
&fl->ba, (void **)&fl->desc); 2277 if (rc) 2278 return (rc); 2279 2280 /* Allocate space for one software descriptor per buffer. */ 2281 rc = alloc_fl_sdesc(fl); 2282 if (rc != 0) { 2283 device_printf(sc->dev, 2284 "failed to setup fl software descriptors: %d\n", 2285 rc); 2286 return (rc); 2287 } 2288 2289 if (fl->flags & FL_BUF_PACKING) { 2290 fl->lowat = roundup2(sc->sge.fl_starve_threshold2, 8); 2291 fl->buf_boundary = sc->sge.pack_boundary; 2292 } else { 2293 fl->lowat = roundup2(sc->sge.fl_starve_threshold, 8); 2294 fl->buf_boundary = 16; 2295 } 2296 if (fl_pad && fl->buf_boundary < sc->sge.pad_boundary) 2297 fl->buf_boundary = sc->sge.pad_boundary; 2298 2299 c.iqns_to_fl0congen |= 2300 htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) | 2301 F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO | 2302 (fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0) | 2303 (fl->flags & FL_BUF_PACKING ? F_FW_IQ_CMD_FL0PACKEN : 2304 0)); 2305 if (cong >= 0) { 2306 c.iqns_to_fl0congen |= 2307 htobe32(V_FW_IQ_CMD_FL0CNGCHMAP(cong) | 2308 F_FW_IQ_CMD_FL0CONGCIF | 2309 F_FW_IQ_CMD_FL0CONGEN); 2310 } 2311 c.fl0dcaen_to_fl0cidxfthresh = 2312 htobe16(V_FW_IQ_CMD_FL0FBMIN(X_FETCHBURSTMIN_64B) | 2313 V_FW_IQ_CMD_FL0FBMAX(X_FETCHBURSTMAX_512B)); 2314 c.fl0size = htobe16(fl->qsize); 2315 c.fl0addr = htobe64(fl->ba); 2316 } 2317 2318 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 2319 if (rc != 0) { 2320 device_printf(sc->dev, 2321 "failed to create ingress queue: %d\n", rc); 2322 return (rc); 2323 } 2324 2325 iq->cidx = 0; 2326 iq->gen = F_RSPD_GEN; 2327 iq->intr_next = iq->intr_params; 2328 iq->cntxt_id = be16toh(c.iqid); 2329 iq->abs_id = be16toh(c.physiqid); 2330 iq->flags |= IQ_ALLOCATED; 2331 2332 cntxt_id = iq->cntxt_id - sc->sge.iq_start; 2333 if (cntxt_id >= sc->sge.niq) { 2334 panic ("%s: iq->cntxt_id (%d) more than the max (%d)", __func__, 2335 cntxt_id, sc->sge.niq - 1); 2336 } 2337 sc->sge.iqmap[cntxt_id] = iq; 2338 2339 if (fl) { 2340 u_int qid; 2341 2342 iq->flags |= IQ_HAS_FL; 2343 fl->cntxt_id = be16toh(c.fl0id); 2344 fl->pidx = fl->cidx = 0; 2345 2346 cntxt_id = fl->cntxt_id - sc->sge.eq_start; 2347 if (cntxt_id >= sc->sge.neq) { 2348 panic("%s: fl->cntxt_id (%d) more than the max (%d)", 2349 __func__, cntxt_id, sc->sge.neq - 1); 2350 } 2351 sc->sge.eqmap[cntxt_id] = (void *)fl; 2352 2353 qid = fl->cntxt_id; 2354 if (isset(&sc->doorbells, DOORBELL_UDB)) { 2355 uint32_t s_qpp = sc->sge.eq_s_qpp; 2356 uint32_t mask = (1 << s_qpp) - 1; 2357 volatile uint8_t *udb; 2358 2359 udb = sc->udbs_base + UDBS_DB_OFFSET; 2360 udb += (qid >> s_qpp) << PAGE_SHIFT; 2361 qid &= mask; 2362 if (qid < PAGE_SIZE / UDBS_SEG_SIZE) { 2363 udb += qid << UDBS_SEG_SHIFT; 2364 qid = 0; 2365 } 2366 fl->udb = (volatile void *)udb; 2367 } 2368 fl->dbval = F_DBPRIO | V_QID(qid); 2369 if (is_t5(sc)) 2370 fl->dbval |= F_DBTYPE; 2371 2372 FL_LOCK(fl); 2373 /* Enough to make sure the SGE doesn't think it's starved */ 2374 refill_fl(sc, fl, fl->lowat); 2375 FL_UNLOCK(fl); 2376 } 2377 2378 if (is_t5(sc) && cong >= 0) { 2379 uint32_t param, val; 2380 2381 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | 2382 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) | 2383 V_FW_PARAMS_PARAM_YZ(iq->cntxt_id); 2384 if (cong == 0) 2385 val = 1 << 19; 2386 else { 2387 val = 2 << 19; 2388 for (i = 0; i < 4; i++) { 2389 if (cong & (1 << i)) 2390 val |= 1 << (i << 2); 2391 } 2392 } 2393 2394 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); 2395 if (rc != 0) { 2396 /* report error but carry on */ 2397 device_printf(sc->dev, 2398 "failed to set 
congestion manager context for " 2399 "ingress queue %d: %d\n", iq->cntxt_id, rc); 2400 } 2401 } 2402 2403 /* Enable IQ interrupts */ 2404 atomic_store_rel_int(&iq->state, IQS_IDLE); 2405 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_SEINTARM(iq->intr_params) | 2406 V_INGRESSQID(iq->cntxt_id)); 2407 2408 return (0); 2409 } 2410 2411 static int 2412 free_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl) 2413 { 2414 int rc; 2415 struct adapter *sc = iq->adapter; 2416 device_t dev; 2417 2418 if (sc == NULL) 2419 return (0); /* nothing to do */ 2420 2421 dev = pi ? pi->dev : sc->dev; 2422 2423 if (iq->flags & IQ_ALLOCATED) { 2424 rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0, 2425 FW_IQ_TYPE_FL_INT_CAP, iq->cntxt_id, 2426 fl ? fl->cntxt_id : 0xffff, 0xffff); 2427 if (rc != 0) { 2428 device_printf(dev, 2429 "failed to free queue %p: %d\n", iq, rc); 2430 return (rc); 2431 } 2432 iq->flags &= ~IQ_ALLOCATED; 2433 } 2434 2435 free_ring(sc, iq->desc_tag, iq->desc_map, iq->ba, iq->desc); 2436 2437 bzero(iq, sizeof(*iq)); 2438 2439 if (fl) { 2440 free_ring(sc, fl->desc_tag, fl->desc_map, fl->ba, 2441 fl->desc); 2442 2443 if (fl->sdesc) 2444 free_fl_sdesc(sc, fl); 2445 2446 if (mtx_initialized(&fl->fl_lock)) 2447 mtx_destroy(&fl->fl_lock); 2448 2449 bzero(fl, sizeof(*fl)); 2450 } 2451 2452 return (0); 2453 } 2454 2455 static void 2456 add_fl_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *oid, 2457 struct sge_fl *fl) 2458 { 2459 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 2460 2461 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fl", CTLFLAG_RD, NULL, 2462 "freelist"); 2463 children = SYSCTL_CHILDREN(oid); 2464 2465 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id", 2466 CTLTYPE_INT | CTLFLAG_RD, &fl->cntxt_id, 0, sysctl_uint16, "I", 2467 "SGE context id of the freelist"); 2468 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "padding", CTLFLAG_RD, NULL, 2469 fl_pad ? 1 : 0, "padding enabled"); 2470 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "packing", CTLFLAG_RD, NULL, 2471 fl->flags & FL_BUF_PACKING ? 1 : 0, "packing enabled"); 2472 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, &fl->cidx, 2473 0, "consumer index"); 2474 if (fl->flags & FL_BUF_PACKING) { 2475 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_offset", 2476 CTLFLAG_RD, &fl->rx_offset, 0, "packing rx offset"); 2477 } 2478 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, &fl->pidx, 2479 0, "producer index"); 2480 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "mbuf_allocated", 2481 CTLFLAG_RD, &fl->mbuf_allocated, "# of mbuf allocated"); 2482 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "mbuf_inlined", 2483 CTLFLAG_RD, &fl->mbuf_inlined, "# of mbuf inlined in clusters"); 2484 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_allocated", 2485 CTLFLAG_RD, &fl->cl_allocated, "# of clusters allocated"); 2486 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_recycled", 2487 CTLFLAG_RD, &fl->cl_recycled, "# of clusters recycled"); 2488 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_fast_recycled", 2489 CTLFLAG_RD, &fl->cl_fast_recycled, "# of clusters recycled (fast)"); 2490 } 2491 2492 static int 2493 alloc_fwq(struct adapter *sc) 2494 { 2495 int rc, intr_idx; 2496 struct sge_iq *fwq = &sc->sge.fwq; 2497 struct sysctl_oid *oid = device_get_sysctl_tree(sc->dev); 2498 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 2499 2500 init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE); 2501 fwq->flags |= IQ_INTR; /* always */ 2502 intr_idx = sc->intr_count > 1 ? 
1 : 0; 2503 rc = alloc_iq_fl(sc->port[0], fwq, NULL, intr_idx, -1); 2504 if (rc != 0) { 2505 device_printf(sc->dev, 2506 "failed to create firmware event queue: %d\n", rc); 2507 return (rc); 2508 } 2509 2510 oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "fwq", CTLFLAG_RD, 2511 NULL, "firmware event queue"); 2512 children = SYSCTL_CHILDREN(oid); 2513 2514 SYSCTL_ADD_PROC(&sc->ctx, children, OID_AUTO, "abs_id", 2515 CTLTYPE_INT | CTLFLAG_RD, &fwq->abs_id, 0, sysctl_uint16, "I", 2516 "absolute id of the queue"); 2517 SYSCTL_ADD_PROC(&sc->ctx, children, OID_AUTO, "cntxt_id", 2518 CTLTYPE_INT | CTLFLAG_RD, &fwq->cntxt_id, 0, sysctl_uint16, "I", 2519 "SGE context id of the queue"); 2520 SYSCTL_ADD_PROC(&sc->ctx, children, OID_AUTO, "cidx", 2521 CTLTYPE_INT | CTLFLAG_RD, &fwq->cidx, 0, sysctl_uint16, "I", 2522 "consumer index"); 2523 2524 return (0); 2525 } 2526 2527 static int 2528 free_fwq(struct adapter *sc) 2529 { 2530 return free_iq_fl(NULL, &sc->sge.fwq, NULL); 2531 } 2532 2533 static int 2534 alloc_mgmtq(struct adapter *sc) 2535 { 2536 int rc; 2537 struct sge_wrq *mgmtq = &sc->sge.mgmtq; 2538 char name[16]; 2539 struct sysctl_oid *oid = device_get_sysctl_tree(sc->dev); 2540 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 2541 2542 oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "mgmtq", CTLFLAG_RD, 2543 NULL, "management queue"); 2544 2545 snprintf(name, sizeof(name), "%s mgmtq", device_get_nameunit(sc->dev)); 2546 init_eq(&mgmtq->eq, EQ_CTRL, CTRL_EQ_QSIZE, sc->port[0]->tx_chan, 2547 sc->sge.fwq.cntxt_id, name); 2548 rc = alloc_wrq(sc, NULL, mgmtq, oid); 2549 if (rc != 0) { 2550 device_printf(sc->dev, 2551 "failed to create management queue: %d\n", rc); 2552 return (rc); 2553 } 2554 2555 return (0); 2556 } 2557 2558 static int 2559 free_mgmtq(struct adapter *sc) 2560 { 2561 2562 return free_wrq(sc, &sc->sge.mgmtq); 2563 } 2564 2565 static inline int 2566 tnl_cong(struct port_info *pi) 2567 { 2568 2569 if (cong_drop == -1) 2570 return (-1); 2571 else if (cong_drop == 1) 2572 return (0); 2573 else 2574 return (pi->rx_chan_map); 2575 } 2576 2577 static int 2578 alloc_rxq(struct port_info *pi, struct sge_rxq *rxq, int intr_idx, int idx, 2579 struct sysctl_oid *oid) 2580 { 2581 int rc; 2582 struct sysctl_oid_list *children; 2583 char name[16]; 2584 2585 rc = alloc_iq_fl(pi, &rxq->iq, &rxq->fl, intr_idx, tnl_cong(pi)); 2586 if (rc != 0) 2587 return (rc); 2588 2589 /* 2590 * The freelist is just barely above the starvation threshold right now, 2591 * fill it up a bit more. 
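	 * (alloc_iq_fl stopped at fl->lowat; top it up with another 128
	 * buffers here.)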
2592 */ 2593 FL_LOCK(&rxq->fl); 2594 refill_fl(pi->adapter, &rxq->fl, 128); 2595 FL_UNLOCK(&rxq->fl); 2596 2597 #if defined(INET) || defined(INET6) 2598 rc = tcp_lro_init(&rxq->lro); 2599 if (rc != 0) 2600 return (rc); 2601 rxq->lro.ifp = pi->ifp; /* also indicates LRO init'ed */ 2602 2603 if (pi->ifp->if_capenable & IFCAP_LRO) 2604 rxq->iq.flags |= IQ_LRO_ENABLED; 2605 #endif 2606 rxq->ifp = pi->ifp; 2607 2608 children = SYSCTL_CHILDREN(oid); 2609 2610 snprintf(name, sizeof(name), "%d", idx); 2611 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD, 2612 NULL, "rx queue"); 2613 children = SYSCTL_CHILDREN(oid); 2614 2615 SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "abs_id", 2616 CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.abs_id, 0, sysctl_uint16, "I", 2617 "absolute id of the queue"); 2618 SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cntxt_id", 2619 CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.cntxt_id, 0, sysctl_uint16, "I", 2620 "SGE context id of the queue"); 2621 SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cidx", 2622 CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.cidx, 0, sysctl_uint16, "I", 2623 "consumer index"); 2624 #if defined(INET) || defined(INET6) 2625 SYSCTL_ADD_INT(&pi->ctx, children, OID_AUTO, "lro_queued", CTLFLAG_RD, 2626 &rxq->lro.lro_queued, 0, NULL); 2627 SYSCTL_ADD_INT(&pi->ctx, children, OID_AUTO, "lro_flushed", CTLFLAG_RD, 2628 &rxq->lro.lro_flushed, 0, NULL); 2629 #endif 2630 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "rxcsum", CTLFLAG_RD, 2631 &rxq->rxcsum, "# of times hardware assisted with checksum"); 2632 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "vlan_extraction", 2633 CTLFLAG_RD, &rxq->vlan_extraction, 2634 "# of times hardware extracted 802.1Q tag"); 2635 2636 add_fl_sysctls(&pi->ctx, oid, &rxq->fl); 2637 2638 return (rc); 2639 } 2640 2641 static int 2642 free_rxq(struct port_info *pi, struct sge_rxq *rxq) 2643 { 2644 int rc; 2645 2646 #if defined(INET) || defined(INET6) 2647 if (rxq->lro.ifp) { 2648 tcp_lro_free(&rxq->lro); 2649 rxq->lro.ifp = NULL; 2650 } 2651 #endif 2652 2653 rc = free_iq_fl(pi, &rxq->iq, &rxq->fl); 2654 if (rc == 0) 2655 bzero(rxq, sizeof(*rxq)); 2656 2657 return (rc); 2658 } 2659 2660 #ifdef TCP_OFFLOAD 2661 static int 2662 alloc_ofld_rxq(struct port_info *pi, struct sge_ofld_rxq *ofld_rxq, 2663 int intr_idx, int idx, struct sysctl_oid *oid) 2664 { 2665 int rc; 2666 struct sysctl_oid_list *children; 2667 char name[16]; 2668 2669 rc = alloc_iq_fl(pi, &ofld_rxq->iq, &ofld_rxq->fl, intr_idx, 2670 pi->rx_chan_map); 2671 if (rc != 0) 2672 return (rc); 2673 2674 children = SYSCTL_CHILDREN(oid); 2675 2676 snprintf(name, sizeof(name), "%d", idx); 2677 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD, 2678 NULL, "rx queue"); 2679 children = SYSCTL_CHILDREN(oid); 2680 2681 SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "abs_id", 2682 CTLTYPE_INT | CTLFLAG_RD, &ofld_rxq->iq.abs_id, 0, sysctl_uint16, 2683 "I", "absolute id of the queue"); 2684 SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cntxt_id", 2685 CTLTYPE_INT | CTLFLAG_RD, &ofld_rxq->iq.cntxt_id, 0, sysctl_uint16, 2686 "I", "SGE context id of the queue"); 2687 SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cidx", 2688 CTLTYPE_INT | CTLFLAG_RD, &ofld_rxq->iq.cidx, 0, sysctl_uint16, "I", 2689 "consumer index"); 2690 2691 add_fl_sysctls(&pi->ctx, oid, &ofld_rxq->fl); 2692 2693 return (rc); 2694 } 2695 2696 static int 2697 free_ofld_rxq(struct port_info *pi, struct sge_ofld_rxq *ofld_rxq) 2698 { 2699 int rc; 2700 2701 rc = free_iq_fl(pi, &ofld_rxq->iq, &ofld_rxq->fl); 2702 
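	/* Clear the software state only if the queue was freed successfully. */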
if (rc == 0) 2703 bzero(ofld_rxq, sizeof(*ofld_rxq)); 2704 2705 return (rc); 2706 } 2707 #endif 2708 2709 #ifdef DEV_NETMAP 2710 static int 2711 alloc_nm_rxq(struct port_info *pi, struct sge_nm_rxq *nm_rxq, int intr_idx, 2712 int idx, struct sysctl_oid *oid) 2713 { 2714 int rc; 2715 struct sysctl_oid_list *children; 2716 struct sysctl_ctx_list *ctx; 2717 char name[16]; 2718 size_t len; 2719 struct adapter *sc = pi->adapter; 2720 struct netmap_adapter *na = NA(pi->nm_ifp); 2721 2722 MPASS(na != NULL); 2723 2724 len = pi->qsize_rxq * IQ_ESIZE; 2725 rc = alloc_ring(sc, len, &nm_rxq->iq_desc_tag, &nm_rxq->iq_desc_map, 2726 &nm_rxq->iq_ba, (void **)&nm_rxq->iq_desc); 2727 if (rc != 0) 2728 return (rc); 2729 2730 len = na->num_rx_desc * EQ_ESIZE + spg_len; 2731 rc = alloc_ring(sc, len, &nm_rxq->fl_desc_tag, &nm_rxq->fl_desc_map, 2732 &nm_rxq->fl_ba, (void **)&nm_rxq->fl_desc); 2733 if (rc != 0) 2734 return (rc); 2735 2736 nm_rxq->pi = pi; 2737 nm_rxq->nid = idx; 2738 nm_rxq->iq_cidx = 0; 2739 nm_rxq->iq_sidx = pi->qsize_rxq - spg_len / IQ_ESIZE; 2740 nm_rxq->iq_gen = F_RSPD_GEN; 2741 nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0; 2742 nm_rxq->fl_sidx = na->num_rx_desc; 2743 nm_rxq->intr_idx = intr_idx; 2744 2745 ctx = &pi->ctx; 2746 children = SYSCTL_CHILDREN(oid); 2747 2748 snprintf(name, sizeof(name), "%d", idx); 2749 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name, CTLFLAG_RD, NULL, 2750 "rx queue"); 2751 children = SYSCTL_CHILDREN(oid); 2752 2753 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "abs_id", 2754 CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->iq_abs_id, 0, sysctl_uint16, 2755 "I", "absolute id of the queue"); 2756 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id", 2757 CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->iq_cntxt_id, 0, sysctl_uint16, 2758 "I", "SGE context id of the queue"); 2759 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx", 2760 CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->iq_cidx, 0, sysctl_uint16, "I", 2761 "consumer index"); 2762 2763 children = SYSCTL_CHILDREN(oid); 2764 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fl", CTLFLAG_RD, NULL, 2765 "freelist"); 2766 children = SYSCTL_CHILDREN(oid); 2767 2768 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id", 2769 CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->fl_cntxt_id, 0, sysctl_uint16, 2770 "I", "SGE context id of the freelist"); 2771 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, 2772 &nm_rxq->fl_cidx, 0, "consumer index"); 2773 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, 2774 &nm_rxq->fl_pidx, 0, "producer index"); 2775 2776 return (rc); 2777 } 2778 2779 2780 static int 2781 free_nm_rxq(struct port_info *pi, struct sge_nm_rxq *nm_rxq) 2782 { 2783 struct adapter *sc = pi->adapter; 2784 2785 free_ring(sc, nm_rxq->iq_desc_tag, nm_rxq->iq_desc_map, nm_rxq->iq_ba, 2786 nm_rxq->iq_desc); 2787 free_ring(sc, nm_rxq->fl_desc_tag, nm_rxq->fl_desc_map, nm_rxq->fl_ba, 2788 nm_rxq->fl_desc); 2789 2790 return (0); 2791 } 2792 2793 static int 2794 alloc_nm_txq(struct port_info *pi, struct sge_nm_txq *nm_txq, int iqidx, int idx, 2795 struct sysctl_oid *oid) 2796 { 2797 int rc; 2798 size_t len; 2799 struct adapter *sc = pi->adapter; 2800 struct netmap_adapter *na = NA(pi->nm_ifp); 2801 char name[16]; 2802 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 2803 2804 len = na->num_tx_desc * EQ_ESIZE + spg_len; 2805 rc = alloc_ring(sc, len, &nm_txq->desc_tag, &nm_txq->desc_map, 2806 &nm_txq->ba, (void **)&nm_txq->desc); 2807 if (rc) 2808 return (rc); 2809 2810 nm_txq->pidx = nm_txq->cidx = 0; 2811 nm_txq->sidx = na->num_tx_desc; 2812 
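	/*
	 * Precompute the CPL control word (tx channel and PF) used for frames
	 * sent on this netmap tx queue.
	 */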
nm_txq->nid = idx; 2813 nm_txq->iqidx = iqidx; 2814 nm_txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) | 2815 V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf)); 2816 2817 snprintf(name, sizeof(name), "%d", idx); 2818 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD, 2819 NULL, "netmap tx queue"); 2820 children = SYSCTL_CHILDREN(oid); 2821 2822 SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 2823 &nm_txq->cntxt_id, 0, "SGE context id of the queue"); 2824 SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cidx", 2825 CTLTYPE_INT | CTLFLAG_RD, &nm_txq->cidx, 0, sysctl_uint16, "I", 2826 "consumer index"); 2827 SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "pidx", 2828 CTLTYPE_INT | CTLFLAG_RD, &nm_txq->pidx, 0, sysctl_uint16, "I", 2829 "producer index"); 2830 2831 return (rc); 2832 } 2833 2834 static int 2835 free_nm_txq(struct port_info *pi, struct sge_nm_txq *nm_txq) 2836 { 2837 struct adapter *sc = pi->adapter; 2838 2839 free_ring(sc, nm_txq->desc_tag, nm_txq->desc_map, nm_txq->ba, 2840 nm_txq->desc); 2841 2842 return (0); 2843 } 2844 #endif 2845 2846 static int 2847 ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq) 2848 { 2849 int rc, cntxt_id; 2850 struct fw_eq_ctrl_cmd c; 2851 2852 bzero(&c, sizeof(c)); 2853 2854 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST | 2855 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(sc->pf) | 2856 V_FW_EQ_CTRL_CMD_VFN(0)); 2857 c.alloc_to_len16 = htobe32(F_FW_EQ_CTRL_CMD_ALLOC | 2858 F_FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c)); 2859 c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(eq->iqid)); /* XXX */ 2860 c.physeqid_pkd = htobe32(0); 2861 c.fetchszm_to_iqid = 2862 htobe32(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) | 2863 V_FW_EQ_CTRL_CMD_PCIECHN(eq->tx_chan) | 2864 F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(eq->iqid)); 2865 c.dcaen_to_eqsize = 2866 htobe32(V_FW_EQ_CTRL_CMD_FBMIN(X_FETCHBURSTMIN_64B) | 2867 V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) | 2868 V_FW_EQ_CTRL_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) | 2869 V_FW_EQ_CTRL_CMD_EQSIZE(eq->qsize)); 2870 c.eqaddr = htobe64(eq->ba); 2871 2872 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 2873 if (rc != 0) { 2874 device_printf(sc->dev, 2875 "failed to create control queue %d: %d\n", eq->tx_chan, rc); 2876 return (rc); 2877 } 2878 eq->flags |= EQ_ALLOCATED; 2879 2880 eq->cntxt_id = G_FW_EQ_CTRL_CMD_EQID(be32toh(c.cmpliqid_eqid)); 2881 cntxt_id = eq->cntxt_id - sc->sge.eq_start; 2882 if (cntxt_id >= sc->sge.neq) 2883 panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, 2884 cntxt_id, sc->sge.neq - 1); 2885 sc->sge.eqmap[cntxt_id] = eq; 2886 2887 return (rc); 2888 } 2889 2890 static int 2891 eth_eq_alloc(struct adapter *sc, struct port_info *pi, struct sge_eq *eq) 2892 { 2893 int rc, cntxt_id; 2894 struct fw_eq_eth_cmd c; 2895 2896 bzero(&c, sizeof(c)); 2897 2898 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST | 2899 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) | 2900 V_FW_EQ_ETH_CMD_VFN(0)); 2901 c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC | 2902 F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c)); 2903 c.autoequiqe_to_viid = htobe32(V_FW_EQ_ETH_CMD_VIID(pi->viid)); 2904 c.fetchszm_to_iqid = 2905 htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) | 2906 V_FW_EQ_ETH_CMD_PCIECHN(eq->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO | 2907 V_FW_EQ_ETH_CMD_IQID(eq->iqid)); 2908 c.dcaen_to_eqsize = htobe32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) | 2909 
V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) | 2910 V_FW_EQ_ETH_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) | 2911 V_FW_EQ_ETH_CMD_EQSIZE(eq->qsize)); 2912 c.eqaddr = htobe64(eq->ba); 2913 2914 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 2915 if (rc != 0) { 2916 device_printf(pi->dev, 2917 "failed to create Ethernet egress queue: %d\n", rc); 2918 return (rc); 2919 } 2920 eq->flags |= EQ_ALLOCATED; 2921 2922 eq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd)); 2923 cntxt_id = eq->cntxt_id - sc->sge.eq_start; 2924 if (cntxt_id >= sc->sge.neq) 2925 panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, 2926 cntxt_id, sc->sge.neq - 1); 2927 sc->sge.eqmap[cntxt_id] = eq; 2928 2929 return (rc); 2930 } 2931 2932 #ifdef TCP_OFFLOAD 2933 static int 2934 ofld_eq_alloc(struct adapter *sc, struct port_info *pi, struct sge_eq *eq) 2935 { 2936 int rc, cntxt_id; 2937 struct fw_eq_ofld_cmd c; 2938 2939 bzero(&c, sizeof(c)); 2940 2941 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST | 2942 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(sc->pf) | 2943 V_FW_EQ_OFLD_CMD_VFN(0)); 2944 c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_ALLOC | 2945 F_FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c)); 2946 c.fetchszm_to_iqid = 2947 htonl(V_FW_EQ_OFLD_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) | 2948 V_FW_EQ_OFLD_CMD_PCIECHN(eq->tx_chan) | 2949 F_FW_EQ_OFLD_CMD_FETCHRO | V_FW_EQ_OFLD_CMD_IQID(eq->iqid)); 2950 c.dcaen_to_eqsize = 2951 htobe32(V_FW_EQ_OFLD_CMD_FBMIN(X_FETCHBURSTMIN_64B) | 2952 V_FW_EQ_OFLD_CMD_FBMAX(X_FETCHBURSTMAX_512B) | 2953 V_FW_EQ_OFLD_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) | 2954 V_FW_EQ_OFLD_CMD_EQSIZE(eq->qsize)); 2955 c.eqaddr = htobe64(eq->ba); 2956 2957 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 2958 if (rc != 0) { 2959 device_printf(pi->dev, 2960 "failed to create egress queue for TCP offload: %d\n", rc); 2961 return (rc); 2962 } 2963 eq->flags |= EQ_ALLOCATED; 2964 2965 eq->cntxt_id = G_FW_EQ_OFLD_CMD_EQID(be32toh(c.eqid_pkd)); 2966 cntxt_id = eq->cntxt_id - sc->sge.eq_start; 2967 if (cntxt_id >= sc->sge.neq) 2968 panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, 2969 cntxt_id, sc->sge.neq - 1); 2970 sc->sge.eqmap[cntxt_id] = eq; 2971 2972 return (rc); 2973 } 2974 #endif 2975 2976 static int 2977 alloc_eq(struct adapter *sc, struct port_info *pi, struct sge_eq *eq) 2978 { 2979 int rc; 2980 size_t len; 2981 2982 mtx_init(&eq->eq_lock, eq->lockname, NULL, MTX_DEF); 2983 2984 len = eq->qsize * EQ_ESIZE; 2985 rc = alloc_ring(sc, len, &eq->desc_tag, &eq->desc_map, 2986 &eq->ba, (void **)&eq->desc); 2987 if (rc) 2988 return (rc); 2989 2990 eq->cap = eq->qsize - spg_len / EQ_ESIZE; 2991 eq->spg = (void *)&eq->desc[eq->cap]; 2992 eq->avail = eq->cap - 1; /* one less to avoid cidx = pidx */ 2993 eq->pidx = eq->cidx = 0; 2994 eq->doorbells = sc->doorbells; 2995 2996 switch (eq->flags & EQ_TYPEMASK) { 2997 case EQ_CTRL: 2998 rc = ctrl_eq_alloc(sc, eq); 2999 break; 3000 3001 case EQ_ETH: 3002 rc = eth_eq_alloc(sc, pi, eq); 3003 break; 3004 3005 #ifdef TCP_OFFLOAD 3006 case EQ_OFLD: 3007 rc = ofld_eq_alloc(sc, pi, eq); 3008 break; 3009 #endif 3010 3011 default: 3012 panic("%s: invalid eq type %d.", __func__, 3013 eq->flags & EQ_TYPEMASK); 3014 } 3015 if (rc != 0) { 3016 device_printf(sc->dev, 3017 "failed to allocate egress queue(%d): %d\n", 3018 eq->flags & EQ_TYPEMASK, rc); 3019 } 3020 3021 eq->tx_callout.c_cpu = eq->cntxt_id % mp_ncpus; 3022 3023 if (isset(&eq->doorbells, DOORBELL_UDB) || 3024 isset(&eq->doorbells, DOORBELL_UDBWC) || 3025 
isset(&eq->doorbells, DOORBELL_WCWR)) { 3026 uint32_t s_qpp = sc->sge.eq_s_qpp; 3027 uint32_t mask = (1 << s_qpp) - 1; 3028 volatile uint8_t *udb; 3029 3030 udb = sc->udbs_base + UDBS_DB_OFFSET; 3031 udb += (eq->cntxt_id >> s_qpp) << PAGE_SHIFT; /* pg offset */ 3032 eq->udb_qid = eq->cntxt_id & mask; /* id in page */ 3033 if (eq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE) 3034 clrbit(&eq->doorbells, DOORBELL_WCWR); 3035 else { 3036 udb += eq->udb_qid << UDBS_SEG_SHIFT; /* seg offset */ 3037 eq->udb_qid = 0; 3038 } 3039 eq->udb = (volatile void *)udb; 3040 } 3041 3042 return (rc); 3043 } 3044 3045 static int 3046 free_eq(struct adapter *sc, struct sge_eq *eq) 3047 { 3048 int rc; 3049 3050 if (eq->flags & EQ_ALLOCATED) { 3051 switch (eq->flags & EQ_TYPEMASK) { 3052 case EQ_CTRL: 3053 rc = -t4_ctrl_eq_free(sc, sc->mbox, sc->pf, 0, 3054 eq->cntxt_id); 3055 break; 3056 3057 case EQ_ETH: 3058 rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, 3059 eq->cntxt_id); 3060 break; 3061 3062 #ifdef TCP_OFFLOAD 3063 case EQ_OFLD: 3064 rc = -t4_ofld_eq_free(sc, sc->mbox, sc->pf, 0, 3065 eq->cntxt_id); 3066 break; 3067 #endif 3068 3069 default: 3070 panic("%s: invalid eq type %d.", __func__, 3071 eq->flags & EQ_TYPEMASK); 3072 } 3073 if (rc != 0) { 3074 device_printf(sc->dev, 3075 "failed to free egress queue (%d): %d\n", 3076 eq->flags & EQ_TYPEMASK, rc); 3077 return (rc); 3078 } 3079 eq->flags &= ~EQ_ALLOCATED; 3080 } 3081 3082 free_ring(sc, eq->desc_tag, eq->desc_map, eq->ba, eq->desc); 3083 3084 if (mtx_initialized(&eq->eq_lock)) 3085 mtx_destroy(&eq->eq_lock); 3086 3087 bzero(eq, sizeof(*eq)); 3088 return (0); 3089 } 3090 3091 static int 3092 alloc_wrq(struct adapter *sc, struct port_info *pi, struct sge_wrq *wrq, 3093 struct sysctl_oid *oid) 3094 { 3095 int rc; 3096 struct sysctl_ctx_list *ctx = pi ? 
&pi->ctx : &sc->ctx; 3097 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 3098 3099 rc = alloc_eq(sc, pi, &wrq->eq); 3100 if (rc) 3101 return (rc); 3102 3103 wrq->adapter = sc; 3104 STAILQ_INIT(&wrq->wr_list); 3105 3106 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 3107 &wrq->eq.cntxt_id, 0, "SGE context id of the queue"); 3108 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx", 3109 CTLTYPE_INT | CTLFLAG_RD, &wrq->eq.cidx, 0, sysctl_uint16, "I", 3110 "consumer index"); 3111 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pidx", 3112 CTLTYPE_INT | CTLFLAG_RD, &wrq->eq.pidx, 0, sysctl_uint16, "I", 3113 "producer index"); 3114 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs", CTLFLAG_RD, 3115 &wrq->tx_wrs, "# of work requests"); 3116 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "no_desc", CTLFLAG_RD, 3117 &wrq->no_desc, 0, 3118 "# of times queue ran out of hardware descriptors"); 3119 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "unstalled", CTLFLAG_RD, 3120 &wrq->eq.unstalled, 0, "# of times queue recovered after stall"); 3121 3122 return (rc); 3123 } 3124 3125 static int 3126 free_wrq(struct adapter *sc, struct sge_wrq *wrq) 3127 { 3128 int rc; 3129 3130 rc = free_eq(sc, &wrq->eq); 3131 if (rc) 3132 return (rc); 3133 3134 bzero(wrq, sizeof(*wrq)); 3135 return (0); 3136 } 3137 3138 static int 3139 alloc_txq(struct port_info *pi, struct sge_txq *txq, int idx, 3140 struct sysctl_oid *oid) 3141 { 3142 int rc; 3143 struct adapter *sc = pi->adapter; 3144 struct sge_eq *eq = &txq->eq; 3145 char name[16]; 3146 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 3147 3148 rc = alloc_eq(sc, pi, eq); 3149 if (rc) 3150 return (rc); 3151 3152 txq->ifp = pi->ifp; 3153 3154 txq->sdesc = malloc(eq->cap * sizeof(struct tx_sdesc), M_CXGBE, 3155 M_ZERO | M_WAITOK); 3156 txq->br = buf_ring_alloc(eq->qsize, M_CXGBE, M_WAITOK, &eq->eq_lock); 3157 3158 rc = bus_dma_tag_create(sc->dmat, 1, 0, BUS_SPACE_MAXADDR, 3159 BUS_SPACE_MAXADDR, NULL, NULL, 64 * 1024, TX_SGL_SEGS, 3160 BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL, NULL, &txq->tx_tag); 3161 if (rc != 0) { 3162 device_printf(sc->dev, 3163 "failed to create tx DMA tag: %d\n", rc); 3164 return (rc); 3165 } 3166 3167 /* 3168 * We can stuff ~10 frames in an 8-descriptor txpkts WR (8 is the SGE 3169 * limit for any WR). txq->no_dmamap events shouldn't occur if maps is 3170 * sized for the worst case. 
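	 * That worst case is why the maps array below is sized at
	 * eq->qsize * 10 / 8.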
3171 */ 3172 rc = t4_alloc_tx_maps(&txq->txmaps, txq->tx_tag, eq->qsize * 10 / 8, 3173 M_WAITOK); 3174 if (rc != 0) { 3175 device_printf(sc->dev, "failed to setup tx DMA maps: %d\n", rc); 3176 return (rc); 3177 } 3178 3179 snprintf(name, sizeof(name), "%d", idx); 3180 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD, 3181 NULL, "tx queue"); 3182 children = SYSCTL_CHILDREN(oid); 3183 3184 SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 3185 &eq->cntxt_id, 0, "SGE context id of the queue"); 3186 SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cidx", 3187 CTLTYPE_INT | CTLFLAG_RD, &eq->cidx, 0, sysctl_uint16, "I", 3188 "consumer index"); 3189 SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "pidx", 3190 CTLTYPE_INT | CTLFLAG_RD, &eq->pidx, 0, sysctl_uint16, "I", 3191 "producer index"); 3192 3193 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txcsum", CTLFLAG_RD, 3194 &txq->txcsum, "# of times hardware assisted with checksum"); 3195 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "vlan_insertion", 3196 CTLFLAG_RD, &txq->vlan_insertion, 3197 "# of times hardware inserted 802.1Q tag"); 3198 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "tso_wrs", CTLFLAG_RD, 3199 &txq->tso_wrs, "# of TSO work requests"); 3200 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "imm_wrs", CTLFLAG_RD, 3201 &txq->imm_wrs, "# of work requests with immediate data"); 3202 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "sgl_wrs", CTLFLAG_RD, 3203 &txq->sgl_wrs, "# of work requests with direct SGL"); 3204 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkt_wrs", CTLFLAG_RD, 3205 &txq->txpkt_wrs, "# of txpkt work requests (one pkt/WR)"); 3206 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkts_wrs", CTLFLAG_RD, 3207 &txq->txpkts_wrs, "# of txpkts work requests (multiple pkts/WR)"); 3208 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkts_pkts", CTLFLAG_RD, 3209 &txq->txpkts_pkts, "# of frames tx'd using txpkts work requests"); 3210 3211 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "br_drops", CTLFLAG_RD, 3212 &txq->br->br_drops, "# of drops in the buf_ring for this queue"); 3213 SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "no_dmamap", CTLFLAG_RD, 3214 &txq->no_dmamap, 0, "# of times txq ran out of DMA maps"); 3215 SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "no_desc", CTLFLAG_RD, 3216 &txq->no_desc, 0, "# of times txq ran out of hardware descriptors"); 3217 SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "egr_update", CTLFLAG_RD, 3218 &eq->egr_update, 0, "egress update notifications from the SGE"); 3219 SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "unstalled", CTLFLAG_RD, 3220 &eq->unstalled, 0, "# of times txq recovered after stall"); 3221 3222 return (rc); 3223 } 3224 3225 static int 3226 free_txq(struct port_info *pi, struct sge_txq *txq) 3227 { 3228 int rc; 3229 struct adapter *sc = pi->adapter; 3230 struct sge_eq *eq = &txq->eq; 3231 3232 rc = free_eq(sc, eq); 3233 if (rc) 3234 return (rc); 3235 3236 free(txq->sdesc, M_CXGBE); 3237 3238 if (txq->txmaps.maps) 3239 t4_free_tx_maps(&txq->txmaps, txq->tx_tag); 3240 3241 buf_ring_free(txq->br, M_CXGBE); 3242 3243 if (txq->tx_tag) 3244 bus_dma_tag_destroy(txq->tx_tag); 3245 3246 bzero(txq, sizeof(*txq)); 3247 return (0); 3248 } 3249 3250 static void 3251 oneseg_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error) 3252 { 3253 bus_addr_t *ba = arg; 3254 3255 KASSERT(nseg == 1, 3256 ("%s meant for single segment mappings only.", __func__)); 3257 3258 *ba = error ? 
0 : segs->ds_addr; 3259 } 3260 3261 static inline void 3262 ring_fl_db(struct adapter *sc, struct sge_fl *fl) 3263 { 3264 uint32_t n, v; 3265 3266 n = IDXDIFF(fl->pidx / 8, fl->dbidx, fl->sidx); 3267 MPASS(n > 0); 3268 3269 wmb(); 3270 v = fl->dbval | V_PIDX(n); 3271 if (fl->udb) 3272 *fl->udb = htole32(v); 3273 else 3274 t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL), v); 3275 IDXINCR(fl->dbidx, n, fl->sidx); 3276 } 3277 3278 /* 3279 * Fills up the freelist by allocating upto 'n' buffers. Buffers that are 3280 * recycled do not count towards this allocation budget. 3281 * 3282 * Returns non-zero to indicate that this freelist should be added to the list 3283 * of starving freelists. 3284 */ 3285 static int 3286 refill_fl(struct adapter *sc, struct sge_fl *fl, int n) 3287 { 3288 __be64 *d; 3289 struct fl_sdesc *sd; 3290 uintptr_t pa; 3291 caddr_t cl; 3292 struct cluster_layout *cll; 3293 struct sw_zone_info *swz; 3294 struct cluster_metadata *clm; 3295 uint16_t max_pidx; 3296 uint16_t hw_cidx = fl->hw_cidx; /* stable snapshot */ 3297 3298 FL_LOCK_ASSERT_OWNED(fl); 3299 3300 /* 3301 * We always stop at the begining of the hardware descriptor that's just 3302 * before the one with the hw cidx. This is to avoid hw pidx = hw cidx, 3303 * which would mean an empty freelist to the chip. 3304 */ 3305 max_pidx = __predict_false(hw_cidx == 0) ? fl->sidx - 1 : hw_cidx - 1; 3306 if (fl->pidx == max_pidx * 8) 3307 return (0); 3308 3309 d = &fl->desc[fl->pidx]; 3310 sd = &fl->sdesc[fl->pidx]; 3311 cll = &fl->cll_def; /* default layout */ 3312 swz = &sc->sge.sw_zone_info[cll->zidx]; 3313 3314 while (n > 0) { 3315 3316 if (sd->cl != NULL) { 3317 3318 if (sd->nmbuf == 0) { 3319 /* 3320 * Fast recycle without involving any atomics on 3321 * the cluster's metadata (if the cluster has 3322 * metadata). This happens when all frames 3323 * received in the cluster were small enough to 3324 * fit within a single mbuf each. 3325 */ 3326 fl->cl_fast_recycled++; 3327 #ifdef INVARIANTS 3328 clm = cl_metadata(sc, fl, &sd->cll, sd->cl); 3329 if (clm != NULL) 3330 MPASS(clm->refcount == 1); 3331 #endif 3332 goto recycled_fast; 3333 } 3334 3335 /* 3336 * Cluster is guaranteed to have metadata. Clusters 3337 * without metadata always take the fast recycle path 3338 * when they're recycled. 
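			 * Drop our reference here; if it was the last one the
			 * cluster is free to be reused in place, otherwise
			 * give it up and allocate a fresh cluster instead.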
3339 */ 3340 clm = cl_metadata(sc, fl, &sd->cll, sd->cl); 3341 MPASS(clm != NULL); 3342 3343 if (atomic_fetchadd_int(&clm->refcount, -1) == 1) { 3344 fl->cl_recycled++; 3345 counter_u64_add(extfree_rels, 1); 3346 goto recycled; 3347 } 3348 sd->cl = NULL; /* gave up my reference */ 3349 } 3350 MPASS(sd->cl == NULL); 3351 alloc: 3352 cl = uma_zalloc(swz->zone, M_NOWAIT); 3353 if (__predict_false(cl == NULL)) { 3354 if (cll == &fl->cll_alt || fl->cll_alt.zidx == -1 || 3355 fl->cll_def.zidx == fl->cll_alt.zidx) 3356 break; 3357 3358 /* fall back to the safe zone */ 3359 cll = &fl->cll_alt; 3360 swz = &sc->sge.sw_zone_info[cll->zidx]; 3361 goto alloc; 3362 } 3363 fl->cl_allocated++; 3364 n--; 3365 3366 pa = pmap_kextract((vm_offset_t)cl); 3367 pa += cll->region1; 3368 sd->cl = cl; 3369 sd->cll = *cll; 3370 *d = htobe64(pa | cll->hwidx); 3371 clm = cl_metadata(sc, fl, cll, cl); 3372 if (clm != NULL) { 3373 recycled: 3374 #ifdef INVARIANTS 3375 clm->sd = sd; 3376 #endif 3377 clm->refcount = 1; 3378 } 3379 sd->nmbuf = 0; 3380 recycled_fast: 3381 d++; 3382 sd++; 3383 if (__predict_false(++fl->pidx % 8 == 0)) { 3384 uint16_t pidx = fl->pidx / 8; 3385 3386 if (__predict_false(pidx == fl->sidx)) { 3387 fl->pidx = 0; 3388 pidx = 0; 3389 sd = fl->sdesc; 3390 d = fl->desc; 3391 } 3392 if (pidx == max_pidx) 3393 break; 3394 3395 if (IDXDIFF(pidx, fl->dbidx, fl->sidx) >= 4) 3396 ring_fl_db(sc, fl); 3397 } 3398 } 3399 3400 if (fl->pidx / 8 != fl->dbidx) 3401 ring_fl_db(sc, fl); 3402 3403 return (FL_RUNNING_LOW(fl) && !(fl->flags & FL_STARVING)); 3404 } 3405 3406 /* 3407 * Attempt to refill all starving freelists. 3408 */ 3409 static void 3410 refill_sfl(void *arg) 3411 { 3412 struct adapter *sc = arg; 3413 struct sge_fl *fl, *fl_temp; 3414 3415 mtx_lock(&sc->sfl_lock); 3416 TAILQ_FOREACH_SAFE(fl, &sc->sfl, link, fl_temp) { 3417 FL_LOCK(fl); 3418 refill_fl(sc, fl, 64); 3419 if (FL_NOT_RUNNING_LOW(fl) || fl->flags & FL_DOOMED) { 3420 TAILQ_REMOVE(&sc->sfl, fl, link); 3421 fl->flags &= ~FL_STARVING; 3422 } 3423 FL_UNLOCK(fl); 3424 } 3425 3426 if (!TAILQ_EMPTY(&sc->sfl)) 3427 callout_schedule(&sc->sfl_callout, hz / 5); 3428 mtx_unlock(&sc->sfl_lock); 3429 } 3430 3431 static int 3432 alloc_fl_sdesc(struct sge_fl *fl) 3433 { 3434 3435 fl->sdesc = malloc(fl->sidx * 8 * sizeof(struct fl_sdesc), M_CXGBE, 3436 M_ZERO | M_WAITOK); 3437 3438 return (0); 3439 } 3440 3441 static void 3442 free_fl_sdesc(struct adapter *sc, struct sge_fl *fl) 3443 { 3444 struct fl_sdesc *sd; 3445 struct cluster_metadata *clm; 3446 struct cluster_layout *cll; 3447 int i; 3448 3449 sd = fl->sdesc; 3450 for (i = 0; i < fl->sidx * 8; i++, sd++) { 3451 if (sd->cl == NULL) 3452 continue; 3453 3454 cll = &sd->cll; 3455 clm = cl_metadata(sc, fl, cll, sd->cl); 3456 if (sd->nmbuf == 0) 3457 uma_zfree(sc->sge.sw_zone_info[cll->zidx].zone, sd->cl); 3458 else if (clm && atomic_fetchadd_int(&clm->refcount, -1) == 1) { 3459 uma_zfree(sc->sge.sw_zone_info[cll->zidx].zone, sd->cl); 3460 counter_u64_add(extfree_rels, 1); 3461 } 3462 sd->cl = NULL; 3463 } 3464 3465 free(fl->sdesc, M_CXGBE); 3466 fl->sdesc = NULL; 3467 } 3468 3469 int 3470 t4_alloc_tx_maps(struct tx_maps *txmaps, bus_dma_tag_t tx_tag, int count, 3471 int flags) 3472 { 3473 struct tx_map *txm; 3474 int i, rc; 3475 3476 txmaps->map_total = txmaps->map_avail = count; 3477 txmaps->map_cidx = txmaps->map_pidx = 0; 3478 3479 txmaps->maps = malloc(count * sizeof(struct tx_map), M_CXGBE, 3480 M_ZERO | flags); 3481 3482 txm = txmaps->maps; 3483 for (i = 0; i < count; i++, txm++) { 3484 rc = 
bus_dmamap_create(tx_tag, 0, &txm->map); 3485 if (rc != 0) 3486 goto failed; 3487 } 3488 3489 return (0); 3490 failed: 3491 while (--i >= 0) { 3492 txm--; 3493 bus_dmamap_destroy(tx_tag, txm->map); 3494 } 3495 KASSERT(txm == txmaps->maps, ("%s: EDOOFUS", __func__)); 3496 3497 free(txmaps->maps, M_CXGBE); 3498 txmaps->maps = NULL; 3499 3500 return (rc); 3501 } 3502 3503 void 3504 t4_free_tx_maps(struct tx_maps *txmaps, bus_dma_tag_t tx_tag) 3505 { 3506 struct tx_map *txm; 3507 int i; 3508 3509 txm = txmaps->maps; 3510 for (i = 0; i < txmaps->map_total; i++, txm++) { 3511 3512 if (txm->m) { 3513 bus_dmamap_unload(tx_tag, txm->map); 3514 m_freem(txm->m); 3515 txm->m = NULL; 3516 } 3517 3518 bus_dmamap_destroy(tx_tag, txm->map); 3519 } 3520 3521 free(txmaps->maps, M_CXGBE); 3522 txmaps->maps = NULL; 3523 } 3524 3525 /* 3526 * We'll do immediate data tx for non-TSO, but only when not coalescing. We're 3527 * willing to use upto 2 hardware descriptors which means a maximum of 96 bytes 3528 * of immediate data. 3529 */ 3530 #define IMM_LEN ( \ 3531 2 * EQ_ESIZE \ 3532 - sizeof(struct fw_eth_tx_pkt_wr) \ 3533 - sizeof(struct cpl_tx_pkt_core)) 3534 3535 /* 3536 * Returns non-zero on failure, no need to cleanup anything in that case. 3537 * 3538 * Note 1: We always try to defrag the mbuf if required and return EFBIG only 3539 * if the resulting chain still won't fit in a tx descriptor. 3540 * 3541 * Note 2: We'll pullup the mbuf chain if TSO is requested and the first mbuf 3542 * does not have the TCP header in it. 3543 */ 3544 static int 3545 get_pkt_sgl(struct sge_txq *txq, struct mbuf **fp, struct sgl *sgl, 3546 int sgl_only) 3547 { 3548 struct mbuf *m = *fp; 3549 struct tx_maps *txmaps; 3550 struct tx_map *txm; 3551 int rc, defragged = 0, n; 3552 3553 TXQ_LOCK_ASSERT_OWNED(txq); 3554 3555 if (m->m_pkthdr.tso_segsz) 3556 sgl_only = 1; /* Do not allow immediate data with LSO */ 3557 3558 start: sgl->nsegs = 0; 3559 3560 if (m->m_pkthdr.len <= IMM_LEN && !sgl_only) 3561 return (0); /* nsegs = 0 tells caller to use imm. tx */ 3562 3563 txmaps = &txq->txmaps; 3564 if (txmaps->map_avail == 0) { 3565 txq->no_dmamap++; 3566 return (ENOMEM); 3567 } 3568 txm = &txmaps->maps[txmaps->map_pidx]; 3569 3570 if (m->m_pkthdr.tso_segsz && m->m_len < 50) { 3571 *fp = m_pullup(m, 50); 3572 m = *fp; 3573 if (m == NULL) 3574 return (ENOBUFS); 3575 } 3576 3577 rc = bus_dmamap_load_mbuf_sg(txq->tx_tag, txm->map, m, sgl->seg, 3578 &sgl->nsegs, BUS_DMA_NOWAIT); 3579 if (rc == EFBIG && defragged == 0) { 3580 m = m_defrag(m, M_NOWAIT); 3581 if (m == NULL) 3582 return (EFBIG); 3583 3584 defragged = 1; 3585 *fp = m; 3586 goto start; 3587 } 3588 if (rc != 0) 3589 return (rc); 3590 3591 txm->m = m; 3592 txmaps->map_avail--; 3593 if (++txmaps->map_pidx == txmaps->map_total) 3594 txmaps->map_pidx = 0; 3595 3596 KASSERT(sgl->nsegs > 0 && sgl->nsegs <= TX_SGL_SEGS, 3597 ("%s: bad DMA mapping (%d segments)", __func__, sgl->nsegs)); 3598 3599 /* 3600 * Store the # of flits required to hold this frame's SGL in nflits. An 3601 * SGL has a (ULPTX header + len0, addr0) tuple optionally followed by 3602 * multiple (len0 + len1, addr0, addr1) tuples. If addr1 is not used 3603 * then len1 must be set to 0. 3604 */ 3605 n = sgl->nsegs - 1; 3606 sgl->nflits = (3 * n) / 2 + (n & 1) + 2; 3607 3608 return (0); 3609 } 3610 3611 3612 /* 3613 * Releases all the txq resources used up in the specified sgl. 
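 * A packet uses exactly one DMA map, so this just backs the map producer index
 * up by one and unloads that map.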
3614 */ 3615 static int 3616 free_pkt_sgl(struct sge_txq *txq, struct sgl *sgl) 3617 { 3618 struct tx_maps *txmaps; 3619 struct tx_map *txm; 3620 3621 TXQ_LOCK_ASSERT_OWNED(txq); 3622 3623 if (sgl->nsegs == 0) 3624 return (0); /* didn't use any map */ 3625 3626 txmaps = &txq->txmaps; 3627 3628 /* 1 pkt uses exactly 1 map, back it out */ 3629 3630 txmaps->map_avail++; 3631 if (txmaps->map_pidx > 0) 3632 txmaps->map_pidx--; 3633 else 3634 txmaps->map_pidx = txmaps->map_total - 1; 3635 3636 txm = &txmaps->maps[txmaps->map_pidx]; 3637 bus_dmamap_unload(txq->tx_tag, txm->map); 3638 txm->m = NULL; 3639 3640 return (0); 3641 } 3642 3643 static int 3644 write_txpkt_wr(struct port_info *pi, struct sge_txq *txq, struct mbuf *m, 3645 struct sgl *sgl) 3646 { 3647 struct sge_eq *eq = &txq->eq; 3648 struct fw_eth_tx_pkt_wr *wr; 3649 struct cpl_tx_pkt_core *cpl; 3650 uint32_t ctrl; /* used in many unrelated places */ 3651 uint64_t ctrl1; 3652 int nflits, ndesc, pktlen; 3653 struct tx_sdesc *txsd; 3654 caddr_t dst; 3655 3656 TXQ_LOCK_ASSERT_OWNED(txq); 3657 3658 pktlen = m->m_pkthdr.len; 3659 3660 /* 3661 * Do we have enough flits to send this frame out? 3662 */ 3663 ctrl = sizeof(struct cpl_tx_pkt_core); 3664 if (m->m_pkthdr.tso_segsz) { 3665 nflits = TXPKT_LSO_WR_HDR; 3666 ctrl += sizeof(struct cpl_tx_pkt_lso_core); 3667 } else 3668 nflits = TXPKT_WR_HDR; 3669 if (sgl->nsegs > 0) 3670 nflits += sgl->nflits; 3671 else { 3672 nflits += howmany(pktlen, 8); 3673 ctrl += pktlen; 3674 } 3675 ndesc = howmany(nflits, 8); 3676 if (ndesc > eq->avail) 3677 return (ENOMEM); 3678 3679 /* Firmware work request header */ 3680 wr = (void *)&eq->desc[eq->pidx]; 3681 wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) | 3682 V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl)); 3683 ctrl = V_FW_WR_LEN16(howmany(nflits, 2)); 3684 if (eq->avail == ndesc) { 3685 if (!(eq->flags & EQ_CRFLUSHED)) { 3686 ctrl |= F_FW_WR_EQUEQ | F_FW_WR_EQUIQ; 3687 eq->flags |= EQ_CRFLUSHED; 3688 } 3689 eq->flags |= EQ_STALLED; 3690 } 3691 3692 wr->equiq_to_len16 = htobe32(ctrl); 3693 wr->r3 = 0; 3694 3695 if (m->m_pkthdr.tso_segsz) { 3696 struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); 3697 struct ether_header *eh; 3698 void *l3hdr; 3699 #if defined(INET) || defined(INET6) 3700 struct tcphdr *tcp; 3701 #endif 3702 uint16_t eh_type; 3703 3704 ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | F_LSO_FIRST_SLICE | 3705 F_LSO_LAST_SLICE; 3706 3707 eh = mtod(m, struct ether_header *); 3708 eh_type = ntohs(eh->ether_type); 3709 if (eh_type == ETHERTYPE_VLAN) { 3710 struct ether_vlan_header *evh = (void *)eh; 3711 3712 ctrl |= V_LSO_ETHHDR_LEN(1); 3713 l3hdr = evh + 1; 3714 eh_type = ntohs(evh->evl_proto); 3715 } else 3716 l3hdr = eh + 1; 3717 3718 switch (eh_type) { 3719 #ifdef INET6 3720 case ETHERTYPE_IPV6: 3721 { 3722 struct ip6_hdr *ip6 = l3hdr; 3723 3724 /* 3725 * XXX-BZ For now we do not pretend to support 3726 * IPv6 extension headers. 
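			 * The TCP header is assumed to start immediately
			 * after the fixed IPv6 header.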
3727 */ 3728 KASSERT(ip6->ip6_nxt == IPPROTO_TCP, ("%s: CSUM_TSO " 3729 "with ip6_nxt != TCP: %u", __func__, ip6->ip6_nxt)); 3730 tcp = (struct tcphdr *)(ip6 + 1); 3731 ctrl |= F_LSO_IPV6; 3732 ctrl |= V_LSO_IPHDR_LEN(sizeof(*ip6) >> 2) | 3733 V_LSO_TCPHDR_LEN(tcp->th_off); 3734 break; 3735 } 3736 #endif 3737 #ifdef INET 3738 case ETHERTYPE_IP: 3739 { 3740 struct ip *ip = l3hdr; 3741 3742 tcp = (void *)((uintptr_t)ip + ip->ip_hl * 4); 3743 ctrl |= V_LSO_IPHDR_LEN(ip->ip_hl) | 3744 V_LSO_TCPHDR_LEN(tcp->th_off); 3745 break; 3746 } 3747 #endif 3748 default: 3749 panic("%s: CSUM_TSO but no supported IP version " 3750 "(0x%04x)", __func__, eh_type); 3751 } 3752 3753 lso->lso_ctrl = htobe32(ctrl); 3754 lso->ipid_ofst = htobe16(0); 3755 lso->mss = htobe16(m->m_pkthdr.tso_segsz); 3756 lso->seqno_offset = htobe32(0); 3757 lso->len = htobe32(pktlen); 3758 3759 cpl = (void *)(lso + 1); 3760 3761 txq->tso_wrs++; 3762 } else 3763 cpl = (void *)(wr + 1); 3764 3765 /* Checksum offload */ 3766 ctrl1 = 0; 3767 if (!(m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO))) 3768 ctrl1 |= F_TXPKT_IPCSUM_DIS; 3769 if (!(m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 | 3770 CSUM_TCP_IPV6 | CSUM_TSO))) 3771 ctrl1 |= F_TXPKT_L4CSUM_DIS; 3772 if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | 3773 CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) 3774 txq->txcsum++; /* some hardware assistance provided */ 3775 3776 /* VLAN tag insertion */ 3777 if (m->m_flags & M_VLANTAG) { 3778 ctrl1 |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->m_pkthdr.ether_vtag); 3779 txq->vlan_insertion++; 3780 } 3781 3782 /* CPL header */ 3783 cpl->ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) | 3784 V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(pi->adapter->pf)); 3785 cpl->pack = 0; 3786 cpl->len = htobe16(pktlen); 3787 cpl->ctrl1 = htobe64(ctrl1); 3788 3789 /* Software descriptor */ 3790 txsd = &txq->sdesc[eq->pidx]; 3791 txsd->desc_used = ndesc; 3792 3793 eq->pending += ndesc; 3794 eq->avail -= ndesc; 3795 eq->pidx += ndesc; 3796 if (eq->pidx >= eq->cap) 3797 eq->pidx -= eq->cap; 3798 3799 /* SGL */ 3800 dst = (void *)(cpl + 1); 3801 if (sgl->nsegs > 0) { 3802 txsd->credits = 1; 3803 txq->sgl_wrs++; 3804 write_sgl_to_txd(eq, sgl, &dst); 3805 } else { 3806 txsd->credits = 0; 3807 txq->imm_wrs++; 3808 for (; m; m = m->m_next) { 3809 copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len); 3810 #ifdef INVARIANTS 3811 pktlen -= m->m_len; 3812 #endif 3813 } 3814 #ifdef INVARIANTS 3815 KASSERT(pktlen == 0, ("%s: %d bytes left.", __func__, pktlen)); 3816 #endif 3817 3818 } 3819 3820 txq->txpkt_wrs++; 3821 return (0); 3822 } 3823 3824 /* 3825 * Returns 0 to indicate that m has been accepted into a coalesced tx work 3826 * request. It has either been folded into txpkts or txpkts was flushed and m 3827 * has started a new coalesced work request (as the first frame in a fresh 3828 * txpkts). 3829 * 3830 * Returns non-zero to indicate a failure - caller is responsible for 3831 * transmitting m, if there was anything in txpkts it has been flushed. 
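 * A frame is refused if it uses TSO, if the combined WR would exceed
 * TX_WR_FLITS or the available hardware descriptors, or if the total payload
 * would reach 64KB.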
3832 */ 3833 static int 3834 add_to_txpkts(struct port_info *pi, struct sge_txq *txq, struct txpkts *txpkts, 3835 struct mbuf *m, struct sgl *sgl) 3836 { 3837 struct sge_eq *eq = &txq->eq; 3838 int can_coalesce; 3839 struct tx_sdesc *txsd; 3840 int flits; 3841 3842 TXQ_LOCK_ASSERT_OWNED(txq); 3843 3844 KASSERT(sgl->nsegs, ("%s: can't coalesce imm data", __func__)); 3845 3846 if (txpkts->npkt > 0) { 3847 flits = TXPKTS_PKT_HDR + sgl->nflits; 3848 can_coalesce = m->m_pkthdr.tso_segsz == 0 && 3849 txpkts->nflits + flits <= TX_WR_FLITS && 3850 txpkts->nflits + flits <= eq->avail * 8 && 3851 txpkts->plen + m->m_pkthdr.len < 65536; 3852 3853 if (can_coalesce) { 3854 txpkts->npkt++; 3855 txpkts->nflits += flits; 3856 txpkts->plen += m->m_pkthdr.len; 3857 3858 txsd = &txq->sdesc[eq->pidx]; 3859 txsd->credits++; 3860 3861 return (0); 3862 } 3863 3864 /* 3865 * Couldn't coalesce m into txpkts. The first order of business 3866 * is to send txpkts on its way. Then we'll revisit m. 3867 */ 3868 write_txpkts_wr(txq, txpkts); 3869 } 3870 3871 /* 3872 * Check if we can start a new coalesced tx work request with m as 3873 * the first packet in it. 3874 */ 3875 3876 KASSERT(txpkts->npkt == 0, ("%s: txpkts not empty", __func__)); 3877 3878 flits = TXPKTS_WR_HDR + sgl->nflits; 3879 can_coalesce = m->m_pkthdr.tso_segsz == 0 && 3880 flits <= eq->avail * 8 && flits <= TX_WR_FLITS; 3881 3882 if (can_coalesce == 0) 3883 return (EINVAL); 3884 3885 /* 3886 * Start a fresh coalesced tx WR with m as the first frame in it. 3887 */ 3888 txpkts->npkt = 1; 3889 txpkts->nflits = flits; 3890 txpkts->flitp = &eq->desc[eq->pidx].flit[2]; 3891 txpkts->plen = m->m_pkthdr.len; 3892 3893 txsd = &txq->sdesc[eq->pidx]; 3894 txsd->credits = 1; 3895 3896 return (0); 3897 } 3898 3899 /* 3900 * Note that write_txpkts_wr can never run out of hardware descriptors (but 3901 * write_txpkt_wr can). add_to_txpkts ensures that a frame is accepted for 3902 * coalescing only if sufficient hardware descriptors are available. 
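 * The KASSERT on eq->avail in the body below restates this invariant.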
3903 */ 3904 static void 3905 write_txpkts_wr(struct sge_txq *txq, struct txpkts *txpkts) 3906 { 3907 struct sge_eq *eq = &txq->eq; 3908 struct fw_eth_tx_pkts_wr *wr; 3909 struct tx_sdesc *txsd; 3910 uint32_t ctrl; 3911 int ndesc; 3912 3913 TXQ_LOCK_ASSERT_OWNED(txq); 3914 3915 ndesc = howmany(txpkts->nflits, 8); 3916 3917 wr = (void *)&eq->desc[eq->pidx]; 3918 wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)); 3919 ctrl = V_FW_WR_LEN16(howmany(txpkts->nflits, 2)); 3920 if (eq->avail == ndesc) { 3921 if (!(eq->flags & EQ_CRFLUSHED)) { 3922 ctrl |= F_FW_WR_EQUEQ | F_FW_WR_EQUIQ; 3923 eq->flags |= EQ_CRFLUSHED; 3924 } 3925 eq->flags |= EQ_STALLED; 3926 } 3927 wr->equiq_to_len16 = htobe32(ctrl); 3928 wr->plen = htobe16(txpkts->plen); 3929 wr->npkt = txpkts->npkt; 3930 wr->r3 = wr->type = 0; 3931 3932 /* Everything else already written */ 3933 3934 txsd = &txq->sdesc[eq->pidx]; 3935 txsd->desc_used = ndesc; 3936 3937 KASSERT(eq->avail >= ndesc, ("%s: out of descriptors", __func__)); 3938 3939 eq->pending += ndesc; 3940 eq->avail -= ndesc; 3941 eq->pidx += ndesc; 3942 if (eq->pidx >= eq->cap) 3943 eq->pidx -= eq->cap; 3944 3945 txq->txpkts_pkts += txpkts->npkt; 3946 txq->txpkts_wrs++; 3947 txpkts->npkt = 0; /* emptied */ 3948 } 3949 3950 static inline void 3951 write_ulp_cpl_sgl(struct port_info *pi, struct sge_txq *txq, 3952 struct txpkts *txpkts, struct mbuf *m, struct sgl *sgl) 3953 { 3954 struct ulp_txpkt *ulpmc; 3955 struct ulptx_idata *ulpsc; 3956 struct cpl_tx_pkt_core *cpl; 3957 struct sge_eq *eq = &txq->eq; 3958 uintptr_t flitp, start, end; 3959 uint64_t ctrl; 3960 caddr_t dst; 3961 3962 KASSERT(txpkts->npkt > 0, ("%s: txpkts is empty", __func__)); 3963 3964 start = (uintptr_t)eq->desc; 3965 end = (uintptr_t)eq->spg; 3966 3967 /* Checksum offload */ 3968 ctrl = 0; 3969 if (!(m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO))) 3970 ctrl |= F_TXPKT_IPCSUM_DIS; 3971 if (!(m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 | 3972 CSUM_TCP_IPV6 | CSUM_TSO))) 3973 ctrl |= F_TXPKT_L4CSUM_DIS; 3974 if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | 3975 CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) 3976 txq->txcsum++; /* some hardware assistance provided */ 3977 3978 /* VLAN tag insertion */ 3979 if (m->m_flags & M_VLANTAG) { 3980 ctrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->m_pkthdr.ether_vtag); 3981 txq->vlan_insertion++; 3982 } 3983 3984 /* 3985 * The previous packet's SGL must have ended at a 16 byte boundary (this 3986 * is required by the firmware/hardware). It follows that flitp cannot 3987 * wrap around between the ULPTX master command and ULPTX subcommand (8 3988 * bytes each), and that it can not wrap around in the middle of the 3989 * cpl_tx_pkt_core either. 
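 * (The SGL written after the CPL may still wrap around the end of the
 * ring; write_sgl_to_txd handles that case.)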
3990 */ 3991 flitp = (uintptr_t)txpkts->flitp; 3992 KASSERT((flitp & 0xf) == 0, 3993 ("%s: last SGL did not end at 16 byte boundary: %p", 3994 __func__, txpkts->flitp)); 3995 3996 /* ULP master command */ 3997 ulpmc = (void *)flitp; 3998 ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0) | 3999 V_ULP_TXPKT_FID(eq->iqid)); 4000 ulpmc->len = htonl(howmany(sizeof(*ulpmc) + sizeof(*ulpsc) + 4001 sizeof(*cpl) + 8 * sgl->nflits, 16)); 4002 4003 /* ULP subcommand */ 4004 ulpsc = (void *)(ulpmc + 1); 4005 ulpsc->cmd_more = htobe32(V_ULPTX_CMD((u32)ULP_TX_SC_IMM) | 4006 F_ULP_TX_SC_MORE); 4007 ulpsc->len = htobe32(sizeof(struct cpl_tx_pkt_core)); 4008 4009 flitp += sizeof(*ulpmc) + sizeof(*ulpsc); 4010 if (flitp == end) 4011 flitp = start; 4012 4013 /* CPL_TX_PKT */ 4014 cpl = (void *)flitp; 4015 cpl->ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) | 4016 V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(pi->adapter->pf)); 4017 cpl->pack = 0; 4018 cpl->len = htobe16(m->m_pkthdr.len); 4019 cpl->ctrl1 = htobe64(ctrl); 4020 4021 flitp += sizeof(*cpl); 4022 if (flitp == end) 4023 flitp = start; 4024 4025 /* SGL for this frame */ 4026 dst = (caddr_t)flitp; 4027 txpkts->nflits += write_sgl_to_txd(eq, sgl, &dst); 4028 txpkts->flitp = (void *)dst; 4029 4030 KASSERT(((uintptr_t)dst & 0xf) == 0, 4031 ("%s: SGL ends at %p (not a 16 byte boundary)", __func__, dst)); 4032 } 4033 4034 /* 4035 * If the SGL ends on an address that is not 16 byte aligned, this function will 4036 * add a 0 filled flit at the end. It returns 1 in that case. 4037 */ 4038 static int 4039 write_sgl_to_txd(struct sge_eq *eq, struct sgl *sgl, caddr_t *to) 4040 { 4041 __be64 *flitp, *end; 4042 struct ulptx_sgl *usgl; 4043 bus_dma_segment_t *seg; 4044 int i, padded; 4045 4046 KASSERT(sgl->nsegs > 0 && sgl->nflits > 0, 4047 ("%s: bad SGL - nsegs=%d, nflits=%d", 4048 __func__, sgl->nsegs, sgl->nflits)); 4049 4050 KASSERT(((uintptr_t)(*to) & 0xf) == 0, 4051 ("%s: SGL must start at a 16 byte boundary: %p", __func__, *to)); 4052 4053 flitp = (__be64 *)(*to); 4054 end = flitp + sgl->nflits; 4055 seg = &sgl->seg[0]; 4056 usgl = (void *)flitp; 4057 4058 /* 4059 * We start at a 16 byte boundary somewhere inside the tx descriptor 4060 * ring, so we're at least 16 bytes away from the status page. There is 4061 * no chance of a wrap around in the middle of usgl (which is 16 bytes). 
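 * If the rest of the SGL would run past the status page, it is written
 * one flit at a time below, wrapping back to the start of the
 * descriptor ring as needed.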
4062 */ 4063 4064 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | 4065 V_ULPTX_NSGE(sgl->nsegs)); 4066 usgl->len0 = htobe32(seg->ds_len); 4067 usgl->addr0 = htobe64(seg->ds_addr); 4068 seg++; 4069 4070 if ((uintptr_t)end <= (uintptr_t)eq->spg) { 4071 4072 /* Won't wrap around at all */ 4073 4074 for (i = 0; i < sgl->nsegs - 1; i++, seg++) { 4075 usgl->sge[i / 2].len[i & 1] = htobe32(seg->ds_len); 4076 usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ds_addr); 4077 } 4078 if (i & 1) 4079 usgl->sge[i / 2].len[1] = htobe32(0); 4080 } else { 4081 4082 /* Will wrap somewhere in the rest of the SGL */ 4083 4084 /* 2 flits already written, write the rest flit by flit */ 4085 flitp = (void *)(usgl + 1); 4086 for (i = 0; i < sgl->nflits - 2; i++) { 4087 if ((uintptr_t)flitp == (uintptr_t)eq->spg) 4088 flitp = (void *)eq->desc; 4089 *flitp++ = get_flit(seg, sgl->nsegs - 1, i); 4090 } 4091 end = flitp; 4092 } 4093 4094 if ((uintptr_t)end & 0xf) { 4095 *(uint64_t *)end = 0; 4096 end++; 4097 padded = 1; 4098 } else 4099 padded = 0; 4100 4101 if ((uintptr_t)end == (uintptr_t)eq->spg) 4102 *to = (void *)eq->desc; 4103 else 4104 *to = (void *)end; 4105 4106 return (padded); 4107 } 4108 4109 static inline void 4110 copy_to_txd(struct sge_eq *eq, caddr_t from, caddr_t *to, int len) 4111 { 4112 if (__predict_true((uintptr_t)(*to) + len <= (uintptr_t)eq->spg)) { 4113 bcopy(from, *to, len); 4114 (*to) += len; 4115 } else { 4116 int portion = (uintptr_t)eq->spg - (uintptr_t)(*to); 4117 4118 bcopy(from, *to, portion); 4119 from += portion; 4120 portion = len - portion; /* remaining */ 4121 bcopy(from, (void *)eq->desc, portion); 4122 (*to) = (caddr_t)eq->desc + portion; 4123 } 4124 } 4125 4126 static inline void 4127 ring_eq_db(struct adapter *sc, struct sge_eq *eq) 4128 { 4129 u_int db, pending; 4130 4131 db = eq->doorbells; 4132 pending = eq->pending; 4133 if (pending > 1) 4134 clrbit(&db, DOORBELL_WCWR); 4135 eq->pending = 0; 4136 wmb(); 4137 4138 switch (ffs(db) - 1) { 4139 case DOORBELL_UDB: 4140 *eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(pending)); 4141 return; 4142 4143 case DOORBELL_WCWR: { 4144 volatile uint64_t *dst, *src; 4145 int i; 4146 4147 /* 4148 * Queues whose 128B doorbell segment fits in the page do not 4149 * use relative qid (udb_qid is always 0). Only queues with 4150 * doorbell segments can do WCWR. 4151 */ 4152 KASSERT(eq->udb_qid == 0 && pending == 1, 4153 ("%s: inappropriate doorbell (0x%x, %d, %d) for eq %p", 4154 __func__, eq->doorbells, pending, eq->pidx, eq)); 4155 4156 dst = (volatile void *)((uintptr_t)eq->udb + UDBS_WR_OFFSET - 4157 UDBS_DB_OFFSET); 4158 i = eq->pidx ? eq->pidx - 1 : eq->cap - 1; 4159 src = (void *)&eq->desc[i]; 4160 while (src != (void *)&eq->desc[i + 1]) 4161 *dst++ = *src++; 4162 wmb(); 4163 return; 4164 } 4165 4166 case DOORBELL_UDBWC: 4167 *eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(pending)); 4168 wmb(); 4169 return; 4170 4171 case DOORBELL_KDB: 4172 t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL), 4173 V_QID(eq->cntxt_id) | V_PIDX(pending)); 4174 return; 4175 } 4176 } 4177 4178 static inline int 4179 reclaimable(struct sge_eq *eq) 4180 { 4181 unsigned int cidx; 4182 4183 cidx = eq->spg->cidx; /* stable snapshot */ 4184 cidx = be16toh(cidx); 4185 4186 if (cidx >= eq->cidx) 4187 return (cidx - eq->cidx); 4188 else 4189 return (cidx + eq->cap - eq->cidx); 4190 } 4191 4192 /* 4193 * There are "can_reclaim" tx descriptors ready to be reclaimed. Reclaim as 4194 * many as possible but stop when there are around "n" mbufs to free. 
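 * ("n" is a hint rather than a hard limit: descriptors are reclaimed a
 * whole work request at a time, so slightly more mbufs may be freed.)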
4195 * 4196 * The actual number reclaimed is provided as the return value. 4197 */ 4198 static int 4199 reclaim_tx_descs(struct sge_txq *txq, int can_reclaim, int n) 4200 { 4201 struct tx_sdesc *txsd; 4202 struct tx_maps *txmaps; 4203 struct tx_map *txm; 4204 unsigned int reclaimed, maps; 4205 struct sge_eq *eq = &txq->eq; 4206 4207 TXQ_LOCK_ASSERT_OWNED(txq); 4208 4209 if (can_reclaim == 0) 4210 can_reclaim = reclaimable(eq); 4211 4212 maps = reclaimed = 0; 4213 while (can_reclaim && maps < n) { 4214 int ndesc; 4215 4216 txsd = &txq->sdesc[eq->cidx]; 4217 ndesc = txsd->desc_used; 4218 4219 /* Firmware doesn't return "partial" credits. */ 4220 KASSERT(can_reclaim >= ndesc, 4221 ("%s: unexpected number of credits: %d, %d", 4222 __func__, can_reclaim, ndesc)); 4223 4224 maps += txsd->credits; 4225 4226 reclaimed += ndesc; 4227 can_reclaim -= ndesc; 4228 4229 eq->cidx += ndesc; 4230 if (__predict_false(eq->cidx >= eq->cap)) 4231 eq->cidx -= eq->cap; 4232 } 4233 4234 txmaps = &txq->txmaps; 4235 txm = &txmaps->maps[txmaps->map_cidx]; 4236 if (maps) 4237 prefetch(txm->m); 4238 4239 eq->avail += reclaimed; 4240 KASSERT(eq->avail < eq->cap, /* avail tops out at (cap - 1) */ 4241 ("%s: too many descriptors available", __func__)); 4242 4243 txmaps->map_avail += maps; 4244 KASSERT(txmaps->map_avail <= txmaps->map_total, 4245 ("%s: too many maps available", __func__)); 4246 4247 while (maps--) { 4248 struct tx_map *next; 4249 4250 next = txm + 1; 4251 if (__predict_false(txmaps->map_cidx + 1 == txmaps->map_total)) 4252 next = txmaps->maps; 4253 prefetch(next->m); 4254 4255 bus_dmamap_unload(txq->tx_tag, txm->map); 4256 m_freem(txm->m); 4257 txm->m = NULL; 4258 4259 txm = next; 4260 if (__predict_false(++txmaps->map_cidx == txmaps->map_total)) 4261 txmaps->map_cidx = 0; 4262 } 4263 4264 return (reclaimed); 4265 } 4266 4267 static void 4268 write_eqflush_wr(struct sge_eq *eq) 4269 { 4270 struct fw_eq_flush_wr *wr; 4271 4272 EQ_LOCK_ASSERT_OWNED(eq); 4273 KASSERT(eq->avail > 0, ("%s: no descriptors left.", __func__)); 4274 KASSERT(!(eq->flags & EQ_CRFLUSHED), ("%s: flushed already", __func__)); 4275 4276 wr = (void *)&eq->desc[eq->pidx]; 4277 bzero(wr, sizeof(*wr)); 4278 wr->opcode = FW_EQ_FLUSH_WR; 4279 wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(sizeof(*wr) / 16) | 4280 F_FW_WR_EQUEQ | F_FW_WR_EQUIQ); 4281 4282 eq->flags |= (EQ_CRFLUSHED | EQ_STALLED); 4283 eq->pending++; 4284 eq->avail--; 4285 if (++eq->pidx == eq->cap) 4286 eq->pidx = 0; 4287 } 4288 4289 static __be64 4290 get_flit(bus_dma_segment_t *sgl, int nsegs, int idx) 4291 { 4292 int i = (idx / 3) * 2; 4293 4294 switch (idx % 3) { 4295 case 0: { 4296 __be64 rc; 4297 4298 rc = htobe32(sgl[i].ds_len); 4299 if (i + 1 < nsegs) 4300 rc |= (uint64_t)htobe32(sgl[i + 1].ds_len) << 32; 4301 4302 return (rc); 4303 } 4304 case 1: 4305 return htobe64(sgl[i].ds_addr); 4306 case 2: 4307 return htobe64(sgl[i + 1].ds_addr); 4308 } 4309 4310 return (0); 4311 } 4312 4313 static void 4314 find_best_refill_source(struct adapter *sc, struct sge_fl *fl, int maxp) 4315 { 4316 int8_t zidx, hwidx, idx; 4317 uint16_t region1, region3; 4318 int spare, spare_needed, n; 4319 struct sw_zone_info *swz; 4320 struct hw_buf_info *hwb, *hwb_list = &sc->sge.hw_buf_info[0]; 4321 4322 /* 4323 * Buffer Packing: Look for PAGE_SIZE or larger zone which has a bufsize 4324 * large enough for the max payload and cluster metadata. Otherwise 4325 * settle for the largest bufsize that leaves enough room in the cluster 4326 * for metadata. 
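 * (This is why spare_needed is CL_METADATA_SIZE when FL_BUF_PACKING is
 * set: the metadata has to fit in the cluster's spare space.)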
4327 * 4328 * Without buffer packing: Look for the smallest zone which has a 4329 * bufsize large enough for the max payload. Settle for the largest 4330 * bufsize available if there's nothing big enough for max payload. 4331 */ 4332 spare_needed = fl->flags & FL_BUF_PACKING ? CL_METADATA_SIZE : 0; 4333 swz = &sc->sge.sw_zone_info[0]; 4334 hwidx = -1; 4335 for (zidx = 0; zidx < SW_ZONE_SIZES; zidx++, swz++) { 4336 if (swz->size > largest_rx_cluster) { 4337 if (__predict_true(hwidx != -1)) 4338 break; 4339 4340 /* 4341 * This is a misconfiguration. largest_rx_cluster is 4342 * preventing us from finding a refill source. See 4343 * dev.t5nex.<n>.buffer_sizes to figure out why. 4344 */ 4345 device_printf(sc->dev, "largest_rx_cluster=%u leaves no" 4346 " refill source for fl %p (dma %u). Ignored.\n", 4347 largest_rx_cluster, fl, maxp); 4348 } 4349 for (idx = swz->head_hwidx; idx != -1; idx = hwb->next) { 4350 hwb = &hwb_list[idx]; 4351 spare = swz->size - hwb->size; 4352 if (spare < spare_needed) 4353 continue; 4354 4355 hwidx = idx; /* best option so far */ 4356 if (hwb->size >= maxp) { 4357 4358 if ((fl->flags & FL_BUF_PACKING) == 0) 4359 goto done; /* stop looking (not packing) */ 4360 4361 if (swz->size >= safest_rx_cluster) 4362 goto done; /* stop looking (packing) */ 4363 } 4364 break; /* keep looking, next zone */ 4365 } 4366 } 4367 done: 4368 /* A usable hwidx has been located. */ 4369 MPASS(hwidx != -1); 4370 hwb = &hwb_list[hwidx]; 4371 zidx = hwb->zidx; 4372 swz = &sc->sge.sw_zone_info[zidx]; 4373 region1 = 0; 4374 region3 = swz->size - hwb->size; 4375 4376 /* 4377 * Stay within this zone and see if there is a better match when mbuf 4378 * inlining is allowed. Remember that the hwidx's are sorted in 4379 * decreasing order of size (so in increasing order of spare area). 4380 */ 4381 for (idx = hwidx; idx != -1; idx = hwb->next) { 4382 hwb = &hwb_list[idx]; 4383 spare = swz->size - hwb->size; 4384 4385 if (allow_mbufs_in_cluster == 0 || hwb->size < maxp) 4386 break; 4387 4388 /* 4389 * Do not inline mbufs if doing so would violate the pad/pack 4390 * boundary alignment requirement. 4391 */ 4392 if (fl_pad && (MSIZE % sc->sge.pad_boundary) != 0) 4393 continue; 4394 if (fl->flags & FL_BUF_PACKING && 4395 (MSIZE % sc->sge.pack_boundary) != 0) 4396 continue; 4397 4398 if (spare < CL_METADATA_SIZE + MSIZE) 4399 continue; 4400 n = (spare - CL_METADATA_SIZE) / MSIZE; 4401 if (n > howmany(hwb->size, maxp)) 4402 break; 4403 4404 hwidx = idx; 4405 if (fl->flags & FL_BUF_PACKING) { 4406 region1 = n * MSIZE; 4407 region3 = spare - region1; 4408 } else { 4409 region1 = MSIZE; 4410 region3 = spare - region1; 4411 break; 4412 } 4413 } 4414 4415 KASSERT(zidx >= 0 && zidx < SW_ZONE_SIZES, 4416 ("%s: bad zone %d for fl %p, maxp %d", __func__, zidx, fl, maxp)); 4417 KASSERT(hwidx >= 0 && hwidx <= SGE_FLBUF_SIZES, 4418 ("%s: bad hwidx %d for fl %p, maxp %d", __func__, hwidx, fl, maxp)); 4419 KASSERT(region1 + sc->sge.hw_buf_info[hwidx].size + region3 == 4420 sc->sge.sw_zone_info[zidx].size, 4421 ("%s: bad buffer layout for fl %p, maxp %d. " 4422 "cl %d; r1 %d, payload %d, r3 %d", __func__, fl, maxp, 4423 sc->sge.sw_zone_info[zidx].size, region1, 4424 sc->sge.hw_buf_info[hwidx].size, region3)); 4425 if (fl->flags & FL_BUF_PACKING || region1 > 0) { 4426 KASSERT(region3 >= CL_METADATA_SIZE, 4427 ("%s: no room for metadata. 
fl %p, maxp %d; " 4428 "cl %d; r1 %d, payload %d, r3 %d", __func__, fl, maxp, 4429 sc->sge.sw_zone_info[zidx].size, region1, 4430 sc->sge.hw_buf_info[hwidx].size, region3)); 4431 KASSERT(region1 % MSIZE == 0, 4432 ("%s: bad mbuf region for fl %p, maxp %d. " 4433 "cl %d; r1 %d, payload %d, r3 %d", __func__, fl, maxp, 4434 sc->sge.sw_zone_info[zidx].size, region1, 4435 sc->sge.hw_buf_info[hwidx].size, region3)); 4436 } 4437 4438 fl->cll_def.zidx = zidx; 4439 fl->cll_def.hwidx = hwidx; 4440 fl->cll_def.region1 = region1; 4441 fl->cll_def.region3 = region3; 4442 } 4443 4444 static void 4445 find_safe_refill_source(struct adapter *sc, struct sge_fl *fl) 4446 { 4447 struct sge *s = &sc->sge; 4448 struct hw_buf_info *hwb; 4449 struct sw_zone_info *swz; 4450 int spare; 4451 int8_t hwidx; 4452 4453 if (fl->flags & FL_BUF_PACKING) 4454 hwidx = s->safe_hwidx2; /* with room for metadata */ 4455 else if (allow_mbufs_in_cluster && s->safe_hwidx2 != -1) { 4456 hwidx = s->safe_hwidx2; 4457 hwb = &s->hw_buf_info[hwidx]; 4458 swz = &s->sw_zone_info[hwb->zidx]; 4459 spare = swz->size - hwb->size; 4460 4461 /* no good if there isn't room for an mbuf as well */ 4462 if (spare < CL_METADATA_SIZE + MSIZE) 4463 hwidx = s->safe_hwidx1; 4464 } else 4465 hwidx = s->safe_hwidx1; 4466 4467 if (hwidx == -1) { 4468 /* No fallback source */ 4469 fl->cll_alt.hwidx = -1; 4470 fl->cll_alt.zidx = -1; 4471 4472 return; 4473 } 4474 4475 hwb = &s->hw_buf_info[hwidx]; 4476 swz = &s->sw_zone_info[hwb->zidx]; 4477 spare = swz->size - hwb->size; 4478 fl->cll_alt.hwidx = hwidx; 4479 fl->cll_alt.zidx = hwb->zidx; 4480 if (allow_mbufs_in_cluster && 4481 (fl_pad == 0 || (MSIZE % sc->sge.pad_boundary) == 0)) 4482 fl->cll_alt.region1 = ((spare - CL_METADATA_SIZE) / MSIZE) * MSIZE; 4483 else 4484 fl->cll_alt.region1 = 0; 4485 fl->cll_alt.region3 = spare - fl->cll_alt.region1; 4486 } 4487 4488 static void 4489 add_fl_to_sfl(struct adapter *sc, struct sge_fl *fl) 4490 { 4491 mtx_lock(&sc->sfl_lock); 4492 FL_LOCK(fl); 4493 if ((fl->flags & FL_DOOMED) == 0) { 4494 fl->flags |= FL_STARVING; 4495 TAILQ_INSERT_TAIL(&sc->sfl, fl, link); 4496 callout_reset(&sc->sfl_callout, hz / 5, refill_sfl, sc); 4497 } 4498 FL_UNLOCK(fl); 4499 mtx_unlock(&sc->sfl_lock); 4500 } 4501 4502 static int 4503 handle_sge_egr_update(struct sge_iq *iq, const struct rss_header *rss, 4504 struct mbuf *m) 4505 { 4506 const struct cpl_sge_egr_update *cpl = (const void *)(rss + 1); 4507 unsigned int qid = G_EGR_QID(ntohl(cpl->opcode_qid)); 4508 struct adapter *sc = iq->adapter; 4509 struct sge *s = &sc->sge; 4510 struct sge_eq *eq; 4511 4512 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__, 4513 rss->opcode)); 4514 4515 eq = s->eqmap[qid - s->eq_start]; 4516 EQ_LOCK(eq); 4517 KASSERT(eq->flags & EQ_CRFLUSHED, 4518 ("%s: unsolicited egress update", __func__)); 4519 eq->flags &= ~EQ_CRFLUSHED; 4520 eq->egr_update++; 4521 4522 if (__predict_false(eq->flags & EQ_DOOMED)) 4523 wakeup_one(eq); 4524 else if (eq->flags & EQ_STALLED && can_resume_tx(eq)) 4525 taskqueue_enqueue(sc->tq[eq->tx_chan], &eq->tx_task); 4526 EQ_UNLOCK(eq); 4527 4528 return (0); 4529 } 4530 4531 /* handle_fw_msg works for both fw4_msg and fw6_msg because this is valid */ 4532 CTASSERT(offsetof(struct cpl_fw4_msg, data) == \ 4533 offsetof(struct cpl_fw6_msg, data)); 4534 4535 static int 4536 handle_fw_msg(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 4537 { 4538 struct adapter *sc = iq->adapter; 4539 const struct cpl_fw6_msg *cpl = (const void *)(rss + 1); 4540 4541 KASSERT(m == 
NULL, ("%s: payload with opcode %02x", __func__, 4542 rss->opcode)); 4543 4544 if (cpl->type == FW_TYPE_RSSCPL || cpl->type == FW6_TYPE_RSSCPL) { 4545 const struct rss_header *rss2; 4546 4547 rss2 = (const struct rss_header *)&cpl->data[0]; 4548 return (sc->cpl_handler[rss2->opcode](iq, rss2, m)); 4549 } 4550 4551 return (sc->fw_msg_handler[cpl->type](sc, &cpl->data[0])); 4552 } 4553 4554 static int 4555 sysctl_uint16(SYSCTL_HANDLER_ARGS) 4556 { 4557 uint16_t *id = arg1; 4558 int i = *id; 4559 4560 return sysctl_handle_int(oidp, &i, 0, req); 4561 } 4562 4563 static int 4564 sysctl_bufsizes(SYSCTL_HANDLER_ARGS) 4565 { 4566 struct sge *s = arg1; 4567 struct hw_buf_info *hwb = &s->hw_buf_info[0]; 4568 struct sw_zone_info *swz = &s->sw_zone_info[0]; 4569 int i, rc; 4570 struct sbuf sb; 4571 char c; 4572 4573 sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND); 4574 for (i = 0; i < SGE_FLBUF_SIZES; i++, hwb++) { 4575 if (hwb->zidx >= 0 && swz[hwb->zidx].size <= largest_rx_cluster) 4576 c = '*'; 4577 else 4578 c = '\0'; 4579 4580 sbuf_printf(&sb, "%u%c ", hwb->size, c); 4581 } 4582 sbuf_trim(&sb); 4583 sbuf_finish(&sb); 4584 rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); 4585 sbuf_delete(&sb); 4586 return (rc); 4587 } 4588
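/*
 * The string built above lists every hardware rx buffer size, with a '*'
 * appended to the sizes whose backing cluster zone is within
 * largest_rx_cluster (and therefore usable).  This is presumably the
 * handler behind the dev.t5nex.<n>.buffer_sizes node referred to in
 * find_best_refill_source.
 */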