/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_kern_tls.h"
#include "opt_ratelimit.h"

#include <sys/types.h>
#include <sys/eventhandler.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/kernel.h>
#include <sys/ktls.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>
#include <sys/smp.h>
#include <sys/socketvar.h>
#include <sys/counter.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_vlan_var.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <machine/in_cksum.h>
#include <machine/md_var.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#ifdef DEV_NETMAP
#include <machine/bus.h>
#include <sys/selinfo.h>
#include <net/if_var.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#endif

#include "common/common.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "common/t4_msg.h"
#include "t4_l2t.h"
#include "t4_mp_ring.h"

#ifdef T4_PKT_TIMESTAMP
#define RX_COPY_THRESHOLD (MINCLSIZE - 8)
#else
#define RX_COPY_THRESHOLD MINCLSIZE
#endif

/* Internal mbuf flags stored in PH_loc.eight[1]. */
#define	MC_NOMAP	0x01
#define	MC_RAW_WR	0x02
#define	MC_TLS		0x04

/*
 * Ethernet frames are DMA'd at this byte offset into the freelist buffer.
 * 0-7 are valid values.
 */
static int fl_pktshift = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, fl_pktshift, CTLFLAG_RDTUN, &fl_pktshift, 0,
    "payload DMA offset in rx buffer (bytes)");

/*
 * Pad ethernet payload up to this boundary.
 * -1: driver should figure out a good value.
 *  0: disable padding.
 * Any power of 2 from 32 to 4096 (both inclusive) is also a valid value.
 */
int fl_pad = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, fl_pad, CTLFLAG_RDTUN, &fl_pad, 0,
    "payload pad boundary (bytes)");

/*
 * Status page length.
 * -1: driver should figure out a good value.
 *  64 or 128 are the only other valid values.
 */
static int spg_len = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, spg_len, CTLFLAG_RDTUN, &spg_len, 0,
    "status page size (bytes)");

/*
 * Congestion drops.
 * -1: no congestion feedback (not recommended).
 *  0: backpressure the channel instead of dropping packets right away.
 *  1: no backpressure, drop packets for the congested queue immediately.
 */
static int cong_drop = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, cong_drop, CTLFLAG_RDTUN, &cong_drop, 0,
    "Congestion control for RX queues (0 = backpressure, 1 = drop)");

/*
 * Deliver multiple frames in the same free list buffer if they fit.
 * -1: let the driver decide whether to enable buffer packing or not.
 *  0: disable buffer packing.
 *  1: enable buffer packing.
 */
static int buffer_packing = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, buffer_packing, CTLFLAG_RDTUN, &buffer_packing,
    0, "Enable buffer packing");

/*
 * Start next frame in a packed buffer at this boundary.
 * -1: driver should figure out a good value.
 * T4: driver will ignore this and use the same value as fl_pad above.
 * T5: 16, or a power of 2 from 64 to 4096 (both inclusive) is a valid value.
 */
static int fl_pack = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, fl_pack, CTLFLAG_RDTUN, &fl_pack, 0,
    "payload pack boundary (bytes)");

/*
 * Largest rx cluster size that the driver is allowed to allocate.
 */
static int largest_rx_cluster = MJUM16BYTES;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, largest_rx_cluster, CTLFLAG_RDTUN,
    &largest_rx_cluster, 0, "Largest rx cluster (bytes)");

/*
 * Size of cluster allocation that's most likely to succeed.  The driver will
 * fall back to this size if it fails to allocate clusters larger than this.
 */
static int safest_rx_cluster = PAGE_SIZE;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, safest_rx_cluster, CTLFLAG_RDTUN,
    &safest_rx_cluster, 0, "Safe rx cluster (bytes)");

#ifdef RATELIMIT
/*
 * Knob to control TCP timestamp rewriting, and the granularity of the tick
 * used for rewriting.  -1 and 0-3 are all valid values.
 * -1: hardware should leave the TCP timestamps alone.
 *  0: 1ms
 *  1: 100us
 *  2: 10us
 *  3: 1us
 */
static int tsclk = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, tsclk, CTLFLAG_RDTUN, &tsclk, 0,
    "Control TCP timestamp rewriting when using pacing");

static int eo_max_backlog = 1024 * 1024;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, eo_max_backlog, CTLFLAG_RDTUN, &eo_max_backlog,
    0, "Maximum backlog of ratelimited data per flow");
#endif

/*
 * The interrupt holdoff timers are multiplied by this value on T6+.
 * 1 and 3-17 (both inclusive) are legal values.
 */
static int tscale = 1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, tscale, CTLFLAG_RDTUN, &tscale, 0,
    "Interrupt holdoff timer scale on T6+");

/*
 * Number of LRO entries in the lro_ctrl structure per rx queue.
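 *
 * Like the other knobs above, this is a CTLFLAG_RDTUN sysctl, i.e. a loader
 * tunable under hw.cxgbe that must be set before the driver attaches.  An
 * illustrative (values are examples only, not recommendations)
 * /boot/loader.conf fragment:
 *
 *	hw.cxgbe.fl_pktshift="2"	# 0-7 are valid
 *	hw.cxgbe.buffer_packing="1"
 *	hw.cxgbe.spg_len="128"		# 64 or 128
 *
 * Out-of-range values for several of these are corrected at module load time
 * (see t4_sge_modload below); fl_pad and fl_pack are sanitized when the
 * adapter's SGE is configured (setup_pad_and_pack_boundaries).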
194 */ 195 static int lro_entries = TCP_LRO_ENTRIES; 196 SYSCTL_INT(_hw_cxgbe, OID_AUTO, lro_entries, CTLFLAG_RDTUN, &lro_entries, 0, 197 "Number of LRO entries per RX queue"); 198 199 /* 200 * This enables presorting of frames before they're fed into tcp_lro_rx. 201 */ 202 static int lro_mbufs = 0; 203 SYSCTL_INT(_hw_cxgbe, OID_AUTO, lro_mbufs, CTLFLAG_RDTUN, &lro_mbufs, 0, 204 "Enable presorting of LRO frames"); 205 206 static int service_iq(struct sge_iq *, int); 207 static int service_iq_fl(struct sge_iq *, int); 208 static struct mbuf *get_fl_payload(struct adapter *, struct sge_fl *, uint32_t); 209 static int eth_rx(struct adapter *, struct sge_rxq *, const struct iq_desc *, 210 u_int); 211 static inline void init_iq(struct sge_iq *, struct adapter *, int, int, int); 212 static inline void init_fl(struct adapter *, struct sge_fl *, int, int, char *); 213 static inline void init_eq(struct adapter *, struct sge_eq *, int, int, uint8_t, 214 uint16_t, char *); 215 static int alloc_ring(struct adapter *, size_t, bus_dma_tag_t *, bus_dmamap_t *, 216 bus_addr_t *, void **); 217 static int free_ring(struct adapter *, bus_dma_tag_t, bus_dmamap_t, bus_addr_t, 218 void *); 219 static int alloc_iq_fl(struct vi_info *, struct sge_iq *, struct sge_fl *, 220 int, int); 221 static int free_iq_fl(struct vi_info *, struct sge_iq *, struct sge_fl *); 222 static void add_iq_sysctls(struct sysctl_ctx_list *, struct sysctl_oid *, 223 struct sge_iq *); 224 static void add_fl_sysctls(struct adapter *, struct sysctl_ctx_list *, 225 struct sysctl_oid *, struct sge_fl *); 226 static int alloc_fwq(struct adapter *); 227 static int free_fwq(struct adapter *); 228 static int alloc_ctrlq(struct adapter *, struct sge_wrq *, int, 229 struct sysctl_oid *); 230 static int alloc_rxq(struct vi_info *, struct sge_rxq *, int, int, 231 struct sysctl_oid *); 232 static int free_rxq(struct vi_info *, struct sge_rxq *); 233 #ifdef TCP_OFFLOAD 234 static int alloc_ofld_rxq(struct vi_info *, struct sge_ofld_rxq *, int, int, 235 struct sysctl_oid *); 236 static int free_ofld_rxq(struct vi_info *, struct sge_ofld_rxq *); 237 #endif 238 #ifdef DEV_NETMAP 239 static int alloc_nm_rxq(struct vi_info *, struct sge_nm_rxq *, int, int, 240 struct sysctl_oid *); 241 static int free_nm_rxq(struct vi_info *, struct sge_nm_rxq *); 242 static int alloc_nm_txq(struct vi_info *, struct sge_nm_txq *, int, int, 243 struct sysctl_oid *); 244 static int free_nm_txq(struct vi_info *, struct sge_nm_txq *); 245 #endif 246 static int ctrl_eq_alloc(struct adapter *, struct sge_eq *); 247 static int eth_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *); 248 #if defined(TCP_OFFLOAD) || defined(RATELIMIT) 249 static int ofld_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *); 250 #endif 251 static int alloc_eq(struct adapter *, struct vi_info *, struct sge_eq *); 252 static int free_eq(struct adapter *, struct sge_eq *); 253 static int alloc_wrq(struct adapter *, struct vi_info *, struct sge_wrq *, 254 struct sysctl_oid *); 255 static int free_wrq(struct adapter *, struct sge_wrq *); 256 static int alloc_txq(struct vi_info *, struct sge_txq *, int, 257 struct sysctl_oid *); 258 static int free_txq(struct vi_info *, struct sge_txq *); 259 static void oneseg_dma_callback(void *, bus_dma_segment_t *, int, int); 260 static inline void ring_fl_db(struct adapter *, struct sge_fl *); 261 static int refill_fl(struct adapter *, struct sge_fl *, int); 262 static void refill_sfl(void *); 263 static int alloc_fl_sdesc(struct sge_fl *); 264 
static void free_fl_sdesc(struct adapter *, struct sge_fl *);
static int find_refill_source(struct adapter *, int, bool);
static void add_fl_to_sfl(struct adapter *, struct sge_fl *);

static inline void get_pkt_gl(struct mbuf *, struct sglist *);
static inline u_int txpkt_len16(u_int, u_int);
static inline u_int txpkt_vm_len16(u_int, u_int);
static inline u_int txpkts0_len16(u_int);
static inline u_int txpkts1_len16(void);
static u_int write_raw_wr(struct sge_txq *, void *, struct mbuf *, u_int);
static u_int write_txpkt_wr(struct adapter *, struct sge_txq *, struct mbuf *,
    u_int);
static u_int write_txpkt_vm_wr(struct adapter *, struct sge_txq *,
    struct mbuf *);
static int add_to_txpkts_vf(struct adapter *, struct sge_txq *, struct mbuf *,
    int, bool *);
static int add_to_txpkts_pf(struct adapter *, struct sge_txq *, struct mbuf *,
    int, bool *);
static u_int write_txpkts_wr(struct adapter *, struct sge_txq *);
static u_int write_txpkts_vm_wr(struct adapter *, struct sge_txq *);
static void write_gl_to_txd(struct sge_txq *, struct mbuf *, caddr_t *, int);
static inline void copy_to_txd(struct sge_eq *, caddr_t, caddr_t *, int);
static inline void ring_eq_db(struct adapter *, struct sge_eq *, u_int);
static inline uint16_t read_hw_cidx(struct sge_eq *);
static inline u_int reclaimable_tx_desc(struct sge_eq *);
static inline u_int total_available_tx_desc(struct sge_eq *);
static u_int reclaim_tx_descs(struct sge_txq *, u_int);
static void tx_reclaim(void *, int);
static __be64 get_flit(struct sglist_seg *, int, int);
static int handle_sge_egr_update(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
static int handle_fw_msg(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
static int t4_handle_wrerr_rpl(struct adapter *, const __be64 *);
static void wrq_tx_drain(void *, int);
static void drain_wrq_wr_list(struct adapter *, struct sge_wrq *);

static int sysctl_uint16(SYSCTL_HANDLER_ARGS);
static int sysctl_bufsizes(SYSCTL_HANDLER_ARGS);
#ifdef RATELIMIT
static inline u_int txpkt_eo_len16(u_int, u_int, u_int);
static int ethofld_fw4_ack(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
#endif

static counter_u64_t extfree_refs;
static counter_u64_t extfree_rels;

an_handler_t t4_an_handler;
fw_msg_handler_t t4_fw_msg_handler[NUM_FW6_TYPES];
cpl_handler_t t4_cpl_handler[NUM_CPL_CMDS];
cpl_handler_t set_tcb_rpl_handlers[NUM_CPL_COOKIES];
cpl_handler_t l2t_write_rpl_handlers[NUM_CPL_COOKIES];
cpl_handler_t act_open_rpl_handlers[NUM_CPL_COOKIES];
cpl_handler_t abort_rpl_rss_handlers[NUM_CPL_COOKIES];
cpl_handler_t fw4_ack_handlers[NUM_CPL_COOKIES];

void
t4_register_an_handler(an_handler_t h)
{
	uintptr_t *loc;

	MPASS(h == NULL || t4_an_handler == NULL);

	loc = (uintptr_t *)&t4_an_handler;
	atomic_store_rel_ptr(loc, (uintptr_t)h);
}

void
t4_register_fw_msg_handler(int type, fw_msg_handler_t h)
{
	uintptr_t *loc;

	MPASS(type < nitems(t4_fw_msg_handler));
	MPASS(h == NULL || t4_fw_msg_handler[type] == NULL);
	/*
	 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the
	 * CPL handler dispatch table.  Reject any attempt to install a
	 * handler for this subtype.
	 */
	MPASS(type != FW_TYPE_RSSCPL);
	MPASS(type != FW6_TYPE_RSSCPL);

	loc = (uintptr_t *)&t4_fw_msg_handler[type];
	atomic_store_rel_ptr(loc, (uintptr_t)h);
}

void
t4_register_cpl_handler(int opcode, cpl_handler_t h)
{
	uintptr_t *loc;

	MPASS(opcode < nitems(t4_cpl_handler));
	MPASS(h == NULL || t4_cpl_handler[opcode] == NULL);

	loc = (uintptr_t *)&t4_cpl_handler[opcode];
	atomic_store_rel_ptr(loc, (uintptr_t)h);
}

static int
set_tcb_rpl_handler(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1);
	u_int tid;
	int cookie;

	MPASS(m == NULL);

	tid = GET_TID(cpl);
	if (is_hpftid(iq->adapter, tid) || is_ftid(iq->adapter, tid)) {
		/*
		 * The return code for filter-write is put in the CPL cookie so
		 * we have to rely on the hardware tid (is_ftid) to determine
		 * that this is a response to a filter.
		 */
		cookie = CPL_COOKIE_FILTER;
	} else {
		cookie = G_COOKIE(cpl->cookie);
	}
	MPASS(cookie > CPL_COOKIE_RESERVED);
	MPASS(cookie < nitems(set_tcb_rpl_handlers));

	return (set_tcb_rpl_handlers[cookie](iq, rss, m));
}

static int
l2t_write_rpl_handler(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	const struct cpl_l2t_write_rpl *rpl = (const void *)(rss + 1);
	unsigned int cookie;

	MPASS(m == NULL);

	cookie = GET_TID(rpl) & F_SYNC_WR ? CPL_COOKIE_TOM : CPL_COOKIE_FILTER;
	return (l2t_write_rpl_handlers[cookie](iq, rss, m));
}

static int
act_open_rpl_handler(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);
	u_int cookie = G_TID_COOKIE(G_AOPEN_ATID(be32toh(cpl->atid_status)));

	MPASS(m == NULL);
	MPASS(cookie != CPL_COOKIE_RESERVED);

	return (act_open_rpl_handlers[cookie](iq, rss, m));
}

static int
abort_rpl_rss_handler(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	u_int cookie;

	MPASS(m == NULL);
	if (is_hashfilter(sc))
		cookie = CPL_COOKIE_HASHFILTER;
	else
		cookie = CPL_COOKIE_TOM;

	return (abort_rpl_rss_handlers[cookie](iq, rss, m));
}

static int
fw4_ack_handler(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_fw4_ack *cpl = (const void *)(rss + 1);
	unsigned int tid = G_CPL_FW4_ACK_FLOWID(be32toh(OPCODE_TID(cpl)));
	u_int cookie;

	MPASS(m == NULL);
	if (is_etid(sc, tid))
		cookie = CPL_COOKIE_ETHOFLD;
	else
		cookie = CPL_COOKIE_TOM;

	return (fw4_ack_handlers[cookie](iq, rss, m));
}

static void
t4_init_shared_cpl_handlers(void)
{

	t4_register_cpl_handler(CPL_SET_TCB_RPL, set_tcb_rpl_handler);
	t4_register_cpl_handler(CPL_L2T_WRITE_RPL, l2t_write_rpl_handler);
	t4_register_cpl_handler(CPL_ACT_OPEN_RPL, act_open_rpl_handler);
	t4_register_cpl_handler(CPL_ABORT_RPL_RSS, abort_rpl_rss_handler);
	t4_register_cpl_handler(CPL_FW4_ACK, fw4_ack_handler);
}

void
t4_register_shared_cpl_handler(int opcode, cpl_handler_t h, int cookie)
{
	uintptr_t *loc;

	MPASS(opcode < nitems(t4_cpl_handler));
	MPASS(cookie > CPL_COOKIE_RESERVED);
	MPASS(cookie < NUM_CPL_COOKIES);
	MPASS(t4_cpl_handler[opcode] != NULL);

	switch (opcode) {
	case CPL_SET_TCB_RPL:
		loc = (uintptr_t *)&set_tcb_rpl_handlers[cookie];
		break;
	case CPL_L2T_WRITE_RPL:
		loc = (uintptr_t *)&l2t_write_rpl_handlers[cookie];
		break;
	case CPL_ACT_OPEN_RPL:
		loc = (uintptr_t *)&act_open_rpl_handlers[cookie];
		break;
	case CPL_ABORT_RPL_RSS:
		loc = (uintptr_t *)&abort_rpl_rss_handlers[cookie];
		break;
	case CPL_FW4_ACK:
		loc = (uintptr_t *)&fw4_ack_handlers[cookie];
		break;
	default:
		MPASS(0);
		return;
	}
	MPASS(h == NULL || *loc == (uintptr_t)NULL);
	atomic_store_rel_ptr(loc, (uintptr_t)h);
}

/*
 * Called on MOD_LOAD.  Validates and calculates the SGE tunables.
 */
void
t4_sge_modload(void)
{

	if (fl_pktshift < 0 || fl_pktshift > 7) {
		printf("Invalid hw.cxgbe.fl_pktshift value (%d),"
		    " using 0 instead.\n", fl_pktshift);
		fl_pktshift = 0;
	}

	if (spg_len != 64 && spg_len != 128) {
		int len;

#if defined(__i386__) || defined(__amd64__)
		len = cpu_clflush_line_size > 64 ? 128 : 64;
#else
		len = 64;
#endif
		if (spg_len != -1) {
			printf("Invalid hw.cxgbe.spg_len value (%d),"
			    " using %d instead.\n", spg_len, len);
		}
		spg_len = len;
	}

	if (cong_drop < -1 || cong_drop > 1) {
		printf("Invalid hw.cxgbe.cong_drop value (%d),"
		    " using 0 instead.\n", cong_drop);
		cong_drop = 0;
	}

	if (tscale != 1 && (tscale < 3 || tscale > 17)) {
		printf("Invalid hw.cxgbe.tscale value (%d),"
		    " using 1 instead.\n", tscale);
		tscale = 1;
	}

	extfree_refs = counter_u64_alloc(M_WAITOK);
	extfree_rels = counter_u64_alloc(M_WAITOK);
	counter_u64_zero(extfree_refs);
	counter_u64_zero(extfree_rels);

	t4_init_shared_cpl_handlers();
	t4_register_cpl_handler(CPL_FW4_MSG, handle_fw_msg);
	t4_register_cpl_handler(CPL_FW6_MSG, handle_fw_msg);
	t4_register_cpl_handler(CPL_SGE_EGR_UPDATE, handle_sge_egr_update);
#ifdef RATELIMIT
	t4_register_shared_cpl_handler(CPL_FW4_ACK, ethofld_fw4_ack,
	    CPL_COOKIE_ETHOFLD);
#endif
	t4_register_fw_msg_handler(FW6_TYPE_CMD_RPL, t4_handle_fw_rpl);
	t4_register_fw_msg_handler(FW6_TYPE_WRERR_RPL, t4_handle_wrerr_rpl);
}

void
t4_sge_modunload(void)
{

	counter_u64_free(extfree_refs);
	counter_u64_free(extfree_rels);
}

uint64_t
t4_sge_extfree_refs(void)
{
	uint64_t refs, rels;

	rels = counter_u64_fetch(extfree_rels);
	refs = counter_u64_fetch(extfree_refs);

	return (refs - rels);
}

/* max 4096 */
#define MAX_PACK_BOUNDARY 512

static inline void
setup_pad_and_pack_boundaries(struct adapter *sc)
{
	uint32_t v, m;
	int pad, pack, pad_shift;

	pad_shift = chip_id(sc) > CHELSIO_T5 ? X_T6_INGPADBOUNDARY_SHIFT :
	    X_INGPADBOUNDARY_SHIFT;
	pad = fl_pad;
	if (fl_pad < (1 << pad_shift) ||
	    fl_pad > (1 << (pad_shift + M_INGPADBOUNDARY)) ||
	    !powerof2(fl_pad)) {
		/*
		 * If there is any chance that we might use buffer packing and
		 * the chip is a T4, then pick 64 as the pad/pack boundary.
		 * Set it to the minimum allowed in all other cases.
		 */
		pad = is_t4(sc) && buffer_packing ? 64 : 1 << pad_shift;

		/*
		 * For fl_pad = 0 we'll still write a reasonable value to the
		 * register but all the freelists will opt out of padding.
		 * We'll complain here only if the user tried to set it to a
		 * value greater than 0 that was invalid.
		 */
		if (fl_pad > 0) {
			device_printf(sc->dev, "Invalid hw.cxgbe.fl_pad value"
			    " (%d), using %d instead.\n", fl_pad, pad);
		}
	}
	m = V_INGPADBOUNDARY(M_INGPADBOUNDARY);
	v = V_INGPADBOUNDARY(ilog2(pad) - pad_shift);
	t4_set_reg_field(sc, A_SGE_CONTROL, m, v);

	if (is_t4(sc)) {
		if (fl_pack != -1 && fl_pack != pad) {
			/* Complain but carry on. */
			device_printf(sc->dev, "hw.cxgbe.fl_pack (%d) ignored,"
			    " using %d instead.\n", fl_pack, pad);
		}
		return;
	}

	pack = fl_pack;
	if (fl_pack < 16 || fl_pack == 32 || fl_pack > 4096 ||
	    !powerof2(fl_pack)) {
		if (sc->params.pci.mps > MAX_PACK_BOUNDARY)
			pack = MAX_PACK_BOUNDARY;
		else
			pack = max(sc->params.pci.mps, CACHE_LINE_SIZE);
		MPASS(powerof2(pack));
		if (pack < 16)
			pack = 16;
		if (pack == 32)
			pack = 64;
		if (pack > 4096)
			pack = 4096;
		if (fl_pack != -1) {
			device_printf(sc->dev, "Invalid hw.cxgbe.fl_pack value"
			    " (%d), using %d instead.\n", fl_pack, pack);
		}
	}
	m = V_INGPACKBOUNDARY(M_INGPACKBOUNDARY);
	if (pack == 16)
		v = V_INGPACKBOUNDARY(0);
	else
		v = V_INGPACKBOUNDARY(ilog2(pack) - 5);

	MPASS(!is_t4(sc));	/* T4 doesn't have SGE_CONTROL2 */
	t4_set_reg_field(sc, A_SGE_CONTROL2, m, v);
}

/*
 * adap->params.vpd.cclk must be set up before this is called.
 */
void
t4_tweak_chip_settings(struct adapter *sc)
{
	int i, reg;
	uint32_t v, m;
	int intr_timer[SGE_NTIMERS] = {1, 5, 10, 50, 100, 200};
	int timer_max = M_TIMERVALUE0 * 1000 / sc->params.vpd.cclk;
	int intr_pktcount[SGE_NCOUNTERS] = {1, 8, 16, 32}; /* 63 max */
	uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE);
	static int sw_buf_sizes[] = {
		MCLBYTES,
#if MJUMPAGESIZE != MCLBYTES
		MJUMPAGESIZE,
#endif
		MJUM9BYTES,
		MJUM16BYTES
	};

	KASSERT(sc->flags & MASTER_PF,
	    ("%s: trying to change chip settings when not master.", __func__));

	m = V_PKTSHIFT(M_PKTSHIFT) | F_RXPKTCPLMODE | F_EGRSTATUSPAGESIZE;
	v = V_PKTSHIFT(fl_pktshift) | F_RXPKTCPLMODE |
	    V_EGRSTATUSPAGESIZE(spg_len == 128);
	t4_set_reg_field(sc, A_SGE_CONTROL, m, v);

	setup_pad_and_pack_boundaries(sc);

	v = V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF1(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF2(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF3(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF4(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF5(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF6(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF7(PAGE_SHIFT - 10);
	t4_write_reg(sc, A_SGE_HOST_PAGE_SIZE, v);

	t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE0, 4096);
	t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE1, 65536);
	reg = A_SGE_FL_BUFFER_SIZE2;
	for (i = 0; i < nitems(sw_buf_sizes); i++) {
		MPASS(reg <= A_SGE_FL_BUFFER_SIZE15);
		t4_write_reg(sc, reg, sw_buf_sizes[i]);
		reg += 4;
		MPASS(reg <= A_SGE_FL_BUFFER_SIZE15);
		t4_write_reg(sc, reg, sw_buf_sizes[i] - CL_METADATA_SIZE);
		reg += 4;
	}

	v = V_THRESHOLD_0(intr_pktcount[0]) | V_THRESHOLD_1(intr_pktcount[1]) |
	    V_THRESHOLD_2(intr_pktcount[2]) | V_THRESHOLD_3(intr_pktcount[3]);
	t4_write_reg(sc, A_SGE_INGRESS_RX_THRESHOLD, v);

	KASSERT(intr_timer[0] <= timer_max,
	    ("%s: not a single usable timer (%d, %d)", __func__, intr_timer[0],
	    timer_max));
	for (i = 1; i < nitems(intr_timer); i++) {
		KASSERT(intr_timer[i] >= intr_timer[i - 1],
		    ("%s: timers not listed in increasing order (%d)",
		    __func__, i));

		while (intr_timer[i] > timer_max) {
			if (i == nitems(intr_timer) - 1) {
				intr_timer[i] = timer_max;
				break;
			}
			intr_timer[i] += intr_timer[i - 1];
			intr_timer[i] /= 2;
		}
	}

	v = V_TIMERVALUE0(us_to_core_ticks(sc, intr_timer[0])) |
	    V_TIMERVALUE1(us_to_core_ticks(sc, intr_timer[1]));
	t4_write_reg(sc, A_SGE_TIMER_VALUE_0_AND_1, v);
	v = V_TIMERVALUE2(us_to_core_ticks(sc, intr_timer[2])) |
	    V_TIMERVALUE3(us_to_core_ticks(sc, intr_timer[3]));
	t4_write_reg(sc, A_SGE_TIMER_VALUE_2_AND_3, v);
	v = V_TIMERVALUE4(us_to_core_ticks(sc, intr_timer[4])) |
	    V_TIMERVALUE5(us_to_core_ticks(sc, intr_timer[5]));
	t4_write_reg(sc, A_SGE_TIMER_VALUE_4_AND_5, v);

	if (chip_id(sc) >= CHELSIO_T6) {
		m = V_TSCALE(M_TSCALE);
		if (tscale == 1)
			v = 0;
		else
			v = V_TSCALE(tscale - 2);
		t4_set_reg_field(sc, A_SGE_ITP_CONTROL, m, v);

		if (sc->debug_flags & DF_DISABLE_TCB_CACHE) {
			m = V_RDTHRESHOLD(M_RDTHRESHOLD) | F_WRTHRTHRESHEN |
			    V_WRTHRTHRESH(M_WRTHRTHRESH);
			t4_tp_pio_read(sc, &v, 1, A_TP_CMM_CONFIG, 1);
			v &= ~m;
			v |= V_RDTHRESHOLD(1) | F_WRTHRTHRESHEN |
			    V_WRTHRTHRESH(16);
			t4_tp_pio_write(sc, &v, 1, A_TP_CMM_CONFIG, 1);
		}
	}

	/* 4K, 16K, 64K, 256K DDP "page sizes" for TDDP */
	v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6);
	t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, v);

	/*
	 * 4K, 8K, 16K, 64K DDP "page sizes" for iSCSI DDP.  These have been
	 * chosen with MAXPHYS = 128K in mind.  The largest DDP buffer that we
	 * may have to deal with is MAXPHYS + 1 page.
	 */
	v = V_HPZ0(0) | V_HPZ1(1) | V_HPZ2(2) | V_HPZ3(4);
	t4_write_reg(sc, A_ULP_RX_ISCSI_PSZ, v);

	/* We use multiple DDP page sizes both in plain-TOE and ISCSI modes. */
	m = v = F_TDDPTAGTCB | F_ISCSITAGTCB;
	t4_set_reg_field(sc, A_ULP_RX_CTL, m, v);

	m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET |
	    F_RESETDDPOFFSET;
	v = V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET;
	t4_set_reg_field(sc, A_TP_PARA_REG5, m, v);
}

/*
 * SGE wants the buffer to be at least 64B and then a multiple of 16.  Its
 * address must be 16B aligned.  If padding is in use the buffer's start and
 * end need to be aligned to the pad boundary as well.  We'll just make sure
 * that the size is a multiple of the pad boundary here, it is up to the
 * buffer allocation code to make sure the start of the buffer is aligned.
 */
static inline int
hwsz_ok(struct adapter *sc, int hwsz)
{
	int mask = fl_pad ? sc->params.sge.pad_boundary - 1 : 16 - 1;

	return (hwsz >= 64 && (hwsz & mask) == 0);
}

/*
 * XXX: driver really should be able to deal with unexpected settings.
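 *
 * Illustrative summary of the pairing computed below: with buffer packing,
 * each sw cluster size (size1) is matched with the largest hw buffer size
 * (size2) that still leaves room for the cluster metadata, i.e.
 * size2 <= size1 - CL_METADATA_SIZE, ideally exactly size1 - CL_METADATA_SIZE.
 * That is why t4_tweak_chip_settings above programs both sw_buf_sizes[i] and
 * sw_buf_sizes[i] - CL_METADATA_SIZE for every sw size.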
788 */ 789 int 790 t4_read_chip_settings(struct adapter *sc) 791 { 792 struct sge *s = &sc->sge; 793 struct sge_params *sp = &sc->params.sge; 794 int i, j, n, rc = 0; 795 uint32_t m, v, r; 796 uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE); 797 static int sw_buf_sizes[] = { /* Sorted by size */ 798 MCLBYTES, 799 #if MJUMPAGESIZE != MCLBYTES 800 MJUMPAGESIZE, 801 #endif 802 MJUM9BYTES, 803 MJUM16BYTES 804 }; 805 struct rx_buf_info *rxb; 806 807 m = F_RXPKTCPLMODE; 808 v = F_RXPKTCPLMODE; 809 r = sc->params.sge.sge_control; 810 if ((r & m) != v) { 811 device_printf(sc->dev, "invalid SGE_CONTROL(0x%x)\n", r); 812 rc = EINVAL; 813 } 814 815 /* 816 * If this changes then every single use of PAGE_SHIFT in the driver 817 * needs to be carefully reviewed for PAGE_SHIFT vs sp->page_shift. 818 */ 819 if (sp->page_shift != PAGE_SHIFT) { 820 device_printf(sc->dev, "invalid SGE_HOST_PAGE_SIZE(0x%x)\n", r); 821 rc = EINVAL; 822 } 823 824 s->safe_zidx = -1; 825 rxb = &s->rx_buf_info[0]; 826 for (i = 0; i < SW_ZONE_SIZES; i++, rxb++) { 827 rxb->size1 = sw_buf_sizes[i]; 828 rxb->zone = m_getzone(rxb->size1); 829 rxb->type = m_gettype(rxb->size1); 830 rxb->size2 = 0; 831 rxb->hwidx1 = -1; 832 rxb->hwidx2 = -1; 833 for (j = 0; j < SGE_FLBUF_SIZES; j++) { 834 int hwsize = sp->sge_fl_buffer_size[j]; 835 836 if (!hwsz_ok(sc, hwsize)) 837 continue; 838 839 /* hwidx for size1 */ 840 if (rxb->hwidx1 == -1 && rxb->size1 == hwsize) 841 rxb->hwidx1 = j; 842 843 /* hwidx for size2 (buffer packing) */ 844 if (rxb->size1 - CL_METADATA_SIZE < hwsize) 845 continue; 846 n = rxb->size1 - hwsize - CL_METADATA_SIZE; 847 if (n == 0) { 848 rxb->hwidx2 = j; 849 rxb->size2 = hwsize; 850 break; /* stop looking */ 851 } 852 if (rxb->hwidx2 != -1) { 853 if (n < sp->sge_fl_buffer_size[rxb->hwidx2] - 854 hwsize - CL_METADATA_SIZE) { 855 rxb->hwidx2 = j; 856 rxb->size2 = hwsize; 857 } 858 } else if (n <= 2 * CL_METADATA_SIZE) { 859 rxb->hwidx2 = j; 860 rxb->size2 = hwsize; 861 } 862 } 863 if (rxb->hwidx2 != -1) 864 sc->flags |= BUF_PACKING_OK; 865 if (s->safe_zidx == -1 && rxb->size1 == safest_rx_cluster) 866 s->safe_zidx = i; 867 } 868 869 if (sc->flags & IS_VF) 870 return (0); 871 872 v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6); 873 r = t4_read_reg(sc, A_ULP_RX_TDDP_PSZ); 874 if (r != v) { 875 device_printf(sc->dev, "invalid ULP_RX_TDDP_PSZ(0x%x)\n", r); 876 rc = EINVAL; 877 } 878 879 m = v = F_TDDPTAGTCB; 880 r = t4_read_reg(sc, A_ULP_RX_CTL); 881 if ((r & m) != v) { 882 device_printf(sc->dev, "invalid ULP_RX_CTL(0x%x)\n", r); 883 rc = EINVAL; 884 } 885 886 m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET | 887 F_RESETDDPOFFSET; 888 v = V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET; 889 r = t4_read_reg(sc, A_TP_PARA_REG5); 890 if ((r & m) != v) { 891 device_printf(sc->dev, "invalid TP_PARA_REG5(0x%x)\n", r); 892 rc = EINVAL; 893 } 894 895 t4_init_tp_params(sc, 1); 896 897 t4_read_mtu_tbl(sc, sc->params.mtus, NULL); 898 t4_load_mtus(sc, sc->params.mtus, sc->params.a_wnd, sc->params.b_wnd); 899 900 return (rc); 901 } 902 903 int 904 t4_create_dma_tag(struct adapter *sc) 905 { 906 int rc; 907 908 rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0, 909 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, 910 BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL, 911 NULL, &sc->dmat); 912 if (rc != 0) { 913 device_printf(sc->dev, 914 "failed to create main DMA tag: %d\n", rc); 915 } 916 917 return (rc); 918 } 919 920 void 921 t4_sge_sysctls(struct adapter *sc, struct 
t4_sge_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx,
    struct sysctl_oid_list *children)
{
	struct sge_params *sp = &sc->params.sge;

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "buffer_sizes",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
	    sysctl_bufsizes, "A", "freelist buffer sizes");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pktshift", CTLFLAG_RD,
	    NULL, sp->fl_pktshift, "payload DMA offset in rx buffer (bytes)");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pad", CTLFLAG_RD,
	    NULL, sp->pad_boundary, "payload pad boundary (bytes)");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "spg_len", CTLFLAG_RD,
	    NULL, sp->spg_len, "status page size (bytes)");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cong_drop", CTLFLAG_RD,
	    NULL, cong_drop, "congestion drop setting");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pack", CTLFLAG_RD,
	    NULL, sp->pack_boundary, "payload pack boundary (bytes)");
}

int
t4_destroy_dma_tag(struct adapter *sc)
{
	if (sc->dmat)
		bus_dma_tag_destroy(sc->dmat);

	return (0);
}

/*
 * Allocate and initialize the firmware event queue, control queues, and
 * special purpose rx queues owned by the adapter.
 *
 * Returns errno on failure.  Resources allocated up to that point may still
 * be allocated.  Caller is responsible for cleanup in case this function
 * fails.
 */
int
t4_setup_adapter_queues(struct adapter *sc)
{
	struct sysctl_oid *oid;
	struct sysctl_oid_list *children;
	int rc, i;

	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);

	sysctl_ctx_init(&sc->ctx);
	sc->flags |= ADAP_SYSCTL_CTX;

	/*
	 * Firmware event queue
	 */
	rc = alloc_fwq(sc);
	if (rc != 0)
		return (rc);

	/*
	 * That's all for the VF driver.
	 */
	if (sc->flags & IS_VF)
		return (rc);

	oid = device_get_sysctl_tree(sc->dev);
	children = SYSCTL_CHILDREN(oid);

	/*
	 * XXX: General purpose rx queues, one per port.
	 */

	/*
	 * Control queues, one per port.
	 */
	oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "ctrlq",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "control queues");
	for_each_port(sc, i) {
		struct sge_wrq *ctrlq = &sc->sge.ctrlq[i];

		rc = alloc_ctrlq(sc, ctrlq, i, oid);
		if (rc != 0)
			return (rc);
	}

	return (rc);
}

/*
 * Idempotent
 */
int
t4_teardown_adapter_queues(struct adapter *sc)
{
	int i;

	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);

	/* Do this before freeing the queue */
	if (sc->flags & ADAP_SYSCTL_CTX) {
		sysctl_ctx_free(&sc->ctx);
		sc->flags &= ~ADAP_SYSCTL_CTX;
	}

	if (!(sc->flags & IS_VF)) {
		for_each_port(sc, i)
			free_wrq(sc, &sc->sge.ctrlq[i]);
	}
	free_fwq(sc);

	return (0);
}

/* Maximum payload that could arrive with a single iq descriptor. */
static inline int
max_rx_payload(struct adapter *sc, struct ifnet *ifp, const bool ofld)
{
	int maxp;

	/* large enough even when hw VLAN extraction is disabled */
	maxp = sc->params.sge.fl_pktshift + ETHER_HDR_LEN +
	    ETHER_VLAN_ENCAP_LEN + ifp->if_mtu;
	if (ofld && sc->tt.tls && sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS &&
	    maxp < sc->params.tp.max_rx_pdu)
		maxp = sc->params.tp.max_rx_pdu;
	return (maxp);
}

int
t4_setup_vi_queues(struct vi_info *vi)
{
	int rc = 0, i, intr_idx, iqidx;
	struct sge_rxq *rxq;
	struct sge_txq *txq;
#ifdef TCP_OFFLOAD
	struct sge_ofld_rxq *ofld_rxq;
#endif
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	struct sge_wrq *ofld_txq;
#endif
#ifdef DEV_NETMAP
	int saved_idx;
	struct sge_nm_rxq *nm_rxq;
	struct sge_nm_txq *nm_txq;
#endif
	char name[16];
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = vi->ifp;
	struct sysctl_oid *oid = device_get_sysctl_tree(vi->dev);
	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
	int maxp;

	/* Interrupt vector to start from (when using multiple vectors) */
	intr_idx = vi->first_intr;

#ifdef DEV_NETMAP
	saved_idx = intr_idx;
	if (ifp->if_capabilities & IFCAP_NETMAP) {

		/* netmap is supported with direct interrupts only. */
		MPASS(!forwarding_intr_to_fwq(sc));

		/*
		 * We don't have buffers to back the netmap rx queues
		 * right now so we create the queues in a way that
		 * doesn't set off any congestion signal in the chip.
		 */
		oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "nm_rxq",
		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "rx queues");
		for_each_nm_rxq(vi, i, nm_rxq) {
			rc = alloc_nm_rxq(vi, nm_rxq, intr_idx, i, oid);
			if (rc != 0)
				goto done;
			intr_idx++;
		}

		oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "nm_txq",
		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "tx queues");
		for_each_nm_txq(vi, i, nm_txq) {
			iqidx = vi->first_nm_rxq + (i % vi->nnmrxq);
			rc = alloc_nm_txq(vi, nm_txq, iqidx, i, oid);
			if (rc != 0)
				goto done;
		}
	}

	/* Normal rx queues and netmap rx queues share the same interrupts. */
	intr_idx = saved_idx;
#endif

	/*
	 * Allocate rx queues first because a default iqid is required when
	 * creating a tx queue.
	 */
	maxp = max_rx_payload(sc, ifp, false);
	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "rxq",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "rx queues");
	for_each_rxq(vi, i, rxq) {

		init_iq(&rxq->iq, sc, vi->tmr_idx, vi->pktc_idx, vi->qsize_rxq);

		snprintf(name, sizeof(name), "%s rxq%d-fl",
		    device_get_nameunit(vi->dev), i);
		init_fl(sc, &rxq->fl, vi->qsize_rxq / 8, maxp, name);

		rc = alloc_rxq(vi, rxq,
		    forwarding_intr_to_fwq(sc) ? -1 : intr_idx, i, oid);
		if (rc != 0)
			goto done;
		intr_idx++;
	}
#ifdef DEV_NETMAP
	if (ifp->if_capabilities & IFCAP_NETMAP)
		intr_idx = saved_idx + max(vi->nrxq, vi->nnmrxq);
#endif
#ifdef TCP_OFFLOAD
	maxp = max_rx_payload(sc, ifp, true);
	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ofld_rxq",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
	    "rx queues for offloaded TCP connections");
	for_each_ofld_rxq(vi, i, ofld_rxq) {

		init_iq(&ofld_rxq->iq, sc, vi->ofld_tmr_idx, vi->ofld_pktc_idx,
		    vi->qsize_rxq);

		snprintf(name, sizeof(name), "%s ofld_rxq%d-fl",
		    device_get_nameunit(vi->dev), i);
		init_fl(sc, &ofld_rxq->fl, vi->qsize_rxq / 8, maxp, name);

		rc = alloc_ofld_rxq(vi, ofld_rxq,
		    forwarding_intr_to_fwq(sc) ? -1 : intr_idx, i, oid);
		if (rc != 0)
			goto done;
		intr_idx++;
	}
#endif

	/*
	 * Now the tx queues.
	 */
	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "txq",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "tx queues");
	for_each_txq(vi, i, txq) {
		iqidx = vi->first_rxq + (i % vi->nrxq);
		snprintf(name, sizeof(name), "%s txq%d",
		    device_get_nameunit(vi->dev), i);
		init_eq(sc, &txq->eq, EQ_ETH, vi->qsize_txq, pi->tx_chan,
		    sc->sge.rxq[iqidx].iq.cntxt_id, name);

		rc = alloc_txq(vi, txq, i, oid);
		if (rc != 0)
			goto done;
	}
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ofld_txq",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "tx queues for TOE/ETHOFLD");
	for_each_ofld_txq(vi, i, ofld_txq) {
		struct sysctl_oid *oid2;

		snprintf(name, sizeof(name), "%s ofld_txq%d",
		    device_get_nameunit(vi->dev), i);
		if (vi->nofldrxq > 0) {
			iqidx = vi->first_ofld_rxq + (i % vi->nofldrxq);
			init_eq(sc, &ofld_txq->eq, EQ_OFLD, vi->qsize_txq,
			    pi->tx_chan, sc->sge.ofld_rxq[iqidx].iq.cntxt_id,
			    name);
		} else {
			iqidx = vi->first_rxq + (i % vi->nrxq);
			init_eq(sc, &ofld_txq->eq, EQ_OFLD, vi->qsize_txq,
			    pi->tx_chan, sc->sge.rxq[iqidx].iq.cntxt_id, name);
		}

		snprintf(name, sizeof(name), "%d", i);
		oid2 = SYSCTL_ADD_NODE(&vi->ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
		    name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
		    "offload tx queue");

		rc = alloc_wrq(sc, vi, ofld_txq, oid2);
		if (rc != 0)
			goto done;
	}
#endif
done:
	if (rc)
		t4_teardown_vi_queues(vi);

	return (rc);
}

/*
 * Idempotent
 */
int
t4_teardown_vi_queues(struct vi_info *vi)
{
	int i;
	struct sge_rxq *rxq;
	struct sge_txq *txq;
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct sge_wrq *ofld_txq;
#endif
#ifdef TCP_OFFLOAD
	struct sge_ofld_rxq *ofld_rxq;
#endif
#ifdef DEV_NETMAP
	struct sge_nm_rxq *nm_rxq;
	struct sge_nm_txq *nm_txq;
#endif

	/* Do this before freeing the queues */
	if (vi->flags & VI_SYSCTL_CTX) {
		sysctl_ctx_free(&vi->ctx);
		vi->flags &= ~VI_SYSCTL_CTX;
	}

#ifdef DEV_NETMAP
	if (vi->ifp->if_capabilities & IFCAP_NETMAP) {
		for_each_nm_txq(vi, i, nm_txq) {
			free_nm_txq(vi, nm_txq);
		}

		for_each_nm_rxq(vi, i, nm_rxq) {
			free_nm_rxq(vi, nm_rxq);
		}
	}
#endif

	/*
	 * Take down all the tx queues first, as they reference the rx queues
	 * (for egress updates, etc.).
	 */

	for_each_txq(vi, i, txq) {
		free_txq(vi, txq);
	}
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	for_each_ofld_txq(vi, i, ofld_txq) {
		free_wrq(sc, ofld_txq);
	}
#endif

	/*
	 * Then take down the rx queues.
	 */

	for_each_rxq(vi, i, rxq) {
		free_rxq(vi, rxq);
	}
#ifdef TCP_OFFLOAD
	for_each_ofld_rxq(vi, i, ofld_rxq) {
		free_ofld_rxq(vi, ofld_rxq);
	}
#endif

	return (0);
}

/*
 * Interrupt handler when the driver is using only 1 interrupt.  This is a very
 * unusual scenario.
 *
 * a) Deals with errors, if any.
 * b) Services firmware event queue, which is taking interrupts for all other
 *    queues.
 */
void
t4_intr_all(void *arg)
{
	struct adapter *sc = arg;
	struct sge_iq *fwq = &sc->sge.fwq;

	MPASS(sc->intr_count == 1);

	if (sc->intr_type == INTR_INTX)
		t4_write_reg(sc, MYPF_REG(A_PCIE_PF_CLI), 0);

	t4_intr_err(arg);
	t4_intr_evt(fwq);
}

/*
 * Interrupt handler for errors (installed directly when multiple interrupts
 * are being used, or called by t4_intr_all).
 */
void
t4_intr_err(void *arg)
{
	struct adapter *sc = arg;
	uint32_t v;
	const bool verbose = (sc->debug_flags & DF_VERBOSE_SLOWINTR) != 0;

	if (sc->flags & ADAP_ERR)
		return;

	v = t4_read_reg(sc, MYPF_REG(A_PL_PF_INT_CAUSE));
	if (v & F_PFSW) {
		sc->swintr++;
		t4_write_reg(sc, MYPF_REG(A_PL_PF_INT_CAUSE), v);
	}

	t4_slow_intr_handler(sc, verbose);
}

/*
 * Interrupt handler for iq-only queues.  The firmware event queue is the only
 * such queue right now.
 */
void
t4_intr_evt(void *arg)
{
	struct sge_iq *iq = arg;

	if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) {
		service_iq(iq, 0);
		(void) atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE);
	}
}

/*
 * Interrupt handler for iq+fl queues.
 */
void
t4_intr(void *arg)
{
	struct sge_iq *iq = arg;

	if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) {
		service_iq_fl(iq, 0);
		(void) atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE);
	}
}

#ifdef DEV_NETMAP
/*
 * Interrupt handler for netmap rx queues.
 */
void
t4_nm_intr(void *arg)
{
	struct sge_nm_rxq *nm_rxq = arg;

	if (atomic_cmpset_int(&nm_rxq->nm_state, NM_ON, NM_BUSY)) {
		service_nm_rxq(nm_rxq);
		(void) atomic_cmpset_int(&nm_rxq->nm_state, NM_BUSY, NM_ON);
	}
}

/*
 * Interrupt handler for vectors shared between NIC and netmap rx queues.
 */
void
t4_vi_intr(void *arg)
{
	struct irq *irq = arg;

	MPASS(irq->nm_rxq != NULL);
	t4_nm_intr(irq->nm_rxq);

	MPASS(irq->rxq != NULL);
	t4_intr(irq->rxq);
}
#endif

/*
 * Deals with interrupts on an iq-only (no freelist) queue.
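 *
 * Note (illustrative summary, not a contract): t4_intr_evt and t4_intr above
 * claim their queue with an IQS_IDLE -> IQS_BUSY cmpset, so only one thread
 * services a given iq at a time.  service_iq and service_iq_fl are called
 * with a budget of 0 by the interrupt handlers (run until the queue is
 * empty); when called with a non-zero budget they return EINPROGRESS once the
 * budget is spent, and the caller (e.g. the indirect-interrupt loop below,
 * which keeps such queues on a local list) retries them later.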
1388 */ 1389 static int 1390 service_iq(struct sge_iq *iq, int budget) 1391 { 1392 struct sge_iq *q; 1393 struct adapter *sc = iq->adapter; 1394 struct iq_desc *d = &iq->desc[iq->cidx]; 1395 int ndescs = 0, limit; 1396 int rsp_type; 1397 uint32_t lq; 1398 STAILQ_HEAD(, sge_iq) iql = STAILQ_HEAD_INITIALIZER(iql); 1399 1400 KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq)); 1401 KASSERT((iq->flags & IQ_HAS_FL) == 0, 1402 ("%s: called for iq %p with fl (iq->flags 0x%x)", __func__, iq, 1403 iq->flags)); 1404 MPASS((iq->flags & IQ_ADJ_CREDIT) == 0); 1405 MPASS((iq->flags & IQ_LRO_ENABLED) == 0); 1406 1407 limit = budget ? budget : iq->qsize / 16; 1408 1409 /* 1410 * We always come back and check the descriptor ring for new indirect 1411 * interrupts and other responses after running a single handler. 1412 */ 1413 for (;;) { 1414 while ((d->rsp.u.type_gen & F_RSPD_GEN) == iq->gen) { 1415 1416 rmb(); 1417 1418 rsp_type = G_RSPD_TYPE(d->rsp.u.type_gen); 1419 lq = be32toh(d->rsp.pldbuflen_qid); 1420 1421 switch (rsp_type) { 1422 case X_RSPD_TYPE_FLBUF: 1423 panic("%s: data for an iq (%p) with no freelist", 1424 __func__, iq); 1425 1426 /* NOTREACHED */ 1427 1428 case X_RSPD_TYPE_CPL: 1429 KASSERT(d->rss.opcode < NUM_CPL_CMDS, 1430 ("%s: bad opcode %02x.", __func__, 1431 d->rss.opcode)); 1432 t4_cpl_handler[d->rss.opcode](iq, &d->rss, NULL); 1433 break; 1434 1435 case X_RSPD_TYPE_INTR: 1436 /* 1437 * There are 1K interrupt-capable queues (qids 0 1438 * through 1023). A response type indicating a 1439 * forwarded interrupt with a qid >= 1K is an 1440 * iWARP async notification. 1441 */ 1442 if (__predict_true(lq >= 1024)) { 1443 t4_an_handler(iq, &d->rsp); 1444 break; 1445 } 1446 1447 q = sc->sge.iqmap[lq - sc->sge.iq_start - 1448 sc->sge.iq_base]; 1449 if (atomic_cmpset_int(&q->state, IQS_IDLE, 1450 IQS_BUSY)) { 1451 if (service_iq_fl(q, q->qsize / 16) == 0) { 1452 (void) atomic_cmpset_int(&q->state, 1453 IQS_BUSY, IQS_IDLE); 1454 } else { 1455 STAILQ_INSERT_TAIL(&iql, q, 1456 link); 1457 } 1458 } 1459 break; 1460 1461 default: 1462 KASSERT(0, 1463 ("%s: illegal response type %d on iq %p", 1464 __func__, rsp_type, iq)); 1465 log(LOG_ERR, 1466 "%s: illegal response type %d on iq %p", 1467 device_get_nameunit(sc->dev), rsp_type, iq); 1468 break; 1469 } 1470 1471 d++; 1472 if (__predict_false(++iq->cidx == iq->sidx)) { 1473 iq->cidx = 0; 1474 iq->gen ^= F_RSPD_GEN; 1475 d = &iq->desc[0]; 1476 } 1477 if (__predict_false(++ndescs == limit)) { 1478 t4_write_reg(sc, sc->sge_gts_reg, 1479 V_CIDXINC(ndescs) | 1480 V_INGRESSQID(iq->cntxt_id) | 1481 V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX))); 1482 ndescs = 0; 1483 1484 if (budget) { 1485 return (EINPROGRESS); 1486 } 1487 } 1488 } 1489 1490 if (STAILQ_EMPTY(&iql)) 1491 break; 1492 1493 /* 1494 * Process the head only, and send it to the back of the list if 1495 * it's still not done. 
1496 */ 1497 q = STAILQ_FIRST(&iql); 1498 STAILQ_REMOVE_HEAD(&iql, link); 1499 if (service_iq_fl(q, q->qsize / 8) == 0) 1500 (void) atomic_cmpset_int(&q->state, IQS_BUSY, IQS_IDLE); 1501 else 1502 STAILQ_INSERT_TAIL(&iql, q, link); 1503 } 1504 1505 t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) | 1506 V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params)); 1507 1508 return (0); 1509 } 1510 1511 static inline int 1512 sort_before_lro(struct lro_ctrl *lro) 1513 { 1514 1515 return (lro->lro_mbuf_max != 0); 1516 } 1517 1518 static inline uint64_t 1519 last_flit_to_ns(struct adapter *sc, uint64_t lf) 1520 { 1521 uint64_t n = be64toh(lf) & 0xfffffffffffffff; /* 60b, not 64b. */ 1522 1523 if (n > UINT64_MAX / 1000000) 1524 return (n / sc->params.vpd.cclk * 1000000); 1525 else 1526 return (n * 1000000 / sc->params.vpd.cclk); 1527 } 1528 1529 static inline void 1530 move_to_next_rxbuf(struct sge_fl *fl) 1531 { 1532 1533 fl->rx_offset = 0; 1534 if (__predict_false((++fl->cidx & 7) == 0)) { 1535 uint16_t cidx = fl->cidx >> 3; 1536 1537 if (__predict_false(cidx == fl->sidx)) 1538 fl->cidx = cidx = 0; 1539 fl->hw_cidx = cidx; 1540 } 1541 } 1542 1543 /* 1544 * Deals with interrupts on an iq+fl queue. 1545 */ 1546 static int 1547 service_iq_fl(struct sge_iq *iq, int budget) 1548 { 1549 struct sge_rxq *rxq = iq_to_rxq(iq); 1550 struct sge_fl *fl; 1551 struct adapter *sc = iq->adapter; 1552 struct iq_desc *d = &iq->desc[iq->cidx]; 1553 int ndescs, limit; 1554 int rsp_type, starved; 1555 uint32_t lq; 1556 uint16_t fl_hw_cidx; 1557 struct mbuf *m0; 1558 #if defined(INET) || defined(INET6) 1559 const struct timeval lro_timeout = {0, sc->lro_timeout}; 1560 struct lro_ctrl *lro = &rxq->lro; 1561 #endif 1562 1563 KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq)); 1564 MPASS(iq->flags & IQ_HAS_FL); 1565 1566 ndescs = 0; 1567 #if defined(INET) || defined(INET6) 1568 if (iq->flags & IQ_ADJ_CREDIT) { 1569 MPASS(sort_before_lro(lro)); 1570 iq->flags &= ~IQ_ADJ_CREDIT; 1571 if ((d->rsp.u.type_gen & F_RSPD_GEN) != iq->gen) { 1572 tcp_lro_flush_all(lro); 1573 t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(1) | 1574 V_INGRESSQID((u32)iq->cntxt_id) | 1575 V_SEINTARM(iq->intr_params)); 1576 return (0); 1577 } 1578 ndescs = 1; 1579 } 1580 #else 1581 MPASS((iq->flags & IQ_ADJ_CREDIT) == 0); 1582 #endif 1583 1584 limit = budget ? 
	limit = budget ? budget : iq->qsize / 16;
	fl = &rxq->fl;
	fl_hw_cidx = fl->hw_cidx;	/* stable snapshot */
	while ((d->rsp.u.type_gen & F_RSPD_GEN) == iq->gen) {

		rmb();

		m0 = NULL;
		rsp_type = G_RSPD_TYPE(d->rsp.u.type_gen);
		lq = be32toh(d->rsp.pldbuflen_qid);

		switch (rsp_type) {
		case X_RSPD_TYPE_FLBUF:
			if (lq & F_RSPD_NEWBUF) {
				if (fl->rx_offset > 0)
					move_to_next_rxbuf(fl);
				lq = G_RSPD_LEN(lq);
			}
			if (IDXDIFF(fl->hw_cidx, fl_hw_cidx, fl->sidx) > 4) {
				FL_LOCK(fl);
				refill_fl(sc, fl, 64);
				FL_UNLOCK(fl);
				fl_hw_cidx = fl->hw_cidx;
			}

			if (d->rss.opcode == CPL_RX_PKT) {
				if (__predict_true(eth_rx(sc, rxq, d, lq) == 0))
					break;
				goto out;
			}
			m0 = get_fl_payload(sc, fl, lq);
			if (__predict_false(m0 == NULL))
				goto out;

			/* fall through */

		case X_RSPD_TYPE_CPL:
			KASSERT(d->rss.opcode < NUM_CPL_CMDS,
			    ("%s: bad opcode %02x.", __func__, d->rss.opcode));
			t4_cpl_handler[d->rss.opcode](iq, &d->rss, m0);
			break;

		case X_RSPD_TYPE_INTR:

			/*
			 * There are 1K interrupt-capable queues (qids 0
			 * through 1023).  A response type indicating a
			 * forwarded interrupt with a qid >= 1K is an
			 * iWARP async notification.  That is the only
			 * acceptable indirect interrupt on this queue.
			 */
			if (__predict_false(lq < 1024)) {
				panic("%s: indirect interrupt on iq_fl %p "
				    "with qid %u", __func__, iq, lq);
			}

			t4_an_handler(iq, &d->rsp);
			break;

		default:
			KASSERT(0, ("%s: illegal response type %d on iq %p",
			    __func__, rsp_type, iq));
			log(LOG_ERR, "%s: illegal response type %d on iq %p",
			    device_get_nameunit(sc->dev), rsp_type, iq);
			break;
		}

		d++;
		if (__predict_false(++iq->cidx == iq->sidx)) {
			iq->cidx = 0;
			iq->gen ^= F_RSPD_GEN;
			d = &iq->desc[0];
		}
		if (__predict_false(++ndescs == limit)) {
			t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) |
			    V_INGRESSQID(iq->cntxt_id) |
			    V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));

#if defined(INET) || defined(INET6)
			if (iq->flags & IQ_LRO_ENABLED &&
			    !sort_before_lro(lro) &&
			    sc->lro_timeout != 0) {
				tcp_lro_flush_inactive(lro, &lro_timeout);
			}
#endif
			if (budget)
				return (EINPROGRESS);
			ndescs = 0;
		}
	}
out:
#if defined(INET) || defined(INET6)
	if (iq->flags & IQ_LRO_ENABLED) {
		if (ndescs > 0 && lro->lro_mbuf_count > 8) {
			MPASS(sort_before_lro(lro));
			/* hold back one credit and don't flush LRO state */
			iq->flags |= IQ_ADJ_CREDIT;
			ndescs--;
		} else {
			tcp_lro_flush_all(lro);
		}
	}
#endif

	t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) |
	    V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params));

	FL_LOCK(fl);
	starved = refill_fl(sc, fl, 64);
	FL_UNLOCK(fl);
	if (__predict_false(starved != 0))
		add_fl_to_sfl(sc, fl);

	return (0);
}

static inline struct cluster_metadata *
cl_metadata(struct fl_sdesc *sd)
{

	return ((void *)(sd->cl + sd->moff));
}

static void
rxb_free(struct mbuf *m)
{
	struct cluster_metadata *clm = m->m_ext.ext_arg1;

	uma_zfree(clm->zone, clm->cl);
	counter_u64_add(extfree_rels, 1);
}

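/*
 * Illustrative note: with buffer packing a single cluster can back several
 * mbufs.  The first mbuf carved out of a cluster sets up the cluster_metadata
 * refcount (see get_scatter_segment below) and bumps extfree_refs; rxb_free
 * above runs when the last external reference goes away, returns the cluster
 * to its zone, and bumps extfree_rels.  t4_sge_extfree_refs() reports
 * refs - rels, i.e. the number of cluster references still outstanding.
 */
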
/*
 * The mbuf returned comes from zone_mbuf and carries the payload in one of
 * these ways
 * a) complete frame inside the mbuf
 * b) m_cljset (for clusters without metadata)
 * c) m_extaddref (cluster with metadata)
 */
static struct mbuf *
get_scatter_segment(struct adapter *sc, struct sge_fl *fl, int fr_offset,
    int remaining)
{
	struct mbuf *m;
	struct fl_sdesc *sd = &fl->sdesc[fl->cidx];
	struct rx_buf_info *rxb = &sc->sge.rx_buf_info[sd->zidx];
	struct cluster_metadata *clm;
	int len, blen;
	caddr_t payload;

	if (fl->flags & FL_BUF_PACKING) {
		u_int l, pad;

		blen = rxb->size2 - fl->rx_offset;	/* max possible in this buf */
		len = min(remaining, blen);
		payload = sd->cl + fl->rx_offset;

		l = fr_offset + len;
		pad = roundup2(l, fl->buf_boundary) - l;
		if (fl->rx_offset + len + pad < rxb->size2)
			blen = len + pad;
		MPASS(fl->rx_offset + blen <= rxb->size2);
	} else {
		MPASS(fl->rx_offset == 0);	/* not packing */
		blen = rxb->size1;
		len = min(remaining, blen);
		payload = sd->cl;
	}

	if (fr_offset == 0) {
		m = m_gethdr(M_NOWAIT, MT_DATA);
		if (__predict_false(m == NULL))
			return (NULL);
		m->m_pkthdr.len = remaining;
	} else {
		m = m_get(M_NOWAIT, MT_DATA);
		if (__predict_false(m == NULL))
			return (NULL);
	}
	m->m_len = len;

	if (sc->sc_do_rxcopy && len < RX_COPY_THRESHOLD) {
		/* copy data to mbuf */
		bcopy(payload, mtod(m, caddr_t), len);
		if (fl->flags & FL_BUF_PACKING) {
			fl->rx_offset += blen;
			MPASS(fl->rx_offset <= rxb->size2);
			if (fl->rx_offset < rxb->size2)
				return (m);	/* without advancing the cidx */
		}
	} else if (fl->flags & FL_BUF_PACKING) {
		clm = cl_metadata(sd);
		if (sd->nmbuf++ == 0) {
			clm->refcount = 1;
			clm->zone = rxb->zone;
			clm->cl = sd->cl;
			counter_u64_add(extfree_refs, 1);
		}
		m_extaddref(m, payload, blen, &clm->refcount, rxb_free, clm,
		    NULL);

		fl->rx_offset += blen;
		MPASS(fl->rx_offset <= rxb->size2);
		if (fl->rx_offset < rxb->size2)
			return (m);	/* without advancing the cidx */
	} else {
		m_cljset(m, sd->cl, rxb->type);
		sd->cl = NULL;	/* consumed, not a recycle candidate */
	}

	move_to_next_rxbuf(fl);

	return (m);
}

static struct mbuf *
get_fl_payload(struct adapter *sc, struct sge_fl *fl, const u_int plen)
{
	struct mbuf *m0, *m, **pnext;
	u_int remaining;

	if (__predict_false(fl->flags & FL_BUF_RESUME)) {
		M_ASSERTPKTHDR(fl->m0);
		MPASS(fl->m0->m_pkthdr.len == plen);
		MPASS(fl->remaining < plen);

		m0 = fl->m0;
		pnext = fl->pnext;
		remaining = fl->remaining;
		fl->flags &= ~FL_BUF_RESUME;
		goto get_segment;
	}

	/*
	 * Payload starts at rx_offset in the current hw buffer.  Its length is
	 * 'len' and it may span multiple hw buffers.
	 */

	m0 = get_scatter_segment(sc, fl, 0, plen);
	if (m0 == NULL)
		return (NULL);
	remaining = plen - m0->m_len;
	pnext = &m0->m_next;
	while (remaining > 0) {
get_segment:
		MPASS(fl->rx_offset == 0);
		m = get_scatter_segment(sc, fl, plen - remaining, remaining);
		if (__predict_false(m == NULL)) {
			fl->m0 = m0;
			fl->pnext = pnext;
			fl->remaining = remaining;
			fl->flags |= FL_BUF_RESUME;
			return (NULL);
		}
		*pnext = m;
		pnext = &m->m_next;
		remaining -= m->m_len;
	}
	*pnext = NULL;

	M_ASSERTPKTHDR(m0);
	return (m0);
}

static int
skip_scatter_segment(struct adapter *sc, struct sge_fl *fl, int fr_offset,
    int remaining)
{
	struct fl_sdesc *sd = &fl->sdesc[fl->cidx];
	struct rx_buf_info *rxb = &sc->sge.rx_buf_info[sd->zidx];
	int len, blen;

	if (fl->flags & FL_BUF_PACKING) {
		u_int l, pad;

		blen = rxb->size2 - fl->rx_offset;	/* max possible in this buf */
		len = min(remaining, blen);

		l = fr_offset + len;
		pad = roundup2(l, fl->buf_boundary) - l;
		if (fl->rx_offset + len + pad < rxb->size2)
			blen = len + pad;
		fl->rx_offset += blen;
		MPASS(fl->rx_offset <= rxb->size2);
		if (fl->rx_offset < rxb->size2)
			return (len);	/* without advancing the cidx */
	} else {
		MPASS(fl->rx_offset == 0);	/* not packing */
		blen = rxb->size1;
		len = min(remaining, blen);
	}
	move_to_next_rxbuf(fl);
	return (len);
}

static inline void
skip_fl_payload(struct adapter *sc, struct sge_fl *fl, int plen)
{
	int remaining, fr_offset, len;

	fr_offset = 0;
	remaining = plen;
	while (remaining > 0) {
		len = skip_scatter_segment(sc, fl, fr_offset, remaining);
		fr_offset += len;
		remaining -= len;
	}
}

static inline int
get_segment_len(struct adapter *sc, struct sge_fl *fl, int plen)
{
	int len;
	struct fl_sdesc *sd = &fl->sdesc[fl->cidx];
	struct rx_buf_info *rxb = &sc->sge.rx_buf_info[sd->zidx];

	if (fl->flags & FL_BUF_PACKING)
		len = rxb->size2 - fl->rx_offset;
	else
		len = rxb->size1;

	return (min(plen, len));
}

static int
eth_rx(struct adapter *sc, struct sge_rxq *rxq, const struct iq_desc *d,
    u_int plen)
{
	struct mbuf *m0;
	struct ifnet *ifp = rxq->ifp;
	struct sge_fl *fl = &rxq->fl;
	struct vi_info *vi = ifp->if_softc;
	const struct cpl_rx_pkt *cpl;
#if defined(INET) || defined(INET6)
	struct lro_ctrl *lro = &rxq->lro;
#endif
	static const int sw_hashtype[4][2] = {
		{M_HASHTYPE_NONE, M_HASHTYPE_NONE},
		{M_HASHTYPE_RSS_IPV4, M_HASHTYPE_RSS_IPV6},
		{M_HASHTYPE_RSS_TCP_IPV4, M_HASHTYPE_RSS_TCP_IPV6},
		{M_HASHTYPE_RSS_UDP_IPV4, M_HASHTYPE_RSS_UDP_IPV6},
	};

	MPASS(plen > sc->params.sge.fl_pktshift);
	if (vi->pfil != NULL && PFIL_HOOKED_IN(vi->pfil) &&
	    __predict_true((fl->flags & FL_BUF_RESUME) == 0)) {
		struct fl_sdesc *sd = &fl->sdesc[fl->cidx];
		caddr_t frame;
		int rc, slen;

		slen = get_segment_len(sc, fl, plen) -
		    sc->params.sge.fl_pktshift;
		frame = sd->cl + fl->rx_offset + sc->params.sge.fl_pktshift;
		CURVNET_SET_QUIET(ifp->if_vnet);
		rc = pfil_run_hooks(vi->pfil, frame, ifp,
		    slen | PFIL_MEMPTR | PFIL_IN, NULL);
		CURVNET_RESTORE();
		if (rc == PFIL_DROPPED || rc == PFIL_CONSUMED) {
skip_fl_payload(sc, fl, plen); 1943 return (0); 1944 } 1945 if (rc == PFIL_REALLOCED) { 1946 skip_fl_payload(sc, fl, plen); 1947 m0 = pfil_mem2mbuf(frame); 1948 goto have_mbuf; 1949 } 1950 } 1951 1952 m0 = get_fl_payload(sc, fl, plen); 1953 if (__predict_false(m0 == NULL)) 1954 return (ENOMEM); 1955 1956 m0->m_pkthdr.len -= sc->params.sge.fl_pktshift; 1957 m0->m_len -= sc->params.sge.fl_pktshift; 1958 m0->m_data += sc->params.sge.fl_pktshift; 1959 1960 have_mbuf: 1961 m0->m_pkthdr.rcvif = ifp; 1962 M_HASHTYPE_SET(m0, sw_hashtype[d->rss.hash_type][d->rss.ipv6]); 1963 m0->m_pkthdr.flowid = be32toh(d->rss.hash_val); 1964 1965 cpl = (const void *)(&d->rss + 1); 1966 if (cpl->csum_calc && !(cpl->err_vec & sc->params.tp.err_vec_mask)) { 1967 if (ifp->if_capenable & IFCAP_RXCSUM && 1968 cpl->l2info & htobe32(F_RXF_IP)) { 1969 m0->m_pkthdr.csum_flags = (CSUM_IP_CHECKED | 1970 CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR); 1971 rxq->rxcsum++; 1972 } else if (ifp->if_capenable & IFCAP_RXCSUM_IPV6 && 1973 cpl->l2info & htobe32(F_RXF_IP6)) { 1974 m0->m_pkthdr.csum_flags = (CSUM_DATA_VALID_IPV6 | 1975 CSUM_PSEUDO_HDR); 1976 rxq->rxcsum++; 1977 } 1978 1979 if (__predict_false(cpl->ip_frag)) 1980 m0->m_pkthdr.csum_data = be16toh(cpl->csum); 1981 else 1982 m0->m_pkthdr.csum_data = 0xffff; 1983 } 1984 1985 if (cpl->vlan_ex) { 1986 m0->m_pkthdr.ether_vtag = be16toh(cpl->vlan); 1987 m0->m_flags |= M_VLANTAG; 1988 rxq->vlan_extraction++; 1989 } 1990 1991 if (rxq->iq.flags & IQ_RX_TIMESTAMP) { 1992 /* 1993 * Fill up rcv_tstmp but do not set M_TSTMP. 1994 * rcv_tstmp is not in the format that the 1995 * kernel expects and we don't want to mislead 1996 * it. For now this is only for custom code 1997 * that knows how to interpret cxgbe's stamp. 1998 */ 1999 m0->m_pkthdr.rcv_tstmp = 2000 last_flit_to_ns(sc, d->rsp.u.last_flit); 2001 #ifdef notyet 2002 m0->m_flags |= M_TSTMP; 2003 #endif 2004 } 2005 2006 #ifdef NUMA 2007 m0->m_pkthdr.numa_domain = ifp->if_numa_domain; 2008 #endif 2009 #if defined(INET) || defined(INET6) 2010 if (rxq->iq.flags & IQ_LRO_ENABLED && 2011 (M_HASHTYPE_GET(m0) == M_HASHTYPE_RSS_TCP_IPV4 || 2012 M_HASHTYPE_GET(m0) == M_HASHTYPE_RSS_TCP_IPV6)) { 2013 if (sort_before_lro(lro)) { 2014 tcp_lro_queue_mbuf(lro, m0); 2015 return (0); /* queued for sort, then LRO */ 2016 } 2017 if (tcp_lro_rx(lro, m0, 0) == 0) 2018 return (0); /* queued for LRO */ 2019 } 2020 #endif 2021 ifp->if_input(ifp, m0); 2022 2023 return (0); 2024 } 2025 2026 /* 2027 * Must drain the wrq or make sure that someone else will. 
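 *
 * This is the handler for wrq->wrq_tx_task (see alloc_wrq() further down).
 * A hypothetical deferred drain, from a context that cannot do the work
 * inline, would look something like this (the taskqueue chosen here is an
 * assumption, not taken from this file):
 *
 *	taskqueue_enqueue(sc->tq[wrq->eq.tx_chan], &wrq->wrq_tx_task);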
2028 */ 2029 static void 2030 wrq_tx_drain(void *arg, int n) 2031 { 2032 struct sge_wrq *wrq = arg; 2033 struct sge_eq *eq = &wrq->eq; 2034 2035 EQ_LOCK(eq); 2036 if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list)) 2037 drain_wrq_wr_list(wrq->adapter, wrq); 2038 EQ_UNLOCK(eq); 2039 } 2040 2041 static void 2042 drain_wrq_wr_list(struct adapter *sc, struct sge_wrq *wrq) 2043 { 2044 struct sge_eq *eq = &wrq->eq; 2045 u_int available, dbdiff; /* # of hardware descriptors */ 2046 u_int n; 2047 struct wrqe *wr; 2048 struct fw_eth_tx_pkt_wr *dst; /* any fw WR struct will do */ 2049 2050 EQ_LOCK_ASSERT_OWNED(eq); 2051 MPASS(TAILQ_EMPTY(&wrq->incomplete_wrs)); 2052 wr = STAILQ_FIRST(&wrq->wr_list); 2053 MPASS(wr != NULL); /* Must be called with something useful to do */ 2054 MPASS(eq->pidx == eq->dbidx); 2055 dbdiff = 0; 2056 2057 do { 2058 eq->cidx = read_hw_cidx(eq); 2059 if (eq->pidx == eq->cidx) 2060 available = eq->sidx - 1; 2061 else 2062 available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; 2063 2064 MPASS(wr->wrq == wrq); 2065 n = howmany(wr->wr_len, EQ_ESIZE); 2066 if (available < n) 2067 break; 2068 2069 dst = (void *)&eq->desc[eq->pidx]; 2070 if (__predict_true(eq->sidx - eq->pidx > n)) { 2071 /* Won't wrap, won't end exactly at the status page. */ 2072 bcopy(&wr->wr[0], dst, wr->wr_len); 2073 eq->pidx += n; 2074 } else { 2075 int first_portion = (eq->sidx - eq->pidx) * EQ_ESIZE; 2076 2077 bcopy(&wr->wr[0], dst, first_portion); 2078 if (wr->wr_len > first_portion) { 2079 bcopy(&wr->wr[first_portion], &eq->desc[0], 2080 wr->wr_len - first_portion); 2081 } 2082 eq->pidx = n - (eq->sidx - eq->pidx); 2083 } 2084 wrq->tx_wrs_copied++; 2085 2086 if (available < eq->sidx / 4 && 2087 atomic_cmpset_int(&eq->equiq, 0, 1)) { 2088 /* 2089 * XXX: This is not 100% reliable with some 2090 * types of WRs. But this is a very unusual 2091 * situation for an ofld/ctrl queue anyway. 2092 */ 2093 dst->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ | 2094 F_FW_WR_EQUEQ); 2095 } 2096 2097 dbdiff += n; 2098 if (dbdiff >= 16) { 2099 ring_eq_db(sc, eq, dbdiff); 2100 dbdiff = 0; 2101 } 2102 2103 STAILQ_REMOVE_HEAD(&wrq->wr_list, link); 2104 free_wrqe(wr); 2105 MPASS(wrq->nwr_pending > 0); 2106 wrq->nwr_pending--; 2107 MPASS(wrq->ndesc_needed >= n); 2108 wrq->ndesc_needed -= n; 2109 } while ((wr = STAILQ_FIRST(&wrq->wr_list)) != NULL); 2110 2111 if (dbdiff) 2112 ring_eq_db(sc, eq, dbdiff); 2113 } 2114 2115 /* 2116 * Doesn't fail. Holds on to work requests it can't send right away. 2117 */ 2118 void 2119 t4_wrq_tx_locked(struct adapter *sc, struct sge_wrq *wrq, struct wrqe *wr) 2120 { 2121 #ifdef INVARIANTS 2122 struct sge_eq *eq = &wrq->eq; 2123 #endif 2124 2125 EQ_LOCK_ASSERT_OWNED(eq); 2126 MPASS(wr != NULL); 2127 MPASS(wr->wr_len > 0 && wr->wr_len <= SGE_MAX_WR_LEN); 2128 MPASS((wr->wr_len & 0x7) == 0); 2129 2130 STAILQ_INSERT_TAIL(&wrq->wr_list, wr, link); 2131 wrq->nwr_pending++; 2132 wrq->ndesc_needed += howmany(wr->wr_len, EQ_ESIZE); 2133 2134 if (!TAILQ_EMPTY(&wrq->incomplete_wrs)) 2135 return; /* commit_wrq_wr will drain wr_list as well. */ 2136 2137 drain_wrq_wr_list(sc, wrq); 2138 2139 /* Doorbell must have caught up to the pidx. 
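 * drain_wrq_wr_list() rings the doorbell for every WR it copies into the
 * ring (advancing dbidx to pidx) and leaves anything it cannot post on
 * wr_list without touching pidx, so the two indexes must match here.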
*/ 2140 MPASS(eq->pidx == eq->dbidx); 2141 } 2142 2143 void 2144 t4_update_fl_bufsize(struct ifnet *ifp) 2145 { 2146 struct vi_info *vi = ifp->if_softc; 2147 struct adapter *sc = vi->adapter; 2148 struct sge_rxq *rxq; 2149 #ifdef TCP_OFFLOAD 2150 struct sge_ofld_rxq *ofld_rxq; 2151 #endif 2152 struct sge_fl *fl; 2153 int i, maxp; 2154 2155 maxp = max_rx_payload(sc, ifp, false); 2156 for_each_rxq(vi, i, rxq) { 2157 fl = &rxq->fl; 2158 2159 FL_LOCK(fl); 2160 fl->zidx = find_refill_source(sc, maxp, 2161 fl->flags & FL_BUF_PACKING); 2162 FL_UNLOCK(fl); 2163 } 2164 #ifdef TCP_OFFLOAD 2165 maxp = max_rx_payload(sc, ifp, true); 2166 for_each_ofld_rxq(vi, i, ofld_rxq) { 2167 fl = &ofld_rxq->fl; 2168 2169 FL_LOCK(fl); 2170 fl->zidx = find_refill_source(sc, maxp, 2171 fl->flags & FL_BUF_PACKING); 2172 FL_UNLOCK(fl); 2173 } 2174 #endif 2175 } 2176 2177 static inline int 2178 mbuf_nsegs(struct mbuf *m) 2179 { 2180 2181 M_ASSERTPKTHDR(m); 2182 KASSERT(m->m_pkthdr.l5hlen > 0, 2183 ("%s: mbuf %p missing information on # of segments.", __func__, m)); 2184 2185 return (m->m_pkthdr.l5hlen); 2186 } 2187 2188 static inline void 2189 set_mbuf_nsegs(struct mbuf *m, uint8_t nsegs) 2190 { 2191 2192 M_ASSERTPKTHDR(m); 2193 m->m_pkthdr.l5hlen = nsegs; 2194 } 2195 2196 static inline int 2197 mbuf_cflags(struct mbuf *m) 2198 { 2199 2200 M_ASSERTPKTHDR(m); 2201 return (m->m_pkthdr.PH_loc.eight[4]); 2202 } 2203 2204 static inline void 2205 set_mbuf_cflags(struct mbuf *m, uint8_t flags) 2206 { 2207 2208 M_ASSERTPKTHDR(m); 2209 m->m_pkthdr.PH_loc.eight[4] = flags; 2210 } 2211 2212 static inline int 2213 mbuf_len16(struct mbuf *m) 2214 { 2215 int n; 2216 2217 M_ASSERTPKTHDR(m); 2218 n = m->m_pkthdr.PH_loc.eight[0]; 2219 if (!(mbuf_cflags(m) & MC_TLS)) 2220 MPASS(n > 0 && n <= SGE_MAX_WR_LEN / 16); 2221 2222 return (n); 2223 } 2224 2225 static inline void 2226 set_mbuf_len16(struct mbuf *m, uint8_t len16) 2227 { 2228 2229 M_ASSERTPKTHDR(m); 2230 m->m_pkthdr.PH_loc.eight[0] = len16; 2231 } 2232 2233 #ifdef RATELIMIT 2234 static inline int 2235 mbuf_eo_nsegs(struct mbuf *m) 2236 { 2237 2238 M_ASSERTPKTHDR(m); 2239 return (m->m_pkthdr.PH_loc.eight[1]); 2240 } 2241 2242 static inline void 2243 set_mbuf_eo_nsegs(struct mbuf *m, uint8_t nsegs) 2244 { 2245 2246 M_ASSERTPKTHDR(m); 2247 m->m_pkthdr.PH_loc.eight[1] = nsegs; 2248 } 2249 2250 static inline int 2251 mbuf_eo_len16(struct mbuf *m) 2252 { 2253 int n; 2254 2255 M_ASSERTPKTHDR(m); 2256 n = m->m_pkthdr.PH_loc.eight[2]; 2257 MPASS(n > 0 && n <= SGE_MAX_WR_LEN / 16); 2258 2259 return (n); 2260 } 2261 2262 static inline void 2263 set_mbuf_eo_len16(struct mbuf *m, uint8_t len16) 2264 { 2265 2266 M_ASSERTPKTHDR(m); 2267 m->m_pkthdr.PH_loc.eight[2] = len16; 2268 } 2269 2270 static inline int 2271 mbuf_eo_tsclk_tsoff(struct mbuf *m) 2272 { 2273 2274 M_ASSERTPKTHDR(m); 2275 return (m->m_pkthdr.PH_loc.eight[3]); 2276 } 2277 2278 static inline void 2279 set_mbuf_eo_tsclk_tsoff(struct mbuf *m, uint8_t tsclk_tsoff) 2280 { 2281 2282 M_ASSERTPKTHDR(m); 2283 m->m_pkthdr.PH_loc.eight[3] = tsclk_tsoff; 2284 } 2285 2286 static inline int 2287 needs_eo(struct cxgbe_snd_tag *cst) 2288 { 2289 2290 return (cst != NULL && cst->type == IF_SND_TAG_TYPE_RATE_LIMIT); 2291 } 2292 #endif 2293 2294 /* 2295 * Try to allocate an mbuf to contain a raw work request. To make it 2296 * easy to construct the work request, don't allocate a chain but a 2297 * single mbuf. 
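 *
 * A minimal usage sketch (the caller and WR type are hypothetical, not
 * from this file):
 *
 *	m = alloc_wr_mbuf(wr_len, M_NOWAIT);
 *	if (m != NULL) {
 *		wr = mtod(m, struct fw_eth_tx_pkt_wr *);
 *		... fill in the wr_len bytes of the work request ...
 *	}
 *
 * The MC_RAW_WR flag set below is what later routes such an mbuf to
 * write_raw_wr() in the tx path.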
2298 */ 2299 struct mbuf * 2300 alloc_wr_mbuf(int len, int how) 2301 { 2302 struct mbuf *m; 2303 2304 if (len <= MHLEN) 2305 m = m_gethdr(how, MT_DATA); 2306 else if (len <= MCLBYTES) 2307 m = m_getcl(how, MT_DATA, M_PKTHDR); 2308 else 2309 m = NULL; 2310 if (m == NULL) 2311 return (NULL); 2312 m->m_pkthdr.len = len; 2313 m->m_len = len; 2314 set_mbuf_cflags(m, MC_RAW_WR); 2315 set_mbuf_len16(m, howmany(len, 16)); 2316 return (m); 2317 } 2318 2319 static inline int 2320 needs_hwcsum(struct mbuf *m) 2321 { 2322 2323 M_ASSERTPKTHDR(m); 2324 2325 return (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_IP | 2326 CSUM_TSO | CSUM_UDP_IPV6 | CSUM_TCP_IPV6)); 2327 } 2328 2329 static inline int 2330 needs_tso(struct mbuf *m) 2331 { 2332 2333 M_ASSERTPKTHDR(m); 2334 2335 return (m->m_pkthdr.csum_flags & CSUM_TSO); 2336 } 2337 2338 static inline int 2339 needs_l3_csum(struct mbuf *m) 2340 { 2341 2342 M_ASSERTPKTHDR(m); 2343 2344 return (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO)); 2345 } 2346 2347 static inline int 2348 needs_tcp_csum(struct mbuf *m) 2349 { 2350 2351 M_ASSERTPKTHDR(m); 2352 return (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_TCP_IPV6 | CSUM_TSO)); 2353 } 2354 2355 #ifdef RATELIMIT 2356 static inline int 2357 needs_l4_csum(struct mbuf *m) 2358 { 2359 2360 M_ASSERTPKTHDR(m); 2361 2362 return (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 | 2363 CSUM_TCP_IPV6 | CSUM_TSO)); 2364 } 2365 2366 static inline int 2367 needs_udp_csum(struct mbuf *m) 2368 { 2369 2370 M_ASSERTPKTHDR(m); 2371 return (m->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_UDP_IPV6)); 2372 } 2373 #endif 2374 2375 static inline int 2376 needs_vlan_insertion(struct mbuf *m) 2377 { 2378 2379 M_ASSERTPKTHDR(m); 2380 2381 return (m->m_flags & M_VLANTAG); 2382 } 2383 2384 static void * 2385 m_advance(struct mbuf **pm, int *poffset, int len) 2386 { 2387 struct mbuf *m = *pm; 2388 int offset = *poffset; 2389 uintptr_t p = 0; 2390 2391 MPASS(len > 0); 2392 2393 for (;;) { 2394 if (offset + len < m->m_len) { 2395 offset += len; 2396 p = mtod(m, uintptr_t) + offset; 2397 break; 2398 } 2399 len -= m->m_len - offset; 2400 m = m->m_next; 2401 offset = 0; 2402 MPASS(m != NULL); 2403 } 2404 *poffset = offset; 2405 *pm = m; 2406 return ((void *)p); 2407 } 2408 2409 static inline int 2410 count_mbuf_ext_pgs(struct mbuf *m, int skip, vm_paddr_t *nextaddr) 2411 { 2412 vm_paddr_t paddr; 2413 int i, len, off, pglen, pgoff, seglen, segoff; 2414 int nsegs = 0; 2415 2416 M_ASSERTEXTPG(m); 2417 off = mtod(m, vm_offset_t); 2418 len = m->m_len; 2419 off += skip; 2420 len -= skip; 2421 2422 if (m->m_epg_hdrlen != 0) { 2423 if (off >= m->m_epg_hdrlen) { 2424 off -= m->m_epg_hdrlen; 2425 } else { 2426 seglen = m->m_epg_hdrlen - off; 2427 segoff = off; 2428 seglen = min(seglen, len); 2429 off = 0; 2430 len -= seglen; 2431 paddr = pmap_kextract( 2432 (vm_offset_t)&m->m_epg_hdr[segoff]); 2433 if (*nextaddr != paddr) 2434 nsegs++; 2435 *nextaddr = paddr + seglen; 2436 } 2437 } 2438 pgoff = m->m_epg_1st_off; 2439 for (i = 0; i < m->m_epg_npgs && len > 0; i++) { 2440 pglen = m_epg_pagelen(m, i, pgoff); 2441 if (off >= pglen) { 2442 off -= pglen; 2443 pgoff = 0; 2444 continue; 2445 } 2446 seglen = pglen - off; 2447 segoff = pgoff + off; 2448 off = 0; 2449 seglen = min(seglen, len); 2450 len -= seglen; 2451 paddr = m->m_epg_pa[i] + segoff; 2452 if (*nextaddr != paddr) 2453 nsegs++; 2454 *nextaddr = paddr + seglen; 2455 pgoff = 0; 2456 }; 2457 if (len != 0) { 2458 seglen = min(len, m->m_epg_trllen - off); 2459 len -= seglen; 2460 paddr = 
pmap_kextract((vm_offset_t)&m->m_epg_trail[off]); 2461 if (*nextaddr != paddr) 2462 nsegs++; 2463 *nextaddr = paddr + seglen; 2464 } 2465 2466 return (nsegs); 2467 } 2468 2469 2470 /* 2471 * Can deal with empty mbufs in the chain that have m_len = 0, but the chain 2472 * must have at least one mbuf that's not empty. It is possible for this 2473 * routine to return 0 if skip accounts for all the contents of the mbuf chain. 2474 */ 2475 static inline int 2476 count_mbuf_nsegs(struct mbuf *m, int skip, uint8_t *cflags) 2477 { 2478 vm_paddr_t nextaddr, paddr; 2479 vm_offset_t va; 2480 int len, nsegs; 2481 2482 M_ASSERTPKTHDR(m); 2483 MPASS(m->m_pkthdr.len > 0); 2484 MPASS(m->m_pkthdr.len >= skip); 2485 2486 nsegs = 0; 2487 nextaddr = 0; 2488 for (; m; m = m->m_next) { 2489 len = m->m_len; 2490 if (__predict_false(len == 0)) 2491 continue; 2492 if (skip >= len) { 2493 skip -= len; 2494 continue; 2495 } 2496 if ((m->m_flags & M_EXTPG) != 0) { 2497 *cflags |= MC_NOMAP; 2498 nsegs += count_mbuf_ext_pgs(m, skip, &nextaddr); 2499 skip = 0; 2500 continue; 2501 } 2502 va = mtod(m, vm_offset_t) + skip; 2503 len -= skip; 2504 skip = 0; 2505 paddr = pmap_kextract(va); 2506 nsegs += sglist_count((void *)(uintptr_t)va, len); 2507 if (paddr == nextaddr) 2508 nsegs--; 2509 nextaddr = pmap_kextract(va + len - 1) + 1; 2510 } 2511 2512 return (nsegs); 2513 } 2514 2515 /* 2516 * Analyze the mbuf to determine its tx needs. The mbuf passed in may change: 2517 * a) caller can assume it's been freed if this function returns with an error. 2518 * b) it may get defragged up if the gather list is too long for the hardware. 2519 */ 2520 int 2521 parse_pkt(struct adapter *sc, struct mbuf **mp) 2522 { 2523 struct mbuf *m0 = *mp, *m; 2524 int rc, nsegs, defragged = 0, offset; 2525 struct ether_header *eh; 2526 void *l3hdr; 2527 #if defined(INET) || defined(INET6) 2528 struct tcphdr *tcp; 2529 #endif 2530 #if defined(KERN_TLS) || defined(RATELIMIT) 2531 struct cxgbe_snd_tag *cst; 2532 #endif 2533 uint16_t eh_type; 2534 uint8_t cflags; 2535 2536 cflags = 0; 2537 M_ASSERTPKTHDR(m0); 2538 if (__predict_false(m0->m_pkthdr.len < ETHER_HDR_LEN)) { 2539 rc = EINVAL; 2540 fail: 2541 m_freem(m0); 2542 *mp = NULL; 2543 return (rc); 2544 } 2545 restart: 2546 /* 2547 * First count the number of gather list segments in the payload. 2548 * Defrag the mbuf if nsegs exceeds the hardware limit. 2549 */ 2550 M_ASSERTPKTHDR(m0); 2551 MPASS(m0->m_pkthdr.len > 0); 2552 nsegs = count_mbuf_nsegs(m0, 0, &cflags); 2553 #if defined(KERN_TLS) || defined(RATELIMIT) 2554 if (m0->m_pkthdr.csum_flags & CSUM_SND_TAG) 2555 cst = mst_to_cst(m0->m_pkthdr.snd_tag); 2556 else 2557 cst = NULL; 2558 #endif 2559 #ifdef KERN_TLS 2560 if (cst != NULL && cst->type == IF_SND_TAG_TYPE_TLS) { 2561 int len16; 2562 2563 cflags |= MC_TLS; 2564 set_mbuf_cflags(m0, cflags); 2565 rc = t6_ktls_parse_pkt(m0, &nsegs, &len16); 2566 if (rc != 0) 2567 goto fail; 2568 set_mbuf_nsegs(m0, nsegs); 2569 set_mbuf_len16(m0, len16); 2570 return (0); 2571 } 2572 #endif 2573 if (nsegs > (needs_tso(m0) ? TX_SGL_SEGS_TSO : TX_SGL_SEGS)) { 2574 if (defragged++ > 0 || (m = m_defrag(m0, M_NOWAIT)) == NULL) { 2575 rc = EFBIG; 2576 goto fail; 2577 } 2578 *mp = m0 = m; /* update caller's copy after defrag */ 2579 goto restart; 2580 } 2581 2582 if (__predict_false(nsegs > 2 && m0->m_pkthdr.len <= MHLEN && 2583 !(cflags & MC_NOMAP))) { 2584 m0 = m_pullup(m0, m0->m_pkthdr.len); 2585 if (m0 == NULL) { 2586 /* Should have left well enough alone. 
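 * m_pullup() frees the chain when it fails, so there is nothing left to
 * clean up here beyond reporting the error (m_freem(NULL) is a no-op on
 * the fail path).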
*/ 2587 rc = EFBIG; 2588 goto fail; 2589 } 2590 *mp = m0; /* update caller's copy after pullup */ 2591 goto restart; 2592 } 2593 set_mbuf_nsegs(m0, nsegs); 2594 set_mbuf_cflags(m0, cflags); 2595 if (sc->flags & IS_VF) 2596 set_mbuf_len16(m0, txpkt_vm_len16(nsegs, needs_tso(m0))); 2597 else 2598 set_mbuf_len16(m0, txpkt_len16(nsegs, needs_tso(m0))); 2599 2600 #ifdef RATELIMIT 2601 /* 2602 * Ethofld is limited to TCP and UDP for now, and only when L4 hw 2603 * checksumming is enabled. needs_l4_csum happens to check for all the 2604 * right things. 2605 */ 2606 if (__predict_false(needs_eo(cst) && !needs_l4_csum(m0))) { 2607 m_snd_tag_rele(m0->m_pkthdr.snd_tag); 2608 m0->m_pkthdr.snd_tag = NULL; 2609 m0->m_pkthdr.csum_flags &= ~CSUM_SND_TAG; 2610 cst = NULL; 2611 } 2612 #endif 2613 2614 if (!needs_hwcsum(m0) 2615 #ifdef RATELIMIT 2616 && !needs_eo(cst) 2617 #endif 2618 ) 2619 return (0); 2620 2621 m = m0; 2622 eh = mtod(m, struct ether_header *); 2623 eh_type = ntohs(eh->ether_type); 2624 if (eh_type == ETHERTYPE_VLAN) { 2625 struct ether_vlan_header *evh = (void *)eh; 2626 2627 eh_type = ntohs(evh->evl_proto); 2628 m0->m_pkthdr.l2hlen = sizeof(*evh); 2629 } else 2630 m0->m_pkthdr.l2hlen = sizeof(*eh); 2631 2632 offset = 0; 2633 l3hdr = m_advance(&m, &offset, m0->m_pkthdr.l2hlen); 2634 2635 switch (eh_type) { 2636 #ifdef INET6 2637 case ETHERTYPE_IPV6: 2638 { 2639 struct ip6_hdr *ip6 = l3hdr; 2640 2641 MPASS(!needs_tso(m0) || ip6->ip6_nxt == IPPROTO_TCP); 2642 2643 m0->m_pkthdr.l3hlen = sizeof(*ip6); 2644 break; 2645 } 2646 #endif 2647 #ifdef INET 2648 case ETHERTYPE_IP: 2649 { 2650 struct ip *ip = l3hdr; 2651 2652 m0->m_pkthdr.l3hlen = ip->ip_hl * 4; 2653 break; 2654 } 2655 #endif 2656 default: 2657 panic("%s: ethertype 0x%04x unknown. if_cxgbe must be compiled" 2658 " with the same INET/INET6 options as the kernel.", 2659 __func__, eh_type); 2660 } 2661 2662 #if defined(INET) || defined(INET6) 2663 if (needs_tcp_csum(m0)) { 2664 tcp = m_advance(&m, &offset, m0->m_pkthdr.l3hlen); 2665 m0->m_pkthdr.l4hlen = tcp->th_off * 4; 2666 #ifdef RATELIMIT 2667 if (tsclk >= 0 && *(uint32_t *)(tcp + 1) == ntohl(0x0101080a)) { 2668 set_mbuf_eo_tsclk_tsoff(m0, 2669 V_FW_ETH_TX_EO_WR_TSCLK(tsclk) | 2670 V_FW_ETH_TX_EO_WR_TSOFF(sizeof(*tcp) / 2 + 1)); 2671 } else 2672 set_mbuf_eo_tsclk_tsoff(m0, 0); 2673 } else if (needs_udp_csum(m0)) { 2674 m0->m_pkthdr.l4hlen = sizeof(struct udphdr); 2675 #endif 2676 } 2677 #ifdef RATELIMIT 2678 if (needs_eo(cst)) { 2679 u_int immhdrs; 2680 2681 /* EO WRs have the headers in the WR and not the GL. 
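 * For example, an untagged Ethernet + IPv4 + TCP frame with no options
 * has immhdrs = 14 + 20 + 20 = 54 bytes carried as immediate data.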
*/ 2682 immhdrs = m0->m_pkthdr.l2hlen + m0->m_pkthdr.l3hlen + 2683 m0->m_pkthdr.l4hlen; 2684 cflags = 0; 2685 nsegs = count_mbuf_nsegs(m0, immhdrs, &cflags); 2686 MPASS(cflags == mbuf_cflags(m0)); 2687 set_mbuf_eo_nsegs(m0, nsegs); 2688 set_mbuf_eo_len16(m0, 2689 txpkt_eo_len16(nsegs, immhdrs, needs_tso(m0))); 2690 } 2691 #endif 2692 #endif 2693 MPASS(m0 == *mp); 2694 return (0); 2695 } 2696 2697 void * 2698 start_wrq_wr(struct sge_wrq *wrq, int len16, struct wrq_cookie *cookie) 2699 { 2700 struct sge_eq *eq = &wrq->eq; 2701 struct adapter *sc = wrq->adapter; 2702 int ndesc, available; 2703 struct wrqe *wr; 2704 void *w; 2705 2706 MPASS(len16 > 0); 2707 ndesc = tx_len16_to_desc(len16); 2708 MPASS(ndesc > 0 && ndesc <= SGE_MAX_WR_NDESC); 2709 2710 EQ_LOCK(eq); 2711 2712 if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list)) 2713 drain_wrq_wr_list(sc, wrq); 2714 2715 if (!STAILQ_EMPTY(&wrq->wr_list)) { 2716 slowpath: 2717 EQ_UNLOCK(eq); 2718 wr = alloc_wrqe(len16 * 16, wrq); 2719 if (__predict_false(wr == NULL)) 2720 return (NULL); 2721 cookie->pidx = -1; 2722 cookie->ndesc = ndesc; 2723 return (&wr->wr); 2724 } 2725 2726 eq->cidx = read_hw_cidx(eq); 2727 if (eq->pidx == eq->cidx) 2728 available = eq->sidx - 1; 2729 else 2730 available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; 2731 if (available < ndesc) 2732 goto slowpath; 2733 2734 cookie->pidx = eq->pidx; 2735 cookie->ndesc = ndesc; 2736 TAILQ_INSERT_TAIL(&wrq->incomplete_wrs, cookie, link); 2737 2738 w = &eq->desc[eq->pidx]; 2739 IDXINCR(eq->pidx, ndesc, eq->sidx); 2740 if (__predict_false(cookie->pidx + ndesc > eq->sidx)) { 2741 w = &wrq->ss[0]; 2742 wrq->ss_pidx = cookie->pidx; 2743 wrq->ss_len = len16 * 16; 2744 } 2745 2746 EQ_UNLOCK(eq); 2747 2748 return (w); 2749 } 2750 2751 void 2752 commit_wrq_wr(struct sge_wrq *wrq, void *w, struct wrq_cookie *cookie) 2753 { 2754 struct sge_eq *eq = &wrq->eq; 2755 struct adapter *sc = wrq->adapter; 2756 int ndesc, pidx; 2757 struct wrq_cookie *prev, *next; 2758 2759 if (cookie->pidx == -1) { 2760 struct wrqe *wr = __containerof(w, struct wrqe, wr); 2761 2762 t4_wrq_tx(sc, wr); 2763 return; 2764 } 2765 2766 if (__predict_false(w == &wrq->ss[0])) { 2767 int n = (eq->sidx - wrq->ss_pidx) * EQ_ESIZE; 2768 2769 MPASS(wrq->ss_len > n); /* WR had better wrap around. */ 2770 bcopy(&wrq->ss[0], &eq->desc[wrq->ss_pidx], n); 2771 bcopy(&wrq->ss[n], &eq->desc[0], wrq->ss_len - n); 2772 wrq->tx_wrs_ss++; 2773 } else 2774 wrq->tx_wrs_direct++; 2775 2776 EQ_LOCK(eq); 2777 ndesc = cookie->ndesc; /* Can be more than SGE_MAX_WR_NDESC here. */ 2778 pidx = cookie->pidx; 2779 MPASS(pidx >= 0 && pidx < eq->sidx); 2780 prev = TAILQ_PREV(cookie, wrq_incomplete_wrs, link); 2781 next = TAILQ_NEXT(cookie, link); 2782 if (prev == NULL) { 2783 MPASS(pidx == eq->dbidx); 2784 if (next == NULL || ndesc >= 16) { 2785 int available; 2786 struct fw_eth_tx_pkt_wr *dst; /* any fw WR struct will do */ 2787 2788 /* 2789 * Note that the WR via which we'll request tx updates 2790 * is at pidx and not eq->pidx, which has moved on 2791 * already. 2792 */ 2793 dst = (void *)&eq->desc[pidx]; 2794 available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; 2795 if (available < eq->sidx / 4 && 2796 atomic_cmpset_int(&eq->equiq, 0, 1)) { 2797 /* 2798 * XXX: This is not 100% reliable with some 2799 * types of WRs. But this is a very unusual 2800 * situation for an ofld/ctrl queue anyway. 
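 * (The EQUIQ/EQUEQ bits request that the hardware report back, via an
 * egress update, once it has processed this WR, so the driver finds out
 * when descriptors have been freed up.)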
2801 */ 2802 dst->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ | 2803 F_FW_WR_EQUEQ); 2804 } 2805 2806 ring_eq_db(wrq->adapter, eq, ndesc); 2807 } else { 2808 MPASS(IDXDIFF(next->pidx, pidx, eq->sidx) == ndesc); 2809 next->pidx = pidx; 2810 next->ndesc += ndesc; 2811 } 2812 } else { 2813 MPASS(IDXDIFF(pidx, prev->pidx, eq->sidx) == prev->ndesc); 2814 prev->ndesc += ndesc; 2815 } 2816 TAILQ_REMOVE(&wrq->incomplete_wrs, cookie, link); 2817 2818 if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list)) 2819 drain_wrq_wr_list(sc, wrq); 2820 2821 #ifdef INVARIANTS 2822 if (TAILQ_EMPTY(&wrq->incomplete_wrs)) { 2823 /* Doorbell must have caught up to the pidx. */ 2824 MPASS(wrq->eq.pidx == wrq->eq.dbidx); 2825 } 2826 #endif 2827 EQ_UNLOCK(eq); 2828 } 2829 2830 static u_int 2831 can_resume_eth_tx(struct mp_ring *r) 2832 { 2833 struct sge_eq *eq = r->cookie; 2834 2835 return (total_available_tx_desc(eq) > eq->sidx / 8); 2836 } 2837 2838 static inline bool 2839 cannot_use_txpkts(struct mbuf *m) 2840 { 2841 /* maybe put a GL limit too, to avoid silliness? */ 2842 2843 return (needs_tso(m) || (mbuf_cflags(m) & (MC_RAW_WR | MC_TLS)) != 0); 2844 } 2845 2846 static inline int 2847 discard_tx(struct sge_eq *eq) 2848 { 2849 2850 return ((eq->flags & (EQ_ENABLED | EQ_QFLUSH)) != EQ_ENABLED); 2851 } 2852 2853 static inline int 2854 wr_can_update_eq(void *p) 2855 { 2856 struct fw_eth_tx_pkts_wr *wr = p; 2857 2858 switch (G_FW_WR_OP(be32toh(wr->op_pkd))) { 2859 case FW_ULPTX_WR: 2860 case FW_ETH_TX_PKT_WR: 2861 case FW_ETH_TX_PKTS_WR: 2862 case FW_ETH_TX_PKTS2_WR: 2863 case FW_ETH_TX_PKT_VM_WR: 2864 case FW_ETH_TX_PKTS_VM_WR: 2865 return (1); 2866 default: 2867 return (0); 2868 } 2869 } 2870 2871 static inline void 2872 set_txupdate_flags(struct sge_txq *txq, u_int avail, 2873 struct fw_eth_tx_pkt_wr *wr) 2874 { 2875 struct sge_eq *eq = &txq->eq; 2876 struct txpkts *txp = &txq->txp; 2877 2878 if ((txp->npkt > 0 || avail < eq->sidx / 2) && 2879 atomic_cmpset_int(&eq->equiq, 0, 1)) { 2880 wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ | F_FW_WR_EQUIQ); 2881 eq->equeqidx = eq->pidx; 2882 } else if (IDXDIFF(eq->pidx, eq->equeqidx, eq->sidx) >= 32) { 2883 wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ); 2884 eq->equeqidx = eq->pidx; 2885 } 2886 } 2887 2888 /* 2889 * r->items[cidx] to r->items[pidx], with a wraparound at r->size, are ready to 2890 * be consumed. Return the actual number consumed. 0 indicates a stall. 2891 */ 2892 static u_int 2893 eth_tx(struct mp_ring *r, u_int cidx, u_int pidx, bool *coalescing) 2894 { 2895 struct sge_txq *txq = r->cookie; 2896 struct ifnet *ifp = txq->ifp; 2897 struct sge_eq *eq = &txq->eq; 2898 struct txpkts *txp = &txq->txp; 2899 struct vi_info *vi = ifp->if_softc; 2900 struct adapter *sc = vi->adapter; 2901 u_int total, remaining; /* # of packets */ 2902 u_int n, avail, dbdiff; /* # of hardware descriptors */ 2903 int i, rc; 2904 struct mbuf *m0; 2905 bool snd; 2906 void *wr; /* start of the last WR written to the ring */ 2907 2908 TXQ_LOCK_ASSERT_OWNED(txq); 2909 2910 remaining = IDXDIFF(pidx, cidx, r->size); 2911 if (__predict_false(discard_tx(eq))) { 2912 for (i = 0; i < txp->npkt; i++) 2913 m_freem(txp->mb[i]); 2914 txp->npkt = 0; 2915 while (cidx != pidx) { 2916 m0 = r->items[cidx]; 2917 m_freem(m0); 2918 if (++cidx == r->size) 2919 cidx = 0; 2920 } 2921 reclaim_tx_descs(txq, eq->sidx); 2922 *coalescing = false; 2923 return (remaining); /* emptied */ 2924 } 2925 2926 /* How many hardware descriptors do we have readily available. 
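 * Worked example, assuming IDXDIFF(a, b, size) is the forward distance
 * from b to a modulo size: with sidx = 512, pidx = 500, cidx = 20 the
 * computation below gives (512 - 500 + 20) - 1 = 31 free descriptors.
 * When pidx == cidx the ring is empty and sidx - 1 are available; one
 * slot is always left unused so a full ring can be told apart from an
 * empty one.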
*/ 2927 if (eq->pidx == eq->cidx) { 2928 avail = eq->sidx - 1; 2929 if (txp->score++ >= 5) 2930 txp->score = 5; /* tx is completely idle, reset. */ 2931 } else 2932 avail = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; 2933 2934 total = 0; 2935 if (remaining == 0) { 2936 if (txp->score-- == 1) /* egr_update had to drain txpkts */ 2937 txp->score = 1; 2938 goto send_txpkts; 2939 } 2940 2941 dbdiff = 0; 2942 MPASS(remaining > 0); 2943 while (remaining > 0) { 2944 m0 = r->items[cidx]; 2945 M_ASSERTPKTHDR(m0); 2946 MPASS(m0->m_nextpkt == NULL); 2947 2948 if (avail < 2 * SGE_MAX_WR_NDESC) 2949 avail += reclaim_tx_descs(txq, 64); 2950 2951 if (txp->npkt > 0 || remaining > 1 || txp->score > 3 || 2952 atomic_load_int(&txq->eq.equiq) != 0) { 2953 if (sc->flags & IS_VF) 2954 rc = add_to_txpkts_vf(sc, txq, m0, avail, &snd); 2955 else 2956 rc = add_to_txpkts_pf(sc, txq, m0, avail, &snd); 2957 } else { 2958 snd = false; 2959 rc = EINVAL; 2960 } 2961 if (snd) { 2962 MPASS(txp->npkt > 0); 2963 for (i = 0; i < txp->npkt; i++) 2964 ETHER_BPF_MTAP(ifp, txp->mb[i]); 2965 if (txp->npkt > 1) { 2966 if (txp->score++ >= 10) 2967 txp->score = 10; 2968 MPASS(avail >= tx_len16_to_desc(txp->len16)); 2969 if (sc->flags & IS_VF) 2970 n = write_txpkts_vm_wr(sc, txq); 2971 else 2972 n = write_txpkts_wr(sc, txq); 2973 } else { 2974 MPASS(avail >= 2975 tx_len16_to_desc(mbuf_len16(txp->mb[0]))); 2976 if (sc->flags & IS_VF) 2977 n = write_txpkt_vm_wr(sc, txq, 2978 txp->mb[0]); 2979 else 2980 n = write_txpkt_wr(sc, txq, txp->mb[0], 2981 avail); 2982 } 2983 MPASS(n <= SGE_MAX_WR_NDESC); 2984 avail -= n; 2985 dbdiff += n; 2986 wr = &eq->desc[eq->pidx]; 2987 IDXINCR(eq->pidx, n, eq->sidx); 2988 txp->npkt = 0; /* emptied */ 2989 } 2990 if (rc == 0) { 2991 /* m0 was coalesced into txq->txpkts. */ 2992 goto next_mbuf; 2993 } 2994 if (rc == EAGAIN) { 2995 /* 2996 * m0 is suitable for tx coalescing but could not be 2997 * combined with the existing txq->txpkts, which has now 2998 * been transmitted. Start a new txpkts with m0. 
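 * In other words the contract with add_to_txpkts_{pf,vf}() is: rc == 0
 * means m0 was folded into txp, EAGAIN means txp was full and has just
 * been written out (snd is true) so m0 should be retried against an
 * empty txp, and any other error means m0 gets its own work request in
 * the code below.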
2999 */ 3000 MPASS(snd); 3001 MPASS(txp->npkt == 0); 3002 continue; 3003 } 3004 3005 MPASS(rc != 0 && rc != EAGAIN); 3006 MPASS(txp->npkt == 0); 3007 wr = &eq->desc[eq->pidx]; 3008 if (mbuf_cflags(m0) & MC_RAW_WR) { 3009 n = write_raw_wr(txq, wr, m0, avail); 3010 #ifdef KERN_TLS 3011 } else if (mbuf_cflags(m0) & MC_TLS) { 3012 ETHER_BPF_MTAP(ifp, m0); 3013 n = t6_ktls_write_wr(txq, wr, m0, mbuf_nsegs(m0), 3014 avail); 3015 #endif 3016 } else { 3017 n = tx_len16_to_desc(mbuf_len16(m0)); 3018 if (__predict_false(avail < n)) { 3019 avail += reclaim_tx_descs(txq, 32); 3020 if (avail < n) 3021 break; /* out of descriptors */ 3022 } 3023 ETHER_BPF_MTAP(ifp, m0); 3024 if (sc->flags & IS_VF) 3025 n = write_txpkt_vm_wr(sc, txq, m0); 3026 else 3027 n = write_txpkt_wr(sc, txq, m0, avail); 3028 } 3029 MPASS(n >= 1 && n <= avail); 3030 if (!(mbuf_cflags(m0) & MC_TLS)) 3031 MPASS(n <= SGE_MAX_WR_NDESC); 3032 3033 avail -= n; 3034 dbdiff += n; 3035 IDXINCR(eq->pidx, n, eq->sidx); 3036 3037 if (dbdiff >= 512 / EQ_ESIZE) { /* X_FETCHBURSTMAX_512B */ 3038 if (wr_can_update_eq(wr)) 3039 set_txupdate_flags(txq, avail, wr); 3040 ring_eq_db(sc, eq, dbdiff); 3041 avail += reclaim_tx_descs(txq, 32); 3042 dbdiff = 0; 3043 } 3044 next_mbuf: 3045 total++; 3046 remaining--; 3047 if (__predict_false(++cidx == r->size)) 3048 cidx = 0; 3049 } 3050 if (dbdiff != 0) { 3051 if (wr_can_update_eq(wr)) 3052 set_txupdate_flags(txq, avail, wr); 3053 ring_eq_db(sc, eq, dbdiff); 3054 reclaim_tx_descs(txq, 32); 3055 } else if (eq->pidx == eq->cidx && txp->npkt > 0 && 3056 atomic_load_int(&txq->eq.equiq) == 0) { 3057 /* 3058 * If nothing was submitted to the chip for tx (it was coalesced 3059 * into txpkts instead) and there is no tx update outstanding 3060 * then we need to send txpkts now. 
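 * Nothing else would flush them: the hardware has no outstanding
 * descriptors that could generate an egress update and there are no more
 * mbufs in the ring to piggyback on.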
3061 */ 3062 send_txpkts: 3063 MPASS(txp->npkt > 0); 3064 for (i = 0; i < txp->npkt; i++) 3065 ETHER_BPF_MTAP(ifp, txp->mb[i]); 3066 if (txp->npkt > 1) { 3067 MPASS(avail >= tx_len16_to_desc(txp->len16)); 3068 if (sc->flags & IS_VF) 3069 n = write_txpkts_vm_wr(sc, txq); 3070 else 3071 n = write_txpkts_wr(sc, txq); 3072 } else { 3073 MPASS(avail >= 3074 tx_len16_to_desc(mbuf_len16(txp->mb[0]))); 3075 if (sc->flags & IS_VF) 3076 n = write_txpkt_vm_wr(sc, txq, txp->mb[0]); 3077 else 3078 n = write_txpkt_wr(sc, txq, txp->mb[0], avail); 3079 } 3080 MPASS(n <= SGE_MAX_WR_NDESC); 3081 wr = &eq->desc[eq->pidx]; 3082 IDXINCR(eq->pidx, n, eq->sidx); 3083 txp->npkt = 0; /* emptied */ 3084 3085 MPASS(wr_can_update_eq(wr)); 3086 set_txupdate_flags(txq, avail - n, wr); 3087 ring_eq_db(sc, eq, n); 3088 reclaim_tx_descs(txq, 32); 3089 } 3090 *coalescing = txp->npkt > 0; 3091 3092 return (total); 3093 } 3094 3095 static inline void 3096 init_iq(struct sge_iq *iq, struct adapter *sc, int tmr_idx, int pktc_idx, 3097 int qsize) 3098 { 3099 3100 KASSERT(tmr_idx >= 0 && tmr_idx < SGE_NTIMERS, 3101 ("%s: bad tmr_idx %d", __func__, tmr_idx)); 3102 KASSERT(pktc_idx < SGE_NCOUNTERS, /* -ve is ok, means don't use */ 3103 ("%s: bad pktc_idx %d", __func__, pktc_idx)); 3104 3105 iq->flags = 0; 3106 iq->adapter = sc; 3107 iq->intr_params = V_QINTR_TIMER_IDX(tmr_idx); 3108 iq->intr_pktc_idx = SGE_NCOUNTERS - 1; 3109 if (pktc_idx >= 0) { 3110 iq->intr_params |= F_QINTR_CNT_EN; 3111 iq->intr_pktc_idx = pktc_idx; 3112 } 3113 iq->qsize = roundup2(qsize, 16); /* See FW_IQ_CMD/iqsize */ 3114 iq->sidx = iq->qsize - sc->params.sge.spg_len / IQ_ESIZE; 3115 } 3116 3117 static inline void 3118 init_fl(struct adapter *sc, struct sge_fl *fl, int qsize, int maxp, char *name) 3119 { 3120 3121 fl->qsize = qsize; 3122 fl->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE; 3123 strlcpy(fl->lockname, name, sizeof(fl->lockname)); 3124 if (sc->flags & BUF_PACKING_OK && 3125 ((!is_t4(sc) && buffer_packing) || /* T5+: enabled unless 0 */ 3126 (is_t4(sc) && buffer_packing == 1)))/* T4: disabled unless 1 */ 3127 fl->flags |= FL_BUF_PACKING; 3128 fl->zidx = find_refill_source(sc, maxp, fl->flags & FL_BUF_PACKING); 3129 fl->safe_zidx = sc->sge.safe_zidx; 3130 } 3131 3132 static inline void 3133 init_eq(struct adapter *sc, struct sge_eq *eq, int eqtype, int qsize, 3134 uint8_t tx_chan, uint16_t iqid, char *name) 3135 { 3136 KASSERT(eqtype <= EQ_TYPEMASK, ("%s: bad qtype %d", __func__, eqtype)); 3137 3138 eq->flags = eqtype & EQ_TYPEMASK; 3139 eq->tx_chan = tx_chan; 3140 eq->iqid = iqid; 3141 eq->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE; 3142 strlcpy(eq->lockname, name, sizeof(eq->lockname)); 3143 } 3144 3145 static int 3146 alloc_ring(struct adapter *sc, size_t len, bus_dma_tag_t *tag, 3147 bus_dmamap_t *map, bus_addr_t *pa, void **va) 3148 { 3149 int rc; 3150 3151 rc = bus_dma_tag_create(sc->dmat, 512, 0, BUS_SPACE_MAXADDR, 3152 BUS_SPACE_MAXADDR, NULL, NULL, len, 1, len, 0, NULL, NULL, tag); 3153 if (rc != 0) { 3154 device_printf(sc->dev, "cannot allocate DMA tag: %d\n", rc); 3155 goto done; 3156 } 3157 3158 rc = bus_dmamem_alloc(*tag, va, 3159 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, map); 3160 if (rc != 0) { 3161 device_printf(sc->dev, "cannot allocate DMA memory: %d\n", rc); 3162 goto done; 3163 } 3164 3165 rc = bus_dmamap_load(*tag, *map, *va, len, oneseg_dma_callback, pa, 0); 3166 if (rc != 0) { 3167 device_printf(sc->dev, "cannot load DMA map: %d\n", rc); 3168 goto done; 3169 } 3170 done: 3171 if (rc) 3172 free_ring(sc, 
*tag, *map, *pa, *va); 3173 3174 return (rc); 3175 } 3176 3177 static int 3178 free_ring(struct adapter *sc, bus_dma_tag_t tag, bus_dmamap_t map, 3179 bus_addr_t pa, void *va) 3180 { 3181 if (pa) 3182 bus_dmamap_unload(tag, map); 3183 if (va) 3184 bus_dmamem_free(tag, va, map); 3185 if (tag) 3186 bus_dma_tag_destroy(tag); 3187 3188 return (0); 3189 } 3190 3191 /* 3192 * Allocates the ring for an ingress queue and an optional freelist. If the 3193 * freelist is specified it will be allocated and then associated with the 3194 * ingress queue. 3195 * 3196 * Returns errno on failure. Resources allocated up to that point may still be 3197 * allocated. Caller is responsible for cleanup in case this function fails. 3198 * 3199 * If the ingress queue will take interrupts directly then the intr_idx 3200 * specifies the vector, starting from 0. -1 means the interrupts for this 3201 * queue should be forwarded to the fwq. 3202 */ 3203 static int 3204 alloc_iq_fl(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl, 3205 int intr_idx, int cong) 3206 { 3207 int rc, i, cntxt_id; 3208 size_t len; 3209 struct fw_iq_cmd c; 3210 struct port_info *pi = vi->pi; 3211 struct adapter *sc = iq->adapter; 3212 struct sge_params *sp = &sc->params.sge; 3213 __be32 v = 0; 3214 3215 len = iq->qsize * IQ_ESIZE; 3216 rc = alloc_ring(sc, len, &iq->desc_tag, &iq->desc_map, &iq->ba, 3217 (void **)&iq->desc); 3218 if (rc != 0) 3219 return (rc); 3220 3221 bzero(&c, sizeof(c)); 3222 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | 3223 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) | 3224 V_FW_IQ_CMD_VFN(0)); 3225 3226 c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART | 3227 FW_LEN16(c)); 3228 3229 /* Special handling for firmware event queue */ 3230 if (iq == &sc->sge.fwq) 3231 v |= F_FW_IQ_CMD_IQASYNCH; 3232 3233 if (intr_idx < 0) { 3234 /* Forwarded interrupts, all headed to fwq */ 3235 v |= F_FW_IQ_CMD_IQANDST; 3236 v |= V_FW_IQ_CMD_IQANDSTINDEX(sc->sge.fwq.cntxt_id); 3237 } else { 3238 KASSERT(intr_idx < sc->intr_count, 3239 ("%s: invalid direct intr_idx %d", __func__, intr_idx)); 3240 v |= V_FW_IQ_CMD_IQANDSTINDEX(intr_idx); 3241 } 3242 3243 c.type_to_iqandstindex = htobe32(v | 3244 V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | 3245 V_FW_IQ_CMD_VIID(vi->viid) | 3246 V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT)); 3247 c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) | 3248 F_FW_IQ_CMD_IQGTSMODE | 3249 V_FW_IQ_CMD_IQINTCNTTHRESH(iq->intr_pktc_idx) | 3250 V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4)); 3251 c.iqsize = htobe16(iq->qsize); 3252 c.iqaddr = htobe64(iq->ba); 3253 if (cong >= 0) 3254 c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN); 3255 3256 if (fl) { 3257 mtx_init(&fl->fl_lock, fl->lockname, NULL, MTX_DEF); 3258 3259 len = fl->qsize * EQ_ESIZE; 3260 rc = alloc_ring(sc, len, &fl->desc_tag, &fl->desc_map, 3261 &fl->ba, (void **)&fl->desc); 3262 if (rc) 3263 return (rc); 3264 3265 /* Allocate space for one software descriptor per buffer. 
*/ 3266 rc = alloc_fl_sdesc(fl); 3267 if (rc != 0) { 3268 device_printf(sc->dev, 3269 "failed to setup fl software descriptors: %d\n", 3270 rc); 3271 return (rc); 3272 } 3273 3274 if (fl->flags & FL_BUF_PACKING) { 3275 fl->lowat = roundup2(sp->fl_starve_threshold2, 8); 3276 fl->buf_boundary = sp->pack_boundary; 3277 } else { 3278 fl->lowat = roundup2(sp->fl_starve_threshold, 8); 3279 fl->buf_boundary = 16; 3280 } 3281 if (fl_pad && fl->buf_boundary < sp->pad_boundary) 3282 fl->buf_boundary = sp->pad_boundary; 3283 3284 c.iqns_to_fl0congen |= 3285 htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) | 3286 F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO | 3287 (fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0) | 3288 (fl->flags & FL_BUF_PACKING ? F_FW_IQ_CMD_FL0PACKEN : 3289 0)); 3290 if (cong >= 0) { 3291 c.iqns_to_fl0congen |= 3292 htobe32(V_FW_IQ_CMD_FL0CNGCHMAP(cong) | 3293 F_FW_IQ_CMD_FL0CONGCIF | 3294 F_FW_IQ_CMD_FL0CONGEN); 3295 } 3296 c.fl0dcaen_to_fl0cidxfthresh = 3297 htobe16(V_FW_IQ_CMD_FL0FBMIN(chip_id(sc) <= CHELSIO_T5 ? 3298 X_FETCHBURSTMIN_128B : X_FETCHBURSTMIN_64B_T6) | 3299 V_FW_IQ_CMD_FL0FBMAX(chip_id(sc) <= CHELSIO_T5 ? 3300 X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B)); 3301 c.fl0size = htobe16(fl->qsize); 3302 c.fl0addr = htobe64(fl->ba); 3303 } 3304 3305 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 3306 if (rc != 0) { 3307 device_printf(sc->dev, 3308 "failed to create ingress queue: %d\n", rc); 3309 return (rc); 3310 } 3311 3312 iq->cidx = 0; 3313 iq->gen = F_RSPD_GEN; 3314 iq->intr_next = iq->intr_params; 3315 iq->cntxt_id = be16toh(c.iqid); 3316 iq->abs_id = be16toh(c.physiqid); 3317 iq->flags |= IQ_ALLOCATED; 3318 3319 cntxt_id = iq->cntxt_id - sc->sge.iq_start; 3320 if (cntxt_id >= sc->sge.niq) { 3321 panic ("%s: iq->cntxt_id (%d) more than the max (%d)", __func__, 3322 cntxt_id, sc->sge.niq - 1); 3323 } 3324 sc->sge.iqmap[cntxt_id] = iq; 3325 3326 if (fl) { 3327 u_int qid; 3328 3329 iq->flags |= IQ_HAS_FL; 3330 fl->cntxt_id = be16toh(c.fl0id); 3331 fl->pidx = fl->cidx = 0; 3332 3333 cntxt_id = fl->cntxt_id - sc->sge.eq_start; 3334 if (cntxt_id >= sc->sge.neq) { 3335 panic("%s: fl->cntxt_id (%d) more than the max (%d)", 3336 __func__, cntxt_id, sc->sge.neq - 1); 3337 } 3338 sc->sge.eqmap[cntxt_id] = (void *)fl; 3339 3340 qid = fl->cntxt_id; 3341 if (isset(&sc->doorbells, DOORBELL_UDB)) { 3342 uint32_t s_qpp = sc->params.sge.eq_s_qpp; 3343 uint32_t mask = (1 << s_qpp) - 1; 3344 volatile uint8_t *udb; 3345 3346 udb = sc->udbs_base + UDBS_DB_OFFSET; 3347 udb += (qid >> s_qpp) << PAGE_SHIFT; 3348 qid &= mask; 3349 if (qid < PAGE_SIZE / UDBS_SEG_SIZE) { 3350 udb += qid << UDBS_SEG_SHIFT; 3351 qid = 0; 3352 } 3353 fl->udb = (volatile void *)udb; 3354 } 3355 fl->dbval = V_QID(qid) | sc->chip_params->sge_fl_db; 3356 3357 FL_LOCK(fl); 3358 /* Enough to make sure the SGE doesn't think it's starved */ 3359 refill_fl(sc, fl, fl->lowat); 3360 FL_UNLOCK(fl); 3361 } 3362 3363 if (chip_id(sc) >= CHELSIO_T5 && !(sc->flags & IS_VF) && cong >= 0) { 3364 uint32_t param, val; 3365 3366 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | 3367 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) | 3368 V_FW_PARAMS_PARAM_YZ(iq->cntxt_id); 3369 if (cong == 0) 3370 val = 1 << 19; 3371 else { 3372 val = 2 << 19; 3373 for (i = 0; i < 4; i++) { 3374 if (cong & (1 << i)) 3375 val |= 1 << (i << 2); 3376 } 3377 } 3378 3379 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); 3380 if (rc != 0) { 3381 /* report error but carry on */ 3382 device_printf(sc->dev, 3383 "failed to set congestion manager context 
for " 3384 "ingress queue %d: %d\n", iq->cntxt_id, rc); 3385 } 3386 } 3387 3388 /* Enable IQ interrupts */ 3389 atomic_store_rel_int(&iq->state, IQS_IDLE); 3390 t4_write_reg(sc, sc->sge_gts_reg, V_SEINTARM(iq->intr_params) | 3391 V_INGRESSQID(iq->cntxt_id)); 3392 3393 return (0); 3394 } 3395 3396 static int 3397 free_iq_fl(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl) 3398 { 3399 int rc; 3400 struct adapter *sc = iq->adapter; 3401 device_t dev; 3402 3403 if (sc == NULL) 3404 return (0); /* nothing to do */ 3405 3406 dev = vi ? vi->dev : sc->dev; 3407 3408 if (iq->flags & IQ_ALLOCATED) { 3409 rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0, 3410 FW_IQ_TYPE_FL_INT_CAP, iq->cntxt_id, 3411 fl ? fl->cntxt_id : 0xffff, 0xffff); 3412 if (rc != 0) { 3413 device_printf(dev, 3414 "failed to free queue %p: %d\n", iq, rc); 3415 return (rc); 3416 } 3417 iq->flags &= ~IQ_ALLOCATED; 3418 } 3419 3420 free_ring(sc, iq->desc_tag, iq->desc_map, iq->ba, iq->desc); 3421 3422 bzero(iq, sizeof(*iq)); 3423 3424 if (fl) { 3425 free_ring(sc, fl->desc_tag, fl->desc_map, fl->ba, 3426 fl->desc); 3427 3428 if (fl->sdesc) 3429 free_fl_sdesc(sc, fl); 3430 3431 if (mtx_initialized(&fl->fl_lock)) 3432 mtx_destroy(&fl->fl_lock); 3433 3434 bzero(fl, sizeof(*fl)); 3435 } 3436 3437 return (0); 3438 } 3439 3440 static void 3441 add_iq_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *oid, 3442 struct sge_iq *iq) 3443 { 3444 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 3445 3446 SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD, &iq->ba, 3447 "bus address of descriptor ring"); 3448 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL, 3449 iq->qsize * IQ_ESIZE, "descriptor ring size in bytes"); 3450 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "abs_id", 3451 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, &iq->abs_id, 0, 3452 sysctl_uint16, "I", "absolute id of the queue"); 3453 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id", 3454 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, &iq->cntxt_id, 0, 3455 sysctl_uint16, "I", "SGE context id of the queue"); 3456 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx", 3457 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, &iq->cidx, 0, 3458 sysctl_uint16, "I", "consumer index"); 3459 } 3460 3461 static void 3462 add_fl_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx, 3463 struct sysctl_oid *oid, struct sge_fl *fl) 3464 { 3465 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 3466 3467 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fl", 3468 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "freelist"); 3469 children = SYSCTL_CHILDREN(oid); 3470 3471 SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD, 3472 &fl->ba, "bus address of descriptor ring"); 3473 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL, 3474 fl->sidx * EQ_ESIZE + sc->params.sge.spg_len, 3475 "desc ring size in bytes"); 3476 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id", 3477 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, &fl->cntxt_id, 0, 3478 sysctl_uint16, "I", "SGE context id of the freelist"); 3479 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "padding", CTLFLAG_RD, NULL, 3480 fl_pad ? 1 : 0, "padding enabled"); 3481 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "packing", CTLFLAG_RD, NULL, 3482 fl->flags & FL_BUF_PACKING ? 
1 : 0, "packing enabled"); 3483 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, &fl->cidx, 3484 0, "consumer index"); 3485 if (fl->flags & FL_BUF_PACKING) { 3486 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_offset", 3487 CTLFLAG_RD, &fl->rx_offset, 0, "packing rx offset"); 3488 } 3489 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, &fl->pidx, 3490 0, "producer index"); 3491 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_allocated", 3492 CTLFLAG_RD, &fl->cl_allocated, "# of clusters allocated"); 3493 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_recycled", 3494 CTLFLAG_RD, &fl->cl_recycled, "# of clusters recycled"); 3495 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_fast_recycled", 3496 CTLFLAG_RD, &fl->cl_fast_recycled, "# of clusters recycled (fast)"); 3497 } 3498 3499 static int 3500 alloc_fwq(struct adapter *sc) 3501 { 3502 int rc, intr_idx; 3503 struct sge_iq *fwq = &sc->sge.fwq; 3504 struct sysctl_oid *oid = device_get_sysctl_tree(sc->dev); 3505 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 3506 3507 init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE); 3508 if (sc->flags & IS_VF) 3509 intr_idx = 0; 3510 else 3511 intr_idx = sc->intr_count > 1 ? 1 : 0; 3512 rc = alloc_iq_fl(&sc->port[0]->vi[0], fwq, NULL, intr_idx, -1); 3513 if (rc != 0) { 3514 device_printf(sc->dev, 3515 "failed to create firmware event queue: %d\n", rc); 3516 return (rc); 3517 } 3518 3519 oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "fwq", 3520 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "firmware event queue"); 3521 add_iq_sysctls(&sc->ctx, oid, fwq); 3522 3523 return (0); 3524 } 3525 3526 static int 3527 free_fwq(struct adapter *sc) 3528 { 3529 return free_iq_fl(NULL, &sc->sge.fwq, NULL); 3530 } 3531 3532 static int 3533 alloc_ctrlq(struct adapter *sc, struct sge_wrq *ctrlq, int idx, 3534 struct sysctl_oid *oid) 3535 { 3536 int rc; 3537 char name[16]; 3538 struct sysctl_oid_list *children; 3539 3540 snprintf(name, sizeof(name), "%s ctrlq%d", device_get_nameunit(sc->dev), 3541 idx); 3542 init_eq(sc, &ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, sc->port[idx]->tx_chan, 3543 sc->sge.fwq.cntxt_id, name); 3544 3545 children = SYSCTL_CHILDREN(oid); 3546 snprintf(name, sizeof(name), "%d", idx); 3547 oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, name, 3548 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "ctrl queue"); 3549 rc = alloc_wrq(sc, NULL, ctrlq, oid); 3550 3551 return (rc); 3552 } 3553 3554 int 3555 tnl_cong(struct port_info *pi, int drop) 3556 { 3557 3558 if (drop == -1) 3559 return (-1); 3560 else if (drop == 1) 3561 return (0); 3562 else 3563 return (pi->rx_e_chan_map); 3564 } 3565 3566 static int 3567 alloc_rxq(struct vi_info *vi, struct sge_rxq *rxq, int intr_idx, int idx, 3568 struct sysctl_oid *oid) 3569 { 3570 int rc; 3571 struct adapter *sc = vi->adapter; 3572 struct sysctl_oid_list *children; 3573 char name[16]; 3574 3575 rc = alloc_iq_fl(vi, &rxq->iq, &rxq->fl, intr_idx, 3576 tnl_cong(vi->pi, cong_drop)); 3577 if (rc != 0) 3578 return (rc); 3579 3580 if (idx == 0) 3581 sc->sge.iq_base = rxq->iq.abs_id - rxq->iq.cntxt_id; 3582 else 3583 KASSERT(rxq->iq.cntxt_id + sc->sge.iq_base == rxq->iq.abs_id, 3584 ("iq_base mismatch")); 3585 KASSERT(sc->sge.iq_base == 0 || sc->flags & IS_VF, 3586 ("PF with non-zero iq_base")); 3587 3588 /* 3589 * The freelist is just barely above the starvation threshold right now, 3590 * fill it up a bit more. 
3591 */ 3592 FL_LOCK(&rxq->fl); 3593 refill_fl(sc, &rxq->fl, 128); 3594 FL_UNLOCK(&rxq->fl); 3595 3596 #if defined(INET) || defined(INET6) 3597 rc = tcp_lro_init_args(&rxq->lro, vi->ifp, lro_entries, lro_mbufs); 3598 if (rc != 0) 3599 return (rc); 3600 MPASS(rxq->lro.ifp == vi->ifp); /* also indicates LRO init'ed */ 3601 3602 if (vi->ifp->if_capenable & IFCAP_LRO) 3603 rxq->iq.flags |= IQ_LRO_ENABLED; 3604 #endif 3605 if (vi->ifp->if_capenable & IFCAP_HWRXTSTMP) 3606 rxq->iq.flags |= IQ_RX_TIMESTAMP; 3607 rxq->ifp = vi->ifp; 3608 3609 children = SYSCTL_CHILDREN(oid); 3610 3611 snprintf(name, sizeof(name), "%d", idx); 3612 oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, 3613 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "rx queue"); 3614 children = SYSCTL_CHILDREN(oid); 3615 3616 add_iq_sysctls(&vi->ctx, oid, &rxq->iq); 3617 #if defined(INET) || defined(INET6) 3618 SYSCTL_ADD_U64(&vi->ctx, children, OID_AUTO, "lro_queued", CTLFLAG_RD, 3619 &rxq->lro.lro_queued, 0, NULL); 3620 SYSCTL_ADD_U64(&vi->ctx, children, OID_AUTO, "lro_flushed", CTLFLAG_RD, 3621 &rxq->lro.lro_flushed, 0, NULL); 3622 #endif 3623 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "rxcsum", CTLFLAG_RD, 3624 &rxq->rxcsum, "# of times hardware assisted with checksum"); 3625 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "vlan_extraction", 3626 CTLFLAG_RD, &rxq->vlan_extraction, 3627 "# of times hardware extracted 802.1Q tag"); 3628 3629 add_fl_sysctls(sc, &vi->ctx, oid, &rxq->fl); 3630 3631 return (rc); 3632 } 3633 3634 static int 3635 free_rxq(struct vi_info *vi, struct sge_rxq *rxq) 3636 { 3637 int rc; 3638 3639 #if defined(INET) || defined(INET6) 3640 if (rxq->lro.ifp) { 3641 tcp_lro_free(&rxq->lro); 3642 rxq->lro.ifp = NULL; 3643 } 3644 #endif 3645 3646 rc = free_iq_fl(vi, &rxq->iq, &rxq->fl); 3647 if (rc == 0) 3648 bzero(rxq, sizeof(*rxq)); 3649 3650 return (rc); 3651 } 3652 3653 #ifdef TCP_OFFLOAD 3654 static int 3655 alloc_ofld_rxq(struct vi_info *vi, struct sge_ofld_rxq *ofld_rxq, 3656 int intr_idx, int idx, struct sysctl_oid *oid) 3657 { 3658 struct port_info *pi = vi->pi; 3659 int rc; 3660 struct sysctl_oid_list *children; 3661 char name[16]; 3662 3663 rc = alloc_iq_fl(vi, &ofld_rxq->iq, &ofld_rxq->fl, intr_idx, 0); 3664 if (rc != 0) 3665 return (rc); 3666 3667 children = SYSCTL_CHILDREN(oid); 3668 3669 snprintf(name, sizeof(name), "%d", idx); 3670 oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, 3671 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "rx queue"); 3672 add_iq_sysctls(&vi->ctx, oid, &ofld_rxq->iq); 3673 add_fl_sysctls(pi->adapter, &vi->ctx, oid, &ofld_rxq->fl); 3674 3675 return (rc); 3676 } 3677 3678 static int 3679 free_ofld_rxq(struct vi_info *vi, struct sge_ofld_rxq *ofld_rxq) 3680 { 3681 int rc; 3682 3683 rc = free_iq_fl(vi, &ofld_rxq->iq, &ofld_rxq->fl); 3684 if (rc == 0) 3685 bzero(ofld_rxq, sizeof(*ofld_rxq)); 3686 3687 return (rc); 3688 } 3689 #endif 3690 3691 #ifdef DEV_NETMAP 3692 static int 3693 alloc_nm_rxq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int intr_idx, 3694 int idx, struct sysctl_oid *oid) 3695 { 3696 int rc; 3697 struct sysctl_oid_list *children; 3698 struct sysctl_ctx_list *ctx; 3699 char name[16]; 3700 size_t len; 3701 struct adapter *sc = vi->adapter; 3702 struct netmap_adapter *na = NA(vi->ifp); 3703 3704 MPASS(na != NULL); 3705 3706 len = vi->qsize_rxq * IQ_ESIZE; 3707 rc = alloc_ring(sc, len, &nm_rxq->iq_desc_tag, &nm_rxq->iq_desc_map, 3708 &nm_rxq->iq_ba, (void **)&nm_rxq->iq_desc); 3709 if (rc != 0) 3710 return (rc); 3711 3712 len = na->num_rx_desc * EQ_ESIZE + 
sc->params.sge.spg_len; 3713 rc = alloc_ring(sc, len, &nm_rxq->fl_desc_tag, &nm_rxq->fl_desc_map, 3714 &nm_rxq->fl_ba, (void **)&nm_rxq->fl_desc); 3715 if (rc != 0) 3716 return (rc); 3717 3718 nm_rxq->vi = vi; 3719 nm_rxq->nid = idx; 3720 nm_rxq->iq_cidx = 0; 3721 nm_rxq->iq_sidx = vi->qsize_rxq - sc->params.sge.spg_len / IQ_ESIZE; 3722 nm_rxq->iq_gen = F_RSPD_GEN; 3723 nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0; 3724 nm_rxq->fl_sidx = na->num_rx_desc; 3725 nm_rxq->fl_sidx2 = nm_rxq->fl_sidx; /* copy for rxsync cacheline */ 3726 nm_rxq->intr_idx = intr_idx; 3727 nm_rxq->iq_cntxt_id = INVALID_NM_RXQ_CNTXT_ID; 3728 3729 ctx = &vi->ctx; 3730 children = SYSCTL_CHILDREN(oid); 3731 3732 snprintf(name, sizeof(name), "%d", idx); 3733 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name, 3734 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "rx queue"); 3735 children = SYSCTL_CHILDREN(oid); 3736 3737 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "abs_id", 3738 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, &nm_rxq->iq_abs_id, 3739 0, sysctl_uint16, "I", "absolute id of the queue"); 3740 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id", 3741 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, &nm_rxq->iq_cntxt_id, 3742 0, sysctl_uint16, "I", "SGE context id of the queue"); 3743 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx", 3744 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, &nm_rxq->iq_cidx, 0, 3745 sysctl_uint16, "I", "consumer index"); 3746 3747 children = SYSCTL_CHILDREN(oid); 3748 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fl", 3749 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "freelist"); 3750 children = SYSCTL_CHILDREN(oid); 3751 3752 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id", 3753 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, &nm_rxq->fl_cntxt_id, 3754 0, sysctl_uint16, "I", "SGE context id of the freelist"); 3755 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, 3756 &nm_rxq->fl_cidx, 0, "consumer index"); 3757 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, 3758 &nm_rxq->fl_pidx, 0, "producer index"); 3759 3760 return (rc); 3761 } 3762 3763 3764 static int 3765 free_nm_rxq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq) 3766 { 3767 struct adapter *sc = vi->adapter; 3768 3769 if (vi->flags & VI_INIT_DONE) 3770 MPASS(nm_rxq->iq_cntxt_id == INVALID_NM_RXQ_CNTXT_ID); 3771 else 3772 MPASS(nm_rxq->iq_cntxt_id == 0); 3773 3774 free_ring(sc, nm_rxq->iq_desc_tag, nm_rxq->iq_desc_map, nm_rxq->iq_ba, 3775 nm_rxq->iq_desc); 3776 free_ring(sc, nm_rxq->fl_desc_tag, nm_rxq->fl_desc_map, nm_rxq->fl_ba, 3777 nm_rxq->fl_desc); 3778 3779 return (0); 3780 } 3781 3782 static int 3783 alloc_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq, int iqidx, int idx, 3784 struct sysctl_oid *oid) 3785 { 3786 int rc; 3787 size_t len; 3788 struct port_info *pi = vi->pi; 3789 struct adapter *sc = pi->adapter; 3790 struct netmap_adapter *na = NA(vi->ifp); 3791 char name[16]; 3792 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 3793 3794 len = na->num_tx_desc * EQ_ESIZE + sc->params.sge.spg_len; 3795 rc = alloc_ring(sc, len, &nm_txq->desc_tag, &nm_txq->desc_map, 3796 &nm_txq->ba, (void **)&nm_txq->desc); 3797 if (rc) 3798 return (rc); 3799 3800 nm_txq->pidx = nm_txq->cidx = 0; 3801 nm_txq->sidx = na->num_tx_desc; 3802 nm_txq->nid = idx; 3803 nm_txq->iqidx = iqidx; 3804 nm_txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) | 3805 V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) | 3806 V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld)); 3807 if (sc->params.fw_vers >= FW_VERSION32(1, 24, 11, 0)) 3808 nm_txq->op_pkd = 
htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR)); 3809 else 3810 nm_txq->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)); 3811 nm_txq->cntxt_id = INVALID_NM_TXQ_CNTXT_ID; 3812 3813 snprintf(name, sizeof(name), "%d", idx); 3814 oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, 3815 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "netmap tx queue"); 3816 children = SYSCTL_CHILDREN(oid); 3817 3818 SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 3819 &nm_txq->cntxt_id, 0, "SGE context id of the queue"); 3820 SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "cidx", 3821 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, &nm_txq->cidx, 0, 3822 sysctl_uint16, "I", "consumer index"); 3823 SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "pidx", 3824 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, &nm_txq->pidx, 0, 3825 sysctl_uint16, "I", "producer index"); 3826 3827 return (rc); 3828 } 3829 3830 static int 3831 free_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq) 3832 { 3833 struct adapter *sc = vi->adapter; 3834 3835 if (vi->flags & VI_INIT_DONE) 3836 MPASS(nm_txq->cntxt_id == INVALID_NM_TXQ_CNTXT_ID); 3837 else 3838 MPASS(nm_txq->cntxt_id == 0); 3839 3840 free_ring(sc, nm_txq->desc_tag, nm_txq->desc_map, nm_txq->ba, 3841 nm_txq->desc); 3842 3843 return (0); 3844 } 3845 #endif 3846 3847 /* 3848 * Returns a reasonable automatic cidx flush threshold for a given queue size. 3849 */ 3850 static u_int 3851 qsize_to_fthresh(int qsize) 3852 { 3853 u_int fthresh; 3854 3855 while (!powerof2(qsize)) 3856 qsize++; 3857 fthresh = ilog2(qsize); 3858 if (fthresh > X_CIDXFLUSHTHRESH_128) 3859 fthresh = X_CIDXFLUSHTHRESH_128; 3860 3861 return (fthresh); 3862 } 3863 3864 static int 3865 ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq) 3866 { 3867 int rc, cntxt_id; 3868 struct fw_eq_ctrl_cmd c; 3869 int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; 3870 3871 bzero(&c, sizeof(c)); 3872 3873 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST | 3874 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(sc->pf) | 3875 V_FW_EQ_CTRL_CMD_VFN(0)); 3876 c.alloc_to_len16 = htobe32(F_FW_EQ_CTRL_CMD_ALLOC | 3877 F_FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c)); 3878 c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(eq->iqid)); 3879 c.physeqid_pkd = htobe32(0); 3880 c.fetchszm_to_iqid = 3881 htobe32(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) | 3882 V_FW_EQ_CTRL_CMD_PCIECHN(eq->tx_chan) | 3883 F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(eq->iqid)); 3884 c.dcaen_to_eqsize = 3885 htobe32(V_FW_EQ_CTRL_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ? 
3886 X_FETCHBURSTMIN_64B : X_FETCHBURSTMIN_64B_T6) | 3887 V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) | 3888 V_FW_EQ_CTRL_CMD_CIDXFTHRESH(qsize_to_fthresh(qsize)) | 3889 V_FW_EQ_CTRL_CMD_EQSIZE(qsize)); 3890 c.eqaddr = htobe64(eq->ba); 3891 3892 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 3893 if (rc != 0) { 3894 device_printf(sc->dev, 3895 "failed to create control queue %d: %d\n", eq->tx_chan, rc); 3896 return (rc); 3897 } 3898 eq->flags |= EQ_ALLOCATED; 3899 3900 eq->cntxt_id = G_FW_EQ_CTRL_CMD_EQID(be32toh(c.cmpliqid_eqid)); 3901 cntxt_id = eq->cntxt_id - sc->sge.eq_start; 3902 if (cntxt_id >= sc->sge.neq) 3903 panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, 3904 cntxt_id, sc->sge.neq - 1); 3905 sc->sge.eqmap[cntxt_id] = eq; 3906 3907 return (rc); 3908 } 3909 3910 static int 3911 eth_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq) 3912 { 3913 int rc, cntxt_id; 3914 struct fw_eq_eth_cmd c; 3915 int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; 3916 3917 bzero(&c, sizeof(c)); 3918 3919 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST | 3920 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) | 3921 V_FW_EQ_ETH_CMD_VFN(0)); 3922 c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC | 3923 F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c)); 3924 c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE | 3925 F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid)); 3926 c.fetchszm_to_iqid = 3927 htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) | 3928 V_FW_EQ_ETH_CMD_PCIECHN(eq->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO | 3929 V_FW_EQ_ETH_CMD_IQID(eq->iqid)); 3930 c.dcaen_to_eqsize = 3931 htobe32(V_FW_EQ_ETH_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ? 3932 X_FETCHBURSTMIN_64B : X_FETCHBURSTMIN_64B_T6) | 3933 V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) | 3934 V_FW_EQ_ETH_CMD_EQSIZE(qsize)); 3935 c.eqaddr = htobe64(eq->ba); 3936 3937 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 3938 if (rc != 0) { 3939 device_printf(vi->dev, 3940 "failed to create Ethernet egress queue: %d\n", rc); 3941 return (rc); 3942 } 3943 eq->flags |= EQ_ALLOCATED; 3944 3945 eq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd)); 3946 eq->abs_id = G_FW_EQ_ETH_CMD_PHYSEQID(be32toh(c.physeqid_pkd)); 3947 cntxt_id = eq->cntxt_id - sc->sge.eq_start; 3948 if (cntxt_id >= sc->sge.neq) 3949 panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, 3950 cntxt_id, sc->sge.neq - 1); 3951 sc->sge.eqmap[cntxt_id] = eq; 3952 3953 return (rc); 3954 } 3955 3956 #if defined(TCP_OFFLOAD) || defined(RATELIMIT) 3957 static int 3958 ofld_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq) 3959 { 3960 int rc, cntxt_id; 3961 struct fw_eq_ofld_cmd c; 3962 int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; 3963 3964 bzero(&c, sizeof(c)); 3965 3966 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST | 3967 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(sc->pf) | 3968 V_FW_EQ_OFLD_CMD_VFN(0)); 3969 c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_ALLOC | 3970 F_FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c)); 3971 c.fetchszm_to_iqid = 3972 htonl(V_FW_EQ_OFLD_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) | 3973 V_FW_EQ_OFLD_CMD_PCIECHN(eq->tx_chan) | 3974 F_FW_EQ_OFLD_CMD_FETCHRO | V_FW_EQ_OFLD_CMD_IQID(eq->iqid)); 3975 c.dcaen_to_eqsize = 3976 htobe32(V_FW_EQ_OFLD_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ? 
3977 X_FETCHBURSTMIN_64B : X_FETCHBURSTMIN_64B_T6) | 3978 V_FW_EQ_OFLD_CMD_FBMAX(X_FETCHBURSTMAX_512B) | 3979 V_FW_EQ_OFLD_CMD_CIDXFTHRESH(qsize_to_fthresh(qsize)) | 3980 V_FW_EQ_OFLD_CMD_EQSIZE(qsize)); 3981 c.eqaddr = htobe64(eq->ba); 3982 3983 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 3984 if (rc != 0) { 3985 device_printf(vi->dev, 3986 "failed to create egress queue for TCP offload: %d\n", rc); 3987 return (rc); 3988 } 3989 eq->flags |= EQ_ALLOCATED; 3990 3991 eq->cntxt_id = G_FW_EQ_OFLD_CMD_EQID(be32toh(c.eqid_pkd)); 3992 cntxt_id = eq->cntxt_id - sc->sge.eq_start; 3993 if (cntxt_id >= sc->sge.neq) 3994 panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, 3995 cntxt_id, sc->sge.neq - 1); 3996 sc->sge.eqmap[cntxt_id] = eq; 3997 3998 return (rc); 3999 } 4000 #endif 4001 4002 static int 4003 alloc_eq(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq) 4004 { 4005 int rc, qsize; 4006 size_t len; 4007 4008 mtx_init(&eq->eq_lock, eq->lockname, NULL, MTX_DEF); 4009 4010 qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; 4011 len = qsize * EQ_ESIZE; 4012 rc = alloc_ring(sc, len, &eq->desc_tag, &eq->desc_map, 4013 &eq->ba, (void **)&eq->desc); 4014 if (rc) 4015 return (rc); 4016 4017 eq->pidx = eq->cidx = eq->dbidx = 0; 4018 /* Note that equeqidx is not used with sge_wrq (OFLD/CTRL) queues. */ 4019 eq->equeqidx = 0; 4020 eq->doorbells = sc->doorbells; 4021 4022 switch (eq->flags & EQ_TYPEMASK) { 4023 case EQ_CTRL: 4024 rc = ctrl_eq_alloc(sc, eq); 4025 break; 4026 4027 case EQ_ETH: 4028 rc = eth_eq_alloc(sc, vi, eq); 4029 break; 4030 4031 #if defined(TCP_OFFLOAD) || defined(RATELIMIT) 4032 case EQ_OFLD: 4033 rc = ofld_eq_alloc(sc, vi, eq); 4034 break; 4035 #endif 4036 4037 default: 4038 panic("%s: invalid eq type %d.", __func__, 4039 eq->flags & EQ_TYPEMASK); 4040 } 4041 if (rc != 0) { 4042 device_printf(sc->dev, 4043 "failed to allocate egress queue(%d): %d\n", 4044 eq->flags & EQ_TYPEMASK, rc); 4045 } 4046 4047 if (isset(&eq->doorbells, DOORBELL_UDB) || 4048 isset(&eq->doorbells, DOORBELL_UDBWC) || 4049 isset(&eq->doorbells, DOORBELL_WCWR)) { 4050 uint32_t s_qpp = sc->params.sge.eq_s_qpp; 4051 uint32_t mask = (1 << s_qpp) - 1; 4052 volatile uint8_t *udb; 4053 4054 udb = sc->udbs_base + UDBS_DB_OFFSET; 4055 udb += (eq->cntxt_id >> s_qpp) << PAGE_SHIFT; /* pg offset */ 4056 eq->udb_qid = eq->cntxt_id & mask; /* id in page */ 4057 if (eq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE) 4058 clrbit(&eq->doorbells, DOORBELL_WCWR); 4059 else { 4060 udb += eq->udb_qid << UDBS_SEG_SHIFT; /* seg offset */ 4061 eq->udb_qid = 0; 4062 } 4063 eq->udb = (volatile void *)udb; 4064 } 4065 4066 return (rc); 4067 } 4068 4069 static int 4070 free_eq(struct adapter *sc, struct sge_eq *eq) 4071 { 4072 int rc; 4073 4074 if (eq->flags & EQ_ALLOCATED) { 4075 switch (eq->flags & EQ_TYPEMASK) { 4076 case EQ_CTRL: 4077 rc = -t4_ctrl_eq_free(sc, sc->mbox, sc->pf, 0, 4078 eq->cntxt_id); 4079 break; 4080 4081 case EQ_ETH: 4082 rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, 4083 eq->cntxt_id); 4084 break; 4085 4086 #if defined(TCP_OFFLOAD) || defined(RATELIMIT) 4087 case EQ_OFLD: 4088 rc = -t4_ofld_eq_free(sc, sc->mbox, sc->pf, 0, 4089 eq->cntxt_id); 4090 break; 4091 #endif 4092 4093 default: 4094 panic("%s: invalid eq type %d.", __func__, 4095 eq->flags & EQ_TYPEMASK); 4096 } 4097 if (rc != 0) { 4098 device_printf(sc->dev, 4099 "failed to free egress queue (%d): %d\n", 4100 eq->flags & EQ_TYPEMASK, rc); 4101 return (rc); 4102 } 4103 eq->flags &= ~EQ_ALLOCATED; 4104 } 4105 4106 free_ring(sc, 
eq->desc_tag, eq->desc_map, eq->ba, eq->desc); 4107 4108 if (mtx_initialized(&eq->eq_lock)) 4109 mtx_destroy(&eq->eq_lock); 4110 4111 bzero(eq, sizeof(*eq)); 4112 return (0); 4113 } 4114 4115 static int 4116 alloc_wrq(struct adapter *sc, struct vi_info *vi, struct sge_wrq *wrq, 4117 struct sysctl_oid *oid) 4118 { 4119 int rc; 4120 struct sysctl_ctx_list *ctx = vi ? &vi->ctx : &sc->ctx; 4121 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 4122 4123 rc = alloc_eq(sc, vi, &wrq->eq); 4124 if (rc) 4125 return (rc); 4126 4127 wrq->adapter = sc; 4128 TASK_INIT(&wrq->wrq_tx_task, 0, wrq_tx_drain, wrq); 4129 TAILQ_INIT(&wrq->incomplete_wrs); 4130 STAILQ_INIT(&wrq->wr_list); 4131 wrq->nwr_pending = 0; 4132 wrq->ndesc_needed = 0; 4133 4134 SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD, 4135 &wrq->eq.ba, "bus address of descriptor ring"); 4136 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL, 4137 wrq->eq.sidx * EQ_ESIZE + sc->params.sge.spg_len, 4138 "desc ring size in bytes"); 4139 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 4140 &wrq->eq.cntxt_id, 0, "SGE context id of the queue"); 4141 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx", 4142 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, &wrq->eq.cidx, 0, 4143 sysctl_uint16, "I", "consumer index"); 4144 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pidx", 4145 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, &wrq->eq.pidx, 0, 4146 sysctl_uint16, "I", "producer index"); 4147 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sidx", CTLFLAG_RD, NULL, 4148 wrq->eq.sidx, "status page index"); 4149 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_direct", CTLFLAG_RD, 4150 &wrq->tx_wrs_direct, "# of work requests (direct)"); 4151 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_copied", CTLFLAG_RD, 4152 &wrq->tx_wrs_copied, "# of work requests (copied)"); 4153 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_sspace", CTLFLAG_RD, 4154 &wrq->tx_wrs_ss, "# of work requests (copied from scratch space)"); 4155 4156 return (rc); 4157 } 4158 4159 static int 4160 free_wrq(struct adapter *sc, struct sge_wrq *wrq) 4161 { 4162 int rc; 4163 4164 rc = free_eq(sc, &wrq->eq); 4165 if (rc) 4166 return (rc); 4167 4168 bzero(wrq, sizeof(*wrq)); 4169 return (0); 4170 } 4171 4172 static int 4173 alloc_txq(struct vi_info *vi, struct sge_txq *txq, int idx, 4174 struct sysctl_oid *oid) 4175 { 4176 int rc; 4177 struct port_info *pi = vi->pi; 4178 struct adapter *sc = pi->adapter; 4179 struct sge_eq *eq = &txq->eq; 4180 struct txpkts *txp; 4181 char name[16]; 4182 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 4183 4184 rc = mp_ring_alloc(&txq->r, eq->sidx, txq, eth_tx, can_resume_eth_tx, 4185 M_CXGBE, &eq->eq_lock, M_WAITOK); 4186 if (rc != 0) { 4187 device_printf(sc->dev, "failed to allocate mp_ring: %d\n", rc); 4188 return (rc); 4189 } 4190 4191 rc = alloc_eq(sc, vi, eq); 4192 if (rc != 0) { 4193 mp_ring_free(txq->r); 4194 txq->r = NULL; 4195 return (rc); 4196 } 4197 4198 /* Can't fail after this point. 
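 * Only software state, M_WAITOK allocations, and sysctl nodes follow.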
*/ 4199 4200 if (idx == 0) 4201 sc->sge.eq_base = eq->abs_id - eq->cntxt_id; 4202 else 4203 KASSERT(eq->cntxt_id + sc->sge.eq_base == eq->abs_id, 4204 ("eq_base mismatch")); 4205 KASSERT(sc->sge.eq_base == 0 || sc->flags & IS_VF, 4206 ("PF with non-zero eq_base")); 4207 4208 TASK_INIT(&txq->tx_reclaim_task, 0, tx_reclaim, eq); 4209 txq->ifp = vi->ifp; 4210 txq->gl = sglist_alloc(TX_SGL_SEGS, M_WAITOK); 4211 if (sc->flags & IS_VF) 4212 txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) | 4213 V_TXPKT_INTF(pi->tx_chan)); 4214 else 4215 txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) | 4216 V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) | 4217 V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld)); 4218 txq->tc_idx = -1; 4219 txq->sdesc = malloc(eq->sidx * sizeof(struct tx_sdesc), M_CXGBE, 4220 M_ZERO | M_WAITOK); 4221 4222 txp = &txq->txp; 4223 txp->score = 5; 4224 MPASS(nitems(txp->mb) >= sc->params.max_pkts_per_eth_tx_pkts_wr); 4225 txq->txp.max_npkt = min(nitems(txp->mb), 4226 sc->params.max_pkts_per_eth_tx_pkts_wr); 4227 4228 snprintf(name, sizeof(name), "%d", idx); 4229 oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, 4230 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "tx queue"); 4231 children = SYSCTL_CHILDREN(oid); 4232 4233 SYSCTL_ADD_UAUTO(&vi->ctx, children, OID_AUTO, "ba", CTLFLAG_RD, 4234 &eq->ba, "bus address of descriptor ring"); 4235 SYSCTL_ADD_INT(&vi->ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL, 4236 eq->sidx * EQ_ESIZE + sc->params.sge.spg_len, 4237 "desc ring size in bytes"); 4238 SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "abs_id", CTLFLAG_RD, 4239 &eq->abs_id, 0, "absolute id of the queue"); 4240 SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 4241 &eq->cntxt_id, 0, "SGE context id of the queue"); 4242 SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "cidx", 4243 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, &eq->cidx, 0, 4244 sysctl_uint16, "I", "consumer index"); 4245 SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "pidx", 4246 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, &eq->pidx, 0, 4247 sysctl_uint16, "I", "producer index"); 4248 SYSCTL_ADD_INT(&vi->ctx, children, OID_AUTO, "sidx", CTLFLAG_RD, NULL, 4249 eq->sidx, "status page index"); 4250 4251 SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "tc", 4252 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, vi, idx, sysctl_tc, 4253 "I", "traffic class (-1 means none)"); 4254 4255 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txcsum", CTLFLAG_RD, 4256 &txq->txcsum, "# of times hardware assisted with checksum"); 4257 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "vlan_insertion", 4258 CTLFLAG_RD, &txq->vlan_insertion, 4259 "# of times hardware inserted 802.1Q tag"); 4260 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "tso_wrs", CTLFLAG_RD, 4261 &txq->tso_wrs, "# of TSO work requests"); 4262 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "imm_wrs", CTLFLAG_RD, 4263 &txq->imm_wrs, "# of work requests with immediate data"); 4264 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "sgl_wrs", CTLFLAG_RD, 4265 &txq->sgl_wrs, "# of work requests with direct SGL"); 4266 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkt_wrs", CTLFLAG_RD, 4267 &txq->txpkt_wrs, "# of txpkt work requests (one pkt/WR)"); 4268 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts0_wrs", 4269 CTLFLAG_RD, &txq->txpkts0_wrs, 4270 "# of txpkts (type 0) work requests"); 4271 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts1_wrs", 4272 CTLFLAG_RD, &txq->txpkts1_wrs, 4273 "# of txpkts (type 1) work requests"); 4274 
SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts0_pkts", 4275 CTLFLAG_RD, &txq->txpkts0_pkts, 4276 "# of frames tx'd using type0 txpkts work requests"); 4277 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts1_pkts", 4278 CTLFLAG_RD, &txq->txpkts1_pkts, 4279 "# of frames tx'd using type1 txpkts work requests"); 4280 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "raw_wrs", CTLFLAG_RD, 4281 &txq->raw_wrs, "# of raw work requests (non-packets)"); 4282 4283 #ifdef KERN_TLS 4284 if (sc->flags & KERN_TLS_OK) { 4285 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, 4286 "kern_tls_records", CTLFLAG_RD, &txq->kern_tls_records, 4287 "# of NIC TLS records transmitted"); 4288 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, 4289 "kern_tls_short", CTLFLAG_RD, &txq->kern_tls_short, 4290 "# of short NIC TLS records transmitted"); 4291 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, 4292 "kern_tls_partial", CTLFLAG_RD, &txq->kern_tls_partial, 4293 "# of partial NIC TLS records transmitted"); 4294 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, 4295 "kern_tls_full", CTLFLAG_RD, &txq->kern_tls_full, 4296 "# of full NIC TLS records transmitted"); 4297 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, 4298 "kern_tls_octets", CTLFLAG_RD, &txq->kern_tls_octets, 4299 "# of payload octets in transmitted NIC TLS records"); 4300 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, 4301 "kern_tls_waste", CTLFLAG_RD, &txq->kern_tls_waste, 4302 "# of octets DMAd but not transmitted in NIC TLS records"); 4303 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, 4304 "kern_tls_options", CTLFLAG_RD, &txq->kern_tls_options, 4305 "# of NIC TLS options-only packets transmitted"); 4306 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, 4307 "kern_tls_header", CTLFLAG_RD, &txq->kern_tls_header, 4308 "# of NIC TLS header-only packets transmitted"); 4309 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, 4310 "kern_tls_fin", CTLFLAG_RD, &txq->kern_tls_fin, 4311 "# of NIC TLS FIN-only packets transmitted"); 4312 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, 4313 "kern_tls_fin_short", CTLFLAG_RD, &txq->kern_tls_fin_short, 4314 "# of NIC TLS padded FIN packets on short TLS records"); 4315 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, 4316 "kern_tls_cbc", CTLFLAG_RD, &txq->kern_tls_cbc, 4317 "# of NIC TLS sessions using AES-CBC"); 4318 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, 4319 "kern_tls_gcm", CTLFLAG_RD, &txq->kern_tls_gcm, 4320 "# of NIC TLS sessions using AES-GCM"); 4321 } 4322 #endif 4323 mp_ring_sysctls(txq->r, &vi->ctx, children); 4324 4325 return (0); 4326 } 4327 4328 static int 4329 free_txq(struct vi_info *vi, struct sge_txq *txq) 4330 { 4331 int rc; 4332 struct adapter *sc = vi->adapter; 4333 struct sge_eq *eq = &txq->eq; 4334 4335 rc = free_eq(sc, eq); 4336 if (rc) 4337 return (rc); 4338 4339 sglist_free(txq->gl); 4340 free(txq->sdesc, M_CXGBE); 4341 mp_ring_free(txq->r); 4342 4343 bzero(txq, sizeof(*txq)); 4344 return (0); 4345 } 4346 4347 static void 4348 oneseg_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error) 4349 { 4350 bus_addr_t *ba = arg; 4351 4352 KASSERT(nseg == 1, 4353 ("%s meant for single segment mappings only.", __func__)); 4354 4355 *ba = error ? 
0 : segs->ds_addr; 4356 } 4357 4358 static inline void 4359 ring_fl_db(struct adapter *sc, struct sge_fl *fl) 4360 { 4361 uint32_t n, v; 4362 4363 n = IDXDIFF(fl->pidx >> 3, fl->dbidx, fl->sidx); 4364 MPASS(n > 0); 4365 4366 wmb(); 4367 v = fl->dbval | V_PIDX(n); 4368 if (fl->udb) 4369 *fl->udb = htole32(v); 4370 else 4371 t4_write_reg(sc, sc->sge_kdoorbell_reg, v); 4372 IDXINCR(fl->dbidx, n, fl->sidx); 4373 } 4374 4375 /* 4376 * Fills up the freelist by allocating up to 'n' buffers. Buffers that are 4377 * recycled do not count towards this allocation budget. 4378 * 4379 * Returns non-zero to indicate that this freelist should be added to the list 4380 * of starving freelists. 4381 */ 4382 static int 4383 refill_fl(struct adapter *sc, struct sge_fl *fl, int n) 4384 { 4385 __be64 *d; 4386 struct fl_sdesc *sd; 4387 uintptr_t pa; 4388 caddr_t cl; 4389 struct rx_buf_info *rxb; 4390 struct cluster_metadata *clm; 4391 uint16_t max_pidx; 4392 uint16_t hw_cidx = fl->hw_cidx; /* stable snapshot */ 4393 4394 FL_LOCK_ASSERT_OWNED(fl); 4395 4396 /* 4397 * We always stop at the beginning of the hardware descriptor that's just 4398 * before the one with the hw cidx. This is to avoid hw pidx = hw cidx, 4399 * which would mean an empty freelist to the chip. 4400 */ 4401 max_pidx = __predict_false(hw_cidx == 0) ? fl->sidx - 1 : hw_cidx - 1; 4402 if (fl->pidx == max_pidx * 8) 4403 return (0); 4404 4405 d = &fl->desc[fl->pidx]; 4406 sd = &fl->sdesc[fl->pidx]; 4407 4408 while (n > 0) { 4409 4410 if (sd->cl != NULL) { 4411 4412 if (sd->nmbuf == 0) { 4413 /* 4414 * Fast recycle without involving any atomics on 4415 * the cluster's metadata (if the cluster has 4416 * metadata). This happens when all frames 4417 * received in the cluster were small enough to 4418 * fit within a single mbuf each. 4419 */ 4420 fl->cl_fast_recycled++; 4421 goto recycled; 4422 } 4423 4424 /* 4425 * Cluster is guaranteed to have metadata. Clusters 4426 * without metadata always take the fast recycle path 4427 * when they're recycled. 
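 * Drop the freelist's own reference below: if it was the last one the
 * cluster is reused in place, otherwise the stack still holds references
 * and a new cluster has to be allocated for this slot.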
4428 */ 4429 clm = cl_metadata(sd); 4430 MPASS(clm != NULL); 4431 4432 if (atomic_fetchadd_int(&clm->refcount, -1) == 1) { 4433 fl->cl_recycled++; 4434 counter_u64_add(extfree_rels, 1); 4435 goto recycled; 4436 } 4437 sd->cl = NULL; /* gave up my reference */ 4438 } 4439 MPASS(sd->cl == NULL); 4440 rxb = &sc->sge.rx_buf_info[fl->zidx]; 4441 cl = uma_zalloc(rxb->zone, M_NOWAIT); 4442 if (__predict_false(cl == NULL)) { 4443 if (fl->zidx != fl->safe_zidx) { 4444 rxb = &sc->sge.rx_buf_info[fl->safe_zidx]; 4445 cl = uma_zalloc(rxb->zone, M_NOWAIT); 4446 } 4447 if (cl == NULL) 4448 break; 4449 } 4450 fl->cl_allocated++; 4451 n--; 4452 4453 pa = pmap_kextract((vm_offset_t)cl); 4454 sd->cl = cl; 4455 sd->zidx = fl->zidx; 4456 4457 if (fl->flags & FL_BUF_PACKING) { 4458 *d = htobe64(pa | rxb->hwidx2); 4459 sd->moff = rxb->size2; 4460 } else { 4461 *d = htobe64(pa | rxb->hwidx1); 4462 sd->moff = 0; 4463 } 4464 recycled: 4465 sd->nmbuf = 0; 4466 d++; 4467 sd++; 4468 if (__predict_false((++fl->pidx & 7) == 0)) { 4469 uint16_t pidx = fl->pidx >> 3; 4470 4471 if (__predict_false(pidx == fl->sidx)) { 4472 fl->pidx = 0; 4473 pidx = 0; 4474 sd = fl->sdesc; 4475 d = fl->desc; 4476 } 4477 if (n < 8 || pidx == max_pidx) 4478 break; 4479 4480 if (IDXDIFF(pidx, fl->dbidx, fl->sidx) >= 4) 4481 ring_fl_db(sc, fl); 4482 } 4483 } 4484 4485 if ((fl->pidx >> 3) != fl->dbidx) 4486 ring_fl_db(sc, fl); 4487 4488 return (FL_RUNNING_LOW(fl) && !(fl->flags & FL_STARVING)); 4489 } 4490 4491 /* 4492 * Attempt to refill all starving freelists. 4493 */ 4494 static void 4495 refill_sfl(void *arg) 4496 { 4497 struct adapter *sc = arg; 4498 struct sge_fl *fl, *fl_temp; 4499 4500 mtx_assert(&sc->sfl_lock, MA_OWNED); 4501 TAILQ_FOREACH_SAFE(fl, &sc->sfl, link, fl_temp) { 4502 FL_LOCK(fl); 4503 refill_fl(sc, fl, 64); 4504 if (FL_NOT_RUNNING_LOW(fl) || fl->flags & FL_DOOMED) { 4505 TAILQ_REMOVE(&sc->sfl, fl, link); 4506 fl->flags &= ~FL_STARVING; 4507 } 4508 FL_UNLOCK(fl); 4509 } 4510 4511 if (!TAILQ_EMPTY(&sc->sfl)) 4512 callout_schedule(&sc->sfl_callout, hz / 5); 4513 } 4514 4515 static int 4516 alloc_fl_sdesc(struct sge_fl *fl) 4517 { 4518 4519 fl->sdesc = malloc(fl->sidx * 8 * sizeof(struct fl_sdesc), M_CXGBE, 4520 M_ZERO | M_WAITOK); 4521 4522 return (0); 4523 } 4524 4525 static void 4526 free_fl_sdesc(struct adapter *sc, struct sge_fl *fl) 4527 { 4528 struct fl_sdesc *sd; 4529 struct cluster_metadata *clm; 4530 int i; 4531 4532 sd = fl->sdesc; 4533 for (i = 0; i < fl->sidx * 8; i++, sd++) { 4534 if (sd->cl == NULL) 4535 continue; 4536 4537 if (sd->nmbuf == 0) 4538 uma_zfree(sc->sge.rx_buf_info[sd->zidx].zone, sd->cl); 4539 else if (fl->flags & FL_BUF_PACKING) { 4540 clm = cl_metadata(sd); 4541 if (atomic_fetchadd_int(&clm->refcount, -1) == 1) { 4542 uma_zfree(sc->sge.rx_buf_info[sd->zidx].zone, 4543 sd->cl); 4544 counter_u64_add(extfree_rels, 1); 4545 } 4546 } 4547 sd->cl = NULL; 4548 } 4549 4550 free(fl->sdesc, M_CXGBE); 4551 fl->sdesc = NULL; 4552 } 4553 4554 static inline void 4555 get_pkt_gl(struct mbuf *m, struct sglist *gl) 4556 { 4557 int rc; 4558 4559 M_ASSERTPKTHDR(m); 4560 4561 sglist_reset(gl); 4562 rc = sglist_append_mbuf(gl, m); 4563 if (__predict_false(rc != 0)) { 4564 panic("%s: mbuf %p (%d segs) was vetted earlier but now fails " 4565 "with %d.", __func__, m, mbuf_nsegs(m), rc); 4566 } 4567 4568 KASSERT(gl->sg_nseg == mbuf_nsegs(m), 4569 ("%s: nsegs changed for mbuf %p from %d to %d", __func__, m, 4570 mbuf_nsegs(m), gl->sg_nseg)); 4571 KASSERT(gl->sg_nseg > 0 && 4572 gl->sg_nseg <= (needs_tso(m) ? 
TX_SGL_SEGS_TSO : TX_SGL_SEGS), 4573 ("%s: %d segments, should have been 1 <= nsegs <= %d", __func__, 4574 gl->sg_nseg, needs_tso(m) ? TX_SGL_SEGS_TSO : TX_SGL_SEGS)); 4575 } 4576 4577 /* 4578 * len16 for a txpkt WR with a GL. Includes the firmware work request header. 4579 */ 4580 static inline u_int 4581 txpkt_len16(u_int nsegs, u_int tso) 4582 { 4583 u_int n; 4584 4585 MPASS(nsegs > 0); 4586 4587 nsegs--; /* first segment is part of ulptx_sgl */ 4588 n = sizeof(struct fw_eth_tx_pkt_wr) + sizeof(struct cpl_tx_pkt_core) + 4589 sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1)); 4590 if (tso) 4591 n += sizeof(struct cpl_tx_pkt_lso_core); 4592 4593 return (howmany(n, 16)); 4594 } 4595 4596 /* 4597 * len16 for a txpkt_vm WR with a GL. Includes the firmware work 4598 * request header. 4599 */ 4600 static inline u_int 4601 txpkt_vm_len16(u_int nsegs, u_int tso) 4602 { 4603 u_int n; 4604 4605 MPASS(nsegs > 0); 4606 4607 nsegs--; /* first segment is part of ulptx_sgl */ 4608 n = sizeof(struct fw_eth_tx_pkt_vm_wr) + 4609 sizeof(struct cpl_tx_pkt_core) + 4610 sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1)); 4611 if (tso) 4612 n += sizeof(struct cpl_tx_pkt_lso_core); 4613 4614 return (howmany(n, 16)); 4615 } 4616 4617 /* 4618 * len16 for a txpkts type 0 WR with a GL. Does not include the firmware work 4619 * request header. 4620 */ 4621 static inline u_int 4622 txpkts0_len16(u_int nsegs) 4623 { 4624 u_int n; 4625 4626 MPASS(nsegs > 0); 4627 4628 nsegs--; /* first segment is part of ulptx_sgl */ 4629 n = sizeof(struct ulp_txpkt) + sizeof(struct ulptx_idata) + 4630 sizeof(struct cpl_tx_pkt_core) + sizeof(struct ulptx_sgl) + 4631 8 * ((3 * nsegs) / 2 + (nsegs & 1)); 4632 4633 return (howmany(n, 16)); 4634 } 4635 4636 /* 4637 * len16 for a txpkts type 1 WR with a GL. Does not include the firmware work 4638 * request header. 4639 */ 4640 static inline u_int 4641 txpkts1_len16(void) 4642 { 4643 u_int n; 4644 4645 n = sizeof(struct cpl_tx_pkt_core) + sizeof(struct ulptx_sgl); 4646 4647 return (howmany(n, 16)); 4648 } 4649 4650 static inline u_int 4651 imm_payload(u_int ndesc) 4652 { 4653 u_int n; 4654 4655 n = ndesc * EQ_ESIZE - sizeof(struct fw_eth_tx_pkt_wr) - 4656 sizeof(struct cpl_tx_pkt_core); 4657 4658 return (n); 4659 } 4660 4661 static inline uint64_t 4662 csum_to_ctrl(struct adapter *sc, struct mbuf *m) 4663 { 4664 uint64_t ctrl; 4665 int csum_type; 4666 4667 M_ASSERTPKTHDR(m); 4668 4669 if (needs_hwcsum(m) == 0) 4670 return (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS); 4671 4672 ctrl = 0; 4673 if (needs_l3_csum(m) == 0) 4674 ctrl |= F_TXPKT_IPCSUM_DIS; 4675 switch (m->m_pkthdr.csum_flags & 4676 (CSUM_IP_TCP | CSUM_IP_UDP | CSUM_IP6_TCP | CSUM_IP6_UDP)) { 4677 case CSUM_IP_TCP: 4678 csum_type = TX_CSUM_TCPIP; 4679 break; 4680 case CSUM_IP_UDP: 4681 csum_type = TX_CSUM_UDPIP; 4682 break; 4683 case CSUM_IP6_TCP: 4684 csum_type = TX_CSUM_TCPIP6; 4685 break; 4686 case CSUM_IP6_UDP: 4687 csum_type = TX_CSUM_UDPIP6; 4688 break; 4689 default: 4690 /* needs_hwcsum told us that at least some hwcsum is needed. 
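 * No L4 checksum flag is set, so the only remaining possibility is an
 * IPv4 header checksum (CSUM_IP).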
*/ 4691 MPASS(ctrl == 0); 4692 MPASS(m->m_pkthdr.csum_flags & CSUM_IP); 4693 ctrl |= F_TXPKT_L4CSUM_DIS; 4694 csum_type = TX_CSUM_IP; 4695 break; 4696 } 4697 4698 MPASS(m->m_pkthdr.l2hlen > 0); 4699 MPASS(m->m_pkthdr.l3hlen > 0); 4700 ctrl |= V_TXPKT_CSUM_TYPE(csum_type) | 4701 V_TXPKT_IPHDR_LEN(m->m_pkthdr.l3hlen); 4702 if (chip_id(sc) <= CHELSIO_T5) 4703 ctrl |= V_TXPKT_ETHHDR_LEN(m->m_pkthdr.l2hlen - ETHER_HDR_LEN); 4704 else 4705 ctrl |= V_T6_TXPKT_ETHHDR_LEN(m->m_pkthdr.l2hlen - ETHER_HDR_LEN); 4706 4707 return (ctrl); 4708 } 4709 4710 #define VM_TX_L2HDR_LEN 16 /* ethmacdst to vlantci */ 4711 4712 /* 4713 * Write a VM txpkt WR for this packet to the hardware descriptors, update the 4714 * software descriptor, and advance the pidx. It is guaranteed that enough 4715 * descriptors are available. 4716 * 4717 * The return value is the # of hardware descriptors used. 4718 */ 4719 static u_int 4720 write_txpkt_vm_wr(struct adapter *sc, struct sge_txq *txq, struct mbuf *m0) 4721 { 4722 struct sge_eq *eq; 4723 struct fw_eth_tx_pkt_vm_wr *wr; 4724 struct tx_sdesc *txsd; 4725 struct cpl_tx_pkt_core *cpl; 4726 uint32_t ctrl; /* used in many unrelated places */ 4727 uint64_t ctrl1; 4728 int len16, ndesc, pktlen, nsegs; 4729 caddr_t dst; 4730 4731 TXQ_LOCK_ASSERT_OWNED(txq); 4732 M_ASSERTPKTHDR(m0); 4733 4734 len16 = mbuf_len16(m0); 4735 nsegs = mbuf_nsegs(m0); 4736 pktlen = m0->m_pkthdr.len; 4737 ctrl = sizeof(struct cpl_tx_pkt_core); 4738 if (needs_tso(m0)) 4739 ctrl += sizeof(struct cpl_tx_pkt_lso_core); 4740 ndesc = tx_len16_to_desc(len16); 4741 4742 /* Firmware work request header */ 4743 eq = &txq->eq; 4744 wr = (void *)&eq->desc[eq->pidx]; 4745 wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_VM_WR) | 4746 V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl)); 4747 4748 ctrl = V_FW_WR_LEN16(len16); 4749 wr->equiq_to_len16 = htobe32(ctrl); 4750 wr->r3[0] = 0; 4751 wr->r3[1] = 0; 4752 4753 /* 4754 * Copy over ethmacdst, ethmacsrc, ethtype, and vlantci. 4755 * vlantci is ignored unless the ethtype is 0x8100, so it's 4756 * simpler to always copy it rather than making it 4757 * conditional. Also, it seems that we do not have to set 4758 * vlantci or fake the ethtype when doing VLAN tag insertion. 
4759 */ 4760 m_copydata(m0, 0, VM_TX_L2HDR_LEN, wr->ethmacdst); 4761 4762 if (needs_tso(m0)) { 4763 struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); 4764 4765 KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 && 4766 m0->m_pkthdr.l4hlen > 0, 4767 ("%s: mbuf %p needs TSO but missing header lengths", 4768 __func__, m0)); 4769 4770 ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | F_LSO_FIRST_SLICE | 4771 F_LSO_LAST_SLICE | V_LSO_ETHHDR_LEN((m0->m_pkthdr.l2hlen - 4772 ETHER_HDR_LEN) >> 2) | 4773 V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) | 4774 V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2); 4775 if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr)) 4776 ctrl |= F_LSO_IPV6; 4777 4778 lso->lso_ctrl = htobe32(ctrl); 4779 lso->ipid_ofst = htobe16(0); 4780 lso->mss = htobe16(m0->m_pkthdr.tso_segsz); 4781 lso->seqno_offset = htobe32(0); 4782 lso->len = htobe32(pktlen); 4783 4784 cpl = (void *)(lso + 1); 4785 4786 txq->tso_wrs++; 4787 } else 4788 cpl = (void *)(wr + 1); 4789 4790 /* Checksum offload */ 4791 ctrl1 = csum_to_ctrl(sc, m0); 4792 if (ctrl1 != (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS)) 4793 txq->txcsum++; /* some hardware assistance provided */ 4794 4795 /* VLAN tag insertion */ 4796 if (needs_vlan_insertion(m0)) { 4797 ctrl1 |= F_TXPKT_VLAN_VLD | 4798 V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag); 4799 txq->vlan_insertion++; 4800 } 4801 4802 /* CPL header */ 4803 cpl->ctrl0 = txq->cpl_ctrl0; 4804 cpl->pack = 0; 4805 cpl->len = htobe16(pktlen); 4806 cpl->ctrl1 = htobe64(ctrl1); 4807 4808 /* SGL */ 4809 dst = (void *)(cpl + 1); 4810 4811 /* 4812 * A packet using TSO will use up an entire descriptor for the 4813 * firmware work request header, LSO CPL, and TX_PKT_XT CPL. 4814 * If this descriptor is the last descriptor in the ring, wrap 4815 * around to the front of the ring explicitly for the start of 4816 * the sgl. 4817 */ 4818 if (dst == (void *)&eq->desc[eq->sidx]) { 4819 dst = (void *)&eq->desc[0]; 4820 write_gl_to_txd(txq, m0, &dst, 0); 4821 } else 4822 write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx); 4823 txq->sgl_wrs++; 4824 txq->txpkt_wrs++; 4825 4826 txsd = &txq->sdesc[eq->pidx]; 4827 txsd->m = m0; 4828 txsd->desc_used = ndesc; 4829 4830 return (ndesc); 4831 } 4832 4833 /* 4834 * Write a raw WR to the hardware descriptors, update the software 4835 * descriptor, and advance the pidx. It is guaranteed that enough 4836 * descriptors are available. 4837 * 4838 * The return value is the # of hardware descriptors used. 4839 */ 4840 static u_int 4841 write_raw_wr(struct sge_txq *txq, void *wr, struct mbuf *m0, u_int available) 4842 { 4843 struct sge_eq *eq = &txq->eq; 4844 struct tx_sdesc *txsd; 4845 struct mbuf *m; 4846 caddr_t dst; 4847 int len16, ndesc; 4848 4849 len16 = mbuf_len16(m0); 4850 ndesc = tx_len16_to_desc(len16); 4851 MPASS(ndesc <= available); 4852 4853 dst = wr; 4854 for (m = m0; m != NULL; m = m->m_next) 4855 copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len); 4856 4857 txq->raw_wrs++; 4858 4859 txsd = &txq->sdesc[eq->pidx]; 4860 txsd->m = m0; 4861 txsd->desc_used = ndesc; 4862 4863 return (ndesc); 4864 } 4865 4866 /* 4867 * Write a txpkt WR for this packet to the hardware descriptors, update the 4868 * software descriptor, and advance the pidx. It is guaranteed that enough 4869 * descriptors are available. 4870 * 4871 * The return value is the # of hardware descriptors used. 
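 *
 * Small packets (no TSO, no unmapped mbufs) may be copied into the WR as
 * immediate data instead of being described by an SGL.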
4872 */ 4873 static u_int 4874 write_txpkt_wr(struct adapter *sc, struct sge_txq *txq, struct mbuf *m0, 4875 u_int available) 4876 { 4877 struct sge_eq *eq; 4878 struct fw_eth_tx_pkt_wr *wr; 4879 struct tx_sdesc *txsd; 4880 struct cpl_tx_pkt_core *cpl; 4881 uint32_t ctrl; /* used in many unrelated places */ 4882 uint64_t ctrl1; 4883 int len16, ndesc, pktlen, nsegs; 4884 caddr_t dst; 4885 4886 TXQ_LOCK_ASSERT_OWNED(txq); 4887 M_ASSERTPKTHDR(m0); 4888 4889 len16 = mbuf_len16(m0); 4890 nsegs = mbuf_nsegs(m0); 4891 pktlen = m0->m_pkthdr.len; 4892 ctrl = sizeof(struct cpl_tx_pkt_core); 4893 if (needs_tso(m0)) 4894 ctrl += sizeof(struct cpl_tx_pkt_lso_core); 4895 else if (!(mbuf_cflags(m0) & MC_NOMAP) && pktlen <= imm_payload(2) && 4896 available >= 2) { 4897 /* Immediate data. Recalculate len16 and set nsegs to 0. */ 4898 ctrl += pktlen; 4899 len16 = howmany(sizeof(struct fw_eth_tx_pkt_wr) + 4900 sizeof(struct cpl_tx_pkt_core) + pktlen, 16); 4901 nsegs = 0; 4902 } 4903 ndesc = tx_len16_to_desc(len16); 4904 MPASS(ndesc <= available); 4905 4906 /* Firmware work request header */ 4907 eq = &txq->eq; 4908 wr = (void *)&eq->desc[eq->pidx]; 4909 wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) | 4910 V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl)); 4911 4912 ctrl = V_FW_WR_LEN16(len16); 4913 wr->equiq_to_len16 = htobe32(ctrl); 4914 wr->r3 = 0; 4915 4916 if (needs_tso(m0)) { 4917 struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); 4918 4919 KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 && 4920 m0->m_pkthdr.l4hlen > 0, 4921 ("%s: mbuf %p needs TSO but missing header lengths", 4922 __func__, m0)); 4923 4924 ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | F_LSO_FIRST_SLICE | 4925 F_LSO_LAST_SLICE | V_LSO_ETHHDR_LEN((m0->m_pkthdr.l2hlen - 4926 ETHER_HDR_LEN) >> 2) | 4927 V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) | 4928 V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2); 4929 if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr)) 4930 ctrl |= F_LSO_IPV6; 4931 4932 lso->lso_ctrl = htobe32(ctrl); 4933 lso->ipid_ofst = htobe16(0); 4934 lso->mss = htobe16(m0->m_pkthdr.tso_segsz); 4935 lso->seqno_offset = htobe32(0); 4936 lso->len = htobe32(pktlen); 4937 4938 cpl = (void *)(lso + 1); 4939 4940 txq->tso_wrs++; 4941 } else 4942 cpl = (void *)(wr + 1); 4943 4944 /* Checksum offload */ 4945 ctrl1 = csum_to_ctrl(sc, m0); 4946 if (ctrl1 != (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS)) 4947 txq->txcsum++; /* some hardware assistance provided */ 4948 4949 /* VLAN tag insertion */ 4950 if (needs_vlan_insertion(m0)) { 4951 ctrl1 |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag); 4952 txq->vlan_insertion++; 4953 } 4954 4955 /* CPL header */ 4956 cpl->ctrl0 = txq->cpl_ctrl0; 4957 cpl->pack = 0; 4958 cpl->len = htobe16(pktlen); 4959 cpl->ctrl1 = htobe64(ctrl1); 4960 4961 /* SGL */ 4962 dst = (void *)(cpl + 1); 4963 if (nsegs > 0) { 4964 4965 write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx); 4966 txq->sgl_wrs++; 4967 } else { 4968 struct mbuf *m; 4969 4970 for (m = m0; m != NULL; m = m->m_next) { 4971 copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len); 4972 #ifdef INVARIANTS 4973 pktlen -= m->m_len; 4974 #endif 4975 } 4976 #ifdef INVARIANTS 4977 KASSERT(pktlen == 0, ("%s: %d bytes left.", __func__, pktlen)); 4978 #endif 4979 txq->imm_wrs++; 4980 } 4981 4982 txq->txpkt_wrs++; 4983 4984 txsd = &txq->sdesc[eq->pidx]; 4985 txsd->m = m0; 4986 txsd->desc_used = ndesc; 4987 4988 return (ndesc); 4989 } 4990 4991 static inline bool 4992 cmp_l2hdr(struct txpkts *txp, struct mbuf *m) 4993 { 4994 int len; 4995 4996 MPASS(txp->npkt > 0); 
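/*
 * Compare this frame's L2 header with the one saved for the coalesced run:
 * all 16 bytes (up to and including vlantci) must match if the run is VLAN
 * tagged, otherwise the plain 14-byte Ethernet header is enough.  A nonzero
 * result means the frame cannot join the run.
 */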
4997 MPASS(m->m_len >= VM_TX_L2HDR_LEN); 4998 4999 if (txp->ethtype == be16toh(ETHERTYPE_VLAN)) 5000 len = VM_TX_L2HDR_LEN; 5001 else 5002 len = sizeof(struct ether_header); 5003 5004 return (memcmp(m->m_data, &txp->ethmacdst[0], len) != 0); 5005 } 5006 5007 static inline void 5008 save_l2hdr(struct txpkts *txp, struct mbuf *m) 5009 { 5010 MPASS(m->m_len >= VM_TX_L2HDR_LEN); 5011 5012 memcpy(&txp->ethmacdst[0], mtod(m, const void *), VM_TX_L2HDR_LEN); 5013 } 5014 5015 static int 5016 add_to_txpkts_vf(struct adapter *sc, struct sge_txq *txq, struct mbuf *m, 5017 int avail, bool *send) 5018 { 5019 struct txpkts *txp = &txq->txp; 5020 5021 MPASS(sc->flags & IS_VF); 5022 5023 /* Cannot have TSO and coalesce at the same time. */ 5024 if (cannot_use_txpkts(m)) { 5025 cannot_coalesce: 5026 *send = txp->npkt > 0; 5027 return (EINVAL); 5028 } 5029 5030 /* VF allows coalescing of type 1 (1 GL) only */ 5031 if (mbuf_nsegs(m) > 1) 5032 goto cannot_coalesce; 5033 5034 *send = false; 5035 if (txp->npkt > 0) { 5036 MPASS(tx_len16_to_desc(txp->len16) <= avail); 5037 MPASS(txp->npkt < txp->max_npkt); 5038 MPASS(txp->wr_type == 1); /* VF supports type 1 only */ 5039 5040 if (tx_len16_to_desc(txp->len16 + txpkts1_len16()) > avail) { 5041 retry_after_send: 5042 *send = true; 5043 return (EAGAIN); 5044 } 5045 if (m->m_pkthdr.len + txp->plen > 65535) 5046 goto retry_after_send; 5047 if (cmp_l2hdr(txp, m)) 5048 goto retry_after_send; 5049 5050 txp->len16 += txpkts1_len16(); 5051 txp->plen += m->m_pkthdr.len; 5052 txp->mb[txp->npkt++] = m; 5053 if (txp->npkt == txp->max_npkt) 5054 *send = true; 5055 } else { 5056 txp->len16 = howmany(sizeof(struct fw_eth_tx_pkts_vm_wr), 16) + 5057 txpkts1_len16(); 5058 if (tx_len16_to_desc(txp->len16) > avail) 5059 goto cannot_coalesce; 5060 txp->npkt = 1; 5061 txp->wr_type = 1; 5062 txp->plen = m->m_pkthdr.len; 5063 txp->mb[0] = m; 5064 save_l2hdr(txp, m); 5065 } 5066 return (0); 5067 } 5068 5069 static int 5070 add_to_txpkts_pf(struct adapter *sc, struct sge_txq *txq, struct mbuf *m, 5071 int avail, bool *send) 5072 { 5073 struct txpkts *txp = &txq->txp; 5074 int nsegs; 5075 5076 MPASS(!(sc->flags & IS_VF)); 5077 5078 /* Cannot have TSO and coalesce at the same time. 
*/ 5079 if (cannot_use_txpkts(m)) { 5080 cannot_coalesce: 5081 *send = txp->npkt > 0; 5082 return (EINVAL); 5083 } 5084 5085 *send = false; 5086 nsegs = mbuf_nsegs(m); 5087 if (txp->npkt == 0) { 5088 if (m->m_pkthdr.len > 65535) 5089 goto cannot_coalesce; 5090 if (nsegs > 1) { 5091 txp->wr_type = 0; 5092 txp->len16 = 5093 howmany(sizeof(struct fw_eth_tx_pkts_wr), 16) + 5094 txpkts0_len16(nsegs); 5095 } else { 5096 txp->wr_type = 1; 5097 txp->len16 = 5098 howmany(sizeof(struct fw_eth_tx_pkts_wr), 16) + 5099 txpkts1_len16(); 5100 } 5101 if (tx_len16_to_desc(txp->len16) > avail) 5102 goto cannot_coalesce; 5103 txp->npkt = 1; 5104 txp->plen = m->m_pkthdr.len; 5105 txp->mb[0] = m; 5106 } else { 5107 MPASS(tx_len16_to_desc(txp->len16) <= avail); 5108 MPASS(txp->npkt < txp->max_npkt); 5109 5110 if (m->m_pkthdr.len + txp->plen > 65535) { 5111 retry_after_send: 5112 *send = true; 5113 return (EAGAIN); 5114 } 5115 5116 MPASS(txp->wr_type == 0 || txp->wr_type == 1); 5117 if (txp->wr_type == 0) { 5118 if (tx_len16_to_desc(txp->len16 + 5119 txpkts0_len16(nsegs)) > min(avail, SGE_MAX_WR_NDESC)) 5120 goto retry_after_send; 5121 txp->len16 += txpkts0_len16(nsegs); 5122 } else { 5123 if (nsegs != 1) 5124 goto retry_after_send; 5125 if (tx_len16_to_desc(txp->len16 + txpkts1_len16()) > 5126 avail) 5127 goto retry_after_send; 5128 txp->len16 += txpkts1_len16(); 5129 } 5130 5131 txp->plen += m->m_pkthdr.len; 5132 txp->mb[txp->npkt++] = m; 5133 if (txp->npkt == txp->max_npkt) 5134 *send = true; 5135 } 5136 return (0); 5137 } 5138 5139 /* 5140 * Write a txpkts WR for the packets in txp to the hardware descriptors, update 5141 * the software descriptor, and advance the pidx. It is guaranteed that enough 5142 * descriptors are available. 5143 * 5144 * The return value is the # of hardware descriptors used. 5145 */ 5146 static u_int 5147 write_txpkts_wr(struct adapter *sc, struct sge_txq *txq) 5148 { 5149 const struct txpkts *txp = &txq->txp; 5150 struct sge_eq *eq = &txq->eq; 5151 struct fw_eth_tx_pkts_wr *wr; 5152 struct tx_sdesc *txsd; 5153 struct cpl_tx_pkt_core *cpl; 5154 uint64_t ctrl1; 5155 int ndesc, i, checkwrap; 5156 struct mbuf *m, *last; 5157 void *flitp; 5158 5159 TXQ_LOCK_ASSERT_OWNED(txq); 5160 MPASS(txp->npkt > 0); 5161 MPASS(txp->len16 <= howmany(SGE_MAX_WR_LEN, 16)); 5162 5163 wr = (void *)&eq->desc[eq->pidx]; 5164 wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)); 5165 wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(txp->len16)); 5166 wr->plen = htobe16(txp->plen); 5167 wr->npkt = txp->npkt; 5168 wr->r3 = 0; 5169 wr->type = txp->wr_type; 5170 flitp = wr + 1; 5171 5172 /* 5173 * At this point we are 16B into a hardware descriptor. If checkwrap is 5174 * set then we know the WR is going to wrap around somewhere. We'll 5175 * check for that at appropriate points. 
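 * (The 16B already consumed is the fw_eth_tx_pkts_wr header written above.)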
5176 */ 5177 ndesc = tx_len16_to_desc(txp->len16); 5178 last = NULL; 5179 checkwrap = eq->sidx - ndesc < eq->pidx; 5180 for (i = 0; i < txp->npkt; i++) { 5181 m = txp->mb[i]; 5182 if (txp->wr_type == 0) { 5183 struct ulp_txpkt *ulpmc; 5184 struct ulptx_idata *ulpsc; 5185 5186 /* ULP master command */ 5187 ulpmc = flitp; 5188 ulpmc->cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) | 5189 V_ULP_TXPKT_DEST(0) | V_ULP_TXPKT_FID(eq->iqid)); 5190 ulpmc->len = htobe32(txpkts0_len16(mbuf_nsegs(m))); 5191 5192 /* ULP subcommand */ 5193 ulpsc = (void *)(ulpmc + 1); 5194 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) | 5195 F_ULP_TX_SC_MORE); 5196 ulpsc->len = htobe32(sizeof(struct cpl_tx_pkt_core)); 5197 5198 cpl = (void *)(ulpsc + 1); 5199 if (checkwrap && 5200 (uintptr_t)cpl == (uintptr_t)&eq->desc[eq->sidx]) 5201 cpl = (void *)&eq->desc[0]; 5202 } else { 5203 cpl = flitp; 5204 } 5205 5206 /* Checksum offload */ 5207 ctrl1 = csum_to_ctrl(sc, m); 5208 if (ctrl1 != (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS)) 5209 txq->txcsum++; /* some hardware assistance provided */ 5210 5211 /* VLAN tag insertion */ 5212 if (needs_vlan_insertion(m)) { 5213 ctrl1 |= F_TXPKT_VLAN_VLD | 5214 V_TXPKT_VLAN(m->m_pkthdr.ether_vtag); 5215 txq->vlan_insertion++; 5216 } 5217 5218 /* CPL header */ 5219 cpl->ctrl0 = txq->cpl_ctrl0; 5220 cpl->pack = 0; 5221 cpl->len = htobe16(m->m_pkthdr.len); 5222 cpl->ctrl1 = htobe64(ctrl1); 5223 5224 flitp = cpl + 1; 5225 if (checkwrap && 5226 (uintptr_t)flitp == (uintptr_t)&eq->desc[eq->sidx]) 5227 flitp = (void *)&eq->desc[0]; 5228 5229 write_gl_to_txd(txq, m, (caddr_t *)(&flitp), checkwrap); 5230 5231 if (last != NULL) 5232 last->m_nextpkt = m; 5233 last = m; 5234 } 5235 5236 txq->sgl_wrs++; 5237 if (txp->wr_type == 0) { 5238 txq->txpkts0_pkts += txp->npkt; 5239 txq->txpkts0_wrs++; 5240 } else { 5241 txq->txpkts1_pkts += txp->npkt; 5242 txq->txpkts1_wrs++; 5243 } 5244 5245 txsd = &txq->sdesc[eq->pidx]; 5246 txsd->m = txp->mb[0]; 5247 txsd->desc_used = ndesc; 5248 5249 return (ndesc); 5250 } 5251 5252 static u_int 5253 write_txpkts_vm_wr(struct adapter *sc, struct sge_txq *txq) 5254 { 5255 const struct txpkts *txp = &txq->txp; 5256 struct sge_eq *eq = &txq->eq; 5257 struct fw_eth_tx_pkts_vm_wr *wr; 5258 struct tx_sdesc *txsd; 5259 struct cpl_tx_pkt_core *cpl; 5260 uint64_t ctrl1; 5261 int ndesc, i; 5262 struct mbuf *m, *last; 5263 void *flitp; 5264 5265 TXQ_LOCK_ASSERT_OWNED(txq); 5266 MPASS(txp->npkt > 0); 5267 MPASS(txp->wr_type == 1); /* VF supports type 1 only */ 5268 MPASS(txp->mb[0] != NULL); 5269 MPASS(txp->len16 <= howmany(SGE_MAX_WR_LEN, 16)); 5270 5271 wr = (void *)&eq->desc[eq->pidx]; 5272 wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_VM_WR)); 5273 wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(txp->len16)); 5274 wr->r3 = 0; 5275 wr->plen = htobe16(txp->plen); 5276 wr->npkt = txp->npkt; 5277 wr->r4 = 0; 5278 memcpy(&wr->ethmacdst[0], &txp->ethmacdst[0], 16); 5279 flitp = wr + 1; 5280 5281 /* 5282 * At this point we are 32B into a hardware descriptor. Each mbuf in 5283 * the WR will take 32B so we check for the end of the descriptor ring 5284 * before writing odd mbufs (mb[1], 3, 5, ..) 
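 * (each member is a 16B cpl_tx_pkt_core followed by a 16B single-segment
 * SGL, hence 32B per mbuf).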
5285 */ 5286 ndesc = tx_len16_to_desc(txp->len16); 5287 last = NULL; 5288 for (i = 0; i < txp->npkt; i++) { 5289 m = txp->mb[i]; 5290 if (i & 1 && (uintptr_t)flitp == (uintptr_t)&eq->desc[eq->sidx]) 5291 flitp = &eq->desc[0]; 5292 cpl = flitp; 5293 5294 /* Checksum offload */ 5295 ctrl1 = csum_to_ctrl(sc, m); 5296 if (ctrl1 != (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS)) 5297 txq->txcsum++; /* some hardware assistance provided */ 5298 5299 /* VLAN tag insertion */ 5300 if (needs_vlan_insertion(m)) { 5301 ctrl1 |= F_TXPKT_VLAN_VLD | 5302 V_TXPKT_VLAN(m->m_pkthdr.ether_vtag); 5303 txq->vlan_insertion++; 5304 } 5305 5306 /* CPL header */ 5307 cpl->ctrl0 = txq->cpl_ctrl0; 5308 cpl->pack = 0; 5309 cpl->len = htobe16(m->m_pkthdr.len); 5310 cpl->ctrl1 = htobe64(ctrl1); 5311 5312 flitp = cpl + 1; 5313 MPASS(mbuf_nsegs(m) == 1); 5314 write_gl_to_txd(txq, m, (caddr_t *)(&flitp), 0); 5315 5316 if (last != NULL) 5317 last->m_nextpkt = m; 5318 last = m; 5319 } 5320 5321 txq->sgl_wrs++; 5322 txq->txpkts1_pkts += txp->npkt; 5323 txq->txpkts1_wrs++; 5324 5325 txsd = &txq->sdesc[eq->pidx]; 5326 txsd->m = txp->mb[0]; 5327 txsd->desc_used = ndesc; 5328 5329 return (ndesc); 5330 } 5331 5332 /* 5333 * If the SGL ends on an address that is not 16 byte aligned, this function will 5334 * add a 0 filled flit at the end. 5335 */ 5336 static void 5337 write_gl_to_txd(struct sge_txq *txq, struct mbuf *m, caddr_t *to, int checkwrap) 5338 { 5339 struct sge_eq *eq = &txq->eq; 5340 struct sglist *gl = txq->gl; 5341 struct sglist_seg *seg; 5342 __be64 *flitp, *wrap; 5343 struct ulptx_sgl *usgl; 5344 int i, nflits, nsegs; 5345 5346 KASSERT(((uintptr_t)(*to) & 0xf) == 0, 5347 ("%s: SGL must start at a 16 byte boundary: %p", __func__, *to)); 5348 MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]); 5349 MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]); 5350 5351 get_pkt_gl(m, gl); 5352 nsegs = gl->sg_nseg; 5353 MPASS(nsegs > 0); 5354 5355 nflits = (3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1) + 2; 5356 flitp = (__be64 *)(*to); 5357 wrap = (__be64 *)(&eq->desc[eq->sidx]); 5358 seg = &gl->sg_segs[0]; 5359 usgl = (void *)flitp; 5360 5361 /* 5362 * We start at a 16 byte boundary somewhere inside the tx descriptor 5363 * ring, so we're at least 16 bytes away from the status page. There is 5364 * no chance of a wrap around in the middle of usgl (which is 16 bytes). 
5365 */ 5366 5367 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | 5368 V_ULPTX_NSGE(nsegs)); 5369 usgl->len0 = htobe32(seg->ss_len); 5370 usgl->addr0 = htobe64(seg->ss_paddr); 5371 seg++; 5372 5373 if (checkwrap == 0 || (uintptr_t)(flitp + nflits) <= (uintptr_t)wrap) { 5374 5375 /* Won't wrap around at all */ 5376 5377 for (i = 0; i < nsegs - 1; i++, seg++) { 5378 usgl->sge[i / 2].len[i & 1] = htobe32(seg->ss_len); 5379 usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ss_paddr); 5380 } 5381 if (i & 1) 5382 usgl->sge[i / 2].len[1] = htobe32(0); 5383 flitp += nflits; 5384 } else { 5385 5386 /* Will wrap somewhere in the rest of the SGL */ 5387 5388 /* 2 flits already written, write the rest flit by flit */ 5389 flitp = (void *)(usgl + 1); 5390 for (i = 0; i < nflits - 2; i++) { 5391 if (flitp == wrap) 5392 flitp = (void *)eq->desc; 5393 *flitp++ = get_flit(seg, nsegs - 1, i); 5394 } 5395 } 5396 5397 if (nflits & 1) { 5398 MPASS(((uintptr_t)flitp) & 0xf); 5399 *flitp++ = 0; 5400 } 5401 5402 MPASS((((uintptr_t)flitp) & 0xf) == 0); 5403 if (__predict_false(flitp == wrap)) 5404 *to = (void *)eq->desc; 5405 else 5406 *to = (void *)flitp; 5407 } 5408 5409 static inline void 5410 copy_to_txd(struct sge_eq *eq, caddr_t from, caddr_t *to, int len) 5411 { 5412 5413 MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]); 5414 MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]); 5415 5416 if (__predict_true((uintptr_t)(*to) + len <= 5417 (uintptr_t)&eq->desc[eq->sidx])) { 5418 bcopy(from, *to, len); 5419 (*to) += len; 5420 } else { 5421 int portion = (uintptr_t)&eq->desc[eq->sidx] - (uintptr_t)(*to); 5422 5423 bcopy(from, *to, portion); 5424 from += portion; 5425 portion = len - portion; /* remaining */ 5426 bcopy(from, (void *)eq->desc, portion); 5427 (*to) = (caddr_t)eq->desc + portion; 5428 } 5429 } 5430 5431 static inline void 5432 ring_eq_db(struct adapter *sc, struct sge_eq *eq, u_int n) 5433 { 5434 u_int db; 5435 5436 MPASS(n > 0); 5437 5438 db = eq->doorbells; 5439 if (n > 1) 5440 clrbit(&db, DOORBELL_WCWR); 5441 wmb(); 5442 5443 switch (ffs(db) - 1) { 5444 case DOORBELL_UDB: 5445 *eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(n)); 5446 break; 5447 5448 case DOORBELL_WCWR: { 5449 volatile uint64_t *dst, *src; 5450 int i; 5451 5452 /* 5453 * Queues whose 128B doorbell segment fits in the page do not 5454 * use relative qid (udb_qid is always 0). Only queues with 5455 * doorbell segments can do WCWR. 
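 * A WCWR pushes the whole 64B descriptor through the write-combined
 * doorbell window instead of just writing the new pidx.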
5456 */ 5457 KASSERT(eq->udb_qid == 0 && n == 1, 5458 ("%s: inappropriate doorbell (0x%x, %d, %d) for eq %p", 5459 __func__, eq->doorbells, n, eq->dbidx, eq)); 5460 5461 dst = (volatile void *)((uintptr_t)eq->udb + UDBS_WR_OFFSET - 5462 UDBS_DB_OFFSET); 5463 i = eq->dbidx; 5464 src = (void *)&eq->desc[i]; 5465 while (src != (void *)&eq->desc[i + 1]) 5466 *dst++ = *src++; 5467 wmb(); 5468 break; 5469 } 5470 5471 case DOORBELL_UDBWC: 5472 *eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(n)); 5473 wmb(); 5474 break; 5475 5476 case DOORBELL_KDB: 5477 t4_write_reg(sc, sc->sge_kdoorbell_reg, 5478 V_QID(eq->cntxt_id) | V_PIDX(n)); 5479 break; 5480 } 5481 5482 IDXINCR(eq->dbidx, n, eq->sidx); 5483 } 5484 5485 static inline u_int 5486 reclaimable_tx_desc(struct sge_eq *eq) 5487 { 5488 uint16_t hw_cidx; 5489 5490 hw_cidx = read_hw_cidx(eq); 5491 return (IDXDIFF(hw_cidx, eq->cidx, eq->sidx)); 5492 } 5493 5494 static inline u_int 5495 total_available_tx_desc(struct sge_eq *eq) 5496 { 5497 uint16_t hw_cidx, pidx; 5498 5499 hw_cidx = read_hw_cidx(eq); 5500 pidx = eq->pidx; 5501 5502 if (pidx == hw_cidx) 5503 return (eq->sidx - 1); 5504 else 5505 return (IDXDIFF(hw_cidx, pidx, eq->sidx) - 1); 5506 } 5507 5508 static inline uint16_t 5509 read_hw_cidx(struct sge_eq *eq) 5510 { 5511 struct sge_qstat *spg = (void *)&eq->desc[eq->sidx]; 5512 uint16_t cidx = spg->cidx; /* stable snapshot */ 5513 5514 return (be16toh(cidx)); 5515 } 5516 5517 /* 5518 * Reclaim 'n' descriptors approximately. 5519 */ 5520 static u_int 5521 reclaim_tx_descs(struct sge_txq *txq, u_int n) 5522 { 5523 struct tx_sdesc *txsd; 5524 struct sge_eq *eq = &txq->eq; 5525 u_int can_reclaim, reclaimed; 5526 5527 TXQ_LOCK_ASSERT_OWNED(txq); 5528 MPASS(n > 0); 5529 5530 reclaimed = 0; 5531 can_reclaim = reclaimable_tx_desc(eq); 5532 while (can_reclaim && reclaimed < n) { 5533 int ndesc; 5534 struct mbuf *m, *nextpkt; 5535 5536 txsd = &txq->sdesc[eq->cidx]; 5537 ndesc = txsd->desc_used; 5538 5539 /* Firmware doesn't return "partial" credits. 
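 * Credits come back in whole-WR units, so the full desc_used recorded in
 * the software descriptor is reclaimable at this point.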
*/ 5540 KASSERT(can_reclaim >= ndesc, 5541 ("%s: unexpected number of credits: %d, %d", 5542 __func__, can_reclaim, ndesc)); 5543 KASSERT(ndesc != 0, 5544 ("%s: descriptor with no credits: cidx %d", 5545 __func__, eq->cidx)); 5546 5547 for (m = txsd->m; m != NULL; m = nextpkt) { 5548 nextpkt = m->m_nextpkt; 5549 m->m_nextpkt = NULL; 5550 m_freem(m); 5551 } 5552 reclaimed += ndesc; 5553 can_reclaim -= ndesc; 5554 IDXINCR(eq->cidx, ndesc, eq->sidx); 5555 } 5556 5557 return (reclaimed); 5558 } 5559 5560 static void 5561 tx_reclaim(void *arg, int n) 5562 { 5563 struct sge_txq *txq = arg; 5564 struct sge_eq *eq = &txq->eq; 5565 5566 do { 5567 if (TXQ_TRYLOCK(txq) == 0) 5568 break; 5569 n = reclaim_tx_descs(txq, 32); 5570 if (eq->cidx == eq->pidx) 5571 eq->equeqidx = eq->pidx; 5572 TXQ_UNLOCK(txq); 5573 } while (n > 0); 5574 } 5575 5576 static __be64 5577 get_flit(struct sglist_seg *segs, int nsegs, int idx) 5578 { 5579 int i = (idx / 3) * 2; 5580 5581 switch (idx % 3) { 5582 case 0: { 5583 uint64_t rc; 5584 5585 rc = (uint64_t)segs[i].ss_len << 32; 5586 if (i + 1 < nsegs) 5587 rc |= (uint64_t)(segs[i + 1].ss_len); 5588 5589 return (htobe64(rc)); 5590 } 5591 case 1: 5592 return (htobe64(segs[i].ss_paddr)); 5593 case 2: 5594 return (htobe64(segs[i + 1].ss_paddr)); 5595 } 5596 5597 return (0); 5598 } 5599 5600 static int 5601 find_refill_source(struct adapter *sc, int maxp, bool packing) 5602 { 5603 int i, zidx = -1; 5604 struct rx_buf_info *rxb = &sc->sge.rx_buf_info[0]; 5605 5606 if (packing) { 5607 for (i = 0; i < SW_ZONE_SIZES; i++, rxb++) { 5608 if (rxb->hwidx2 == -1) 5609 continue; 5610 if (rxb->size1 < PAGE_SIZE && 5611 rxb->size1 < largest_rx_cluster) 5612 continue; 5613 if (rxb->size1 > largest_rx_cluster) 5614 break; 5615 MPASS(rxb->size1 - rxb->size2 >= CL_METADATA_SIZE); 5616 if (rxb->size2 >= maxp) 5617 return (i); 5618 zidx = i; 5619 } 5620 } else { 5621 for (i = 0; i < SW_ZONE_SIZES; i++, rxb++) { 5622 if (rxb->hwidx1 == -1) 5623 continue; 5624 if (rxb->size1 > largest_rx_cluster) 5625 break; 5626 if (rxb->size1 >= maxp) 5627 return (i); 5628 zidx = i; 5629 } 5630 } 5631 5632 return (zidx); 5633 } 5634 5635 static void 5636 add_fl_to_sfl(struct adapter *sc, struct sge_fl *fl) 5637 { 5638 mtx_lock(&sc->sfl_lock); 5639 FL_LOCK(fl); 5640 if ((fl->flags & FL_DOOMED) == 0) { 5641 fl->flags |= FL_STARVING; 5642 TAILQ_INSERT_TAIL(&sc->sfl, fl, link); 5643 callout_reset(&sc->sfl_callout, hz / 5, refill_sfl, sc); 5644 } 5645 FL_UNLOCK(fl); 5646 mtx_unlock(&sc->sfl_lock); 5647 } 5648 5649 static void 5650 handle_wrq_egr_update(struct adapter *sc, struct sge_eq *eq) 5651 { 5652 struct sge_wrq *wrq = (void *)eq; 5653 5654 atomic_readandclear_int(&eq->equiq); 5655 taskqueue_enqueue(sc->tq[eq->tx_chan], &wrq->wrq_tx_task); 5656 } 5657 5658 static void 5659 handle_eth_egr_update(struct adapter *sc, struct sge_eq *eq) 5660 { 5661 struct sge_txq *txq = (void *)eq; 5662 5663 MPASS((eq->flags & EQ_TYPEMASK) == EQ_ETH); 5664 5665 atomic_readandclear_int(&eq->equiq); 5666 if (mp_ring_is_idle(txq->r)) 5667 taskqueue_enqueue(sc->tq[eq->tx_chan], &txq->tx_reclaim_task); 5668 else 5669 mp_ring_check_drainage(txq->r, 64); 5670 } 5671 5672 static int 5673 handle_sge_egr_update(struct sge_iq *iq, const struct rss_header *rss, 5674 struct mbuf *m) 5675 { 5676 const struct cpl_sge_egr_update *cpl = (const void *)(rss + 1); 5677 unsigned int qid = G_EGR_QID(ntohl(cpl->opcode_qid)); 5678 struct adapter *sc = iq->adapter; 5679 struct sge *s = &sc->sge; 5680 struct sge_eq *eq; 5681 static void (*h[])(struct adapter 
*, struct sge_eq *) = {NULL, 5682 &handle_wrq_egr_update, &handle_eth_egr_update, 5683 &handle_wrq_egr_update}; 5684 5685 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__, 5686 rss->opcode)); 5687 5688 eq = s->eqmap[qid - s->eq_start - s->eq_base]; 5689 (*h[eq->flags & EQ_TYPEMASK])(sc, eq); 5690 5691 return (0); 5692 } 5693 5694 /* handle_fw_msg works for both fw4_msg and fw6_msg because this is valid */ 5695 CTASSERT(offsetof(struct cpl_fw4_msg, data) == \ 5696 offsetof(struct cpl_fw6_msg, data)); 5697 5698 static int 5699 handle_fw_msg(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 5700 { 5701 struct adapter *sc = iq->adapter; 5702 const struct cpl_fw6_msg *cpl = (const void *)(rss + 1); 5703 5704 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__, 5705 rss->opcode)); 5706 5707 if (cpl->type == FW_TYPE_RSSCPL || cpl->type == FW6_TYPE_RSSCPL) { 5708 const struct rss_header *rss2; 5709 5710 rss2 = (const struct rss_header *)&cpl->data[0]; 5711 return (t4_cpl_handler[rss2->opcode](iq, rss2, m)); 5712 } 5713 5714 return (t4_fw_msg_handler[cpl->type](sc, &cpl->data[0])); 5715 } 5716 5717 /** 5718 * t4_handle_wrerr_rpl - process a FW work request error message 5719 * @adap: the adapter 5720 * @rpl: start of the FW message 5721 */ 5722 static int 5723 t4_handle_wrerr_rpl(struct adapter *adap, const __be64 *rpl) 5724 { 5725 u8 opcode = *(const u8 *)rpl; 5726 const struct fw_error_cmd *e = (const void *)rpl; 5727 unsigned int i; 5728 5729 if (opcode != FW_ERROR_CMD) { 5730 log(LOG_ERR, 5731 "%s: Received WRERR_RPL message with opcode %#x\n", 5732 device_get_nameunit(adap->dev), opcode); 5733 return (EINVAL); 5734 } 5735 log(LOG_ERR, "%s: FW_ERROR (%s) ", device_get_nameunit(adap->dev), 5736 G_FW_ERROR_CMD_FATAL(be32toh(e->op_to_type)) ? "fatal" : 5737 "non-fatal"); 5738 switch (G_FW_ERROR_CMD_TYPE(be32toh(e->op_to_type))) { 5739 case FW_ERROR_TYPE_EXCEPTION: 5740 log(LOG_ERR, "exception info:\n"); 5741 for (i = 0; i < nitems(e->u.exception.info); i++) 5742 log(LOG_ERR, "%s%08x", i == 0 ? "\t" : " ", 5743 be32toh(e->u.exception.info[i])); 5744 log(LOG_ERR, "\n"); 5745 break; 5746 case FW_ERROR_TYPE_HWMODULE: 5747 log(LOG_ERR, "HW module regaddr %08x regval %08x\n", 5748 be32toh(e->u.hwmodule.regaddr), 5749 be32toh(e->u.hwmodule.regval)); 5750 break; 5751 case FW_ERROR_TYPE_WR: 5752 log(LOG_ERR, "WR cidx %d PF %d VF %d eqid %d hdr:\n", 5753 be16toh(e->u.wr.cidx), 5754 G_FW_ERROR_CMD_PFN(be16toh(e->u.wr.pfn_vfn)), 5755 G_FW_ERROR_CMD_VFN(be16toh(e->u.wr.pfn_vfn)), 5756 be32toh(e->u.wr.eqid)); 5757 for (i = 0; i < nitems(e->u.wr.wrhdr); i++) 5758 log(LOG_ERR, "%s%02x", i == 0 ? "\t" : " ", 5759 e->u.wr.wrhdr[i]); 5760 log(LOG_ERR, "\n"); 5761 break; 5762 case FW_ERROR_TYPE_ACL: 5763 log(LOG_ERR, "ACL cidx %d PF %d VF %d eqid %d %s", 5764 be16toh(e->u.acl.cidx), 5765 G_FW_ERROR_CMD_PFN(be16toh(e->u.acl.pfn_vfn)), 5766 G_FW_ERROR_CMD_VFN(be16toh(e->u.acl.pfn_vfn)), 5767 be32toh(e->u.acl.eqid), 5768 G_FW_ERROR_CMD_MV(be16toh(e->u.acl.mv_pkd)) ? 
"vlanid" : 5769 "MAC"); 5770 for (i = 0; i < nitems(e->u.acl.val); i++) 5771 log(LOG_ERR, " %02x", e->u.acl.val[i]); 5772 log(LOG_ERR, "\n"); 5773 break; 5774 default: 5775 log(LOG_ERR, "type %#x\n", 5776 G_FW_ERROR_CMD_TYPE(be32toh(e->op_to_type))); 5777 return (EINVAL); 5778 } 5779 return (0); 5780 } 5781 5782 static int 5783 sysctl_uint16(SYSCTL_HANDLER_ARGS) 5784 { 5785 uint16_t *id = arg1; 5786 int i = *id; 5787 5788 return sysctl_handle_int(oidp, &i, 0, req); 5789 } 5790 5791 static inline bool 5792 bufidx_used(struct adapter *sc, int idx) 5793 { 5794 struct rx_buf_info *rxb = &sc->sge.rx_buf_info[0]; 5795 int i; 5796 5797 for (i = 0; i < SW_ZONE_SIZES; i++, rxb++) { 5798 if (rxb->size1 > largest_rx_cluster) 5799 continue; 5800 if (rxb->hwidx1 == idx || rxb->hwidx2 == idx) 5801 return (true); 5802 } 5803 5804 return (false); 5805 } 5806 5807 static int 5808 sysctl_bufsizes(SYSCTL_HANDLER_ARGS) 5809 { 5810 struct adapter *sc = arg1; 5811 struct sge_params *sp = &sc->params.sge; 5812 int i, rc; 5813 struct sbuf sb; 5814 char c; 5815 5816 sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND); 5817 for (i = 0; i < SGE_FLBUF_SIZES; i++) { 5818 if (bufidx_used(sc, i)) 5819 c = '*'; 5820 else 5821 c = '\0'; 5822 5823 sbuf_printf(&sb, "%u%c ", sp->sge_fl_buffer_size[i], c); 5824 } 5825 sbuf_trim(&sb); 5826 sbuf_finish(&sb); 5827 rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); 5828 sbuf_delete(&sb); 5829 return (rc); 5830 } 5831 5832 #ifdef RATELIMIT 5833 /* 5834 * len16 for a txpkt WR with a GL. Includes the firmware work request header. 5835 */ 5836 static inline u_int 5837 txpkt_eo_len16(u_int nsegs, u_int immhdrs, u_int tso) 5838 { 5839 u_int n; 5840 5841 MPASS(immhdrs > 0); 5842 5843 n = roundup2(sizeof(struct fw_eth_tx_eo_wr) + 5844 sizeof(struct cpl_tx_pkt_core) + immhdrs, 16); 5845 if (__predict_false(nsegs == 0)) 5846 goto done; 5847 5848 nsegs--; /* first segment is part of ulptx_sgl */ 5849 n += sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1)); 5850 if (tso) 5851 n += sizeof(struct cpl_tx_pkt_lso_core); 5852 5853 done: 5854 return (howmany(n, 16)); 5855 } 5856 5857 #define ETID_FLOWC_NPARAMS 6 5858 #define ETID_FLOWC_LEN (roundup2((sizeof(struct fw_flowc_wr) + \ 5859 ETID_FLOWC_NPARAMS * sizeof(struct fw_flowc_mnemval)), 16)) 5860 #define ETID_FLOWC_LEN16 (howmany(ETID_FLOWC_LEN, 16)) 5861 5862 static int 5863 send_etid_flowc_wr(struct cxgbe_rate_tag *cst, struct port_info *pi, 5864 struct vi_info *vi) 5865 { 5866 struct wrq_cookie cookie; 5867 u_int pfvf = pi->adapter->pf << S_FW_VIID_PFN; 5868 struct fw_flowc_wr *flowc; 5869 5870 mtx_assert(&cst->lock, MA_OWNED); 5871 MPASS((cst->flags & (EO_FLOWC_PENDING | EO_FLOWC_RPL_PENDING)) == 5872 EO_FLOWC_PENDING); 5873 5874 flowc = start_wrq_wr(cst->eo_txq, ETID_FLOWC_LEN16, &cookie); 5875 if (__predict_false(flowc == NULL)) 5876 return (ENOMEM); 5877 5878 bzero(flowc, ETID_FLOWC_LEN); 5879 flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) | 5880 V_FW_FLOWC_WR_NPARAMS(ETID_FLOWC_NPARAMS) | V_FW_WR_COMPL(0)); 5881 flowc->flowid_len16 = htonl(V_FW_WR_LEN16(ETID_FLOWC_LEN16) | 5882 V_FW_WR_FLOWID(cst->etid)); 5883 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; 5884 flowc->mnemval[0].val = htobe32(pfvf); 5885 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; 5886 flowc->mnemval[1].val = htobe32(pi->tx_chan); 5887 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; 5888 flowc->mnemval[2].val = htobe32(pi->tx_chan); 5889 flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; 5890 flowc->mnemval[3].val = htobe32(cst->iqid); 
5891 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_EOSTATE; 5892 flowc->mnemval[4].val = htobe32(FW_FLOWC_MNEM_EOSTATE_ESTABLISHED); 5893 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS; 5894 flowc->mnemval[5].val = htobe32(cst->schedcl); 5895 5896 commit_wrq_wr(cst->eo_txq, flowc, &cookie); 5897 5898 cst->flags &= ~EO_FLOWC_PENDING; 5899 cst->flags |= EO_FLOWC_RPL_PENDING; 5900 MPASS(cst->tx_credits >= ETID_FLOWC_LEN16); /* flowc is first WR. */ 5901 cst->tx_credits -= ETID_FLOWC_LEN16; 5902 5903 return (0); 5904 } 5905 5906 #define ETID_FLUSH_LEN16 (howmany(sizeof (struct fw_flowc_wr), 16)) 5907 5908 void 5909 send_etid_flush_wr(struct cxgbe_rate_tag *cst) 5910 { 5911 struct fw_flowc_wr *flowc; 5912 struct wrq_cookie cookie; 5913 5914 mtx_assert(&cst->lock, MA_OWNED); 5915 5916 flowc = start_wrq_wr(cst->eo_txq, ETID_FLUSH_LEN16, &cookie); 5917 if (__predict_false(flowc == NULL)) 5918 CXGBE_UNIMPLEMENTED(__func__); 5919 5920 bzero(flowc, ETID_FLUSH_LEN16 * 16); 5921 flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) | 5922 V_FW_FLOWC_WR_NPARAMS(0) | F_FW_WR_COMPL); 5923 flowc->flowid_len16 = htobe32(V_FW_WR_LEN16(ETID_FLUSH_LEN16) | 5924 V_FW_WR_FLOWID(cst->etid)); 5925 5926 commit_wrq_wr(cst->eo_txq, flowc, &cookie); 5927 5928 cst->flags |= EO_FLUSH_RPL_PENDING; 5929 MPASS(cst->tx_credits >= ETID_FLUSH_LEN16); 5930 cst->tx_credits -= ETID_FLUSH_LEN16; 5931 cst->ncompl++; 5932 } 5933 5934 static void 5935 write_ethofld_wr(struct cxgbe_rate_tag *cst, struct fw_eth_tx_eo_wr *wr, 5936 struct mbuf *m0, int compl) 5937 { 5938 struct cpl_tx_pkt_core *cpl; 5939 uint64_t ctrl1; 5940 uint32_t ctrl; /* used in many unrelated places */ 5941 int len16, pktlen, nsegs, immhdrs; 5942 caddr_t dst; 5943 uintptr_t p; 5944 struct ulptx_sgl *usgl; 5945 struct sglist sg; 5946 struct sglist_seg segs[38]; /* XXX: find real limit. 
	mtx_assert(&cst->lock, MA_OWNED);
	M_ASSERTPKTHDR(m0);
	KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 &&
	    m0->m_pkthdr.l4hlen > 0,
	    ("%s: ethofld mbuf %p is missing header lengths", __func__, m0));

	len16 = mbuf_eo_len16(m0);
	nsegs = mbuf_eo_nsegs(m0);
	pktlen = m0->m_pkthdr.len;
	ctrl = sizeof(struct cpl_tx_pkt_core);
	if (needs_tso(m0))
		ctrl += sizeof(struct cpl_tx_pkt_lso_core);
	immhdrs = m0->m_pkthdr.l2hlen + m0->m_pkthdr.l3hlen + m0->m_pkthdr.l4hlen;
	ctrl += immhdrs;

	wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_EO_WR) |
	    V_FW_ETH_TX_EO_WR_IMMDLEN(ctrl) | V_FW_WR_COMPL(!!compl));
	wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(len16) |
	    V_FW_WR_FLOWID(cst->etid));
	wr->r3 = 0;
	if (needs_udp_csum(m0)) {
		wr->u.udpseg.type = FW_ETH_TX_EO_TYPE_UDPSEG;
		wr->u.udpseg.ethlen = m0->m_pkthdr.l2hlen;
		wr->u.udpseg.iplen = htobe16(m0->m_pkthdr.l3hlen);
		wr->u.udpseg.udplen = m0->m_pkthdr.l4hlen;
		wr->u.udpseg.rtplen = 0;
		wr->u.udpseg.r4 = 0;
		wr->u.udpseg.mss = htobe16(pktlen - immhdrs);
		wr->u.udpseg.schedpktsize = wr->u.udpseg.mss;
		wr->u.udpseg.plen = htobe32(pktlen - immhdrs);
		cpl = (void *)(wr + 1);
	} else {
		MPASS(needs_tcp_csum(m0));
		wr->u.tcpseg.type = FW_ETH_TX_EO_TYPE_TCPSEG;
		wr->u.tcpseg.ethlen = m0->m_pkthdr.l2hlen;
		wr->u.tcpseg.iplen = htobe16(m0->m_pkthdr.l3hlen);
		wr->u.tcpseg.tcplen = m0->m_pkthdr.l4hlen;
		wr->u.tcpseg.tsclk_tsoff = mbuf_eo_tsclk_tsoff(m0);
		wr->u.tcpseg.r4 = 0;
		wr->u.tcpseg.r5 = 0;
		wr->u.tcpseg.plen = htobe32(pktlen - immhdrs);

		if (needs_tso(m0)) {
			struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);

			wr->u.tcpseg.mss = htobe16(m0->m_pkthdr.tso_segsz);

			ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) |
			    F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE |
			    V_LSO_ETHHDR_LEN((m0->m_pkthdr.l2hlen -
			    ETHER_HDR_LEN) >> 2) |
			    V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) |
			    V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2);
			if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr))
				ctrl |= F_LSO_IPV6;
			lso->lso_ctrl = htobe32(ctrl);
			lso->ipid_ofst = htobe16(0);
			lso->mss = htobe16(m0->m_pkthdr.tso_segsz);
			lso->seqno_offset = htobe32(0);
			lso->len = htobe32(pktlen);

			cpl = (void *)(lso + 1);
		} else {
			wr->u.tcpseg.mss = htobe16(0xffff);
			cpl = (void *)(wr + 1);
		}
	}

	/* Checksum offload must be requested for ethofld. */
	MPASS(needs_l4_csum(m0));
	ctrl1 = csum_to_ctrl(cst->adapter, m0);

	/* VLAN tag insertion */
	if (needs_vlan_insertion(m0)) {
		ctrl1 |= F_TXPKT_VLAN_VLD |
		    V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag);
	}

	/* CPL header */
	cpl->ctrl0 = cst->ctrl0;
	cpl->pack = 0;
	cpl->len = htobe16(pktlen);
	cpl->ctrl1 = htobe64(ctrl1);

	/* Copy Ethernet, IP & TCP/UDP hdrs as immediate data */
	p = (uintptr_t)(cpl + 1);
	m_copydata(m0, 0, immhdrs, (void *)p);

	/* SGL */
	dst = (void *)(cpl + 1);
	if (nsegs > 0) {
		int i, pad;

		/* zero-pad up to the next 16-byte boundary, if not 16-byte aligned */
		p += immhdrs;
		pad = 16 - (immhdrs & 0xf);
		bzero((void *)p, pad);

		usgl = (void *)(p + pad);
		usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
		    V_ULPTX_NSGE(nsegs));

		sglist_init(&sg, nitems(segs), segs);
		for (; m0 != NULL; m0 = m0->m_next) {
			if (__predict_false(m0->m_len == 0))
				continue;
			if (immhdrs >= m0->m_len) {
				immhdrs -= m0->m_len;
				continue;
			}
			if (m0->m_flags & M_EXTPG)
				sglist_append_mbuf_epg(&sg, m0,
				    mtod(m0, vm_offset_t), m0->m_len);
			else
				sglist_append(&sg, mtod(m0, char *) + immhdrs,
				    m0->m_len - immhdrs);
			immhdrs = 0;
		}
		MPASS(sg.sg_nseg == nsegs);

		/*
		 * Zero pad last 8B in case the WR doesn't end on a 16B
		 * boundary.
		 */
		*(uint64_t *)((char *)wr + len16 * 16 - 8) = 0;

		usgl->len0 = htobe32(segs[0].ss_len);
		usgl->addr0 = htobe64(segs[0].ss_paddr);
		for (i = 0; i < nsegs - 1; i++) {
			usgl->sge[i / 2].len[i & 1] = htobe32(segs[i + 1].ss_len);
			usgl->sge[i / 2].addr[i & 1] = htobe64(segs[i + 1].ss_paddr);
		}
		if (i & 1)
			usgl->sge[i / 2].len[1] = htobe32(0);
	}

}

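/*
 * Write pending ethofld (rate limited) frames to the hardware: one work
 * request per mbuf for as long as tx credits are available.  A completion is
 * requested often enough to keep credits flowing back, and transmitted mbufs
 * are parked on pending_fwack until the firmware acknowledges them.
 */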
static void
ethofld_tx(struct cxgbe_rate_tag *cst)
{
	struct mbuf *m;
	struct wrq_cookie cookie;
	int next_credits, compl;
	struct fw_eth_tx_eo_wr *wr;

	mtx_assert(&cst->lock, MA_OWNED);

	while ((m = mbufq_first(&cst->pending_tx)) != NULL) {
		M_ASSERTPKTHDR(m);

		/* How many len16 credits do we need to send this mbuf. */
		next_credits = mbuf_eo_len16(m);
		MPASS(next_credits > 0);
		if (next_credits > cst->tx_credits) {
			/*
			 * Tx will make progress eventually because there is at
			 * least one outstanding fw4_ack that will return
			 * credits and kick the tx.
			 */
			MPASS(cst->ncompl > 0);
			return;
		}
		wr = start_wrq_wr(cst->eo_txq, next_credits, &cookie);
		if (__predict_false(wr == NULL)) {
			/* XXX: wishful thinking, not a real assertion. */
			MPASS(cst->ncompl > 0);
			return;
		}
		cst->tx_credits -= next_credits;
		cst->tx_nocompl += next_credits;
		compl = cst->ncompl == 0 || cst->tx_nocompl >= cst->tx_total / 2;
		ETHER_BPF_MTAP(cst->com.com.ifp, m);
		write_ethofld_wr(cst, wr, m, compl);
		commit_wrq_wr(cst->eo_txq, wr, &cookie);
		if (compl) {
			cst->ncompl++;
			cst->tx_nocompl = 0;
		}
		(void) mbufq_dequeue(&cst->pending_tx);

		/*
		 * Drop the mbuf's reference on the tag now rather than
		 * waiting until m_freem().  This ensures that
		 * cxgbe_rate_tag_free gets called when the inp drops its
		 * reference on the tag and there are no more mbufs in the
		 * pending_tx queue, so that any pending requests can be
		 * flushed.  Otherwise, if the last mbuf doesn't request a
		 * completion, the etid will never be released.
		 */
		m->m_pkthdr.snd_tag = NULL;
		m->m_pkthdr.csum_flags &= ~CSUM_SND_TAG;
		m_snd_tag_rele(&cst->com.com);

		mbufq_enqueue(&cst->pending_fwack, m);
	}
}

int
ethofld_transmit(struct ifnet *ifp, struct mbuf *m0)
{
	struct cxgbe_rate_tag *cst;
	int rc;

	MPASS(m0->m_nextpkt == NULL);
	MPASS(m0->m_pkthdr.csum_flags & CSUM_SND_TAG);
	MPASS(m0->m_pkthdr.snd_tag != NULL);
	cst = mst_to_crt(m0->m_pkthdr.snd_tag);

	mtx_lock(&cst->lock);
	MPASS(cst->flags & EO_SND_TAG_REF);

	if (__predict_false(cst->flags & EO_FLOWC_PENDING)) {
		struct vi_info *vi = ifp->if_softc;
		struct port_info *pi = vi->pi;
		struct adapter *sc = pi->adapter;
		const uint32_t rss_mask = vi->rss_size - 1;
		uint32_t rss_hash;

		cst->eo_txq = &sc->sge.ofld_txq[vi->first_ofld_txq];
		if (M_HASHTYPE_ISHASH(m0))
			rss_hash = m0->m_pkthdr.flowid;
		else
			rss_hash = arc4random();
		/* We assume RSS hashing */
		cst->iqid = vi->rss[rss_hash & rss_mask];
		cst->eo_txq += rss_hash % vi->nofldtxq;
		rc = send_etid_flowc_wr(cst, pi, vi);
		if (rc != 0)
			goto done;
	}

	if (__predict_false(cst->plen + m0->m_pkthdr.len > eo_max_backlog)) {
		rc = ENOBUFS;
		goto done;
	}

	mbufq_enqueue(&cst->pending_tx, m0);
	cst->plen += m0->m_pkthdr.len;

	/*
	 * Hold an extra reference on the tag while generating work
	 * requests to ensure that we don't try to free the tag during
	 * ethofld_tx() in case we are sending the final mbuf after
	 * the inp was freed.
	 */
	m_snd_tag_ref(&cst->com.com);
	ethofld_tx(cst);
	mtx_unlock(&cst->lock);
	m_snd_tag_rele(&cst->com.com);
	return (0);

done:
	mtx_unlock(&cst->lock);
	if (__predict_false(rc != 0))
		m_freem(m0);
	return (rc);
}

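/*
 * Handle a fw4_ack reply on an ethofld queue: reclaim the tx credits that the
 * firmware has returned, free the mbufs those credits covered, and restart
 * transmission if more data is queued and enough credits are now available.
 */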
static int
ethofld_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m0)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_fw4_ack *cpl = (const void *)(rss + 1);
	struct mbuf *m;
	u_int etid = G_CPL_FW4_ACK_FLOWID(be32toh(OPCODE_TID(cpl)));
	struct cxgbe_rate_tag *cst;
	uint8_t credits = cpl->credits;

	cst = lookup_etid(sc, etid);
	mtx_lock(&cst->lock);
	if (__predict_false(cst->flags & EO_FLOWC_RPL_PENDING)) {
		MPASS(credits >= ETID_FLOWC_LEN16);
		credits -= ETID_FLOWC_LEN16;
		cst->flags &= ~EO_FLOWC_RPL_PENDING;
	}

	KASSERT(cst->ncompl > 0,
	    ("%s: etid %u (%p) wasn't expecting completion.",
	    __func__, etid, cst));
	cst->ncompl--;

	while (credits > 0) {
		m = mbufq_dequeue(&cst->pending_fwack);
		if (__predict_false(m == NULL)) {
			/*
			 * The remaining credits are for the final flush that
			 * was issued when the tag was freed by the kernel.
			 */
			MPASS((cst->flags &
			    (EO_FLUSH_RPL_PENDING | EO_SND_TAG_REF)) ==
			    EO_FLUSH_RPL_PENDING);
			MPASS(credits == ETID_FLUSH_LEN16);
			MPASS(cst->tx_credits + cpl->credits == cst->tx_total);
			MPASS(cst->ncompl == 0);

			cst->flags &= ~EO_FLUSH_RPL_PENDING;
			cst->tx_credits += cpl->credits;
			cxgbe_rate_tag_free_locked(cst);
			return (0);	/* cst is gone. */
		}
		KASSERT(m != NULL,
		    ("%s: too many credits (%u, %u)", __func__, cpl->credits,
		    credits));
		KASSERT(credits >= mbuf_eo_len16(m),
		    ("%s: too few credits (%u, %u, %u)", __func__,
		    cpl->credits, credits, mbuf_eo_len16(m)));
		credits -= mbuf_eo_len16(m);
		cst->plen -= m->m_pkthdr.len;
		m_freem(m);
	}

	cst->tx_credits += cpl->credits;
	MPASS(cst->tx_credits <= cst->tx_total);

	if (cst->flags & EO_SND_TAG_REF) {
		/*
		 * As with ethofld_transmit(), hold an extra reference
		 * so that the tag is stable across ethofld_tx().
		 */
		m_snd_tag_ref(&cst->com.com);
		m = mbufq_first(&cst->pending_tx);
		if (m != NULL && cst->tx_credits >= mbuf_eo_len16(m))
			ethofld_tx(cst);
		mtx_unlock(&cst->lock);
		m_snd_tag_rele(&cst->com.com);
	} else {
		/*
		 * There shouldn't be any pending packets if the tag
		 * was freed by the kernel since any pending packet
		 * should hold a reference to the tag.
		 */
		MPASS(mbufq_first(&cst->pending_tx) == NULL);
		mtx_unlock(&cst->lock);
	}

	return (0);
}
#endif