/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ratelimit.h"

#include <sys/types.h>
#include <sys/eventhandler.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>
#include <sys/smp.h>
#include <sys/counter.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_vlan_var.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <machine/in_cksum.h>
#include <machine/md_var.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#ifdef DEV_NETMAP
#include <machine/bus.h>
#include <sys/selinfo.h>
#include <net/if_var.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#endif

#include "common/common.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "common/t4_msg.h"
#include "t4_l2t.h"
#include "t4_mp_ring.h"

#ifdef T4_PKT_TIMESTAMP
#define RX_COPY_THRESHOLD (MINCLSIZE - 8)
#else
#define RX_COPY_THRESHOLD MINCLSIZE
#endif

/* Internal mbuf flags stored in PH_loc.eight[1]. */
#define	MC_RAW_WR		0x02

/*
 * Ethernet frames are DMA'd at this byte offset into the freelist buffer.
 * 0-7 are valid values.
 */
static int fl_pktshift = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, fl_pktshift, CTLFLAG_RDTUN, &fl_pktshift, 0,
    "payload DMA offset in rx buffer (bytes)");

/*
 * Pad ethernet payload up to this boundary.
 * -1: driver should figure out a good value.
 *  0: disable padding.
 *  Any power of 2 from 32 to 4096 (both inclusive) is also a valid value.
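 *  The value is validated and written to the chip by
 *  setup_pad_and_pack_boundaries() below.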
 */
int fl_pad = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, fl_pad, CTLFLAG_RDTUN, &fl_pad, 0,
    "payload pad boundary (bytes)");

/*
 * Status page length.
 * -1: driver should figure out a good value.
 *  64 or 128 are the only other valid values.
 */
static int spg_len = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, spg_len, CTLFLAG_RDTUN, &spg_len, 0,
    "status page size (bytes)");

/*
 * Congestion drops.
 * -1: no congestion feedback (not recommended).
 *  0: backpressure the channel instead of dropping packets right away.
 *  1: no backpressure, drop packets for the congested queue immediately.
 */
static int cong_drop = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, cong_drop, CTLFLAG_RDTUN, &cong_drop, 0,
    "Congestion control for RX queues (0 = backpressure, 1 = drop)");

/*
 * Deliver multiple frames in the same free list buffer if they fit.
 * -1: let the driver decide whether to enable buffer packing or not.
 *  0: disable buffer packing.
 *  1: enable buffer packing.
 */
static int buffer_packing = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, buffer_packing, CTLFLAG_RDTUN, &buffer_packing,
    0, "Enable buffer packing");

/*
 * Start next frame in a packed buffer at this boundary.
 * -1: driver should figure out a good value.
 * T4: driver will ignore this and use the same value as fl_pad above.
 * T5: 16, or a power of 2 from 64 to 4096 (both inclusive) is a valid value.
 */
static int fl_pack = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, fl_pack, CTLFLAG_RDTUN, &fl_pack, 0,
    "payload pack boundary (bytes)");

/*
 * Allow the driver to create mbuf(s) in a cluster allocated for rx.
 * 0: never; always allocate mbufs from the zone_mbuf UMA zone.
 * 1: ok to create mbuf(s) within a cluster if there is room.
 */
static int allow_mbufs_in_cluster = 1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, allow_mbufs_in_cluster, CTLFLAG_RDTUN,
    &allow_mbufs_in_cluster, 0,
    "Allow driver to create mbufs within a rx cluster");

/*
 * Largest rx cluster size that the driver is allowed to allocate.
 */
static int largest_rx_cluster = MJUM16BYTES;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, largest_rx_cluster, CTLFLAG_RDTUN,
    &largest_rx_cluster, 0, "Largest rx cluster (bytes)");

/*
 * Size of cluster allocation that's most likely to succeed.  The driver will
 * fall back to this size if it fails to allocate clusters larger than this.
 */
static int safest_rx_cluster = PAGE_SIZE;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, safest_rx_cluster, CTLFLAG_RDTUN,
    &safest_rx_cluster, 0, "Safe rx cluster (bytes)");

#ifdef RATELIMIT
/*
 * Knob to control TCP timestamp rewriting, and the granularity of the tick used
 * for rewriting.  -1 and 0-3 are all valid values.
 * -1: hardware should leave the TCP timestamps alone.
 * 0: 1ms
 * 1: 100us
 * 2: 10us
 * 3: 1us
 */
static int tsclk = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, tsclk, CTLFLAG_RDTUN, &tsclk, 0,
    "Control TCP timestamp rewriting when using pacing");

static int eo_max_backlog = 1024 * 1024;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, eo_max_backlog, CTLFLAG_RDTUN, &eo_max_backlog,
    0, "Maximum backlog of ratelimited data per flow");
#endif

/*
 * The interrupt holdoff timers are multiplied by this value on T6+.
 * 1 and 3-17 (both inclusive) are legal values.
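 * (1 leaves the timers unscaled; 3-17 program V_TSCALE(tscale - 2) into
 * SGE_ITP_CONTROL, see t4_tweak_chip_settings().)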
 */
static int tscale = 1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, tscale, CTLFLAG_RDTUN, &tscale, 0,
    "Interrupt holdoff timer scale on T6+");

/*
 * Number of LRO entries in the lro_ctrl structure per rx queue.
 */
static int lro_entries = TCP_LRO_ENTRIES;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, lro_entries, CTLFLAG_RDTUN, &lro_entries, 0,
    "Number of LRO entries per RX queue");

/*
 * This enables presorting of frames before they're fed into tcp_lro_rx.
 */
static int lro_mbufs = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, lro_mbufs, CTLFLAG_RDTUN, &lro_mbufs, 0,
    "Enable presorting of LRO frames");

struct txpkts {
	u_int wr_type;		/* type 0 or type 1 */
	u_int npkt;		/* # of packets in this work request */
	u_int plen;		/* total payload (sum of all packets) */
	u_int len16;		/* # of 16B pieces used by this work request */
};

/* A packet's SGL.  This + m_pkthdr has all info needed for tx */
struct sgl {
	struct sglist sg;
	struct sglist_seg seg[TX_SGL_SEGS];
};

static int service_iq(struct sge_iq *, int);
static int service_iq_fl(struct sge_iq *, int);
static struct mbuf *get_fl_payload(struct adapter *, struct sge_fl *, uint32_t);
static int t4_eth_rx(struct sge_iq *, const struct rss_header *, struct mbuf *);
static inline void init_iq(struct sge_iq *, struct adapter *, int, int, int);
static inline void init_fl(struct adapter *, struct sge_fl *, int, int, char *);
static inline void init_eq(struct adapter *, struct sge_eq *, int, int, uint8_t,
    uint16_t, char *);
static int alloc_ring(struct adapter *, size_t, bus_dma_tag_t *, bus_dmamap_t *,
    bus_addr_t *, void **);
static int free_ring(struct adapter *, bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
    void *);
static int alloc_iq_fl(struct vi_info *, struct sge_iq *, struct sge_fl *,
    int, int);
static int free_iq_fl(struct vi_info *, struct sge_iq *, struct sge_fl *);
static void add_iq_sysctls(struct sysctl_ctx_list *, struct sysctl_oid *,
    struct sge_iq *);
static void add_fl_sysctls(struct adapter *, struct sysctl_ctx_list *,
    struct sysctl_oid *, struct sge_fl *);
static int alloc_fwq(struct adapter *);
static int free_fwq(struct adapter *);
static int alloc_ctrlq(struct adapter *, struct sge_wrq *, int,
    struct sysctl_oid *);
static int alloc_rxq(struct vi_info *, struct sge_rxq *, int, int,
    struct sysctl_oid *);
static int free_rxq(struct vi_info *, struct sge_rxq *);
#ifdef TCP_OFFLOAD
static int alloc_ofld_rxq(struct vi_info *, struct sge_ofld_rxq *, int, int,
    struct sysctl_oid *);
static int free_ofld_rxq(struct vi_info *, struct sge_ofld_rxq *);
#endif
#ifdef DEV_NETMAP
static int alloc_nm_rxq(struct vi_info *, struct sge_nm_rxq *, int, int,
    struct sysctl_oid *);
static int free_nm_rxq(struct vi_info *, struct sge_nm_rxq *);
static int alloc_nm_txq(struct vi_info *, struct sge_nm_txq *, int, int,
    struct sysctl_oid *);
static int free_nm_txq(struct vi_info *, struct sge_nm_txq *);
#endif
static int ctrl_eq_alloc(struct adapter *, struct sge_eq *);
static int eth_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *);
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
static int ofld_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *);
#endif
static int alloc_eq(struct adapter *, struct vi_info *, struct sge_eq *);
static int free_eq(struct adapter *, struct sge_eq *);
static int alloc_wrq(struct adapter *, struct vi_info *, struct sge_wrq *,
    struct sysctl_oid *);
static int free_wrq(struct adapter *, struct sge_wrq *);
static int alloc_txq(struct vi_info *, struct sge_txq *, int,
    struct sysctl_oid *);
static int free_txq(struct vi_info *, struct sge_txq *);
static void oneseg_dma_callback(void *, bus_dma_segment_t *, int, int);
static inline void ring_fl_db(struct adapter *, struct sge_fl *);
static int refill_fl(struct adapter *, struct sge_fl *, int);
static void refill_sfl(void *);
static int alloc_fl_sdesc(struct sge_fl *);
static void free_fl_sdesc(struct adapter *, struct sge_fl *);
static void find_best_refill_source(struct adapter *, struct sge_fl *, int);
static void find_safe_refill_source(struct adapter *, struct sge_fl *);
static void add_fl_to_sfl(struct adapter *, struct sge_fl *);

static inline void get_pkt_gl(struct mbuf *, struct sglist *);
static inline u_int txpkt_len16(u_int, u_int);
static inline u_int txpkt_vm_len16(u_int, u_int);
static inline u_int txpkts0_len16(u_int);
static inline u_int txpkts1_len16(void);
static u_int write_raw_wr(struct sge_txq *, void *, struct mbuf *, u_int);
static u_int write_txpkt_wr(struct sge_txq *, struct fw_eth_tx_pkt_wr *,
    struct mbuf *, u_int);
static u_int write_txpkt_vm_wr(struct adapter *, struct sge_txq *,
    struct fw_eth_tx_pkt_vm_wr *, struct mbuf *, u_int);
static int try_txpkts(struct mbuf *, struct mbuf *, struct txpkts *, u_int);
static int add_to_txpkts(struct mbuf *, struct txpkts *, u_int);
static u_int write_txpkts_wr(struct sge_txq *, struct fw_eth_tx_pkts_wr *,
    struct mbuf *, const struct txpkts *, u_int);
static void write_gl_to_txd(struct sge_txq *, struct mbuf *, caddr_t *, int);
static inline void copy_to_txd(struct sge_eq *, caddr_t, caddr_t *, int);
static inline void ring_eq_db(struct adapter *, struct sge_eq *, u_int);
static inline uint16_t read_hw_cidx(struct sge_eq *);
static inline u_int reclaimable_tx_desc(struct sge_eq *);
static inline u_int total_available_tx_desc(struct sge_eq *);
static u_int reclaim_tx_descs(struct sge_txq *, u_int);
static void tx_reclaim(void *, int);
static __be64 get_flit(struct sglist_seg *, int, int);
static int handle_sge_egr_update(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
static int handle_fw_msg(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
static int t4_handle_wrerr_rpl(struct adapter *, const __be64 *);
static void wrq_tx_drain(void *, int);
static void drain_wrq_wr_list(struct adapter *, struct sge_wrq *);

static int sysctl_uint16(SYSCTL_HANDLER_ARGS);
static int sysctl_bufsizes(SYSCTL_HANDLER_ARGS);
#ifdef RATELIMIT
static inline u_int txpkt_eo_len16(u_int, u_int, u_int);
static int ethofld_fw4_ack(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
#endif

static counter_u64_t extfree_refs;
static counter_u64_t extfree_rels;

an_handler_t t4_an_handler;
fw_msg_handler_t t4_fw_msg_handler[NUM_FW6_TYPES];
cpl_handler_t t4_cpl_handler[NUM_CPL_CMDS];
cpl_handler_t set_tcb_rpl_handlers[NUM_CPL_COOKIES];
cpl_handler_t l2t_write_rpl_handlers[NUM_CPL_COOKIES];
cpl_handler_t act_open_rpl_handlers[NUM_CPL_COOKIES];
cpl_handler_t abort_rpl_rss_handlers[NUM_CPL_COOKIES];
cpl_handler_t fw4_ack_handlers[NUM_CPL_COOKIES];

void
t4_register_an_handler(an_handler_t h)
{
	uintptr_t *loc;

	MPASS(h == NULL || t4_an_handler == NULL);

	loc = (uintptr_t *)&t4_an_handler;
	atomic_store_rel_ptr(loc, (uintptr_t)h);
}

void
t4_register_fw_msg_handler(int type, fw_msg_handler_t h)
{
	uintptr_t *loc;

	MPASS(type < nitems(t4_fw_msg_handler));
	MPASS(h == NULL || t4_fw_msg_handler[type] == NULL);
	/*
	 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
	 * handler dispatch table.  Reject any attempt to install a handler for
	 * this subtype.
	 */
	MPASS(type != FW_TYPE_RSSCPL);
	MPASS(type != FW6_TYPE_RSSCPL);

	loc = (uintptr_t *)&t4_fw_msg_handler[type];
	atomic_store_rel_ptr(loc, (uintptr_t)h);
}

void
t4_register_cpl_handler(int opcode, cpl_handler_t h)
{
	uintptr_t *loc;

	MPASS(opcode < nitems(t4_cpl_handler));
	MPASS(h == NULL || t4_cpl_handler[opcode] == NULL);

	loc = (uintptr_t *)&t4_cpl_handler[opcode];
	atomic_store_rel_ptr(loc, (uintptr_t)h);
}

static int
set_tcb_rpl_handler(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1);
	u_int tid;
	int cookie;

	MPASS(m == NULL);

	tid = GET_TID(cpl);
	if (is_hpftid(iq->adapter, tid) || is_ftid(iq->adapter, tid)) {
		/*
		 * The return code for filter-write is put in the CPL cookie so
		 * we have to rely on the hardware tid (is_ftid) to determine
		 * that this is a response to a filter.
		 */
		cookie = CPL_COOKIE_FILTER;
	} else {
		cookie = G_COOKIE(cpl->cookie);
	}
	MPASS(cookie > CPL_COOKIE_RESERVED);
	MPASS(cookie < nitems(set_tcb_rpl_handlers));

	return (set_tcb_rpl_handlers[cookie](iq, rss, m));
}

static int
l2t_write_rpl_handler(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	const struct cpl_l2t_write_rpl *rpl = (const void *)(rss + 1);
	unsigned int cookie;

	MPASS(m == NULL);

	cookie = GET_TID(rpl) & F_SYNC_WR ? CPL_COOKIE_TOM : CPL_COOKIE_FILTER;
	return (l2t_write_rpl_handlers[cookie](iq, rss, m));
}

static int
act_open_rpl_handler(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);
	u_int cookie = G_TID_COOKIE(G_AOPEN_ATID(be32toh(cpl->atid_status)));

	MPASS(m == NULL);
	MPASS(cookie != CPL_COOKIE_RESERVED);

	return (act_open_rpl_handlers[cookie](iq, rss, m));
}

static int
abort_rpl_rss_handler(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	u_int cookie;

	MPASS(m == NULL);
	if (is_hashfilter(sc))
		cookie = CPL_COOKIE_HASHFILTER;
	else
		cookie = CPL_COOKIE_TOM;

	return (abort_rpl_rss_handlers[cookie](iq, rss, m));
}

static int
fw4_ack_handler(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_fw4_ack *cpl = (const void *)(rss + 1);
	unsigned int tid = G_CPL_FW4_ACK_FLOWID(be32toh(OPCODE_TID(cpl)));
	u_int cookie;

	MPASS(m == NULL);
	if (is_etid(sc, tid))
		cookie = CPL_COOKIE_ETHOFLD;
	else
		cookie = CPL_COOKIE_TOM;

	return (fw4_ack_handlers[cookie](iq, rss, m));
}

static void
t4_init_shared_cpl_handlers(void)
{

	t4_register_cpl_handler(CPL_SET_TCB_RPL, set_tcb_rpl_handler);
	t4_register_cpl_handler(CPL_L2T_WRITE_RPL, l2t_write_rpl_handler);
	t4_register_cpl_handler(CPL_ACT_OPEN_RPL, act_open_rpl_handler);
	t4_register_cpl_handler(CPL_ABORT_RPL_RSS, abort_rpl_rss_handler);
	t4_register_cpl_handler(CPL_FW4_ACK, fw4_ack_handler);
}

void
t4_register_shared_cpl_handler(int opcode, cpl_handler_t h, int cookie)
{
	uintptr_t *loc;

	MPASS(opcode < nitems(t4_cpl_handler));
	MPASS(cookie > CPL_COOKIE_RESERVED);
	MPASS(cookie < NUM_CPL_COOKIES);
	MPASS(t4_cpl_handler[opcode] != NULL);

	switch (opcode) {
	case CPL_SET_TCB_RPL:
		loc = (uintptr_t *)&set_tcb_rpl_handlers[cookie];
		break;
	case CPL_L2T_WRITE_RPL:
		loc = (uintptr_t *)&l2t_write_rpl_handlers[cookie];
		break;
	case CPL_ACT_OPEN_RPL:
		loc = (uintptr_t *)&act_open_rpl_handlers[cookie];
		break;
	case CPL_ABORT_RPL_RSS:
		loc = (uintptr_t *)&abort_rpl_rss_handlers[cookie];
		break;
	case CPL_FW4_ACK:
		loc = (uintptr_t *)&fw4_ack_handlers[cookie];
		break;
	default:
		MPASS(0);
		return;
	}
	MPASS(h == NULL || *loc == (uintptr_t)NULL);
	atomic_store_rel_ptr(loc, (uintptr_t)h);
}

/*
 * Called on MOD_LOAD.  Validates and calculates the SGE tunables.
 */
void
t4_sge_modload(void)
{

	if (fl_pktshift < 0 || fl_pktshift > 7) {
		printf("Invalid hw.cxgbe.fl_pktshift value (%d),"
		    " using 0 instead.\n", fl_pktshift);
		fl_pktshift = 0;
	}

	if (spg_len != 64 && spg_len != 128) {
		int len;

#if defined(__i386__) || defined(__amd64__)
		len = cpu_clflush_line_size > 64 ? 128 : 64;
#else
		len = 64;
#endif
		if (spg_len != -1) {
			printf("Invalid hw.cxgbe.spg_len value (%d),"
			    " using %d instead.\n", spg_len, len);
		}
		spg_len = len;
	}

	if (cong_drop < -1 || cong_drop > 1) {
		printf("Invalid hw.cxgbe.cong_drop value (%d),"
		    " using 0 instead.\n", cong_drop);
		cong_drop = 0;
	}

	if (tscale != 1 && (tscale < 3 || tscale > 17)) {
		printf("Invalid hw.cxgbe.tscale value (%d),"
		    " using 1 instead.\n", tscale);
		tscale = 1;
	}

	extfree_refs = counter_u64_alloc(M_WAITOK);
	extfree_rels = counter_u64_alloc(M_WAITOK);
	counter_u64_zero(extfree_refs);
	counter_u64_zero(extfree_rels);

	t4_init_shared_cpl_handlers();
	t4_register_cpl_handler(CPL_FW4_MSG, handle_fw_msg);
	t4_register_cpl_handler(CPL_FW6_MSG, handle_fw_msg);
	t4_register_cpl_handler(CPL_SGE_EGR_UPDATE, handle_sge_egr_update);
	t4_register_cpl_handler(CPL_RX_PKT, t4_eth_rx);
#ifdef RATELIMIT
	t4_register_shared_cpl_handler(CPL_FW4_ACK, ethofld_fw4_ack,
	    CPL_COOKIE_ETHOFLD);
#endif
	t4_register_fw_msg_handler(FW6_TYPE_CMD_RPL, t4_handle_fw_rpl);
	t4_register_fw_msg_handler(FW6_TYPE_WRERR_RPL, t4_handle_wrerr_rpl);
}

void
t4_sge_modunload(void)
{

	counter_u64_free(extfree_refs);
	counter_u64_free(extfree_rels);
}

uint64_t
t4_sge_extfree_refs(void)
{
	uint64_t refs, rels;

	rels = counter_u64_fetch(extfree_rels);
	refs = counter_u64_fetch(extfree_refs);

	return (refs - rels);
}

static inline void
setup_pad_and_pack_boundaries(struct adapter *sc)
{
	uint32_t v, m;
	int pad, pack, pad_shift;

	pad_shift = chip_id(sc) > CHELSIO_T5 ? X_T6_INGPADBOUNDARY_SHIFT :
	    X_INGPADBOUNDARY_SHIFT;
	pad = fl_pad;
	if (fl_pad < (1 << pad_shift) ||
	    fl_pad > (1 << (pad_shift + M_INGPADBOUNDARY)) ||
	    !powerof2(fl_pad)) {
		/*
		 * If there is any chance that we might use buffer packing and
		 * the chip is a T4, then pick 64 as the pad/pack boundary.  Set
		 * it to the minimum allowed in all other cases.
		 */
		pad = is_t4(sc) && buffer_packing ? 64 : 1 << pad_shift;

		/*
		 * For fl_pad = 0 we'll still write a reasonable value to the
		 * register but all the freelists will opt out of padding.
		 * We'll complain here only if the user tried to set it to a
		 * value greater than 0 that was invalid.
		 */
		if (fl_pad > 0) {
			device_printf(sc->dev, "Invalid hw.cxgbe.fl_pad value"
			    " (%d), using %d instead.\n", fl_pad, pad);
		}
	}
	m = V_INGPADBOUNDARY(M_INGPADBOUNDARY);
	v = V_INGPADBOUNDARY(ilog2(pad) - pad_shift);
	t4_set_reg_field(sc, A_SGE_CONTROL, m, v);

	if (is_t4(sc)) {
		if (fl_pack != -1 && fl_pack != pad) {
			/* Complain but carry on. */
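			/* T4 uses the pad boundary as the pack boundary too. */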
			device_printf(sc->dev, "hw.cxgbe.fl_pack (%d) ignored,"
			    " using %d instead.\n", fl_pack, pad);
		}
		return;
	}

	pack = fl_pack;
	if (fl_pack < 16 || fl_pack == 32 || fl_pack > 4096 ||
	    !powerof2(fl_pack)) {
		pack = max(sc->params.pci.mps, CACHE_LINE_SIZE);
		MPASS(powerof2(pack));
		if (pack < 16)
			pack = 16;
		if (pack == 32)
			pack = 64;
		if (pack > 4096)
			pack = 4096;
		if (fl_pack != -1) {
			device_printf(sc->dev, "Invalid hw.cxgbe.fl_pack value"
			    " (%d), using %d instead.\n", fl_pack, pack);
		}
	}
	m = V_INGPACKBOUNDARY(M_INGPACKBOUNDARY);
	if (pack == 16)
		v = V_INGPACKBOUNDARY(0);
	else
		v = V_INGPACKBOUNDARY(ilog2(pack) - 5);

	MPASS(!is_t4(sc));	/* T4 doesn't have SGE_CONTROL2 */
	t4_set_reg_field(sc, A_SGE_CONTROL2, m, v);
}

/*
 * adap->params.vpd.cclk must be set up before this is called.
 */
void
t4_tweak_chip_settings(struct adapter *sc)
{
	int i;
	uint32_t v, m;
	int intr_timer[SGE_NTIMERS] = {1, 5, 10, 50, 100, 200};
	int timer_max = M_TIMERVALUE0 * 1000 / sc->params.vpd.cclk;
	int intr_pktcount[SGE_NCOUNTERS] = {1, 8, 16, 32}; /* 63 max */
	uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE);
	static int sge_flbuf_sizes[] = {
		MCLBYTES,
#if MJUMPAGESIZE != MCLBYTES
		MJUMPAGESIZE,
		MJUMPAGESIZE - CL_METADATA_SIZE,
		MJUMPAGESIZE - 2 * MSIZE - CL_METADATA_SIZE,
#endif
		MJUM9BYTES,
		MJUM16BYTES,
		MCLBYTES - MSIZE - CL_METADATA_SIZE,
		MJUM9BYTES - CL_METADATA_SIZE,
		MJUM16BYTES - CL_METADATA_SIZE,
	};

	KASSERT(sc->flags & MASTER_PF,
	    ("%s: trying to change chip settings when not master.", __func__));

	m = V_PKTSHIFT(M_PKTSHIFT) | F_RXPKTCPLMODE | F_EGRSTATUSPAGESIZE;
	v = V_PKTSHIFT(fl_pktshift) | F_RXPKTCPLMODE |
	    V_EGRSTATUSPAGESIZE(spg_len == 128);
	t4_set_reg_field(sc, A_SGE_CONTROL, m, v);

	setup_pad_and_pack_boundaries(sc);

	v = V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF1(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF2(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF3(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF4(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF5(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF6(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF7(PAGE_SHIFT - 10);
	t4_write_reg(sc, A_SGE_HOST_PAGE_SIZE, v);

	KASSERT(nitems(sge_flbuf_sizes) <= SGE_FLBUF_SIZES,
	    ("%s: hw buffer size table too big", __func__));
	t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE0, 4096);
	t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE1, 65536);
	for (i = 0; i < min(nitems(sge_flbuf_sizes), SGE_FLBUF_SIZES); i++) {
		t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE15 - (4 * i),
		    sge_flbuf_sizes[i]);
	}

	v = V_THRESHOLD_0(intr_pktcount[0]) | V_THRESHOLD_1(intr_pktcount[1]) |
	    V_THRESHOLD_2(intr_pktcount[2]) | V_THRESHOLD_3(intr_pktcount[3]);
	t4_write_reg(sc, A_SGE_INGRESS_RX_THRESHOLD, v);

	KASSERT(intr_timer[0] <= timer_max,
	    ("%s: not a single usable timer (%d, %d)", __func__, intr_timer[0],
	    timer_max));
	for (i = 1; i < nitems(intr_timer); i++) {
		KASSERT(intr_timer[i] >= intr_timer[i - 1],
		    ("%s: timers not listed in increasing order (%d)",
		    __func__, i));

		while (intr_timer[i] > timer_max) {
			if (i == nitems(intr_timer) - 1) {
				intr_timer[i] = timer_max;
				break;
			}
			intr_timer[i] += intr_timer[i - 1];
			intr_timer[i] /= 2;
		}
	}

	v = V_TIMERVALUE0(us_to_core_ticks(sc, intr_timer[0])) |
	    V_TIMERVALUE1(us_to_core_ticks(sc, intr_timer[1]));
	t4_write_reg(sc, A_SGE_TIMER_VALUE_0_AND_1, v);
	v = V_TIMERVALUE2(us_to_core_ticks(sc, intr_timer[2])) |
	    V_TIMERVALUE3(us_to_core_ticks(sc, intr_timer[3]));
	t4_write_reg(sc, A_SGE_TIMER_VALUE_2_AND_3, v);
	v = V_TIMERVALUE4(us_to_core_ticks(sc, intr_timer[4])) |
	    V_TIMERVALUE5(us_to_core_ticks(sc, intr_timer[5]));
	t4_write_reg(sc, A_SGE_TIMER_VALUE_4_AND_5, v);

	if (chip_id(sc) >= CHELSIO_T6) {
		m = V_TSCALE(M_TSCALE);
		if (tscale == 1)
			v = 0;
		else
			v = V_TSCALE(tscale - 2);
		t4_set_reg_field(sc, A_SGE_ITP_CONTROL, m, v);

		if (sc->debug_flags & DF_DISABLE_TCB_CACHE) {
			m = V_RDTHRESHOLD(M_RDTHRESHOLD) | F_WRTHRTHRESHEN |
			    V_WRTHRTHRESH(M_WRTHRTHRESH);
			t4_tp_pio_read(sc, &v, 1, A_TP_CMM_CONFIG, 1);
			v &= ~m;
			v |= V_RDTHRESHOLD(1) | F_WRTHRTHRESHEN |
			    V_WRTHRTHRESH(16);
			t4_tp_pio_write(sc, &v, 1, A_TP_CMM_CONFIG, 1);
		}
	}

	/* 4K, 16K, 64K, 256K DDP "page sizes" for TDDP */
	v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6);
	t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, v);

	/*
	 * 4K, 8K, 16K, 64K DDP "page sizes" for iSCSI DDP.  These have been
	 * chosen with MAXPHYS = 128K in mind.  The largest DDP buffer that we
	 * may have to deal with is MAXPHYS + 1 page.
	 */
	v = V_HPZ0(0) | V_HPZ1(1) | V_HPZ2(2) | V_HPZ3(4);
	t4_write_reg(sc, A_ULP_RX_ISCSI_PSZ, v);

	/* We use multiple DDP page sizes both in plain-TOE and ISCSI modes. */
	m = v = F_TDDPTAGTCB | F_ISCSITAGTCB;
	t4_set_reg_field(sc, A_ULP_RX_CTL, m, v);

	m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET |
	    F_RESETDDPOFFSET;
	v = V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET;
	t4_set_reg_field(sc, A_TP_PARA_REG5, m, v);
}

/*
 * SGE wants the buffer to be at least 64B and then a multiple of 16.  If
 * padding is in use, the buffer's start and end need to be aligned to the pad
 * boundary as well.  We'll just make sure that the size is a multiple of the
 * boundary here, it is up to the buffer allocation code to make sure the start
 * of the buffer is aligned as well.
 */
static inline int
hwsz_ok(struct adapter *sc, int hwsz)
{
	int mask = fl_pad ? sc->params.sge.pad_boundary - 1 : 16 - 1;

	return (hwsz >= 64 && (hwsz & mask) == 0);
}

/*
 * XXX: driver really should be able to deal with unexpected settings.
 */
int
t4_read_chip_settings(struct adapter *sc)
{
	struct sge *s = &sc->sge;
	struct sge_params *sp = &sc->params.sge;
	int i, j, n, rc = 0;
	uint32_t m, v, r;
	uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE);
	static int sw_buf_sizes[] = {	/* Sorted by size */
		MCLBYTES,
#if MJUMPAGESIZE != MCLBYTES
		MJUMPAGESIZE,
#endif
		MJUM9BYTES,
		MJUM16BYTES
	};
	struct sw_zone_info *swz, *safe_swz;
	struct hw_buf_info *hwb;

	m = F_RXPKTCPLMODE;
	v = F_RXPKTCPLMODE;
	r = sc->params.sge.sge_control;
	if ((r & m) != v) {
		device_printf(sc->dev, "invalid SGE_CONTROL(0x%x)\n", r);
		rc = EINVAL;
	}

	/*
	 * If this changes then every single use of PAGE_SHIFT in the driver
	 * needs to be carefully reviewed for PAGE_SHIFT vs sp->page_shift.
	 */
	if (sp->page_shift != PAGE_SHIFT) {
		device_printf(sc->dev, "invalid SGE_HOST_PAGE_SIZE(0x%x)\n", r);
		rc = EINVAL;
	}

	/* Filter out unusable hw buffer sizes entirely (mark with -2). */
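	/* Unusable means it fails hwsz_ok(): smaller than 64B or misaligned. */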
	hwb = &s->hw_buf_info[0];
	for (i = 0; i < nitems(s->hw_buf_info); i++, hwb++) {
		r = sc->params.sge.sge_fl_buffer_size[i];
		hwb->size = r;
		hwb->zidx = hwsz_ok(sc, r) ? -1 : -2;
		hwb->next = -1;
	}

	/*
	 * Create a sorted list in decreasing order of hw buffer sizes (and so
	 * increasing order of spare area) for each software zone.
	 *
	 * If padding is enabled then the start and end of the buffer must align
	 * to the pad boundary; if packing is enabled then they must align with
	 * the pack boundary as well.  Allocations from the cluster zones are
	 * aligned to min(size, 4K), so the buffer starts at that alignment and
	 * ends at hwb->size alignment.  If mbuf inlining is allowed the
	 * starting alignment will be reduced to MSIZE and the driver will
	 * exercise appropriate caution when deciding on the best buffer layout
	 * to use.
	 */
	n = 0;	/* no usable buffer size to begin with */
	swz = &s->sw_zone_info[0];
	safe_swz = NULL;
	for (i = 0; i < SW_ZONE_SIZES; i++, swz++) {
		int8_t head = -1, tail = -1;

		swz->size = sw_buf_sizes[i];
		swz->zone = m_getzone(swz->size);
		swz->type = m_gettype(swz->size);

		if (swz->size < PAGE_SIZE) {
			MPASS(powerof2(swz->size));
			if (fl_pad && (swz->size % sp->pad_boundary != 0))
				continue;
		}

		if (swz->size == safest_rx_cluster)
			safe_swz = swz;

		hwb = &s->hw_buf_info[0];
		for (j = 0; j < SGE_FLBUF_SIZES; j++, hwb++) {
			if (hwb->zidx != -1 || hwb->size > swz->size)
				continue;
#ifdef INVARIANTS
			if (fl_pad)
				MPASS(hwb->size % sp->pad_boundary == 0);
#endif
			hwb->zidx = i;
			if (head == -1)
				head = tail = j;
			else if (hwb->size < s->hw_buf_info[tail].size) {
				s->hw_buf_info[tail].next = j;
				tail = j;
			} else {
				int8_t *cur;
				struct hw_buf_info *t;

				for (cur = &head; *cur != -1; cur = &t->next) {
					t = &s->hw_buf_info[*cur];
					if (hwb->size == t->size) {
						hwb->zidx = -2;
						break;
					}
					if (hwb->size > t->size) {
						hwb->next = *cur;
						*cur = j;
						break;
					}
				}
			}
		}
		swz->head_hwidx = head;
		swz->tail_hwidx = tail;

		if (tail != -1) {
			n++;
			if (swz->size - s->hw_buf_info[tail].size >=
			    CL_METADATA_SIZE)
				sc->flags |= BUF_PACKING_OK;
		}
	}
	if (n == 0) {
		device_printf(sc->dev, "no usable SGE FL buffer size.\n");
		rc = EINVAL;
	}

	s->safe_hwidx1 = -1;
	s->safe_hwidx2 = -1;
	if (safe_swz != NULL) {
		s->safe_hwidx1 = safe_swz->head_hwidx;
		for (i = safe_swz->head_hwidx; i != -1; i = hwb->next) {
			int spare;

			hwb = &s->hw_buf_info[i];
#ifdef INVARIANTS
			if (fl_pad)
				MPASS(hwb->size % sp->pad_boundary == 0);
#endif
			spare = safe_swz->size - hwb->size;
			if (spare >= CL_METADATA_SIZE) {
				s->safe_hwidx2 = i;
				break;
			}
		}
	}

	if (sc->flags & IS_VF)
		return (0);

	v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6);
	r = t4_read_reg(sc, A_ULP_RX_TDDP_PSZ);
	if (r != v) {
		device_printf(sc->dev, "invalid ULP_RX_TDDP_PSZ(0x%x)\n", r);
		rc = EINVAL;
	}

	m = v = F_TDDPTAGTCB;
	r = t4_read_reg(sc, A_ULP_RX_CTL);
	if ((r & m) != v) {
		device_printf(sc->dev, "invalid ULP_RX_CTL(0x%x)\n", r);
		rc = EINVAL;
	}

	m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET |
	    F_RESETDDPOFFSET;
	v = V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET;
	r = t4_read_reg(sc, A_TP_PARA_REG5);
	if ((r & m) != v) {
		device_printf(sc->dev, "invalid TP_PARA_REG5(0x%x)\n", r);
		rc = EINVAL;
	}

	t4_init_tp_params(sc, 1);

	t4_read_mtu_tbl(sc, sc->params.mtus, NULL);
	t4_load_mtus(sc, sc->params.mtus, sc->params.a_wnd, sc->params.b_wnd);

	return (rc);
}

int
t4_create_dma_tag(struct adapter *sc)
{
	int rc;

	rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE,
	    BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL,
	    NULL, &sc->dmat);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to create main DMA tag: %d\n", rc);
	}

	return (rc);
}

void
t4_sge_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx,
    struct sysctl_oid_list *children)
{
	struct sge_params *sp = &sc->params.sge;

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "buffer_sizes",
	    CTLTYPE_STRING | CTLFLAG_RD, &sc->sge, 0, sysctl_bufsizes, "A",
	    "freelist buffer sizes");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pktshift", CTLFLAG_RD,
	    NULL, sp->fl_pktshift, "payload DMA offset in rx buffer (bytes)");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pad", CTLFLAG_RD,
	    NULL, sp->pad_boundary, "payload pad boundary (bytes)");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "spg_len", CTLFLAG_RD,
	    NULL, sp->spg_len, "status page size (bytes)");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cong_drop", CTLFLAG_RD,
	    NULL, cong_drop, "congestion drop setting");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pack", CTLFLAG_RD,
	    NULL, sp->pack_boundary, "payload pack boundary (bytes)");
}

int
t4_destroy_dma_tag(struct adapter *sc)
{
	if (sc->dmat)
		bus_dma_tag_destroy(sc->dmat);

	return (0);
}

/*
 * Allocate and initialize the firmware event queue, control queues, and special
 * purpose rx queues owned by the adapter.
 *
 * Returns errno on failure.  Resources allocated up to that point may still be
 * allocated.  Caller is responsible for cleanup in case this function fails.
 */
int
t4_setup_adapter_queues(struct adapter *sc)
{
	struct sysctl_oid *oid;
	struct sysctl_oid_list *children;
	int rc, i;

	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);

	sysctl_ctx_init(&sc->ctx);
	sc->flags |= ADAP_SYSCTL_CTX;

	/*
	 * Firmware event queue
	 */
	rc = alloc_fwq(sc);
	if (rc != 0)
		return (rc);

	/*
	 * That's all for the VF driver.
	 */
	if (sc->flags & IS_VF)
		return (rc);

	oid = device_get_sysctl_tree(sc->dev);
	children = SYSCTL_CHILDREN(oid);

	/*
	 * XXX: General purpose rx queues, one per port.
	 */

	/*
	 * Control queues, one per port.
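	 * Each one is a work request queue (sge_wrq) set up by alloc_ctrlq().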
	 */
	oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "ctrlq",
	    CTLFLAG_RD, NULL, "control queues");
	for_each_port(sc, i) {
		struct sge_wrq *ctrlq = &sc->sge.ctrlq[i];

		rc = alloc_ctrlq(sc, ctrlq, i, oid);
		if (rc != 0)
			return (rc);
	}

	return (rc);
}

/*
 * Idempotent
 */
int
t4_teardown_adapter_queues(struct adapter *sc)
{
	int i;

	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);

	/* Do this before freeing the queue */
	if (sc->flags & ADAP_SYSCTL_CTX) {
		sysctl_ctx_free(&sc->ctx);
		sc->flags &= ~ADAP_SYSCTL_CTX;
	}

	if (!(sc->flags & IS_VF)) {
		for_each_port(sc, i)
			free_wrq(sc, &sc->sge.ctrlq[i]);
	}
	free_fwq(sc);

	return (0);
}

/* Maximum payload that can be delivered with a single iq descriptor */
static inline int
mtu_to_max_payload(struct adapter *sc, int mtu, const int toe)
{
	int payload;

#ifdef TCP_OFFLOAD
	if (toe) {
		int rxcs = G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2));

		/* Note that COP can set rx_coalesce on/off per connection. */
		payload = max(mtu, rxcs);
	} else {
#endif
		/* large enough even when hw VLAN extraction is disabled */
		payload = sc->params.sge.fl_pktshift + ETHER_HDR_LEN +
		    ETHER_VLAN_ENCAP_LEN + mtu;
#ifdef TCP_OFFLOAD
	}
#endif

	return (payload);
}

int
t4_setup_vi_queues(struct vi_info *vi)
{
	int rc = 0, i, intr_idx, iqidx;
	struct sge_rxq *rxq;
	struct sge_txq *txq;
#ifdef TCP_OFFLOAD
	struct sge_ofld_rxq *ofld_rxq;
#endif
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	struct sge_wrq *ofld_txq;
#endif
#ifdef DEV_NETMAP
	int saved_idx;
	struct sge_nm_rxq *nm_rxq;
	struct sge_nm_txq *nm_txq;
#endif
	char name[16];
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = vi->ifp;
	struct sysctl_oid *oid = device_get_sysctl_tree(vi->dev);
	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
	int maxp, mtu = ifp->if_mtu;

	/* Interrupt vector to start from (when using multiple vectors) */
	intr_idx = vi->first_intr;

#ifdef DEV_NETMAP
	saved_idx = intr_idx;
	if (ifp->if_capabilities & IFCAP_NETMAP) {

		/* netmap is supported with direct interrupts only. */
		MPASS(!forwarding_intr_to_fwq(sc));

		/*
		 * We don't have buffers to back the netmap rx queues
		 * right now so we create the queues in a way that
		 * doesn't set off any congestion signal in the chip.
		 */
		oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "nm_rxq",
		    CTLFLAG_RD, NULL, "rx queues");
		for_each_nm_rxq(vi, i, nm_rxq) {
			rc = alloc_nm_rxq(vi, nm_rxq, intr_idx, i, oid);
			if (rc != 0)
				goto done;
			intr_idx++;
		}

		oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "nm_txq",
		    CTLFLAG_RD, NULL, "tx queues");
		for_each_nm_txq(vi, i, nm_txq) {
			iqidx = vi->first_nm_rxq + (i % vi->nnmrxq);
			rc = alloc_nm_txq(vi, nm_txq, iqidx, i, oid);
			if (rc != 0)
				goto done;
		}
	}

	/* Normal rx queues and netmap rx queues share the same interrupts. */
	intr_idx = saved_idx;
#endif

	/*
	 * Allocate rx queues first because a default iqid is required when
	 * creating a tx queue.
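	 * (init_eq() for each tx queue below uses an rx queue's iq.cntxt_id.)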
	 */
	maxp = mtu_to_max_payload(sc, mtu, 0);
	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "rxq",
	    CTLFLAG_RD, NULL, "rx queues");
	for_each_rxq(vi, i, rxq) {

		init_iq(&rxq->iq, sc, vi->tmr_idx, vi->pktc_idx, vi->qsize_rxq);

		snprintf(name, sizeof(name), "%s rxq%d-fl",
		    device_get_nameunit(vi->dev), i);
		init_fl(sc, &rxq->fl, vi->qsize_rxq / 8, maxp, name);

		rc = alloc_rxq(vi, rxq,
		    forwarding_intr_to_fwq(sc) ? -1 : intr_idx, i, oid);
		if (rc != 0)
			goto done;
		intr_idx++;
	}
#ifdef DEV_NETMAP
	if (ifp->if_capabilities & IFCAP_NETMAP)
		intr_idx = saved_idx + max(vi->nrxq, vi->nnmrxq);
#endif
#ifdef TCP_OFFLOAD
	maxp = mtu_to_max_payload(sc, mtu, 1);
	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ofld_rxq",
	    CTLFLAG_RD, NULL, "rx queues for offloaded TCP connections");
	for_each_ofld_rxq(vi, i, ofld_rxq) {

		init_iq(&ofld_rxq->iq, sc, vi->ofld_tmr_idx, vi->ofld_pktc_idx,
		    vi->qsize_rxq);

		snprintf(name, sizeof(name), "%s ofld_rxq%d-fl",
		    device_get_nameunit(vi->dev), i);
		init_fl(sc, &ofld_rxq->fl, vi->qsize_rxq / 8, maxp, name);

		rc = alloc_ofld_rxq(vi, ofld_rxq,
		    forwarding_intr_to_fwq(sc) ? -1 : intr_idx, i, oid);
		if (rc != 0)
			goto done;
		intr_idx++;
	}
#endif

	/*
	 * Now the tx queues.
	 */
	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "txq", CTLFLAG_RD,
	    NULL, "tx queues");
	for_each_txq(vi, i, txq) {
		iqidx = vi->first_rxq + (i % vi->nrxq);
		snprintf(name, sizeof(name), "%s txq%d",
		    device_get_nameunit(vi->dev), i);
		init_eq(sc, &txq->eq, EQ_ETH, vi->qsize_txq, pi->tx_chan,
		    sc->sge.rxq[iqidx].iq.cntxt_id, name);

		rc = alloc_txq(vi, txq, i, oid);
		if (rc != 0)
			goto done;
	}
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ofld_txq",
	    CTLFLAG_RD, NULL, "tx queues for TOE/ETHOFLD");
	for_each_ofld_txq(vi, i, ofld_txq) {
		struct sysctl_oid *oid2;

		snprintf(name, sizeof(name), "%s ofld_txq%d",
		    device_get_nameunit(vi->dev), i);
		if (vi->nofldrxq > 0) {
			iqidx = vi->first_ofld_rxq + (i % vi->nofldrxq);
			init_eq(sc, &ofld_txq->eq, EQ_OFLD, vi->qsize_txq,
			    pi->tx_chan, sc->sge.ofld_rxq[iqidx].iq.cntxt_id,
			    name);
		} else {
			iqidx = vi->first_rxq + (i % vi->nrxq);
			init_eq(sc, &ofld_txq->eq, EQ_OFLD, vi->qsize_txq,
			    pi->tx_chan, sc->sge.rxq[iqidx].iq.cntxt_id, name);
		}

		snprintf(name, sizeof(name), "%d", i);
		oid2 = SYSCTL_ADD_NODE(&vi->ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
		    name, CTLFLAG_RD, NULL, "offload tx queue");

		rc = alloc_wrq(sc, vi, ofld_txq, oid2);
		if (rc != 0)
			goto done;
	}
#endif
done:
	if (rc)
		t4_teardown_vi_queues(vi);

	return (rc);
}

/*
 * Idempotent
 */
int
t4_teardown_vi_queues(struct vi_info *vi)
{
	int i;
	struct sge_rxq *rxq;
	struct sge_txq *txq;
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct sge_wrq *ofld_txq;
#endif
#ifdef TCP_OFFLOAD
	struct sge_ofld_rxq *ofld_rxq;
#endif
#ifdef DEV_NETMAP
	struct sge_nm_rxq *nm_rxq;
	struct sge_nm_txq *nm_txq;
#endif

	/* Do this before freeing the queues */
	if (vi->flags & VI_SYSCTL_CTX) {
		sysctl_ctx_free(&vi->ctx);
		vi->flags &= ~VI_SYSCTL_CTX;
	}

#ifdef DEV_NETMAP
	if (vi->ifp->if_capabilities & IFCAP_NETMAP) {
		for_each_nm_txq(vi, i, nm_txq) {
			free_nm_txq(vi, nm_txq);
		}

		for_each_nm_rxq(vi, i, nm_rxq) {
			free_nm_rxq(vi, nm_rxq);
		}
	}
#endif

	/*
	 * Take down all the tx queues first, as they reference the rx queues
	 * (for egress updates, etc.).
	 */

	for_each_txq(vi, i, txq) {
		free_txq(vi, txq);
	}
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	for_each_ofld_txq(vi, i, ofld_txq) {
		free_wrq(sc, ofld_txq);
	}
#endif

	/*
	 * Then take down the rx queues.
	 */

	for_each_rxq(vi, i, rxq) {
		free_rxq(vi, rxq);
	}
#ifdef TCP_OFFLOAD
	for_each_ofld_rxq(vi, i, ofld_rxq) {
		free_ofld_rxq(vi, ofld_rxq);
	}
#endif

	return (0);
}

/*
 * Interrupt handler when the driver is using only 1 interrupt.  This is a very
 * unusual scenario.
 *
 * a) Deals with errors, if any.
 * b) Services firmware event queue, which is taking interrupts for all other
 *    queues.
 */
void
t4_intr_all(void *arg)
{
	struct adapter *sc = arg;
	struct sge_iq *fwq = &sc->sge.fwq;

	MPASS(sc->intr_count == 1);

	if (sc->intr_type == INTR_INTX)
		t4_write_reg(sc, MYPF_REG(A_PCIE_PF_CLI), 0);

	t4_intr_err(arg);
	t4_intr_evt(fwq);
}

/*
 * Interrupt handler for errors (installed directly when multiple interrupts are
 * being used, or called by t4_intr_all).
 */
void
t4_intr_err(void *arg)
{
	struct adapter *sc = arg;
	const bool verbose = (sc->debug_flags & DF_VERBOSE_SLOWINTR) != 0;

	if (sc->flags & ADAP_ERR)
		return;

	t4_slow_intr_handler(sc, verbose);
}

/*
 * Interrupt handler for iq-only queues.  The firmware event queue is the only
 * such queue right now.
 */
void
t4_intr_evt(void *arg)
{
	struct sge_iq *iq = arg;

	if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) {
		service_iq(iq, 0);
		(void) atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE);
	}
}

/*
 * Interrupt handler for iq+fl queues.
 */
void
t4_intr(void *arg)
{
	struct sge_iq *iq = arg;

	if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) {
		service_iq_fl(iq, 0);
		(void) atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE);
	}
}

#ifdef DEV_NETMAP
/*
 * Interrupt handler for netmap rx queues.
 */
void
t4_nm_intr(void *arg)
{
	struct sge_nm_rxq *nm_rxq = arg;

	if (atomic_cmpset_int(&nm_rxq->nm_state, NM_ON, NM_BUSY)) {
		service_nm_rxq(nm_rxq);
		(void) atomic_cmpset_int(&nm_rxq->nm_state, NM_BUSY, NM_ON);
	}
}

/*
 * Interrupt handler for vectors shared between NIC and netmap rx queues.
 */
void
t4_vi_intr(void *arg)
{
	struct irq *irq = arg;

	MPASS(irq->nm_rxq != NULL);
	t4_nm_intr(irq->nm_rxq);

	MPASS(irq->rxq != NULL);
	t4_intr(irq->rxq);
}
#endif

/*
 * Deals with interrupts on an iq-only (no freelist) queue.
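 * Returns 0 if the queue was fully serviced, or EINPROGRESS if the budget ran
 * out first.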
 */
static int
service_iq(struct sge_iq *iq, int budget)
{
	struct sge_iq *q;
	struct adapter *sc = iq->adapter;
	struct iq_desc *d = &iq->desc[iq->cidx];
	int ndescs = 0, limit;
	int rsp_type;
	uint32_t lq;
	STAILQ_HEAD(, sge_iq) iql = STAILQ_HEAD_INITIALIZER(iql);

	KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq));
	KASSERT((iq->flags & IQ_HAS_FL) == 0,
	    ("%s: called for iq %p with fl (iq->flags 0x%x)", __func__, iq,
	    iq->flags));
	MPASS((iq->flags & IQ_ADJ_CREDIT) == 0);
	MPASS((iq->flags & IQ_LRO_ENABLED) == 0);

	limit = budget ? budget : iq->qsize / 16;

	/*
	 * We always come back and check the descriptor ring for new indirect
	 * interrupts and other responses after running a single handler.
	 */
	for (;;) {
		while ((d->rsp.u.type_gen & F_RSPD_GEN) == iq->gen) {

			rmb();

			rsp_type = G_RSPD_TYPE(d->rsp.u.type_gen);
			lq = be32toh(d->rsp.pldbuflen_qid);

			switch (rsp_type) {
			case X_RSPD_TYPE_FLBUF:
				panic("%s: data for an iq (%p) with no freelist",
				    __func__, iq);

				/* NOTREACHED */

			case X_RSPD_TYPE_CPL:
				KASSERT(d->rss.opcode < NUM_CPL_CMDS,
				    ("%s: bad opcode %02x.", __func__,
				    d->rss.opcode));
				t4_cpl_handler[d->rss.opcode](iq, &d->rss, NULL);
				break;

			case X_RSPD_TYPE_INTR:
				/*
				 * There are 1K interrupt-capable queues (qids 0
				 * through 1023).  A response type indicating a
				 * forwarded interrupt with a qid >= 1K is an
				 * iWARP async notification.
				 */
				if (__predict_true(lq >= 1024)) {
					t4_an_handler(iq, &d->rsp);
					break;
				}

				q = sc->sge.iqmap[lq - sc->sge.iq_start -
				    sc->sge.iq_base];
				if (atomic_cmpset_int(&q->state, IQS_IDLE,
				    IQS_BUSY)) {
					if (service_iq_fl(q, q->qsize / 16) == 0) {
						(void) atomic_cmpset_int(&q->state,
						    IQS_BUSY, IQS_IDLE);
					} else {
						STAILQ_INSERT_TAIL(&iql, q,
						    link);
					}
				}
				break;

			default:
				KASSERT(0,
				    ("%s: illegal response type %d on iq %p",
				    __func__, rsp_type, iq));
				log(LOG_ERR,
				    "%s: illegal response type %d on iq %p",
				    device_get_nameunit(sc->dev), rsp_type, iq);
				break;
			}

			d++;
			if (__predict_false(++iq->cidx == iq->sidx)) {
				iq->cidx = 0;
				iq->gen ^= F_RSPD_GEN;
				d = &iq->desc[0];
			}
			if (__predict_false(++ndescs == limit)) {
				t4_write_reg(sc, sc->sge_gts_reg,
				    V_CIDXINC(ndescs) |
				    V_INGRESSQID(iq->cntxt_id) |
				    V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
				ndescs = 0;

				if (budget) {
					return (EINPROGRESS);
				}
			}
		}

		if (STAILQ_EMPTY(&iql))
			break;

		/*
		 * Process the head only, and send it to the back of the list if
		 * it's still not done.
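		 * (It gets a larger budget this time: qsize / 8 instead of
		 * qsize / 16.)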
		 */
		q = STAILQ_FIRST(&iql);
		STAILQ_REMOVE_HEAD(&iql, link);
		if (service_iq_fl(q, q->qsize / 8) == 0)
			(void) atomic_cmpset_int(&q->state, IQS_BUSY, IQS_IDLE);
		else
			STAILQ_INSERT_TAIL(&iql, q, link);
	}

	t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) |
	    V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params));

	return (0);
}

static inline int
sort_before_lro(struct lro_ctrl *lro)
{

	return (lro->lro_mbuf_max != 0);
}

static inline uint64_t
last_flit_to_ns(struct adapter *sc, uint64_t lf)
{
	uint64_t n = be64toh(lf) & 0xfffffffffffffff;	/* 60b, not 64b. */

	if (n > UINT64_MAX / 1000000)
		return (n / sc->params.vpd.cclk * 1000000);
	else
		return (n * 1000000 / sc->params.vpd.cclk);
}

/*
 * Deals with interrupts on an iq+fl queue.
 */
static int
service_iq_fl(struct sge_iq *iq, int budget)
{
	struct sge_rxq *rxq = iq_to_rxq(iq);
	struct sge_fl *fl;
	struct adapter *sc = iq->adapter;
	struct iq_desc *d = &iq->desc[iq->cidx];
	int ndescs = 0, limit;
	int rsp_type, refill, starved;
	uint32_t lq;
	uint16_t fl_hw_cidx;
	struct mbuf *m0;
#if defined(INET) || defined(INET6)
	const struct timeval lro_timeout = {0, sc->lro_timeout};
	struct lro_ctrl *lro = &rxq->lro;
#endif

	KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq));
	MPASS(iq->flags & IQ_HAS_FL);

	limit = budget ? budget : iq->qsize / 16;
	fl = &rxq->fl;
	fl_hw_cidx = fl->hw_cidx;	/* stable snapshot */

#if defined(INET) || defined(INET6)
	if (iq->flags & IQ_ADJ_CREDIT) {
		MPASS(sort_before_lro(lro));
		iq->flags &= ~IQ_ADJ_CREDIT;
		if ((d->rsp.u.type_gen & F_RSPD_GEN) != iq->gen) {
			tcp_lro_flush_all(lro);
			t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(1) |
			    V_INGRESSQID((u32)iq->cntxt_id) |
			    V_SEINTARM(iq->intr_params));
			return (0);
		}
		ndescs = 1;
	}
#else
	MPASS((iq->flags & IQ_ADJ_CREDIT) == 0);
#endif

	while ((d->rsp.u.type_gen & F_RSPD_GEN) == iq->gen) {

		rmb();

		refill = 0;
		m0 = NULL;
		rsp_type = G_RSPD_TYPE(d->rsp.u.type_gen);
		lq = be32toh(d->rsp.pldbuflen_qid);

		switch (rsp_type) {
		case X_RSPD_TYPE_FLBUF:

			m0 = get_fl_payload(sc, fl, lq);
			if (__predict_false(m0 == NULL))
				goto out;
			refill = IDXDIFF(fl->hw_cidx, fl_hw_cidx, fl->sidx) > 2;

			if (iq->flags & IQ_RX_TIMESTAMP) {
				/*
				 * Fill up rcv_tstmp but do not set M_TSTMP.
				 * rcv_tstmp is not in the format that the
				 * kernel expects and we don't want to mislead
				 * it.  For now this is only for custom code
				 * that knows how to interpret cxgbe's stamp.
				 */
				m0->m_pkthdr.rcv_tstmp =
				    last_flit_to_ns(sc, d->rsp.u.last_flit);
#ifdef notyet
				m0->m_flags |= M_TSTMP;
#endif
			}

			/* fall through */

		case X_RSPD_TYPE_CPL:
			KASSERT(d->rss.opcode < NUM_CPL_CMDS,
			    ("%s: bad opcode %02x.", __func__, d->rss.opcode));
			t4_cpl_handler[d->rss.opcode](iq, &d->rss, m0);
			break;

		case X_RSPD_TYPE_INTR:

			/*
			 * There are 1K interrupt-capable queues (qids 0
			 * through 1023).  A response type indicating a
			 * forwarded interrupt with a qid >= 1K is an
			 * iWARP async notification.  That is the only
			 * acceptable indirect interrupt on this queue.
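			 * Anything with a qid below 1K here is a driver bug
			 * and we panic.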
			 */
			if (__predict_false(lq < 1024)) {
				panic("%s: indirect interrupt on iq_fl %p "
				    "with qid %u", __func__, iq, lq);
			}

			t4_an_handler(iq, &d->rsp);
			break;

		default:
			KASSERT(0, ("%s: illegal response type %d on iq %p",
			    __func__, rsp_type, iq));
			log(LOG_ERR, "%s: illegal response type %d on iq %p",
			    device_get_nameunit(sc->dev), rsp_type, iq);
			break;
		}

		d++;
		if (__predict_false(++iq->cidx == iq->sidx)) {
			iq->cidx = 0;
			iq->gen ^= F_RSPD_GEN;
			d = &iq->desc[0];
		}
		if (__predict_false(++ndescs == limit)) {
			t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) |
			    V_INGRESSQID(iq->cntxt_id) |
			    V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
			ndescs = 0;

#if defined(INET) || defined(INET6)
			if (iq->flags & IQ_LRO_ENABLED &&
			    !sort_before_lro(lro) &&
			    sc->lro_timeout != 0) {
				tcp_lro_flush_inactive(lro, &lro_timeout);
			}
#endif
			if (budget) {
				FL_LOCK(fl);
				refill_fl(sc, fl, 32);
				FL_UNLOCK(fl);

				return (EINPROGRESS);
			}
		}
		if (refill) {
			FL_LOCK(fl);
			refill_fl(sc, fl, 32);
			FL_UNLOCK(fl);
			fl_hw_cidx = fl->hw_cidx;
		}
	}
out:
#if defined(INET) || defined(INET6)
	if (iq->flags & IQ_LRO_ENABLED) {
		if (ndescs > 0 && lro->lro_mbuf_count > 8) {
			MPASS(sort_before_lro(lro));
			/* hold back one credit and don't flush LRO state */
			iq->flags |= IQ_ADJ_CREDIT;
			ndescs--;
		} else {
			tcp_lro_flush_all(lro);
		}
	}
#endif

	t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) |
	    V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params));

	FL_LOCK(fl);
	starved = refill_fl(sc, fl, 64);
	FL_UNLOCK(fl);
	if (__predict_false(starved != 0))
		add_fl_to_sfl(sc, fl);

	return (0);
}

static inline int
cl_has_metadata(struct sge_fl *fl, struct cluster_layout *cll)
{
	int rc = fl->flags & FL_BUF_PACKING || cll->region1 > 0;

	if (rc)
		MPASS(cll->region3 >= CL_METADATA_SIZE);

	return (rc);
}

static inline struct cluster_metadata *
cl_metadata(struct adapter *sc, struct sge_fl *fl, struct cluster_layout *cll,
    caddr_t cl)
{

	if (cl_has_metadata(fl, cll)) {
		struct sw_zone_info *swz = &sc->sge.sw_zone_info[cll->zidx];

		return ((struct cluster_metadata *)(cl + swz->size) - 1);
	}
	return (NULL);
}

static void
rxb_free(struct mbuf *m)
{
	uma_zone_t zone = m->m_ext.ext_arg1;
	void *cl = m->m_ext.ext_arg2;

	uma_zfree(zone, cl);
	counter_u64_add(extfree_rels, 1);
}

/*
 * The mbuf returned by this function could be allocated from zone_mbuf or
 * constructed in spare room in the cluster.
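 * (There is spare room only while the cluster layout's region1 still has space
 * beyond the inline mbufs already created; see the nmbuf/region1 check below.)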
 *
 * The mbuf carries the payload in one of these ways
 * a) frame inside the mbuf (mbuf from zone_mbuf)
 * b) m_cljset (for clusters without metadata) zone_mbuf
 * c) m_extaddref (cluster with metadata) inline mbuf
 * d) m_extaddref (cluster with metadata) zone_mbuf
 */
static struct mbuf *
get_scatter_segment(struct adapter *sc, struct sge_fl *fl, int fr_offset,
    int remaining)
{
	struct mbuf *m;
	struct fl_sdesc *sd = &fl->sdesc[fl->cidx];
	struct cluster_layout *cll = &sd->cll;
	struct sw_zone_info *swz = &sc->sge.sw_zone_info[cll->zidx];
	struct hw_buf_info *hwb = &sc->sge.hw_buf_info[cll->hwidx];
	struct cluster_metadata *clm = cl_metadata(sc, fl, cll, sd->cl);
	int len, blen;
	caddr_t payload;

	blen = hwb->size - fl->rx_offset;	/* max possible in this buf */
	len = min(remaining, blen);
	payload = sd->cl + cll->region1 + fl->rx_offset;
	if (fl->flags & FL_BUF_PACKING) {
		const u_int l = fr_offset + len;
		const u_int pad = roundup2(l, fl->buf_boundary) - l;

		if (fl->rx_offset + len + pad < hwb->size)
			blen = len + pad;
		MPASS(fl->rx_offset + blen <= hwb->size);
	} else {
		MPASS(fl->rx_offset == 0);	/* not packing */
	}


	if (sc->sc_do_rxcopy && len < RX_COPY_THRESHOLD) {

		/*
		 * Copy payload into a freshly allocated mbuf.
		 */

		m = fr_offset == 0 ?
		    m_gethdr(M_NOWAIT, MT_DATA) : m_get(M_NOWAIT, MT_DATA);
		if (m == NULL)
			return (NULL);
		fl->mbuf_allocated++;

		/* copy data to mbuf */
		bcopy(payload, mtod(m, caddr_t), len);

	} else if (sd->nmbuf * MSIZE < cll->region1) {

		/*
		 * There's spare room in the cluster for an mbuf.  Create one
		 * and associate it with the payload that's in the cluster.
		 */

		MPASS(clm != NULL);
		m = (struct mbuf *)(sd->cl + sd->nmbuf * MSIZE);
		/* No bzero required */
		if (m_init(m, M_NOWAIT, MT_DATA,
		    fr_offset == 0 ? M_PKTHDR | M_NOFREE : M_NOFREE))
			return (NULL);
		fl->mbuf_inlined++;
		m_extaddref(m, payload, blen, &clm->refcount, rxb_free,
		    swz->zone, sd->cl);
		if (sd->nmbuf++ == 0)
			counter_u64_add(extfree_refs, 1);

	} else {

		/*
		 * Grab an mbuf from zone_mbuf and associate it with the
		 * payload in the cluster.
		 */

		m = fr_offset == 0 ?
1891 m_gethdr(M_NOWAIT, MT_DATA) : m_get(M_NOWAIT, MT_DATA); 1892 if (m == NULL) 1893 return (NULL); 1894 fl->mbuf_allocated++; 1895 if (clm != NULL) { 1896 m_extaddref(m, payload, blen, &clm->refcount, 1897 rxb_free, swz->zone, sd->cl); 1898 if (sd->nmbuf++ == 0) 1899 counter_u64_add(extfree_refs, 1); 1900 } else { 1901 m_cljset(m, sd->cl, swz->type); 1902 sd->cl = NULL; /* consumed, not a recycle candidate */ 1903 } 1904 } 1905 if (fr_offset == 0) 1906 m->m_pkthdr.len = remaining; 1907 m->m_len = len; 1908 1909 if (fl->flags & FL_BUF_PACKING) { 1910 fl->rx_offset += blen; 1911 MPASS(fl->rx_offset <= hwb->size); 1912 if (fl->rx_offset < hwb->size) 1913 return (m); /* without advancing the cidx */ 1914 } 1915 1916 if (__predict_false(++fl->cidx % 8 == 0)) { 1917 uint16_t cidx = fl->cidx / 8; 1918 1919 if (__predict_false(cidx == fl->sidx)) 1920 fl->cidx = cidx = 0; 1921 fl->hw_cidx = cidx; 1922 } 1923 fl->rx_offset = 0; 1924 1925 return (m); 1926 } 1927 1928 static struct mbuf * 1929 get_fl_payload(struct adapter *sc, struct sge_fl *fl, uint32_t len_newbuf) 1930 { 1931 struct mbuf *m0, *m, **pnext; 1932 u_int remaining; 1933 const u_int total = G_RSPD_LEN(len_newbuf); 1934 1935 if (__predict_false(fl->flags & FL_BUF_RESUME)) { 1936 M_ASSERTPKTHDR(fl->m0); 1937 MPASS(fl->m0->m_pkthdr.len == total); 1938 MPASS(fl->remaining < total); 1939 1940 m0 = fl->m0; 1941 pnext = fl->pnext; 1942 remaining = fl->remaining; 1943 fl->flags &= ~FL_BUF_RESUME; 1944 goto get_segment; 1945 } 1946 1947 if (fl->rx_offset > 0 && len_newbuf & F_RSPD_NEWBUF) { 1948 fl->rx_offset = 0; 1949 if (__predict_false(++fl->cidx % 8 == 0)) { 1950 uint16_t cidx = fl->cidx / 8; 1951 1952 if (__predict_false(cidx == fl->sidx)) 1953 fl->cidx = cidx = 0; 1954 fl->hw_cidx = cidx; 1955 } 1956 } 1957 1958 /* 1959 * Payload starts at rx_offset in the current hw buffer. Its length is 1960 * 'len' and it may span multiple hw buffers. 
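 *
 * (The software cidx counts individual freelist buffers while hw_cidx
 * advances in units of 8 buffers, which is why cidx is divided by 8 above
 * and in get_scatter_segment().  If an mbuf allocation fails partway through
 * a multi-buffer frame, the partially assembled chain is parked on the
 * freelist with FL_BUF_RESUME and reassembly resumes on the next call.)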
1961 */ 1962 1963 m0 = get_scatter_segment(sc, fl, 0, total); 1964 if (m0 == NULL) 1965 return (NULL); 1966 remaining = total - m0->m_len; 1967 pnext = &m0->m_next; 1968 while (remaining > 0) { 1969 get_segment: 1970 MPASS(fl->rx_offset == 0); 1971 m = get_scatter_segment(sc, fl, total - remaining, remaining); 1972 if (__predict_false(m == NULL)) { 1973 fl->m0 = m0; 1974 fl->pnext = pnext; 1975 fl->remaining = remaining; 1976 fl->flags |= FL_BUF_RESUME; 1977 return (NULL); 1978 } 1979 *pnext = m; 1980 pnext = &m->m_next; 1981 remaining -= m->m_len; 1982 } 1983 *pnext = NULL; 1984 1985 M_ASSERTPKTHDR(m0); 1986 return (m0); 1987 } 1988 1989 static int 1990 t4_eth_rx(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m0) 1991 { 1992 struct sge_rxq *rxq = iq_to_rxq(iq); 1993 struct ifnet *ifp = rxq->ifp; 1994 struct adapter *sc = iq->adapter; 1995 const struct cpl_rx_pkt *cpl = (const void *)(rss + 1); 1996 #if defined(INET) || defined(INET6) 1997 struct lro_ctrl *lro = &rxq->lro; 1998 #endif 1999 static const int sw_hashtype[4][2] = { 2000 {M_HASHTYPE_NONE, M_HASHTYPE_NONE}, 2001 {M_HASHTYPE_RSS_IPV4, M_HASHTYPE_RSS_IPV6}, 2002 {M_HASHTYPE_RSS_TCP_IPV4, M_HASHTYPE_RSS_TCP_IPV6}, 2003 {M_HASHTYPE_RSS_UDP_IPV4, M_HASHTYPE_RSS_UDP_IPV6}, 2004 }; 2005 2006 KASSERT(m0 != NULL, ("%s: no payload with opcode %02x", __func__, 2007 rss->opcode)); 2008 2009 m0->m_pkthdr.len -= sc->params.sge.fl_pktshift; 2010 m0->m_len -= sc->params.sge.fl_pktshift; 2011 m0->m_data += sc->params.sge.fl_pktshift; 2012 2013 m0->m_pkthdr.rcvif = ifp; 2014 M_HASHTYPE_SET(m0, sw_hashtype[rss->hash_type][rss->ipv6]); 2015 m0->m_pkthdr.flowid = be32toh(rss->hash_val); 2016 2017 if (cpl->csum_calc && !(cpl->err_vec & sc->params.tp.err_vec_mask)) { 2018 if (ifp->if_capenable & IFCAP_RXCSUM && 2019 cpl->l2info & htobe32(F_RXF_IP)) { 2020 m0->m_pkthdr.csum_flags = (CSUM_IP_CHECKED | 2021 CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR); 2022 rxq->rxcsum++; 2023 } else if (ifp->if_capenable & IFCAP_RXCSUM_IPV6 && 2024 cpl->l2info & htobe32(F_RXF_IP6)) { 2025 m0->m_pkthdr.csum_flags = (CSUM_DATA_VALID_IPV6 | 2026 CSUM_PSEUDO_HDR); 2027 rxq->rxcsum++; 2028 } 2029 2030 if (__predict_false(cpl->ip_frag)) 2031 m0->m_pkthdr.csum_data = be16toh(cpl->csum); 2032 else 2033 m0->m_pkthdr.csum_data = 0xffff; 2034 } 2035 2036 if (cpl->vlan_ex) { 2037 m0->m_pkthdr.ether_vtag = be16toh(cpl->vlan); 2038 m0->m_flags |= M_VLANTAG; 2039 rxq->vlan_extraction++; 2040 } 2041 2042 #if defined(INET) || defined(INET6) 2043 if (iq->flags & IQ_LRO_ENABLED) { 2044 if (sort_before_lro(lro)) { 2045 tcp_lro_queue_mbuf(lro, m0); 2046 return (0); /* queued for sort, then LRO */ 2047 } 2048 if (tcp_lro_rx(lro, m0, 0) == 0) 2049 return (0); /* queued for LRO */ 2050 } 2051 #endif 2052 ifp->if_input(ifp, m0); 2053 2054 return (0); 2055 } 2056 2057 /* 2058 * Must drain the wrq or make sure that someone else will. 
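 *
 * This is the wrq's deferred-tx task handler.  It is a no-op while there are
 * incomplete WRs outstanding because commit_wrq_wr() drains wr_list itself
 * once the last of those is committed.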
2059 */ 2060 static void 2061 wrq_tx_drain(void *arg, int n) 2062 { 2063 struct sge_wrq *wrq = arg; 2064 struct sge_eq *eq = &wrq->eq; 2065 2066 EQ_LOCK(eq); 2067 if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list)) 2068 drain_wrq_wr_list(wrq->adapter, wrq); 2069 EQ_UNLOCK(eq); 2070 } 2071 2072 static void 2073 drain_wrq_wr_list(struct adapter *sc, struct sge_wrq *wrq) 2074 { 2075 struct sge_eq *eq = &wrq->eq; 2076 u_int available, dbdiff; /* # of hardware descriptors */ 2077 u_int n; 2078 struct wrqe *wr; 2079 struct fw_eth_tx_pkt_wr *dst; /* any fw WR struct will do */ 2080 2081 EQ_LOCK_ASSERT_OWNED(eq); 2082 MPASS(TAILQ_EMPTY(&wrq->incomplete_wrs)); 2083 wr = STAILQ_FIRST(&wrq->wr_list); 2084 MPASS(wr != NULL); /* Must be called with something useful to do */ 2085 MPASS(eq->pidx == eq->dbidx); 2086 dbdiff = 0; 2087 2088 do { 2089 eq->cidx = read_hw_cidx(eq); 2090 if (eq->pidx == eq->cidx) 2091 available = eq->sidx - 1; 2092 else 2093 available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; 2094 2095 MPASS(wr->wrq == wrq); 2096 n = howmany(wr->wr_len, EQ_ESIZE); 2097 if (available < n) 2098 break; 2099 2100 dst = (void *)&eq->desc[eq->pidx]; 2101 if (__predict_true(eq->sidx - eq->pidx > n)) { 2102 /* Won't wrap, won't end exactly at the status page. */ 2103 bcopy(&wr->wr[0], dst, wr->wr_len); 2104 eq->pidx += n; 2105 } else { 2106 int first_portion = (eq->sidx - eq->pidx) * EQ_ESIZE; 2107 2108 bcopy(&wr->wr[0], dst, first_portion); 2109 if (wr->wr_len > first_portion) { 2110 bcopy(&wr->wr[first_portion], &eq->desc[0], 2111 wr->wr_len - first_portion); 2112 } 2113 eq->pidx = n - (eq->sidx - eq->pidx); 2114 } 2115 wrq->tx_wrs_copied++; 2116 2117 if (available < eq->sidx / 4 && 2118 atomic_cmpset_int(&eq->equiq, 0, 1)) { 2119 /* 2120 * XXX: This is not 100% reliable with some 2121 * types of WRs. But this is a very unusual 2122 * situation for an ofld/ctrl queue anyway. 2123 */ 2124 dst->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ | 2125 F_FW_WR_EQUEQ); 2126 } 2127 2128 dbdiff += n; 2129 if (dbdiff >= 16) { 2130 ring_eq_db(sc, eq, dbdiff); 2131 dbdiff = 0; 2132 } 2133 2134 STAILQ_REMOVE_HEAD(&wrq->wr_list, link); 2135 free_wrqe(wr); 2136 MPASS(wrq->nwr_pending > 0); 2137 wrq->nwr_pending--; 2138 MPASS(wrq->ndesc_needed >= n); 2139 wrq->ndesc_needed -= n; 2140 } while ((wr = STAILQ_FIRST(&wrq->wr_list)) != NULL); 2141 2142 if (dbdiff) 2143 ring_eq_db(sc, eq, dbdiff); 2144 } 2145 2146 /* 2147 * Doesn't fail. Holds on to work requests it can't send right away. 2148 */ 2149 void 2150 t4_wrq_tx_locked(struct adapter *sc, struct sge_wrq *wrq, struct wrqe *wr) 2151 { 2152 #ifdef INVARIANTS 2153 struct sge_eq *eq = &wrq->eq; 2154 #endif 2155 2156 EQ_LOCK_ASSERT_OWNED(eq); 2157 MPASS(wr != NULL); 2158 MPASS(wr->wr_len > 0 && wr->wr_len <= SGE_MAX_WR_LEN); 2159 MPASS((wr->wr_len & 0x7) == 0); 2160 2161 STAILQ_INSERT_TAIL(&wrq->wr_list, wr, link); 2162 wrq->nwr_pending++; 2163 wrq->ndesc_needed += howmany(wr->wr_len, EQ_ESIZE); 2164 2165 if (!TAILQ_EMPTY(&wrq->incomplete_wrs)) 2166 return; /* commit_wrq_wr will drain wr_list as well. */ 2167 2168 drain_wrq_wr_list(sc, wrq); 2169 2170 /* Doorbell must have caught up to the pidx. 
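	 * (drain_wrq_wr_list rings the doorbell for every descriptor it
	 * copies out, so dbidx cannot be left behind pidx at this point.)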
*/ 2171 MPASS(eq->pidx == eq->dbidx); 2172 } 2173 2174 void 2175 t4_update_fl_bufsize(struct ifnet *ifp) 2176 { 2177 struct vi_info *vi = ifp->if_softc; 2178 struct adapter *sc = vi->pi->adapter; 2179 struct sge_rxq *rxq; 2180 #ifdef TCP_OFFLOAD 2181 struct sge_ofld_rxq *ofld_rxq; 2182 #endif 2183 struct sge_fl *fl; 2184 int i, maxp, mtu = ifp->if_mtu; 2185 2186 maxp = mtu_to_max_payload(sc, mtu, 0); 2187 for_each_rxq(vi, i, rxq) { 2188 fl = &rxq->fl; 2189 2190 FL_LOCK(fl); 2191 find_best_refill_source(sc, fl, maxp); 2192 FL_UNLOCK(fl); 2193 } 2194 #ifdef TCP_OFFLOAD 2195 maxp = mtu_to_max_payload(sc, mtu, 1); 2196 for_each_ofld_rxq(vi, i, ofld_rxq) { 2197 fl = &ofld_rxq->fl; 2198 2199 FL_LOCK(fl); 2200 find_best_refill_source(sc, fl, maxp); 2201 FL_UNLOCK(fl); 2202 } 2203 #endif 2204 } 2205 2206 static inline int 2207 mbuf_nsegs(struct mbuf *m) 2208 { 2209 2210 M_ASSERTPKTHDR(m); 2211 KASSERT(m->m_pkthdr.l5hlen > 0, 2212 ("%s: mbuf %p missing information on # of segments.", __func__, m)); 2213 2214 return (m->m_pkthdr.l5hlen); 2215 } 2216 2217 static inline void 2218 set_mbuf_nsegs(struct mbuf *m, uint8_t nsegs) 2219 { 2220 2221 M_ASSERTPKTHDR(m); 2222 m->m_pkthdr.l5hlen = nsegs; 2223 } 2224 2225 static inline int 2226 mbuf_cflags(struct mbuf *m) 2227 { 2228 2229 M_ASSERTPKTHDR(m); 2230 return (m->m_pkthdr.PH_loc.eight[4]); 2231 } 2232 2233 static inline void 2234 set_mbuf_cflags(struct mbuf *m, uint8_t flags) 2235 { 2236 2237 M_ASSERTPKTHDR(m); 2238 m->m_pkthdr.PH_loc.eight[4] = flags; 2239 } 2240 2241 static inline int 2242 mbuf_len16(struct mbuf *m) 2243 { 2244 int n; 2245 2246 M_ASSERTPKTHDR(m); 2247 n = m->m_pkthdr.PH_loc.eight[0]; 2248 MPASS(n > 0 && n <= SGE_MAX_WR_LEN / 16); 2249 2250 return (n); 2251 } 2252 2253 static inline void 2254 set_mbuf_len16(struct mbuf *m, uint8_t len16) 2255 { 2256 2257 M_ASSERTPKTHDR(m); 2258 m->m_pkthdr.PH_loc.eight[0] = len16; 2259 } 2260 2261 #ifdef RATELIMIT 2262 static inline int 2263 mbuf_eo_nsegs(struct mbuf *m) 2264 { 2265 2266 M_ASSERTPKTHDR(m); 2267 return (m->m_pkthdr.PH_loc.eight[1]); 2268 } 2269 2270 static inline void 2271 set_mbuf_eo_nsegs(struct mbuf *m, uint8_t nsegs) 2272 { 2273 2274 M_ASSERTPKTHDR(m); 2275 m->m_pkthdr.PH_loc.eight[1] = nsegs; 2276 } 2277 2278 static inline int 2279 mbuf_eo_len16(struct mbuf *m) 2280 { 2281 int n; 2282 2283 M_ASSERTPKTHDR(m); 2284 n = m->m_pkthdr.PH_loc.eight[2]; 2285 MPASS(n > 0 && n <= SGE_MAX_WR_LEN / 16); 2286 2287 return (n); 2288 } 2289 2290 static inline void 2291 set_mbuf_eo_len16(struct mbuf *m, uint8_t len16) 2292 { 2293 2294 M_ASSERTPKTHDR(m); 2295 m->m_pkthdr.PH_loc.eight[2] = len16; 2296 } 2297 2298 static inline int 2299 mbuf_eo_tsclk_tsoff(struct mbuf *m) 2300 { 2301 2302 M_ASSERTPKTHDR(m); 2303 return (m->m_pkthdr.PH_loc.eight[3]); 2304 } 2305 2306 static inline void 2307 set_mbuf_eo_tsclk_tsoff(struct mbuf *m, uint8_t tsclk_tsoff) 2308 { 2309 2310 M_ASSERTPKTHDR(m); 2311 m->m_pkthdr.PH_loc.eight[3] = tsclk_tsoff; 2312 } 2313 2314 static inline int 2315 needs_eo(struct mbuf *m) 2316 { 2317 2318 return (m->m_pkthdr.snd_tag != NULL); 2319 } 2320 #endif 2321 2322 /* 2323 * Try to allocate an mbuf to contain a raw work request. To make it 2324 * easy to construct the work request, don't allocate a chain but a 2325 * single mbuf. 
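 *
 * A minimal usage sketch (the WR type is hypothetical): the caller builds
 * the raw work request directly in the mbuf's data area and then hands the
 * mbuf to the usual tx path, which recognizes MC_RAW_WR and emits it via
 * write_raw_wr() instead of treating it as an Ethernet frame.
 *
 *	m = alloc_wr_mbuf(wr_len, M_NOWAIT);
 *	if (m != NULL) {
 *		wr = mtod(m, struct fw_hypothetical_wr *);
 *		... fill in *wr (wr_len bytes) ...
 *		... enqueue m on the appropriate txq ...
 *	}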
2326 */ 2327 struct mbuf * 2328 alloc_wr_mbuf(int len, int how) 2329 { 2330 struct mbuf *m; 2331 2332 if (len <= MHLEN) 2333 m = m_gethdr(how, MT_DATA); 2334 else if (len <= MCLBYTES) 2335 m = m_getcl(how, MT_DATA, M_PKTHDR); 2336 else 2337 m = NULL; 2338 if (m == NULL) 2339 return (NULL); 2340 m->m_pkthdr.len = len; 2341 m->m_len = len; 2342 set_mbuf_cflags(m, MC_RAW_WR); 2343 set_mbuf_len16(m, howmany(len, 16)); 2344 return (m); 2345 } 2346 2347 static inline int 2348 needs_tso(struct mbuf *m) 2349 { 2350 2351 M_ASSERTPKTHDR(m); 2352 2353 return (m->m_pkthdr.csum_flags & CSUM_TSO); 2354 } 2355 2356 static inline int 2357 needs_l3_csum(struct mbuf *m) 2358 { 2359 2360 M_ASSERTPKTHDR(m); 2361 2362 return (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO)); 2363 } 2364 2365 static inline int 2366 needs_l4_csum(struct mbuf *m) 2367 { 2368 2369 M_ASSERTPKTHDR(m); 2370 2371 return (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 | 2372 CSUM_TCP_IPV6 | CSUM_TSO)); 2373 } 2374 2375 static inline int 2376 needs_tcp_csum(struct mbuf *m) 2377 { 2378 2379 M_ASSERTPKTHDR(m); 2380 return (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_TCP_IPV6 | CSUM_TSO)); 2381 } 2382 2383 #ifdef RATELIMIT 2384 static inline int 2385 needs_udp_csum(struct mbuf *m) 2386 { 2387 2388 M_ASSERTPKTHDR(m); 2389 return (m->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_UDP_IPV6)); 2390 } 2391 #endif 2392 2393 static inline int 2394 needs_vlan_insertion(struct mbuf *m) 2395 { 2396 2397 M_ASSERTPKTHDR(m); 2398 2399 return (m->m_flags & M_VLANTAG); 2400 } 2401 2402 static void * 2403 m_advance(struct mbuf **pm, int *poffset, int len) 2404 { 2405 struct mbuf *m = *pm; 2406 int offset = *poffset; 2407 uintptr_t p = 0; 2408 2409 MPASS(len > 0); 2410 2411 for (;;) { 2412 if (offset + len < m->m_len) { 2413 offset += len; 2414 p = mtod(m, uintptr_t) + offset; 2415 break; 2416 } 2417 len -= m->m_len - offset; 2418 m = m->m_next; 2419 offset = 0; 2420 MPASS(m != NULL); 2421 } 2422 *poffset = offset; 2423 *pm = m; 2424 return ((void *)p); 2425 } 2426 2427 /* 2428 * Can deal with empty mbufs in the chain that have m_len = 0, but the chain 2429 * must have at least one mbuf that's not empty. It is possible for this 2430 * routine to return 0 if skip accounts for all the contents of the mbuf chain. 2431 */ 2432 static inline int 2433 count_mbuf_nsegs(struct mbuf *m, int skip) 2434 { 2435 vm_paddr_t lastb, next; 2436 vm_offset_t va; 2437 int len, nsegs; 2438 2439 M_ASSERTPKTHDR(m); 2440 MPASS(m->m_pkthdr.len > 0); 2441 MPASS(m->m_pkthdr.len >= skip); 2442 2443 nsegs = 0; 2444 lastb = 0; 2445 for (; m; m = m->m_next) { 2446 2447 len = m->m_len; 2448 if (__predict_false(len == 0)) 2449 continue; 2450 if (skip >= len) { 2451 skip -= len; 2452 continue; 2453 } 2454 va = mtod(m, vm_offset_t) + skip; 2455 len -= skip; 2456 skip = 0; 2457 next = pmap_kextract(va); 2458 nsegs += sglist_count((void *)(uintptr_t)va, len); 2459 if (lastb + 1 == next) 2460 nsegs--; 2461 lastb = pmap_kextract(va + len - 1); 2462 } 2463 2464 return (nsegs); 2465 } 2466 2467 /* 2468 * Analyze the mbuf to determine its tx needs. The mbuf passed in may change: 2469 * a) caller can assume it's been freed if this function returns with an error. 2470 * b) it may get defragged up if the gather list is too long for the hardware. 
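 * c) a small packet (one that fits in MHLEN) with more than two segments may
 *    get pulled up into a single mbuf.
 *
 * On success the segment count and the WR length (in 16-byte units) are
 * stashed in the pkthdr scratch space (set_mbuf_nsegs/set_mbuf_len16) for
 * the tx path to use later.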
2471 */ 2472 int 2473 parse_pkt(struct adapter *sc, struct mbuf **mp) 2474 { 2475 struct mbuf *m0 = *mp, *m; 2476 int rc, nsegs, defragged = 0, offset; 2477 struct ether_header *eh; 2478 void *l3hdr; 2479 #if defined(INET) || defined(INET6) 2480 struct tcphdr *tcp; 2481 #endif 2482 uint16_t eh_type; 2483 2484 M_ASSERTPKTHDR(m0); 2485 if (__predict_false(m0->m_pkthdr.len < ETHER_HDR_LEN)) { 2486 rc = EINVAL; 2487 fail: 2488 m_freem(m0); 2489 *mp = NULL; 2490 return (rc); 2491 } 2492 restart: 2493 /* 2494 * First count the number of gather list segments in the payload. 2495 * Defrag the mbuf if nsegs exceeds the hardware limit. 2496 */ 2497 M_ASSERTPKTHDR(m0); 2498 MPASS(m0->m_pkthdr.len > 0); 2499 nsegs = count_mbuf_nsegs(m0, 0); 2500 if (nsegs > (needs_tso(m0) ? TX_SGL_SEGS_TSO : TX_SGL_SEGS)) { 2501 if (defragged++ > 0 || (m = m_defrag(m0, M_NOWAIT)) == NULL) { 2502 rc = EFBIG; 2503 goto fail; 2504 } 2505 *mp = m0 = m; /* update caller's copy after defrag */ 2506 goto restart; 2507 } 2508 2509 if (__predict_false(nsegs > 2 && m0->m_pkthdr.len <= MHLEN)) { 2510 m0 = m_pullup(m0, m0->m_pkthdr.len); 2511 if (m0 == NULL) { 2512 /* Should have left well enough alone. */ 2513 rc = EFBIG; 2514 goto fail; 2515 } 2516 *mp = m0; /* update caller's copy after pullup */ 2517 goto restart; 2518 } 2519 set_mbuf_nsegs(m0, nsegs); 2520 set_mbuf_cflags(m0, 0); 2521 if (sc->flags & IS_VF) 2522 set_mbuf_len16(m0, txpkt_vm_len16(nsegs, needs_tso(m0))); 2523 else 2524 set_mbuf_len16(m0, txpkt_len16(nsegs, needs_tso(m0))); 2525 2526 #ifdef RATELIMIT 2527 /* 2528 * Ethofld is limited to TCP and UDP for now, and only when L4 hw 2529 * checksumming is enabled. needs_l4_csum happens to check for all the 2530 * right things. 2531 */ 2532 if (__predict_false(needs_eo(m0) && !needs_l4_csum(m0))) 2533 m0->m_pkthdr.snd_tag = NULL; 2534 #endif 2535 2536 if (!needs_tso(m0) && 2537 #ifdef RATELIMIT 2538 !needs_eo(m0) && 2539 #endif 2540 !(sc->flags & IS_VF && (needs_l3_csum(m0) || needs_l4_csum(m0)))) 2541 return (0); 2542 2543 m = m0; 2544 eh = mtod(m, struct ether_header *); 2545 eh_type = ntohs(eh->ether_type); 2546 if (eh_type == ETHERTYPE_VLAN) { 2547 struct ether_vlan_header *evh = (void *)eh; 2548 2549 eh_type = ntohs(evh->evl_proto); 2550 m0->m_pkthdr.l2hlen = sizeof(*evh); 2551 } else 2552 m0->m_pkthdr.l2hlen = sizeof(*eh); 2553 2554 offset = 0; 2555 l3hdr = m_advance(&m, &offset, m0->m_pkthdr.l2hlen); 2556 2557 switch (eh_type) { 2558 #ifdef INET6 2559 case ETHERTYPE_IPV6: 2560 { 2561 struct ip6_hdr *ip6 = l3hdr; 2562 2563 MPASS(!needs_tso(m0) || ip6->ip6_nxt == IPPROTO_TCP); 2564 2565 m0->m_pkthdr.l3hlen = sizeof(*ip6); 2566 break; 2567 } 2568 #endif 2569 #ifdef INET 2570 case ETHERTYPE_IP: 2571 { 2572 struct ip *ip = l3hdr; 2573 2574 m0->m_pkthdr.l3hlen = ip->ip_hl * 4; 2575 break; 2576 } 2577 #endif 2578 default: 2579 panic("%s: ethertype 0x%04x unknown. 
if_cxgbe must be compiled" 2580 " with the same INET/INET6 options as the kernel.", 2581 __func__, eh_type); 2582 } 2583 2584 #if defined(INET) || defined(INET6) 2585 if (needs_tcp_csum(m0)) { 2586 tcp = m_advance(&m, &offset, m0->m_pkthdr.l3hlen); 2587 m0->m_pkthdr.l4hlen = tcp->th_off * 4; 2588 #ifdef RATELIMIT 2589 if (tsclk >= 0 && *(uint32_t *)(tcp + 1) == ntohl(0x0101080a)) { 2590 set_mbuf_eo_tsclk_tsoff(m0, 2591 V_FW_ETH_TX_EO_WR_TSCLK(tsclk) | 2592 V_FW_ETH_TX_EO_WR_TSOFF(sizeof(*tcp) / 2 + 1)); 2593 } else 2594 set_mbuf_eo_tsclk_tsoff(m0, 0); 2595 } else if (needs_udp_csum(m)) { 2596 m0->m_pkthdr.l4hlen = sizeof(struct udphdr); 2597 #endif 2598 } 2599 #ifdef RATELIMIT 2600 if (needs_eo(m0)) { 2601 u_int immhdrs; 2602 2603 /* EO WRs have the headers in the WR and not the GL. */ 2604 immhdrs = m0->m_pkthdr.l2hlen + m0->m_pkthdr.l3hlen + 2605 m0->m_pkthdr.l4hlen; 2606 nsegs = count_mbuf_nsegs(m0, immhdrs); 2607 set_mbuf_eo_nsegs(m0, nsegs); 2608 set_mbuf_eo_len16(m0, 2609 txpkt_eo_len16(nsegs, immhdrs, needs_tso(m0))); 2610 } 2611 #endif 2612 #endif 2613 MPASS(m0 == *mp); 2614 return (0); 2615 } 2616 2617 void * 2618 start_wrq_wr(struct sge_wrq *wrq, int len16, struct wrq_cookie *cookie) 2619 { 2620 struct sge_eq *eq = &wrq->eq; 2621 struct adapter *sc = wrq->adapter; 2622 int ndesc, available; 2623 struct wrqe *wr; 2624 void *w; 2625 2626 MPASS(len16 > 0); 2627 ndesc = howmany(len16, EQ_ESIZE / 16); 2628 MPASS(ndesc > 0 && ndesc <= SGE_MAX_WR_NDESC); 2629 2630 EQ_LOCK(eq); 2631 2632 if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list)) 2633 drain_wrq_wr_list(sc, wrq); 2634 2635 if (!STAILQ_EMPTY(&wrq->wr_list)) { 2636 slowpath: 2637 EQ_UNLOCK(eq); 2638 wr = alloc_wrqe(len16 * 16, wrq); 2639 if (__predict_false(wr == NULL)) 2640 return (NULL); 2641 cookie->pidx = -1; 2642 cookie->ndesc = ndesc; 2643 return (&wr->wr); 2644 } 2645 2646 eq->cidx = read_hw_cidx(eq); 2647 if (eq->pidx == eq->cidx) 2648 available = eq->sidx - 1; 2649 else 2650 available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; 2651 if (available < ndesc) 2652 goto slowpath; 2653 2654 cookie->pidx = eq->pidx; 2655 cookie->ndesc = ndesc; 2656 TAILQ_INSERT_TAIL(&wrq->incomplete_wrs, cookie, link); 2657 2658 w = &eq->desc[eq->pidx]; 2659 IDXINCR(eq->pidx, ndesc, eq->sidx); 2660 if (__predict_false(cookie->pidx + ndesc > eq->sidx)) { 2661 w = &wrq->ss[0]; 2662 wrq->ss_pidx = cookie->pidx; 2663 wrq->ss_len = len16 * 16; 2664 } 2665 2666 EQ_UNLOCK(eq); 2667 2668 return (w); 2669 } 2670 2671 void 2672 commit_wrq_wr(struct sge_wrq *wrq, void *w, struct wrq_cookie *cookie) 2673 { 2674 struct sge_eq *eq = &wrq->eq; 2675 struct adapter *sc = wrq->adapter; 2676 int ndesc, pidx; 2677 struct wrq_cookie *prev, *next; 2678 2679 if (cookie->pidx == -1) { 2680 struct wrqe *wr = __containerof(w, struct wrqe, wr); 2681 2682 t4_wrq_tx(sc, wr); 2683 return; 2684 } 2685 2686 if (__predict_false(w == &wrq->ss[0])) { 2687 int n = (eq->sidx - wrq->ss_pidx) * EQ_ESIZE; 2688 2689 MPASS(wrq->ss_len > n); /* WR had better wrap around. */ 2690 bcopy(&wrq->ss[0], &eq->desc[wrq->ss_pidx], n); 2691 bcopy(&wrq->ss[n], &eq->desc[0], wrq->ss_len - n); 2692 wrq->tx_wrs_ss++; 2693 } else 2694 wrq->tx_wrs_direct++; 2695 2696 EQ_LOCK(eq); 2697 ndesc = cookie->ndesc; /* Can be more than SGE_MAX_WR_NDESC here. 
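				 * A cookie absorbs the descriptor counts of
				 * neighbouring WRs that commit before it does
				 * (see the prev/next adjustments below).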
*/ 2698 pidx = cookie->pidx; 2699 MPASS(pidx >= 0 && pidx < eq->sidx); 2700 prev = TAILQ_PREV(cookie, wrq_incomplete_wrs, link); 2701 next = TAILQ_NEXT(cookie, link); 2702 if (prev == NULL) { 2703 MPASS(pidx == eq->dbidx); 2704 if (next == NULL || ndesc >= 16) { 2705 int available; 2706 struct fw_eth_tx_pkt_wr *dst; /* any fw WR struct will do */ 2707 2708 /* 2709 * Note that the WR via which we'll request tx updates 2710 * is at pidx and not eq->pidx, which has moved on 2711 * already. 2712 */ 2713 dst = (void *)&eq->desc[pidx]; 2714 available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; 2715 if (available < eq->sidx / 4 && 2716 atomic_cmpset_int(&eq->equiq, 0, 1)) { 2717 /* 2718 * XXX: This is not 100% reliable with some 2719 * types of WRs. But this is a very unusual 2720 * situation for an ofld/ctrl queue anyway. 2721 */ 2722 dst->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ | 2723 F_FW_WR_EQUEQ); 2724 } 2725 2726 ring_eq_db(wrq->adapter, eq, ndesc); 2727 } else { 2728 MPASS(IDXDIFF(next->pidx, pidx, eq->sidx) == ndesc); 2729 next->pidx = pidx; 2730 next->ndesc += ndesc; 2731 } 2732 } else { 2733 MPASS(IDXDIFF(pidx, prev->pidx, eq->sidx) == prev->ndesc); 2734 prev->ndesc += ndesc; 2735 } 2736 TAILQ_REMOVE(&wrq->incomplete_wrs, cookie, link); 2737 2738 if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list)) 2739 drain_wrq_wr_list(sc, wrq); 2740 2741 #ifdef INVARIANTS 2742 if (TAILQ_EMPTY(&wrq->incomplete_wrs)) { 2743 /* Doorbell must have caught up to the pidx. */ 2744 MPASS(wrq->eq.pidx == wrq->eq.dbidx); 2745 } 2746 #endif 2747 EQ_UNLOCK(eq); 2748 } 2749 2750 static u_int 2751 can_resume_eth_tx(struct mp_ring *r) 2752 { 2753 struct sge_eq *eq = r->cookie; 2754 2755 return (total_available_tx_desc(eq) > eq->sidx / 8); 2756 } 2757 2758 static inline int 2759 cannot_use_txpkts(struct mbuf *m) 2760 { 2761 /* maybe put a GL limit too, to avoid silliness? */ 2762 2763 return (needs_tso(m) || (mbuf_cflags(m) & MC_RAW_WR) != 0); 2764 } 2765 2766 static inline int 2767 discard_tx(struct sge_eq *eq) 2768 { 2769 2770 return ((eq->flags & (EQ_ENABLED | EQ_QFLUSH)) != EQ_ENABLED); 2771 } 2772 2773 static inline int 2774 wr_can_update_eq(struct fw_eth_tx_pkts_wr *wr) 2775 { 2776 2777 switch (G_FW_WR_OP(be32toh(wr->op_pkd))) { 2778 case FW_ULPTX_WR: 2779 case FW_ETH_TX_PKT_WR: 2780 case FW_ETH_TX_PKTS_WR: 2781 case FW_ETH_TX_PKT_VM_WR: 2782 return (1); 2783 default: 2784 return (0); 2785 } 2786 } 2787 2788 /* 2789 * r->items[cidx] to r->items[pidx], with a wraparound at r->size, are ready to 2790 * be consumed. Return the actual number consumed. 0 indicates a stall. 2791 */ 2792 static u_int 2793 eth_tx(struct mp_ring *r, u_int cidx, u_int pidx) 2794 { 2795 struct sge_txq *txq = r->cookie; 2796 struct sge_eq *eq = &txq->eq; 2797 struct ifnet *ifp = txq->ifp; 2798 struct vi_info *vi = ifp->if_softc; 2799 struct port_info *pi = vi->pi; 2800 struct adapter *sc = pi->adapter; 2801 u_int total, remaining; /* # of packets */ 2802 u_int available, dbdiff; /* # of hardware descriptors */ 2803 u_int n, next_cidx; 2804 struct mbuf *m0, *tail; 2805 struct txpkts txp; 2806 struct fw_eth_tx_pkts_wr *wr; /* any fw WR struct will do */ 2807 2808 remaining = IDXDIFF(pidx, cidx, r->size); 2809 MPASS(remaining > 0); /* Must not be called without work to do. 
*/ 2810 total = 0; 2811 2812 TXQ_LOCK(txq); 2813 if (__predict_false(discard_tx(eq))) { 2814 while (cidx != pidx) { 2815 m0 = r->items[cidx]; 2816 m_freem(m0); 2817 if (++cidx == r->size) 2818 cidx = 0; 2819 } 2820 reclaim_tx_descs(txq, 2048); 2821 total = remaining; 2822 goto done; 2823 } 2824 2825 /* How many hardware descriptors do we have readily available. */ 2826 if (eq->pidx == eq->cidx) 2827 available = eq->sidx - 1; 2828 else 2829 available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; 2830 dbdiff = IDXDIFF(eq->pidx, eq->dbidx, eq->sidx); 2831 2832 while (remaining > 0) { 2833 2834 m0 = r->items[cidx]; 2835 M_ASSERTPKTHDR(m0); 2836 MPASS(m0->m_nextpkt == NULL); 2837 2838 if (available < SGE_MAX_WR_NDESC) { 2839 available += reclaim_tx_descs(txq, 64); 2840 if (available < howmany(mbuf_len16(m0), EQ_ESIZE / 16)) 2841 break; /* out of descriptors */ 2842 } 2843 2844 next_cidx = cidx + 1; 2845 if (__predict_false(next_cidx == r->size)) 2846 next_cidx = 0; 2847 2848 wr = (void *)&eq->desc[eq->pidx]; 2849 if (sc->flags & IS_VF) { 2850 total++; 2851 remaining--; 2852 ETHER_BPF_MTAP(ifp, m0); 2853 n = write_txpkt_vm_wr(sc, txq, (void *)wr, m0, 2854 available); 2855 } else if (remaining > 1 && 2856 try_txpkts(m0, r->items[next_cidx], &txp, available) == 0) { 2857 2858 /* pkts at cidx, next_cidx should both be in txp. */ 2859 MPASS(txp.npkt == 2); 2860 tail = r->items[next_cidx]; 2861 MPASS(tail->m_nextpkt == NULL); 2862 ETHER_BPF_MTAP(ifp, m0); 2863 ETHER_BPF_MTAP(ifp, tail); 2864 m0->m_nextpkt = tail; 2865 2866 if (__predict_false(++next_cidx == r->size)) 2867 next_cidx = 0; 2868 2869 while (next_cidx != pidx) { 2870 if (add_to_txpkts(r->items[next_cidx], &txp, 2871 available) != 0) 2872 break; 2873 tail->m_nextpkt = r->items[next_cidx]; 2874 tail = tail->m_nextpkt; 2875 ETHER_BPF_MTAP(ifp, tail); 2876 if (__predict_false(++next_cidx == r->size)) 2877 next_cidx = 0; 2878 } 2879 2880 n = write_txpkts_wr(txq, wr, m0, &txp, available); 2881 total += txp.npkt; 2882 remaining -= txp.npkt; 2883 } else if (mbuf_cflags(m0) & MC_RAW_WR) { 2884 total++; 2885 remaining--; 2886 n = write_raw_wr(txq, (void *)wr, m0, available); 2887 } else { 2888 total++; 2889 remaining--; 2890 ETHER_BPF_MTAP(ifp, m0); 2891 n = write_txpkt_wr(txq, (void *)wr, m0, available); 2892 } 2893 MPASS(n >= 1 && n <= available && n <= SGE_MAX_WR_NDESC); 2894 2895 available -= n; 2896 dbdiff += n; 2897 IDXINCR(eq->pidx, n, eq->sidx); 2898 2899 if (wr_can_update_eq(wr)) { 2900 if (total_available_tx_desc(eq) < eq->sidx / 4 && 2901 atomic_cmpset_int(&eq->equiq, 0, 1)) { 2902 wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ | 2903 F_FW_WR_EQUEQ); 2904 eq->equeqidx = eq->pidx; 2905 } else if (IDXDIFF(eq->pidx, eq->equeqidx, eq->sidx) >= 2906 32) { 2907 wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ); 2908 eq->equeqidx = eq->pidx; 2909 } 2910 } 2911 2912 if (dbdiff >= 16 && remaining >= 4) { 2913 ring_eq_db(sc, eq, dbdiff); 2914 available += reclaim_tx_descs(txq, 4 * dbdiff); 2915 dbdiff = 0; 2916 } 2917 2918 cidx = next_cidx; 2919 } 2920 if (dbdiff != 0) { 2921 ring_eq_db(sc, eq, dbdiff); 2922 reclaim_tx_descs(txq, 32); 2923 } 2924 done: 2925 TXQ_UNLOCK(txq); 2926 2927 return (total); 2928 } 2929 2930 static inline void 2931 init_iq(struct sge_iq *iq, struct adapter *sc, int tmr_idx, int pktc_idx, 2932 int qsize) 2933 { 2934 2935 KASSERT(tmr_idx >= 0 && tmr_idx < SGE_NTIMERS, 2936 ("%s: bad tmr_idx %d", __func__, tmr_idx)); 2937 KASSERT(pktc_idx < SGE_NCOUNTERS, /* -ve is ok, means don't use */ 2938 ("%s: bad pktc_idx %d", __func__, 
pktc_idx)); 2939 2940 iq->flags = 0; 2941 iq->adapter = sc; 2942 iq->intr_params = V_QINTR_TIMER_IDX(tmr_idx); 2943 iq->intr_pktc_idx = SGE_NCOUNTERS - 1; 2944 if (pktc_idx >= 0) { 2945 iq->intr_params |= F_QINTR_CNT_EN; 2946 iq->intr_pktc_idx = pktc_idx; 2947 } 2948 iq->qsize = roundup2(qsize, 16); /* See FW_IQ_CMD/iqsize */ 2949 iq->sidx = iq->qsize - sc->params.sge.spg_len / IQ_ESIZE; 2950 } 2951 2952 static inline void 2953 init_fl(struct adapter *sc, struct sge_fl *fl, int qsize, int maxp, char *name) 2954 { 2955 2956 fl->qsize = qsize; 2957 fl->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE; 2958 strlcpy(fl->lockname, name, sizeof(fl->lockname)); 2959 if (sc->flags & BUF_PACKING_OK && 2960 ((!is_t4(sc) && buffer_packing) || /* T5+: enabled unless 0 */ 2961 (is_t4(sc) && buffer_packing == 1)))/* T4: disabled unless 1 */ 2962 fl->flags |= FL_BUF_PACKING; 2963 find_best_refill_source(sc, fl, maxp); 2964 find_safe_refill_source(sc, fl); 2965 } 2966 2967 static inline void 2968 init_eq(struct adapter *sc, struct sge_eq *eq, int eqtype, int qsize, 2969 uint8_t tx_chan, uint16_t iqid, char *name) 2970 { 2971 KASSERT(eqtype <= EQ_TYPEMASK, ("%s: bad qtype %d", __func__, eqtype)); 2972 2973 eq->flags = eqtype & EQ_TYPEMASK; 2974 eq->tx_chan = tx_chan; 2975 eq->iqid = iqid; 2976 eq->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE; 2977 strlcpy(eq->lockname, name, sizeof(eq->lockname)); 2978 } 2979 2980 static int 2981 alloc_ring(struct adapter *sc, size_t len, bus_dma_tag_t *tag, 2982 bus_dmamap_t *map, bus_addr_t *pa, void **va) 2983 { 2984 int rc; 2985 2986 rc = bus_dma_tag_create(sc->dmat, 512, 0, BUS_SPACE_MAXADDR, 2987 BUS_SPACE_MAXADDR, NULL, NULL, len, 1, len, 0, NULL, NULL, tag); 2988 if (rc != 0) { 2989 device_printf(sc->dev, "cannot allocate DMA tag: %d\n", rc); 2990 goto done; 2991 } 2992 2993 rc = bus_dmamem_alloc(*tag, va, 2994 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, map); 2995 if (rc != 0) { 2996 device_printf(sc->dev, "cannot allocate DMA memory: %d\n", rc); 2997 goto done; 2998 } 2999 3000 rc = bus_dmamap_load(*tag, *map, *va, len, oneseg_dma_callback, pa, 0); 3001 if (rc != 0) { 3002 device_printf(sc->dev, "cannot load DMA map: %d\n", rc); 3003 goto done; 3004 } 3005 done: 3006 if (rc) 3007 free_ring(sc, *tag, *map, *pa, *va); 3008 3009 return (rc); 3010 } 3011 3012 static int 3013 free_ring(struct adapter *sc, bus_dma_tag_t tag, bus_dmamap_t map, 3014 bus_addr_t pa, void *va) 3015 { 3016 if (pa) 3017 bus_dmamap_unload(tag, map); 3018 if (va) 3019 bus_dmamem_free(tag, va, map); 3020 if (tag) 3021 bus_dma_tag_destroy(tag); 3022 3023 return (0); 3024 } 3025 3026 /* 3027 * Allocates the ring for an ingress queue and an optional freelist. If the 3028 * freelist is specified it will be allocated and then associated with the 3029 * ingress queue. 3030 * 3031 * Returns errno on failure. Resources allocated up to that point may still be 3032 * allocated. Caller is responsible for cleanup in case this function fails. 3033 * 3034 * If the ingress queue will take interrupts directly then the intr_idx 3035 * specifies the vector, starting from 0. -1 means the interrupts for this 3036 * queue should be forwarded to the fwq. 
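 *
 * 'cong' < 0 means no congestion feedback is requested for this queue;
 * other values select how the SGE reacts to congestion (see tnl_cong() and
 * the FW_PARAMS_PARAM_DMAQ_CONM_CTXT setup below).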
3037 */ 3038 static int 3039 alloc_iq_fl(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl, 3040 int intr_idx, int cong) 3041 { 3042 int rc, i, cntxt_id; 3043 size_t len; 3044 struct fw_iq_cmd c; 3045 struct port_info *pi = vi->pi; 3046 struct adapter *sc = iq->adapter; 3047 struct sge_params *sp = &sc->params.sge; 3048 __be32 v = 0; 3049 3050 len = iq->qsize * IQ_ESIZE; 3051 rc = alloc_ring(sc, len, &iq->desc_tag, &iq->desc_map, &iq->ba, 3052 (void **)&iq->desc); 3053 if (rc != 0) 3054 return (rc); 3055 3056 bzero(&c, sizeof(c)); 3057 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | 3058 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) | 3059 V_FW_IQ_CMD_VFN(0)); 3060 3061 c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART | 3062 FW_LEN16(c)); 3063 3064 /* Special handling for firmware event queue */ 3065 if (iq == &sc->sge.fwq) 3066 v |= F_FW_IQ_CMD_IQASYNCH; 3067 3068 if (intr_idx < 0) { 3069 /* Forwarded interrupts, all headed to fwq */ 3070 v |= F_FW_IQ_CMD_IQANDST; 3071 v |= V_FW_IQ_CMD_IQANDSTINDEX(sc->sge.fwq.cntxt_id); 3072 } else { 3073 KASSERT(intr_idx < sc->intr_count, 3074 ("%s: invalid direct intr_idx %d", __func__, intr_idx)); 3075 v |= V_FW_IQ_CMD_IQANDSTINDEX(intr_idx); 3076 } 3077 3078 c.type_to_iqandstindex = htobe32(v | 3079 V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | 3080 V_FW_IQ_CMD_VIID(vi->viid) | 3081 V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT)); 3082 c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) | 3083 F_FW_IQ_CMD_IQGTSMODE | 3084 V_FW_IQ_CMD_IQINTCNTTHRESH(iq->intr_pktc_idx) | 3085 V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4)); 3086 c.iqsize = htobe16(iq->qsize); 3087 c.iqaddr = htobe64(iq->ba); 3088 if (cong >= 0) 3089 c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN); 3090 3091 if (fl) { 3092 mtx_init(&fl->fl_lock, fl->lockname, NULL, MTX_DEF); 3093 3094 len = fl->qsize * EQ_ESIZE; 3095 rc = alloc_ring(sc, len, &fl->desc_tag, &fl->desc_map, 3096 &fl->ba, (void **)&fl->desc); 3097 if (rc) 3098 return (rc); 3099 3100 /* Allocate space for one software descriptor per buffer. */ 3101 rc = alloc_fl_sdesc(fl); 3102 if (rc != 0) { 3103 device_printf(sc->dev, 3104 "failed to setup fl software descriptors: %d\n", 3105 rc); 3106 return (rc); 3107 } 3108 3109 if (fl->flags & FL_BUF_PACKING) { 3110 fl->lowat = roundup2(sp->fl_starve_threshold2, 8); 3111 fl->buf_boundary = sp->pack_boundary; 3112 } else { 3113 fl->lowat = roundup2(sp->fl_starve_threshold, 8); 3114 fl->buf_boundary = 16; 3115 } 3116 if (fl_pad && fl->buf_boundary < sp->pad_boundary) 3117 fl->buf_boundary = sp->pad_boundary; 3118 3119 c.iqns_to_fl0congen |= 3120 htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) | 3121 F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO | 3122 (fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0) | 3123 (fl->flags & FL_BUF_PACKING ? F_FW_IQ_CMD_FL0PACKEN : 3124 0)); 3125 if (cong >= 0) { 3126 c.iqns_to_fl0congen |= 3127 htobe32(V_FW_IQ_CMD_FL0CNGCHMAP(cong) | 3128 F_FW_IQ_CMD_FL0CONGCIF | 3129 F_FW_IQ_CMD_FL0CONGEN); 3130 } 3131 c.fl0dcaen_to_fl0cidxfthresh = 3132 htobe16(V_FW_IQ_CMD_FL0FBMIN(chip_id(sc) <= CHELSIO_T5 ? 3133 X_FETCHBURSTMIN_128B : X_FETCHBURSTMIN_64B) | 3134 V_FW_IQ_CMD_FL0FBMAX(chip_id(sc) <= CHELSIO_T5 ? 
3135 X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B));
3136 c.fl0size = htobe16(fl->qsize);
3137 c.fl0addr = htobe64(fl->ba);
3138 }
3139
3140 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
3141 if (rc != 0) {
3142 device_printf(sc->dev,
3143 "failed to create ingress queue: %d\n", rc);
3144 return (rc);
3145 }
3146
3147 iq->cidx = 0;
3148 iq->gen = F_RSPD_GEN;
3149 iq->intr_next = iq->intr_params;
3150 iq->cntxt_id = be16toh(c.iqid);
3151 iq->abs_id = be16toh(c.physiqid);
3152 iq->flags |= IQ_ALLOCATED;
3153
3154 cntxt_id = iq->cntxt_id - sc->sge.iq_start;
3155 if (cntxt_id >= sc->sge.niq) {
3156 panic ("%s: iq->cntxt_id (%d) more than the max (%d)", __func__,
3157 cntxt_id, sc->sge.niq - 1);
3158 }
3159 sc->sge.iqmap[cntxt_id] = iq;
3160
3161 if (fl) {
3162 u_int qid;
3163
3164 iq->flags |= IQ_HAS_FL;
3165 fl->cntxt_id = be16toh(c.fl0id);
3166 fl->pidx = fl->cidx = 0;
3167
3168 cntxt_id = fl->cntxt_id - sc->sge.eq_start;
3169 if (cntxt_id >= sc->sge.neq) {
3170 panic("%s: fl->cntxt_id (%d) more than the max (%d)",
3171 __func__, cntxt_id, sc->sge.neq - 1);
3172 }
3173 sc->sge.eqmap[cntxt_id] = (void *)fl;
3174
3175 qid = fl->cntxt_id;
3176 if (isset(&sc->doorbells, DOORBELL_UDB)) {
3177 uint32_t s_qpp = sc->params.sge.eq_s_qpp;
3178 uint32_t mask = (1 << s_qpp) - 1;
3179 volatile uint8_t *udb;
3180
3181 udb = sc->udbs_base + UDBS_DB_OFFSET;
3182 udb += (qid >> s_qpp) << PAGE_SHIFT;
3183 qid &= mask;
3184 if (qid < PAGE_SIZE / UDBS_SEG_SIZE) {
3185 udb += qid << UDBS_SEG_SHIFT;
3186 qid = 0;
3187 }
3188 fl->udb = (volatile void *)udb;
3189 }
3190 fl->dbval = V_QID(qid) | sc->chip_params->sge_fl_db;
3191
3192 FL_LOCK(fl);
3193 /* Enough to make sure the SGE doesn't think it's starved */
3194 refill_fl(sc, fl, fl->lowat);
3195 FL_UNLOCK(fl);
3196 }
3197
3198 if (chip_id(sc) >= CHELSIO_T5 && !(sc->flags & IS_VF) && cong >= 0) {
3199 uint32_t param, val;
3200
3201 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
3202 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
3203 V_FW_PARAMS_PARAM_YZ(iq->cntxt_id);
3204 if (cong == 0)
3205 val = 1 << 19;
3206 else {
3207 val = 2 << 19;
3208 for (i = 0; i < 4; i++) {
3209 if (cong & (1 << i))
3210 val |= 1 << (i << 2);
3211 }
3212 }
3213
3214 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
3215 if (rc != 0) {
3216 /* report error but carry on */
3217 device_printf(sc->dev,
3218 "failed to set congestion manager context for "
3219 "ingress queue %d: %d\n", iq->cntxt_id, rc);
3220 }
3221 }
3222
3223 /* Enable IQ interrupts */
3224 atomic_store_rel_int(&iq->state, IQS_IDLE);
3225 t4_write_reg(sc, sc->sge_gts_reg, V_SEINTARM(iq->intr_params) |
3226 V_INGRESSQID(iq->cntxt_id));
3227
3228 return (0);
3229 }
3230
3231 static int
3232 free_iq_fl(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl)
3233 {
3234 int rc;
3235 struct adapter *sc = iq->adapter;
3236 device_t dev;
3237
3238 if (sc == NULL)
3239 return (0); /* nothing to do */
3240
3241 dev = vi ? vi->dev : sc->dev;
3242
3243 if (iq->flags & IQ_ALLOCATED) {
3244 rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0,
3245 FW_IQ_TYPE_FL_INT_CAP, iq->cntxt_id,
3246 fl ?
fl->cntxt_id : 0xffff, 0xffff); 3247 if (rc != 0) { 3248 device_printf(dev, 3249 "failed to free queue %p: %d\n", iq, rc); 3250 return (rc); 3251 } 3252 iq->flags &= ~IQ_ALLOCATED; 3253 } 3254 3255 free_ring(sc, iq->desc_tag, iq->desc_map, iq->ba, iq->desc); 3256 3257 bzero(iq, sizeof(*iq)); 3258 3259 if (fl) { 3260 free_ring(sc, fl->desc_tag, fl->desc_map, fl->ba, 3261 fl->desc); 3262 3263 if (fl->sdesc) 3264 free_fl_sdesc(sc, fl); 3265 3266 if (mtx_initialized(&fl->fl_lock)) 3267 mtx_destroy(&fl->fl_lock); 3268 3269 bzero(fl, sizeof(*fl)); 3270 } 3271 3272 return (0); 3273 } 3274 3275 static void 3276 add_iq_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *oid, 3277 struct sge_iq *iq) 3278 { 3279 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 3280 3281 SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD, &iq->ba, 3282 "bus address of descriptor ring"); 3283 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL, 3284 iq->qsize * IQ_ESIZE, "descriptor ring size in bytes"); 3285 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "abs_id", 3286 CTLTYPE_INT | CTLFLAG_RD, &iq->abs_id, 0, sysctl_uint16, "I", 3287 "absolute id of the queue"); 3288 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id", 3289 CTLTYPE_INT | CTLFLAG_RD, &iq->cntxt_id, 0, sysctl_uint16, "I", 3290 "SGE context id of the queue"); 3291 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx", 3292 CTLTYPE_INT | CTLFLAG_RD, &iq->cidx, 0, sysctl_uint16, "I", 3293 "consumer index"); 3294 } 3295 3296 static void 3297 add_fl_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx, 3298 struct sysctl_oid *oid, struct sge_fl *fl) 3299 { 3300 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 3301 3302 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fl", CTLFLAG_RD, NULL, 3303 "freelist"); 3304 children = SYSCTL_CHILDREN(oid); 3305 3306 SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD, 3307 &fl->ba, "bus address of descriptor ring"); 3308 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL, 3309 fl->sidx * EQ_ESIZE + sc->params.sge.spg_len, 3310 "desc ring size in bytes"); 3311 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id", 3312 CTLTYPE_INT | CTLFLAG_RD, &fl->cntxt_id, 0, sysctl_uint16, "I", 3313 "SGE context id of the freelist"); 3314 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "padding", CTLFLAG_RD, NULL, 3315 fl_pad ? 1 : 0, "padding enabled"); 3316 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "packing", CTLFLAG_RD, NULL, 3317 fl->flags & FL_BUF_PACKING ? 
1 : 0, "packing enabled"); 3318 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, &fl->cidx, 3319 0, "consumer index"); 3320 if (fl->flags & FL_BUF_PACKING) { 3321 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_offset", 3322 CTLFLAG_RD, &fl->rx_offset, 0, "packing rx offset"); 3323 } 3324 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, &fl->pidx, 3325 0, "producer index"); 3326 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "mbuf_allocated", 3327 CTLFLAG_RD, &fl->mbuf_allocated, "# of mbuf allocated"); 3328 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "mbuf_inlined", 3329 CTLFLAG_RD, &fl->mbuf_inlined, "# of mbuf inlined in clusters"); 3330 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_allocated", 3331 CTLFLAG_RD, &fl->cl_allocated, "# of clusters allocated"); 3332 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_recycled", 3333 CTLFLAG_RD, &fl->cl_recycled, "# of clusters recycled"); 3334 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_fast_recycled", 3335 CTLFLAG_RD, &fl->cl_fast_recycled, "# of clusters recycled (fast)"); 3336 } 3337 3338 static int 3339 alloc_fwq(struct adapter *sc) 3340 { 3341 int rc, intr_idx; 3342 struct sge_iq *fwq = &sc->sge.fwq; 3343 struct sysctl_oid *oid = device_get_sysctl_tree(sc->dev); 3344 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 3345 3346 init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE); 3347 if (sc->flags & IS_VF) 3348 intr_idx = 0; 3349 else 3350 intr_idx = sc->intr_count > 1 ? 1 : 0; 3351 rc = alloc_iq_fl(&sc->port[0]->vi[0], fwq, NULL, intr_idx, -1); 3352 if (rc != 0) { 3353 device_printf(sc->dev, 3354 "failed to create firmware event queue: %d\n", rc); 3355 return (rc); 3356 } 3357 3358 oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "fwq", CTLFLAG_RD, 3359 NULL, "firmware event queue"); 3360 add_iq_sysctls(&sc->ctx, oid, fwq); 3361 3362 return (0); 3363 } 3364 3365 static int 3366 free_fwq(struct adapter *sc) 3367 { 3368 return free_iq_fl(NULL, &sc->sge.fwq, NULL); 3369 } 3370 3371 static int 3372 alloc_ctrlq(struct adapter *sc, struct sge_wrq *ctrlq, int idx, 3373 struct sysctl_oid *oid) 3374 { 3375 int rc; 3376 char name[16]; 3377 struct sysctl_oid_list *children; 3378 3379 snprintf(name, sizeof(name), "%s ctrlq%d", device_get_nameunit(sc->dev), 3380 idx); 3381 init_eq(sc, &ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, sc->port[idx]->tx_chan, 3382 sc->sge.fwq.cntxt_id, name); 3383 3384 children = SYSCTL_CHILDREN(oid); 3385 snprintf(name, sizeof(name), "%d", idx); 3386 oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, name, CTLFLAG_RD, 3387 NULL, "ctrl queue"); 3388 rc = alloc_wrq(sc, NULL, ctrlq, oid); 3389 3390 return (rc); 3391 } 3392 3393 int 3394 tnl_cong(struct port_info *pi, int drop) 3395 { 3396 3397 if (drop == -1) 3398 return (-1); 3399 else if (drop == 1) 3400 return (0); 3401 else 3402 return (pi->rx_e_chan_map); 3403 } 3404 3405 static int 3406 alloc_rxq(struct vi_info *vi, struct sge_rxq *rxq, int intr_idx, int idx, 3407 struct sysctl_oid *oid) 3408 { 3409 int rc; 3410 struct adapter *sc = vi->pi->adapter; 3411 struct sysctl_oid_list *children; 3412 char name[16]; 3413 3414 rc = alloc_iq_fl(vi, &rxq->iq, &rxq->fl, intr_idx, 3415 tnl_cong(vi->pi, cong_drop)); 3416 if (rc != 0) 3417 return (rc); 3418 3419 if (idx == 0) 3420 sc->sge.iq_base = rxq->iq.abs_id - rxq->iq.cntxt_id; 3421 else 3422 KASSERT(rxq->iq.cntxt_id + sc->sge.iq_base == rxq->iq.abs_id, 3423 ("iq_base mismatch")); 3424 KASSERT(sc->sge.iq_base == 0 || sc->flags & IS_VF, 3425 ("PF with non-zero iq_base")); 3426 3427 /* 3428 * The freelist is just barely 
above the starvation threshold right now, 3429 * fill it up a bit more. 3430 */ 3431 FL_LOCK(&rxq->fl); 3432 refill_fl(sc, &rxq->fl, 128); 3433 FL_UNLOCK(&rxq->fl); 3434 3435 #if defined(INET) || defined(INET6) 3436 rc = tcp_lro_init_args(&rxq->lro, vi->ifp, lro_entries, lro_mbufs); 3437 if (rc != 0) 3438 return (rc); 3439 MPASS(rxq->lro.ifp == vi->ifp); /* also indicates LRO init'ed */ 3440 3441 if (vi->ifp->if_capenable & IFCAP_LRO) 3442 rxq->iq.flags |= IQ_LRO_ENABLED; 3443 #endif 3444 if (vi->ifp->if_capenable & IFCAP_HWRXTSTMP) 3445 rxq->iq.flags |= IQ_RX_TIMESTAMP; 3446 rxq->ifp = vi->ifp; 3447 3448 children = SYSCTL_CHILDREN(oid); 3449 3450 snprintf(name, sizeof(name), "%d", idx); 3451 oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, CTLFLAG_RD, 3452 NULL, "rx queue"); 3453 children = SYSCTL_CHILDREN(oid); 3454 3455 add_iq_sysctls(&vi->ctx, oid, &rxq->iq); 3456 #if defined(INET) || defined(INET6) 3457 SYSCTL_ADD_U64(&vi->ctx, children, OID_AUTO, "lro_queued", CTLFLAG_RD, 3458 &rxq->lro.lro_queued, 0, NULL); 3459 SYSCTL_ADD_U64(&vi->ctx, children, OID_AUTO, "lro_flushed", CTLFLAG_RD, 3460 &rxq->lro.lro_flushed, 0, NULL); 3461 #endif 3462 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "rxcsum", CTLFLAG_RD, 3463 &rxq->rxcsum, "# of times hardware assisted with checksum"); 3464 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "vlan_extraction", 3465 CTLFLAG_RD, &rxq->vlan_extraction, 3466 "# of times hardware extracted 802.1Q tag"); 3467 3468 add_fl_sysctls(sc, &vi->ctx, oid, &rxq->fl); 3469 3470 return (rc); 3471 } 3472 3473 static int 3474 free_rxq(struct vi_info *vi, struct sge_rxq *rxq) 3475 { 3476 int rc; 3477 3478 #if defined(INET) || defined(INET6) 3479 if (rxq->lro.ifp) { 3480 tcp_lro_free(&rxq->lro); 3481 rxq->lro.ifp = NULL; 3482 } 3483 #endif 3484 3485 rc = free_iq_fl(vi, &rxq->iq, &rxq->fl); 3486 if (rc == 0) 3487 bzero(rxq, sizeof(*rxq)); 3488 3489 return (rc); 3490 } 3491 3492 #ifdef TCP_OFFLOAD 3493 static int 3494 alloc_ofld_rxq(struct vi_info *vi, struct sge_ofld_rxq *ofld_rxq, 3495 int intr_idx, int idx, struct sysctl_oid *oid) 3496 { 3497 struct port_info *pi = vi->pi; 3498 int rc; 3499 struct sysctl_oid_list *children; 3500 char name[16]; 3501 3502 rc = alloc_iq_fl(vi, &ofld_rxq->iq, &ofld_rxq->fl, intr_idx, 0); 3503 if (rc != 0) 3504 return (rc); 3505 3506 children = SYSCTL_CHILDREN(oid); 3507 3508 snprintf(name, sizeof(name), "%d", idx); 3509 oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, CTLFLAG_RD, 3510 NULL, "rx queue"); 3511 add_iq_sysctls(&vi->ctx, oid, &ofld_rxq->iq); 3512 add_fl_sysctls(pi->adapter, &vi->ctx, oid, &ofld_rxq->fl); 3513 3514 return (rc); 3515 } 3516 3517 static int 3518 free_ofld_rxq(struct vi_info *vi, struct sge_ofld_rxq *ofld_rxq) 3519 { 3520 int rc; 3521 3522 rc = free_iq_fl(vi, &ofld_rxq->iq, &ofld_rxq->fl); 3523 if (rc == 0) 3524 bzero(ofld_rxq, sizeof(*ofld_rxq)); 3525 3526 return (rc); 3527 } 3528 #endif 3529 3530 #ifdef DEV_NETMAP 3531 static int 3532 alloc_nm_rxq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int intr_idx, 3533 int idx, struct sysctl_oid *oid) 3534 { 3535 int rc; 3536 struct sysctl_oid_list *children; 3537 struct sysctl_ctx_list *ctx; 3538 char name[16]; 3539 size_t len; 3540 struct adapter *sc = vi->pi->adapter; 3541 struct netmap_adapter *na = NA(vi->ifp); 3542 3543 MPASS(na != NULL); 3544 3545 len = vi->qsize_rxq * IQ_ESIZE; 3546 rc = alloc_ring(sc, len, &nm_rxq->iq_desc_tag, &nm_rxq->iq_desc_map, 3547 &nm_rxq->iq_ba, (void **)&nm_rxq->iq_desc); 3548 if (rc != 0) 3549 return (rc); 3550 3551 len = 
na->num_rx_desc * EQ_ESIZE + sc->params.sge.spg_len; 3552 rc = alloc_ring(sc, len, &nm_rxq->fl_desc_tag, &nm_rxq->fl_desc_map, 3553 &nm_rxq->fl_ba, (void **)&nm_rxq->fl_desc); 3554 if (rc != 0) 3555 return (rc); 3556 3557 nm_rxq->vi = vi; 3558 nm_rxq->nid = idx; 3559 nm_rxq->iq_cidx = 0; 3560 nm_rxq->iq_sidx = vi->qsize_rxq - sc->params.sge.spg_len / IQ_ESIZE; 3561 nm_rxq->iq_gen = F_RSPD_GEN; 3562 nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0; 3563 nm_rxq->fl_sidx = na->num_rx_desc; 3564 nm_rxq->intr_idx = intr_idx; 3565 nm_rxq->iq_cntxt_id = INVALID_NM_RXQ_CNTXT_ID; 3566 3567 ctx = &vi->ctx; 3568 children = SYSCTL_CHILDREN(oid); 3569 3570 snprintf(name, sizeof(name), "%d", idx); 3571 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name, CTLFLAG_RD, NULL, 3572 "rx queue"); 3573 children = SYSCTL_CHILDREN(oid); 3574 3575 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "abs_id", 3576 CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->iq_abs_id, 0, sysctl_uint16, 3577 "I", "absolute id of the queue"); 3578 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id", 3579 CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->iq_cntxt_id, 0, sysctl_uint16, 3580 "I", "SGE context id of the queue"); 3581 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx", 3582 CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->iq_cidx, 0, sysctl_uint16, "I", 3583 "consumer index"); 3584 3585 children = SYSCTL_CHILDREN(oid); 3586 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fl", CTLFLAG_RD, NULL, 3587 "freelist"); 3588 children = SYSCTL_CHILDREN(oid); 3589 3590 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id", 3591 CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->fl_cntxt_id, 0, sysctl_uint16, 3592 "I", "SGE context id of the freelist"); 3593 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, 3594 &nm_rxq->fl_cidx, 0, "consumer index"); 3595 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, 3596 &nm_rxq->fl_pidx, 0, "producer index"); 3597 3598 return (rc); 3599 } 3600 3601 3602 static int 3603 free_nm_rxq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq) 3604 { 3605 struct adapter *sc = vi->pi->adapter; 3606 3607 if (vi->flags & VI_INIT_DONE) 3608 MPASS(nm_rxq->iq_cntxt_id == INVALID_NM_RXQ_CNTXT_ID); 3609 else 3610 MPASS(nm_rxq->iq_cntxt_id == 0); 3611 3612 free_ring(sc, nm_rxq->iq_desc_tag, nm_rxq->iq_desc_map, nm_rxq->iq_ba, 3613 nm_rxq->iq_desc); 3614 free_ring(sc, nm_rxq->fl_desc_tag, nm_rxq->fl_desc_map, nm_rxq->fl_ba, 3615 nm_rxq->fl_desc); 3616 3617 return (0); 3618 } 3619 3620 static int 3621 alloc_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq, int iqidx, int idx, 3622 struct sysctl_oid *oid) 3623 { 3624 int rc; 3625 size_t len; 3626 struct port_info *pi = vi->pi; 3627 struct adapter *sc = pi->adapter; 3628 struct netmap_adapter *na = NA(vi->ifp); 3629 char name[16]; 3630 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 3631 3632 len = na->num_tx_desc * EQ_ESIZE + sc->params.sge.spg_len; 3633 rc = alloc_ring(sc, len, &nm_txq->desc_tag, &nm_txq->desc_map, 3634 &nm_txq->ba, (void **)&nm_txq->desc); 3635 if (rc) 3636 return (rc); 3637 3638 nm_txq->pidx = nm_txq->cidx = 0; 3639 nm_txq->sidx = na->num_tx_desc; 3640 nm_txq->nid = idx; 3641 nm_txq->iqidx = iqidx; 3642 nm_txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) | 3643 V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) | 3644 V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld)); 3645 nm_txq->cntxt_id = INVALID_NM_TXQ_CNTXT_ID; 3646 3647 snprintf(name, sizeof(name), "%d", idx); 3648 oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, CTLFLAG_RD, 3649 NULL, "netmap tx queue"); 3650 children = 
SYSCTL_CHILDREN(oid); 3651 3652 SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 3653 &nm_txq->cntxt_id, 0, "SGE context id of the queue"); 3654 SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "cidx", 3655 CTLTYPE_INT | CTLFLAG_RD, &nm_txq->cidx, 0, sysctl_uint16, "I", 3656 "consumer index"); 3657 SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "pidx", 3658 CTLTYPE_INT | CTLFLAG_RD, &nm_txq->pidx, 0, sysctl_uint16, "I", 3659 "producer index"); 3660 3661 return (rc); 3662 } 3663 3664 static int 3665 free_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq) 3666 { 3667 struct adapter *sc = vi->pi->adapter; 3668 3669 if (vi->flags & VI_INIT_DONE) 3670 MPASS(nm_txq->cntxt_id == INVALID_NM_TXQ_CNTXT_ID); 3671 else 3672 MPASS(nm_txq->cntxt_id == 0); 3673 3674 free_ring(sc, nm_txq->desc_tag, nm_txq->desc_map, nm_txq->ba, 3675 nm_txq->desc); 3676 3677 return (0); 3678 } 3679 #endif 3680 3681 /* 3682 * Returns a reasonable automatic cidx flush threshold for a given queue size. 3683 */ 3684 static u_int 3685 qsize_to_fthresh(int qsize) 3686 { 3687 u_int fthresh; 3688 3689 while (!powerof2(qsize)) 3690 qsize++; 3691 fthresh = ilog2(qsize); 3692 if (fthresh > X_CIDXFLUSHTHRESH_128) 3693 fthresh = X_CIDXFLUSHTHRESH_128; 3694 3695 return (fthresh); 3696 } 3697 3698 static int 3699 ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq) 3700 { 3701 int rc, cntxt_id; 3702 struct fw_eq_ctrl_cmd c; 3703 int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; 3704 3705 bzero(&c, sizeof(c)); 3706 3707 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST | 3708 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(sc->pf) | 3709 V_FW_EQ_CTRL_CMD_VFN(0)); 3710 c.alloc_to_len16 = htobe32(F_FW_EQ_CTRL_CMD_ALLOC | 3711 F_FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c)); 3712 c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(eq->iqid)); 3713 c.physeqid_pkd = htobe32(0); 3714 c.fetchszm_to_iqid = 3715 htobe32(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) | 3716 V_FW_EQ_CTRL_CMD_PCIECHN(eq->tx_chan) | 3717 F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(eq->iqid)); 3718 c.dcaen_to_eqsize = 3719 htobe32(V_FW_EQ_CTRL_CMD_FBMIN(X_FETCHBURSTMIN_64B) | 3720 V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) | 3721 V_FW_EQ_CTRL_CMD_CIDXFTHRESH(qsize_to_fthresh(qsize)) | 3722 V_FW_EQ_CTRL_CMD_EQSIZE(qsize)); 3723 c.eqaddr = htobe64(eq->ba); 3724 3725 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 3726 if (rc != 0) { 3727 device_printf(sc->dev, 3728 "failed to create control queue %d: %d\n", eq->tx_chan, rc); 3729 return (rc); 3730 } 3731 eq->flags |= EQ_ALLOCATED; 3732 3733 eq->cntxt_id = G_FW_EQ_CTRL_CMD_EQID(be32toh(c.cmpliqid_eqid)); 3734 cntxt_id = eq->cntxt_id - sc->sge.eq_start; 3735 if (cntxt_id >= sc->sge.neq) 3736 panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, 3737 cntxt_id, sc->sge.neq - 1); 3738 sc->sge.eqmap[cntxt_id] = eq; 3739 3740 return (rc); 3741 } 3742 3743 static int 3744 eth_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq) 3745 { 3746 int rc, cntxt_id; 3747 struct fw_eq_eth_cmd c; 3748 int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; 3749 3750 bzero(&c, sizeof(c)); 3751 3752 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST | 3753 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) | 3754 V_FW_EQ_ETH_CMD_VFN(0)); 3755 c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC | 3756 F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c)); 3757 c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE | 3758 
F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid)); 3759 c.fetchszm_to_iqid = 3760 htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) | 3761 V_FW_EQ_ETH_CMD_PCIECHN(eq->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO | 3762 V_FW_EQ_ETH_CMD_IQID(eq->iqid)); 3763 c.dcaen_to_eqsize = htobe32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) | 3764 V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) | 3765 V_FW_EQ_ETH_CMD_EQSIZE(qsize)); 3766 c.eqaddr = htobe64(eq->ba); 3767 3768 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 3769 if (rc != 0) { 3770 device_printf(vi->dev, 3771 "failed to create Ethernet egress queue: %d\n", rc); 3772 return (rc); 3773 } 3774 eq->flags |= EQ_ALLOCATED; 3775 3776 eq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd)); 3777 eq->abs_id = G_FW_EQ_ETH_CMD_PHYSEQID(be32toh(c.physeqid_pkd)); 3778 cntxt_id = eq->cntxt_id - sc->sge.eq_start; 3779 if (cntxt_id >= sc->sge.neq) 3780 panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, 3781 cntxt_id, sc->sge.neq - 1); 3782 sc->sge.eqmap[cntxt_id] = eq; 3783 3784 return (rc); 3785 } 3786 3787 #if defined(TCP_OFFLOAD) || defined(RATELIMIT) 3788 static int 3789 ofld_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq) 3790 { 3791 int rc, cntxt_id; 3792 struct fw_eq_ofld_cmd c; 3793 int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; 3794 3795 bzero(&c, sizeof(c)); 3796 3797 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST | 3798 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(sc->pf) | 3799 V_FW_EQ_OFLD_CMD_VFN(0)); 3800 c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_ALLOC | 3801 F_FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c)); 3802 c.fetchszm_to_iqid = 3803 htonl(V_FW_EQ_OFLD_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) | 3804 V_FW_EQ_OFLD_CMD_PCIECHN(eq->tx_chan) | 3805 F_FW_EQ_OFLD_CMD_FETCHRO | V_FW_EQ_OFLD_CMD_IQID(eq->iqid)); 3806 c.dcaen_to_eqsize = 3807 htobe32(V_FW_EQ_OFLD_CMD_FBMIN(X_FETCHBURSTMIN_64B) | 3808 V_FW_EQ_OFLD_CMD_FBMAX(X_FETCHBURSTMAX_512B) | 3809 V_FW_EQ_OFLD_CMD_CIDXFTHRESH(qsize_to_fthresh(qsize)) | 3810 V_FW_EQ_OFLD_CMD_EQSIZE(qsize)); 3811 c.eqaddr = htobe64(eq->ba); 3812 3813 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 3814 if (rc != 0) { 3815 device_printf(vi->dev, 3816 "failed to create egress queue for TCP offload: %d\n", rc); 3817 return (rc); 3818 } 3819 eq->flags |= EQ_ALLOCATED; 3820 3821 eq->cntxt_id = G_FW_EQ_OFLD_CMD_EQID(be32toh(c.eqid_pkd)); 3822 cntxt_id = eq->cntxt_id - sc->sge.eq_start; 3823 if (cntxt_id >= sc->sge.neq) 3824 panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, 3825 cntxt_id, sc->sge.neq - 1); 3826 sc->sge.eqmap[cntxt_id] = eq; 3827 3828 return (rc); 3829 } 3830 #endif 3831 3832 static int 3833 alloc_eq(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq) 3834 { 3835 int rc, qsize; 3836 size_t len; 3837 3838 mtx_init(&eq->eq_lock, eq->lockname, NULL, MTX_DEF); 3839 3840 qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; 3841 len = qsize * EQ_ESIZE; 3842 rc = alloc_ring(sc, len, &eq->desc_tag, &eq->desc_map, 3843 &eq->ba, (void **)&eq->desc); 3844 if (rc) 3845 return (rc); 3846 3847 eq->pidx = eq->cidx = eq->dbidx = 0; 3848 /* Note that equeqidx is not used with sge_wrq (OFLD/CTRL) queues. 
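	 * Those queues request CIDX updates from drain_wrq_wr_list and
	 * commit_wrq_wr directly; only eth_tx tracks equeqidx.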
*/ 3849 eq->equeqidx = 0; 3850 eq->doorbells = sc->doorbells; 3851 3852 switch (eq->flags & EQ_TYPEMASK) { 3853 case EQ_CTRL: 3854 rc = ctrl_eq_alloc(sc, eq); 3855 break; 3856 3857 case EQ_ETH: 3858 rc = eth_eq_alloc(sc, vi, eq); 3859 break; 3860 3861 #if defined(TCP_OFFLOAD) || defined(RATELIMIT) 3862 case EQ_OFLD: 3863 rc = ofld_eq_alloc(sc, vi, eq); 3864 break; 3865 #endif 3866 3867 default: 3868 panic("%s: invalid eq type %d.", __func__, 3869 eq->flags & EQ_TYPEMASK); 3870 } 3871 if (rc != 0) { 3872 device_printf(sc->dev, 3873 "failed to allocate egress queue(%d): %d\n", 3874 eq->flags & EQ_TYPEMASK, rc); 3875 } 3876 3877 if (isset(&eq->doorbells, DOORBELL_UDB) || 3878 isset(&eq->doorbells, DOORBELL_UDBWC) || 3879 isset(&eq->doorbells, DOORBELL_WCWR)) { 3880 uint32_t s_qpp = sc->params.sge.eq_s_qpp; 3881 uint32_t mask = (1 << s_qpp) - 1; 3882 volatile uint8_t *udb; 3883 3884 udb = sc->udbs_base + UDBS_DB_OFFSET; 3885 udb += (eq->cntxt_id >> s_qpp) << PAGE_SHIFT; /* pg offset */ 3886 eq->udb_qid = eq->cntxt_id & mask; /* id in page */ 3887 if (eq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE) 3888 clrbit(&eq->doorbells, DOORBELL_WCWR); 3889 else { 3890 udb += eq->udb_qid << UDBS_SEG_SHIFT; /* seg offset */ 3891 eq->udb_qid = 0; 3892 } 3893 eq->udb = (volatile void *)udb; 3894 } 3895 3896 return (rc); 3897 } 3898 3899 static int 3900 free_eq(struct adapter *sc, struct sge_eq *eq) 3901 { 3902 int rc; 3903 3904 if (eq->flags & EQ_ALLOCATED) { 3905 switch (eq->flags & EQ_TYPEMASK) { 3906 case EQ_CTRL: 3907 rc = -t4_ctrl_eq_free(sc, sc->mbox, sc->pf, 0, 3908 eq->cntxt_id); 3909 break; 3910 3911 case EQ_ETH: 3912 rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, 3913 eq->cntxt_id); 3914 break; 3915 3916 #if defined(TCP_OFFLOAD) || defined(RATELIMIT) 3917 case EQ_OFLD: 3918 rc = -t4_ofld_eq_free(sc, sc->mbox, sc->pf, 0, 3919 eq->cntxt_id); 3920 break; 3921 #endif 3922 3923 default: 3924 panic("%s: invalid eq type %d.", __func__, 3925 eq->flags & EQ_TYPEMASK); 3926 } 3927 if (rc != 0) { 3928 device_printf(sc->dev, 3929 "failed to free egress queue (%d): %d\n", 3930 eq->flags & EQ_TYPEMASK, rc); 3931 return (rc); 3932 } 3933 eq->flags &= ~EQ_ALLOCATED; 3934 } 3935 3936 free_ring(sc, eq->desc_tag, eq->desc_map, eq->ba, eq->desc); 3937 3938 if (mtx_initialized(&eq->eq_lock)) 3939 mtx_destroy(&eq->eq_lock); 3940 3941 bzero(eq, sizeof(*eq)); 3942 return (0); 3943 } 3944 3945 static int 3946 alloc_wrq(struct adapter *sc, struct vi_info *vi, struct sge_wrq *wrq, 3947 struct sysctl_oid *oid) 3948 { 3949 int rc; 3950 struct sysctl_ctx_list *ctx = vi ? 
&vi->ctx : &sc->ctx; 3951 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 3952 3953 rc = alloc_eq(sc, vi, &wrq->eq); 3954 if (rc) 3955 return (rc); 3956 3957 wrq->adapter = sc; 3958 TASK_INIT(&wrq->wrq_tx_task, 0, wrq_tx_drain, wrq); 3959 TAILQ_INIT(&wrq->incomplete_wrs); 3960 STAILQ_INIT(&wrq->wr_list); 3961 wrq->nwr_pending = 0; 3962 wrq->ndesc_needed = 0; 3963 3964 SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD, 3965 &wrq->eq.ba, "bus address of descriptor ring"); 3966 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL, 3967 wrq->eq.sidx * EQ_ESIZE + sc->params.sge.spg_len, 3968 "desc ring size in bytes"); 3969 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 3970 &wrq->eq.cntxt_id, 0, "SGE context id of the queue"); 3971 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx", 3972 CTLTYPE_INT | CTLFLAG_RD, &wrq->eq.cidx, 0, sysctl_uint16, "I", 3973 "consumer index"); 3974 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pidx", 3975 CTLTYPE_INT | CTLFLAG_RD, &wrq->eq.pidx, 0, sysctl_uint16, "I", 3976 "producer index"); 3977 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sidx", CTLFLAG_RD, NULL, 3978 wrq->eq.sidx, "status page index"); 3979 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_direct", CTLFLAG_RD, 3980 &wrq->tx_wrs_direct, "# of work requests (direct)"); 3981 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_copied", CTLFLAG_RD, 3982 &wrq->tx_wrs_copied, "# of work requests (copied)"); 3983 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_sspace", CTLFLAG_RD, 3984 &wrq->tx_wrs_ss, "# of work requests (copied from scratch space)"); 3985 3986 return (rc); 3987 } 3988 3989 static int 3990 free_wrq(struct adapter *sc, struct sge_wrq *wrq) 3991 { 3992 int rc; 3993 3994 rc = free_eq(sc, &wrq->eq); 3995 if (rc) 3996 return (rc); 3997 3998 bzero(wrq, sizeof(*wrq)); 3999 return (0); 4000 } 4001 4002 static int 4003 alloc_txq(struct vi_info *vi, struct sge_txq *txq, int idx, 4004 struct sysctl_oid *oid) 4005 { 4006 int rc; 4007 struct port_info *pi = vi->pi; 4008 struct adapter *sc = pi->adapter; 4009 struct sge_eq *eq = &txq->eq; 4010 char name[16]; 4011 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 4012 4013 rc = mp_ring_alloc(&txq->r, eq->sidx, txq, eth_tx, can_resume_eth_tx, 4014 M_CXGBE, M_WAITOK); 4015 if (rc != 0) { 4016 device_printf(sc->dev, "failed to allocate mp_ring: %d\n", rc); 4017 return (rc); 4018 } 4019 4020 rc = alloc_eq(sc, vi, eq); 4021 if (rc != 0) { 4022 mp_ring_free(txq->r); 4023 txq->r = NULL; 4024 return (rc); 4025 } 4026 4027 /* Can't fail after this point. 
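Both allocations that can fail (the mp_ring and the hardware egress queue) have succeeded; everything below is M_WAITOK memory and sysctl bookkeeping.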
*/ 4028 4029 if (idx == 0) 4030 sc->sge.eq_base = eq->abs_id - eq->cntxt_id; 4031 else 4032 KASSERT(eq->cntxt_id + sc->sge.eq_base == eq->abs_id, 4033 ("eq_base mismatch")); 4034 KASSERT(sc->sge.eq_base == 0 || sc->flags & IS_VF, 4035 ("PF with non-zero eq_base")); 4036 4037 TASK_INIT(&txq->tx_reclaim_task, 0, tx_reclaim, eq); 4038 txq->ifp = vi->ifp; 4039 txq->gl = sglist_alloc(TX_SGL_SEGS, M_WAITOK); 4040 if (sc->flags & IS_VF) 4041 txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) | 4042 V_TXPKT_INTF(pi->tx_chan)); 4043 else 4044 txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) | 4045 V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) | 4046 V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld)); 4047 txq->tc_idx = -1; 4048 txq->sdesc = malloc(eq->sidx * sizeof(struct tx_sdesc), M_CXGBE, 4049 M_ZERO | M_WAITOK); 4050 4051 snprintf(name, sizeof(name), "%d", idx); 4052 oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, CTLFLAG_RD, 4053 NULL, "tx queue"); 4054 children = SYSCTL_CHILDREN(oid); 4055 4056 SYSCTL_ADD_UAUTO(&vi->ctx, children, OID_AUTO, "ba", CTLFLAG_RD, 4057 &eq->ba, "bus address of descriptor ring"); 4058 SYSCTL_ADD_INT(&vi->ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL, 4059 eq->sidx * EQ_ESIZE + sc->params.sge.spg_len, 4060 "desc ring size in bytes"); 4061 SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "abs_id", CTLFLAG_RD, 4062 &eq->abs_id, 0, "absolute id of the queue"); 4063 SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 4064 &eq->cntxt_id, 0, "SGE context id of the queue"); 4065 SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "cidx", 4066 CTLTYPE_INT | CTLFLAG_RD, &eq->cidx, 0, sysctl_uint16, "I", 4067 "consumer index"); 4068 SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "pidx", 4069 CTLTYPE_INT | CTLFLAG_RD, &eq->pidx, 0, sysctl_uint16, "I", 4070 "producer index"); 4071 SYSCTL_ADD_INT(&vi->ctx, children, OID_AUTO, "sidx", CTLFLAG_RD, NULL, 4072 eq->sidx, "status page index"); 4073 4074 SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "tc", 4075 CTLTYPE_INT | CTLFLAG_RW, vi, idx, sysctl_tc, "I", 4076 "traffic class (-1 means none)"); 4077 4078 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txcsum", CTLFLAG_RD, 4079 &txq->txcsum, "# of times hardware assisted with checksum"); 4080 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "vlan_insertion", 4081 CTLFLAG_RD, &txq->vlan_insertion, 4082 "# of times hardware inserted 802.1Q tag"); 4083 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "tso_wrs", CTLFLAG_RD, 4084 &txq->tso_wrs, "# of TSO work requests"); 4085 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "imm_wrs", CTLFLAG_RD, 4086 &txq->imm_wrs, "# of work requests with immediate data"); 4087 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "sgl_wrs", CTLFLAG_RD, 4088 &txq->sgl_wrs, "# of work requests with direct SGL"); 4089 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkt_wrs", CTLFLAG_RD, 4090 &txq->txpkt_wrs, "# of txpkt work requests (one pkt/WR)"); 4091 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts0_wrs", 4092 CTLFLAG_RD, &txq->txpkts0_wrs, 4093 "# of txpkts (type 0) work requests"); 4094 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts1_wrs", 4095 CTLFLAG_RD, &txq->txpkts1_wrs, 4096 "# of txpkts (type 1) work requests"); 4097 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts0_pkts", 4098 CTLFLAG_RD, &txq->txpkts0_pkts, 4099 "# of frames tx'd using type0 txpkts work requests"); 4100 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts1_pkts", 4101 CTLFLAG_RD, &txq->txpkts1_pkts, 4102 "# of frames tx'd using type1 
txpkts work requests"); 4103 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "raw_wrs", CTLFLAG_RD, 4104 &txq->raw_wrs, "# of raw work requests (non-packets)"); 4105 4106 SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_enqueues", 4107 CTLFLAG_RD, &txq->r->enqueues, 4108 "# of enqueues to the mp_ring for this queue"); 4109 SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_drops", 4110 CTLFLAG_RD, &txq->r->drops, 4111 "# of drops in the mp_ring for this queue"); 4112 SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_starts", 4113 CTLFLAG_RD, &txq->r->starts, 4114 "# of normal consumer starts in the mp_ring for this queue"); 4115 SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_stalls", 4116 CTLFLAG_RD, &txq->r->stalls, 4117 "# of consumer stalls in the mp_ring for this queue"); 4118 SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_restarts", 4119 CTLFLAG_RD, &txq->r->restarts, 4120 "# of consumer restarts in the mp_ring for this queue"); 4121 SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_abdications", 4122 CTLFLAG_RD, &txq->r->abdications, 4123 "# of consumer abdications in the mp_ring for this queue"); 4124 4125 return (0); 4126 } 4127 4128 static int 4129 free_txq(struct vi_info *vi, struct sge_txq *txq) 4130 { 4131 int rc; 4132 struct adapter *sc = vi->pi->adapter; 4133 struct sge_eq *eq = &txq->eq; 4134 4135 rc = free_eq(sc, eq); 4136 if (rc) 4137 return (rc); 4138 4139 sglist_free(txq->gl); 4140 free(txq->sdesc, M_CXGBE); 4141 mp_ring_free(txq->r); 4142 4143 bzero(txq, sizeof(*txq)); 4144 return (0); 4145 } 4146 4147 static void 4148 oneseg_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error) 4149 { 4150 bus_addr_t *ba = arg; 4151 4152 KASSERT(nseg == 1, 4153 ("%s meant for single segment mappings only.", __func__)); 4154 4155 *ba = error ? 0 : segs->ds_addr; 4156 } 4157 4158 static inline void 4159 ring_fl_db(struct adapter *sc, struct sge_fl *fl) 4160 { 4161 uint32_t n, v; 4162 4163 n = IDXDIFF(fl->pidx / 8, fl->dbidx, fl->sidx); 4164 MPASS(n > 0); 4165 4166 wmb(); 4167 v = fl->dbval | V_PIDX(n); 4168 if (fl->udb) 4169 *fl->udb = htole32(v); 4170 else 4171 t4_write_reg(sc, sc->sge_kdoorbell_reg, v); 4172 IDXINCR(fl->dbidx, n, fl->sidx); 4173 } 4174 4175 /* 4176 * Fills up the freelist by allocating up to 'n' buffers. Buffers that are 4177 * recycled do not count towards this allocation budget. 4178 * 4179 * Returns non-zero to indicate that this freelist should be added to the list 4180 * of starving freelists. 4181 */ 4182 static int 4183 refill_fl(struct adapter *sc, struct sge_fl *fl, int n) 4184 { 4185 __be64 *d; 4186 struct fl_sdesc *sd; 4187 uintptr_t pa; 4188 caddr_t cl; 4189 struct cluster_layout *cll; 4190 struct sw_zone_info *swz; 4191 struct cluster_metadata *clm; 4192 uint16_t max_pidx; 4193 uint16_t hw_cidx = fl->hw_cidx; /* stable snapshot */ 4194 4195 FL_LOCK_ASSERT_OWNED(fl); 4196 4197 /* 4198 * We always stop at the beginning of the hardware descriptor that's just 4199 * before the one with the hw cidx. This is to avoid hw pidx = hw cidx, 4200 * which would mean an empty freelist to the chip. 4201 */ 4202 max_pidx = __predict_false(hw_cidx == 0) ? 
fl->sidx - 1 : hw_cidx - 1; 4203 if (fl->pidx == max_pidx * 8) 4204 return (0); 4205 4206 d = &fl->desc[fl->pidx]; 4207 sd = &fl->sdesc[fl->pidx]; 4208 cll = &fl->cll_def; /* default layout */ 4209 swz = &sc->sge.sw_zone_info[cll->zidx]; 4210 4211 while (n > 0) { 4212 4213 if (sd->cl != NULL) { 4214 4215 if (sd->nmbuf == 0) { 4216 /* 4217 * Fast recycle without involving any atomics on 4218 * the cluster's metadata (if the cluster has 4219 * metadata). This happens when all frames 4220 * received in the cluster were small enough to 4221 * fit within a single mbuf each. 4222 */ 4223 fl->cl_fast_recycled++; 4224 #ifdef INVARIANTS 4225 clm = cl_metadata(sc, fl, &sd->cll, sd->cl); 4226 if (clm != NULL) 4227 MPASS(clm->refcount == 1); 4228 #endif 4229 goto recycled_fast; 4230 } 4231 4232 /* 4233 * Cluster is guaranteed to have metadata. Clusters 4234 * without metadata always take the fast recycle path 4235 * when they're recycled. 4236 */ 4237 clm = cl_metadata(sc, fl, &sd->cll, sd->cl); 4238 MPASS(clm != NULL); 4239 4240 if (atomic_fetchadd_int(&clm->refcount, -1) == 1) { 4241 fl->cl_recycled++; 4242 counter_u64_add(extfree_rels, 1); 4243 goto recycled; 4244 } 4245 sd->cl = NULL; /* gave up my reference */ 4246 } 4247 MPASS(sd->cl == NULL); 4248 alloc: 4249 cl = uma_zalloc(swz->zone, M_NOWAIT); 4250 if (__predict_false(cl == NULL)) { 4251 if (cll == &fl->cll_alt || fl->cll_alt.zidx == -1 || 4252 fl->cll_def.zidx == fl->cll_alt.zidx) 4253 break; 4254 4255 /* fall back to the safe zone */ 4256 cll = &fl->cll_alt; 4257 swz = &sc->sge.sw_zone_info[cll->zidx]; 4258 goto alloc; 4259 } 4260 fl->cl_allocated++; 4261 n--; 4262 4263 pa = pmap_kextract((vm_offset_t)cl); 4264 pa += cll->region1; 4265 sd->cl = cl; 4266 sd->cll = *cll; 4267 *d = htobe64(pa | cll->hwidx); 4268 clm = cl_metadata(sc, fl, cll, cl); 4269 if (clm != NULL) { 4270 recycled: 4271 #ifdef INVARIANTS 4272 clm->sd = sd; 4273 #endif 4274 clm->refcount = 1; 4275 } 4276 sd->nmbuf = 0; 4277 recycled_fast: 4278 d++; 4279 sd++; 4280 if (__predict_false(++fl->pidx % 8 == 0)) { 4281 uint16_t pidx = fl->pidx / 8; 4282 4283 if (__predict_false(pidx == fl->sidx)) { 4284 fl->pidx = 0; 4285 pidx = 0; 4286 sd = fl->sdesc; 4287 d = fl->desc; 4288 } 4289 if (pidx == max_pidx) 4290 break; 4291 4292 if (IDXDIFF(pidx, fl->dbidx, fl->sidx) >= 4) 4293 ring_fl_db(sc, fl); 4294 } 4295 } 4296 4297 if (fl->pidx / 8 != fl->dbidx) 4298 ring_fl_db(sc, fl); 4299 4300 return (FL_RUNNING_LOW(fl) && !(fl->flags & FL_STARVING)); 4301 } 4302 4303 /* 4304 * Attempt to refill all starving freelists. 
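* Runs from the sfl callout with sc->sfl_lock held (see the mtx_assert below). Freelists that are no longer running low, or are being torn down, are taken off the starving list; the callout is rescheduled at hz / 5 as long as any remain.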
4305 */ 4306 static void 4307 refill_sfl(void *arg) 4308 { 4309 struct adapter *sc = arg; 4310 struct sge_fl *fl, *fl_temp; 4311 4312 mtx_assert(&sc->sfl_lock, MA_OWNED); 4313 TAILQ_FOREACH_SAFE(fl, &sc->sfl, link, fl_temp) { 4314 FL_LOCK(fl); 4315 refill_fl(sc, fl, 64); 4316 if (FL_NOT_RUNNING_LOW(fl) || fl->flags & FL_DOOMED) { 4317 TAILQ_REMOVE(&sc->sfl, fl, link); 4318 fl->flags &= ~FL_STARVING; 4319 } 4320 FL_UNLOCK(fl); 4321 } 4322 4323 if (!TAILQ_EMPTY(&sc->sfl)) 4324 callout_schedule(&sc->sfl_callout, hz / 5); 4325 } 4326 4327 static int 4328 alloc_fl_sdesc(struct sge_fl *fl) 4329 { 4330 4331 fl->sdesc = malloc(fl->sidx * 8 * sizeof(struct fl_sdesc), M_CXGBE, 4332 M_ZERO | M_WAITOK); 4333 4334 return (0); 4335 } 4336 4337 static void 4338 free_fl_sdesc(struct adapter *sc, struct sge_fl *fl) 4339 { 4340 struct fl_sdesc *sd; 4341 struct cluster_metadata *clm; 4342 struct cluster_layout *cll; 4343 int i; 4344 4345 sd = fl->sdesc; 4346 for (i = 0; i < fl->sidx * 8; i++, sd++) { 4347 if (sd->cl == NULL) 4348 continue; 4349 4350 cll = &sd->cll; 4351 clm = cl_metadata(sc, fl, cll, sd->cl); 4352 if (sd->nmbuf == 0) 4353 uma_zfree(sc->sge.sw_zone_info[cll->zidx].zone, sd->cl); 4354 else if (clm && atomic_fetchadd_int(&clm->refcount, -1) == 1) { 4355 uma_zfree(sc->sge.sw_zone_info[cll->zidx].zone, sd->cl); 4356 counter_u64_add(extfree_rels, 1); 4357 } 4358 sd->cl = NULL; 4359 } 4360 4361 free(fl->sdesc, M_CXGBE); 4362 fl->sdesc = NULL; 4363 } 4364 4365 static inline void 4366 get_pkt_gl(struct mbuf *m, struct sglist *gl) 4367 { 4368 int rc; 4369 4370 M_ASSERTPKTHDR(m); 4371 4372 sglist_reset(gl); 4373 rc = sglist_append_mbuf(gl, m); 4374 if (__predict_false(rc != 0)) { 4375 panic("%s: mbuf %p (%d segs) was vetted earlier but now fails " 4376 "with %d.", __func__, m, mbuf_nsegs(m), rc); 4377 } 4378 4379 KASSERT(gl->sg_nseg == mbuf_nsegs(m), 4380 ("%s: nsegs changed for mbuf %p from %d to %d", __func__, m, 4381 mbuf_nsegs(m), gl->sg_nseg)); 4382 KASSERT(gl->sg_nseg > 0 && 4383 gl->sg_nseg <= (needs_tso(m) ? TX_SGL_SEGS_TSO : TX_SGL_SEGS), 4384 ("%s: %d segments, should have been 1 <= nsegs <= %d", __func__, 4385 gl->sg_nseg, needs_tso(m) ? TX_SGL_SEGS_TSO : TX_SGL_SEGS)); 4386 } 4387 4388 /* 4389 * len16 for a txpkt WR with a GL. Includes the firmware work request header. 4390 */ 4391 static inline u_int 4392 txpkt_len16(u_int nsegs, u_int tso) 4393 { 4394 u_int n; 4395 4396 MPASS(nsegs > 0); 4397 4398 nsegs--; /* first segment is part of ulptx_sgl */ 4399 n = sizeof(struct fw_eth_tx_pkt_wr) + sizeof(struct cpl_tx_pkt_core) + 4400 sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1)); 4401 if (tso) 4402 n += sizeof(struct cpl_tx_pkt_lso_core); 4403 4404 return (howmany(n, 16)); 4405 } 4406 4407 /* 4408 * len16 for a txpkt_vm WR with a GL. Includes the firmware work 4409 * request header. 4410 */ 4411 static inline u_int 4412 txpkt_vm_len16(u_int nsegs, u_int tso) 4413 { 4414 u_int n; 4415 4416 MPASS(nsegs > 0); 4417 4418 nsegs--; /* first segment is part of ulptx_sgl */ 4419 n = sizeof(struct fw_eth_tx_pkt_vm_wr) + 4420 sizeof(struct cpl_tx_pkt_core) + 4421 sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1)); 4422 if (tso) 4423 n += sizeof(struct cpl_tx_pkt_lso_core); 4424 4425 return (howmany(n, 16)); 4426 } 4427 4428 /* 4429 * len16 for a txpkts type 0 WR with a GL. Does not include the firmware work 4430 * request header. 
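* Each full pair of segments after the first occupies 3 flits (24 bytes) in the SGL and a leftover odd segment occupies 2 flits (16 bytes); that is what the 8 * ((3 * nsegs) / 2 + (nsegs & 1)) term below accounts for.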
4431 */ 4432 static inline u_int 4433 txpkts0_len16(u_int nsegs) 4434 { 4435 u_int n; 4436 4437 MPASS(nsegs > 0); 4438 4439 nsegs--; /* first segment is part of ulptx_sgl */ 4440 n = sizeof(struct ulp_txpkt) + sizeof(struct ulptx_idata) + 4441 sizeof(struct cpl_tx_pkt_core) + sizeof(struct ulptx_sgl) + 4442 8 * ((3 * nsegs) / 2 + (nsegs & 1)); 4443 4444 return (howmany(n, 16)); 4445 } 4446 4447 /* 4448 * len16 for a txpkts type 1 WR with a GL. Does not include the firmware work 4449 * request header. 4450 */ 4451 static inline u_int 4452 txpkts1_len16(void) 4453 { 4454 u_int n; 4455 4456 n = sizeof(struct cpl_tx_pkt_core) + sizeof(struct ulptx_sgl); 4457 4458 return (howmany(n, 16)); 4459 } 4460 4461 static inline u_int 4462 imm_payload(u_int ndesc) 4463 { 4464 u_int n; 4465 4466 n = ndesc * EQ_ESIZE - sizeof(struct fw_eth_tx_pkt_wr) - 4467 sizeof(struct cpl_tx_pkt_core); 4468 4469 return (n); 4470 } 4471 4472 /* 4473 * Write a VM txpkt WR for this packet to the hardware descriptors, update the 4474 * software descriptor, and advance the pidx. It is guaranteed that enough 4475 * descriptors are available. 4476 * 4477 * The return value is the # of hardware descriptors used. 4478 */ 4479 static u_int 4480 write_txpkt_vm_wr(struct adapter *sc, struct sge_txq *txq, 4481 struct fw_eth_tx_pkt_vm_wr *wr, struct mbuf *m0, u_int available) 4482 { 4483 struct sge_eq *eq = &txq->eq; 4484 struct tx_sdesc *txsd; 4485 struct cpl_tx_pkt_core *cpl; 4486 uint32_t ctrl; /* used in many unrelated places */ 4487 uint64_t ctrl1; 4488 int csum_type, len16, ndesc, pktlen, nsegs; 4489 caddr_t dst; 4490 4491 TXQ_LOCK_ASSERT_OWNED(txq); 4492 M_ASSERTPKTHDR(m0); 4493 MPASS(available > 0 && available < eq->sidx); 4494 4495 len16 = mbuf_len16(m0); 4496 nsegs = mbuf_nsegs(m0); 4497 pktlen = m0->m_pkthdr.len; 4498 ctrl = sizeof(struct cpl_tx_pkt_core); 4499 if (needs_tso(m0)) 4500 ctrl += sizeof(struct cpl_tx_pkt_lso_core); 4501 ndesc = howmany(len16, EQ_ESIZE / 16); 4502 MPASS(ndesc <= available); 4503 4504 /* Firmware work request header */ 4505 MPASS(wr == (void *)&eq->desc[eq->pidx]); 4506 wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_VM_WR) | 4507 V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl)); 4508 4509 ctrl = V_FW_WR_LEN16(len16); 4510 wr->equiq_to_len16 = htobe32(ctrl); 4511 wr->r3[0] = 0; 4512 wr->r3[1] = 0; 4513 4514 /* 4515 * Copy over ethmacdst, ethmacsrc, ethtype, and vlantci. 4516 * vlantci is ignored unless the ethtype is 0x8100, so it's 4517 * simpler to always copy it rather than making it 4518 * conditional. Also, it seems that we do not have to set 4519 * vlantci or fake the ethtype when doing VLAN tag insertion. 
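* The m_copydata() below therefore grabs 16 bytes: the 14-byte Ethernet header plus the next 2 bytes of the frame, which land in vlantci.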
4520 */ 4521 m_copydata(m0, 0, sizeof(struct ether_header) + 2, wr->ethmacdst); 4522 4523 csum_type = -1; 4524 if (needs_tso(m0)) { 4525 struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); 4526 4527 KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 && 4528 m0->m_pkthdr.l4hlen > 0, 4529 ("%s: mbuf %p needs TSO but missing header lengths", 4530 __func__, m0)); 4531 4532 ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | F_LSO_FIRST_SLICE | 4533 F_LSO_LAST_SLICE | V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) 4534 | V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2); 4535 if (m0->m_pkthdr.l2hlen == sizeof(struct ether_vlan_header)) 4536 ctrl |= V_LSO_ETHHDR_LEN(1); 4537 if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr)) 4538 ctrl |= F_LSO_IPV6; 4539 4540 lso->lso_ctrl = htobe32(ctrl); 4541 lso->ipid_ofst = htobe16(0); 4542 lso->mss = htobe16(m0->m_pkthdr.tso_segsz); 4543 lso->seqno_offset = htobe32(0); 4544 lso->len = htobe32(pktlen); 4545 4546 if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr)) 4547 csum_type = TX_CSUM_TCPIP6; 4548 else 4549 csum_type = TX_CSUM_TCPIP; 4550 4551 cpl = (void *)(lso + 1); 4552 4553 txq->tso_wrs++; 4554 } else { 4555 if (m0->m_pkthdr.csum_flags & CSUM_IP_TCP) 4556 csum_type = TX_CSUM_TCPIP; 4557 else if (m0->m_pkthdr.csum_flags & CSUM_IP_UDP) 4558 csum_type = TX_CSUM_UDPIP; 4559 else if (m0->m_pkthdr.csum_flags & CSUM_IP6_TCP) 4560 csum_type = TX_CSUM_TCPIP6; 4561 else if (m0->m_pkthdr.csum_flags & CSUM_IP6_UDP) 4562 csum_type = TX_CSUM_UDPIP6; 4563 #if defined(INET) 4564 else if (m0->m_pkthdr.csum_flags & CSUM_IP) { 4565 /* 4566 * XXX: The firmware appears to stomp on the 4567 * fragment/flags field of the IP header when 4568 * using TX_CSUM_IP. Fall back to doing 4569 * software checksums. 4570 */ 4571 u_short *sump; 4572 struct mbuf *m; 4573 int offset; 4574 4575 m = m0; 4576 offset = 0; 4577 sump = m_advance(&m, &offset, m0->m_pkthdr.l2hlen + 4578 offsetof(struct ip, ip_sum)); 4579 *sump = in_cksum_skip(m0, m0->m_pkthdr.l2hlen + 4580 m0->m_pkthdr.l3hlen, m0->m_pkthdr.l2hlen); 4581 m0->m_pkthdr.csum_flags &= ~CSUM_IP; 4582 } 4583 #endif 4584 4585 cpl = (void *)(wr + 1); 4586 } 4587 4588 /* Checksum offload */ 4589 ctrl1 = 0; 4590 if (needs_l3_csum(m0) == 0) 4591 ctrl1 |= F_TXPKT_IPCSUM_DIS; 4592 if (csum_type >= 0) { 4593 KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0, 4594 ("%s: mbuf %p needs checksum offload but missing header lengths", 4595 __func__, m0)); 4596 4597 if (chip_id(sc) <= CHELSIO_T5) { 4598 ctrl1 |= V_TXPKT_ETHHDR_LEN(m0->m_pkthdr.l2hlen - 4599 ETHER_HDR_LEN); 4600 } else { 4601 ctrl1 |= V_T6_TXPKT_ETHHDR_LEN(m0->m_pkthdr.l2hlen - 4602 ETHER_HDR_LEN); 4603 } 4604 ctrl1 |= V_TXPKT_IPHDR_LEN(m0->m_pkthdr.l3hlen); 4605 ctrl1 |= V_TXPKT_CSUM_TYPE(csum_type); 4606 } else 4607 ctrl1 |= F_TXPKT_L4CSUM_DIS; 4608 if (m0->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | 4609 CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) 4610 txq->txcsum++; /* some hardware assistance provided */ 4611 4612 /* VLAN tag insertion */ 4613 if (needs_vlan_insertion(m0)) { 4614 ctrl1 |= F_TXPKT_VLAN_VLD | 4615 V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag); 4616 txq->vlan_insertion++; 4617 } 4618 4619 /* CPL header */ 4620 cpl->ctrl0 = txq->cpl_ctrl0; 4621 cpl->pack = 0; 4622 cpl->len = htobe16(pktlen); 4623 cpl->ctrl1 = htobe64(ctrl1); 4624 4625 /* SGL */ 4626 dst = (void *)(cpl + 1); 4627 4628 /* 4629 * A packet using TSO will use up an entire descriptor for the 4630 * firmware work request header, LSO CPL, and TX_PKT_XT CPL. 
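* (The WR header and the two CPLs add up to exactly one descriptor's worth of bytes, so the SGL begins right at the start of the next descriptor.)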
4631 * If this descriptor is the last descriptor in the ring, wrap 4632 * around to the front of the ring explicitly for the start of 4633 * the sgl. 4634 */ 4635 if (dst == (void *)&eq->desc[eq->sidx]) { 4636 dst = (void *)&eq->desc[0]; 4637 write_gl_to_txd(txq, m0, &dst, 0); 4638 } else 4639 write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx); 4640 txq->sgl_wrs++; 4641 4642 txq->txpkt_wrs++; 4643 4644 txsd = &txq->sdesc[eq->pidx]; 4645 txsd->m = m0; 4646 txsd->desc_used = ndesc; 4647 4648 return (ndesc); 4649 } 4650 4651 /* 4652 * Write a raw WR to the hardware descriptors, update the software 4653 * descriptor, and advance the pidx. It is guaranteed that enough 4654 * descriptors are available. 4655 * 4656 * The return value is the # of hardware descriptors used. 4657 */ 4658 static u_int 4659 write_raw_wr(struct sge_txq *txq, void *wr, struct mbuf *m0, u_int available) 4660 { 4661 struct sge_eq *eq = &txq->eq; 4662 struct tx_sdesc *txsd; 4663 struct mbuf *m; 4664 caddr_t dst; 4665 int len16, ndesc; 4666 4667 len16 = mbuf_len16(m0); 4668 ndesc = howmany(len16, EQ_ESIZE / 16); 4669 MPASS(ndesc <= available); 4670 4671 dst = wr; 4672 for (m = m0; m != NULL; m = m->m_next) 4673 copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len); 4674 4675 txq->raw_wrs++; 4676 4677 txsd = &txq->sdesc[eq->pidx]; 4678 txsd->m = m0; 4679 txsd->desc_used = ndesc; 4680 4681 return (ndesc); 4682 } 4683 4684 /* 4685 * Write a txpkt WR for this packet to the hardware descriptors, update the 4686 * software descriptor, and advance the pidx. It is guaranteed that enough 4687 * descriptors are available. 4688 * 4689 * The return value is the # of hardware descriptors used. 4690 */ 4691 static u_int 4692 write_txpkt_wr(struct sge_txq *txq, struct fw_eth_tx_pkt_wr *wr, 4693 struct mbuf *m0, u_int available) 4694 { 4695 struct sge_eq *eq = &txq->eq; 4696 struct tx_sdesc *txsd; 4697 struct cpl_tx_pkt_core *cpl; 4698 uint32_t ctrl; /* used in many unrelated places */ 4699 uint64_t ctrl1; 4700 int len16, ndesc, pktlen, nsegs; 4701 caddr_t dst; 4702 4703 TXQ_LOCK_ASSERT_OWNED(txq); 4704 M_ASSERTPKTHDR(m0); 4705 MPASS(available > 0 && available < eq->sidx); 4706 4707 len16 = mbuf_len16(m0); 4708 nsegs = mbuf_nsegs(m0); 4709 pktlen = m0->m_pkthdr.len; 4710 ctrl = sizeof(struct cpl_tx_pkt_core); 4711 if (needs_tso(m0)) 4712 ctrl += sizeof(struct cpl_tx_pkt_lso_core); 4713 else if (pktlen <= imm_payload(2) && available >= 2) { 4714 /* Immediate data. Recalculate len16 and set nsegs to 0. 
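The whole frame is copied into the descriptor ring right after the CPL instead of being described by an SGL; imm_payload(2) is the most payload that fits in two descriptors after the WR and CPL headers.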
*/ 4715 ctrl += pktlen; 4716 len16 = howmany(sizeof(struct fw_eth_tx_pkt_wr) + 4717 sizeof(struct cpl_tx_pkt_core) + pktlen, 16); 4718 nsegs = 0; 4719 } 4720 ndesc = howmany(len16, EQ_ESIZE / 16); 4721 MPASS(ndesc <= available); 4722 4723 /* Firmware work request header */ 4724 MPASS(wr == (void *)&eq->desc[eq->pidx]); 4725 wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) | 4726 V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl)); 4727 4728 ctrl = V_FW_WR_LEN16(len16); 4729 wr->equiq_to_len16 = htobe32(ctrl); 4730 wr->r3 = 0; 4731 4732 if (needs_tso(m0)) { 4733 struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); 4734 4735 KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 && 4736 m0->m_pkthdr.l4hlen > 0, 4737 ("%s: mbuf %p needs TSO but missing header lengths", 4738 __func__, m0)); 4739 4740 ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | F_LSO_FIRST_SLICE | 4741 F_LSO_LAST_SLICE | V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) 4742 | V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2); 4743 if (m0->m_pkthdr.l2hlen == sizeof(struct ether_vlan_header)) 4744 ctrl |= V_LSO_ETHHDR_LEN(1); 4745 if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr)) 4746 ctrl |= F_LSO_IPV6; 4747 4748 lso->lso_ctrl = htobe32(ctrl); 4749 lso->ipid_ofst = htobe16(0); 4750 lso->mss = htobe16(m0->m_pkthdr.tso_segsz); 4751 lso->seqno_offset = htobe32(0); 4752 lso->len = htobe32(pktlen); 4753 4754 cpl = (void *)(lso + 1); 4755 4756 txq->tso_wrs++; 4757 } else 4758 cpl = (void *)(wr + 1); 4759 4760 /* Checksum offload */ 4761 ctrl1 = 0; 4762 if (needs_l3_csum(m0) == 0) 4763 ctrl1 |= F_TXPKT_IPCSUM_DIS; 4764 if (needs_l4_csum(m0) == 0) 4765 ctrl1 |= F_TXPKT_L4CSUM_DIS; 4766 if (m0->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | 4767 CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) 4768 txq->txcsum++; /* some hardware assistance provided */ 4769 4770 /* VLAN tag insertion */ 4771 if (needs_vlan_insertion(m0)) { 4772 ctrl1 |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag); 4773 txq->vlan_insertion++; 4774 } 4775 4776 /* CPL header */ 4777 cpl->ctrl0 = txq->cpl_ctrl0; 4778 cpl->pack = 0; 4779 cpl->len = htobe16(pktlen); 4780 cpl->ctrl1 = htobe64(ctrl1); 4781 4782 /* SGL */ 4783 dst = (void *)(cpl + 1); 4784 if (nsegs > 0) { 4785 4786 write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx); 4787 txq->sgl_wrs++; 4788 } else { 4789 struct mbuf *m; 4790 4791 for (m = m0; m != NULL; m = m->m_next) { 4792 copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len); 4793 #ifdef INVARIANTS 4794 pktlen -= m->m_len; 4795 #endif 4796 } 4797 #ifdef INVARIANTS 4798 KASSERT(pktlen == 0, ("%s: %d bytes left.", __func__, pktlen)); 4799 #endif 4800 txq->imm_wrs++; 4801 } 4802 4803 txq->txpkt_wrs++; 4804 4805 txsd = &txq->sdesc[eq->pidx]; 4806 txsd->m = m0; 4807 txsd->desc_used = ndesc; 4808 4809 return (ndesc); 4810 } 4811 4812 static int 4813 try_txpkts(struct mbuf *m, struct mbuf *n, struct txpkts *txp, u_int available) 4814 { 4815 u_int needed, nsegs1, nsegs2, l1, l2; 4816 4817 if (cannot_use_txpkts(m) || cannot_use_txpkts(n)) 4818 return (1); 4819 4820 nsegs1 = mbuf_nsegs(m); 4821 nsegs2 = mbuf_nsegs(n); 4822 if (nsegs1 + nsegs2 == 2) { 4823 txp->wr_type = 1; 4824 l1 = l2 = txpkts1_len16(); 4825 } else { 4826 txp->wr_type = 0; 4827 l1 = txpkts0_len16(nsegs1); 4828 l2 = txpkts0_len16(nsegs2); 4829 } 4830 txp->len16 = howmany(sizeof(struct fw_eth_tx_pkts_wr), 16) + l1 + l2; 4831 needed = howmany(txp->len16, EQ_ESIZE / 16); 4832 if (needed > SGE_MAX_WR_NDESC || needed > available) 4833 return (1); 4834 4835 txp->plen = m->m_pkthdr.len + n->m_pkthdr.len; 4836 if 
(txp->plen > 65535) 4837 return (1); 4838 4839 txp->npkt = 2; 4840 set_mbuf_len16(m, l1); 4841 set_mbuf_len16(n, l2); 4842 4843 return (0); 4844 } 4845 4846 static int 4847 add_to_txpkts(struct mbuf *m, struct txpkts *txp, u_int available) 4848 { 4849 u_int plen, len16, needed, nsegs; 4850 4851 MPASS(txp->wr_type == 0 || txp->wr_type == 1); 4852 4853 if (cannot_use_txpkts(m)) 4854 return (1); 4855 4856 nsegs = mbuf_nsegs(m); 4857 if (txp->wr_type == 1 && nsegs != 1) 4858 return (1); 4859 4860 plen = txp->plen + m->m_pkthdr.len; 4861 if (plen > 65535) 4862 return (1); 4863 4864 if (txp->wr_type == 0) 4865 len16 = txpkts0_len16(nsegs); 4866 else 4867 len16 = txpkts1_len16(); 4868 needed = howmany(txp->len16 + len16, EQ_ESIZE / 16); 4869 if (needed > SGE_MAX_WR_NDESC || needed > available) 4870 return (1); 4871 4872 txp->npkt++; 4873 txp->plen = plen; 4874 txp->len16 += len16; 4875 set_mbuf_len16(m, len16); 4876 4877 return (0); 4878 } 4879 4880 /* 4881 * Write a txpkts WR for the packets in txp to the hardware descriptors, update 4882 * the software descriptor, and advance the pidx. It is guaranteed that enough 4883 * descriptors are available. 4884 * 4885 * The return value is the # of hardware descriptors used. 4886 */ 4887 static u_int 4888 write_txpkts_wr(struct sge_txq *txq, struct fw_eth_tx_pkts_wr *wr, 4889 struct mbuf *m0, const struct txpkts *txp, u_int available) 4890 { 4891 struct sge_eq *eq = &txq->eq; 4892 struct tx_sdesc *txsd; 4893 struct cpl_tx_pkt_core *cpl; 4894 uint32_t ctrl; 4895 uint64_t ctrl1; 4896 int ndesc, checkwrap; 4897 struct mbuf *m; 4898 void *flitp; 4899 4900 TXQ_LOCK_ASSERT_OWNED(txq); 4901 MPASS(txp->npkt > 0); 4902 MPASS(txp->plen < 65536); 4903 MPASS(m0 != NULL); 4904 MPASS(m0->m_nextpkt != NULL); 4905 MPASS(txp->len16 <= howmany(SGE_MAX_WR_LEN, 16)); 4906 MPASS(available > 0 && available < eq->sidx); 4907 4908 ndesc = howmany(txp->len16, EQ_ESIZE / 16); 4909 MPASS(ndesc <= available); 4910 4911 MPASS(wr == (void *)&eq->desc[eq->pidx]); 4912 wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)); 4913 ctrl = V_FW_WR_LEN16(txp->len16); 4914 wr->equiq_to_len16 = htobe32(ctrl); 4915 wr->plen = htobe16(txp->plen); 4916 wr->npkt = txp->npkt; 4917 wr->r3 = 0; 4918 wr->type = txp->wr_type; 4919 flitp = wr + 1; 4920 4921 /* 4922 * At this point we are 16B into a hardware descriptor. If checkwrap is 4923 * set then we know the WR is going to wrap around somewhere. We'll 4924 * check for that at appropriate points. 
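* checkwrap is true when the ndesc descriptors this WR needs, starting at pidx, would run past the end of the ring (sidx), i.e. the WR cannot be written without wrapping.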
4925 */ 4926 checkwrap = eq->sidx - ndesc < eq->pidx; 4927 for (m = m0; m != NULL; m = m->m_nextpkt) { 4928 if (txp->wr_type == 0) { 4929 struct ulp_txpkt *ulpmc; 4930 struct ulptx_idata *ulpsc; 4931 4932 /* ULP master command */ 4933 ulpmc = flitp; 4934 ulpmc->cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) | 4935 V_ULP_TXPKT_DEST(0) | V_ULP_TXPKT_FID(eq->iqid)); 4936 ulpmc->len = htobe32(mbuf_len16(m)); 4937 4938 /* ULP subcommand */ 4939 ulpsc = (void *)(ulpmc + 1); 4940 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) | 4941 F_ULP_TX_SC_MORE); 4942 ulpsc->len = htobe32(sizeof(struct cpl_tx_pkt_core)); 4943 4944 cpl = (void *)(ulpsc + 1); 4945 if (checkwrap && 4946 (uintptr_t)cpl == (uintptr_t)&eq->desc[eq->sidx]) 4947 cpl = (void *)&eq->desc[0]; 4948 } else { 4949 cpl = flitp; 4950 } 4951 4952 /* Checksum offload */ 4953 ctrl1 = 0; 4954 if (needs_l3_csum(m) == 0) 4955 ctrl1 |= F_TXPKT_IPCSUM_DIS; 4956 if (needs_l4_csum(m) == 0) 4957 ctrl1 |= F_TXPKT_L4CSUM_DIS; 4958 if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | 4959 CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) 4960 txq->txcsum++; /* some hardware assistance provided */ 4961 4962 /* VLAN tag insertion */ 4963 if (needs_vlan_insertion(m)) { 4964 ctrl1 |= F_TXPKT_VLAN_VLD | 4965 V_TXPKT_VLAN(m->m_pkthdr.ether_vtag); 4966 txq->vlan_insertion++; 4967 } 4968 4969 /* CPL header */ 4970 cpl->ctrl0 = txq->cpl_ctrl0; 4971 cpl->pack = 0; 4972 cpl->len = htobe16(m->m_pkthdr.len); 4973 cpl->ctrl1 = htobe64(ctrl1); 4974 4975 flitp = cpl + 1; 4976 if (checkwrap && 4977 (uintptr_t)flitp == (uintptr_t)&eq->desc[eq->sidx]) 4978 flitp = (void *)&eq->desc[0]; 4979 4980 write_gl_to_txd(txq, m, (caddr_t *)(&flitp), checkwrap); 4981 4982 } 4983 4984 if (txp->wr_type == 0) { 4985 txq->txpkts0_pkts += txp->npkt; 4986 txq->txpkts0_wrs++; 4987 } else { 4988 txq->txpkts1_pkts += txp->npkt; 4989 txq->txpkts1_wrs++; 4990 } 4991 4992 txsd = &txq->sdesc[eq->pidx]; 4993 txsd->m = m0; 4994 txsd->desc_used = ndesc; 4995 4996 return (ndesc); 4997 } 4998 4999 /* 5000 * If the SGL ends on an address that is not 16 byte aligned, this function will 5001 * add a 0 filled flit at the end. 5002 */ 5003 static void 5004 write_gl_to_txd(struct sge_txq *txq, struct mbuf *m, caddr_t *to, int checkwrap) 5005 { 5006 struct sge_eq *eq = &txq->eq; 5007 struct sglist *gl = txq->gl; 5008 struct sglist_seg *seg; 5009 __be64 *flitp, *wrap; 5010 struct ulptx_sgl *usgl; 5011 int i, nflits, nsegs; 5012 5013 KASSERT(((uintptr_t)(*to) & 0xf) == 0, 5014 ("%s: SGL must start at a 16 byte boundary: %p", __func__, *to)); 5015 MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]); 5016 MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]); 5017 5018 get_pkt_gl(m, gl); 5019 nsegs = gl->sg_nseg; 5020 MPASS(nsegs > 0); 5021 5022 nflits = (3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1) + 2; 5023 flitp = (__be64 *)(*to); 5024 wrap = (__be64 *)(&eq->desc[eq->sidx]); 5025 seg = &gl->sg_segs[0]; 5026 usgl = (void *)flitp; 5027 5028 /* 5029 * We start at a 16 byte boundary somewhere inside the tx descriptor 5030 * ring, so we're at least 16 bytes away from the status page. There is 5031 * no chance of a wrap around in the middle of usgl (which is 16 bytes). 
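* (cmd_nsge, len0, and addr0 make up that first 16-byte unit, which is why they are written below before any of the wrap checks.)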
5032 */ 5033 5034 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | 5035 V_ULPTX_NSGE(nsegs)); 5036 usgl->len0 = htobe32(seg->ss_len); 5037 usgl->addr0 = htobe64(seg->ss_paddr); 5038 seg++; 5039 5040 if (checkwrap == 0 || (uintptr_t)(flitp + nflits) <= (uintptr_t)wrap) { 5041 5042 /* Won't wrap around at all */ 5043 5044 for (i = 0; i < nsegs - 1; i++, seg++) { 5045 usgl->sge[i / 2].len[i & 1] = htobe32(seg->ss_len); 5046 usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ss_paddr); 5047 } 5048 if (i & 1) 5049 usgl->sge[i / 2].len[1] = htobe32(0); 5050 flitp += nflits; 5051 } else { 5052 5053 /* Will wrap somewhere in the rest of the SGL */ 5054 5055 /* 2 flits already written, write the rest flit by flit */ 5056 flitp = (void *)(usgl + 1); 5057 for (i = 0; i < nflits - 2; i++) { 5058 if (flitp == wrap) 5059 flitp = (void *)eq->desc; 5060 *flitp++ = get_flit(seg, nsegs - 1, i); 5061 } 5062 } 5063 5064 if (nflits & 1) { 5065 MPASS(((uintptr_t)flitp) & 0xf); 5066 *flitp++ = 0; 5067 } 5068 5069 MPASS((((uintptr_t)flitp) & 0xf) == 0); 5070 if (__predict_false(flitp == wrap)) 5071 *to = (void *)eq->desc; 5072 else 5073 *to = (void *)flitp; 5074 } 5075 5076 static inline void 5077 copy_to_txd(struct sge_eq *eq, caddr_t from, caddr_t *to, int len) 5078 { 5079 5080 MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]); 5081 MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]); 5082 5083 if (__predict_true((uintptr_t)(*to) + len <= 5084 (uintptr_t)&eq->desc[eq->sidx])) { 5085 bcopy(from, *to, len); 5086 (*to) += len; 5087 } else { 5088 int portion = (uintptr_t)&eq->desc[eq->sidx] - (uintptr_t)(*to); 5089 5090 bcopy(from, *to, portion); 5091 from += portion; 5092 portion = len - portion; /* remaining */ 5093 bcopy(from, (void *)eq->desc, portion); 5094 (*to) = (caddr_t)eq->desc + portion; 5095 } 5096 } 5097 5098 static inline void 5099 ring_eq_db(struct adapter *sc, struct sge_eq *eq, u_int n) 5100 { 5101 u_int db; 5102 5103 MPASS(n > 0); 5104 5105 db = eq->doorbells; 5106 if (n > 1) 5107 clrbit(&db, DOORBELL_WCWR); 5108 wmb(); 5109 5110 switch (ffs(db) - 1) { 5111 case DOORBELL_UDB: 5112 *eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(n)); 5113 break; 5114 5115 case DOORBELL_WCWR: { 5116 volatile uint64_t *dst, *src; 5117 int i; 5118 5119 /* 5120 * Queues whose 128B doorbell segment fits in the page do not 5121 * use relative qid (udb_qid is always 0). Only queues with 5122 * doorbell segments can do WCWR. 
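* A WCWR doorbell copies the 64-byte descriptor itself into the WR region of the queue's doorbell segment instead of just bumping pidx, which is also why it is limited to a single descriptor (n == 1) here.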
5123 */ 5124 KASSERT(eq->udb_qid == 0 && n == 1, 5125 ("%s: inappropriate doorbell (0x%x, %d, %d) for eq %p", 5126 __func__, eq->doorbells, n, eq->dbidx, eq)); 5127 5128 dst = (volatile void *)((uintptr_t)eq->udb + UDBS_WR_OFFSET - 5129 UDBS_DB_OFFSET); 5130 i = eq->dbidx; 5131 src = (void *)&eq->desc[i]; 5132 while (src != (void *)&eq->desc[i + 1]) 5133 *dst++ = *src++; 5134 wmb(); 5135 break; 5136 } 5137 5138 case DOORBELL_UDBWC: 5139 *eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(n)); 5140 wmb(); 5141 break; 5142 5143 case DOORBELL_KDB: 5144 t4_write_reg(sc, sc->sge_kdoorbell_reg, 5145 V_QID(eq->cntxt_id) | V_PIDX(n)); 5146 break; 5147 } 5148 5149 IDXINCR(eq->dbidx, n, eq->sidx); 5150 } 5151 5152 static inline u_int 5153 reclaimable_tx_desc(struct sge_eq *eq) 5154 { 5155 uint16_t hw_cidx; 5156 5157 hw_cidx = read_hw_cidx(eq); 5158 return (IDXDIFF(hw_cidx, eq->cidx, eq->sidx)); 5159 } 5160 5161 static inline u_int 5162 total_available_tx_desc(struct sge_eq *eq) 5163 { 5164 uint16_t hw_cidx, pidx; 5165 5166 hw_cidx = read_hw_cidx(eq); 5167 pidx = eq->pidx; 5168 5169 if (pidx == hw_cidx) 5170 return (eq->sidx - 1); 5171 else 5172 return (IDXDIFF(hw_cidx, pidx, eq->sidx) - 1); 5173 } 5174 5175 static inline uint16_t 5176 read_hw_cidx(struct sge_eq *eq) 5177 { 5178 struct sge_qstat *spg = (void *)&eq->desc[eq->sidx]; 5179 uint16_t cidx = spg->cidx; /* stable snapshot */ 5180 5181 return (be16toh(cidx)); 5182 } 5183 5184 /* 5185 * Reclaim 'n' descriptors approximately. 5186 */ 5187 static u_int 5188 reclaim_tx_descs(struct sge_txq *txq, u_int n) 5189 { 5190 struct tx_sdesc *txsd; 5191 struct sge_eq *eq = &txq->eq; 5192 u_int can_reclaim, reclaimed; 5193 5194 TXQ_LOCK_ASSERT_OWNED(txq); 5195 MPASS(n > 0); 5196 5197 reclaimed = 0; 5198 can_reclaim = reclaimable_tx_desc(eq); 5199 while (can_reclaim && reclaimed < n) { 5200 int ndesc; 5201 struct mbuf *m, *nextpkt; 5202 5203 txsd = &txq->sdesc[eq->cidx]; 5204 ndesc = txsd->desc_used; 5205 5206 /* Firmware doesn't return "partial" credits. 
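Each tx_sdesc records how many descriptors its WR consumed, so the hardware cidx always advances in whole-WR units; the KASSERTs below rely on that.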
*/ 5207 KASSERT(can_reclaim >= ndesc, 5208 ("%s: unexpected number of credits: %d, %d", 5209 __func__, can_reclaim, ndesc)); 5210 KASSERT(ndesc != 0, 5211 ("%s: descriptor with no credits: cidx %d", 5212 __func__, eq->cidx)); 5213 5214 for (m = txsd->m; m != NULL; m = nextpkt) { 5215 nextpkt = m->m_nextpkt; 5216 m->m_nextpkt = NULL; 5217 m_freem(m); 5218 } 5219 reclaimed += ndesc; 5220 can_reclaim -= ndesc; 5221 IDXINCR(eq->cidx, ndesc, eq->sidx); 5222 } 5223 5224 return (reclaimed); 5225 } 5226 5227 static void 5228 tx_reclaim(void *arg, int n) 5229 { 5230 struct sge_txq *txq = arg; 5231 struct sge_eq *eq = &txq->eq; 5232 5233 do { 5234 if (TXQ_TRYLOCK(txq) == 0) 5235 break; 5236 n = reclaim_tx_descs(txq, 32); 5237 if (eq->cidx == eq->pidx) 5238 eq->equeqidx = eq->pidx; 5239 TXQ_UNLOCK(txq); 5240 } while (n > 0); 5241 } 5242 5243 static __be64 5244 get_flit(struct sglist_seg *segs, int nsegs, int idx) 5245 { 5246 int i = (idx / 3) * 2; 5247 5248 switch (idx % 3) { 5249 case 0: { 5250 uint64_t rc; 5251 5252 rc = (uint64_t)segs[i].ss_len << 32; 5253 if (i + 1 < nsegs) 5254 rc |= (uint64_t)(segs[i + 1].ss_len); 5255 5256 return (htobe64(rc)); 5257 } 5258 case 1: 5259 return (htobe64(segs[i].ss_paddr)); 5260 case 2: 5261 return (htobe64(segs[i + 1].ss_paddr)); 5262 } 5263 5264 return (0); 5265 } 5266 5267 static void 5268 find_best_refill_source(struct adapter *sc, struct sge_fl *fl, int maxp) 5269 { 5270 int8_t zidx, hwidx, idx; 5271 uint16_t region1, region3; 5272 int spare, spare_needed, n; 5273 struct sw_zone_info *swz; 5274 struct hw_buf_info *hwb, *hwb_list = &sc->sge.hw_buf_info[0]; 5275 5276 /* 5277 * Buffer Packing: Look for PAGE_SIZE or larger zone which has a bufsize 5278 * large enough for the max payload and cluster metadata. Otherwise 5279 * settle for the largest bufsize that leaves enough room in the cluster 5280 * for metadata. 5281 * 5282 * Without buffer packing: Look for the smallest zone which has a 5283 * bufsize large enough for the max payload. Settle for the largest 5284 * bufsize available if there's nothing big enough for max payload. 5285 */ 5286 spare_needed = fl->flags & FL_BUF_PACKING ? CL_METADATA_SIZE : 0; 5287 swz = &sc->sge.sw_zone_info[0]; 5288 hwidx = -1; 5289 for (zidx = 0; zidx < SW_ZONE_SIZES; zidx++, swz++) { 5290 if (swz->size > largest_rx_cluster) { 5291 if (__predict_true(hwidx != -1)) 5292 break; 5293 5294 /* 5295 * This is a misconfiguration. largest_rx_cluster is 5296 * preventing us from finding a refill source. See 5297 * dev.t5nex.<n>.buffer_sizes to figure out why. 5298 */ 5299 device_printf(sc->dev, "largest_rx_cluster=%u leaves no" 5300 " refill source for fl %p (dma %u). Ignored.\n", 5301 largest_rx_cluster, fl, maxp); 5302 } 5303 for (idx = swz->head_hwidx; idx != -1; idx = hwb->next) { 5304 hwb = &hwb_list[idx]; 5305 spare = swz->size - hwb->size; 5306 if (spare < spare_needed) 5307 continue; 5308 5309 hwidx = idx; /* best option so far */ 5310 if (hwb->size >= maxp) { 5311 5312 if ((fl->flags & FL_BUF_PACKING) == 0) 5313 goto done; /* stop looking (not packing) */ 5314 5315 if (swz->size >= safest_rx_cluster) 5316 goto done; /* stop looking (packing) */ 5317 } 5318 break; /* keep looking, next zone */ 5319 } 5320 } 5321 done: 5322 /* A usable hwidx has been located. 
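If largest_rx_cluster would have ruled out every zone, the loop above ignored the limit (with a message) rather than give up, so in practice a hwidx has been found by the time we get here; the MPASS below asserts it.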
*/ 5323 MPASS(hwidx != -1); 5324 hwb = &hwb_list[hwidx]; 5325 zidx = hwb->zidx; 5326 swz = &sc->sge.sw_zone_info[zidx]; 5327 region1 = 0; 5328 region3 = swz->size - hwb->size; 5329 5330 /* 5331 * Stay within this zone and see if there is a better match when mbuf 5332 * inlining is allowed. Remember that the hwidx's are sorted in 5333 * decreasing order of size (so in increasing order of spare area). 5334 */ 5335 for (idx = hwidx; idx != -1; idx = hwb->next) { 5336 hwb = &hwb_list[idx]; 5337 spare = swz->size - hwb->size; 5338 5339 if (allow_mbufs_in_cluster == 0 || hwb->size < maxp) 5340 break; 5341 5342 /* 5343 * Do not inline mbufs if doing so would violate the pad/pack 5344 * boundary alignment requirement. 5345 */ 5346 if (fl_pad && (MSIZE % sc->params.sge.pad_boundary) != 0) 5347 continue; 5348 if (fl->flags & FL_BUF_PACKING && 5349 (MSIZE % sc->params.sge.pack_boundary) != 0) 5350 continue; 5351 5352 if (spare < CL_METADATA_SIZE + MSIZE) 5353 continue; 5354 n = (spare - CL_METADATA_SIZE) / MSIZE; 5355 if (n > howmany(hwb->size, maxp)) 5356 break; 5357 5358 hwidx = idx; 5359 if (fl->flags & FL_BUF_PACKING) { 5360 region1 = n * MSIZE; 5361 region3 = spare - region1; 5362 } else { 5363 region1 = MSIZE; 5364 region3 = spare - region1; 5365 break; 5366 } 5367 } 5368 5369 KASSERT(zidx >= 0 && zidx < SW_ZONE_SIZES, 5370 ("%s: bad zone %d for fl %p, maxp %d", __func__, zidx, fl, maxp)); 5371 KASSERT(hwidx >= 0 && hwidx <= SGE_FLBUF_SIZES, 5372 ("%s: bad hwidx %d for fl %p, maxp %d", __func__, hwidx, fl, maxp)); 5373 KASSERT(region1 + sc->sge.hw_buf_info[hwidx].size + region3 == 5374 sc->sge.sw_zone_info[zidx].size, 5375 ("%s: bad buffer layout for fl %p, maxp %d. " 5376 "cl %d; r1 %d, payload %d, r3 %d", __func__, fl, maxp, 5377 sc->sge.sw_zone_info[zidx].size, region1, 5378 sc->sge.hw_buf_info[hwidx].size, region3)); 5379 if (fl->flags & FL_BUF_PACKING || region1 > 0) { 5380 KASSERT(region3 >= CL_METADATA_SIZE, 5381 ("%s: no room for metadata. fl %p, maxp %d; " 5382 "cl %d; r1 %d, payload %d, r3 %d", __func__, fl, maxp, 5383 sc->sge.sw_zone_info[zidx].size, region1, 5384 sc->sge.hw_buf_info[hwidx].size, region3)); 5385 KASSERT(region1 % MSIZE == 0, 5386 ("%s: bad mbuf region for fl %p, maxp %d. 
" 5387 "cl %d; r1 %d, payload %d, r3 %d", __func__, fl, maxp, 5388 sc->sge.sw_zone_info[zidx].size, region1, 5389 sc->sge.hw_buf_info[hwidx].size, region3)); 5390 } 5391 5392 fl->cll_def.zidx = zidx; 5393 fl->cll_def.hwidx = hwidx; 5394 fl->cll_def.region1 = region1; 5395 fl->cll_def.region3 = region3; 5396 } 5397 5398 static void 5399 find_safe_refill_source(struct adapter *sc, struct sge_fl *fl) 5400 { 5401 struct sge *s = &sc->sge; 5402 struct hw_buf_info *hwb; 5403 struct sw_zone_info *swz; 5404 int spare; 5405 int8_t hwidx; 5406 5407 if (fl->flags & FL_BUF_PACKING) 5408 hwidx = s->safe_hwidx2; /* with room for metadata */ 5409 else if (allow_mbufs_in_cluster && s->safe_hwidx2 != -1) { 5410 hwidx = s->safe_hwidx2; 5411 hwb = &s->hw_buf_info[hwidx]; 5412 swz = &s->sw_zone_info[hwb->zidx]; 5413 spare = swz->size - hwb->size; 5414 5415 /* no good if there isn't room for an mbuf as well */ 5416 if (spare < CL_METADATA_SIZE + MSIZE) 5417 hwidx = s->safe_hwidx1; 5418 } else 5419 hwidx = s->safe_hwidx1; 5420 5421 if (hwidx == -1) { 5422 /* No fallback source */ 5423 fl->cll_alt.hwidx = -1; 5424 fl->cll_alt.zidx = -1; 5425 5426 return; 5427 } 5428 5429 hwb = &s->hw_buf_info[hwidx]; 5430 swz = &s->sw_zone_info[hwb->zidx]; 5431 spare = swz->size - hwb->size; 5432 fl->cll_alt.hwidx = hwidx; 5433 fl->cll_alt.zidx = hwb->zidx; 5434 if (allow_mbufs_in_cluster && 5435 (fl_pad == 0 || (MSIZE % sc->params.sge.pad_boundary) == 0)) 5436 fl->cll_alt.region1 = ((spare - CL_METADATA_SIZE) / MSIZE) * MSIZE; 5437 else 5438 fl->cll_alt.region1 = 0; 5439 fl->cll_alt.region3 = spare - fl->cll_alt.region1; 5440 } 5441 5442 static void 5443 add_fl_to_sfl(struct adapter *sc, struct sge_fl *fl) 5444 { 5445 mtx_lock(&sc->sfl_lock); 5446 FL_LOCK(fl); 5447 if ((fl->flags & FL_DOOMED) == 0) { 5448 fl->flags |= FL_STARVING; 5449 TAILQ_INSERT_TAIL(&sc->sfl, fl, link); 5450 callout_reset(&sc->sfl_callout, hz / 5, refill_sfl, sc); 5451 } 5452 FL_UNLOCK(fl); 5453 mtx_unlock(&sc->sfl_lock); 5454 } 5455 5456 static void 5457 handle_wrq_egr_update(struct adapter *sc, struct sge_eq *eq) 5458 { 5459 struct sge_wrq *wrq = (void *)eq; 5460 5461 atomic_readandclear_int(&eq->equiq); 5462 taskqueue_enqueue(sc->tq[eq->tx_chan], &wrq->wrq_tx_task); 5463 } 5464 5465 static void 5466 handle_eth_egr_update(struct adapter *sc, struct sge_eq *eq) 5467 { 5468 struct sge_txq *txq = (void *)eq; 5469 5470 MPASS((eq->flags & EQ_TYPEMASK) == EQ_ETH); 5471 5472 atomic_readandclear_int(&eq->equiq); 5473 mp_ring_check_drainage(txq->r, 0); 5474 taskqueue_enqueue(sc->tq[eq->tx_chan], &txq->tx_reclaim_task); 5475 } 5476 5477 static int 5478 handle_sge_egr_update(struct sge_iq *iq, const struct rss_header *rss, 5479 struct mbuf *m) 5480 { 5481 const struct cpl_sge_egr_update *cpl = (const void *)(rss + 1); 5482 unsigned int qid = G_EGR_QID(ntohl(cpl->opcode_qid)); 5483 struct adapter *sc = iq->adapter; 5484 struct sge *s = &sc->sge; 5485 struct sge_eq *eq; 5486 static void (*h[])(struct adapter *, struct sge_eq *) = {NULL, 5487 &handle_wrq_egr_update, &handle_eth_egr_update, 5488 &handle_wrq_egr_update}; 5489 5490 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__, 5491 rss->opcode)); 5492 5493 eq = s->eqmap[qid - s->eq_start - s->eq_base]; 5494 (*h[eq->flags & EQ_TYPEMASK])(sc, eq); 5495 5496 return (0); 5497 } 5498 5499 /* handle_fw_msg works for both fw4_msg and fw6_msg because this is valid */ 5500 CTASSERT(offsetof(struct cpl_fw4_msg, data) == \ 5501 offsetof(struct cpl_fw6_msg, data)); 5502 5503 static int 5504 handle_fw_msg(struct sge_iq 
*iq, const struct rss_header *rss, struct mbuf *m) 5505 { 5506 struct adapter *sc = iq->adapter; 5507 const struct cpl_fw6_msg *cpl = (const void *)(rss + 1); 5508 5509 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__, 5510 rss->opcode)); 5511 5512 if (cpl->type == FW_TYPE_RSSCPL || cpl->type == FW6_TYPE_RSSCPL) { 5513 const struct rss_header *rss2; 5514 5515 rss2 = (const struct rss_header *)&cpl->data[0]; 5516 return (t4_cpl_handler[rss2->opcode](iq, rss2, m)); 5517 } 5518 5519 return (t4_fw_msg_handler[cpl->type](sc, &cpl->data[0])); 5520 } 5521 5522 /** 5523 * t4_handle_wrerr_rpl - process a FW work request error message 5524 * @adap: the adapter 5525 * @rpl: start of the FW message 5526 */ 5527 static int 5528 t4_handle_wrerr_rpl(struct adapter *adap, const __be64 *rpl) 5529 { 5530 u8 opcode = *(const u8 *)rpl; 5531 const struct fw_error_cmd *e = (const void *)rpl; 5532 unsigned int i; 5533 5534 if (opcode != FW_ERROR_CMD) { 5535 log(LOG_ERR, 5536 "%s: Received WRERR_RPL message with opcode %#x\n", 5537 device_get_nameunit(adap->dev), opcode); 5538 return (EINVAL); 5539 } 5540 log(LOG_ERR, "%s: FW_ERROR (%s) ", device_get_nameunit(adap->dev), 5541 G_FW_ERROR_CMD_FATAL(be32toh(e->op_to_type)) ? "fatal" : 5542 "non-fatal"); 5543 switch (G_FW_ERROR_CMD_TYPE(be32toh(e->op_to_type))) { 5544 case FW_ERROR_TYPE_EXCEPTION: 5545 log(LOG_ERR, "exception info:\n"); 5546 for (i = 0; i < nitems(e->u.exception.info); i++) 5547 log(LOG_ERR, "%s%08x", i == 0 ? "\t" : " ", 5548 be32toh(e->u.exception.info[i])); 5549 log(LOG_ERR, "\n"); 5550 break; 5551 case FW_ERROR_TYPE_HWMODULE: 5552 log(LOG_ERR, "HW module regaddr %08x regval %08x\n", 5553 be32toh(e->u.hwmodule.regaddr), 5554 be32toh(e->u.hwmodule.regval)); 5555 break; 5556 case FW_ERROR_TYPE_WR: 5557 log(LOG_ERR, "WR cidx %d PF %d VF %d eqid %d hdr:\n", 5558 be16toh(e->u.wr.cidx), 5559 G_FW_ERROR_CMD_PFN(be16toh(e->u.wr.pfn_vfn)), 5560 G_FW_ERROR_CMD_VFN(be16toh(e->u.wr.pfn_vfn)), 5561 be32toh(e->u.wr.eqid)); 5562 for (i = 0; i < nitems(e->u.wr.wrhdr); i++) 5563 log(LOG_ERR, "%s%02x", i == 0 ? "\t" : " ", 5564 e->u.wr.wrhdr[i]); 5565 log(LOG_ERR, "\n"); 5566 break; 5567 case FW_ERROR_TYPE_ACL: 5568 log(LOG_ERR, "ACL cidx %d PF %d VF %d eqid %d %s", 5569 be16toh(e->u.acl.cidx), 5570 G_FW_ERROR_CMD_PFN(be16toh(e->u.acl.pfn_vfn)), 5571 G_FW_ERROR_CMD_VFN(be16toh(e->u.acl.pfn_vfn)), 5572 be32toh(e->u.acl.eqid), 5573 G_FW_ERROR_CMD_MV(be16toh(e->u.acl.mv_pkd)) ? 
"vlanid" : 5574 "MAC"); 5575 for (i = 0; i < nitems(e->u.acl.val); i++) 5576 log(LOG_ERR, " %02x", e->u.acl.val[i]); 5577 log(LOG_ERR, "\n"); 5578 break; 5579 default: 5580 log(LOG_ERR, "type %#x\n", 5581 G_FW_ERROR_CMD_TYPE(be32toh(e->op_to_type))); 5582 return (EINVAL); 5583 } 5584 return (0); 5585 } 5586 5587 static int 5588 sysctl_uint16(SYSCTL_HANDLER_ARGS) 5589 { 5590 uint16_t *id = arg1; 5591 int i = *id; 5592 5593 return sysctl_handle_int(oidp, &i, 0, req); 5594 } 5595 5596 static int 5597 sysctl_bufsizes(SYSCTL_HANDLER_ARGS) 5598 { 5599 struct sge *s = arg1; 5600 struct hw_buf_info *hwb = &s->hw_buf_info[0]; 5601 struct sw_zone_info *swz = &s->sw_zone_info[0]; 5602 int i, rc; 5603 struct sbuf sb; 5604 char c; 5605 5606 sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND); 5607 for (i = 0; i < SGE_FLBUF_SIZES; i++, hwb++) { 5608 if (hwb->zidx >= 0 && swz[hwb->zidx].size <= largest_rx_cluster) 5609 c = '*'; 5610 else 5611 c = '\0'; 5612 5613 sbuf_printf(&sb, "%u%c ", hwb->size, c); 5614 } 5615 sbuf_trim(&sb); 5616 sbuf_finish(&sb); 5617 rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); 5618 sbuf_delete(&sb); 5619 return (rc); 5620 } 5621 5622 #ifdef RATELIMIT 5623 /* 5624 * len16 for a txpkt WR with a GL. Includes the firmware work request header. 5625 */ 5626 static inline u_int 5627 txpkt_eo_len16(u_int nsegs, u_int immhdrs, u_int tso) 5628 { 5629 u_int n; 5630 5631 MPASS(immhdrs > 0); 5632 5633 n = roundup2(sizeof(struct fw_eth_tx_eo_wr) + 5634 sizeof(struct cpl_tx_pkt_core) + immhdrs, 16); 5635 if (__predict_false(nsegs == 0)) 5636 goto done; 5637 5638 nsegs--; /* first segment is part of ulptx_sgl */ 5639 n += sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1)); 5640 if (tso) 5641 n += sizeof(struct cpl_tx_pkt_lso_core); 5642 5643 done: 5644 return (howmany(n, 16)); 5645 } 5646 5647 #define ETID_FLOWC_NPARAMS 6 5648 #define ETID_FLOWC_LEN (roundup2((sizeof(struct fw_flowc_wr) + \ 5649 ETID_FLOWC_NPARAMS * sizeof(struct fw_flowc_mnemval)), 16)) 5650 #define ETID_FLOWC_LEN16 (howmany(ETID_FLOWC_LEN, 16)) 5651 5652 static int 5653 send_etid_flowc_wr(struct cxgbe_snd_tag *cst, struct port_info *pi, 5654 struct vi_info *vi) 5655 { 5656 struct wrq_cookie cookie; 5657 u_int pfvf = pi->adapter->pf << S_FW_VIID_PFN; 5658 struct fw_flowc_wr *flowc; 5659 5660 mtx_assert(&cst->lock, MA_OWNED); 5661 MPASS((cst->flags & (EO_FLOWC_PENDING | EO_FLOWC_RPL_PENDING)) == 5662 EO_FLOWC_PENDING); 5663 5664 flowc = start_wrq_wr(cst->eo_txq, ETID_FLOWC_LEN16, &cookie); 5665 if (__predict_false(flowc == NULL)) 5666 return (ENOMEM); 5667 5668 bzero(flowc, ETID_FLOWC_LEN); 5669 flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) | 5670 V_FW_FLOWC_WR_NPARAMS(ETID_FLOWC_NPARAMS) | V_FW_WR_COMPL(0)); 5671 flowc->flowid_len16 = htonl(V_FW_WR_LEN16(ETID_FLOWC_LEN16) | 5672 V_FW_WR_FLOWID(cst->etid)); 5673 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; 5674 flowc->mnemval[0].val = htobe32(pfvf); 5675 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; 5676 flowc->mnemval[1].val = htobe32(pi->tx_chan); 5677 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; 5678 flowc->mnemval[2].val = htobe32(pi->tx_chan); 5679 flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; 5680 flowc->mnemval[3].val = htobe32(cst->iqid); 5681 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_EOSTATE; 5682 flowc->mnemval[4].val = htobe32(FW_FLOWC_MNEM_EOSTATE_ESTABLISHED); 5683 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS; 5684 flowc->mnemval[5].val = htobe32(cst->schedcl); 5685 5686 commit_wrq_wr(cst->eo_txq, flowc, 
&cookie); 5687 5688 cst->flags &= ~EO_FLOWC_PENDING; 5689 cst->flags |= EO_FLOWC_RPL_PENDING; 5690 MPASS(cst->tx_credits >= ETID_FLOWC_LEN16); /* flowc is first WR. */ 5691 cst->tx_credits -= ETID_FLOWC_LEN16; 5692 5693 return (0); 5694 } 5695 5696 #define ETID_FLUSH_LEN16 (howmany(sizeof (struct fw_flowc_wr), 16)) 5697 5698 void 5699 send_etid_flush_wr(struct cxgbe_snd_tag *cst) 5700 { 5701 struct fw_flowc_wr *flowc; 5702 struct wrq_cookie cookie; 5703 5704 mtx_assert(&cst->lock, MA_OWNED); 5705 5706 flowc = start_wrq_wr(cst->eo_txq, ETID_FLUSH_LEN16, &cookie); 5707 if (__predict_false(flowc == NULL)) 5708 CXGBE_UNIMPLEMENTED(__func__); 5709 5710 bzero(flowc, ETID_FLUSH_LEN16 * 16); 5711 flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) | 5712 V_FW_FLOWC_WR_NPARAMS(0) | F_FW_WR_COMPL); 5713 flowc->flowid_len16 = htobe32(V_FW_WR_LEN16(ETID_FLUSH_LEN16) | 5714 V_FW_WR_FLOWID(cst->etid)); 5715 5716 commit_wrq_wr(cst->eo_txq, flowc, &cookie); 5717 5718 cst->flags |= EO_FLUSH_RPL_PENDING; 5719 MPASS(cst->tx_credits >= ETID_FLUSH_LEN16); 5720 cst->tx_credits -= ETID_FLUSH_LEN16; 5721 cst->ncompl++; 5722 } 5723 5724 static void 5725 write_ethofld_wr(struct cxgbe_snd_tag *cst, struct fw_eth_tx_eo_wr *wr, 5726 struct mbuf *m0, int compl) 5727 { 5728 struct cpl_tx_pkt_core *cpl; 5729 uint64_t ctrl1; 5730 uint32_t ctrl; /* used in many unrelated places */ 5731 int len16, pktlen, nsegs, immhdrs; 5732 caddr_t dst; 5733 uintptr_t p; 5734 struct ulptx_sgl *usgl; 5735 struct sglist sg; 5736 struct sglist_seg segs[38]; /* XXX: find real limit. XXX: get off the stack */ 5737 5738 mtx_assert(&cst->lock, MA_OWNED); 5739 M_ASSERTPKTHDR(m0); 5740 KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 && 5741 m0->m_pkthdr.l4hlen > 0, 5742 ("%s: ethofld mbuf %p is missing header lengths", __func__, m0)); 5743 5744 len16 = mbuf_eo_len16(m0); 5745 nsegs = mbuf_eo_nsegs(m0); 5746 pktlen = m0->m_pkthdr.len; 5747 ctrl = sizeof(struct cpl_tx_pkt_core); 5748 if (needs_tso(m0)) 5749 ctrl += sizeof(struct cpl_tx_pkt_lso_core); 5750 immhdrs = m0->m_pkthdr.l2hlen + m0->m_pkthdr.l3hlen + m0->m_pkthdr.l4hlen; 5751 ctrl += immhdrs; 5752 5753 wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_EO_WR) | 5754 V_FW_ETH_TX_EO_WR_IMMDLEN(ctrl) | V_FW_WR_COMPL(!!compl)); 5755 wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(len16) | 5756 V_FW_WR_FLOWID(cst->etid)); 5757 wr->r3 = 0; 5758 if (needs_udp_csum(m0)) { 5759 wr->u.udpseg.type = FW_ETH_TX_EO_TYPE_UDPSEG; 5760 wr->u.udpseg.ethlen = m0->m_pkthdr.l2hlen; 5761 wr->u.udpseg.iplen = htobe16(m0->m_pkthdr.l3hlen); 5762 wr->u.udpseg.udplen = m0->m_pkthdr.l4hlen; 5763 wr->u.udpseg.rtplen = 0; 5764 wr->u.udpseg.r4 = 0; 5765 wr->u.udpseg.mss = htobe16(pktlen - immhdrs); 5766 wr->u.udpseg.schedpktsize = wr->u.udpseg.mss; 5767 wr->u.udpseg.plen = htobe32(pktlen - immhdrs); 5768 cpl = (void *)(wr + 1); 5769 } else { 5770 MPASS(needs_tcp_csum(m0)); 5771 wr->u.tcpseg.type = FW_ETH_TX_EO_TYPE_TCPSEG; 5772 wr->u.tcpseg.ethlen = m0->m_pkthdr.l2hlen; 5773 wr->u.tcpseg.iplen = htobe16(m0->m_pkthdr.l3hlen); 5774 wr->u.tcpseg.tcplen = m0->m_pkthdr.l4hlen; 5775 wr->u.tcpseg.tsclk_tsoff = mbuf_eo_tsclk_tsoff(m0); 5776 wr->u.tcpseg.r4 = 0; 5777 wr->u.tcpseg.r5 = 0; 5778 wr->u.tcpseg.plen = htobe32(pktlen - immhdrs); 5779 5780 if (needs_tso(m0)) { 5781 struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); 5782 5783 wr->u.tcpseg.mss = htobe16(m0->m_pkthdr.tso_segsz); 5784 5785 ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | 5786 F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE | 5787 V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) | 
static void
write_ethofld_wr(struct cxgbe_snd_tag *cst, struct fw_eth_tx_eo_wr *wr,
    struct mbuf *m0, int compl)
{
	struct cpl_tx_pkt_core *cpl;
	uint64_t ctrl1;
	uint32_t ctrl;	/* used in many unrelated places */
	int len16, pktlen, nsegs, immhdrs;
	caddr_t dst;
	uintptr_t p;
	struct ulptx_sgl *usgl;
	struct sglist sg;
	struct sglist_seg segs[38];	/* XXX: find real limit.  XXX: get off the stack */

	mtx_assert(&cst->lock, MA_OWNED);
	M_ASSERTPKTHDR(m0);
	KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 &&
	    m0->m_pkthdr.l4hlen > 0,
	    ("%s: ethofld mbuf %p is missing header lengths", __func__, m0));

	len16 = mbuf_eo_len16(m0);
	nsegs = mbuf_eo_nsegs(m0);
	pktlen = m0->m_pkthdr.len;
	ctrl = sizeof(struct cpl_tx_pkt_core);
	if (needs_tso(m0))
		ctrl += sizeof(struct cpl_tx_pkt_lso_core);
	immhdrs = m0->m_pkthdr.l2hlen + m0->m_pkthdr.l3hlen +
	    m0->m_pkthdr.l4hlen;
	ctrl += immhdrs;

	wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_EO_WR) |
	    V_FW_ETH_TX_EO_WR_IMMDLEN(ctrl) | V_FW_WR_COMPL(!!compl));
	wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(len16) |
	    V_FW_WR_FLOWID(cst->etid));
	wr->r3 = 0;
	if (needs_udp_csum(m0)) {
		wr->u.udpseg.type = FW_ETH_TX_EO_TYPE_UDPSEG;
		wr->u.udpseg.ethlen = m0->m_pkthdr.l2hlen;
		wr->u.udpseg.iplen = htobe16(m0->m_pkthdr.l3hlen);
		wr->u.udpseg.udplen = m0->m_pkthdr.l4hlen;
		wr->u.udpseg.rtplen = 0;
		wr->u.udpseg.r4 = 0;
		wr->u.udpseg.mss = htobe16(pktlen - immhdrs);
		wr->u.udpseg.schedpktsize = wr->u.udpseg.mss;
		wr->u.udpseg.plen = htobe32(pktlen - immhdrs);
		cpl = (void *)(wr + 1);
	} else {
		MPASS(needs_tcp_csum(m0));
		wr->u.tcpseg.type = FW_ETH_TX_EO_TYPE_TCPSEG;
		wr->u.tcpseg.ethlen = m0->m_pkthdr.l2hlen;
		wr->u.tcpseg.iplen = htobe16(m0->m_pkthdr.l3hlen);
		wr->u.tcpseg.tcplen = m0->m_pkthdr.l4hlen;
		wr->u.tcpseg.tsclk_tsoff = mbuf_eo_tsclk_tsoff(m0);
		wr->u.tcpseg.r4 = 0;
		wr->u.tcpseg.r5 = 0;
		wr->u.tcpseg.plen = htobe32(pktlen - immhdrs);

		if (needs_tso(m0)) {
			struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);

			wr->u.tcpseg.mss = htobe16(m0->m_pkthdr.tso_segsz);

			ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) |
			    F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE |
			    V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) |
			    V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2);
			if (m0->m_pkthdr.l2hlen ==
			    sizeof(struct ether_vlan_header))
				ctrl |= V_LSO_ETHHDR_LEN(1);
			if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr))
				ctrl |= F_LSO_IPV6;
			lso->lso_ctrl = htobe32(ctrl);
			lso->ipid_ofst = htobe16(0);
			lso->mss = htobe16(m0->m_pkthdr.tso_segsz);
			lso->seqno_offset = htobe32(0);
			lso->len = htobe32(pktlen);

			cpl = (void *)(lso + 1);
		} else {
			wr->u.tcpseg.mss = htobe16(0xffff);
			cpl = (void *)(wr + 1);
		}
	}

	/* Checksum offload must be requested for ethofld. */
	ctrl1 = 0;
	MPASS(needs_l4_csum(m0));

	/* VLAN tag insertion */
	if (needs_vlan_insertion(m0)) {
		ctrl1 |= F_TXPKT_VLAN_VLD |
		    V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag);
	}

	/* CPL header */
	cpl->ctrl0 = cst->ctrl0;
	cpl->pack = 0;
	cpl->len = htobe16(pktlen);
	cpl->ctrl1 = htobe64(ctrl1);

	/* Copy Ethernet, IP & TCP/UDP hdrs as immediate data */
	p = (uintptr_t)(cpl + 1);
	m_copydata(m0, 0, immhdrs, (void *)p);

	/* SGL */
	dst = (void *)(cpl + 1);
	if (nsegs > 0) {
		int i, pad;

		/* zero-pad up to the next 16-byte boundary, if not aligned */
		p += immhdrs;
		pad = 16 - (immhdrs & 0xf);
		bzero((void *)p, pad);

		usgl = (void *)(p + pad);
		usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
		    V_ULPTX_NSGE(nsegs));

		sglist_init(&sg, nitems(segs), segs);
		for (; m0 != NULL; m0 = m0->m_next) {
			if (__predict_false(m0->m_len == 0))
				continue;
			if (immhdrs >= m0->m_len) {
				immhdrs -= m0->m_len;
				continue;
			}

			sglist_append(&sg, mtod(m0, char *) + immhdrs,
			    m0->m_len - immhdrs);
			immhdrs = 0;
		}
		MPASS(sg.sg_nseg == nsegs);

		/*
		 * Zero pad last 8B in case the WR doesn't end on a 16B
		 * boundary.
		 */
		*(uint64_t *)((char *)wr + len16 * 16 - 8) = 0;

		usgl->len0 = htobe32(segs[0].ss_len);
		usgl->addr0 = htobe64(segs[0].ss_paddr);
		for (i = 0; i < nsegs - 1; i++) {
			usgl->sge[i / 2].len[i & 1] =
			    htobe32(segs[i + 1].ss_len);
			usgl->sge[i / 2].addr[i & 1] =
			    htobe64(segs[i + 1].ss_paddr);
		}
		if (i & 1)
			usgl->sge[i / 2].len[1] = htobe32(0);
	}
}

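/*
 * Drain as much of the pending ethofld tx backlog as the available tx credits
 * allow.  A completion is requested when none is outstanding or when roughly
 * half of the total credits have been consumed without one, so that fw4_ack
 * keeps returning credits while transmission is in progress.
 */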
static void
ethofld_tx(struct cxgbe_snd_tag *cst)
{
	struct mbuf *m;
	struct wrq_cookie cookie;
	int next_credits, compl;
	struct fw_eth_tx_eo_wr *wr;

	mtx_assert(&cst->lock, MA_OWNED);

	while ((m = mbufq_first(&cst->pending_tx)) != NULL) {
		M_ASSERTPKTHDR(m);

		/* How many len16 credits do we need to send this mbuf? */
		next_credits = mbuf_eo_len16(m);
		MPASS(next_credits > 0);
		if (next_credits > cst->tx_credits) {
			/*
			 * Tx will make progress eventually because there is at
			 * least one outstanding fw4_ack that will return
			 * credits and kick the tx.
			 */
			MPASS(cst->ncompl > 0);
			return;
		}
		wr = start_wrq_wr(cst->eo_txq, next_credits, &cookie);
		if (__predict_false(wr == NULL)) {
			/* XXX: wishful thinking, not a real assertion. */
			MPASS(cst->ncompl > 0);
			return;
		}
		cst->tx_credits -= next_credits;
		cst->tx_nocompl += next_credits;
		compl = cst->ncompl == 0 || cst->tx_nocompl >= cst->tx_total / 2;
		ETHER_BPF_MTAP(cst->com.ifp, m);
		write_ethofld_wr(cst, wr, m, compl);
		commit_wrq_wr(cst->eo_txq, wr, &cookie);
		if (compl) {
			cst->ncompl++;
			cst->tx_nocompl = 0;
		}
		(void) mbufq_dequeue(&cst->pending_tx);
		mbufq_enqueue(&cst->pending_fwack, m);
	}
}

int
ethofld_transmit(struct ifnet *ifp, struct mbuf *m0)
{
	struct cxgbe_snd_tag *cst;
	int rc;

	MPASS(m0->m_nextpkt == NULL);
	MPASS(m0->m_pkthdr.snd_tag != NULL);
	cst = mst_to_cst(m0->m_pkthdr.snd_tag);

	mtx_lock(&cst->lock);
	MPASS(cst->flags & EO_SND_TAG_REF);

	if (__predict_false(cst->flags & EO_FLOWC_PENDING)) {
		struct vi_info *vi = ifp->if_softc;
		struct port_info *pi = vi->pi;
		struct adapter *sc = pi->adapter;
		const uint32_t rss_mask = vi->rss_size - 1;
		uint32_t rss_hash;

		cst->eo_txq = &sc->sge.ofld_txq[vi->first_ofld_txq];
		if (M_HASHTYPE_ISHASH(m0))
			rss_hash = m0->m_pkthdr.flowid;
		else
			rss_hash = arc4random();
		/* We assume RSS hashing */
		cst->iqid = vi->rss[rss_hash & rss_mask];
		cst->eo_txq += rss_hash % vi->nofldtxq;
		rc = send_etid_flowc_wr(cst, pi, vi);
		if (rc != 0)
			goto done;
	}

	if (__predict_false(cst->plen + m0->m_pkthdr.len > eo_max_backlog)) {
		rc = ENOBUFS;
		goto done;
	}

	mbufq_enqueue(&cst->pending_tx, m0);
	cst->plen += m0->m_pkthdr.len;

	ethofld_tx(cst);
	rc = 0;
done:
	mtx_unlock(&cst->lock);
	if (__predict_false(rc != 0))
		m_freem(m0);
	return (rc);
}

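/*
 * CPL_FW4_ACK handler for ethofld tids.  Returns the tx credits carried by
 * the CPL, frees the mbufs those credits cover, restarts tx if more frames
 * are pending, and frees the send tag once the kernel has dropped its
 * reference and all credits have been returned.
 */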
static int
ethofld_fw4_ack(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m0)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_fw4_ack *cpl = (const void *)(rss + 1);
	struct mbuf *m;
	u_int etid = G_CPL_FW4_ACK_FLOWID(be32toh(OPCODE_TID(cpl)));
	struct cxgbe_snd_tag *cst;
	uint8_t credits = cpl->credits;

	cst = lookup_etid(sc, etid);
	mtx_lock(&cst->lock);
	if (__predict_false(cst->flags & EO_FLOWC_RPL_PENDING)) {
		MPASS(credits >= ETID_FLOWC_LEN16);
		credits -= ETID_FLOWC_LEN16;
		cst->flags &= ~EO_FLOWC_RPL_PENDING;
	}

	KASSERT(cst->ncompl > 0,
	    ("%s: etid %u (%p) wasn't expecting completion.",
	    __func__, etid, cst));
	cst->ncompl--;

	while (credits > 0) {
		m = mbufq_dequeue(&cst->pending_fwack);
		if (__predict_false(m == NULL)) {
			/*
			 * The remaining credits are for the final flush that
			 * was issued when the tag was freed by the kernel.
			 */
			MPASS((cst->flags &
			    (EO_FLUSH_RPL_PENDING | EO_SND_TAG_REF)) ==
			    EO_FLUSH_RPL_PENDING);
			MPASS(credits == ETID_FLUSH_LEN16);
			MPASS(cst->tx_credits + cpl->credits == cst->tx_total);
			MPASS(cst->ncompl == 0);

			cst->flags &= ~EO_FLUSH_RPL_PENDING;
			cst->tx_credits += cpl->credits;
freetag:
			cxgbe_snd_tag_free_locked(cst);
			return (0);	/* cst is gone. */
		}
		KASSERT(m != NULL,
		    ("%s: too many credits (%u, %u)", __func__, cpl->credits,
		    credits));
		KASSERT(credits >= mbuf_eo_len16(m),
		    ("%s: too few credits (%u, %u, %u)", __func__,
		    cpl->credits, credits, mbuf_eo_len16(m)));
		credits -= mbuf_eo_len16(m);
		cst->plen -= m->m_pkthdr.len;
		m_freem(m);
	}

	cst->tx_credits += cpl->credits;
	MPASS(cst->tx_credits <= cst->tx_total);

	m = mbufq_first(&cst->pending_tx);
	if (m != NULL && cst->tx_credits >= mbuf_eo_len16(m))
		ethofld_tx(cst);

	if (__predict_false((cst->flags & EO_SND_TAG_REF) == 0) &&
	    cst->ncompl == 0) {
		if (cst->tx_credits == cst->tx_total)
			goto freetag;
		else {
			MPASS((cst->flags & EO_FLUSH_RPL_PENDING) == 0);
			send_etid_flush_wr(cst);
		}
	}

	mtx_unlock(&cst->lock);

	return (0);
}
#endif