1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2011 Chelsio Communications, Inc. 5 * All rights reserved. 6 * Written by: Navdeep Parhar <np@FreeBSD.org> 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 */ 29 30 #include <sys/cdefs.h> 31 #include "opt_inet.h" 32 #include "opt_inet6.h" 33 #include "opt_kern_tls.h" 34 #include "opt_ratelimit.h" 35 36 #include <sys/types.h> 37 #include <sys/eventhandler.h> 38 #include <sys/mbuf.h> 39 #include <sys/socket.h> 40 #include <sys/kernel.h> 41 #include <sys/ktls.h> 42 #include <sys/malloc.h> 43 #include <sys/msan.h> 44 #include <sys/queue.h> 45 #include <sys/sbuf.h> 46 #include <sys/taskqueue.h> 47 #include <sys/time.h> 48 #include <sys/sglist.h> 49 #include <sys/sysctl.h> 50 #include <sys/smp.h> 51 #include <sys/socketvar.h> 52 #include <sys/counter.h> 53 #include <net/bpf.h> 54 #include <net/ethernet.h> 55 #include <net/if.h> 56 #include <net/if_vlan_var.h> 57 #include <net/if_vxlan.h> 58 #include <netinet/in.h> 59 #include <netinet/ip.h> 60 #include <netinet/ip6.h> 61 #include <netinet/tcp.h> 62 #include <netinet/udp.h> 63 #include <machine/in_cksum.h> 64 #include <machine/md_var.h> 65 #include <vm/vm.h> 66 #include <vm/pmap.h> 67 #ifdef DEV_NETMAP 68 #include <machine/bus.h> 69 #include <sys/selinfo.h> 70 #include <net/if_var.h> 71 #include <net/netmap.h> 72 #include <dev/netmap/netmap_kern.h> 73 #endif 74 75 #include "common/common.h" 76 #include "common/t4_regs.h" 77 #include "common/t4_regs_values.h" 78 #include "common/t4_msg.h" 79 #include "t4_l2t.h" 80 #include "t4_mp_ring.h" 81 82 #define RX_COPY_THRESHOLD MINCLSIZE 83 84 /* 85 * Ethernet frames are DMA'd at this byte offset into the freelist buffer. 86 * 0-7 are valid values. 87 */ 88 static int fl_pktshift = 0; 89 SYSCTL_INT(_hw_cxgbe, OID_AUTO, fl_pktshift, CTLFLAG_RDTUN, &fl_pktshift, 0, 90 "payload DMA offset in rx buffer (bytes)"); 91 92 /* 93 * Pad ethernet payload up to this boundary. 94 * -1: driver should figure out a good value. 95 * 0: disable padding. 96 * Any power of 2 from 32 to 4096 (both inclusive) is also a valid value. 97 */ 98 int fl_pad = -1; 99 SYSCTL_INT(_hw_cxgbe, OID_AUTO, fl_pad, CTLFLAG_RDTUN, &fl_pad, 0, 100 "payload pad boundary (bytes)"); 101 102 /* 103 * Status page length. 
 * -1: driver should figure out a good value.
 * 64 or 128 are the only other valid values.
 */
static int spg_len = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, spg_len, CTLFLAG_RDTUN, &spg_len, 0,
    "status page size (bytes)");

/*
 * Congestion drops.
 * -1: no congestion feedback (not recommended).
 * 0: backpressure the channel instead of dropping packets right away.
 * 1: no backpressure, drop packets for the congested queue immediately.
 * 2: both backpressure and drop.
 */
static int cong_drop = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, cong_drop, CTLFLAG_RDTUN, &cong_drop, 0,
    "Congestion control for NIC RX queues (0 = backpressure, 1 = drop, 2 = both)");
#ifdef TCP_OFFLOAD
static int ofld_cong_drop = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, ofld_cong_drop, CTLFLAG_RDTUN, &ofld_cong_drop, 0,
    "Congestion control for TOE RX queues (0 = backpressure, 1 = drop, 2 = both)");
#endif

/*
 * Deliver multiple frames in the same free list buffer if they fit.
 * -1: let the driver decide whether to enable buffer packing or not.
 * 0: disable buffer packing.
 * 1: enable buffer packing.
 */
static int buffer_packing = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, buffer_packing, CTLFLAG_RDTUN, &buffer_packing,
    0, "Enable buffer packing");

/*
 * Start next frame in a packed buffer at this boundary.
 * -1: driver should figure out a good value.
 * T4: driver will ignore this and use the same value as fl_pad above.
 * T5: 16, or a power of 2 from 64 to 4096 (both inclusive) is a valid value.
 */
static int fl_pack = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, fl_pack, CTLFLAG_RDTUN, &fl_pack, 0,
    "payload pack boundary (bytes)");

/*
 * Largest rx cluster size that the driver is allowed to allocate.
 */
static int largest_rx_cluster = MJUM16BYTES;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, largest_rx_cluster, CTLFLAG_RDTUN,
    &largest_rx_cluster, 0, "Largest rx cluster (bytes)");

/*
 * Size of cluster allocation that's most likely to succeed. The driver will
 * fall back to this size if it fails to allocate clusters larger than this.
 */
static int safest_rx_cluster = PAGE_SIZE;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, safest_rx_cluster, CTLFLAG_RDTUN,
    &safest_rx_cluster, 0, "Safe rx cluster (bytes)");

#ifdef RATELIMIT
/*
 * Knob to control TCP timestamp rewriting, and the granularity of the tick used
 * for rewriting. -1 and 0-3 are all valid values.
 * -1: hardware should leave the TCP timestamps alone.
 * 0: 1ms
 * 1: 100us
 * 2: 10us
 * 3: 1us
 */
static int tsclk = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, tsclk, CTLFLAG_RDTUN, &tsclk, 0,
    "Control TCP timestamp rewriting when using pacing");

static int eo_max_backlog = 1024 * 1024;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, eo_max_backlog, CTLFLAG_RDTUN, &eo_max_backlog,
    0, "Maximum backlog of ratelimited data per flow");
#endif

/*
 * The interrupt holdoff timers are multiplied by this value on T6+.
 * 1 and 3-17 (both inclusive) are legal values.
 */
static int tscale = 1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, tscale, CTLFLAG_RDTUN, &tscale, 0,
    "Interrupt holdoff timer scale on T6+");

/*
 * Number of LRO entries in the lro_ctrl structure per rx queue.
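 *
 * Illustrative usage note (not part of the driver): like the other
 * CTLFLAG_RDTUN knobs above, this is a loader tunable and only takes effect
 * when set before the driver attaches, e.g. in /boot/loader.conf:
 *
 *	hw.cxgbe.lro_entries="16"
 *	hw.cxgbe.buffer_packing="1"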
191 */ 192 static int lro_entries = TCP_LRO_ENTRIES; 193 SYSCTL_INT(_hw_cxgbe, OID_AUTO, lro_entries, CTLFLAG_RDTUN, &lro_entries, 0, 194 "Number of LRO entries per RX queue"); 195 196 /* 197 * This enables presorting of frames before they're fed into tcp_lro_rx. 198 */ 199 static int lro_mbufs = 0; 200 SYSCTL_INT(_hw_cxgbe, OID_AUTO, lro_mbufs, CTLFLAG_RDTUN, &lro_mbufs, 0, 201 "Enable presorting of LRO frames"); 202 203 static counter_u64_t pullups; 204 SYSCTL_COUNTER_U64(_hw_cxgbe, OID_AUTO, pullups, CTLFLAG_RD, &pullups, 205 "Number of mbuf pullups performed"); 206 207 static counter_u64_t defrags; 208 SYSCTL_COUNTER_U64(_hw_cxgbe, OID_AUTO, defrags, CTLFLAG_RD, &defrags, 209 "Number of mbuf defrags performed"); 210 211 static int t4_tx_coalesce = 1; 212 SYSCTL_INT(_hw_cxgbe, OID_AUTO, tx_coalesce, CTLFLAG_RWTUN, &t4_tx_coalesce, 0, 213 "tx coalescing allowed"); 214 215 /* 216 * The driver will make aggressive attempts at tx coalescing if it sees these 217 * many packets eligible for coalescing in quick succession, with no more than 218 * the specified gap in between the eth_tx calls that delivered the packets. 219 */ 220 static int t4_tx_coalesce_pkts = 32; 221 SYSCTL_INT(_hw_cxgbe, OID_AUTO, tx_coalesce_pkts, CTLFLAG_RWTUN, 222 &t4_tx_coalesce_pkts, 0, 223 "# of consecutive packets (1 - 255) that will trigger tx coalescing"); 224 static int t4_tx_coalesce_gap = 5; 225 SYSCTL_INT(_hw_cxgbe, OID_AUTO, tx_coalesce_gap, CTLFLAG_RWTUN, 226 &t4_tx_coalesce_gap, 0, "tx gap (in microseconds)"); 227 228 static int service_iq(struct sge_iq *, int); 229 static int service_iq_fl(struct sge_iq *, int); 230 static struct mbuf *get_fl_payload(struct adapter *, struct sge_fl *, uint32_t); 231 static int eth_rx(struct adapter *, struct sge_rxq *, const struct iq_desc *, 232 u_int); 233 static inline void init_iq(struct sge_iq *, struct adapter *, int, int, int, 234 int, int, int); 235 static inline void init_fl(struct adapter *, struct sge_fl *, int, int, char *); 236 static inline void init_eq(struct adapter *, struct sge_eq *, int, int, uint8_t, 237 struct sge_iq *, char *); 238 static int alloc_iq_fl(struct vi_info *, struct sge_iq *, struct sge_fl *, 239 struct sysctl_ctx_list *, struct sysctl_oid *); 240 static void free_iq_fl(struct adapter *, struct sge_iq *, struct sge_fl *); 241 static void add_iq_sysctls(struct sysctl_ctx_list *, struct sysctl_oid *, 242 struct sge_iq *); 243 static void add_fl_sysctls(struct adapter *, struct sysctl_ctx_list *, 244 struct sysctl_oid *, struct sge_fl *); 245 static int alloc_iq_fl_hwq(struct vi_info *, struct sge_iq *, struct sge_fl *); 246 static int free_iq_fl_hwq(struct adapter *, struct sge_iq *, struct sge_fl *); 247 static int alloc_fwq(struct adapter *); 248 static void free_fwq(struct adapter *); 249 static int alloc_ctrlq(struct adapter *, int); 250 static void free_ctrlq(struct adapter *, int); 251 static int alloc_rxq(struct vi_info *, struct sge_rxq *, int, int, int); 252 static void free_rxq(struct vi_info *, struct sge_rxq *); 253 static void add_rxq_sysctls(struct sysctl_ctx_list *, struct sysctl_oid *, 254 struct sge_rxq *); 255 #ifdef TCP_OFFLOAD 256 static int alloc_ofld_rxq(struct vi_info *, struct sge_ofld_rxq *, int, int, 257 int); 258 static void free_ofld_rxq(struct vi_info *, struct sge_ofld_rxq *); 259 static void add_ofld_rxq_sysctls(struct sysctl_ctx_list *, struct sysctl_oid *, 260 struct sge_ofld_rxq *); 261 #endif 262 static int ctrl_eq_alloc(struct adapter *, struct sge_eq *); 263 static int eth_eq_alloc(struct adapter *, 
struct vi_info *, struct sge_eq *); 264 #if defined(TCP_OFFLOAD) || defined(RATELIMIT) 265 static int ofld_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *); 266 #endif 267 static int alloc_eq(struct adapter *, struct sge_eq *, struct sysctl_ctx_list *, 268 struct sysctl_oid *); 269 static void free_eq(struct adapter *, struct sge_eq *); 270 static void add_eq_sysctls(struct adapter *, struct sysctl_ctx_list *, 271 struct sysctl_oid *, struct sge_eq *); 272 static int alloc_eq_hwq(struct adapter *, struct vi_info *, struct sge_eq *); 273 static int free_eq_hwq(struct adapter *, struct vi_info *, struct sge_eq *); 274 static int alloc_wrq(struct adapter *, struct vi_info *, struct sge_wrq *, 275 struct sysctl_ctx_list *, struct sysctl_oid *); 276 static void free_wrq(struct adapter *, struct sge_wrq *); 277 static void add_wrq_sysctls(struct sysctl_ctx_list *, struct sysctl_oid *, 278 struct sge_wrq *); 279 static int alloc_txq(struct vi_info *, struct sge_txq *, int); 280 static void free_txq(struct vi_info *, struct sge_txq *); 281 static void add_txq_sysctls(struct vi_info *, struct sysctl_ctx_list *, 282 struct sysctl_oid *, struct sge_txq *); 283 #if defined(TCP_OFFLOAD) || defined(RATELIMIT) 284 static int alloc_ofld_txq(struct vi_info *, struct sge_ofld_txq *, int); 285 static void free_ofld_txq(struct vi_info *, struct sge_ofld_txq *); 286 static void add_ofld_txq_sysctls(struct sysctl_ctx_list *, struct sysctl_oid *, 287 struct sge_ofld_txq *); 288 #endif 289 static void oneseg_dma_callback(void *, bus_dma_segment_t *, int, int); 290 static inline void ring_fl_db(struct adapter *, struct sge_fl *); 291 static int refill_fl(struct adapter *, struct sge_fl *, int); 292 static void refill_sfl(void *); 293 static int find_refill_source(struct adapter *, int, bool); 294 static void add_fl_to_sfl(struct adapter *, struct sge_fl *); 295 296 static inline void get_pkt_gl(struct mbuf *, struct sglist *); 297 static inline u_int txpkt_len16(u_int, const u_int); 298 static inline u_int txpkt_vm_len16(u_int, const u_int); 299 static inline void calculate_mbuf_len16(struct mbuf *, bool); 300 static inline u_int txpkts0_len16(u_int); 301 static inline u_int txpkts1_len16(void); 302 static u_int write_raw_wr(struct sge_txq *, void *, struct mbuf *, u_int); 303 static u_int write_txpkt_wr(struct adapter *, struct sge_txq *, struct mbuf *, 304 u_int); 305 static u_int write_txpkt_vm_wr(struct adapter *, struct sge_txq *, 306 struct mbuf *); 307 static int add_to_txpkts_vf(struct adapter *, struct sge_txq *, struct mbuf *, 308 int, bool *); 309 static int add_to_txpkts_pf(struct adapter *, struct sge_txq *, struct mbuf *, 310 int, bool *); 311 static u_int write_txpkts_wr(struct adapter *, struct sge_txq *); 312 static u_int write_txpkts_vm_wr(struct adapter *, struct sge_txq *); 313 static void write_gl_to_txd(struct sge_txq *, struct mbuf *, caddr_t *, int); 314 static inline void copy_to_txd(struct sge_eq *, caddr_t, caddr_t *, int); 315 static inline void ring_eq_db(struct adapter *, struct sge_eq *, u_int); 316 static inline uint16_t read_hw_cidx(struct sge_eq *); 317 static inline u_int reclaimable_tx_desc(struct sge_eq *); 318 static inline u_int total_available_tx_desc(struct sge_eq *); 319 static u_int reclaim_tx_descs(struct sge_txq *, u_int); 320 static void tx_reclaim(void *, int); 321 static __be64 get_flit(struct sglist_seg *, int, int); 322 static int handle_sge_egr_update(struct sge_iq *, const struct rss_header *, 323 struct mbuf *); 324 static int handle_fw_msg(struct 
sge_iq *, const struct rss_header *, 325 struct mbuf *); 326 static int t4_handle_wrerr_rpl(struct adapter *, const __be64 *); 327 static void wrq_tx_drain(void *, int); 328 static void drain_wrq_wr_list(struct adapter *, struct sge_wrq *); 329 330 static int sysctl_bufsizes(SYSCTL_HANDLER_ARGS); 331 #ifdef RATELIMIT 332 static int ethofld_fw4_ack(struct sge_iq *, const struct rss_header *, 333 struct mbuf *); 334 #if defined(INET) || defined(INET6) 335 static inline u_int txpkt_eo_len16(u_int, u_int, u_int); 336 static int ethofld_transmit(if_t, struct mbuf *); 337 #endif 338 #endif 339 340 static counter_u64_t extfree_refs; 341 static counter_u64_t extfree_rels; 342 343 an_handler_t t4_an_handler; 344 fw_msg_handler_t t4_fw_msg_handler[NUM_FW6_TYPES]; 345 cpl_handler_t t4_cpl_handler[NUM_CPL_CMDS]; 346 cpl_handler_t set_tcb_rpl_handlers[NUM_CPL_COOKIES]; 347 cpl_handler_t l2t_write_rpl_handlers[NUM_CPL_COOKIES]; 348 cpl_handler_t act_open_rpl_handlers[NUM_CPL_COOKIES]; 349 cpl_handler_t abort_rpl_rss_handlers[NUM_CPL_COOKIES]; 350 cpl_handler_t fw4_ack_handlers[NUM_CPL_COOKIES]; 351 352 void 353 t4_register_an_handler(an_handler_t h) 354 { 355 uintptr_t *loc; 356 357 MPASS(h == NULL || t4_an_handler == NULL); 358 359 loc = (uintptr_t *)&t4_an_handler; 360 atomic_store_rel_ptr(loc, (uintptr_t)h); 361 } 362 363 void 364 t4_register_fw_msg_handler(int type, fw_msg_handler_t h) 365 { 366 uintptr_t *loc; 367 368 MPASS(type < nitems(t4_fw_msg_handler)); 369 MPASS(h == NULL || t4_fw_msg_handler[type] == NULL); 370 /* 371 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL 372 * handler dispatch table. Reject any attempt to install a handler for 373 * this subtype. 374 */ 375 MPASS(type != FW_TYPE_RSSCPL); 376 MPASS(type != FW6_TYPE_RSSCPL); 377 378 loc = (uintptr_t *)&t4_fw_msg_handler[type]; 379 atomic_store_rel_ptr(loc, (uintptr_t)h); 380 } 381 382 void 383 t4_register_cpl_handler(int opcode, cpl_handler_t h) 384 { 385 uintptr_t *loc; 386 387 MPASS(opcode < nitems(t4_cpl_handler)); 388 MPASS(h == NULL || t4_cpl_handler[opcode] == NULL); 389 390 loc = (uintptr_t *)&t4_cpl_handler[opcode]; 391 atomic_store_rel_ptr(loc, (uintptr_t)h); 392 } 393 394 static int 395 set_tcb_rpl_handler(struct sge_iq *iq, const struct rss_header *rss, 396 struct mbuf *m) 397 { 398 const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1); 399 u_int tid; 400 int cookie; 401 402 MPASS(m == NULL); 403 404 tid = GET_TID(cpl); 405 if (is_hpftid(iq->adapter, tid) || is_ftid(iq->adapter, tid)) { 406 /* 407 * The return code for filter-write is put in the CPL cookie so 408 * we have to rely on the hardware tid (is_ftid) to determine 409 * that this is a response to a filter. 410 */ 411 cookie = CPL_COOKIE_FILTER; 412 } else { 413 cookie = G_COOKIE(cpl->cookie); 414 } 415 MPASS(cookie > CPL_COOKIE_RESERVED); 416 MPASS(cookie < nitems(set_tcb_rpl_handlers)); 417 418 return (set_tcb_rpl_handlers[cookie](iq, rss, m)); 419 } 420 421 static int 422 l2t_write_rpl_handler(struct sge_iq *iq, const struct rss_header *rss, 423 struct mbuf *m) 424 { 425 const struct cpl_l2t_write_rpl *rpl = (const void *)(rss + 1); 426 unsigned int cookie; 427 428 MPASS(m == NULL); 429 430 cookie = GET_TID(rpl) & F_SYNC_WR ? 
CPL_COOKIE_TOM : CPL_COOKIE_FILTER; 431 return (l2t_write_rpl_handlers[cookie](iq, rss, m)); 432 } 433 434 static int 435 act_open_rpl_handler(struct sge_iq *iq, const struct rss_header *rss, 436 struct mbuf *m) 437 { 438 const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1); 439 u_int cookie = G_TID_COOKIE(G_AOPEN_ATID(be32toh(cpl->atid_status))); 440 441 MPASS(m == NULL); 442 MPASS(cookie != CPL_COOKIE_RESERVED); 443 444 return (act_open_rpl_handlers[cookie](iq, rss, m)); 445 } 446 447 static int 448 abort_rpl_rss_handler(struct sge_iq *iq, const struct rss_header *rss, 449 struct mbuf *m) 450 { 451 struct adapter *sc = iq->adapter; 452 u_int cookie; 453 454 MPASS(m == NULL); 455 if (is_hashfilter(sc)) 456 cookie = CPL_COOKIE_HASHFILTER; 457 else 458 cookie = CPL_COOKIE_TOM; 459 460 return (abort_rpl_rss_handlers[cookie](iq, rss, m)); 461 } 462 463 static int 464 fw4_ack_handler(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 465 { 466 struct adapter *sc = iq->adapter; 467 const struct cpl_fw4_ack *cpl = (const void *)(rss + 1); 468 unsigned int tid = G_CPL_FW4_ACK_FLOWID(be32toh(OPCODE_TID(cpl))); 469 u_int cookie; 470 471 MPASS(m == NULL); 472 if (is_etid(sc, tid)) 473 cookie = CPL_COOKIE_ETHOFLD; 474 else 475 cookie = CPL_COOKIE_TOM; 476 477 return (fw4_ack_handlers[cookie](iq, rss, m)); 478 } 479 480 static void 481 t4_init_shared_cpl_handlers(void) 482 { 483 484 t4_register_cpl_handler(CPL_SET_TCB_RPL, set_tcb_rpl_handler); 485 t4_register_cpl_handler(CPL_L2T_WRITE_RPL, l2t_write_rpl_handler); 486 t4_register_cpl_handler(CPL_ACT_OPEN_RPL, act_open_rpl_handler); 487 t4_register_cpl_handler(CPL_ABORT_RPL_RSS, abort_rpl_rss_handler); 488 t4_register_cpl_handler(CPL_FW4_ACK, fw4_ack_handler); 489 } 490 491 void 492 t4_register_shared_cpl_handler(int opcode, cpl_handler_t h, int cookie) 493 { 494 uintptr_t *loc; 495 496 MPASS(opcode < nitems(t4_cpl_handler)); 497 MPASS(cookie > CPL_COOKIE_RESERVED); 498 MPASS(cookie < NUM_CPL_COOKIES); 499 MPASS(t4_cpl_handler[opcode] != NULL); 500 501 switch (opcode) { 502 case CPL_SET_TCB_RPL: 503 loc = (uintptr_t *)&set_tcb_rpl_handlers[cookie]; 504 break; 505 case CPL_L2T_WRITE_RPL: 506 loc = (uintptr_t *)&l2t_write_rpl_handlers[cookie]; 507 break; 508 case CPL_ACT_OPEN_RPL: 509 loc = (uintptr_t *)&act_open_rpl_handlers[cookie]; 510 break; 511 case CPL_ABORT_RPL_RSS: 512 loc = (uintptr_t *)&abort_rpl_rss_handlers[cookie]; 513 break; 514 case CPL_FW4_ACK: 515 loc = (uintptr_t *)&fw4_ack_handlers[cookie]; 516 break; 517 default: 518 MPASS(0); 519 return; 520 } 521 MPASS(h == NULL || *loc == (uintptr_t)NULL); 522 atomic_store_rel_ptr(loc, (uintptr_t)h); 523 } 524 525 /* 526 * Called on MOD_LOAD. Validates and calculates the SGE tunables. 527 */ 528 void 529 t4_sge_modload(void) 530 { 531 532 if (fl_pktshift < 0 || fl_pktshift > 7) { 533 printf("Invalid hw.cxgbe.fl_pktshift value (%d)," 534 " using 0 instead.\n", fl_pktshift); 535 fl_pktshift = 0; 536 } 537 538 if (spg_len != 64 && spg_len != 128) { 539 int len; 540 541 #if defined(__i386__) || defined(__amd64__) 542 len = cpu_clflush_line_size > 64 ? 
128 : 64; 543 #else 544 len = 64; 545 #endif 546 if (spg_len != -1) { 547 printf("Invalid hw.cxgbe.spg_len value (%d)," 548 " using %d instead.\n", spg_len, len); 549 } 550 spg_len = len; 551 } 552 553 if (cong_drop < -1 || cong_drop > 2) { 554 printf("Invalid hw.cxgbe.cong_drop value (%d)," 555 " using 0 instead.\n", cong_drop); 556 cong_drop = 0; 557 } 558 #ifdef TCP_OFFLOAD 559 if (ofld_cong_drop < -1 || ofld_cong_drop > 2) { 560 printf("Invalid hw.cxgbe.ofld_cong_drop value (%d)," 561 " using 0 instead.\n", ofld_cong_drop); 562 ofld_cong_drop = 0; 563 } 564 #endif 565 566 if (tscale != 1 && (tscale < 3 || tscale > 17)) { 567 printf("Invalid hw.cxgbe.tscale value (%d)," 568 " using 1 instead.\n", tscale); 569 tscale = 1; 570 } 571 572 if (largest_rx_cluster != MCLBYTES && 573 largest_rx_cluster != MJUMPAGESIZE && 574 largest_rx_cluster != MJUM9BYTES && 575 largest_rx_cluster != MJUM16BYTES) { 576 printf("Invalid hw.cxgbe.largest_rx_cluster value (%d)," 577 " using %d instead.\n", largest_rx_cluster, MJUM16BYTES); 578 largest_rx_cluster = MJUM16BYTES; 579 } 580 581 if (safest_rx_cluster != MCLBYTES && 582 safest_rx_cluster != MJUMPAGESIZE && 583 safest_rx_cluster != MJUM9BYTES && 584 safest_rx_cluster != MJUM16BYTES) { 585 printf("Invalid hw.cxgbe.safest_rx_cluster value (%d)," 586 " using %d instead.\n", safest_rx_cluster, MJUMPAGESIZE); 587 safest_rx_cluster = MJUMPAGESIZE; 588 } 589 590 extfree_refs = counter_u64_alloc(M_WAITOK); 591 extfree_rels = counter_u64_alloc(M_WAITOK); 592 pullups = counter_u64_alloc(M_WAITOK); 593 defrags = counter_u64_alloc(M_WAITOK); 594 counter_u64_zero(extfree_refs); 595 counter_u64_zero(extfree_rels); 596 counter_u64_zero(pullups); 597 counter_u64_zero(defrags); 598 599 t4_init_shared_cpl_handlers(); 600 t4_register_cpl_handler(CPL_FW4_MSG, handle_fw_msg); 601 t4_register_cpl_handler(CPL_FW6_MSG, handle_fw_msg); 602 t4_register_cpl_handler(CPL_SGE_EGR_UPDATE, handle_sge_egr_update); 603 #ifdef RATELIMIT 604 t4_register_shared_cpl_handler(CPL_FW4_ACK, ethofld_fw4_ack, 605 CPL_COOKIE_ETHOFLD); 606 #endif 607 t4_register_fw_msg_handler(FW6_TYPE_CMD_RPL, t4_handle_fw_rpl); 608 t4_register_fw_msg_handler(FW6_TYPE_WRERR_RPL, t4_handle_wrerr_rpl); 609 } 610 611 void 612 t4_sge_modunload(void) 613 { 614 615 counter_u64_free(extfree_refs); 616 counter_u64_free(extfree_rels); 617 counter_u64_free(pullups); 618 counter_u64_free(defrags); 619 } 620 621 uint64_t 622 t4_sge_extfree_refs(void) 623 { 624 uint64_t refs, rels; 625 626 rels = counter_u64_fetch(extfree_rels); 627 refs = counter_u64_fetch(extfree_refs); 628 629 return (refs - rels); 630 } 631 632 /* max 4096 */ 633 #define MAX_PACK_BOUNDARY 512 634 635 static inline void 636 setup_pad_and_pack_boundaries(struct adapter *sc) 637 { 638 uint32_t v, m; 639 int pad, pack, pad_shift; 640 641 pad_shift = chip_id(sc) > CHELSIO_T5 ? X_T6_INGPADBOUNDARY_SHIFT : 642 X_INGPADBOUNDARY_SHIFT; 643 pad = fl_pad; 644 if (fl_pad < (1 << pad_shift) || 645 fl_pad > (1 << (pad_shift + M_INGPADBOUNDARY)) || 646 !powerof2(fl_pad)) { 647 /* 648 * If there is any chance that we might use buffer packing and 649 * the chip is a T4, then pick 64 as the pad/pack boundary. Set 650 * it to the minimum allowed in all other cases. 651 */ 652 pad = is_t4(sc) && buffer_packing ? 64 : 1 << pad_shift; 653 654 /* 655 * For fl_pad = 0 we'll still write a reasonable value to the 656 * register but all the freelists will opt out of padding. 657 * We'll complain here only if the user tried to set it to a 658 * value greater than 0 that was invalid. 
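		 *
		 * Worked example (illustrative; assumes the 32B minimum pad
		 * boundary on T4/T5, i.e. pad_shift = 5): an out-of-range
		 * fl_pad such as 24 makes a T5 fall back to pad = 1 << 5 = 32,
		 * which is written below as V_INGPADBOUNDARY(ilog2(32) - 5) =
		 * V_INGPADBOUNDARY(0).  A valid fl_pad of 128 would be encoded
		 * as V_INGPADBOUNDARY(2).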
659 */ 660 if (fl_pad > 0) { 661 device_printf(sc->dev, "Invalid hw.cxgbe.fl_pad value" 662 " (%d), using %d instead.\n", fl_pad, pad); 663 } 664 } 665 m = V_INGPADBOUNDARY(M_INGPADBOUNDARY); 666 v = V_INGPADBOUNDARY(ilog2(pad) - pad_shift); 667 t4_set_reg_field(sc, A_SGE_CONTROL, m, v); 668 669 if (is_t4(sc)) { 670 if (fl_pack != -1 && fl_pack != pad) { 671 /* Complain but carry on. */ 672 device_printf(sc->dev, "hw.cxgbe.fl_pack (%d) ignored," 673 " using %d instead.\n", fl_pack, pad); 674 } 675 return; 676 } 677 678 pack = fl_pack; 679 if (fl_pack < 16 || fl_pack == 32 || fl_pack > 4096 || 680 !powerof2(fl_pack)) { 681 if (sc->params.pci.mps > MAX_PACK_BOUNDARY) 682 pack = MAX_PACK_BOUNDARY; 683 else 684 pack = max(sc->params.pci.mps, CACHE_LINE_SIZE); 685 MPASS(powerof2(pack)); 686 if (pack < 16) 687 pack = 16; 688 if (pack == 32) 689 pack = 64; 690 if (pack > 4096) 691 pack = 4096; 692 if (fl_pack != -1) { 693 device_printf(sc->dev, "Invalid hw.cxgbe.fl_pack value" 694 " (%d), using %d instead.\n", fl_pack, pack); 695 } 696 } 697 m = V_INGPACKBOUNDARY(M_INGPACKBOUNDARY); 698 if (pack == 16) 699 v = V_INGPACKBOUNDARY(0); 700 else 701 v = V_INGPACKBOUNDARY(ilog2(pack) - 5); 702 703 MPASS(!is_t4(sc)); /* T4 doesn't have SGE_CONTROL2 */ 704 t4_set_reg_field(sc, A_SGE_CONTROL2, m, v); 705 } 706 707 /* 708 * adap->params.vpd.cclk must be set up before this is called. 709 */ 710 void 711 t4_tweak_chip_settings(struct adapter *sc) 712 { 713 int i, reg; 714 uint32_t v, m; 715 int intr_timer[SGE_NTIMERS] = {1, 5, 10, 50, 100, 200}; 716 int timer_max = M_TIMERVALUE0 * 1000 / sc->params.vpd.cclk; 717 int intr_pktcount[SGE_NCOUNTERS] = {1, 8, 16, 32}; /* 63 max */ 718 uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE); 719 static int sw_buf_sizes[] = { 720 MCLBYTES, 721 MJUMPAGESIZE, 722 MJUM9BYTES, 723 MJUM16BYTES 724 }; 725 726 KASSERT(sc->flags & MASTER_PF, 727 ("%s: trying to change chip settings when not master.", __func__)); 728 729 m = V_PKTSHIFT(M_PKTSHIFT) | F_RXPKTCPLMODE | F_EGRSTATUSPAGESIZE; 730 v = V_PKTSHIFT(fl_pktshift) | F_RXPKTCPLMODE | 731 V_EGRSTATUSPAGESIZE(spg_len == 128); 732 t4_set_reg_field(sc, A_SGE_CONTROL, m, v); 733 734 setup_pad_and_pack_boundaries(sc); 735 736 v = V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10) | 737 V_HOSTPAGESIZEPF1(PAGE_SHIFT - 10) | 738 V_HOSTPAGESIZEPF2(PAGE_SHIFT - 10) | 739 V_HOSTPAGESIZEPF3(PAGE_SHIFT - 10) | 740 V_HOSTPAGESIZEPF4(PAGE_SHIFT - 10) | 741 V_HOSTPAGESIZEPF5(PAGE_SHIFT - 10) | 742 V_HOSTPAGESIZEPF6(PAGE_SHIFT - 10) | 743 V_HOSTPAGESIZEPF7(PAGE_SHIFT - 10); 744 t4_write_reg(sc, A_SGE_HOST_PAGE_SIZE, v); 745 746 t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE0, 4096); 747 t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE1, 65536); 748 reg = A_SGE_FL_BUFFER_SIZE2; 749 for (i = 0; i < nitems(sw_buf_sizes); i++) { 750 MPASS(reg <= A_SGE_FL_BUFFER_SIZE15); 751 t4_write_reg(sc, reg, sw_buf_sizes[i]); 752 reg += 4; 753 MPASS(reg <= A_SGE_FL_BUFFER_SIZE15); 754 t4_write_reg(sc, reg, sw_buf_sizes[i] - CL_METADATA_SIZE); 755 reg += 4; 756 } 757 758 v = V_THRESHOLD_0(intr_pktcount[0]) | V_THRESHOLD_1(intr_pktcount[1]) | 759 V_THRESHOLD_2(intr_pktcount[2]) | V_THRESHOLD_3(intr_pktcount[3]); 760 t4_write_reg(sc, A_SGE_INGRESS_RX_THRESHOLD, v); 761 762 KASSERT(intr_timer[0] <= timer_max, 763 ("%s: not a single usable timer (%d, %d)", __func__, intr_timer[0], 764 timer_max)); 765 for (i = 1; i < nitems(intr_timer); i++) { 766 KASSERT(intr_timer[i] >= intr_timer[i - 1], 767 ("%s: timers not listed in increasing order (%d)", 768 __func__, i)); 769 770 while (intr_timer[i] 
> timer_max) {
			if (i == nitems(intr_timer) - 1) {
				intr_timer[i] = timer_max;
				break;
			}
			intr_timer[i] += intr_timer[i - 1];
			intr_timer[i] /= 2;
		}
	}

	v = V_TIMERVALUE0(us_to_core_ticks(sc, intr_timer[0])) |
	    V_TIMERVALUE1(us_to_core_ticks(sc, intr_timer[1]));
	t4_write_reg(sc, A_SGE_TIMER_VALUE_0_AND_1, v);
	v = V_TIMERVALUE2(us_to_core_ticks(sc, intr_timer[2])) |
	    V_TIMERVALUE3(us_to_core_ticks(sc, intr_timer[3]));
	t4_write_reg(sc, A_SGE_TIMER_VALUE_2_AND_3, v);
	v = V_TIMERVALUE4(us_to_core_ticks(sc, intr_timer[4])) |
	    V_TIMERVALUE5(us_to_core_ticks(sc, intr_timer[5]));
	t4_write_reg(sc, A_SGE_TIMER_VALUE_4_AND_5, v);

	if (chip_id(sc) >= CHELSIO_T6) {
		m = V_TSCALE(M_TSCALE);
		if (tscale == 1)
			v = 0;
		else
			v = V_TSCALE(tscale - 2);
		t4_set_reg_field(sc, A_SGE_ITP_CONTROL, m, v);

		if (sc->debug_flags & DF_DISABLE_TCB_CACHE) {
			m = V_RDTHRESHOLD(M_RDTHRESHOLD) | F_WRTHRTHRESHEN |
			    V_WRTHRTHRESH(M_WRTHRTHRESH);
			t4_tp_pio_read(sc, &v, 1, A_TP_CMM_CONFIG, 1);
			v &= ~m;
			v |= V_RDTHRESHOLD(1) | F_WRTHRTHRESHEN |
			    V_WRTHRTHRESH(16);
			t4_tp_pio_write(sc, &v, 1, A_TP_CMM_CONFIG, 1);
		}
	}

	/* 4K, 16K, 64K, 256K DDP "page sizes" for TDDP */
	v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6);
	t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, v);

	/*
	 * 4K, 8K, 16K, 64K DDP "page sizes" for iSCSI DDP. These have been
	 * chosen with MAXPHYS = 128K in mind. The largest DDP buffer that we
	 * may have to deal with is MAXPHYS + 1 page.
	 */
	v = V_HPZ0(0) | V_HPZ1(1) | V_HPZ2(2) | V_HPZ3(4);
	t4_write_reg(sc, A_ULP_RX_ISCSI_PSZ, v);

	/* We use multiple DDP page sizes both in plain-TOE and iSCSI modes. */
	m = v = F_TDDPTAGTCB | F_ISCSITAGTCB;
	t4_set_reg_field(sc, A_ULP_RX_CTL, m, v);

	m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET |
	    F_RESETDDPOFFSET;
	v = V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET;
	t4_set_reg_field(sc, A_TP_PARA_REG5, m, v);
}

/*
 * SGE wants the buffer to be at least 64B and then a multiple of 16. Its
 * address must be 16B aligned. If padding is in use the buffer's start and end
 * need to be aligned to the pad boundary as well. We'll just make sure that
 * the size is a multiple of the pad boundary here; it is up to the buffer
 * allocation code to make sure the start of the buffer is aligned.
 */
static inline int
hwsz_ok(struct adapter *sc, int hwsz)
{
	int mask = fl_pad ? sc->params.sge.pad_boundary - 1 : 16 - 1;

	return (hwsz >= 64 && (hwsz & mask) == 0);
}

/*
 * Initialize the rx buffer sizes and figure out which zones the buffers will
 * be allocated from.
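 *
 * Example of the pairing this computes (illustrative, for a machine with 4KB
 * pages): the MJUMPAGESIZE software cluster gets size1 = 4096 for the unpacked
 * case and, when buffer packing is usable, size2 = 4096 - CL_METADATA_SIZE,
 * matching the hardware buffer size programmed in t4_tweak_chip_settings so
 * that the metadata kept at the end of the cluster is never handed to the
 * chip.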
849 */ 850 void 851 t4_init_rx_buf_info(struct adapter *sc) 852 { 853 struct sge *s = &sc->sge; 854 struct sge_params *sp = &sc->params.sge; 855 int i, j, n; 856 static int sw_buf_sizes[] = { /* Sorted by size */ 857 MCLBYTES, 858 MJUMPAGESIZE, 859 MJUM9BYTES, 860 MJUM16BYTES 861 }; 862 struct rx_buf_info *rxb; 863 864 s->safe_zidx = -1; 865 rxb = &s->rx_buf_info[0]; 866 for (i = 0; i < SW_ZONE_SIZES; i++, rxb++) { 867 rxb->size1 = sw_buf_sizes[i]; 868 rxb->zone = m_getzone(rxb->size1); 869 rxb->type = m_gettype(rxb->size1); 870 rxb->size2 = 0; 871 rxb->hwidx1 = -1; 872 rxb->hwidx2 = -1; 873 for (j = 0; j < SGE_FLBUF_SIZES; j++) { 874 int hwsize = sp->sge_fl_buffer_size[j]; 875 876 if (!hwsz_ok(sc, hwsize)) 877 continue; 878 879 /* hwidx for size1 */ 880 if (rxb->hwidx1 == -1 && rxb->size1 == hwsize) 881 rxb->hwidx1 = j; 882 883 /* hwidx for size2 (buffer packing) */ 884 if (rxb->size1 - CL_METADATA_SIZE < hwsize) 885 continue; 886 n = rxb->size1 - hwsize - CL_METADATA_SIZE; 887 if (n == 0) { 888 rxb->hwidx2 = j; 889 rxb->size2 = hwsize; 890 break; /* stop looking */ 891 } 892 if (rxb->hwidx2 != -1) { 893 if (n < sp->sge_fl_buffer_size[rxb->hwidx2] - 894 hwsize - CL_METADATA_SIZE) { 895 rxb->hwidx2 = j; 896 rxb->size2 = hwsize; 897 } 898 } else if (n <= 2 * CL_METADATA_SIZE) { 899 rxb->hwidx2 = j; 900 rxb->size2 = hwsize; 901 } 902 } 903 if (rxb->hwidx2 != -1) 904 sc->flags |= BUF_PACKING_OK; 905 if (s->safe_zidx == -1 && rxb->size1 == safest_rx_cluster) 906 s->safe_zidx = i; 907 } 908 } 909 910 /* 911 * Verify some basic SGE settings for the PF and VF driver, and other 912 * miscellaneous settings for the PF driver. 913 */ 914 int 915 t4_verify_chip_settings(struct adapter *sc) 916 { 917 struct sge_params *sp = &sc->params.sge; 918 uint32_t m, v, r; 919 int rc = 0; 920 const uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE); 921 922 m = F_RXPKTCPLMODE; 923 v = F_RXPKTCPLMODE; 924 r = sp->sge_control; 925 if ((r & m) != v) { 926 device_printf(sc->dev, "invalid SGE_CONTROL(0x%x)\n", r); 927 rc = EINVAL; 928 } 929 930 /* 931 * If this changes then every single use of PAGE_SHIFT in the driver 932 * needs to be carefully reviewed for PAGE_SHIFT vs sp->page_shift. 
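	 *
	 * For example, on amd64 PAGE_SHIFT is 12 (4KB pages) and the field
	 * value programmed into A_SGE_HOST_PAGE_SIZE by
	 * t4_tweak_chip_settings is PAGE_SHIFT - 10 = 2.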
933 */ 934 if (sp->page_shift != PAGE_SHIFT) { 935 device_printf(sc->dev, "invalid SGE_HOST_PAGE_SIZE(0x%x)\n", r); 936 rc = EINVAL; 937 } 938 939 if (sc->flags & IS_VF) 940 return (0); 941 942 v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6); 943 r = t4_read_reg(sc, A_ULP_RX_TDDP_PSZ); 944 if (r != v) { 945 device_printf(sc->dev, "invalid ULP_RX_TDDP_PSZ(0x%x)\n", r); 946 if (sc->vres.ddp.size != 0) 947 rc = EINVAL; 948 } 949 950 m = v = F_TDDPTAGTCB; 951 r = t4_read_reg(sc, A_ULP_RX_CTL); 952 if ((r & m) != v) { 953 device_printf(sc->dev, "invalid ULP_RX_CTL(0x%x)\n", r); 954 if (sc->vres.ddp.size != 0) 955 rc = EINVAL; 956 } 957 958 m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET | 959 F_RESETDDPOFFSET; 960 v = V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET; 961 r = t4_read_reg(sc, A_TP_PARA_REG5); 962 if ((r & m) != v) { 963 device_printf(sc->dev, "invalid TP_PARA_REG5(0x%x)\n", r); 964 if (sc->vres.ddp.size != 0) 965 rc = EINVAL; 966 } 967 968 return (rc); 969 } 970 971 int 972 t4_create_dma_tag(struct adapter *sc) 973 { 974 int rc; 975 976 rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0, 977 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, 978 BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL, 979 NULL, &sc->dmat); 980 if (rc != 0) { 981 device_printf(sc->dev, 982 "failed to create main DMA tag: %d\n", rc); 983 } 984 985 return (rc); 986 } 987 988 void 989 t4_sge_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx, 990 struct sysctl_oid_list *children) 991 { 992 struct sge_params *sp = &sc->params.sge; 993 994 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "buffer_sizes", 995 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, 996 sysctl_bufsizes, "A", "freelist buffer sizes"); 997 998 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pktshift", CTLFLAG_RD, 999 NULL, sp->fl_pktshift, "payload DMA offset in rx buffer (bytes)"); 1000 1001 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pad", CTLFLAG_RD, 1002 NULL, sp->pad_boundary, "payload pad boundary (bytes)"); 1003 1004 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "spg_len", CTLFLAG_RD, 1005 NULL, sp->spg_len, "status page size (bytes)"); 1006 1007 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cong_drop", CTLFLAG_RD, 1008 NULL, cong_drop, "congestion drop setting"); 1009 #ifdef TCP_OFFLOAD 1010 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ofld_cong_drop", CTLFLAG_RD, 1011 NULL, ofld_cong_drop, "congestion drop setting"); 1012 #endif 1013 1014 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pack", CTLFLAG_RD, 1015 NULL, sp->pack_boundary, "payload pack boundary (bytes)"); 1016 } 1017 1018 int 1019 t4_destroy_dma_tag(struct adapter *sc) 1020 { 1021 if (sc->dmat) 1022 bus_dma_tag_destroy(sc->dmat); 1023 1024 return (0); 1025 } 1026 1027 /* 1028 * Allocate and initialize the firmware event queue, control queues, and special 1029 * purpose rx queues owned by the adapter. 1030 * 1031 * Returns errno on failure. Resources allocated up to that point may still be 1032 * allocated. Caller is responsible for cleanup in case this function fails. 1033 */ 1034 int 1035 t4_setup_adapter_queues(struct adapter *sc) 1036 { 1037 int rc, i; 1038 1039 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 1040 1041 /* 1042 * Firmware event queue 1043 */ 1044 rc = alloc_fwq(sc); 1045 if (rc != 0) 1046 return (rc); 1047 1048 /* 1049 * That's all for the VF driver. 1050 */ 1051 if (sc->flags & IS_VF) 1052 return (rc); 1053 1054 /* 1055 * XXX: General purpose rx queues, one per port. 1056 */ 1057 1058 /* 1059 * Control queues, one per port. 
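	 * A 2-port adapter, for example, ends up with sge.ctrlq[0] and
	 * sge.ctrlq[1], indexed by port_id.  The VF driver (which returned
	 * above) does not create any.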
1060 */ 1061 for_each_port(sc, i) { 1062 rc = alloc_ctrlq(sc, i); 1063 if (rc != 0) 1064 return (rc); 1065 } 1066 1067 return (rc); 1068 } 1069 1070 /* 1071 * Idempotent 1072 */ 1073 int 1074 t4_teardown_adapter_queues(struct adapter *sc) 1075 { 1076 int i; 1077 1078 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 1079 1080 if (sc->sge.ctrlq != NULL) { 1081 MPASS(!(sc->flags & IS_VF)); /* VFs don't allocate ctrlq. */ 1082 for_each_port(sc, i) 1083 free_ctrlq(sc, i); 1084 } 1085 free_fwq(sc); 1086 1087 return (0); 1088 } 1089 1090 /* Maximum payload that could arrive with a single iq descriptor. */ 1091 static inline int 1092 max_rx_payload(struct adapter *sc, if_t ifp, const bool ofld) 1093 { 1094 int maxp; 1095 1096 /* large enough even when hw VLAN extraction is disabled */ 1097 maxp = sc->params.sge.fl_pktshift + ETHER_HDR_LEN + 1098 ETHER_VLAN_ENCAP_LEN + if_getmtu(ifp); 1099 if (ofld && sc->tt.tls && sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS && 1100 maxp < sc->params.tp.max_rx_pdu) 1101 maxp = sc->params.tp.max_rx_pdu; 1102 return (maxp); 1103 } 1104 1105 int 1106 t4_setup_vi_queues(struct vi_info *vi) 1107 { 1108 int rc = 0, i, intr_idx; 1109 struct sge_rxq *rxq; 1110 struct sge_txq *txq; 1111 #ifdef TCP_OFFLOAD 1112 struct sge_ofld_rxq *ofld_rxq; 1113 #endif 1114 #if defined(TCP_OFFLOAD) || defined(RATELIMIT) 1115 struct sge_ofld_txq *ofld_txq; 1116 #endif 1117 #ifdef DEV_NETMAP 1118 int saved_idx, iqidx; 1119 struct sge_nm_rxq *nm_rxq; 1120 struct sge_nm_txq *nm_txq; 1121 #endif 1122 struct adapter *sc = vi->adapter; 1123 if_t ifp = vi->ifp; 1124 int maxp; 1125 1126 /* Interrupt vector to start from (when using multiple vectors) */ 1127 intr_idx = vi->first_intr; 1128 1129 #ifdef DEV_NETMAP 1130 saved_idx = intr_idx; 1131 if (if_getcapabilities(ifp) & IFCAP_NETMAP) { 1132 1133 /* netmap is supported with direct interrupts only. */ 1134 MPASS(!forwarding_intr_to_fwq(sc)); 1135 MPASS(vi->first_intr >= 0); 1136 1137 /* 1138 * We don't have buffers to back the netmap rx queues 1139 * right now so we create the queues in a way that 1140 * doesn't set off any congestion signal in the chip. 1141 */ 1142 for_each_nm_rxq(vi, i, nm_rxq) { 1143 rc = alloc_nm_rxq(vi, nm_rxq, intr_idx, i); 1144 if (rc != 0) 1145 goto done; 1146 intr_idx++; 1147 } 1148 1149 for_each_nm_txq(vi, i, nm_txq) { 1150 iqidx = vi->first_nm_rxq + (i % vi->nnmrxq); 1151 rc = alloc_nm_txq(vi, nm_txq, iqidx, i); 1152 if (rc != 0) 1153 goto done; 1154 } 1155 } 1156 1157 /* Normal rx queues and netmap rx queues share the same interrupts. */ 1158 intr_idx = saved_idx; 1159 #endif 1160 1161 /* 1162 * Allocate rx queues first because a default iqid is required when 1163 * creating a tx queue. 1164 */ 1165 maxp = max_rx_payload(sc, ifp, false); 1166 for_each_rxq(vi, i, rxq) { 1167 rc = alloc_rxq(vi, rxq, i, intr_idx, maxp); 1168 if (rc != 0) 1169 goto done; 1170 if (!forwarding_intr_to_fwq(sc)) 1171 intr_idx++; 1172 } 1173 #ifdef DEV_NETMAP 1174 if (if_getcapabilities(ifp) & IFCAP_NETMAP) 1175 intr_idx = saved_idx + max(vi->nrxq, vi->nnmrxq); 1176 #endif 1177 #ifdef TCP_OFFLOAD 1178 maxp = max_rx_payload(sc, ifp, true); 1179 for_each_ofld_rxq(vi, i, ofld_rxq) { 1180 rc = alloc_ofld_rxq(vi, ofld_rxq, i, intr_idx, maxp); 1181 if (rc != 0) 1182 goto done; 1183 if (!forwarding_intr_to_fwq(sc)) 1184 intr_idx++; 1185 } 1186 #endif 1187 1188 /* 1189 * Now the tx queues. 
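	 * Each tx queue needs one of the rx queues created above as its
	 * default iqid (the queue that receives its egress update
	 * notifications), which is why the rx queues had to come first.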
1190 */ 1191 for_each_txq(vi, i, txq) { 1192 rc = alloc_txq(vi, txq, i); 1193 if (rc != 0) 1194 goto done; 1195 } 1196 #if defined(TCP_OFFLOAD) || defined(RATELIMIT) 1197 for_each_ofld_txq(vi, i, ofld_txq) { 1198 rc = alloc_ofld_txq(vi, ofld_txq, i); 1199 if (rc != 0) 1200 goto done; 1201 } 1202 #endif 1203 done: 1204 if (rc) 1205 t4_teardown_vi_queues(vi); 1206 1207 return (rc); 1208 } 1209 1210 /* 1211 * Idempotent 1212 */ 1213 int 1214 t4_teardown_vi_queues(struct vi_info *vi) 1215 { 1216 int i; 1217 struct sge_rxq *rxq; 1218 struct sge_txq *txq; 1219 #if defined(TCP_OFFLOAD) || defined(RATELIMIT) 1220 struct sge_ofld_txq *ofld_txq; 1221 #endif 1222 #ifdef TCP_OFFLOAD 1223 struct sge_ofld_rxq *ofld_rxq; 1224 #endif 1225 #ifdef DEV_NETMAP 1226 struct sge_nm_rxq *nm_rxq; 1227 struct sge_nm_txq *nm_txq; 1228 #endif 1229 1230 #ifdef DEV_NETMAP 1231 if (if_getcapabilities(vi->ifp) & IFCAP_NETMAP) { 1232 for_each_nm_txq(vi, i, nm_txq) { 1233 free_nm_txq(vi, nm_txq); 1234 } 1235 1236 for_each_nm_rxq(vi, i, nm_rxq) { 1237 free_nm_rxq(vi, nm_rxq); 1238 } 1239 } 1240 #endif 1241 1242 /* 1243 * Take down all the tx queues first, as they reference the rx queues 1244 * (for egress updates, etc.). 1245 */ 1246 1247 for_each_txq(vi, i, txq) { 1248 free_txq(vi, txq); 1249 } 1250 #if defined(TCP_OFFLOAD) || defined(RATELIMIT) 1251 for_each_ofld_txq(vi, i, ofld_txq) { 1252 free_ofld_txq(vi, ofld_txq); 1253 } 1254 #endif 1255 1256 /* 1257 * Then take down the rx queues. 1258 */ 1259 1260 for_each_rxq(vi, i, rxq) { 1261 free_rxq(vi, rxq); 1262 } 1263 #ifdef TCP_OFFLOAD 1264 for_each_ofld_rxq(vi, i, ofld_rxq) { 1265 free_ofld_rxq(vi, ofld_rxq); 1266 } 1267 #endif 1268 1269 return (0); 1270 } 1271 1272 /* 1273 * Interrupt handler when the driver is using only 1 interrupt. This is a very 1274 * unusual scenario. 1275 * 1276 * a) Deals with errors, if any. 1277 * b) Services firmware event queue, which is taking interrupts for all other 1278 * queues. 1279 */ 1280 void 1281 t4_intr_all(void *arg) 1282 { 1283 struct adapter *sc = arg; 1284 struct sge_iq *fwq = &sc->sge.fwq; 1285 1286 MPASS(sc->intr_count == 1); 1287 1288 if (sc->intr_type == INTR_INTX) 1289 t4_write_reg(sc, MYPF_REG(A_PCIE_PF_CLI), 0); 1290 1291 t4_intr_err(arg); 1292 t4_intr_evt(fwq); 1293 } 1294 1295 /* 1296 * Interrupt handler for errors (installed directly when multiple interrupts are 1297 * being used, or called by t4_intr_all). 1298 */ 1299 void 1300 t4_intr_err(void *arg) 1301 { 1302 struct adapter *sc = arg; 1303 uint32_t v; 1304 const bool verbose = (sc->debug_flags & DF_VERBOSE_SLOWINTR) != 0; 1305 1306 if (atomic_load_int(&sc->error_flags) & ADAP_FATAL_ERR) 1307 return; 1308 1309 v = t4_read_reg(sc, MYPF_REG(A_PL_PF_INT_CAUSE)); 1310 if (v & F_PFSW) { 1311 sc->swintr++; 1312 t4_write_reg(sc, MYPF_REG(A_PL_PF_INT_CAUSE), v); 1313 } 1314 1315 if (t4_slow_intr_handler(sc, verbose)) 1316 t4_fatal_err(sc, false); 1317 } 1318 1319 /* 1320 * Interrupt handler for iq-only queues. The firmware event queue is the only 1321 * such queue right now. 1322 */ 1323 void 1324 t4_intr_evt(void *arg) 1325 { 1326 struct sge_iq *iq = arg; 1327 1328 if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) { 1329 service_iq(iq, 0); 1330 (void) atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE); 1331 } 1332 } 1333 1334 /* 1335 * Interrupt handler for iq+fl queues. 
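 * The IQS_IDLE -> IQS_BUSY cmpset below ensures that only one thread services
 * a given queue at a time; an invocation that loses the race returns without
 * touching the queue.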
1336 */ 1337 void 1338 t4_intr(void *arg) 1339 { 1340 struct sge_iq *iq = arg; 1341 1342 if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) { 1343 service_iq_fl(iq, 0); 1344 (void) atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE); 1345 } 1346 } 1347 1348 #ifdef DEV_NETMAP 1349 /* 1350 * Interrupt handler for netmap rx queues. 1351 */ 1352 void 1353 t4_nm_intr(void *arg) 1354 { 1355 struct sge_nm_rxq *nm_rxq = arg; 1356 1357 if (atomic_cmpset_int(&nm_rxq->nm_state, NM_ON, NM_BUSY)) { 1358 service_nm_rxq(nm_rxq); 1359 (void) atomic_cmpset_int(&nm_rxq->nm_state, NM_BUSY, NM_ON); 1360 } 1361 } 1362 1363 /* 1364 * Interrupt handler for vectors shared between NIC and netmap rx queues. 1365 */ 1366 void 1367 t4_vi_intr(void *arg) 1368 { 1369 struct irq *irq = arg; 1370 1371 MPASS(irq->nm_rxq != NULL); 1372 t4_nm_intr(irq->nm_rxq); 1373 1374 MPASS(irq->rxq != NULL); 1375 t4_intr(irq->rxq); 1376 } 1377 #endif 1378 1379 /* 1380 * Deals with interrupts on an iq-only (no freelist) queue. 1381 */ 1382 static int 1383 service_iq(struct sge_iq *iq, int budget) 1384 { 1385 struct sge_iq *q; 1386 struct adapter *sc = iq->adapter; 1387 struct iq_desc *d = &iq->desc[iq->cidx]; 1388 int ndescs = 0, limit; 1389 int rsp_type; 1390 uint32_t lq; 1391 STAILQ_HEAD(, sge_iq) iql = STAILQ_HEAD_INITIALIZER(iql); 1392 1393 KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq)); 1394 KASSERT((iq->flags & IQ_HAS_FL) == 0, 1395 ("%s: called for iq %p with fl (iq->flags 0x%x)", __func__, iq, 1396 iq->flags)); 1397 MPASS((iq->flags & IQ_ADJ_CREDIT) == 0); 1398 MPASS((iq->flags & IQ_LRO_ENABLED) == 0); 1399 1400 limit = budget ? budget : iq->qsize / 16; 1401 1402 /* 1403 * We always come back and check the descriptor ring for new indirect 1404 * interrupts and other responses after running a single handler. 1405 */ 1406 for (;;) { 1407 while ((d->rsp.u.type_gen & F_RSPD_GEN) == iq->gen) { 1408 1409 rmb(); 1410 1411 rsp_type = G_RSPD_TYPE(d->rsp.u.type_gen); 1412 lq = be32toh(d->rsp.pldbuflen_qid); 1413 1414 switch (rsp_type) { 1415 case X_RSPD_TYPE_FLBUF: 1416 panic("%s: data for an iq (%p) with no freelist", 1417 __func__, iq); 1418 1419 /* NOTREACHED */ 1420 1421 case X_RSPD_TYPE_CPL: 1422 KASSERT(d->rss.opcode < NUM_CPL_CMDS, 1423 ("%s: bad opcode %02x.", __func__, 1424 d->rss.opcode)); 1425 t4_cpl_handler[d->rss.opcode](iq, &d->rss, NULL); 1426 break; 1427 1428 case X_RSPD_TYPE_INTR: 1429 /* 1430 * There are 1K interrupt-capable queues (qids 0 1431 * through 1023). A response type indicating a 1432 * forwarded interrupt with a qid >= 1K is an 1433 * iWARP async notification. 
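				 * For a qid below 1K this entry is a
				 * forwarded interrupt: the referenced iq is
				 * looked up in sc->sge.iqmap[] and serviced
				 * from this context.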
1434 */ 1435 if (__predict_true(lq >= 1024)) { 1436 t4_an_handler(iq, &d->rsp); 1437 break; 1438 } 1439 1440 q = sc->sge.iqmap[lq - sc->sge.iq_start - 1441 sc->sge.iq_base]; 1442 if (atomic_cmpset_int(&q->state, IQS_IDLE, 1443 IQS_BUSY)) { 1444 if (service_iq_fl(q, q->qsize / 16) == 0) { 1445 (void) atomic_cmpset_int(&q->state, 1446 IQS_BUSY, IQS_IDLE); 1447 } else { 1448 STAILQ_INSERT_TAIL(&iql, q, 1449 link); 1450 } 1451 } 1452 break; 1453 1454 default: 1455 KASSERT(0, 1456 ("%s: illegal response type %d on iq %p", 1457 __func__, rsp_type, iq)); 1458 log(LOG_ERR, 1459 "%s: illegal response type %d on iq %p", 1460 device_get_nameunit(sc->dev), rsp_type, iq); 1461 break; 1462 } 1463 1464 d++; 1465 if (__predict_false(++iq->cidx == iq->sidx)) { 1466 iq->cidx = 0; 1467 iq->gen ^= F_RSPD_GEN; 1468 d = &iq->desc[0]; 1469 } 1470 if (__predict_false(++ndescs == limit)) { 1471 t4_write_reg(sc, sc->sge_gts_reg, 1472 V_CIDXINC(ndescs) | 1473 V_INGRESSQID(iq->cntxt_id) | 1474 V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX))); 1475 ndescs = 0; 1476 1477 if (budget) { 1478 return (EINPROGRESS); 1479 } 1480 } 1481 } 1482 1483 if (STAILQ_EMPTY(&iql)) 1484 break; 1485 1486 /* 1487 * Process the head only, and send it to the back of the list if 1488 * it's still not done. 1489 */ 1490 q = STAILQ_FIRST(&iql); 1491 STAILQ_REMOVE_HEAD(&iql, link); 1492 if (service_iq_fl(q, q->qsize / 8) == 0) 1493 (void) atomic_cmpset_int(&q->state, IQS_BUSY, IQS_IDLE); 1494 else 1495 STAILQ_INSERT_TAIL(&iql, q, link); 1496 } 1497 1498 t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) | 1499 V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params)); 1500 1501 return (0); 1502 } 1503 1504 #if defined(INET) || defined(INET6) 1505 static inline int 1506 sort_before_lro(struct lro_ctrl *lro) 1507 { 1508 1509 return (lro->lro_mbuf_max != 0); 1510 } 1511 #endif 1512 1513 #define CGBE_SHIFT_SCALE 10 1514 1515 static inline uint64_t 1516 t4_tstmp_to_ns(struct adapter *sc, uint64_t lf) 1517 { 1518 struct clock_sync *cur, dcur; 1519 uint64_t hw_clocks; 1520 uint64_t hw_clk_div; 1521 sbintime_t sbt_cur_to_prev, sbt; 1522 uint64_t hw_tstmp = lf & 0xfffffffffffffffULL; /* 60b, not 64b. */ 1523 seqc_t gen; 1524 1525 for (;;) { 1526 cur = &sc->cal_info[sc->cal_current]; 1527 gen = seqc_read(&cur->gen); 1528 if (gen == 0) 1529 return (0); 1530 dcur = *cur; 1531 if (seqc_consistent(&cur->gen, gen)) 1532 break; 1533 } 1534 1535 /* 1536 * Our goal here is to have a result that is: 1537 * 1538 * ( (cur_time - prev_time) ) 1539 * ((hw_tstmp - hw_prev) * ----------------------------- ) + prev_time 1540 * ( (hw_cur - hw_prev) ) 1541 * 1542 * With the constraints that we cannot use float and we 1543 * don't want to overflow the uint64_t numbers we are using. 1544 */ 1545 hw_clocks = hw_tstmp - dcur.hw_prev; 1546 sbt_cur_to_prev = (dcur.sbt_cur - dcur.sbt_prev); 1547 hw_clk_div = dcur.hw_cur - dcur.hw_prev; 1548 sbt = hw_clocks * sbt_cur_to_prev / hw_clk_div + dcur.sbt_prev; 1549 return (sbttons(sbt)); 1550 } 1551 1552 static inline void 1553 move_to_next_rxbuf(struct sge_fl *fl) 1554 { 1555 1556 fl->rx_offset = 0; 1557 if (__predict_false((++fl->cidx & 7) == 0)) { 1558 uint16_t cidx = fl->cidx >> 3; 1559 1560 if (__predict_false(cidx == fl->sidx)) 1561 fl->cidx = cidx = 0; 1562 fl->hw_cidx = cidx; 1563 } 1564 } 1565 1566 /* 1567 * Deals with interrupts on an iq+fl queue. 
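 * A budget of 0 means run to completion.  A non-zero budget caps the number of
 * descriptors processed in one call; if the cap is hit the function returns
 * EINPROGRESS and the caller is expected to come back for the rest.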
1568 */ 1569 static int 1570 service_iq_fl(struct sge_iq *iq, int budget) 1571 { 1572 struct sge_rxq *rxq = iq_to_rxq(iq); 1573 struct sge_fl *fl; 1574 struct adapter *sc = iq->adapter; 1575 struct iq_desc *d = &iq->desc[iq->cidx]; 1576 int ndescs, limit; 1577 int rsp_type, starved; 1578 uint32_t lq; 1579 uint16_t fl_hw_cidx; 1580 struct mbuf *m0; 1581 #if defined(INET) || defined(INET6) 1582 const struct timeval lro_timeout = {0, sc->lro_timeout}; 1583 struct lro_ctrl *lro = &rxq->lro; 1584 #endif 1585 1586 KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq)); 1587 MPASS(iq->flags & IQ_HAS_FL); 1588 1589 ndescs = 0; 1590 #if defined(INET) || defined(INET6) 1591 if (iq->flags & IQ_ADJ_CREDIT) { 1592 MPASS(sort_before_lro(lro)); 1593 iq->flags &= ~IQ_ADJ_CREDIT; 1594 if ((d->rsp.u.type_gen & F_RSPD_GEN) != iq->gen) { 1595 tcp_lro_flush_all(lro); 1596 t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(1) | 1597 V_INGRESSQID((u32)iq->cntxt_id) | 1598 V_SEINTARM(iq->intr_params)); 1599 return (0); 1600 } 1601 ndescs = 1; 1602 } 1603 #else 1604 MPASS((iq->flags & IQ_ADJ_CREDIT) == 0); 1605 #endif 1606 1607 limit = budget ? budget : iq->qsize / 16; 1608 fl = &rxq->fl; 1609 fl_hw_cidx = fl->hw_cidx; /* stable snapshot */ 1610 while ((d->rsp.u.type_gen & F_RSPD_GEN) == iq->gen) { 1611 1612 rmb(); 1613 1614 m0 = NULL; 1615 rsp_type = G_RSPD_TYPE(d->rsp.u.type_gen); 1616 lq = be32toh(d->rsp.pldbuflen_qid); 1617 1618 switch (rsp_type) { 1619 case X_RSPD_TYPE_FLBUF: 1620 if (lq & F_RSPD_NEWBUF) { 1621 if (fl->rx_offset > 0) 1622 move_to_next_rxbuf(fl); 1623 lq = G_RSPD_LEN(lq); 1624 } 1625 if (IDXDIFF(fl->hw_cidx, fl_hw_cidx, fl->sidx) > 4) { 1626 FL_LOCK(fl); 1627 refill_fl(sc, fl, 64); 1628 FL_UNLOCK(fl); 1629 fl_hw_cidx = fl->hw_cidx; 1630 } 1631 1632 if (d->rss.opcode == CPL_RX_PKT) { 1633 if (__predict_true(eth_rx(sc, rxq, d, lq) == 0)) 1634 break; 1635 goto out; 1636 } 1637 m0 = get_fl_payload(sc, fl, lq); 1638 if (__predict_false(m0 == NULL)) 1639 goto out; 1640 1641 /* fall through */ 1642 1643 case X_RSPD_TYPE_CPL: 1644 KASSERT(d->rss.opcode < NUM_CPL_CMDS, 1645 ("%s: bad opcode %02x.", __func__, d->rss.opcode)); 1646 t4_cpl_handler[d->rss.opcode](iq, &d->rss, m0); 1647 break; 1648 1649 case X_RSPD_TYPE_INTR: 1650 1651 /* 1652 * There are 1K interrupt-capable queues (qids 0 1653 * through 1023). A response type indicating a 1654 * forwarded interrupt with a qid >= 1K is an 1655 * iWARP async notification. That is the only 1656 * acceptable indirect interrupt on this queue. 
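			 * An iq+fl queue never takes forwarded interrupts on
			 * behalf of other queues, so a qid below 1K here can
			 * only be the result of bad programming and the
			 * driver panics.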
			 */
			if (__predict_false(lq < 1024)) {
				panic("%s: indirect interrupt on iq_fl %p "
				    "with qid %u", __func__, iq, lq);
			}

			t4_an_handler(iq, &d->rsp);
			break;

		default:
			KASSERT(0, ("%s: illegal response type %d on iq %p",
			    __func__, rsp_type, iq));
			log(LOG_ERR, "%s: illegal response type %d on iq %p",
			    device_get_nameunit(sc->dev), rsp_type, iq);
			break;
		}

		d++;
		if (__predict_false(++iq->cidx == iq->sidx)) {
			iq->cidx = 0;
			iq->gen ^= F_RSPD_GEN;
			d = &iq->desc[0];
		}
		if (__predict_false(++ndescs == limit)) {
			t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) |
			    V_INGRESSQID(iq->cntxt_id) |
			    V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));

#if defined(INET) || defined(INET6)
			if (iq->flags & IQ_LRO_ENABLED &&
			    !sort_before_lro(lro) &&
			    sc->lro_timeout != 0) {
				tcp_lro_flush_inactive(lro, &lro_timeout);
			}
#endif
			if (budget)
				return (EINPROGRESS);
			ndescs = 0;
		}
	}
out:
#if defined(INET) || defined(INET6)
	if (iq->flags & IQ_LRO_ENABLED) {
		if (ndescs > 0 && lro->lro_mbuf_count > 8) {
			MPASS(sort_before_lro(lro));
			/* hold back one credit and don't flush LRO state */
			iq->flags |= IQ_ADJ_CREDIT;
			ndescs--;
		} else {
			tcp_lro_flush_all(lro);
		}
	}
#endif

	t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) |
	    V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params));

	FL_LOCK(fl);
	starved = refill_fl(sc, fl, 64);
	FL_UNLOCK(fl);
	if (__predict_false(starved != 0))
		add_fl_to_sfl(sc, fl);

	return (0);
}

static inline struct cluster_metadata *
cl_metadata(struct fl_sdesc *sd)
{

	return ((void *)(sd->cl + sd->moff));
}

static void
rxb_free(struct mbuf *m)
{
	struct cluster_metadata *clm = m->m_ext.ext_arg1;

	uma_zfree(clm->zone, clm->cl);
	counter_u64_add(extfree_rels, 1);
}

/*
 * The mbuf returned comes from zone_mbuf and carries the payload in one of
 * these ways:
 * a) complete frame inside the mbuf
 * b) m_cljset (for clusters without metadata)
 * c) m_extaddref (cluster with metadata)
 */
static struct mbuf *
get_scatter_segment(struct adapter *sc, struct sge_fl *fl, int fr_offset,
    int remaining)
{
	struct mbuf *m;
	struct fl_sdesc *sd = &fl->sdesc[fl->cidx];
	struct rx_buf_info *rxb = &sc->sge.rx_buf_info[sd->zidx];
	struct cluster_metadata *clm;
	int len, blen;
	caddr_t payload;

	if (fl->flags & FL_BUF_PACKING) {
		u_int l, pad;

		blen = rxb->size2 - fl->rx_offset;	/* max possible in this buf */
		len = min(remaining, blen);
		payload = sd->cl + fl->rx_offset;

		l = fr_offset + len;
		pad = roundup2(l, fl->buf_boundary) - l;
		if (fl->rx_offset + len + pad < rxb->size2)
			blen = len + pad;
		MPASS(fl->rx_offset + blen <= rxb->size2);
	} else {
		MPASS(fl->rx_offset == 0);	/* not packing */
		blen = rxb->size1;
		len = min(remaining, blen);
		payload = sd->cl;
	}

	if (fr_offset == 0) {
		m = m_gethdr(M_NOWAIT, MT_DATA);
		if (__predict_false(m == NULL))
			return (NULL);
		m->m_pkthdr.len = remaining;
	} else {
		m = m_get(M_NOWAIT, MT_DATA);
		if (__predict_false(m == NULL))
			return (NULL);
	}
	m->m_len = len;
	kmsan_mark(payload, len,
KMSAN_STATE_INITED); 1788 1789 if (sc->sc_do_rxcopy && len < RX_COPY_THRESHOLD) { 1790 /* copy data to mbuf */ 1791 bcopy(payload, mtod(m, caddr_t), len); 1792 if (fl->flags & FL_BUF_PACKING) { 1793 fl->rx_offset += blen; 1794 MPASS(fl->rx_offset <= rxb->size2); 1795 if (fl->rx_offset < rxb->size2) 1796 return (m); /* without advancing the cidx */ 1797 } 1798 } else if (fl->flags & FL_BUF_PACKING) { 1799 clm = cl_metadata(sd); 1800 if (sd->nmbuf++ == 0) { 1801 clm->refcount = 1; 1802 clm->zone = rxb->zone; 1803 clm->cl = sd->cl; 1804 counter_u64_add(extfree_refs, 1); 1805 } 1806 m_extaddref(m, payload, blen, &clm->refcount, rxb_free, clm, 1807 NULL); 1808 1809 fl->rx_offset += blen; 1810 MPASS(fl->rx_offset <= rxb->size2); 1811 if (fl->rx_offset < rxb->size2) 1812 return (m); /* without advancing the cidx */ 1813 } else { 1814 m_cljset(m, sd->cl, rxb->type); 1815 sd->cl = NULL; /* consumed, not a recycle candidate */ 1816 } 1817 1818 move_to_next_rxbuf(fl); 1819 1820 return (m); 1821 } 1822 1823 static struct mbuf * 1824 get_fl_payload(struct adapter *sc, struct sge_fl *fl, const u_int plen) 1825 { 1826 struct mbuf *m0, *m, **pnext; 1827 u_int remaining; 1828 1829 if (__predict_false(fl->flags & FL_BUF_RESUME)) { 1830 M_ASSERTPKTHDR(fl->m0); 1831 MPASS(fl->m0->m_pkthdr.len == plen); 1832 MPASS(fl->remaining < plen); 1833 1834 m0 = fl->m0; 1835 pnext = fl->pnext; 1836 remaining = fl->remaining; 1837 fl->flags &= ~FL_BUF_RESUME; 1838 goto get_segment; 1839 } 1840 1841 /* 1842 * Payload starts at rx_offset in the current hw buffer. Its length is 1843 * 'len' and it may span multiple hw buffers. 1844 */ 1845 1846 m0 = get_scatter_segment(sc, fl, 0, plen); 1847 if (m0 == NULL) 1848 return (NULL); 1849 remaining = plen - m0->m_len; 1850 pnext = &m0->m_next; 1851 while (remaining > 0) { 1852 get_segment: 1853 MPASS(fl->rx_offset == 0); 1854 m = get_scatter_segment(sc, fl, plen - remaining, remaining); 1855 if (__predict_false(m == NULL)) { 1856 fl->m0 = m0; 1857 fl->pnext = pnext; 1858 fl->remaining = remaining; 1859 fl->flags |= FL_BUF_RESUME; 1860 return (NULL); 1861 } 1862 *pnext = m; 1863 pnext = &m->m_next; 1864 remaining -= m->m_len; 1865 } 1866 *pnext = NULL; 1867 1868 M_ASSERTPKTHDR(m0); 1869 return (m0); 1870 } 1871 1872 static int 1873 skip_scatter_segment(struct adapter *sc, struct sge_fl *fl, int fr_offset, 1874 int remaining) 1875 { 1876 struct fl_sdesc *sd = &fl->sdesc[fl->cidx]; 1877 struct rx_buf_info *rxb = &sc->sge.rx_buf_info[sd->zidx]; 1878 int len, blen; 1879 1880 if (fl->flags & FL_BUF_PACKING) { 1881 u_int l, pad; 1882 1883 blen = rxb->size2 - fl->rx_offset; /* max possible in this buf */ 1884 len = min(remaining, blen); 1885 1886 l = fr_offset + len; 1887 pad = roundup2(l, fl->buf_boundary) - l; 1888 if (fl->rx_offset + len + pad < rxb->size2) 1889 blen = len + pad; 1890 fl->rx_offset += blen; 1891 MPASS(fl->rx_offset <= rxb->size2); 1892 if (fl->rx_offset < rxb->size2) 1893 return (len); /* without advancing the cidx */ 1894 } else { 1895 MPASS(fl->rx_offset == 0); /* not packing */ 1896 blen = rxb->size1; 1897 len = min(remaining, blen); 1898 } 1899 move_to_next_rxbuf(fl); 1900 return (len); 1901 } 1902 1903 static inline void 1904 skip_fl_payload(struct adapter *sc, struct sge_fl *fl, int plen) 1905 { 1906 int remaining, fr_offset, len; 1907 1908 fr_offset = 0; 1909 remaining = plen; 1910 while (remaining > 0) { 1911 len = skip_scatter_segment(sc, fl, fr_offset, remaining); 1912 fr_offset += len; 1913 remaining -= len; 1914 } 1915 } 1916 1917 static inline int 1918 
get_segment_len(struct adapter *sc, struct sge_fl *fl, int plen) 1919 { 1920 int len; 1921 struct fl_sdesc *sd = &fl->sdesc[fl->cidx]; 1922 struct rx_buf_info *rxb = &sc->sge.rx_buf_info[sd->zidx]; 1923 1924 if (fl->flags & FL_BUF_PACKING) 1925 len = rxb->size2 - fl->rx_offset; 1926 else 1927 len = rxb->size1; 1928 1929 return (min(plen, len)); 1930 } 1931 1932 static int 1933 eth_rx(struct adapter *sc, struct sge_rxq *rxq, const struct iq_desc *d, 1934 u_int plen) 1935 { 1936 struct mbuf *m0; 1937 if_t ifp = rxq->ifp; 1938 struct sge_fl *fl = &rxq->fl; 1939 struct vi_info *vi = if_getsoftc(ifp); 1940 const struct cpl_rx_pkt *cpl; 1941 #if defined(INET) || defined(INET6) 1942 struct lro_ctrl *lro = &rxq->lro; 1943 #endif 1944 uint16_t err_vec, tnl_type, tnlhdr_len; 1945 static const int sw_hashtype[4][2] = { 1946 {M_HASHTYPE_NONE, M_HASHTYPE_NONE}, 1947 {M_HASHTYPE_RSS_IPV4, M_HASHTYPE_RSS_IPV6}, 1948 {M_HASHTYPE_RSS_TCP_IPV4, M_HASHTYPE_RSS_TCP_IPV6}, 1949 {M_HASHTYPE_RSS_UDP_IPV4, M_HASHTYPE_RSS_UDP_IPV6}, 1950 }; 1951 static const int sw_csum_flags[2][2] = { 1952 { 1953 /* IP, inner IP */ 1954 CSUM_ENCAP_VXLAN | 1955 CSUM_L3_CALC | CSUM_L3_VALID | 1956 CSUM_L4_CALC | CSUM_L4_VALID | 1957 CSUM_INNER_L3_CALC | CSUM_INNER_L3_VALID | 1958 CSUM_INNER_L4_CALC | CSUM_INNER_L4_VALID, 1959 1960 /* IP, inner IP6 */ 1961 CSUM_ENCAP_VXLAN | 1962 CSUM_L3_CALC | CSUM_L3_VALID | 1963 CSUM_L4_CALC | CSUM_L4_VALID | 1964 CSUM_INNER_L4_CALC | CSUM_INNER_L4_VALID, 1965 }, 1966 { 1967 /* IP6, inner IP */ 1968 CSUM_ENCAP_VXLAN | 1969 CSUM_L4_CALC | CSUM_L4_VALID | 1970 CSUM_INNER_L3_CALC | CSUM_INNER_L3_VALID | 1971 CSUM_INNER_L4_CALC | CSUM_INNER_L4_VALID, 1972 1973 /* IP6, inner IP6 */ 1974 CSUM_ENCAP_VXLAN | 1975 CSUM_L4_CALC | CSUM_L4_VALID | 1976 CSUM_INNER_L4_CALC | CSUM_INNER_L4_VALID, 1977 }, 1978 }; 1979 1980 MPASS(plen > sc->params.sge.fl_pktshift); 1981 if (vi->pfil != NULL && PFIL_HOOKED_IN(vi->pfil) && 1982 __predict_true((fl->flags & FL_BUF_RESUME) == 0)) { 1983 struct fl_sdesc *sd = &fl->sdesc[fl->cidx]; 1984 caddr_t frame; 1985 int rc, slen; 1986 1987 slen = get_segment_len(sc, fl, plen) - 1988 sc->params.sge.fl_pktshift; 1989 frame = sd->cl + fl->rx_offset + sc->params.sge.fl_pktshift; 1990 CURVNET_SET_QUIET(if_getvnet(ifp)); 1991 rc = pfil_mem_in(vi->pfil, frame, slen, ifp, &m0); 1992 CURVNET_RESTORE(); 1993 if (rc == PFIL_DROPPED || rc == PFIL_CONSUMED) { 1994 skip_fl_payload(sc, fl, plen); 1995 return (0); 1996 } 1997 if (rc == PFIL_REALLOCED) { 1998 skip_fl_payload(sc, fl, plen); 1999 goto have_mbuf; 2000 } 2001 } 2002 2003 m0 = get_fl_payload(sc, fl, plen); 2004 if (__predict_false(m0 == NULL)) 2005 return (ENOMEM); 2006 2007 m0->m_pkthdr.len -= sc->params.sge.fl_pktshift; 2008 m0->m_len -= sc->params.sge.fl_pktshift; 2009 m0->m_data += sc->params.sge.fl_pktshift; 2010 2011 have_mbuf: 2012 m0->m_pkthdr.rcvif = ifp; 2013 M_HASHTYPE_SET(m0, sw_hashtype[d->rss.hash_type][d->rss.ipv6]); 2014 m0->m_pkthdr.flowid = be32toh(d->rss.hash_val); 2015 2016 cpl = (const void *)(&d->rss + 1); 2017 if (sc->params.tp.rx_pkt_encap) { 2018 const uint16_t ev = be16toh(cpl->err_vec); 2019 2020 err_vec = G_T6_COMPR_RXERR_VEC(ev); 2021 tnl_type = G_T6_RX_TNL_TYPE(ev); 2022 tnlhdr_len = G_T6_RX_TNLHDR_LEN(ev); 2023 } else { 2024 err_vec = be16toh(cpl->err_vec); 2025 tnl_type = 0; 2026 tnlhdr_len = 0; 2027 } 2028 if (cpl->csum_calc && err_vec == 0) { 2029 int ipv6 = !!(cpl->l2info & htobe32(F_RXF_IP6)); 2030 2031 /* checksum(s) calculated and found to be correct. 
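 *
 * Exactly one of F_RXF_IP and F_RXF_IP6 is expected to be set (the
 * MPASS below checks this).  IPv6 has no header checksum, so only the
 * L4 flags are reported for it; IPv4 gets both the L3 and L4 flags.
 * As a hypothetical example, a verified IPv4/TCP frame received on an
 * ifnet with IFCAP_RXCSUM enabled ends up with
 *	csum_flags = CSUM_L3_CALC | CSUM_L3_VALID | CSUM_L4_CALC | CSUM_L4_VALID
 *	csum_data  = be16toh(cpl->csum)
 * The VXLAN cases below use the sw_csum_flags table (or hand-built
 * flags for inner IP fragments) instead.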
*/ 2032 2033 MPASS((cpl->l2info & htobe32(F_RXF_IP)) ^ 2034 (cpl->l2info & htobe32(F_RXF_IP6))); 2035 m0->m_pkthdr.csum_data = be16toh(cpl->csum); 2036 if (tnl_type == 0) { 2037 if (!ipv6 && if_getcapenable(ifp) & IFCAP_RXCSUM) { 2038 m0->m_pkthdr.csum_flags = CSUM_L3_CALC | 2039 CSUM_L3_VALID | CSUM_L4_CALC | 2040 CSUM_L4_VALID; 2041 } else if (ipv6 && if_getcapenable(ifp) & IFCAP_RXCSUM_IPV6) { 2042 m0->m_pkthdr.csum_flags = CSUM_L4_CALC | 2043 CSUM_L4_VALID; 2044 } 2045 rxq->rxcsum++; 2046 } else { 2047 MPASS(tnl_type == RX_PKT_TNL_TYPE_VXLAN); 2048 2049 M_HASHTYPE_SETINNER(m0); 2050 if (__predict_false(cpl->ip_frag)) { 2051 /* 2052 * csum_data is for the inner frame (which is an 2053 * IP fragment) and is not 0xffff. There is no 2054 * way to pass the inner csum_data to the stack. 2055 * We don't want the stack to use the inner 2056 * csum_data to validate the outer frame or it 2057 * will get rejected. So we fix csum_data here 2058 * and let sw do the checksum of inner IP 2059 * fragments. 2060 * 2061 * XXX: Need 32b for csum_data2 in an rx mbuf. 2062 * Maybe stuff it into rcv_tstmp? 2063 */ 2064 m0->m_pkthdr.csum_data = 0xffff; 2065 if (ipv6) { 2066 m0->m_pkthdr.csum_flags = CSUM_L4_CALC | 2067 CSUM_L4_VALID; 2068 } else { 2069 m0->m_pkthdr.csum_flags = CSUM_L3_CALC | 2070 CSUM_L3_VALID | CSUM_L4_CALC | 2071 CSUM_L4_VALID; 2072 } 2073 } else { 2074 int outer_ipv6; 2075 2076 MPASS(m0->m_pkthdr.csum_data == 0xffff); 2077 2078 outer_ipv6 = tnlhdr_len >= 2079 sizeof(struct ether_header) + 2080 sizeof(struct ip6_hdr); 2081 m0->m_pkthdr.csum_flags = 2082 sw_csum_flags[outer_ipv6][ipv6]; 2083 } 2084 rxq->vxlan_rxcsum++; 2085 } 2086 } 2087 2088 if (cpl->vlan_ex) { 2089 m0->m_pkthdr.ether_vtag = be16toh(cpl->vlan); 2090 m0->m_flags |= M_VLANTAG; 2091 rxq->vlan_extraction++; 2092 } 2093 2094 if (rxq->iq.flags & IQ_RX_TIMESTAMP) { 2095 /* 2096 * Fill up rcv_tstmp but do not set M_TSTMP as 2097 * long as we get a non-zero back from t4_tstmp_to_ns(). 2098 */ 2099 m0->m_pkthdr.rcv_tstmp = t4_tstmp_to_ns(sc, 2100 be64toh(d->rsp.u.last_flit)); 2101 if (m0->m_pkthdr.rcv_tstmp != 0) 2102 m0->m_flags |= M_TSTMP; 2103 } 2104 2105 #ifdef NUMA 2106 m0->m_pkthdr.numa_domain = if_getnumadomain(ifp); 2107 #endif 2108 #if defined(INET) || defined(INET6) 2109 if (rxq->iq.flags & IQ_LRO_ENABLED && tnl_type == 0 && 2110 (M_HASHTYPE_GET(m0) == M_HASHTYPE_RSS_TCP_IPV4 || 2111 M_HASHTYPE_GET(m0) == M_HASHTYPE_RSS_TCP_IPV6)) { 2112 if (sort_before_lro(lro)) { 2113 tcp_lro_queue_mbuf(lro, m0); 2114 return (0); /* queued for sort, then LRO */ 2115 } 2116 if (tcp_lro_rx(lro, m0, 0) == 0) 2117 return (0); /* queued for LRO */ 2118 } 2119 #endif 2120 if_input(ifp, m0); 2121 2122 return (0); 2123 } 2124 2125 /* 2126 * Must drain the wrq or make sure that someone else will. 
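 *
 * Runs without the EQ lock held and takes it itself.  It leaves wr_list
 * alone while there are incomplete WRs outstanding because commit_wrq_wr
 * drains the list itself once the last partial WR is committed.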
2127 */ 2128 static void 2129 wrq_tx_drain(void *arg, int n) 2130 { 2131 struct sge_wrq *wrq = arg; 2132 struct sge_eq *eq = &wrq->eq; 2133 2134 EQ_LOCK(eq); 2135 if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list)) 2136 drain_wrq_wr_list(wrq->adapter, wrq); 2137 EQ_UNLOCK(eq); 2138 } 2139 2140 static void 2141 drain_wrq_wr_list(struct adapter *sc, struct sge_wrq *wrq) 2142 { 2143 struct sge_eq *eq = &wrq->eq; 2144 u_int available, dbdiff; /* # of hardware descriptors */ 2145 u_int n; 2146 struct wrqe *wr; 2147 struct fw_eth_tx_pkt_wr *dst; /* any fw WR struct will do */ 2148 2149 EQ_LOCK_ASSERT_OWNED(eq); 2150 MPASS(TAILQ_EMPTY(&wrq->incomplete_wrs)); 2151 wr = STAILQ_FIRST(&wrq->wr_list); 2152 MPASS(wr != NULL); /* Must be called with something useful to do */ 2153 MPASS(eq->pidx == eq->dbidx); 2154 dbdiff = 0; 2155 2156 do { 2157 eq->cidx = read_hw_cidx(eq); 2158 if (eq->pidx == eq->cidx) 2159 available = eq->sidx - 1; 2160 else 2161 available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; 2162 2163 MPASS(wr->wrq == wrq); 2164 n = howmany(wr->wr_len, EQ_ESIZE); 2165 if (available < n) 2166 break; 2167 2168 dst = (void *)&eq->desc[eq->pidx]; 2169 if (__predict_true(eq->sidx - eq->pidx > n)) { 2170 /* Won't wrap, won't end exactly at the status page. */ 2171 bcopy(&wr->wr[0], dst, wr->wr_len); 2172 eq->pidx += n; 2173 } else { 2174 int first_portion = (eq->sidx - eq->pidx) * EQ_ESIZE; 2175 2176 bcopy(&wr->wr[0], dst, first_portion); 2177 if (wr->wr_len > first_portion) { 2178 bcopy(&wr->wr[first_portion], &eq->desc[0], 2179 wr->wr_len - first_portion); 2180 } 2181 eq->pidx = n - (eq->sidx - eq->pidx); 2182 } 2183 wrq->tx_wrs_copied++; 2184 2185 if (available < eq->sidx / 4 && 2186 atomic_cmpset_int(&eq->equiq, 0, 1)) { 2187 /* 2188 * XXX: This is not 100% reliable with some 2189 * types of WRs. But this is a very unusual 2190 * situation for an ofld/ctrl queue anyway. 2191 */ 2192 dst->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ | 2193 F_FW_WR_EQUEQ); 2194 } 2195 2196 dbdiff += n; 2197 if (dbdiff >= 16) { 2198 ring_eq_db(sc, eq, dbdiff); 2199 dbdiff = 0; 2200 } 2201 2202 STAILQ_REMOVE_HEAD(&wrq->wr_list, link); 2203 free_wrqe(wr); 2204 MPASS(wrq->nwr_pending > 0); 2205 wrq->nwr_pending--; 2206 MPASS(wrq->ndesc_needed >= n); 2207 wrq->ndesc_needed -= n; 2208 } while ((wr = STAILQ_FIRST(&wrq->wr_list)) != NULL); 2209 2210 if (dbdiff) 2211 ring_eq_db(sc, eq, dbdiff); 2212 } 2213 2214 /* 2215 * Doesn't fail. Holds on to work requests it can't send right away. 2216 */ 2217 void 2218 t4_wrq_tx_locked(struct adapter *sc, struct sge_wrq *wrq, struct wrqe *wr) 2219 { 2220 #ifdef INVARIANTS 2221 struct sge_eq *eq = &wrq->eq; 2222 #endif 2223 2224 EQ_LOCK_ASSERT_OWNED(eq); 2225 MPASS(wr != NULL); 2226 MPASS(wr->wr_len > 0 && wr->wr_len <= SGE_MAX_WR_LEN); 2227 MPASS((wr->wr_len & 0x7) == 0); 2228 2229 STAILQ_INSERT_TAIL(&wrq->wr_list, wr, link); 2230 wrq->nwr_pending++; 2231 wrq->ndesc_needed += howmany(wr->wr_len, EQ_ESIZE); 2232 2233 if (!TAILQ_EMPTY(&wrq->incomplete_wrs)) 2234 return; /* commit_wrq_wr will drain wr_list as well. */ 2235 2236 drain_wrq_wr_list(sc, wrq); 2237 2238 /* Doorbell must have caught up to the pidx. 
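 * drain_wrq_wr_list rings the doorbell for everything it copies into the
 * descriptor ring before returning, so the two indices cannot differ at
 * this point.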
*/ 2239 MPASS(eq->pidx == eq->dbidx); 2240 } 2241 2242 void 2243 t4_update_fl_bufsize(if_t ifp) 2244 { 2245 struct vi_info *vi = if_getsoftc(ifp); 2246 struct adapter *sc = vi->adapter; 2247 struct sge_rxq *rxq; 2248 #ifdef TCP_OFFLOAD 2249 struct sge_ofld_rxq *ofld_rxq; 2250 #endif 2251 struct sge_fl *fl; 2252 int i, maxp; 2253 2254 maxp = max_rx_payload(sc, ifp, false); 2255 for_each_rxq(vi, i, rxq) { 2256 fl = &rxq->fl; 2257 2258 FL_LOCK(fl); 2259 fl->zidx = find_refill_source(sc, maxp, 2260 fl->flags & FL_BUF_PACKING); 2261 FL_UNLOCK(fl); 2262 } 2263 #ifdef TCP_OFFLOAD 2264 maxp = max_rx_payload(sc, ifp, true); 2265 for_each_ofld_rxq(vi, i, ofld_rxq) { 2266 fl = &ofld_rxq->fl; 2267 2268 FL_LOCK(fl); 2269 fl->zidx = find_refill_source(sc, maxp, 2270 fl->flags & FL_BUF_PACKING); 2271 FL_UNLOCK(fl); 2272 } 2273 #endif 2274 } 2275 2276 #ifdef RATELIMIT 2277 static inline int 2278 mbuf_eo_nsegs(struct mbuf *m) 2279 { 2280 2281 M_ASSERTPKTHDR(m); 2282 return (m->m_pkthdr.PH_loc.eight[1]); 2283 } 2284 2285 #if defined(INET) || defined(INET6) 2286 static inline void 2287 set_mbuf_eo_nsegs(struct mbuf *m, uint8_t nsegs) 2288 { 2289 2290 M_ASSERTPKTHDR(m); 2291 m->m_pkthdr.PH_loc.eight[1] = nsegs; 2292 } 2293 #endif 2294 2295 static inline int 2296 mbuf_eo_len16(struct mbuf *m) 2297 { 2298 int n; 2299 2300 M_ASSERTPKTHDR(m); 2301 n = m->m_pkthdr.PH_loc.eight[2]; 2302 MPASS(n > 0 && n <= SGE_MAX_WR_LEN / 16); 2303 2304 return (n); 2305 } 2306 2307 #if defined(INET) || defined(INET6) 2308 static inline void 2309 set_mbuf_eo_len16(struct mbuf *m, uint8_t len16) 2310 { 2311 2312 M_ASSERTPKTHDR(m); 2313 m->m_pkthdr.PH_loc.eight[2] = len16; 2314 } 2315 #endif 2316 2317 static inline int 2318 mbuf_eo_tsclk_tsoff(struct mbuf *m) 2319 { 2320 2321 M_ASSERTPKTHDR(m); 2322 return (m->m_pkthdr.PH_loc.eight[3]); 2323 } 2324 2325 #if defined(INET) || defined(INET6) 2326 static inline void 2327 set_mbuf_eo_tsclk_tsoff(struct mbuf *m, uint8_t tsclk_tsoff) 2328 { 2329 2330 M_ASSERTPKTHDR(m); 2331 m->m_pkthdr.PH_loc.eight[3] = tsclk_tsoff; 2332 } 2333 #endif 2334 2335 static inline int 2336 needs_eo(struct m_snd_tag *mst) 2337 { 2338 2339 return (mst != NULL && mst->sw->type == IF_SND_TAG_TYPE_RATE_LIMIT); 2340 } 2341 #endif 2342 2343 /* 2344 * Try to allocate an mbuf to contain a raw work request. To make it 2345 * easy to construct the work request, don't allocate a chain but a 2346 * single mbuf. 
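 *
 * Anything larger than a single cluster (MCLBYTES) fails, so callers must
 * size their raw work requests accordingly.  A minimal, hypothetical usage
 * sketch (fw_foo_wr stands in for whatever WR the caller is building):
 *
 *	struct mbuf *m;
 *	struct fw_foo_wr *wr;
 *
 *	m = alloc_wr_mbuf(sizeof(*wr), M_NOWAIT);
 *	if (m != NULL) {
 *		wr = mtod(m, struct fw_foo_wr *);
 *		(fill in the work request, then hand m to the tx path)
 *	}
 *
 * The mbuf comes back tagged MC_RAW_WR with its len16 set to
 * howmany(len, 16), which is what the tx path uses to size the WR.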
2347 */ 2348 struct mbuf * 2349 alloc_wr_mbuf(int len, int how) 2350 { 2351 struct mbuf *m; 2352 2353 if (len <= MHLEN) 2354 m = m_gethdr(how, MT_DATA); 2355 else if (len <= MCLBYTES) 2356 m = m_getcl(how, MT_DATA, M_PKTHDR); 2357 else 2358 m = NULL; 2359 if (m == NULL) 2360 return (NULL); 2361 m->m_pkthdr.len = len; 2362 m->m_len = len; 2363 set_mbuf_cflags(m, MC_RAW_WR); 2364 set_mbuf_len16(m, howmany(len, 16)); 2365 return (m); 2366 } 2367 2368 static inline bool 2369 needs_hwcsum(struct mbuf *m) 2370 { 2371 const uint32_t csum_flags = CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP | 2372 CSUM_IP_TSO | CSUM_INNER_IP | CSUM_INNER_IP_UDP | 2373 CSUM_INNER_IP_TCP | CSUM_INNER_IP_TSO | CSUM_IP6_UDP | 2374 CSUM_IP6_TCP | CSUM_IP6_TSO | CSUM_INNER_IP6_UDP | 2375 CSUM_INNER_IP6_TCP | CSUM_INNER_IP6_TSO; 2376 2377 M_ASSERTPKTHDR(m); 2378 2379 return (m->m_pkthdr.csum_flags & csum_flags); 2380 } 2381 2382 static inline bool 2383 needs_tso(struct mbuf *m) 2384 { 2385 const uint32_t csum_flags = CSUM_IP_TSO | CSUM_IP6_TSO | 2386 CSUM_INNER_IP_TSO | CSUM_INNER_IP6_TSO; 2387 2388 M_ASSERTPKTHDR(m); 2389 2390 return (m->m_pkthdr.csum_flags & csum_flags); 2391 } 2392 2393 static inline bool 2394 needs_vxlan_csum(struct mbuf *m) 2395 { 2396 2397 M_ASSERTPKTHDR(m); 2398 2399 return (m->m_pkthdr.csum_flags & CSUM_ENCAP_VXLAN); 2400 } 2401 2402 static inline bool 2403 needs_vxlan_tso(struct mbuf *m) 2404 { 2405 const uint32_t csum_flags = CSUM_ENCAP_VXLAN | CSUM_INNER_IP_TSO | 2406 CSUM_INNER_IP6_TSO; 2407 2408 M_ASSERTPKTHDR(m); 2409 2410 return ((m->m_pkthdr.csum_flags & csum_flags) != 0 && 2411 (m->m_pkthdr.csum_flags & csum_flags) != CSUM_ENCAP_VXLAN); 2412 } 2413 2414 #if defined(INET) || defined(INET6) 2415 static inline bool 2416 needs_inner_tcp_csum(struct mbuf *m) 2417 { 2418 const uint32_t csum_flags = CSUM_INNER_IP_TSO | CSUM_INNER_IP6_TSO; 2419 2420 M_ASSERTPKTHDR(m); 2421 2422 return (m->m_pkthdr.csum_flags & csum_flags); 2423 } 2424 #endif 2425 2426 static inline bool 2427 needs_l3_csum(struct mbuf *m) 2428 { 2429 const uint32_t csum_flags = CSUM_IP | CSUM_IP_TSO | CSUM_INNER_IP | 2430 CSUM_INNER_IP_TSO; 2431 2432 M_ASSERTPKTHDR(m); 2433 2434 return (m->m_pkthdr.csum_flags & csum_flags); 2435 } 2436 2437 static inline bool 2438 needs_outer_tcp_csum(struct mbuf *m) 2439 { 2440 const uint32_t csum_flags = CSUM_IP_TCP | CSUM_IP_TSO | CSUM_IP6_TCP | 2441 CSUM_IP6_TSO; 2442 2443 M_ASSERTPKTHDR(m); 2444 2445 return (m->m_pkthdr.csum_flags & csum_flags); 2446 } 2447 2448 #ifdef RATELIMIT 2449 static inline bool 2450 needs_outer_l4_csum(struct mbuf *m) 2451 { 2452 const uint32_t csum_flags = CSUM_IP_UDP | CSUM_IP_TCP | CSUM_IP_TSO | 2453 CSUM_IP6_UDP | CSUM_IP6_TCP | CSUM_IP6_TSO; 2454 2455 M_ASSERTPKTHDR(m); 2456 2457 return (m->m_pkthdr.csum_flags & csum_flags); 2458 } 2459 2460 static inline bool 2461 needs_outer_udp_csum(struct mbuf *m) 2462 { 2463 const uint32_t csum_flags = CSUM_IP_UDP | CSUM_IP6_UDP; 2464 2465 M_ASSERTPKTHDR(m); 2466 2467 return (m->m_pkthdr.csum_flags & csum_flags); 2468 } 2469 #endif 2470 2471 static inline bool 2472 needs_vlan_insertion(struct mbuf *m) 2473 { 2474 2475 M_ASSERTPKTHDR(m); 2476 2477 return (m->m_flags & M_VLANTAG); 2478 } 2479 2480 #if defined(INET) || defined(INET6) 2481 static void * 2482 m_advance(struct mbuf **pm, int *poffset, int len) 2483 { 2484 struct mbuf *m = *pm; 2485 int offset = *poffset; 2486 uintptr_t p = 0; 2487 2488 MPASS(len > 0); 2489 2490 for (;;) { 2491 if (offset + len < m->m_len) { 2492 offset += len; 2493 p = mtod(m, uintptr_t) + offset; 2494 
break; 2495 } 2496 len -= m->m_len - offset; 2497 m = m->m_next; 2498 offset = 0; 2499 MPASS(m != NULL); 2500 } 2501 *poffset = offset; 2502 *pm = m; 2503 return ((void *)p); 2504 } 2505 #endif 2506 2507 static inline int 2508 count_mbuf_ext_pgs(struct mbuf *m, int skip, vm_paddr_t *nextaddr) 2509 { 2510 vm_paddr_t paddr; 2511 int i, len, off, pglen, pgoff, seglen, segoff; 2512 int nsegs = 0; 2513 2514 M_ASSERTEXTPG(m); 2515 off = mtod(m, vm_offset_t); 2516 len = m->m_len; 2517 off += skip; 2518 len -= skip; 2519 2520 if (m->m_epg_hdrlen != 0) { 2521 if (off >= m->m_epg_hdrlen) { 2522 off -= m->m_epg_hdrlen; 2523 } else { 2524 seglen = m->m_epg_hdrlen - off; 2525 segoff = off; 2526 seglen = min(seglen, len); 2527 off = 0; 2528 len -= seglen; 2529 paddr = pmap_kextract( 2530 (vm_offset_t)&m->m_epg_hdr[segoff]); 2531 if (*nextaddr != paddr) 2532 nsegs++; 2533 *nextaddr = paddr + seglen; 2534 } 2535 } 2536 pgoff = m->m_epg_1st_off; 2537 for (i = 0; i < m->m_epg_npgs && len > 0; i++) { 2538 pglen = m_epg_pagelen(m, i, pgoff); 2539 if (off >= pglen) { 2540 off -= pglen; 2541 pgoff = 0; 2542 continue; 2543 } 2544 seglen = pglen - off; 2545 segoff = pgoff + off; 2546 off = 0; 2547 seglen = min(seglen, len); 2548 len -= seglen; 2549 paddr = m->m_epg_pa[i] + segoff; 2550 if (*nextaddr != paddr) 2551 nsegs++; 2552 *nextaddr = paddr + seglen; 2553 pgoff = 0; 2554 }; 2555 if (len != 0) { 2556 seglen = min(len, m->m_epg_trllen - off); 2557 len -= seglen; 2558 paddr = pmap_kextract((vm_offset_t)&m->m_epg_trail[off]); 2559 if (*nextaddr != paddr) 2560 nsegs++; 2561 *nextaddr = paddr + seglen; 2562 } 2563 2564 return (nsegs); 2565 } 2566 2567 2568 /* 2569 * Can deal with empty mbufs in the chain that have m_len = 0, but the chain 2570 * must have at least one mbuf that's not empty. It is possible for this 2571 * routine to return 0 if skip accounts for all the contents of the mbuf chain. 2572 */ 2573 static inline int 2574 count_mbuf_nsegs(struct mbuf *m, int skip, uint8_t *cflags) 2575 { 2576 vm_paddr_t nextaddr, paddr; 2577 vm_offset_t va; 2578 int len, nsegs; 2579 2580 M_ASSERTPKTHDR(m); 2581 MPASS(m->m_pkthdr.len > 0); 2582 MPASS(m->m_pkthdr.len >= skip); 2583 2584 nsegs = 0; 2585 nextaddr = 0; 2586 for (; m; m = m->m_next) { 2587 len = m->m_len; 2588 if (__predict_false(len == 0)) 2589 continue; 2590 if (skip >= len) { 2591 skip -= len; 2592 continue; 2593 } 2594 if ((m->m_flags & M_EXTPG) != 0) { 2595 *cflags |= MC_NOMAP; 2596 nsegs += count_mbuf_ext_pgs(m, skip, &nextaddr); 2597 skip = 0; 2598 continue; 2599 } 2600 va = mtod(m, vm_offset_t) + skip; 2601 len -= skip; 2602 skip = 0; 2603 paddr = pmap_kextract(va); 2604 nsegs += sglist_count((void *)(uintptr_t)va, len); 2605 if (paddr == nextaddr) 2606 nsegs--; 2607 nextaddr = pmap_kextract(va + len - 1) + 1; 2608 } 2609 2610 return (nsegs); 2611 } 2612 2613 /* 2614 * The maximum number of segments that can fit in a WR. 2615 */ 2616 static int 2617 max_nsegs_allowed(struct mbuf *m, bool vm_wr) 2618 { 2619 2620 if (vm_wr) { 2621 if (needs_tso(m)) 2622 return (TX_SGL_SEGS_VM_TSO); 2623 return (TX_SGL_SEGS_VM); 2624 } 2625 2626 if (needs_tso(m)) { 2627 if (needs_vxlan_tso(m)) 2628 return (TX_SGL_SEGS_VXLAN_TSO); 2629 else 2630 return (TX_SGL_SEGS_TSO); 2631 } 2632 2633 return (TX_SGL_SEGS); 2634 } 2635 2636 static struct timeval txerr_ratecheck = {0}; 2637 static const struct timeval txerr_interval = {3, 0}; 2638 2639 /* 2640 * Analyze the mbuf to determine its tx needs. 
The mbuf passed in may change: 2641 * a) caller can assume it's been freed if this function returns with an error. 2642 * b) it may get defragged up if the gather list is too long for the hardware. 2643 */ 2644 int 2645 parse_pkt(struct mbuf **mp, bool vm_wr) 2646 { 2647 struct mbuf *m0 = *mp, *m; 2648 int rc, nsegs, defragged = 0; 2649 struct ether_header *eh; 2650 #ifdef INET 2651 void *l3hdr; 2652 #endif 2653 #if defined(INET) || defined(INET6) 2654 int offset; 2655 struct tcphdr *tcp; 2656 #endif 2657 #if defined(KERN_TLS) || defined(RATELIMIT) 2658 struct m_snd_tag *mst; 2659 #endif 2660 uint16_t eh_type; 2661 uint8_t cflags; 2662 2663 cflags = 0; 2664 M_ASSERTPKTHDR(m0); 2665 if (__predict_false(m0->m_pkthdr.len < ETHER_HDR_LEN)) { 2666 rc = EINVAL; 2667 fail: 2668 m_freem(m0); 2669 *mp = NULL; 2670 return (rc); 2671 } 2672 restart: 2673 /* 2674 * First count the number of gather list segments in the payload. 2675 * Defrag the mbuf if nsegs exceeds the hardware limit. 2676 */ 2677 M_ASSERTPKTHDR(m0); 2678 MPASS(m0->m_pkthdr.len > 0); 2679 nsegs = count_mbuf_nsegs(m0, 0, &cflags); 2680 #if defined(KERN_TLS) || defined(RATELIMIT) 2681 if (m0->m_pkthdr.csum_flags & CSUM_SND_TAG) 2682 mst = m0->m_pkthdr.snd_tag; 2683 else 2684 mst = NULL; 2685 #endif 2686 #ifdef KERN_TLS 2687 if (mst != NULL && mst->sw->type == IF_SND_TAG_TYPE_TLS) { 2688 cflags |= MC_TLS; 2689 set_mbuf_cflags(m0, cflags); 2690 rc = t6_ktls_parse_pkt(m0); 2691 if (rc != 0) 2692 goto fail; 2693 return (EINPROGRESS); 2694 } 2695 #endif 2696 if (nsegs > max_nsegs_allowed(m0, vm_wr)) { 2697 if (defragged++ > 0) { 2698 rc = EFBIG; 2699 goto fail; 2700 } 2701 counter_u64_add(defrags, 1); 2702 if ((m = m_defrag(m0, M_NOWAIT)) == NULL) { 2703 rc = ENOMEM; 2704 goto fail; 2705 } 2706 *mp = m0 = m; /* update caller's copy after defrag */ 2707 goto restart; 2708 } 2709 2710 if (__predict_false(nsegs > 2 && m0->m_pkthdr.len <= MHLEN && 2711 !(cflags & MC_NOMAP))) { 2712 counter_u64_add(pullups, 1); 2713 m0 = m_pullup(m0, m0->m_pkthdr.len); 2714 if (m0 == NULL) { 2715 /* Should have left well enough alone. */ 2716 rc = EFBIG; 2717 goto fail; 2718 } 2719 *mp = m0; /* update caller's copy after pullup */ 2720 goto restart; 2721 } 2722 set_mbuf_nsegs(m0, nsegs); 2723 set_mbuf_cflags(m0, cflags); 2724 calculate_mbuf_len16(m0, vm_wr); 2725 2726 #ifdef RATELIMIT 2727 /* 2728 * Ethofld is limited to TCP and UDP for now, and only when L4 hw 2729 * checksumming is enabled. needs_outer_l4_csum happens to check for 2730 * all the right things. 
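 *
 * If the packet does not qualify, the rate-limit send tag is released
 * below and the packet falls back to the regular tx path instead of
 * being dropped.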
2731 */ 2732 if (__predict_false(needs_eo(mst) && !needs_outer_l4_csum(m0))) { 2733 m_snd_tag_rele(m0->m_pkthdr.snd_tag); 2734 m0->m_pkthdr.snd_tag = NULL; 2735 m0->m_pkthdr.csum_flags &= ~CSUM_SND_TAG; 2736 mst = NULL; 2737 } 2738 #endif 2739 2740 if (!needs_hwcsum(m0) 2741 #ifdef RATELIMIT 2742 && !needs_eo(mst) 2743 #endif 2744 ) 2745 return (0); 2746 2747 m = m0; 2748 eh = mtod(m, struct ether_header *); 2749 eh_type = ntohs(eh->ether_type); 2750 if (eh_type == ETHERTYPE_VLAN) { 2751 struct ether_vlan_header *evh = (void *)eh; 2752 2753 eh_type = ntohs(evh->evl_proto); 2754 m0->m_pkthdr.l2hlen = sizeof(*evh); 2755 } else 2756 m0->m_pkthdr.l2hlen = sizeof(*eh); 2757 2758 #if defined(INET) || defined(INET6) 2759 offset = 0; 2760 #ifdef INET 2761 l3hdr = m_advance(&m, &offset, m0->m_pkthdr.l2hlen); 2762 #else 2763 m_advance(&m, &offset, m0->m_pkthdr.l2hlen); 2764 #endif 2765 #endif 2766 2767 switch (eh_type) { 2768 #ifdef INET6 2769 case ETHERTYPE_IPV6: 2770 m0->m_pkthdr.l3hlen = sizeof(struct ip6_hdr); 2771 break; 2772 #endif 2773 #ifdef INET 2774 case ETHERTYPE_IP: 2775 { 2776 struct ip *ip = l3hdr; 2777 2778 if (needs_vxlan_csum(m0)) { 2779 /* Driver will do the outer IP hdr checksum. */ 2780 ip->ip_sum = 0; 2781 if (needs_vxlan_tso(m0)) { 2782 const uint16_t ipl = ip->ip_len; 2783 2784 ip->ip_len = 0; 2785 ip->ip_sum = ~in_cksum_hdr(ip); 2786 ip->ip_len = ipl; 2787 } else 2788 ip->ip_sum = in_cksum_hdr(ip); 2789 } 2790 m0->m_pkthdr.l3hlen = ip->ip_hl << 2; 2791 break; 2792 } 2793 #endif 2794 default: 2795 if (ratecheck(&txerr_ratecheck, &txerr_interval)) { 2796 log(LOG_ERR, "%s: ethertype 0x%04x unknown. " 2797 "if_cxgbe must be compiled with the same " 2798 "INET/INET6 options as the kernel.\n", __func__, 2799 eh_type); 2800 } 2801 rc = EINVAL; 2802 goto fail; 2803 } 2804 2805 #if defined(INET) || defined(INET6) 2806 if (needs_vxlan_csum(m0)) { 2807 m0->m_pkthdr.l4hlen = sizeof(struct udphdr); 2808 m0->m_pkthdr.l5hlen = sizeof(struct vxlan_header); 2809 2810 /* Inner headers. */ 2811 eh = m_advance(&m, &offset, m0->m_pkthdr.l3hlen + 2812 sizeof(struct udphdr) + sizeof(struct vxlan_header)); 2813 eh_type = ntohs(eh->ether_type); 2814 if (eh_type == ETHERTYPE_VLAN) { 2815 struct ether_vlan_header *evh = (void *)eh; 2816 2817 eh_type = ntohs(evh->evl_proto); 2818 m0->m_pkthdr.inner_l2hlen = sizeof(*evh); 2819 } else 2820 m0->m_pkthdr.inner_l2hlen = sizeof(*eh); 2821 #ifdef INET 2822 l3hdr = m_advance(&m, &offset, m0->m_pkthdr.inner_l2hlen); 2823 #else 2824 m_advance(&m, &offset, m0->m_pkthdr.inner_l2hlen); 2825 #endif 2826 2827 switch (eh_type) { 2828 #ifdef INET6 2829 case ETHERTYPE_IPV6: 2830 m0->m_pkthdr.inner_l3hlen = sizeof(struct ip6_hdr); 2831 break; 2832 #endif 2833 #ifdef INET 2834 case ETHERTYPE_IP: 2835 { 2836 struct ip *ip = l3hdr; 2837 2838 m0->m_pkthdr.inner_l3hlen = ip->ip_hl << 2; 2839 break; 2840 } 2841 #endif 2842 default: 2843 if (ratecheck(&txerr_ratecheck, &txerr_interval)) { 2844 log(LOG_ERR, "%s: VXLAN hw offload requested" 2845 "with unknown ethertype 0x%04x. 
if_cxgbe " 2846 "must be compiled with the same INET/INET6 " 2847 "options as the kernel.\n", __func__, 2848 eh_type); 2849 } 2850 rc = EINVAL; 2851 goto fail; 2852 } 2853 if (needs_inner_tcp_csum(m0)) { 2854 tcp = m_advance(&m, &offset, m0->m_pkthdr.inner_l3hlen); 2855 m0->m_pkthdr.inner_l4hlen = tcp->th_off * 4; 2856 } 2857 MPASS((m0->m_pkthdr.csum_flags & CSUM_SND_TAG) == 0); 2858 m0->m_pkthdr.csum_flags &= CSUM_INNER_IP6_UDP | 2859 CSUM_INNER_IP6_TCP | CSUM_INNER_IP6_TSO | CSUM_INNER_IP | 2860 CSUM_INNER_IP_UDP | CSUM_INNER_IP_TCP | CSUM_INNER_IP_TSO | 2861 CSUM_ENCAP_VXLAN; 2862 } 2863 2864 if (needs_outer_tcp_csum(m0)) { 2865 tcp = m_advance(&m, &offset, m0->m_pkthdr.l3hlen); 2866 m0->m_pkthdr.l4hlen = tcp->th_off * 4; 2867 #ifdef RATELIMIT 2868 if (tsclk >= 0 && *(uint32_t *)(tcp + 1) == ntohl(0x0101080a)) { 2869 set_mbuf_eo_tsclk_tsoff(m0, 2870 V_FW_ETH_TX_EO_WR_TSCLK(tsclk) | 2871 V_FW_ETH_TX_EO_WR_TSOFF(sizeof(*tcp) / 2 + 1)); 2872 } else 2873 set_mbuf_eo_tsclk_tsoff(m0, 0); 2874 } else if (needs_outer_udp_csum(m0)) { 2875 m0->m_pkthdr.l4hlen = sizeof(struct udphdr); 2876 #endif 2877 } 2878 #ifdef RATELIMIT 2879 if (needs_eo(mst)) { 2880 u_int immhdrs; 2881 2882 /* EO WRs have the headers in the WR and not the GL. */ 2883 immhdrs = m0->m_pkthdr.l2hlen + m0->m_pkthdr.l3hlen + 2884 m0->m_pkthdr.l4hlen; 2885 cflags = 0; 2886 nsegs = count_mbuf_nsegs(m0, immhdrs, &cflags); 2887 MPASS(cflags == mbuf_cflags(m0)); 2888 set_mbuf_eo_nsegs(m0, nsegs); 2889 set_mbuf_eo_len16(m0, 2890 txpkt_eo_len16(nsegs, immhdrs, needs_tso(m0))); 2891 rc = ethofld_transmit(mst->ifp, m0); 2892 if (rc != 0) 2893 goto fail; 2894 return (EINPROGRESS); 2895 } 2896 #endif 2897 #endif 2898 MPASS(m0 == *mp); 2899 return (0); 2900 } 2901 2902 void * 2903 start_wrq_wr(struct sge_wrq *wrq, int len16, struct wrq_cookie *cookie) 2904 { 2905 struct sge_eq *eq = &wrq->eq; 2906 struct adapter *sc = wrq->adapter; 2907 int ndesc, available; 2908 struct wrqe *wr; 2909 void *w; 2910 2911 MPASS(len16 > 0); 2912 ndesc = tx_len16_to_desc(len16); 2913 MPASS(ndesc > 0 && ndesc <= SGE_MAX_WR_NDESC); 2914 2915 EQ_LOCK(eq); 2916 2917 if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list)) 2918 drain_wrq_wr_list(sc, wrq); 2919 2920 if (!STAILQ_EMPTY(&wrq->wr_list)) { 2921 slowpath: 2922 EQ_UNLOCK(eq); 2923 wr = alloc_wrqe(len16 * 16, wrq); 2924 if (__predict_false(wr == NULL)) 2925 return (NULL); 2926 cookie->pidx = -1; 2927 cookie->ndesc = ndesc; 2928 return (&wr->wr); 2929 } 2930 2931 eq->cidx = read_hw_cidx(eq); 2932 if (eq->pidx == eq->cidx) 2933 available = eq->sidx - 1; 2934 else 2935 available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; 2936 if (available < ndesc) 2937 goto slowpath; 2938 2939 cookie->pidx = eq->pidx; 2940 cookie->ndesc = ndesc; 2941 TAILQ_INSERT_TAIL(&wrq->incomplete_wrs, cookie, link); 2942 2943 w = &eq->desc[eq->pidx]; 2944 IDXINCR(eq->pidx, ndesc, eq->sidx); 2945 if (__predict_false(cookie->pidx + ndesc > eq->sidx)) { 2946 w = &wrq->ss[0]; 2947 wrq->ss_pidx = cookie->pidx; 2948 wrq->ss_len = len16 * 16; 2949 } 2950 2951 EQ_UNLOCK(eq); 2952 2953 return (w); 2954 } 2955 2956 void 2957 commit_wrq_wr(struct sge_wrq *wrq, void *w, struct wrq_cookie *cookie) 2958 { 2959 struct sge_eq *eq = &wrq->eq; 2960 struct adapter *sc = wrq->adapter; 2961 int ndesc, pidx; 2962 struct wrq_cookie *prev, *next; 2963 2964 if (cookie->pidx == -1) { 2965 struct wrqe *wr = __containerof(w, struct wrqe, wr); 2966 2967 t4_wrq_tx(sc, wr); 2968 return; 2969 } 2970 2971 if (__predict_false(w == &wrq->ss[0])) { 2972 int n 
= (eq->sidx - wrq->ss_pidx) * EQ_ESIZE; 2973 2974 MPASS(wrq->ss_len > n); /* WR had better wrap around. */ 2975 bcopy(&wrq->ss[0], &eq->desc[wrq->ss_pidx], n); 2976 bcopy(&wrq->ss[n], &eq->desc[0], wrq->ss_len - n); 2977 wrq->tx_wrs_ss++; 2978 } else 2979 wrq->tx_wrs_direct++; 2980 2981 EQ_LOCK(eq); 2982 ndesc = cookie->ndesc; /* Can be more than SGE_MAX_WR_NDESC here. */ 2983 pidx = cookie->pidx; 2984 MPASS(pidx >= 0 && pidx < eq->sidx); 2985 prev = TAILQ_PREV(cookie, wrq_incomplete_wrs, link); 2986 next = TAILQ_NEXT(cookie, link); 2987 if (prev == NULL) { 2988 MPASS(pidx == eq->dbidx); 2989 if (next == NULL || ndesc >= 16) { 2990 int available; 2991 struct fw_eth_tx_pkt_wr *dst; /* any fw WR struct will do */ 2992 2993 /* 2994 * Note that the WR via which we'll request tx updates 2995 * is at pidx and not eq->pidx, which has moved on 2996 * already. 2997 */ 2998 dst = (void *)&eq->desc[pidx]; 2999 available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; 3000 if (available < eq->sidx / 4 && 3001 atomic_cmpset_int(&eq->equiq, 0, 1)) { 3002 /* 3003 * XXX: This is not 100% reliable with some 3004 * types of WRs. But this is a very unusual 3005 * situation for an ofld/ctrl queue anyway. 3006 */ 3007 dst->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ | 3008 F_FW_WR_EQUEQ); 3009 } 3010 3011 ring_eq_db(wrq->adapter, eq, ndesc); 3012 } else { 3013 MPASS(IDXDIFF(next->pidx, pidx, eq->sidx) == ndesc); 3014 next->pidx = pidx; 3015 next->ndesc += ndesc; 3016 } 3017 } else { 3018 MPASS(IDXDIFF(pidx, prev->pidx, eq->sidx) == prev->ndesc); 3019 prev->ndesc += ndesc; 3020 } 3021 TAILQ_REMOVE(&wrq->incomplete_wrs, cookie, link); 3022 3023 if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list)) 3024 drain_wrq_wr_list(sc, wrq); 3025 3026 #ifdef INVARIANTS 3027 if (TAILQ_EMPTY(&wrq->incomplete_wrs)) { 3028 /* Doorbell must have caught up to the pidx. */ 3029 MPASS(wrq->eq.pidx == wrq->eq.dbidx); 3030 } 3031 #endif 3032 EQ_UNLOCK(eq); 3033 } 3034 3035 static u_int 3036 can_resume_eth_tx(struct mp_ring *r) 3037 { 3038 struct sge_eq *eq = r->cookie; 3039 3040 return (total_available_tx_desc(eq) > eq->sidx / 8); 3041 } 3042 3043 static inline bool 3044 cannot_use_txpkts(struct mbuf *m) 3045 { 3046 /* maybe put a GL limit too, to avoid silliness? 
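 * TSO packets and raw/TLS work requests always get a WR of their own;
 * everything else is a candidate for coalescing into a txpkts WR in
 * eth_tx below.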
*/ 3047 3048 return (needs_tso(m) || (mbuf_cflags(m) & (MC_RAW_WR | MC_TLS)) != 0); 3049 } 3050 3051 static inline int 3052 discard_tx(struct sge_eq *eq) 3053 { 3054 3055 return ((eq->flags & (EQ_ENABLED | EQ_QFLUSH)) != EQ_ENABLED); 3056 } 3057 3058 static inline int 3059 wr_can_update_eq(void *p) 3060 { 3061 struct fw_eth_tx_pkts_wr *wr = p; 3062 3063 switch (G_FW_WR_OP(be32toh(wr->op_pkd))) { 3064 case FW_ULPTX_WR: 3065 case FW_ETH_TX_PKT_WR: 3066 case FW_ETH_TX_PKTS_WR: 3067 case FW_ETH_TX_PKTS2_WR: 3068 case FW_ETH_TX_PKT_VM_WR: 3069 case FW_ETH_TX_PKTS_VM_WR: 3070 return (1); 3071 default: 3072 return (0); 3073 } 3074 } 3075 3076 static inline void 3077 set_txupdate_flags(struct sge_txq *txq, u_int avail, 3078 struct fw_eth_tx_pkt_wr *wr) 3079 { 3080 struct sge_eq *eq = &txq->eq; 3081 struct txpkts *txp = &txq->txp; 3082 3083 if ((txp->npkt > 0 || avail < eq->sidx / 2) && 3084 atomic_cmpset_int(&eq->equiq, 0, 1)) { 3085 wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ | F_FW_WR_EQUIQ); 3086 eq->equeqidx = eq->pidx; 3087 } else if (IDXDIFF(eq->pidx, eq->equeqidx, eq->sidx) >= 32) { 3088 wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ); 3089 eq->equeqidx = eq->pidx; 3090 } 3091 } 3092 3093 #if defined(__i386__) || defined(__amd64__) 3094 extern uint64_t tsc_freq; 3095 #endif 3096 3097 static inline bool 3098 record_eth_tx_time(struct sge_txq *txq) 3099 { 3100 const uint64_t cycles = get_cyclecount(); 3101 const uint64_t last_tx = txq->last_tx; 3102 #if defined(__i386__) || defined(__amd64__) 3103 const uint64_t itg = tsc_freq * t4_tx_coalesce_gap / 1000000; 3104 #else 3105 const uint64_t itg = 0; 3106 #endif 3107 3108 MPASS(cycles >= last_tx); 3109 txq->last_tx = cycles; 3110 return (cycles - last_tx < itg); 3111 } 3112 3113 /* 3114 * r->items[cidx] to r->items[pidx], with a wraparound at r->size, are ready to 3115 * be consumed. Return the actual number consumed. 0 indicates a stall. 3116 */ 3117 static u_int 3118 eth_tx(struct mp_ring *r, u_int cidx, u_int pidx, bool *coalescing) 3119 { 3120 struct sge_txq *txq = r->cookie; 3121 if_t ifp = txq->ifp; 3122 struct sge_eq *eq = &txq->eq; 3123 struct txpkts *txp = &txq->txp; 3124 struct vi_info *vi = if_getsoftc(ifp); 3125 struct adapter *sc = vi->adapter; 3126 u_int total, remaining; /* # of packets */ 3127 u_int n, avail, dbdiff; /* # of hardware descriptors */ 3128 int i, rc; 3129 struct mbuf *m0; 3130 bool snd, recent_tx; 3131 void *wr; /* start of the last WR written to the ring */ 3132 3133 TXQ_LOCK_ASSERT_OWNED(txq); 3134 recent_tx = record_eth_tx_time(txq); 3135 3136 remaining = IDXDIFF(pidx, cidx, r->size); 3137 if (__predict_false(discard_tx(eq))) { 3138 for (i = 0; i < txp->npkt; i++) 3139 m_freem(txp->mb[i]); 3140 txp->npkt = 0; 3141 while (cidx != pidx) { 3142 m0 = r->items[cidx]; 3143 m_freem(m0); 3144 if (++cidx == r->size) 3145 cidx = 0; 3146 } 3147 reclaim_tx_descs(txq, eq->sidx); 3148 *coalescing = false; 3149 return (remaining); /* emptied */ 3150 } 3151 3152 /* How many hardware descriptors do we have readily available. 
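 * One descriptor is always held back so that pidx == cidx unambiguously
 * means "empty" rather than "full".  A worked example with hypothetical
 * values: sidx = 512, pidx = 100, cidx = 40 gives
 * IDXDIFF(40, 100, 512) = 452, i.e. 451 usable descriptors; an empty
 * ring (pidx == cidx) yields the maximum of sidx - 1 = 511.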
*/ 3153 if (eq->pidx == eq->cidx) 3154 avail = eq->sidx - 1; 3155 else 3156 avail = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; 3157 3158 total = 0; 3159 if (remaining == 0) { 3160 txp->score = 0; 3161 txq->txpkts_flush++; 3162 goto send_txpkts; 3163 } 3164 3165 dbdiff = 0; 3166 MPASS(remaining > 0); 3167 while (remaining > 0) { 3168 m0 = r->items[cidx]; 3169 M_ASSERTPKTHDR(m0); 3170 MPASS(m0->m_nextpkt == NULL); 3171 3172 if (avail < 2 * SGE_MAX_WR_NDESC) 3173 avail += reclaim_tx_descs(txq, 64); 3174 3175 if (t4_tx_coalesce == 0 && txp->npkt == 0) 3176 goto skip_coalescing; 3177 if (cannot_use_txpkts(m0)) 3178 txp->score = 0; 3179 else if (recent_tx) { 3180 if (++txp->score == 0) 3181 txp->score = UINT8_MAX; 3182 } else 3183 txp->score = 1; 3184 if (txp->npkt > 0 || remaining > 1 || 3185 txp->score >= t4_tx_coalesce_pkts || 3186 atomic_load_int(&txq->eq.equiq) != 0) { 3187 if (vi->flags & TX_USES_VM_WR) 3188 rc = add_to_txpkts_vf(sc, txq, m0, avail, &snd); 3189 else 3190 rc = add_to_txpkts_pf(sc, txq, m0, avail, &snd); 3191 } else { 3192 snd = false; 3193 rc = EINVAL; 3194 } 3195 if (snd) { 3196 MPASS(txp->npkt > 0); 3197 for (i = 0; i < txp->npkt; i++) 3198 ETHER_BPF_MTAP(ifp, txp->mb[i]); 3199 if (txp->npkt > 1) { 3200 MPASS(avail >= tx_len16_to_desc(txp->len16)); 3201 if (vi->flags & TX_USES_VM_WR) 3202 n = write_txpkts_vm_wr(sc, txq); 3203 else 3204 n = write_txpkts_wr(sc, txq); 3205 } else { 3206 MPASS(avail >= 3207 tx_len16_to_desc(mbuf_len16(txp->mb[0]))); 3208 if (vi->flags & TX_USES_VM_WR) 3209 n = write_txpkt_vm_wr(sc, txq, 3210 txp->mb[0]); 3211 else 3212 n = write_txpkt_wr(sc, txq, txp->mb[0], 3213 avail); 3214 } 3215 MPASS(n <= SGE_MAX_WR_NDESC); 3216 avail -= n; 3217 dbdiff += n; 3218 wr = &eq->desc[eq->pidx]; 3219 IDXINCR(eq->pidx, n, eq->sidx); 3220 txp->npkt = 0; /* emptied */ 3221 } 3222 if (rc == 0) { 3223 /* m0 was coalesced into txq->txpkts. */ 3224 goto next_mbuf; 3225 } 3226 if (rc == EAGAIN) { 3227 /* 3228 * m0 is suitable for tx coalescing but could not be 3229 * combined with the existing txq->txpkts, which has now 3230 * been transmitted. Start a new txpkts with m0. 
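 * The continue below retries the same ring slot: cidx and remaining are
 * not advanced, so this mbuf gets another pass through the loop and can
 * start a fresh txpkts (or be written out on its own).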
3231 */ 3232 MPASS(snd); 3233 MPASS(txp->npkt == 0); 3234 continue; 3235 } 3236 3237 MPASS(rc != 0 && rc != EAGAIN); 3238 MPASS(txp->npkt == 0); 3239 skip_coalescing: 3240 n = tx_len16_to_desc(mbuf_len16(m0)); 3241 if (__predict_false(avail < n)) { 3242 avail += reclaim_tx_descs(txq, min(n, 32)); 3243 if (avail < n) 3244 break; /* out of descriptors */ 3245 } 3246 3247 wr = &eq->desc[eq->pidx]; 3248 if (mbuf_cflags(m0) & MC_RAW_WR) { 3249 n = write_raw_wr(txq, wr, m0, avail); 3250 #ifdef KERN_TLS 3251 } else if (mbuf_cflags(m0) & MC_TLS) { 3252 ETHER_BPF_MTAP(ifp, m0); 3253 n = t6_ktls_write_wr(txq, wr, m0, avail); 3254 #endif 3255 } else { 3256 ETHER_BPF_MTAP(ifp, m0); 3257 if (vi->flags & TX_USES_VM_WR) 3258 n = write_txpkt_vm_wr(sc, txq, m0); 3259 else 3260 n = write_txpkt_wr(sc, txq, m0, avail); 3261 } 3262 MPASS(n >= 1 && n <= avail); 3263 if (!(mbuf_cflags(m0) & MC_TLS)) 3264 MPASS(n <= SGE_MAX_WR_NDESC); 3265 3266 avail -= n; 3267 dbdiff += n; 3268 IDXINCR(eq->pidx, n, eq->sidx); 3269 3270 if (dbdiff >= 512 / EQ_ESIZE) { /* X_FETCHBURSTMAX_512B */ 3271 if (wr_can_update_eq(wr)) 3272 set_txupdate_flags(txq, avail, wr); 3273 ring_eq_db(sc, eq, dbdiff); 3274 avail += reclaim_tx_descs(txq, 32); 3275 dbdiff = 0; 3276 } 3277 next_mbuf: 3278 total++; 3279 remaining--; 3280 if (__predict_false(++cidx == r->size)) 3281 cidx = 0; 3282 } 3283 if (dbdiff != 0) { 3284 if (wr_can_update_eq(wr)) 3285 set_txupdate_flags(txq, avail, wr); 3286 ring_eq_db(sc, eq, dbdiff); 3287 reclaim_tx_descs(txq, 32); 3288 } else if (eq->pidx == eq->cidx && txp->npkt > 0 && 3289 atomic_load_int(&txq->eq.equiq) == 0) { 3290 /* 3291 * If nothing was submitted to the chip for tx (it was coalesced 3292 * into txpkts instead) and there is no tx update outstanding 3293 * then we need to send txpkts now. 
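 * "No tx update outstanding" means eq->equiq is clear, i.e. no
 * previously written WR asked the hardware for an egress update (see
 * set_txupdate_flags), so there is no update on the way that would get
 * the packets held in txp flushed later.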
3294 */ 3295 send_txpkts: 3296 MPASS(txp->npkt > 0); 3297 for (i = 0; i < txp->npkt; i++) 3298 ETHER_BPF_MTAP(ifp, txp->mb[i]); 3299 if (txp->npkt > 1) { 3300 MPASS(avail >= tx_len16_to_desc(txp->len16)); 3301 if (vi->flags & TX_USES_VM_WR) 3302 n = write_txpkts_vm_wr(sc, txq); 3303 else 3304 n = write_txpkts_wr(sc, txq); 3305 } else { 3306 MPASS(avail >= 3307 tx_len16_to_desc(mbuf_len16(txp->mb[0]))); 3308 if (vi->flags & TX_USES_VM_WR) 3309 n = write_txpkt_vm_wr(sc, txq, txp->mb[0]); 3310 else 3311 n = write_txpkt_wr(sc, txq, txp->mb[0], avail); 3312 } 3313 MPASS(n <= SGE_MAX_WR_NDESC); 3314 wr = &eq->desc[eq->pidx]; 3315 IDXINCR(eq->pidx, n, eq->sidx); 3316 txp->npkt = 0; /* emptied */ 3317 3318 MPASS(wr_can_update_eq(wr)); 3319 set_txupdate_flags(txq, avail - n, wr); 3320 ring_eq_db(sc, eq, n); 3321 reclaim_tx_descs(txq, 32); 3322 } 3323 *coalescing = txp->npkt > 0; 3324 3325 return (total); 3326 } 3327 3328 static inline void 3329 init_iq(struct sge_iq *iq, struct adapter *sc, int tmr_idx, int pktc_idx, 3330 int qsize, int intr_idx, int cong, int qtype) 3331 { 3332 3333 KASSERT(tmr_idx >= 0 && tmr_idx < SGE_NTIMERS, 3334 ("%s: bad tmr_idx %d", __func__, tmr_idx)); 3335 KASSERT(pktc_idx < SGE_NCOUNTERS, /* -ve is ok, means don't use */ 3336 ("%s: bad pktc_idx %d", __func__, pktc_idx)); 3337 KASSERT(intr_idx >= -1 && intr_idx < sc->intr_count, 3338 ("%s: bad intr_idx %d", __func__, intr_idx)); 3339 KASSERT(qtype == FW_IQ_IQTYPE_OTHER || qtype == FW_IQ_IQTYPE_NIC || 3340 qtype == FW_IQ_IQTYPE_OFLD, ("%s: bad qtype %d", __func__, qtype)); 3341 3342 iq->flags = 0; 3343 iq->state = IQS_DISABLED; 3344 iq->adapter = sc; 3345 iq->qtype = qtype; 3346 iq->intr_params = V_QINTR_TIMER_IDX(tmr_idx); 3347 iq->intr_pktc_idx = SGE_NCOUNTERS - 1; 3348 if (pktc_idx >= 0) { 3349 iq->intr_params |= F_QINTR_CNT_EN; 3350 iq->intr_pktc_idx = pktc_idx; 3351 } 3352 iq->qsize = roundup2(qsize, 16); /* See FW_IQ_CMD/iqsize */ 3353 iq->sidx = iq->qsize - sc->params.sge.spg_len / IQ_ESIZE; 3354 iq->intr_idx = intr_idx; 3355 iq->cong_drop = cong; 3356 } 3357 3358 static inline void 3359 init_fl(struct adapter *sc, struct sge_fl *fl, int qsize, int maxp, char *name) 3360 { 3361 struct sge_params *sp = &sc->params.sge; 3362 3363 fl->qsize = qsize; 3364 fl->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE; 3365 strlcpy(fl->lockname, name, sizeof(fl->lockname)); 3366 mtx_init(&fl->fl_lock, fl->lockname, NULL, MTX_DEF); 3367 if (sc->flags & BUF_PACKING_OK && 3368 ((!is_t4(sc) && buffer_packing) || /* T5+: enabled unless 0 */ 3369 (is_t4(sc) && buffer_packing == 1)))/* T4: disabled unless 1 */ 3370 fl->flags |= FL_BUF_PACKING; 3371 fl->zidx = find_refill_source(sc, maxp, fl->flags & FL_BUF_PACKING); 3372 fl->safe_zidx = sc->sge.safe_zidx; 3373 if (fl->flags & FL_BUF_PACKING) { 3374 fl->lowat = roundup2(sp->fl_starve_threshold2, 8); 3375 fl->buf_boundary = sp->pack_boundary; 3376 } else { 3377 fl->lowat = roundup2(sp->fl_starve_threshold, 8); 3378 fl->buf_boundary = 16; 3379 } 3380 if (fl_pad && fl->buf_boundary < sp->pad_boundary) 3381 fl->buf_boundary = sp->pad_boundary; 3382 } 3383 3384 static inline void 3385 init_eq(struct adapter *sc, struct sge_eq *eq, int eqtype, int qsize, 3386 uint8_t port_id, struct sge_iq *iq, char *name) 3387 { 3388 KASSERT(eqtype >= EQ_CTRL && eqtype <= EQ_OFLD, 3389 ("%s: bad qtype %d", __func__, eqtype)); 3390 3391 eq->type = eqtype; 3392 eq->port_id = port_id; 3393 eq->tx_chan = sc->port[port_id]->tx_chan; 3394 eq->iq = iq; 3395 eq->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE; 3396 
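	/*
	 * The sidx computed above excludes the status page, which occupies
	 * the tail of the ring: spg_len / EQ_ESIZE entries are reserved for
	 * it.  With hypothetical values of a 1024-entry ring, 64-byte
	 * descriptors, and a 64-byte status page, sidx works out to 1023.
	 */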
strlcpy(eq->lockname, name, sizeof(eq->lockname)); 3397 mtx_init(&eq->eq_lock, eq->lockname, NULL, MTX_DEF); 3398 } 3399 3400 int 3401 alloc_ring(struct adapter *sc, size_t len, bus_dma_tag_t *tag, 3402 bus_dmamap_t *map, bus_addr_t *pa, void **va) 3403 { 3404 int rc; 3405 3406 rc = bus_dma_tag_create(sc->dmat, 512, 0, BUS_SPACE_MAXADDR, 3407 BUS_SPACE_MAXADDR, NULL, NULL, len, 1, len, 0, NULL, NULL, tag); 3408 if (rc != 0) { 3409 CH_ERR(sc, "cannot allocate DMA tag: %d\n", rc); 3410 goto done; 3411 } 3412 3413 rc = bus_dmamem_alloc(*tag, va, 3414 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, map); 3415 if (rc != 0) { 3416 CH_ERR(sc, "cannot allocate DMA memory: %d\n", rc); 3417 goto done; 3418 } 3419 3420 rc = bus_dmamap_load(*tag, *map, *va, len, oneseg_dma_callback, pa, 0); 3421 if (rc != 0) { 3422 CH_ERR(sc, "cannot load DMA map: %d\n", rc); 3423 goto done; 3424 } 3425 done: 3426 if (rc) 3427 free_ring(sc, *tag, *map, *pa, *va); 3428 3429 return (rc); 3430 } 3431 3432 int 3433 free_ring(struct adapter *sc, bus_dma_tag_t tag, bus_dmamap_t map, 3434 bus_addr_t pa, void *va) 3435 { 3436 if (pa) 3437 bus_dmamap_unload(tag, map); 3438 if (va) 3439 bus_dmamem_free(tag, va, map); 3440 if (tag) 3441 bus_dma_tag_destroy(tag); 3442 3443 return (0); 3444 } 3445 3446 /* 3447 * Allocates the software resources (mainly memory and sysctl nodes) for an 3448 * ingress queue and an optional freelist. 3449 * 3450 * Sets IQ_SW_ALLOCATED and returns 0 on success. 3451 */ 3452 static int 3453 alloc_iq_fl(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl, 3454 struct sysctl_ctx_list *ctx, struct sysctl_oid *oid) 3455 { 3456 int rc; 3457 size_t len; 3458 struct adapter *sc = vi->adapter; 3459 3460 MPASS(!(iq->flags & IQ_SW_ALLOCATED)); 3461 3462 len = iq->qsize * IQ_ESIZE; 3463 rc = alloc_ring(sc, len, &iq->desc_tag, &iq->desc_map, &iq->ba, 3464 (void **)&iq->desc); 3465 if (rc != 0) 3466 return (rc); 3467 3468 if (fl) { 3469 len = fl->qsize * EQ_ESIZE; 3470 rc = alloc_ring(sc, len, &fl->desc_tag, &fl->desc_map, 3471 &fl->ba, (void **)&fl->desc); 3472 if (rc) { 3473 free_ring(sc, iq->desc_tag, iq->desc_map, iq->ba, 3474 iq->desc); 3475 return (rc); 3476 } 3477 3478 /* Allocate space for one software descriptor per buffer. */ 3479 fl->sdesc = malloc(fl->sidx * 8 * sizeof(struct fl_sdesc), 3480 M_CXGBE, M_ZERO | M_WAITOK); 3481 3482 add_fl_sysctls(sc, ctx, oid, fl); 3483 iq->flags |= IQ_HAS_FL; 3484 } 3485 add_iq_sysctls(ctx, oid, iq); 3486 iq->flags |= IQ_SW_ALLOCATED; 3487 3488 return (0); 3489 } 3490 3491 /* 3492 * Frees all software resources (memory and locks) associated with an ingress 3493 * queue and an optional freelist. 3494 */ 3495 static void 3496 free_iq_fl(struct adapter *sc, struct sge_iq *iq, struct sge_fl *fl) 3497 { 3498 MPASS(iq->flags & IQ_SW_ALLOCATED); 3499 3500 if (fl) { 3501 MPASS(iq->flags & IQ_HAS_FL); 3502 free_ring(sc, fl->desc_tag, fl->desc_map, fl->ba, fl->desc); 3503 free_fl_buffers(sc, fl); 3504 free(fl->sdesc, M_CXGBE); 3505 mtx_destroy(&fl->fl_lock); 3506 bzero(fl, sizeof(*fl)); 3507 } 3508 free_ring(sc, iq->desc_tag, iq->desc_map, iq->ba, iq->desc); 3509 bzero(iq, sizeof(*iq)); 3510 } 3511 3512 /* 3513 * Allocates a hardware ingress queue and an optional freelist that will be 3514 * associated with it. 3515 * 3516 * Returns errno on failure. Resources allocated up to that point may still be 3517 * allocated. Caller is responsible for cleanup in case this function fails. 
3518 */ 3519 static int 3520 alloc_iq_fl_hwq(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl) 3521 { 3522 int rc, cntxt_id, cong_map; 3523 struct fw_iq_cmd c; 3524 struct adapter *sc = vi->adapter; 3525 struct port_info *pi = vi->pi; 3526 __be32 v = 0; 3527 3528 MPASS (!(iq->flags & IQ_HW_ALLOCATED)); 3529 3530 bzero(&c, sizeof(c)); 3531 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | 3532 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) | 3533 V_FW_IQ_CMD_VFN(0)); 3534 3535 c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART | 3536 FW_LEN16(c)); 3537 3538 /* Special handling for firmware event queue */ 3539 if (iq == &sc->sge.fwq) 3540 v |= F_FW_IQ_CMD_IQASYNCH; 3541 3542 if (iq->intr_idx < 0) { 3543 /* Forwarded interrupts, all headed to fwq */ 3544 v |= F_FW_IQ_CMD_IQANDST; 3545 v |= V_FW_IQ_CMD_IQANDSTINDEX(sc->sge.fwq.cntxt_id); 3546 } else { 3547 KASSERT(iq->intr_idx < sc->intr_count, 3548 ("%s: invalid direct intr_idx %d", __func__, iq->intr_idx)); 3549 v |= V_FW_IQ_CMD_IQANDSTINDEX(iq->intr_idx); 3550 } 3551 3552 bzero(iq->desc, iq->qsize * IQ_ESIZE); 3553 c.type_to_iqandstindex = htobe32(v | 3554 V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | 3555 V_FW_IQ_CMD_VIID(vi->viid) | 3556 V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT)); 3557 c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) | 3558 F_FW_IQ_CMD_IQGTSMODE | 3559 V_FW_IQ_CMD_IQINTCNTTHRESH(iq->intr_pktc_idx) | 3560 V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4)); 3561 c.iqsize = htobe16(iq->qsize); 3562 c.iqaddr = htobe64(iq->ba); 3563 c.iqns_to_fl0congen = htobe32(V_FW_IQ_CMD_IQTYPE(iq->qtype)); 3564 if (iq->cong_drop != -1) { 3565 cong_map = iq->qtype == IQ_ETH ? pi->rx_e_chan_map : 0; 3566 c.iqns_to_fl0congen |= htobe32(F_FW_IQ_CMD_IQFLINTCONGEN); 3567 } 3568 3569 if (fl) { 3570 bzero(fl->desc, fl->sidx * EQ_ESIZE + sc->params.sge.spg_len); 3571 c.iqns_to_fl0congen |= 3572 htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) | 3573 F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO | 3574 (fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0) | 3575 (fl->flags & FL_BUF_PACKING ? F_FW_IQ_CMD_FL0PACKEN : 3576 0)); 3577 if (iq->cong_drop != -1) { 3578 c.iqns_to_fl0congen |= 3579 htobe32(V_FW_IQ_CMD_FL0CNGCHMAP(cong_map) | 3580 F_FW_IQ_CMD_FL0CONGCIF | 3581 F_FW_IQ_CMD_FL0CONGEN); 3582 } 3583 c.fl0dcaen_to_fl0cidxfthresh = 3584 htobe16(V_FW_IQ_CMD_FL0FBMIN(chip_id(sc) <= CHELSIO_T5 ? 3585 X_FETCHBURSTMIN_128B : X_FETCHBURSTMIN_64B_T6) | 3586 V_FW_IQ_CMD_FL0FBMAX(chip_id(sc) <= CHELSIO_T5 ? 
3587 X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B)); 3588 c.fl0size = htobe16(fl->qsize); 3589 c.fl0addr = htobe64(fl->ba); 3590 } 3591 3592 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 3593 if (rc != 0) { 3594 CH_ERR(sc, "failed to create hw ingress queue: %d\n", rc); 3595 return (rc); 3596 } 3597 3598 iq->cidx = 0; 3599 iq->gen = F_RSPD_GEN; 3600 iq->cntxt_id = be16toh(c.iqid); 3601 iq->abs_id = be16toh(c.physiqid); 3602 3603 cntxt_id = iq->cntxt_id - sc->sge.iq_start; 3604 if (cntxt_id >= sc->sge.iqmap_sz) { 3605 panic ("%s: iq->cntxt_id (%d) more than the max (%d)", __func__, 3606 cntxt_id, sc->sge.iqmap_sz - 1); 3607 } 3608 sc->sge.iqmap[cntxt_id] = iq; 3609 3610 if (fl) { 3611 u_int qid; 3612 #ifdef INVARIANTS 3613 int i; 3614 3615 MPASS(!(fl->flags & FL_BUF_RESUME)); 3616 for (i = 0; i < fl->sidx * 8; i++) 3617 MPASS(fl->sdesc[i].cl == NULL); 3618 #endif 3619 fl->cntxt_id = be16toh(c.fl0id); 3620 fl->pidx = fl->cidx = fl->hw_cidx = fl->dbidx = 0; 3621 fl->rx_offset = 0; 3622 fl->flags &= ~(FL_STARVING | FL_DOOMED); 3623 3624 cntxt_id = fl->cntxt_id - sc->sge.eq_start; 3625 if (cntxt_id >= sc->sge.eqmap_sz) { 3626 panic("%s: fl->cntxt_id (%d) more than the max (%d)", 3627 __func__, cntxt_id, sc->sge.eqmap_sz - 1); 3628 } 3629 sc->sge.eqmap[cntxt_id] = (void *)fl; 3630 3631 qid = fl->cntxt_id; 3632 if (isset(&sc->doorbells, DOORBELL_UDB)) { 3633 uint32_t s_qpp = sc->params.sge.eq_s_qpp; 3634 uint32_t mask = (1 << s_qpp) - 1; 3635 volatile uint8_t *udb; 3636 3637 udb = sc->udbs_base + UDBS_DB_OFFSET; 3638 udb += (qid >> s_qpp) << PAGE_SHIFT; 3639 qid &= mask; 3640 if (qid < PAGE_SIZE / UDBS_SEG_SIZE) { 3641 udb += qid << UDBS_SEG_SHIFT; 3642 qid = 0; 3643 } 3644 fl->udb = (volatile void *)udb; 3645 } 3646 fl->dbval = V_QID(qid) | sc->chip_params->sge_fl_db; 3647 3648 FL_LOCK(fl); 3649 /* Enough to make sure the SGE doesn't think it's starved */ 3650 refill_fl(sc, fl, fl->lowat); 3651 FL_UNLOCK(fl); 3652 } 3653 3654 if (chip_id(sc) >= CHELSIO_T5 && !(sc->flags & IS_VF) && 3655 iq->cong_drop != -1) { 3656 t4_sge_set_conm_context(sc, iq->cntxt_id, iq->cong_drop, 3657 cong_map); 3658 } 3659 3660 /* Enable IQ interrupts */ 3661 atomic_store_rel_int(&iq->state, IQS_IDLE); 3662 t4_write_reg(sc, sc->sge_gts_reg, V_SEINTARM(iq->intr_params) | 3663 V_INGRESSQID(iq->cntxt_id)); 3664 3665 iq->flags |= IQ_HW_ALLOCATED; 3666 3667 return (0); 3668 } 3669 3670 static int 3671 free_iq_fl_hwq(struct adapter *sc, struct sge_iq *iq, struct sge_fl *fl) 3672 { 3673 int rc; 3674 3675 MPASS(iq->flags & IQ_HW_ALLOCATED); 3676 rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP, 3677 iq->cntxt_id, fl ? 
fl->cntxt_id : 0xffff, 0xffff); 3678 if (rc != 0) { 3679 CH_ERR(sc, "failed to free iq %p: %d\n", iq, rc); 3680 return (rc); 3681 } 3682 iq->flags &= ~IQ_HW_ALLOCATED; 3683 3684 return (0); 3685 } 3686 3687 static void 3688 add_iq_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *oid, 3689 struct sge_iq *iq) 3690 { 3691 struct sysctl_oid_list *children; 3692 3693 if (ctx == NULL || oid == NULL) 3694 return; 3695 3696 children = SYSCTL_CHILDREN(oid); 3697 SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD, &iq->ba, 3698 "bus address of descriptor ring"); 3699 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL, 3700 iq->qsize * IQ_ESIZE, "descriptor ring size in bytes"); 3701 SYSCTL_ADD_U16(ctx, children, OID_AUTO, "abs_id", CTLFLAG_RD, 3702 &iq->abs_id, 0, "absolute id of the queue"); 3703 SYSCTL_ADD_U16(ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 3704 &iq->cntxt_id, 0, "SGE context id of the queue"); 3705 SYSCTL_ADD_U16(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, &iq->cidx, 3706 0, "consumer index"); 3707 } 3708 3709 static void 3710 add_fl_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx, 3711 struct sysctl_oid *oid, struct sge_fl *fl) 3712 { 3713 struct sysctl_oid_list *children; 3714 3715 if (ctx == NULL || oid == NULL) 3716 return; 3717 3718 children = SYSCTL_CHILDREN(oid); 3719 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fl", 3720 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "freelist"); 3721 children = SYSCTL_CHILDREN(oid); 3722 3723 SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD, 3724 &fl->ba, "bus address of descriptor ring"); 3725 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL, 3726 fl->sidx * EQ_ESIZE + sc->params.sge.spg_len, 3727 "desc ring size in bytes"); 3728 SYSCTL_ADD_U16(ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 3729 &fl->cntxt_id, 0, "SGE context id of the freelist"); 3730 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "padding", CTLFLAG_RD, NULL, 3731 fl_pad ? 1 : 0, "padding enabled"); 3732 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "packing", CTLFLAG_RD, NULL, 3733 fl->flags & FL_BUF_PACKING ? 1 : 0, "packing enabled"); 3734 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, &fl->cidx, 3735 0, "consumer index"); 3736 if (fl->flags & FL_BUF_PACKING) { 3737 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_offset", 3738 CTLFLAG_RD, &fl->rx_offset, 0, "packing rx offset"); 3739 } 3740 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, &fl->pidx, 3741 0, "producer index"); 3742 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_allocated", 3743 CTLFLAG_RD, &fl->cl_allocated, "# of clusters allocated"); 3744 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_recycled", 3745 CTLFLAG_RD, &fl->cl_recycled, "# of clusters recycled"); 3746 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_fast_recycled", 3747 CTLFLAG_RD, &fl->cl_fast_recycled, "# of clusters recycled (fast)"); 3748 } 3749 3750 /* 3751 * Idempotent. 3752 */ 3753 static int 3754 alloc_fwq(struct adapter *sc) 3755 { 3756 int rc, intr_idx; 3757 struct sge_iq *fwq = &sc->sge.fwq; 3758 struct vi_info *vi = &sc->port[0]->vi[0]; 3759 3760 if (!(fwq->flags & IQ_SW_ALLOCATED)) { 3761 MPASS(!(fwq->flags & IQ_HW_ALLOCATED)); 3762 3763 if (sc->flags & IS_VF) 3764 intr_idx = 0; 3765 else 3766 intr_idx = sc->intr_count > 1 ? 
1 : 0; 3767 init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE, intr_idx, -1, IQ_OTHER); 3768 rc = alloc_iq_fl(vi, fwq, NULL, &sc->ctx, sc->fwq_oid); 3769 if (rc != 0) { 3770 CH_ERR(sc, "failed to allocate fwq: %d\n", rc); 3771 return (rc); 3772 } 3773 MPASS(fwq->flags & IQ_SW_ALLOCATED); 3774 } 3775 3776 if (!(fwq->flags & IQ_HW_ALLOCATED)) { 3777 MPASS(fwq->flags & IQ_SW_ALLOCATED); 3778 3779 rc = alloc_iq_fl_hwq(vi, fwq, NULL); 3780 if (rc != 0) { 3781 CH_ERR(sc, "failed to create hw fwq: %d\n", rc); 3782 return (rc); 3783 } 3784 MPASS(fwq->flags & IQ_HW_ALLOCATED); 3785 } 3786 3787 return (0); 3788 } 3789 3790 /* 3791 * Idempotent. 3792 */ 3793 static void 3794 free_fwq(struct adapter *sc) 3795 { 3796 struct sge_iq *fwq = &sc->sge.fwq; 3797 3798 if (fwq->flags & IQ_HW_ALLOCATED) { 3799 MPASS(fwq->flags & IQ_SW_ALLOCATED); 3800 free_iq_fl_hwq(sc, fwq, NULL); 3801 MPASS(!(fwq->flags & IQ_HW_ALLOCATED)); 3802 } 3803 3804 if (fwq->flags & IQ_SW_ALLOCATED) { 3805 MPASS(!(fwq->flags & IQ_HW_ALLOCATED)); 3806 free_iq_fl(sc, fwq, NULL); 3807 MPASS(!(fwq->flags & IQ_SW_ALLOCATED)); 3808 } 3809 } 3810 3811 /* 3812 * Idempotent. 3813 */ 3814 static int 3815 alloc_ctrlq(struct adapter *sc, int idx) 3816 { 3817 int rc; 3818 char name[16]; 3819 struct sysctl_oid *oid; 3820 struct sge_wrq *ctrlq = &sc->sge.ctrlq[idx]; 3821 3822 MPASS(idx < sc->params.nports); 3823 3824 if (!(ctrlq->eq.flags & EQ_SW_ALLOCATED)) { 3825 MPASS(!(ctrlq->eq.flags & EQ_HW_ALLOCATED)); 3826 3827 snprintf(name, sizeof(name), "%d", idx); 3828 oid = SYSCTL_ADD_NODE(&sc->ctx, SYSCTL_CHILDREN(sc->ctrlq_oid), 3829 OID_AUTO, name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 3830 "ctrl queue"); 3831 3832 snprintf(name, sizeof(name), "%s ctrlq%d", 3833 device_get_nameunit(sc->dev), idx); 3834 init_eq(sc, &ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, idx, 3835 &sc->sge.fwq, name); 3836 rc = alloc_wrq(sc, NULL, ctrlq, &sc->ctx, oid); 3837 if (rc != 0) { 3838 CH_ERR(sc, "failed to allocate ctrlq%d: %d\n", idx, rc); 3839 sysctl_remove_oid(oid, 1, 1); 3840 return (rc); 3841 } 3842 MPASS(ctrlq->eq.flags & EQ_SW_ALLOCATED); 3843 } 3844 3845 if (!(ctrlq->eq.flags & EQ_HW_ALLOCATED)) { 3846 MPASS(ctrlq->eq.flags & EQ_SW_ALLOCATED); 3847 3848 rc = alloc_eq_hwq(sc, NULL, &ctrlq->eq); 3849 if (rc != 0) { 3850 CH_ERR(sc, "failed to create hw ctrlq%d: %d\n", idx, rc); 3851 return (rc); 3852 } 3853 MPASS(ctrlq->eq.flags & EQ_HW_ALLOCATED); 3854 } 3855 3856 return (0); 3857 } 3858 3859 /* 3860 * Idempotent. 3861 */ 3862 static void 3863 free_ctrlq(struct adapter *sc, int idx) 3864 { 3865 struct sge_wrq *ctrlq = &sc->sge.ctrlq[idx]; 3866 3867 if (ctrlq->eq.flags & EQ_HW_ALLOCATED) { 3868 MPASS(ctrlq->eq.flags & EQ_SW_ALLOCATED); 3869 free_eq_hwq(sc, NULL, &ctrlq->eq); 3870 MPASS(!(ctrlq->eq.flags & EQ_HW_ALLOCATED)); 3871 } 3872 3873 if (ctrlq->eq.flags & EQ_SW_ALLOCATED) { 3874 MPASS(!(ctrlq->eq.flags & EQ_HW_ALLOCATED)); 3875 free_wrq(sc, ctrlq); 3876 MPASS(!(ctrlq->eq.flags & EQ_SW_ALLOCATED)); 3877 } 3878 } 3879 3880 int 3881 t4_sge_set_conm_context(struct adapter *sc, int cntxt_id, int cong_drop, 3882 int cong_map) 3883 { 3884 const int cng_ch_bits_log = sc->chip_params->cng_ch_bits_log; 3885 uint32_t param, val; 3886 uint16_t ch_map; 3887 int cong_mode, rc, i; 3888 3889 if (chip_id(sc) < CHELSIO_T5) 3890 return (ENOTSUP); 3891 3892 /* Convert the driver knob to the mode understood by the firmware. 
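 * For the CHANNEL and BOTH modes the caller's cong_map (a bitmap of rx
 * channels) is then spread into the context's channel map: bit i of
 * cong_map becomes bit (i << cng_ch_bits_log) of ch_map.  With a
 * hypothetical cng_ch_bits_log of 2, a cong_map of 0x5 (channels 0 and 2)
 * turns into a ch_map of 0x101.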
*/ 3893 switch (cong_drop) { 3894 case -1: 3895 cong_mode = X_CONMCTXT_CNGTPMODE_DISABLE; 3896 break; 3897 case 0: 3898 cong_mode = X_CONMCTXT_CNGTPMODE_CHANNEL; 3899 break; 3900 case 1: 3901 cong_mode = X_CONMCTXT_CNGTPMODE_QUEUE; 3902 break; 3903 case 2: 3904 cong_mode = X_CONMCTXT_CNGTPMODE_BOTH; 3905 break; 3906 default: 3907 MPASS(0); 3908 CH_ERR(sc, "cong_drop = %d is invalid (ingress queue %d).\n", 3909 cong_drop, cntxt_id); 3910 return (EINVAL); 3911 } 3912 3913 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | 3914 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) | 3915 V_FW_PARAMS_PARAM_YZ(cntxt_id); 3916 val = V_CONMCTXT_CNGTPMODE(cong_mode); 3917 if (cong_mode == X_CONMCTXT_CNGTPMODE_CHANNEL || 3918 cong_mode == X_CONMCTXT_CNGTPMODE_BOTH) { 3919 for (i = 0, ch_map = 0; i < 4; i++) { 3920 if (cong_map & (1 << i)) 3921 ch_map |= 1 << (i << cng_ch_bits_log); 3922 } 3923 val |= V_CONMCTXT_CNGCHMAP(ch_map); 3924 } 3925 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); 3926 if (rc != 0) { 3927 CH_ERR(sc, "failed to set congestion manager context " 3928 "for ingress queue %d: %d\n", cntxt_id, rc); 3929 } 3930 3931 return (rc); 3932 } 3933 3934 /* 3935 * Idempotent. 3936 */ 3937 static int 3938 alloc_rxq(struct vi_info *vi, struct sge_rxq *rxq, int idx, int intr_idx, 3939 int maxp) 3940 { 3941 int rc; 3942 struct adapter *sc = vi->adapter; 3943 if_t ifp = vi->ifp; 3944 struct sysctl_oid *oid; 3945 char name[16]; 3946 3947 if (!(rxq->iq.flags & IQ_SW_ALLOCATED)) { 3948 MPASS(!(rxq->iq.flags & IQ_HW_ALLOCATED)); 3949 #if defined(INET) || defined(INET6) 3950 rc = tcp_lro_init_args(&rxq->lro, ifp, lro_entries, lro_mbufs); 3951 if (rc != 0) 3952 return (rc); 3953 MPASS(rxq->lro.ifp == ifp); /* also indicates LRO init'ed */ 3954 #endif 3955 rxq->ifp = ifp; 3956 3957 snprintf(name, sizeof(name), "%d", idx); 3958 oid = SYSCTL_ADD_NODE(&vi->ctx, SYSCTL_CHILDREN(vi->rxq_oid), 3959 OID_AUTO, name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 3960 "rx queue"); 3961 3962 init_iq(&rxq->iq, sc, vi->tmr_idx, vi->pktc_idx, vi->qsize_rxq, 3963 intr_idx, cong_drop, IQ_ETH); 3964 #if defined(INET) || defined(INET6) 3965 if (if_getcapenable(ifp) & IFCAP_LRO) 3966 rxq->iq.flags |= IQ_LRO_ENABLED; 3967 #endif 3968 if (if_getcapenable(ifp) & IFCAP_HWRXTSTMP) 3969 rxq->iq.flags |= IQ_RX_TIMESTAMP; 3970 snprintf(name, sizeof(name), "%s rxq%d-fl", 3971 device_get_nameunit(vi->dev), idx); 3972 init_fl(sc, &rxq->fl, vi->qsize_rxq / 8, maxp, name); 3973 rc = alloc_iq_fl(vi, &rxq->iq, &rxq->fl, &vi->ctx, oid); 3974 if (rc != 0) { 3975 CH_ERR(vi, "failed to allocate rxq%d: %d\n", idx, rc); 3976 sysctl_remove_oid(oid, 1, 1); 3977 #if defined(INET) || defined(INET6) 3978 tcp_lro_free(&rxq->lro); 3979 rxq->lro.ifp = NULL; 3980 #endif 3981 return (rc); 3982 } 3983 MPASS(rxq->iq.flags & IQ_SW_ALLOCATED); 3984 add_rxq_sysctls(&vi->ctx, oid, rxq); 3985 } 3986 3987 if (!(rxq->iq.flags & IQ_HW_ALLOCATED)) { 3988 MPASS(rxq->iq.flags & IQ_SW_ALLOCATED); 3989 rc = alloc_iq_fl_hwq(vi, &rxq->iq, &rxq->fl); 3990 if (rc != 0) { 3991 CH_ERR(vi, "failed to create hw rxq%d: %d\n", idx, rc); 3992 return (rc); 3993 } 3994 MPASS(rxq->iq.flags & IQ_HW_ALLOCATED); 3995 3996 if (idx == 0) 3997 sc->sge.iq_base = rxq->iq.abs_id - rxq->iq.cntxt_id; 3998 else 3999 KASSERT(rxq->iq.cntxt_id + sc->sge.iq_base == rxq->iq.abs_id, 4000 ("iq_base mismatch")); 4001 KASSERT(sc->sge.iq_base == 0 || sc->flags & IS_VF, 4002 ("PF with non-zero iq_base")); 4003 4004 /* 4005 * The freelist is just barely above the starvation threshold 4006 * right now, fill it 
up a bit more. 4007 */ 4008 FL_LOCK(&rxq->fl); 4009 refill_fl(sc, &rxq->fl, 128); 4010 FL_UNLOCK(&rxq->fl); 4011 } 4012 4013 return (0); 4014 } 4015 4016 /* 4017 * Idempotent. 4018 */ 4019 static void 4020 free_rxq(struct vi_info *vi, struct sge_rxq *rxq) 4021 { 4022 if (rxq->iq.flags & IQ_HW_ALLOCATED) { 4023 MPASS(rxq->iq.flags & IQ_SW_ALLOCATED); 4024 free_iq_fl_hwq(vi->adapter, &rxq->iq, &rxq->fl); 4025 MPASS(!(rxq->iq.flags & IQ_HW_ALLOCATED)); 4026 } 4027 4028 if (rxq->iq.flags & IQ_SW_ALLOCATED) { 4029 MPASS(!(rxq->iq.flags & IQ_HW_ALLOCATED)); 4030 #if defined(INET) || defined(INET6) 4031 tcp_lro_free(&rxq->lro); 4032 #endif 4033 free_iq_fl(vi->adapter, &rxq->iq, &rxq->fl); 4034 MPASS(!(rxq->iq.flags & IQ_SW_ALLOCATED)); 4035 bzero(rxq, sizeof(*rxq)); 4036 } 4037 } 4038 4039 static void 4040 add_rxq_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *oid, 4041 struct sge_rxq *rxq) 4042 { 4043 struct sysctl_oid_list *children; 4044 4045 if (ctx == NULL || oid == NULL) 4046 return; 4047 4048 children = SYSCTL_CHILDREN(oid); 4049 #if defined(INET) || defined(INET6) 4050 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "lro_queued", CTLFLAG_RD, 4051 &rxq->lro.lro_queued, 0, NULL); 4052 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "lro_flushed", CTLFLAG_RD, 4053 &rxq->lro.lro_flushed, 0, NULL); 4054 #endif 4055 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "rxcsum", CTLFLAG_RD, 4056 &rxq->rxcsum, "# of times hardware assisted with checksum"); 4057 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "vlan_extraction", CTLFLAG_RD, 4058 &rxq->vlan_extraction, "# of times hardware extracted 802.1Q tag"); 4059 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "vxlan_rxcsum", CTLFLAG_RD, 4060 &rxq->vxlan_rxcsum, 4061 "# of times hardware assisted with inner checksum (VXLAN)"); 4062 } 4063 4064 #ifdef TCP_OFFLOAD 4065 /* 4066 * Idempotent. 
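 * The IQ_SW_ALLOCATED and IQ_HW_ALLOCATED flags gate the software and hardware
 * halves separately, so a repeat call only redoes whichever half is still missing.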
4067 */ 4068 static int 4069 alloc_ofld_rxq(struct vi_info *vi, struct sge_ofld_rxq *ofld_rxq, int idx, 4070 int intr_idx, int maxp) 4071 { 4072 int rc; 4073 struct adapter *sc = vi->adapter; 4074 struct sysctl_oid *oid; 4075 char name[16]; 4076 4077 if (!(ofld_rxq->iq.flags & IQ_SW_ALLOCATED)) { 4078 MPASS(!(ofld_rxq->iq.flags & IQ_HW_ALLOCATED)); 4079 4080 snprintf(name, sizeof(name), "%d", idx); 4081 oid = SYSCTL_ADD_NODE(&vi->ctx, 4082 SYSCTL_CHILDREN(vi->ofld_rxq_oid), OID_AUTO, name, 4083 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "offload rx queue"); 4084 4085 init_iq(&ofld_rxq->iq, sc, vi->ofld_tmr_idx, vi->ofld_pktc_idx, 4086 vi->qsize_rxq, intr_idx, ofld_cong_drop, IQ_OFLD); 4087 snprintf(name, sizeof(name), "%s ofld_rxq%d-fl", 4088 device_get_nameunit(vi->dev), idx); 4089 init_fl(sc, &ofld_rxq->fl, vi->qsize_rxq / 8, maxp, name); 4090 rc = alloc_iq_fl(vi, &ofld_rxq->iq, &ofld_rxq->fl, &vi->ctx, 4091 oid); 4092 if (rc != 0) { 4093 CH_ERR(vi, "failed to allocate ofld_rxq%d: %d\n", idx, 4094 rc); 4095 sysctl_remove_oid(oid, 1, 1); 4096 return (rc); 4097 } 4098 MPASS(ofld_rxq->iq.flags & IQ_SW_ALLOCATED); 4099 ofld_rxq->rx_iscsi_ddp_setup_ok = counter_u64_alloc(M_WAITOK); 4100 ofld_rxq->rx_iscsi_ddp_setup_error = 4101 counter_u64_alloc(M_WAITOK); 4102 ofld_rxq->ddp_buffer_alloc = counter_u64_alloc(M_WAITOK); 4103 ofld_rxq->ddp_buffer_reuse = counter_u64_alloc(M_WAITOK); 4104 ofld_rxq->ddp_buffer_free = counter_u64_alloc(M_WAITOK); 4105 add_ofld_rxq_sysctls(&vi->ctx, oid, ofld_rxq); 4106 } 4107 4108 if (!(ofld_rxq->iq.flags & IQ_HW_ALLOCATED)) { 4109 MPASS(ofld_rxq->iq.flags & IQ_SW_ALLOCATED); 4110 rc = alloc_iq_fl_hwq(vi, &ofld_rxq->iq, &ofld_rxq->fl); 4111 if (rc != 0) { 4112 CH_ERR(vi, "failed to create hw ofld_rxq%d: %d\n", idx, 4113 rc); 4114 return (rc); 4115 } 4116 MPASS(ofld_rxq->iq.flags & IQ_HW_ALLOCATED); 4117 } 4118 return (rc); 4119 } 4120 4121 /* 4122 * Idempotent. 
4123 */ 4124 static void 4125 free_ofld_rxq(struct vi_info *vi, struct sge_ofld_rxq *ofld_rxq) 4126 { 4127 if (ofld_rxq->iq.flags & IQ_HW_ALLOCATED) { 4128 MPASS(ofld_rxq->iq.flags & IQ_SW_ALLOCATED); 4129 free_iq_fl_hwq(vi->adapter, &ofld_rxq->iq, &ofld_rxq->fl); 4130 MPASS(!(ofld_rxq->iq.flags & IQ_HW_ALLOCATED)); 4131 } 4132 4133 if (ofld_rxq->iq.flags & IQ_SW_ALLOCATED) { 4134 MPASS(!(ofld_rxq->iq.flags & IQ_HW_ALLOCATED)); 4135 free_iq_fl(vi->adapter, &ofld_rxq->iq, &ofld_rxq->fl); 4136 MPASS(!(ofld_rxq->iq.flags & IQ_SW_ALLOCATED)); 4137 counter_u64_free(ofld_rxq->rx_iscsi_ddp_setup_ok); 4138 counter_u64_free(ofld_rxq->rx_iscsi_ddp_setup_error); 4139 counter_u64_free(ofld_rxq->ddp_buffer_alloc); 4140 counter_u64_free(ofld_rxq->ddp_buffer_reuse); 4141 counter_u64_free(ofld_rxq->ddp_buffer_free); 4142 bzero(ofld_rxq, sizeof(*ofld_rxq)); 4143 } 4144 } 4145 4146 static void 4147 add_ofld_rxq_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *oid, 4148 struct sge_ofld_rxq *ofld_rxq) 4149 { 4150 struct sysctl_oid_list *children; 4151 4152 if (ctx == NULL || oid == NULL) 4153 return; 4154 4155 children = SYSCTL_CHILDREN(oid); 4156 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "rx_aio_ddp_jobs", 4157 CTLFLAG_RD, &ofld_rxq->rx_aio_ddp_jobs, 0, 4158 "# of aio_read(2) jobs completed via DDP"); 4159 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "rx_aio_ddp_octets", 4160 CTLFLAG_RD, &ofld_rxq->rx_aio_ddp_octets, 0, 4161 "# of octets placed directly for aio_read(2) jobs"); 4162 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 4163 "rx_toe_tls_records", CTLFLAG_RD, &ofld_rxq->rx_toe_tls_records, 4164 "# of TOE TLS records received"); 4165 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 4166 "rx_toe_tls_octets", CTLFLAG_RD, &ofld_rxq->rx_toe_tls_octets, 4167 "# of payload octets in received TOE TLS records"); 4168 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 4169 "rx_toe_ddp_octets", CTLFLAG_RD, &ofld_rxq->rx_toe_ddp_octets, 4170 "# of payload octets received via TCP DDP"); 4171 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, 4172 "ddp_buffer_alloc", CTLFLAG_RD, &ofld_rxq->ddp_buffer_alloc, 4173 "# of DDP RCV buffers allocated"); 4174 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, 4175 "ddp_buffer_reuse", CTLFLAG_RD, &ofld_rxq->ddp_buffer_reuse, 4176 "# of DDP RCV buffers reused"); 4177 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, 4178 "ddp_buffer_free", CTLFLAG_RD, &ofld_rxq->ddp_buffer_free, 4179 "# of DDP RCV buffers freed"); 4180 4181 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "iscsi", 4182 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TOE iSCSI statistics"); 4183 children = SYSCTL_CHILDREN(oid); 4184 4185 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "ddp_setup_ok", 4186 CTLFLAG_RD, &ofld_rxq->rx_iscsi_ddp_setup_ok, 4187 "# of times DDP buffer was setup successfully."); 4188 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "ddp_setup_error", 4189 CTLFLAG_RD, &ofld_rxq->rx_iscsi_ddp_setup_error, 4190 "# of times DDP buffer setup failed."); 4191 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "ddp_octets", 4192 CTLFLAG_RD, &ofld_rxq->rx_iscsi_ddp_octets, 0, 4193 "# of octets placed directly"); 4194 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "ddp_pdus", 4195 CTLFLAG_RD, &ofld_rxq->rx_iscsi_ddp_pdus, 0, 4196 "# of PDUs with data placed directly."); 4197 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "fl_octets", 4198 CTLFLAG_RD, &ofld_rxq->rx_iscsi_fl_octets, 0, 4199 "# of data octets delivered in freelist"); 4200 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "fl_pdus", 4201 CTLFLAG_RD, &ofld_rxq->rx_iscsi_fl_pdus, 0, 4202 "# of PDUs with data 
delivered in freelist"); 4203 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "padding_errors", 4204 CTLFLAG_RD, &ofld_rxq->rx_iscsi_padding_errors, 0, 4205 "# of PDUs with invalid padding"); 4206 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "header_digest_errors", 4207 CTLFLAG_RD, &ofld_rxq->rx_iscsi_header_digest_errors, 0, 4208 "# of PDUs with invalid header digests"); 4209 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "data_digest_errors", 4210 CTLFLAG_RD, &ofld_rxq->rx_iscsi_data_digest_errors, 0, 4211 "# of PDUs with invalid data digests"); 4212 } 4213 #endif 4214 4215 /* 4216 * Returns a reasonable automatic cidx flush threshold for a given queue size. 4217 */ 4218 static u_int 4219 qsize_to_fthresh(int qsize) 4220 { 4221 u_int fthresh; 4222 4223 fthresh = qsize == 0 ? 0 : fls(qsize - 1); 4224 if (fthresh > X_CIDXFLUSHTHRESH_128) 4225 fthresh = X_CIDXFLUSHTHRESH_128; 4226 4227 return (fthresh); 4228 } 4229 4230 static int 4231 ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq) 4232 { 4233 int rc, cntxt_id; 4234 struct fw_eq_ctrl_cmd c; 4235 int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; 4236 4237 bzero(&c, sizeof(c)); 4238 4239 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST | 4240 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(sc->pf) | 4241 V_FW_EQ_CTRL_CMD_VFN(0)); 4242 c.alloc_to_len16 = htobe32(F_FW_EQ_CTRL_CMD_ALLOC | 4243 F_FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c)); 4244 c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(eq->iqid)); 4245 c.physeqid_pkd = htobe32(0); 4246 c.fetchszm_to_iqid = 4247 htobe32(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) | 4248 V_FW_EQ_CTRL_CMD_PCIECHN(eq->tx_chan) | 4249 F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(eq->iqid)); 4250 c.dcaen_to_eqsize = 4251 htobe32(V_FW_EQ_CTRL_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ? 
4252 X_FETCHBURSTMIN_64B : X_FETCHBURSTMIN_64B_T6) | 4253 V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) | 4254 V_FW_EQ_CTRL_CMD_CIDXFTHRESH(qsize_to_fthresh(qsize)) | 4255 V_FW_EQ_CTRL_CMD_EQSIZE(qsize)); 4256 c.eqaddr = htobe64(eq->ba); 4257 4258 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 4259 if (rc != 0) { 4260 CH_ERR(sc, "failed to create hw ctrlq for tx_chan %d: %d\n", 4261 eq->tx_chan, rc); 4262 return (rc); 4263 } 4264 4265 eq->cntxt_id = G_FW_EQ_CTRL_CMD_EQID(be32toh(c.cmpliqid_eqid)); 4266 eq->abs_id = G_FW_EQ_CTRL_CMD_PHYSEQID(be32toh(c.physeqid_pkd)); 4267 cntxt_id = eq->cntxt_id - sc->sge.eq_start; 4268 if (cntxt_id >= sc->sge.eqmap_sz) 4269 panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, 4270 cntxt_id, sc->sge.eqmap_sz - 1); 4271 sc->sge.eqmap[cntxt_id] = eq; 4272 4273 return (rc); 4274 } 4275 4276 static int 4277 eth_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq) 4278 { 4279 int rc, cntxt_id; 4280 struct fw_eq_eth_cmd c; 4281 int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; 4282 4283 bzero(&c, sizeof(c)); 4284 4285 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST | 4286 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) | 4287 V_FW_EQ_ETH_CMD_VFN(0)); 4288 c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC | 4289 F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c)); 4290 c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE | 4291 F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid)); 4292 c.fetchszm_to_iqid = 4293 htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) | 4294 V_FW_EQ_ETH_CMD_PCIECHN(eq->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO | 4295 V_FW_EQ_ETH_CMD_IQID(eq->iqid)); 4296 c.dcaen_to_eqsize = 4297 htobe32(V_FW_EQ_ETH_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ? 4298 X_FETCHBURSTMIN_64B : X_FETCHBURSTMIN_64B_T6) | 4299 V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) | 4300 V_FW_EQ_ETH_CMD_EQSIZE(qsize)); 4301 c.eqaddr = htobe64(eq->ba); 4302 4303 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 4304 if (rc != 0) { 4305 device_printf(vi->dev, 4306 "failed to create Ethernet egress queue: %d\n", rc); 4307 return (rc); 4308 } 4309 4310 eq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd)); 4311 eq->abs_id = G_FW_EQ_ETH_CMD_PHYSEQID(be32toh(c.physeqid_pkd)); 4312 cntxt_id = eq->cntxt_id - sc->sge.eq_start; 4313 if (cntxt_id >= sc->sge.eqmap_sz) 4314 panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, 4315 cntxt_id, sc->sge.eqmap_sz - 1); 4316 sc->sge.eqmap[cntxt_id] = eq; 4317 4318 return (rc); 4319 } 4320 4321 #if defined(TCP_OFFLOAD) || defined(RATELIMIT) 4322 static int 4323 ofld_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq) 4324 { 4325 int rc, cntxt_id; 4326 struct fw_eq_ofld_cmd c; 4327 int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; 4328 4329 bzero(&c, sizeof(c)); 4330 4331 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST | 4332 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(sc->pf) | 4333 V_FW_EQ_OFLD_CMD_VFN(0)); 4334 c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_ALLOC | 4335 F_FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c)); 4336 c.fetchszm_to_iqid = 4337 htonl(V_FW_EQ_OFLD_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) | 4338 V_FW_EQ_OFLD_CMD_PCIECHN(eq->tx_chan) | 4339 F_FW_EQ_OFLD_CMD_FETCHRO | V_FW_EQ_OFLD_CMD_IQID(eq->iqid)); 4340 c.dcaen_to_eqsize = 4341 htobe32(V_FW_EQ_OFLD_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ? 
4342 X_FETCHBURSTMIN_64B : X_FETCHBURSTMIN_64B_T6) | 4343 V_FW_EQ_OFLD_CMD_FBMAX(X_FETCHBURSTMAX_512B) | 4344 V_FW_EQ_OFLD_CMD_CIDXFTHRESH(qsize_to_fthresh(qsize)) | 4345 V_FW_EQ_OFLD_CMD_EQSIZE(qsize)); 4346 c.eqaddr = htobe64(eq->ba); 4347 4348 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 4349 if (rc != 0) { 4350 device_printf(vi->dev, 4351 "failed to create egress queue for TCP offload: %d\n", rc); 4352 return (rc); 4353 } 4354 4355 eq->cntxt_id = G_FW_EQ_OFLD_CMD_EQID(be32toh(c.eqid_pkd)); 4356 eq->abs_id = G_FW_EQ_OFLD_CMD_PHYSEQID(be32toh(c.physeqid_pkd)); 4357 cntxt_id = eq->cntxt_id - sc->sge.eq_start; 4358 if (cntxt_id >= sc->sge.eqmap_sz) 4359 panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, 4360 cntxt_id, sc->sge.eqmap_sz - 1); 4361 sc->sge.eqmap[cntxt_id] = eq; 4362 4363 return (rc); 4364 } 4365 #endif 4366 4367 /* SW only */ 4368 static int 4369 alloc_eq(struct adapter *sc, struct sge_eq *eq, struct sysctl_ctx_list *ctx, 4370 struct sysctl_oid *oid) 4371 { 4372 int rc, qsize; 4373 size_t len; 4374 4375 MPASS(!(eq->flags & EQ_SW_ALLOCATED)); 4376 4377 qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; 4378 len = qsize * EQ_ESIZE; 4379 rc = alloc_ring(sc, len, &eq->desc_tag, &eq->desc_map, &eq->ba, 4380 (void **)&eq->desc); 4381 if (rc) 4382 return (rc); 4383 if (ctx != NULL && oid != NULL) 4384 add_eq_sysctls(sc, ctx, oid, eq); 4385 eq->flags |= EQ_SW_ALLOCATED; 4386 4387 return (0); 4388 } 4389 4390 /* SW only */ 4391 static void 4392 free_eq(struct adapter *sc, struct sge_eq *eq) 4393 { 4394 MPASS(eq->flags & EQ_SW_ALLOCATED); 4395 if (eq->type == EQ_ETH) 4396 MPASS(eq->pidx == eq->cidx); 4397 4398 free_ring(sc, eq->desc_tag, eq->desc_map, eq->ba, eq->desc); 4399 mtx_destroy(&eq->eq_lock); 4400 bzero(eq, sizeof(*eq)); 4401 } 4402 4403 static void 4404 add_eq_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx, 4405 struct sysctl_oid *oid, struct sge_eq *eq) 4406 { 4407 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 4408 4409 SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD, &eq->ba, 4410 "bus address of descriptor ring"); 4411 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL, 4412 eq->sidx * EQ_ESIZE + sc->params.sge.spg_len, 4413 "desc ring size in bytes"); 4414 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "abs_id", CTLFLAG_RD, 4415 &eq->abs_id, 0, "absolute id of the queue"); 4416 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 4417 &eq->cntxt_id, 0, "SGE context id of the queue"); 4418 SYSCTL_ADD_U16(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, &eq->cidx, 4419 0, "consumer index"); 4420 SYSCTL_ADD_U16(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, &eq->pidx, 4421 0, "producer index"); 4422 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sidx", CTLFLAG_RD, NULL, 4423 eq->sidx, "status page index"); 4424 } 4425 4426 static int 4427 alloc_eq_hwq(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq) 4428 { 4429 int rc; 4430 4431 MPASS(!(eq->flags & EQ_HW_ALLOCATED)); 4432 4433 eq->iqid = eq->iq->cntxt_id; 4434 eq->pidx = eq->cidx = eq->dbidx = 0; 4435 /* Note that equeqidx is not used with sge_wrq (OFLD/CTRL) queues. 
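 * For Ethernet tx queues it presumably tracks the pidx at which an egress
 * update was last requested, so it is simply cleared here along with the
 * other indices.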
*/ 4436 eq->equeqidx = 0; 4437 eq->doorbells = sc->doorbells; 4438 bzero(eq->desc, eq->sidx * EQ_ESIZE + sc->params.sge.spg_len); 4439 4440 switch (eq->type) { 4441 case EQ_CTRL: 4442 rc = ctrl_eq_alloc(sc, eq); 4443 break; 4444 4445 case EQ_ETH: 4446 rc = eth_eq_alloc(sc, vi, eq); 4447 break; 4448 4449 #if defined(TCP_OFFLOAD) || defined(RATELIMIT) 4450 case EQ_OFLD: 4451 rc = ofld_eq_alloc(sc, vi, eq); 4452 break; 4453 #endif 4454 4455 default: 4456 panic("%s: invalid eq type %d.", __func__, eq->type); 4457 } 4458 if (rc != 0) { 4459 CH_ERR(sc, "failed to allocate egress queue(%d): %d\n", 4460 eq->type, rc); 4461 return (rc); 4462 } 4463 4464 if (isset(&eq->doorbells, DOORBELL_UDB) || 4465 isset(&eq->doorbells, DOORBELL_UDBWC) || 4466 isset(&eq->doorbells, DOORBELL_WCWR)) { 4467 uint32_t s_qpp = sc->params.sge.eq_s_qpp; 4468 uint32_t mask = (1 << s_qpp) - 1; 4469 volatile uint8_t *udb; 4470 4471 udb = sc->udbs_base + UDBS_DB_OFFSET; 4472 udb += (eq->cntxt_id >> s_qpp) << PAGE_SHIFT; /* pg offset */ 4473 eq->udb_qid = eq->cntxt_id & mask; /* id in page */ 4474 if (eq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE) 4475 clrbit(&eq->doorbells, DOORBELL_WCWR); 4476 else { 4477 udb += eq->udb_qid << UDBS_SEG_SHIFT; /* seg offset */ 4478 eq->udb_qid = 0; 4479 } 4480 eq->udb = (volatile void *)udb; 4481 } 4482 4483 eq->flags |= EQ_HW_ALLOCATED; 4484 return (0); 4485 } 4486 4487 static int 4488 free_eq_hwq(struct adapter *sc, struct vi_info *vi __unused, struct sge_eq *eq) 4489 { 4490 int rc; 4491 4492 MPASS(eq->flags & EQ_HW_ALLOCATED); 4493 4494 switch (eq->type) { 4495 case EQ_CTRL: 4496 rc = -t4_ctrl_eq_free(sc, sc->mbox, sc->pf, 0, eq->cntxt_id); 4497 break; 4498 case EQ_ETH: 4499 rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, eq->cntxt_id); 4500 break; 4501 #if defined(TCP_OFFLOAD) || defined(RATELIMIT) 4502 case EQ_OFLD: 4503 rc = -t4_ofld_eq_free(sc, sc->mbox, sc->pf, 0, eq->cntxt_id); 4504 break; 4505 #endif 4506 default: 4507 panic("%s: invalid eq type %d.", __func__, eq->type); 4508 } 4509 if (rc != 0) { 4510 CH_ERR(sc, "failed to free eq (type %d): %d\n", eq->type, rc); 4511 return (rc); 4512 } 4513 eq->flags &= ~EQ_HW_ALLOCATED; 4514 4515 return (0); 4516 } 4517 4518 static int 4519 alloc_wrq(struct adapter *sc, struct vi_info *vi, struct sge_wrq *wrq, 4520 struct sysctl_ctx_list *ctx, struct sysctl_oid *oid) 4521 { 4522 struct sge_eq *eq = &wrq->eq; 4523 int rc; 4524 4525 MPASS(!(eq->flags & EQ_SW_ALLOCATED)); 4526 4527 rc = alloc_eq(sc, eq, ctx, oid); 4528 if (rc) 4529 return (rc); 4530 MPASS(eq->flags & EQ_SW_ALLOCATED); 4531 /* Can't fail after this. 
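 * Everything below only initializes fields of the already-allocated wrq, so
 * there is no error path left to unwind.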
*/ 4532 4533 wrq->adapter = sc; 4534 TASK_INIT(&wrq->wrq_tx_task, 0, wrq_tx_drain, wrq); 4535 TAILQ_INIT(&wrq->incomplete_wrs); 4536 STAILQ_INIT(&wrq->wr_list); 4537 wrq->nwr_pending = 0; 4538 wrq->ndesc_needed = 0; 4539 add_wrq_sysctls(ctx, oid, wrq); 4540 4541 return (0); 4542 } 4543 4544 static void 4545 free_wrq(struct adapter *sc, struct sge_wrq *wrq) 4546 { 4547 free_eq(sc, &wrq->eq); 4548 MPASS(wrq->nwr_pending == 0); 4549 MPASS(TAILQ_EMPTY(&wrq->incomplete_wrs)); 4550 MPASS(STAILQ_EMPTY(&wrq->wr_list)); 4551 bzero(wrq, sizeof(*wrq)); 4552 } 4553 4554 static void 4555 add_wrq_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *oid, 4556 struct sge_wrq *wrq) 4557 { 4558 struct sysctl_oid_list *children; 4559 4560 if (ctx == NULL || oid == NULL) 4561 return; 4562 4563 children = SYSCTL_CHILDREN(oid); 4564 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_direct", CTLFLAG_RD, 4565 &wrq->tx_wrs_direct, "# of work requests (direct)"); 4566 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_copied", CTLFLAG_RD, 4567 &wrq->tx_wrs_copied, "# of work requests (copied)"); 4568 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_sspace", CTLFLAG_RD, 4569 &wrq->tx_wrs_ss, "# of work requests (copied from scratch space)"); 4570 } 4571 4572 /* 4573 * Idempotent. 4574 */ 4575 static int 4576 alloc_txq(struct vi_info *vi, struct sge_txq *txq, int idx) 4577 { 4578 int rc, iqidx; 4579 struct port_info *pi = vi->pi; 4580 struct adapter *sc = vi->adapter; 4581 struct sge_eq *eq = &txq->eq; 4582 struct txpkts *txp; 4583 char name[16]; 4584 struct sysctl_oid *oid; 4585 4586 if (!(eq->flags & EQ_SW_ALLOCATED)) { 4587 MPASS(!(eq->flags & EQ_HW_ALLOCATED)); 4588 4589 snprintf(name, sizeof(name), "%d", idx); 4590 oid = SYSCTL_ADD_NODE(&vi->ctx, SYSCTL_CHILDREN(vi->txq_oid), 4591 OID_AUTO, name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 4592 "tx queue"); 4593 4594 iqidx = vi->first_rxq + (idx % vi->nrxq); 4595 snprintf(name, sizeof(name), "%s txq%d", 4596 device_get_nameunit(vi->dev), idx); 4597 init_eq(sc, &txq->eq, EQ_ETH, vi->qsize_txq, pi->port_id, 4598 &sc->sge.rxq[iqidx].iq, name); 4599 4600 rc = mp_ring_alloc(&txq->r, eq->sidx, txq, eth_tx, 4601 can_resume_eth_tx, M_CXGBE, &eq->eq_lock, M_WAITOK); 4602 if (rc != 0) { 4603 CH_ERR(vi, "failed to allocate mp_ring for txq%d: %d\n", 4604 idx, rc); 4605 failed: 4606 sysctl_remove_oid(oid, 1, 1); 4607 return (rc); 4608 } 4609 4610 rc = alloc_eq(sc, eq, &vi->ctx, oid); 4611 if (rc) { 4612 CH_ERR(vi, "failed to allocate txq%d: %d\n", idx, rc); 4613 mp_ring_free(txq->r); 4614 goto failed; 4615 } 4616 MPASS(eq->flags & EQ_SW_ALLOCATED); 4617 /* Can't fail after this point. */ 4618 4619 TASK_INIT(&txq->tx_reclaim_task, 0, tx_reclaim, eq); 4620 txq->ifp = vi->ifp; 4621 txq->gl = sglist_alloc(TX_SGL_SEGS, M_WAITOK); 4622 txq->sdesc = malloc(eq->sidx * sizeof(struct tx_sdesc), M_CXGBE, 4623 M_ZERO | M_WAITOK); 4624 4625 add_txq_sysctls(vi, &vi->ctx, oid, txq); 4626 } 4627 4628 if (!(eq->flags & EQ_HW_ALLOCATED)) { 4629 MPASS(eq->flags & EQ_SW_ALLOCATED); 4630 rc = alloc_eq_hwq(sc, vi, eq); 4631 if (rc != 0) { 4632 CH_ERR(vi, "failed to create hw txq%d: %d\n", idx, rc); 4633 return (rc); 4634 } 4635 MPASS(eq->flags & EQ_HW_ALLOCATED); 4636 /* Can't fail after this point. 
*/ 4637 4638 if (idx == 0) 4639 sc->sge.eq_base = eq->abs_id - eq->cntxt_id; 4640 else 4641 KASSERT(eq->cntxt_id + sc->sge.eq_base == eq->abs_id, 4642 ("eq_base mismatch")); 4643 KASSERT(sc->sge.eq_base == 0 || sc->flags & IS_VF, 4644 ("PF with non-zero eq_base")); 4645 4646 txp = &txq->txp; 4647 MPASS(nitems(txp->mb) >= sc->params.max_pkts_per_eth_tx_pkts_wr); 4648 txq->txp.max_npkt = min(nitems(txp->mb), 4649 sc->params.max_pkts_per_eth_tx_pkts_wr); 4650 if (vi->flags & TX_USES_VM_WR && !(sc->flags & IS_VF)) 4651 txq->txp.max_npkt--; 4652 4653 if (vi->flags & TX_USES_VM_WR) 4654 txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) | 4655 V_TXPKT_INTF(pi->tx_chan)); 4656 else 4657 txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) | 4658 V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) | 4659 V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld)); 4660 4661 txq->tc_idx = -1; 4662 } 4663 4664 return (0); 4665 } 4666 4667 /* 4668 * Idempotent. 4669 */ 4670 static void 4671 free_txq(struct vi_info *vi, struct sge_txq *txq) 4672 { 4673 struct adapter *sc = vi->adapter; 4674 struct sge_eq *eq = &txq->eq; 4675 4676 if (eq->flags & EQ_HW_ALLOCATED) { 4677 MPASS(eq->flags & EQ_SW_ALLOCATED); 4678 free_eq_hwq(sc, NULL, eq); 4679 MPASS(!(eq->flags & EQ_HW_ALLOCATED)); 4680 } 4681 4682 if (eq->flags & EQ_SW_ALLOCATED) { 4683 MPASS(!(eq->flags & EQ_HW_ALLOCATED)); 4684 sglist_free(txq->gl); 4685 free(txq->sdesc, M_CXGBE); 4686 mp_ring_free(txq->r); 4687 free_eq(sc, eq); 4688 MPASS(!(eq->flags & EQ_SW_ALLOCATED)); 4689 bzero(txq, sizeof(*txq)); 4690 } 4691 } 4692 4693 static void 4694 add_txq_sysctls(struct vi_info *vi, struct sysctl_ctx_list *ctx, 4695 struct sysctl_oid *oid, struct sge_txq *txq) 4696 { 4697 struct adapter *sc; 4698 struct sysctl_oid_list *children; 4699 4700 if (ctx == NULL || oid == NULL) 4701 return; 4702 4703 sc = vi->adapter; 4704 children = SYSCTL_CHILDREN(oid); 4705 4706 mp_ring_sysctls(txq->r, ctx, children); 4707 4708 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tc", 4709 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, txq - sc->sge.txq, 4710 sysctl_tc, "I", "traffic class (-1 means none)"); 4711 4712 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "txcsum", CTLFLAG_RD, 4713 &txq->txcsum, "# of times hardware assisted with checksum"); 4714 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "vlan_insertion", CTLFLAG_RD, 4715 &txq->vlan_insertion, "# of times hardware inserted 802.1Q tag"); 4716 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tso_wrs", CTLFLAG_RD, 4717 &txq->tso_wrs, "# of TSO work requests"); 4718 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "imm_wrs", CTLFLAG_RD, 4719 &txq->imm_wrs, "# of work requests with immediate data"); 4720 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "sgl_wrs", CTLFLAG_RD, 4721 &txq->sgl_wrs, "# of work requests with direct SGL"); 4722 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "txpkt_wrs", CTLFLAG_RD, 4723 &txq->txpkt_wrs, "# of txpkt work requests (one pkt/WR)"); 4724 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "txpkts0_wrs", CTLFLAG_RD, 4725 &txq->txpkts0_wrs, "# of txpkts (type 0) work requests"); 4726 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "txpkts1_wrs", CTLFLAG_RD, 4727 &txq->txpkts1_wrs, "# of txpkts (type 1) work requests"); 4728 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "txpkts0_pkts", CTLFLAG_RD, 4729 &txq->txpkts0_pkts, 4730 "# of frames tx'd using type0 txpkts work requests"); 4731 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "txpkts1_pkts", CTLFLAG_RD, 4732 &txq->txpkts1_pkts, 4733 "# of frames tx'd using type1 txpkts work requests"); 4734 
SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "txpkts_flush", CTLFLAG_RD, 4735 &txq->txpkts_flush, 4736 "# of times txpkts had to be flushed out by an egress-update"); 4737 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "raw_wrs", CTLFLAG_RD, 4738 &txq->raw_wrs, "# of raw work requests (non-packets)"); 4739 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "vxlan_tso_wrs", CTLFLAG_RD, 4740 &txq->vxlan_tso_wrs, "# of VXLAN TSO work requests"); 4741 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "vxlan_txcsum", CTLFLAG_RD, 4742 &txq->vxlan_txcsum, 4743 "# of times hardware assisted with inner checksums (VXLAN)"); 4744 4745 #ifdef KERN_TLS 4746 if (is_ktls(sc)) { 4747 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_records", 4748 CTLFLAG_RD, &txq->kern_tls_records, 4749 "# of NIC TLS records transmitted"); 4750 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_short", 4751 CTLFLAG_RD, &txq->kern_tls_short, 4752 "# of short NIC TLS records transmitted"); 4753 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_partial", 4754 CTLFLAG_RD, &txq->kern_tls_partial, 4755 "# of partial NIC TLS records transmitted"); 4756 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_full", 4757 CTLFLAG_RD, &txq->kern_tls_full, 4758 "# of full NIC TLS records transmitted"); 4759 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_octets", 4760 CTLFLAG_RD, &txq->kern_tls_octets, 4761 "# of payload octets in transmitted NIC TLS records"); 4762 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_waste", 4763 CTLFLAG_RD, &txq->kern_tls_waste, 4764 "# of octets DMAd but not transmitted in NIC TLS records"); 4765 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_options", 4766 CTLFLAG_RD, &txq->kern_tls_options, 4767 "# of NIC TLS options-only packets transmitted"); 4768 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_header", 4769 CTLFLAG_RD, &txq->kern_tls_header, 4770 "# of NIC TLS header-only packets transmitted"); 4771 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_fin", 4772 CTLFLAG_RD, &txq->kern_tls_fin, 4773 "# of NIC TLS FIN-only packets transmitted"); 4774 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_fin_short", 4775 CTLFLAG_RD, &txq->kern_tls_fin_short, 4776 "# of NIC TLS padded FIN packets on short TLS records"); 4777 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_cbc", 4778 CTLFLAG_RD, &txq->kern_tls_cbc, 4779 "# of NIC TLS sessions using AES-CBC"); 4780 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_gcm", 4781 CTLFLAG_RD, &txq->kern_tls_gcm, 4782 "# of NIC TLS sessions using AES-GCM"); 4783 } 4784 #endif 4785 } 4786 4787 #if defined(TCP_OFFLOAD) || defined(RATELIMIT) 4788 /* 4789 * Idempotent. 
4790 */ 4791 static int 4792 alloc_ofld_txq(struct vi_info *vi, struct sge_ofld_txq *ofld_txq, int idx) 4793 { 4794 struct sysctl_oid *oid; 4795 struct port_info *pi = vi->pi; 4796 struct adapter *sc = vi->adapter; 4797 struct sge_eq *eq = &ofld_txq->wrq.eq; 4798 int rc, iqidx; 4799 char name[16]; 4800 4801 MPASS(idx >= 0); 4802 MPASS(idx < vi->nofldtxq); 4803 4804 if (!(eq->flags & EQ_SW_ALLOCATED)) { 4805 snprintf(name, sizeof(name), "%d", idx); 4806 oid = SYSCTL_ADD_NODE(&vi->ctx, 4807 SYSCTL_CHILDREN(vi->ofld_txq_oid), OID_AUTO, name, 4808 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "offload tx queue"); 4809 4810 snprintf(name, sizeof(name), "%s ofld_txq%d", 4811 device_get_nameunit(vi->dev), idx); 4812 if (vi->nofldrxq > 0) { 4813 iqidx = vi->first_ofld_rxq + (idx % vi->nofldrxq); 4814 init_eq(sc, eq, EQ_OFLD, vi->qsize_txq, pi->port_id, 4815 &sc->sge.ofld_rxq[iqidx].iq, name); 4816 } else { 4817 iqidx = vi->first_rxq + (idx % vi->nrxq); 4818 init_eq(sc, eq, EQ_OFLD, vi->qsize_txq, pi->port_id, 4819 &sc->sge.rxq[iqidx].iq, name); 4820 } 4821 4822 rc = alloc_wrq(sc, vi, &ofld_txq->wrq, &vi->ctx, oid); 4823 if (rc != 0) { 4824 CH_ERR(vi, "failed to allocate ofld_txq%d: %d\n", idx, 4825 rc); 4826 sysctl_remove_oid(oid, 1, 1); 4827 return (rc); 4828 } 4829 MPASS(eq->flags & EQ_SW_ALLOCATED); 4830 /* Can't fail after this point. */ 4831 4832 ofld_txq->tx_iscsi_pdus = counter_u64_alloc(M_WAITOK); 4833 ofld_txq->tx_iscsi_octets = counter_u64_alloc(M_WAITOK); 4834 ofld_txq->tx_iscsi_iso_wrs = counter_u64_alloc(M_WAITOK); 4835 ofld_txq->tx_aio_jobs = counter_u64_alloc(M_WAITOK); 4836 ofld_txq->tx_aio_octets = counter_u64_alloc(M_WAITOK); 4837 ofld_txq->tx_toe_tls_records = counter_u64_alloc(M_WAITOK); 4838 ofld_txq->tx_toe_tls_octets = counter_u64_alloc(M_WAITOK); 4839 add_ofld_txq_sysctls(&vi->ctx, oid, ofld_txq); 4840 } 4841 4842 if (!(eq->flags & EQ_HW_ALLOCATED)) { 4843 rc = alloc_eq_hwq(sc, vi, eq); 4844 if (rc != 0) { 4845 CH_ERR(vi, "failed to create hw ofld_txq%d: %d\n", idx, 4846 rc); 4847 return (rc); 4848 } 4849 MPASS(eq->flags & EQ_HW_ALLOCATED); 4850 } 4851 4852 return (0); 4853 } 4854 4855 /* 4856 * Idempotent. 
4857 */ 4858 static void 4859 free_ofld_txq(struct vi_info *vi, struct sge_ofld_txq *ofld_txq) 4860 { 4861 struct adapter *sc = vi->adapter; 4862 struct sge_eq *eq = &ofld_txq->wrq.eq; 4863 4864 if (eq->flags & EQ_HW_ALLOCATED) { 4865 MPASS(eq->flags & EQ_SW_ALLOCATED); 4866 free_eq_hwq(sc, NULL, eq); 4867 MPASS(!(eq->flags & EQ_HW_ALLOCATED)); 4868 } 4869 4870 if (eq->flags & EQ_SW_ALLOCATED) { 4871 MPASS(!(eq->flags & EQ_HW_ALLOCATED)); 4872 counter_u64_free(ofld_txq->tx_iscsi_pdus); 4873 counter_u64_free(ofld_txq->tx_iscsi_octets); 4874 counter_u64_free(ofld_txq->tx_iscsi_iso_wrs); 4875 counter_u64_free(ofld_txq->tx_aio_jobs); 4876 counter_u64_free(ofld_txq->tx_aio_octets); 4877 counter_u64_free(ofld_txq->tx_toe_tls_records); 4878 counter_u64_free(ofld_txq->tx_toe_tls_octets); 4879 free_wrq(sc, &ofld_txq->wrq); 4880 MPASS(!(eq->flags & EQ_SW_ALLOCATED)); 4881 bzero(ofld_txq, sizeof(*ofld_txq)); 4882 } 4883 } 4884 4885 static void 4886 add_ofld_txq_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *oid, 4887 struct sge_ofld_txq *ofld_txq) 4888 { 4889 struct sysctl_oid_list *children; 4890 4891 if (ctx == NULL || oid == NULL) 4892 return; 4893 4894 children = SYSCTL_CHILDREN(oid); 4895 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "tx_iscsi_pdus", 4896 CTLFLAG_RD, &ofld_txq->tx_iscsi_pdus, 4897 "# of iSCSI PDUs transmitted"); 4898 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "tx_iscsi_octets", 4899 CTLFLAG_RD, &ofld_txq->tx_iscsi_octets, 4900 "# of payload octets in transmitted iSCSI PDUs"); 4901 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "tx_iscsi_iso_wrs", 4902 CTLFLAG_RD, &ofld_txq->tx_iscsi_iso_wrs, 4903 "# of iSCSI segmentation offload work requests"); 4904 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "tx_aio_jobs", 4905 CTLFLAG_RD, &ofld_txq->tx_aio_jobs, 4906 "# of zero-copy aio_write(2) jobs transmitted"); 4907 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "tx_aio_octets", 4908 CTLFLAG_RD, &ofld_txq->tx_aio_octets, 4909 "# of payload octets in transmitted zero-copy aio_write(2) jobs"); 4910 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "tx_toe_tls_records", 4911 CTLFLAG_RD, &ofld_txq->tx_toe_tls_records, 4912 "# of TOE TLS records transmitted"); 4913 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "tx_toe_tls_octets", 4914 CTLFLAG_RD, &ofld_txq->tx_toe_tls_octets, 4915 "# of payload octets in transmitted TOE TLS records"); 4916 } 4917 #endif 4918 4919 static void 4920 oneseg_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error) 4921 { 4922 bus_addr_t *ba = arg; 4923 4924 KASSERT(nseg == 1, 4925 ("%s meant for single segment mappings only.", __func__)); 4926 4927 *ba = error ? 0 : segs->ds_addr; 4928 } 4929 4930 static inline void 4931 ring_fl_db(struct adapter *sc, struct sge_fl *fl) 4932 { 4933 uint32_t n, v; 4934 4935 n = IDXDIFF(fl->pidx >> 3, fl->dbidx, fl->sidx); 4936 MPASS(n > 0); 4937 4938 wmb(); 4939 v = fl->dbval | V_PIDX(n); 4940 if (fl->udb) 4941 *fl->udb = htole32(v); 4942 else 4943 t4_write_reg(sc, sc->sge_kdoorbell_reg, v); 4944 IDXINCR(fl->dbidx, n, fl->sidx); 4945 } 4946 4947 /* 4948 * Fills up the freelist by allocating up to 'n' buffers. Buffers that are 4949 * recycled do not count towards this allocation budget. 4950 * 4951 * Returns non-zero to indicate that this freelist should be added to the list 4952 * of starving freelists. 
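 * The hardware's freelist producer index advances in units of 8 buffers,
 * which is why the code below shifts fl->pidx right by 3 before comparing it
 * with the doorbell index or ringing the doorbell.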
4953 */ 4954 static int 4955 refill_fl(struct adapter *sc, struct sge_fl *fl, int n) 4956 { 4957 __be64 *d; 4958 struct fl_sdesc *sd; 4959 uintptr_t pa; 4960 caddr_t cl; 4961 struct rx_buf_info *rxb; 4962 struct cluster_metadata *clm; 4963 uint16_t max_pidx, zidx = fl->zidx; 4964 uint16_t hw_cidx = fl->hw_cidx; /* stable snapshot */ 4965 4966 FL_LOCK_ASSERT_OWNED(fl); 4967 4968 /* 4969 * We always stop at the beginning of the hardware descriptor that's just 4970 * before the one with the hw cidx. This is to avoid hw pidx = hw cidx, 4971 * which would mean an empty freelist to the chip. 4972 */ 4973 max_pidx = __predict_false(hw_cidx == 0) ? fl->sidx - 1 : hw_cidx - 1; 4974 if (fl->pidx == max_pidx * 8) 4975 return (0); 4976 4977 d = &fl->desc[fl->pidx]; 4978 sd = &fl->sdesc[fl->pidx]; 4979 rxb = &sc->sge.rx_buf_info[zidx]; 4980 4981 while (n > 0) { 4982 4983 if (sd->cl != NULL) { 4984 4985 if (sd->nmbuf == 0) { 4986 /* 4987 * Fast recycle without involving any atomics on 4988 * the cluster's metadata (if the cluster has 4989 * metadata). This happens when all frames 4990 * received in the cluster were small enough to 4991 * fit within a single mbuf each. 4992 */ 4993 fl->cl_fast_recycled++; 4994 goto recycled; 4995 } 4996 4997 /* 4998 * Cluster is guaranteed to have metadata. Clusters 4999 * without metadata always take the fast recycle path 5000 * when they're recycled. 5001 */ 5002 clm = cl_metadata(sd); 5003 MPASS(clm != NULL); 5004 5005 if (atomic_fetchadd_int(&clm->refcount, -1) == 1) { 5006 fl->cl_recycled++; 5007 counter_u64_add(extfree_rels, 1); 5008 goto recycled; 5009 } 5010 sd->cl = NULL; /* gave up my reference */ 5011 } 5012 MPASS(sd->cl == NULL); 5013 cl = uma_zalloc(rxb->zone, M_NOWAIT); 5014 if (__predict_false(cl == NULL)) { 5015 if (zidx != fl->safe_zidx) { 5016 zidx = fl->safe_zidx; 5017 rxb = &sc->sge.rx_buf_info[zidx]; 5018 cl = uma_zalloc(rxb->zone, M_NOWAIT); 5019 } 5020 if (cl == NULL) 5021 break; 5022 } 5023 fl->cl_allocated++; 5024 n--; 5025 5026 pa = pmap_kextract((vm_offset_t)cl); 5027 sd->cl = cl; 5028 sd->zidx = zidx; 5029 5030 if (fl->flags & FL_BUF_PACKING) { 5031 *d = htobe64(pa | rxb->hwidx2); 5032 sd->moff = rxb->size2; 5033 } else { 5034 *d = htobe64(pa | rxb->hwidx1); 5035 sd->moff = 0; 5036 } 5037 recycled: 5038 sd->nmbuf = 0; 5039 d++; 5040 sd++; 5041 if (__predict_false((++fl->pidx & 7) == 0)) { 5042 uint16_t pidx = fl->pidx >> 3; 5043 5044 if (__predict_false(pidx == fl->sidx)) { 5045 fl->pidx = 0; 5046 pidx = 0; 5047 sd = fl->sdesc; 5048 d = fl->desc; 5049 } 5050 if (n < 8 || pidx == max_pidx) 5051 break; 5052 5053 if (IDXDIFF(pidx, fl->dbidx, fl->sidx) >= 4) 5054 ring_fl_db(sc, fl); 5055 } 5056 } 5057 5058 if ((fl->pidx >> 3) != fl->dbidx) 5059 ring_fl_db(sc, fl); 5060 5061 return (FL_RUNNING_LOW(fl) && !(fl->flags & FL_STARVING)); 5062 } 5063 5064 /* 5065 * Attempt to refill all starving freelists. 5066 */ 5067 static void 5068 refill_sfl(void *arg) 5069 { 5070 struct adapter *sc = arg; 5071 struct sge_fl *fl, *fl_temp; 5072 5073 mtx_assert(&sc->sfl_lock, MA_OWNED); 5074 TAILQ_FOREACH_SAFE(fl, &sc->sfl, link, fl_temp) { 5075 FL_LOCK(fl); 5076 refill_fl(sc, fl, 64); 5077 if (FL_NOT_RUNNING_LOW(fl) || fl->flags & FL_DOOMED) { 5078 TAILQ_REMOVE(&sc->sfl, fl, link); 5079 fl->flags &= ~FL_STARVING; 5080 } 5081 FL_UNLOCK(fl); 5082 } 5083 5084 if (!TAILQ_EMPTY(&sc->sfl)) 5085 callout_schedule(&sc->sfl_callout, hz / 5); 5086 } 5087 5088 /* 5089 * Release the driver's reference on all buffers in the given freelist. 
Buffers 5090 * with kernel references cannot be freed and will prevent the driver from being 5091 * unloaded safely. 5092 */ 5093 void 5094 free_fl_buffers(struct adapter *sc, struct sge_fl *fl) 5095 { 5096 struct fl_sdesc *sd; 5097 struct cluster_metadata *clm; 5098 int i; 5099 5100 sd = fl->sdesc; 5101 for (i = 0; i < fl->sidx * 8; i++, sd++) { 5102 if (sd->cl == NULL) 5103 continue; 5104 5105 if (sd->nmbuf == 0) 5106 uma_zfree(sc->sge.rx_buf_info[sd->zidx].zone, sd->cl); 5107 else if (fl->flags & FL_BUF_PACKING) { 5108 clm = cl_metadata(sd); 5109 if (atomic_fetchadd_int(&clm->refcount, -1) == 1) { 5110 uma_zfree(sc->sge.rx_buf_info[sd->zidx].zone, 5111 sd->cl); 5112 counter_u64_add(extfree_rels, 1); 5113 } 5114 } 5115 sd->cl = NULL; 5116 } 5117 5118 if (fl->flags & FL_BUF_RESUME) { 5119 m_freem(fl->m0); 5120 fl->flags &= ~FL_BUF_RESUME; 5121 } 5122 } 5123 5124 static inline void 5125 get_pkt_gl(struct mbuf *m, struct sglist *gl) 5126 { 5127 int rc; 5128 5129 M_ASSERTPKTHDR(m); 5130 5131 sglist_reset(gl); 5132 rc = sglist_append_mbuf(gl, m); 5133 if (__predict_false(rc != 0)) { 5134 panic("%s: mbuf %p (%d segs) was vetted earlier but now fails " 5135 "with %d.", __func__, m, mbuf_nsegs(m), rc); 5136 } 5137 5138 KASSERT(gl->sg_nseg == mbuf_nsegs(m), 5139 ("%s: nsegs changed for mbuf %p from %d to %d", __func__, m, 5140 mbuf_nsegs(m), gl->sg_nseg)); 5141 #if 0 /* vm_wr not readily available here. */ 5142 KASSERT(gl->sg_nseg > 0 && gl->sg_nseg <= max_nsegs_allowed(m, vm_wr), 5143 ("%s: %d segments, should have been 1 <= nsegs <= %d", __func__, 5144 gl->sg_nseg, max_nsegs_allowed(m, vm_wr))); 5145 #endif 5146 } 5147 5148 /* 5149 * len16 for a txpkt WR with a GL. Includes the firmware work request header. 5150 */ 5151 static inline u_int 5152 txpkt_len16(u_int nsegs, const u_int extra) 5153 { 5154 u_int n; 5155 5156 MPASS(nsegs > 0); 5157 5158 nsegs--; /* first segment is part of ulptx_sgl */ 5159 n = extra + sizeof(struct fw_eth_tx_pkt_wr) + 5160 sizeof(struct cpl_tx_pkt_core) + 5161 sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1)); 5162 5163 return (howmany(n, 16)); 5164 } 5165 5166 /* 5167 * len16 for a txpkt_vm WR with a GL. Includes the firmware work 5168 * request header. 5169 */ 5170 static inline u_int 5171 txpkt_vm_len16(u_int nsegs, const u_int extra) 5172 { 5173 u_int n; 5174 5175 MPASS(nsegs > 0); 5176 5177 nsegs--; /* first segment is part of ulptx_sgl */ 5178 n = extra + sizeof(struct fw_eth_tx_pkt_vm_wr) + 5179 sizeof(struct cpl_tx_pkt_core) + 5180 sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1)); 5181 5182 return (howmany(n, 16)); 5183 } 5184 5185 static inline void 5186 calculate_mbuf_len16(struct mbuf *m, bool vm_wr) 5187 { 5188 const int lso = sizeof(struct cpl_tx_pkt_lso_core); 5189 const int tnl_lso = sizeof(struct cpl_tx_tnl_lso); 5190 5191 if (vm_wr) { 5192 if (needs_tso(m)) 5193 set_mbuf_len16(m, txpkt_vm_len16(mbuf_nsegs(m), lso)); 5194 else 5195 set_mbuf_len16(m, txpkt_vm_len16(mbuf_nsegs(m), 0)); 5196 return; 5197 } 5198 5199 if (needs_tso(m)) { 5200 if (needs_vxlan_tso(m)) 5201 set_mbuf_len16(m, txpkt_len16(mbuf_nsegs(m), tnl_lso)); 5202 else 5203 set_mbuf_len16(m, txpkt_len16(mbuf_nsegs(m), lso)); 5204 } else 5205 set_mbuf_len16(m, txpkt_len16(mbuf_nsegs(m), 0)); 5206 } 5207 5208 /* 5209 * len16 for a txpkts type 0 WR with a GL. Does not include the firmware work 5210 * request header. 
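 * As a rough worked example, assuming the usual sizes of the structures
 * involved (8-byte ulp_txpkt, 8-byte ulptx_idata, 16-byte cpl_tx_pkt_core,
 * 16-byte ulptx_sgl), a 4-segment packet works out to
 * 8 + 8 + 16 + 16 + 8 * ((3 * 3) / 2 + 1) = 88 bytes, i.e. len16 = 6.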
5211 */ 5212 static inline u_int 5213 txpkts0_len16(u_int nsegs) 5214 { 5215 u_int n; 5216 5217 MPASS(nsegs > 0); 5218 5219 nsegs--; /* first segment is part of ulptx_sgl */ 5220 n = sizeof(struct ulp_txpkt) + sizeof(struct ulptx_idata) + 5221 sizeof(struct cpl_tx_pkt_core) + sizeof(struct ulptx_sgl) + 5222 8 * ((3 * nsegs) / 2 + (nsegs & 1)); 5223 5224 return (howmany(n, 16)); 5225 } 5226 5227 /* 5228 * len16 for a txpkts type 1 WR with a GL. Does not include the firmware work 5229 * request header. 5230 */ 5231 static inline u_int 5232 txpkts1_len16(void) 5233 { 5234 u_int n; 5235 5236 n = sizeof(struct cpl_tx_pkt_core) + sizeof(struct ulptx_sgl); 5237 5238 return (howmany(n, 16)); 5239 } 5240 5241 static inline u_int 5242 imm_payload(u_int ndesc) 5243 { 5244 u_int n; 5245 5246 n = ndesc * EQ_ESIZE - sizeof(struct fw_eth_tx_pkt_wr) - 5247 sizeof(struct cpl_tx_pkt_core); 5248 5249 return (n); 5250 } 5251 5252 static inline uint64_t 5253 csum_to_ctrl(struct adapter *sc, struct mbuf *m) 5254 { 5255 uint64_t ctrl; 5256 int csum_type, l2hlen, l3hlen; 5257 int x, y; 5258 static const int csum_types[3][2] = { 5259 {TX_CSUM_TCPIP, TX_CSUM_TCPIP6}, 5260 {TX_CSUM_UDPIP, TX_CSUM_UDPIP6}, 5261 {TX_CSUM_IP, 0} 5262 }; 5263 5264 M_ASSERTPKTHDR(m); 5265 5266 if (!needs_hwcsum(m)) 5267 return (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS); 5268 5269 MPASS(m->m_pkthdr.l2hlen >= ETHER_HDR_LEN); 5270 MPASS(m->m_pkthdr.l3hlen >= sizeof(struct ip)); 5271 5272 if (needs_vxlan_csum(m)) { 5273 MPASS(m->m_pkthdr.l4hlen > 0); 5274 MPASS(m->m_pkthdr.l5hlen > 0); 5275 MPASS(m->m_pkthdr.inner_l2hlen >= ETHER_HDR_LEN); 5276 MPASS(m->m_pkthdr.inner_l3hlen >= sizeof(struct ip)); 5277 5278 l2hlen = m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen + 5279 m->m_pkthdr.l4hlen + m->m_pkthdr.l5hlen + 5280 m->m_pkthdr.inner_l2hlen - ETHER_HDR_LEN; 5281 l3hlen = m->m_pkthdr.inner_l3hlen; 5282 } else { 5283 l2hlen = m->m_pkthdr.l2hlen - ETHER_HDR_LEN; 5284 l3hlen = m->m_pkthdr.l3hlen; 5285 } 5286 5287 ctrl = 0; 5288 if (!needs_l3_csum(m)) 5289 ctrl |= F_TXPKT_IPCSUM_DIS; 5290 5291 if (m->m_pkthdr.csum_flags & (CSUM_IP_TCP | CSUM_INNER_IP_TCP | 5292 CSUM_IP6_TCP | CSUM_INNER_IP6_TCP)) 5293 x = 0; /* TCP */ 5294 else if (m->m_pkthdr.csum_flags & (CSUM_IP_UDP | CSUM_INNER_IP_UDP | 5295 CSUM_IP6_UDP | CSUM_INNER_IP6_UDP)) 5296 x = 1; /* UDP */ 5297 else 5298 x = 2; 5299 5300 if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_IP_TCP | CSUM_IP_UDP | 5301 CSUM_INNER_IP | CSUM_INNER_IP_TCP | CSUM_INNER_IP_UDP)) 5302 y = 0; /* IPv4 */ 5303 else { 5304 MPASS(m->m_pkthdr.csum_flags & (CSUM_IP6_TCP | CSUM_IP6_UDP | 5305 CSUM_INNER_IP6_TCP | CSUM_INNER_IP6_UDP)); 5306 y = 1; /* IPv6 */ 5307 } 5308 /* 5309 * needs_hwcsum returned true earlier so there must be some kind of 5310 * checksum to calculate. 
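 * x selects the L4 row of csum_types (0 = TCP, 1 = UDP, 2 = IP-only) and y
 * the IP version column; the lone zero entry (IP-only over IPv6) is
 * unreachable because IPv6 has no header checksum, which the MPASS below
 * double-checks.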
5311 */ 5312 csum_type = csum_types[x][y]; 5313 MPASS(csum_type != 0); 5314 if (csum_type == TX_CSUM_IP) 5315 ctrl |= F_TXPKT_L4CSUM_DIS; 5316 ctrl |= V_TXPKT_CSUM_TYPE(csum_type) | V_TXPKT_IPHDR_LEN(l3hlen); 5317 if (chip_id(sc) <= CHELSIO_T5) 5318 ctrl |= V_TXPKT_ETHHDR_LEN(l2hlen); 5319 else 5320 ctrl |= V_T6_TXPKT_ETHHDR_LEN(l2hlen); 5321 5322 return (ctrl); 5323 } 5324 5325 static inline void * 5326 write_lso_cpl(void *cpl, struct mbuf *m0) 5327 { 5328 struct cpl_tx_pkt_lso_core *lso; 5329 uint32_t ctrl; 5330 5331 KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 && 5332 m0->m_pkthdr.l4hlen > 0, 5333 ("%s: mbuf %p needs TSO but missing header lengths", 5334 __func__, m0)); 5335 5336 ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | 5337 F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE | 5338 V_LSO_ETHHDR_LEN((m0->m_pkthdr.l2hlen - ETHER_HDR_LEN) >> 2) | 5339 V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) | 5340 V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2); 5341 if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr)) 5342 ctrl |= F_LSO_IPV6; 5343 5344 lso = cpl; 5345 lso->lso_ctrl = htobe32(ctrl); 5346 lso->ipid_ofst = htobe16(0); 5347 lso->mss = htobe16(m0->m_pkthdr.tso_segsz); 5348 lso->seqno_offset = htobe32(0); 5349 lso->len = htobe32(m0->m_pkthdr.len); 5350 5351 return (lso + 1); 5352 } 5353 5354 static void * 5355 write_tnl_lso_cpl(void *cpl, struct mbuf *m0) 5356 { 5357 struct cpl_tx_tnl_lso *tnl_lso = cpl; 5358 uint32_t ctrl; 5359 5360 KASSERT(m0->m_pkthdr.inner_l2hlen > 0 && 5361 m0->m_pkthdr.inner_l3hlen > 0 && m0->m_pkthdr.inner_l4hlen > 0 && 5362 m0->m_pkthdr.inner_l5hlen > 0, 5363 ("%s: mbuf %p needs VXLAN_TSO but missing inner header lengths", 5364 __func__, m0)); 5365 KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 && 5366 m0->m_pkthdr.l4hlen > 0 && m0->m_pkthdr.l5hlen > 0, 5367 ("%s: mbuf %p needs VXLAN_TSO but missing outer header lengths", 5368 __func__, m0)); 5369 5370 /* Outer headers. */ 5371 ctrl = V_CPL_TX_TNL_LSO_OPCODE(CPL_TX_TNL_LSO) | 5372 F_CPL_TX_TNL_LSO_FIRST | F_CPL_TX_TNL_LSO_LAST | 5373 V_CPL_TX_TNL_LSO_ETHHDRLENOUT( 5374 (m0->m_pkthdr.l2hlen - ETHER_HDR_LEN) >> 2) | 5375 V_CPL_TX_TNL_LSO_IPHDRLENOUT(m0->m_pkthdr.l3hlen >> 2) | 5376 F_CPL_TX_TNL_LSO_IPLENSETOUT; 5377 if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr)) 5378 ctrl |= F_CPL_TX_TNL_LSO_IPV6OUT; 5379 else { 5380 ctrl |= F_CPL_TX_TNL_LSO_IPHDRCHKOUT | 5381 F_CPL_TX_TNL_LSO_IPIDINCOUT; 5382 } 5383 tnl_lso->op_to_IpIdSplitOut = htobe32(ctrl); 5384 tnl_lso->IpIdOffsetOut = 0; 5385 tnl_lso->UdpLenSetOut_to_TnlHdrLen = 5386 htobe16(F_CPL_TX_TNL_LSO_UDPCHKCLROUT | 5387 F_CPL_TX_TNL_LSO_UDPLENSETOUT | 5388 V_CPL_TX_TNL_LSO_TNLHDRLEN(m0->m_pkthdr.l2hlen + 5389 m0->m_pkthdr.l3hlen + m0->m_pkthdr.l4hlen + 5390 m0->m_pkthdr.l5hlen) | 5391 V_CPL_TX_TNL_LSO_TNLTYPE(TX_TNL_TYPE_VXLAN)); 5392 tnl_lso->r1 = 0; 5393 5394 /* Inner headers. 
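 * As with the outer headers above, the header lengths are programmed in
 * 32-bit words, hence the >> 2 shifts.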
*/ 5395 ctrl = V_CPL_TX_TNL_LSO_ETHHDRLEN( 5396 (m0->m_pkthdr.inner_l2hlen - ETHER_HDR_LEN) >> 2) | 5397 V_CPL_TX_TNL_LSO_IPHDRLEN(m0->m_pkthdr.inner_l3hlen >> 2) | 5398 V_CPL_TX_TNL_LSO_TCPHDRLEN(m0->m_pkthdr.inner_l4hlen >> 2); 5399 if (m0->m_pkthdr.inner_l3hlen == sizeof(struct ip6_hdr)) 5400 ctrl |= F_CPL_TX_TNL_LSO_IPV6; 5401 tnl_lso->Flow_to_TcpHdrLen = htobe32(ctrl); 5402 tnl_lso->IpIdOffset = 0; 5403 tnl_lso->IpIdSplit_to_Mss = 5404 htobe16(V_CPL_TX_TNL_LSO_MSS(m0->m_pkthdr.tso_segsz)); 5405 tnl_lso->TCPSeqOffset = 0; 5406 tnl_lso->EthLenOffset_Size = 5407 htobe32(V_CPL_TX_TNL_LSO_SIZE(m0->m_pkthdr.len)); 5408 5409 return (tnl_lso + 1); 5410 } 5411 5412 #define VM_TX_L2HDR_LEN 16 /* ethmacdst to vlantci */ 5413 5414 /* 5415 * Write a VM txpkt WR for this packet to the hardware descriptors, update the 5416 * software descriptor, and advance the pidx. It is guaranteed that enough 5417 * descriptors are available. 5418 * 5419 * The return value is the # of hardware descriptors used. 5420 */ 5421 static u_int 5422 write_txpkt_vm_wr(struct adapter *sc, struct sge_txq *txq, struct mbuf *m0) 5423 { 5424 struct sge_eq *eq; 5425 struct fw_eth_tx_pkt_vm_wr *wr; 5426 struct tx_sdesc *txsd; 5427 struct cpl_tx_pkt_core *cpl; 5428 uint32_t ctrl; /* used in many unrelated places */ 5429 uint64_t ctrl1; 5430 int len16, ndesc, pktlen; 5431 caddr_t dst; 5432 5433 TXQ_LOCK_ASSERT_OWNED(txq); 5434 M_ASSERTPKTHDR(m0); 5435 5436 len16 = mbuf_len16(m0); 5437 pktlen = m0->m_pkthdr.len; 5438 ctrl = sizeof(struct cpl_tx_pkt_core); 5439 if (needs_tso(m0)) 5440 ctrl += sizeof(struct cpl_tx_pkt_lso_core); 5441 ndesc = tx_len16_to_desc(len16); 5442 5443 /* Firmware work request header */ 5444 eq = &txq->eq; 5445 wr = (void *)&eq->desc[eq->pidx]; 5446 wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_VM_WR) | 5447 V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl)); 5448 5449 ctrl = V_FW_WR_LEN16(len16); 5450 wr->equiq_to_len16 = htobe32(ctrl); 5451 wr->r3[0] = 0; 5452 wr->r3[1] = 0; 5453 5454 /* 5455 * Copy over ethmacdst, ethmacsrc, ethtype, and vlantci. 5456 * vlantci is ignored unless the ethtype is 0x8100, so it's 5457 * simpler to always copy it rather than making it 5458 * conditional. Also, it seems that we do not have to set 5459 * vlantci or fake the ethtype when doing VLAN tag insertion. 5460 */ 5461 m_copydata(m0, 0, VM_TX_L2HDR_LEN, wr->ethmacdst); 5462 5463 if (needs_tso(m0)) { 5464 cpl = write_lso_cpl(wr + 1, m0); 5465 txq->tso_wrs++; 5466 } else 5467 cpl = (void *)(wr + 1); 5468 5469 /* Checksum offload */ 5470 ctrl1 = csum_to_ctrl(sc, m0); 5471 if (ctrl1 != (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS)) 5472 txq->txcsum++; /* some hardware assistance provided */ 5473 5474 /* VLAN tag insertion */ 5475 if (needs_vlan_insertion(m0)) { 5476 ctrl1 |= F_TXPKT_VLAN_VLD | 5477 V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag); 5478 txq->vlan_insertion++; 5479 } 5480 5481 /* CPL header */ 5482 cpl->ctrl0 = txq->cpl_ctrl0; 5483 cpl->pack = 0; 5484 cpl->len = htobe16(pktlen); 5485 cpl->ctrl1 = htobe64(ctrl1); 5486 5487 /* SGL */ 5488 dst = (void *)(cpl + 1); 5489 5490 /* 5491 * A packet using TSO will use up an entire descriptor for the 5492 * firmware work request header, LSO CPL, and TX_PKT_XT CPL. 5493 * If this descriptor is the last descriptor in the ring, wrap 5494 * around to the front of the ring explicitly for the start of 5495 * the sgl. 
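 * &eq->desc[eq->sidx] is one past the last usable descriptor (where the
 * status page begins), so reaching it means the SGL has to start back at
 * descriptor 0.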
5496 */ 5497 if (dst == (void *)&eq->desc[eq->sidx]) { 5498 dst = (void *)&eq->desc[0]; 5499 write_gl_to_txd(txq, m0, &dst, 0); 5500 } else 5501 write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx); 5502 txq->sgl_wrs++; 5503 txq->txpkt_wrs++; 5504 5505 txsd = &txq->sdesc[eq->pidx]; 5506 txsd->m = m0; 5507 txsd->desc_used = ndesc; 5508 5509 return (ndesc); 5510 } 5511 5512 /* 5513 * Write a raw WR to the hardware descriptors, update the software 5514 * descriptor, and advance the pidx. It is guaranteed that enough 5515 * descriptors are available. 5516 * 5517 * The return value is the # of hardware descriptors used. 5518 */ 5519 static u_int 5520 write_raw_wr(struct sge_txq *txq, void *wr, struct mbuf *m0, u_int available) 5521 { 5522 struct sge_eq *eq = &txq->eq; 5523 struct tx_sdesc *txsd; 5524 struct mbuf *m; 5525 caddr_t dst; 5526 int len16, ndesc; 5527 5528 len16 = mbuf_len16(m0); 5529 ndesc = tx_len16_to_desc(len16); 5530 MPASS(ndesc <= available); 5531 5532 dst = wr; 5533 for (m = m0; m != NULL; m = m->m_next) 5534 copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len); 5535 5536 txq->raw_wrs++; 5537 5538 txsd = &txq->sdesc[eq->pidx]; 5539 txsd->m = m0; 5540 txsd->desc_used = ndesc; 5541 5542 return (ndesc); 5543 } 5544 5545 /* 5546 * Write a txpkt WR for this packet to the hardware descriptors, update the 5547 * software descriptor, and advance the pidx. It is guaranteed that enough 5548 * descriptors are available. 5549 * 5550 * The return value is the # of hardware descriptors used. 5551 */ 5552 static u_int 5553 write_txpkt_wr(struct adapter *sc, struct sge_txq *txq, struct mbuf *m0, 5554 u_int available) 5555 { 5556 struct sge_eq *eq; 5557 struct fw_eth_tx_pkt_wr *wr; 5558 struct tx_sdesc *txsd; 5559 struct cpl_tx_pkt_core *cpl; 5560 uint32_t ctrl; /* used in many unrelated places */ 5561 uint64_t ctrl1; 5562 int len16, ndesc, pktlen, nsegs; 5563 caddr_t dst; 5564 5565 TXQ_LOCK_ASSERT_OWNED(txq); 5566 M_ASSERTPKTHDR(m0); 5567 5568 len16 = mbuf_len16(m0); 5569 nsegs = mbuf_nsegs(m0); 5570 pktlen = m0->m_pkthdr.len; 5571 ctrl = sizeof(struct cpl_tx_pkt_core); 5572 if (needs_tso(m0)) { 5573 if (needs_vxlan_tso(m0)) 5574 ctrl += sizeof(struct cpl_tx_tnl_lso); 5575 else 5576 ctrl += sizeof(struct cpl_tx_pkt_lso_core); 5577 } else if (!(mbuf_cflags(m0) & MC_NOMAP) && pktlen <= imm_payload(2) && 5578 available >= 2) { 5579 /* Immediate data. Recalculate len16 and set nsegs to 0. 
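 * imm_payload(2) is the payload that fits in two descriptors after the WR
 * and CPL headers, so the whole frame is copied inline instead of being
 * described by an SGL.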
*/ 5580 ctrl += pktlen; 5581 len16 = howmany(sizeof(struct fw_eth_tx_pkt_wr) + 5582 sizeof(struct cpl_tx_pkt_core) + pktlen, 16); 5583 nsegs = 0; 5584 } 5585 ndesc = tx_len16_to_desc(len16); 5586 MPASS(ndesc <= available); 5587 5588 /* Firmware work request header */ 5589 eq = &txq->eq; 5590 wr = (void *)&eq->desc[eq->pidx]; 5591 wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) | 5592 V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl)); 5593 5594 ctrl = V_FW_WR_LEN16(len16); 5595 wr->equiq_to_len16 = htobe32(ctrl); 5596 wr->r3 = 0; 5597 5598 if (needs_tso(m0)) { 5599 if (needs_vxlan_tso(m0)) { 5600 cpl = write_tnl_lso_cpl(wr + 1, m0); 5601 txq->vxlan_tso_wrs++; 5602 } else { 5603 cpl = write_lso_cpl(wr + 1, m0); 5604 txq->tso_wrs++; 5605 } 5606 } else 5607 cpl = (void *)(wr + 1); 5608 5609 /* Checksum offload */ 5610 ctrl1 = csum_to_ctrl(sc, m0); 5611 if (ctrl1 != (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS)) { 5612 /* some hardware assistance provided */ 5613 if (needs_vxlan_csum(m0)) 5614 txq->vxlan_txcsum++; 5615 else 5616 txq->txcsum++; 5617 } 5618 5619 /* VLAN tag insertion */ 5620 if (needs_vlan_insertion(m0)) { 5621 ctrl1 |= F_TXPKT_VLAN_VLD | 5622 V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag); 5623 txq->vlan_insertion++; 5624 } 5625 5626 /* CPL header */ 5627 cpl->ctrl0 = txq->cpl_ctrl0; 5628 cpl->pack = 0; 5629 cpl->len = htobe16(pktlen); 5630 cpl->ctrl1 = htobe64(ctrl1); 5631 5632 /* SGL */ 5633 dst = (void *)(cpl + 1); 5634 if (__predict_false((uintptr_t)dst == (uintptr_t)&eq->desc[eq->sidx])) 5635 dst = (caddr_t)&eq->desc[0]; 5636 if (nsegs > 0) { 5637 5638 write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx); 5639 txq->sgl_wrs++; 5640 } else { 5641 struct mbuf *m; 5642 5643 for (m = m0; m != NULL; m = m->m_next) { 5644 copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len); 5645 #ifdef INVARIANTS 5646 pktlen -= m->m_len; 5647 #endif 5648 } 5649 #ifdef INVARIANTS 5650 KASSERT(pktlen == 0, ("%s: %d bytes left.", __func__, pktlen)); 5651 #endif 5652 txq->imm_wrs++; 5653 } 5654 5655 txq->txpkt_wrs++; 5656 5657 txsd = &txq->sdesc[eq->pidx]; 5658 txsd->m = m0; 5659 txsd->desc_used = ndesc; 5660 5661 return (ndesc); 5662 } 5663 5664 static inline bool 5665 cmp_l2hdr(struct txpkts *txp, struct mbuf *m) 5666 { 5667 int len; 5668 5669 MPASS(txp->npkt > 0); 5670 MPASS(m->m_len >= VM_TX_L2HDR_LEN); 5671 5672 if (txp->ethtype == be16toh(ETHERTYPE_VLAN)) 5673 len = VM_TX_L2HDR_LEN; 5674 else 5675 len = sizeof(struct ether_header); 5676 5677 return (memcmp(m->m_data, &txp->ethmacdst[0], len) != 0); 5678 } 5679 5680 static inline void 5681 save_l2hdr(struct txpkts *txp, struct mbuf *m) 5682 { 5683 MPASS(m->m_len >= VM_TX_L2HDR_LEN); 5684 5685 memcpy(&txp->ethmacdst[0], mtod(m, const void *), VM_TX_L2HDR_LEN); 5686 } 5687 5688 static int 5689 add_to_txpkts_vf(struct adapter *sc, struct sge_txq *txq, struct mbuf *m, 5690 int avail, bool *send) 5691 { 5692 struct txpkts *txp = &txq->txp; 5693 5694 /* Cannot have TSO and coalesce at the same time. 
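 * The coalesced WR formats carry only a cpl_tx_pkt_core per frame, with no
 * room for an LSO CPL, so TSO frames have to go out as individual txpkt WRs.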
*/ 5695 if (cannot_use_txpkts(m)) { 5696 cannot_coalesce: 5697 *send = txp->npkt > 0; 5698 return (EINVAL); 5699 } 5700 5701 /* VF allows coalescing of type 1 (1 GL) only */ 5702 if (mbuf_nsegs(m) > 1) 5703 goto cannot_coalesce; 5704 5705 *send = false; 5706 if (txp->npkt > 0) { 5707 MPASS(tx_len16_to_desc(txp->len16) <= avail); 5708 MPASS(txp->npkt < txp->max_npkt); 5709 MPASS(txp->wr_type == 1); /* VF supports type 1 only */ 5710 5711 if (tx_len16_to_desc(txp->len16 + txpkts1_len16()) > avail) { 5712 retry_after_send: 5713 *send = true; 5714 return (EAGAIN); 5715 } 5716 if (m->m_pkthdr.len + txp->plen > 65535) 5717 goto retry_after_send; 5718 if (cmp_l2hdr(txp, m)) 5719 goto retry_after_send; 5720 5721 txp->len16 += txpkts1_len16(); 5722 txp->plen += m->m_pkthdr.len; 5723 txp->mb[txp->npkt++] = m; 5724 if (txp->npkt == txp->max_npkt) 5725 *send = true; 5726 } else { 5727 txp->len16 = howmany(sizeof(struct fw_eth_tx_pkts_vm_wr), 16) + 5728 txpkts1_len16(); 5729 if (tx_len16_to_desc(txp->len16) > avail) 5730 goto cannot_coalesce; 5731 txp->npkt = 1; 5732 txp->wr_type = 1; 5733 txp->plen = m->m_pkthdr.len; 5734 txp->mb[0] = m; 5735 save_l2hdr(txp, m); 5736 } 5737 return (0); 5738 } 5739 5740 static int 5741 add_to_txpkts_pf(struct adapter *sc, struct sge_txq *txq, struct mbuf *m, 5742 int avail, bool *send) 5743 { 5744 struct txpkts *txp = &txq->txp; 5745 int nsegs; 5746 5747 MPASS(!(sc->flags & IS_VF)); 5748 5749 /* Cannot have TSO and coalesce at the same time. */ 5750 if (cannot_use_txpkts(m)) { 5751 cannot_coalesce: 5752 *send = txp->npkt > 0; 5753 return (EINVAL); 5754 } 5755 5756 *send = false; 5757 nsegs = mbuf_nsegs(m); 5758 if (txp->npkt == 0) { 5759 if (m->m_pkthdr.len > 65535) 5760 goto cannot_coalesce; 5761 if (nsegs > 1) { 5762 txp->wr_type = 0; 5763 txp->len16 = 5764 howmany(sizeof(struct fw_eth_tx_pkts_wr), 16) + 5765 txpkts0_len16(nsegs); 5766 } else { 5767 txp->wr_type = 1; 5768 txp->len16 = 5769 howmany(sizeof(struct fw_eth_tx_pkts_wr), 16) + 5770 txpkts1_len16(); 5771 } 5772 if (tx_len16_to_desc(txp->len16) > avail) 5773 goto cannot_coalesce; 5774 txp->npkt = 1; 5775 txp->plen = m->m_pkthdr.len; 5776 txp->mb[0] = m; 5777 } else { 5778 MPASS(tx_len16_to_desc(txp->len16) <= avail); 5779 MPASS(txp->npkt < txp->max_npkt); 5780 5781 if (m->m_pkthdr.len + txp->plen > 65535) { 5782 retry_after_send: 5783 *send = true; 5784 return (EAGAIN); 5785 } 5786 5787 MPASS(txp->wr_type == 0 || txp->wr_type == 1); 5788 if (txp->wr_type == 0) { 5789 if (tx_len16_to_desc(txp->len16 + 5790 txpkts0_len16(nsegs)) > min(avail, SGE_MAX_WR_NDESC)) 5791 goto retry_after_send; 5792 txp->len16 += txpkts0_len16(nsegs); 5793 } else { 5794 if (nsegs != 1) 5795 goto retry_after_send; 5796 if (tx_len16_to_desc(txp->len16 + txpkts1_len16()) > 5797 avail) 5798 goto retry_after_send; 5799 txp->len16 += txpkts1_len16(); 5800 } 5801 5802 txp->plen += m->m_pkthdr.len; 5803 txp->mb[txp->npkt++] = m; 5804 if (txp->npkt == txp->max_npkt) 5805 *send = true; 5806 } 5807 return (0); 5808 } 5809 5810 /* 5811 * Write a txpkts WR for the packets in txp to the hardware descriptors, update 5812 * the software descriptor, and advance the pidx. It is guaranteed that enough 5813 * descriptors are available. 5814 * 5815 * The return value is the # of hardware descriptors used. 
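 * For a type 0 WR each frame is emitted as ulp_txpkt + ulptx_idata +
 * cpl_tx_pkt_core followed by its SGL; for a type 1 WR only the CPL and SGL
 * are written per frame.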
5816 */ 5817 static u_int 5818 write_txpkts_wr(struct adapter *sc, struct sge_txq *txq) 5819 { 5820 const struct txpkts *txp = &txq->txp; 5821 struct sge_eq *eq = &txq->eq; 5822 struct fw_eth_tx_pkts_wr *wr; 5823 struct tx_sdesc *txsd; 5824 struct cpl_tx_pkt_core *cpl; 5825 uint64_t ctrl1; 5826 int ndesc, i, checkwrap; 5827 struct mbuf *m, *last; 5828 void *flitp; 5829 5830 TXQ_LOCK_ASSERT_OWNED(txq); 5831 MPASS(txp->npkt > 0); 5832 MPASS(txp->len16 <= howmany(SGE_MAX_WR_LEN, 16)); 5833 5834 wr = (void *)&eq->desc[eq->pidx]; 5835 wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)); 5836 wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(txp->len16)); 5837 wr->plen = htobe16(txp->plen); 5838 wr->npkt = txp->npkt; 5839 wr->r3 = 0; 5840 wr->type = txp->wr_type; 5841 flitp = wr + 1; 5842 5843 /* 5844 * At this point we are 16B into a hardware descriptor. If checkwrap is 5845 * set then we know the WR is going to wrap around somewhere. We'll 5846 * check for that at appropriate points. 5847 */ 5848 ndesc = tx_len16_to_desc(txp->len16); 5849 last = NULL; 5850 checkwrap = eq->sidx - ndesc < eq->pidx; 5851 for (i = 0; i < txp->npkt; i++) { 5852 m = txp->mb[i]; 5853 if (txp->wr_type == 0) { 5854 struct ulp_txpkt *ulpmc; 5855 struct ulptx_idata *ulpsc; 5856 5857 /* ULP master command */ 5858 ulpmc = flitp; 5859 ulpmc->cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) | 5860 V_ULP_TXPKT_DEST(0) | V_ULP_TXPKT_FID(eq->iqid)); 5861 ulpmc->len = htobe32(txpkts0_len16(mbuf_nsegs(m))); 5862 5863 /* ULP subcommand */ 5864 ulpsc = (void *)(ulpmc + 1); 5865 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) | 5866 F_ULP_TX_SC_MORE); 5867 ulpsc->len = htobe32(sizeof(struct cpl_tx_pkt_core)); 5868 5869 cpl = (void *)(ulpsc + 1); 5870 if (checkwrap && 5871 (uintptr_t)cpl == (uintptr_t)&eq->desc[eq->sidx]) 5872 cpl = (void *)&eq->desc[0]; 5873 } else { 5874 cpl = flitp; 5875 } 5876 5877 /* Checksum offload */ 5878 ctrl1 = csum_to_ctrl(sc, m); 5879 if (ctrl1 != (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS)) { 5880 /* some hardware assistance provided */ 5881 if (needs_vxlan_csum(m)) 5882 txq->vxlan_txcsum++; 5883 else 5884 txq->txcsum++; 5885 } 5886 5887 /* VLAN tag insertion */ 5888 if (needs_vlan_insertion(m)) { 5889 ctrl1 |= F_TXPKT_VLAN_VLD | 5890 V_TXPKT_VLAN(m->m_pkthdr.ether_vtag); 5891 txq->vlan_insertion++; 5892 } 5893 5894 /* CPL header */ 5895 cpl->ctrl0 = txq->cpl_ctrl0; 5896 cpl->pack = 0; 5897 cpl->len = htobe16(m->m_pkthdr.len); 5898 cpl->ctrl1 = htobe64(ctrl1); 5899 5900 flitp = cpl + 1; 5901 if (checkwrap && 5902 (uintptr_t)flitp == (uintptr_t)&eq->desc[eq->sidx]) 5903 flitp = (void *)&eq->desc[0]; 5904 5905 write_gl_to_txd(txq, m, (caddr_t *)(&flitp), checkwrap); 5906 5907 if (last != NULL) 5908 last->m_nextpkt = m; 5909 last = m; 5910 } 5911 5912 txq->sgl_wrs++; 5913 if (txp->wr_type == 0) { 5914 txq->txpkts0_pkts += txp->npkt; 5915 txq->txpkts0_wrs++; 5916 } else { 5917 txq->txpkts1_pkts += txp->npkt; 5918 txq->txpkts1_wrs++; 5919 } 5920 5921 txsd = &txq->sdesc[eq->pidx]; 5922 txsd->m = txp->mb[0]; 5923 txsd->desc_used = ndesc; 5924 5925 return (ndesc); 5926 } 5927 5928 static u_int 5929 write_txpkts_vm_wr(struct adapter *sc, struct sge_txq *txq) 5930 { 5931 const struct txpkts *txp = &txq->txp; 5932 struct sge_eq *eq = &txq->eq; 5933 struct fw_eth_tx_pkts_vm_wr *wr; 5934 struct tx_sdesc *txsd; 5935 struct cpl_tx_pkt_core *cpl; 5936 uint64_t ctrl1; 5937 int ndesc, i; 5938 struct mbuf *m, *last; 5939 void *flitp; 5940 5941 TXQ_LOCK_ASSERT_OWNED(txq); 5942 MPASS(txp->npkt > 0); 5943 MPASS(txp->wr_type == 1); /* 
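add_to_txpkts_vf only ever queues single-segment frames, hence: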
VF supports type 1 only */ 5944 MPASS(txp->mb[0] != NULL); 5945 MPASS(txp->len16 <= howmany(SGE_MAX_WR_LEN, 16)); 5946 5947 wr = (void *)&eq->desc[eq->pidx]; 5948 wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_VM_WR)); 5949 wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(txp->len16)); 5950 wr->r3 = 0; 5951 wr->plen = htobe16(txp->plen); 5952 wr->npkt = txp->npkt; 5953 wr->r4 = 0; 5954 memcpy(&wr->ethmacdst[0], &txp->ethmacdst[0], 16); 5955 flitp = wr + 1; 5956 5957 /* 5958 * At this point we are 32B into a hardware descriptor. Each mbuf in 5959 * the WR will take 32B so we check for the end of the descriptor ring 5960 * before writing odd mbufs (mb[1], 3, 5, ..) 5961 */ 5962 ndesc = tx_len16_to_desc(txp->len16); 5963 last = NULL; 5964 for (i = 0; i < txp->npkt; i++) { 5965 m = txp->mb[i]; 5966 if (i & 1 && (uintptr_t)flitp == (uintptr_t)&eq->desc[eq->sidx]) 5967 flitp = &eq->desc[0]; 5968 cpl = flitp; 5969 5970 /* Checksum offload */ 5971 ctrl1 = csum_to_ctrl(sc, m); 5972 if (ctrl1 != (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS)) 5973 txq->txcsum++; /* some hardware assistance provided */ 5974 5975 /* VLAN tag insertion */ 5976 if (needs_vlan_insertion(m)) { 5977 ctrl1 |= F_TXPKT_VLAN_VLD | 5978 V_TXPKT_VLAN(m->m_pkthdr.ether_vtag); 5979 txq->vlan_insertion++; 5980 } 5981 5982 /* CPL header */ 5983 cpl->ctrl0 = txq->cpl_ctrl0; 5984 cpl->pack = 0; 5985 cpl->len = htobe16(m->m_pkthdr.len); 5986 cpl->ctrl1 = htobe64(ctrl1); 5987 5988 flitp = cpl + 1; 5989 MPASS(mbuf_nsegs(m) == 1); 5990 write_gl_to_txd(txq, m, (caddr_t *)(&flitp), 0); 5991 5992 if (last != NULL) 5993 last->m_nextpkt = m; 5994 last = m; 5995 } 5996 5997 txq->sgl_wrs++; 5998 txq->txpkts1_pkts += txp->npkt; 5999 txq->txpkts1_wrs++; 6000 6001 txsd = &txq->sdesc[eq->pidx]; 6002 txsd->m = txp->mb[0]; 6003 txsd->desc_used = ndesc; 6004 6005 return (ndesc); 6006 } 6007 6008 /* 6009 * If the SGL ends on an address that is not 16 byte aligned, this function will 6010 * add a 0 filled flit at the end. 6011 */ 6012 static void 6013 write_gl_to_txd(struct sge_txq *txq, struct mbuf *m, caddr_t *to, int checkwrap) 6014 { 6015 struct sge_eq *eq = &txq->eq; 6016 struct sglist *gl = txq->gl; 6017 struct sglist_seg *seg; 6018 __be64 *flitp, *wrap; 6019 struct ulptx_sgl *usgl; 6020 int i, nflits, nsegs; 6021 6022 KASSERT(((uintptr_t)(*to) & 0xf) == 0, 6023 ("%s: SGL must start at a 16 byte boundary: %p", __func__, *to)); 6024 MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]); 6025 MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]); 6026 6027 get_pkt_gl(m, gl); 6028 nsegs = gl->sg_nseg; 6029 MPASS(nsegs > 0); 6030 6031 nflits = (3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1) + 2; 6032 flitp = (__be64 *)(*to); 6033 wrap = (__be64 *)(&eq->desc[eq->sidx]); 6034 seg = &gl->sg_segs[0]; 6035 usgl = (void *)flitp; 6036 6037 /* 6038 * We start at a 16 byte boundary somewhere inside the tx descriptor 6039 * ring, so we're at least 16 bytes away from the status page. There is 6040 * no chance of a wrap around in the middle of usgl (which is 16 bytes). 
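 * The nflits computed above is 2 flits for the ulptx_sgl header (cmd_nsge,
 * len0, addr0), 3 flits for each full pair of remaining segments, and 2 flits
 * for a trailing odd segment (12 bytes rounded up to a whole flit); e.g.
 * nsegs = 3 needs 2 + 3 = 5 flits and nsegs = 4 needs 2 + 3 + 2 = 7.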
6041 */ 6042 6043 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | 6044 V_ULPTX_NSGE(nsegs)); 6045 usgl->len0 = htobe32(seg->ss_len); 6046 usgl->addr0 = htobe64(seg->ss_paddr); 6047 seg++; 6048 6049 if (checkwrap == 0 || (uintptr_t)(flitp + nflits) <= (uintptr_t)wrap) { 6050 6051 /* Won't wrap around at all */ 6052 6053 for (i = 0; i < nsegs - 1; i++, seg++) { 6054 usgl->sge[i / 2].len[i & 1] = htobe32(seg->ss_len); 6055 usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ss_paddr); 6056 } 6057 if (i & 1) 6058 usgl->sge[i / 2].len[1] = htobe32(0); 6059 flitp += nflits; 6060 } else { 6061 6062 /* Will wrap somewhere in the rest of the SGL */ 6063 6064 /* 2 flits already written, write the rest flit by flit */ 6065 flitp = (void *)(usgl + 1); 6066 for (i = 0; i < nflits - 2; i++) { 6067 if (flitp == wrap) 6068 flitp = (void *)eq->desc; 6069 *flitp++ = get_flit(seg, nsegs - 1, i); 6070 } 6071 } 6072 6073 if (nflits & 1) { 6074 MPASS(((uintptr_t)flitp) & 0xf); 6075 *flitp++ = 0; 6076 } 6077 6078 MPASS((((uintptr_t)flitp) & 0xf) == 0); 6079 if (__predict_false(flitp == wrap)) 6080 *to = (void *)eq->desc; 6081 else 6082 *to = (void *)flitp; 6083 } 6084 6085 static inline void 6086 copy_to_txd(struct sge_eq *eq, caddr_t from, caddr_t *to, int len) 6087 { 6088 6089 MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]); 6090 MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]); 6091 6092 if (__predict_true((uintptr_t)(*to) + len <= 6093 (uintptr_t)&eq->desc[eq->sidx])) { 6094 bcopy(from, *to, len); 6095 (*to) += len; 6096 } else { 6097 int portion = (uintptr_t)&eq->desc[eq->sidx] - (uintptr_t)(*to); 6098 6099 bcopy(from, *to, portion); 6100 from += portion; 6101 portion = len - portion; /* remaining */ 6102 bcopy(from, (void *)eq->desc, portion); 6103 (*to) = (caddr_t)eq->desc + portion; 6104 } 6105 } 6106 6107 static inline void 6108 ring_eq_db(struct adapter *sc, struct sge_eq *eq, u_int n) 6109 { 6110 u_int db; 6111 6112 MPASS(n > 0); 6113 6114 db = eq->doorbells; 6115 if (n > 1) 6116 clrbit(&db, DOORBELL_WCWR); 6117 wmb(); 6118 6119 switch (ffs(db) - 1) { 6120 case DOORBELL_UDB: 6121 *eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(n)); 6122 break; 6123 6124 case DOORBELL_WCWR: { 6125 volatile uint64_t *dst, *src; 6126 int i; 6127 6128 /* 6129 * Queues whose 128B doorbell segment fits in the page do not 6130 * use relative qid (udb_qid is always 0). Only queues with 6131 * doorbell segments can do WCWR. 
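 * The WCWR case below copies the entire 64B descriptor (a single work request,
 * hence the n == 1 in the assertion) into the write-combined doorbell region
 * instead of just writing a PIDX increment.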
6132 */ 6133 KASSERT(eq->udb_qid == 0 && n == 1, 6134 ("%s: inappropriate doorbell (0x%x, %d, %d) for eq %p", 6135 __func__, eq->doorbells, n, eq->dbidx, eq)); 6136 6137 dst = (volatile void *)((uintptr_t)eq->udb + UDBS_WR_OFFSET - 6138 UDBS_DB_OFFSET); 6139 i = eq->dbidx; 6140 src = (void *)&eq->desc[i]; 6141 while (src != (void *)&eq->desc[i + 1]) 6142 *dst++ = *src++; 6143 wmb(); 6144 break; 6145 } 6146 6147 case DOORBELL_UDBWC: 6148 *eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(n)); 6149 wmb(); 6150 break; 6151 6152 case DOORBELL_KDB: 6153 t4_write_reg(sc, sc->sge_kdoorbell_reg, 6154 V_QID(eq->cntxt_id) | V_PIDX(n)); 6155 break; 6156 } 6157 6158 IDXINCR(eq->dbidx, n, eq->sidx); 6159 } 6160 6161 static inline u_int 6162 reclaimable_tx_desc(struct sge_eq *eq) 6163 { 6164 uint16_t hw_cidx; 6165 6166 hw_cidx = read_hw_cidx(eq); 6167 return (IDXDIFF(hw_cidx, eq->cidx, eq->sidx)); 6168 } 6169 6170 static inline u_int 6171 total_available_tx_desc(struct sge_eq *eq) 6172 { 6173 uint16_t hw_cidx, pidx; 6174 6175 hw_cidx = read_hw_cidx(eq); 6176 pidx = eq->pidx; 6177 6178 if (pidx == hw_cidx) 6179 return (eq->sidx - 1); 6180 else 6181 return (IDXDIFF(hw_cidx, pidx, eq->sidx) - 1); 6182 } 6183 6184 static inline uint16_t 6185 read_hw_cidx(struct sge_eq *eq) 6186 { 6187 struct sge_qstat *spg = (void *)&eq->desc[eq->sidx]; 6188 uint16_t cidx = spg->cidx; /* stable snapshot */ 6189 6190 return (be16toh(cidx)); 6191 } 6192 6193 /* 6194 * Reclaim 'n' descriptors approximately. 6195 */ 6196 static u_int 6197 reclaim_tx_descs(struct sge_txq *txq, u_int n) 6198 { 6199 struct tx_sdesc *txsd; 6200 struct sge_eq *eq = &txq->eq; 6201 u_int can_reclaim, reclaimed; 6202 6203 TXQ_LOCK_ASSERT_OWNED(txq); 6204 MPASS(n > 0); 6205 6206 reclaimed = 0; 6207 can_reclaim = reclaimable_tx_desc(eq); 6208 while (can_reclaim && reclaimed < n) { 6209 int ndesc; 6210 struct mbuf *m, *nextpkt; 6211 6212 txsd = &txq->sdesc[eq->cidx]; 6213 ndesc = txsd->desc_used; 6214 6215 /* Firmware doesn't return "partial" credits. 
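Credits always come back in units of whole work requests, so a tx_sdesc is reclaimed all at once.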
*/ 6216 KASSERT(can_reclaim >= ndesc, 6217 ("%s: unexpected number of credits: %d, %d", 6218 __func__, can_reclaim, ndesc)); 6219 KASSERT(ndesc != 0, 6220 ("%s: descriptor with no credits: cidx %d", 6221 __func__, eq->cidx)); 6222 6223 for (m = txsd->m; m != NULL; m = nextpkt) { 6224 nextpkt = m->m_nextpkt; 6225 m->m_nextpkt = NULL; 6226 m_freem(m); 6227 } 6228 reclaimed += ndesc; 6229 can_reclaim -= ndesc; 6230 IDXINCR(eq->cidx, ndesc, eq->sidx); 6231 } 6232 6233 return (reclaimed); 6234 } 6235 6236 static void 6237 tx_reclaim(void *arg, int n) 6238 { 6239 struct sge_txq *txq = arg; 6240 struct sge_eq *eq = &txq->eq; 6241 6242 do { 6243 if (TXQ_TRYLOCK(txq) == 0) 6244 break; 6245 n = reclaim_tx_descs(txq, 32); 6246 if (eq->cidx == eq->pidx) 6247 eq->equeqidx = eq->pidx; 6248 TXQ_UNLOCK(txq); 6249 } while (n > 0); 6250 } 6251 6252 static __be64 6253 get_flit(struct sglist_seg *segs, int nsegs, int idx) 6254 { 6255 int i = (idx / 3) * 2; 6256 6257 switch (idx % 3) { 6258 case 0: { 6259 uint64_t rc; 6260 6261 rc = (uint64_t)segs[i].ss_len << 32; 6262 if (i + 1 < nsegs) 6263 rc |= (uint64_t)(segs[i + 1].ss_len); 6264 6265 return (htobe64(rc)); 6266 } 6267 case 1: 6268 return (htobe64(segs[i].ss_paddr)); 6269 case 2: 6270 return (htobe64(segs[i + 1].ss_paddr)); 6271 } 6272 6273 return (0); 6274 } 6275 6276 static int 6277 find_refill_source(struct adapter *sc, int maxp, bool packing) 6278 { 6279 int i, zidx = -1; 6280 struct rx_buf_info *rxb = &sc->sge.rx_buf_info[0]; 6281 6282 if (packing) { 6283 for (i = 0; i < SW_ZONE_SIZES; i++, rxb++) { 6284 if (rxb->hwidx2 == -1) 6285 continue; 6286 if (rxb->size1 < PAGE_SIZE && 6287 rxb->size1 < largest_rx_cluster) 6288 continue; 6289 if (rxb->size1 > largest_rx_cluster) 6290 break; 6291 MPASS(rxb->size1 - rxb->size2 >= CL_METADATA_SIZE); 6292 if (rxb->size2 >= maxp) 6293 return (i); 6294 zidx = i; 6295 } 6296 } else { 6297 for (i = 0; i < SW_ZONE_SIZES; i++, rxb++) { 6298 if (rxb->hwidx1 == -1) 6299 continue; 6300 if (rxb->size1 > largest_rx_cluster) 6301 break; 6302 if (rxb->size1 >= maxp) 6303 return (i); 6304 zidx = i; 6305 } 6306 } 6307 6308 return (zidx); 6309 } 6310 6311 static void 6312 add_fl_to_sfl(struct adapter *sc, struct sge_fl *fl) 6313 { 6314 mtx_lock(&sc->sfl_lock); 6315 FL_LOCK(fl); 6316 if ((fl->flags & FL_DOOMED) == 0) { 6317 fl->flags |= FL_STARVING; 6318 TAILQ_INSERT_TAIL(&sc->sfl, fl, link); 6319 callout_reset(&sc->sfl_callout, hz / 5, refill_sfl, sc); 6320 } 6321 FL_UNLOCK(fl); 6322 mtx_unlock(&sc->sfl_lock); 6323 } 6324 6325 static void 6326 handle_wrq_egr_update(struct adapter *sc, struct sge_eq *eq) 6327 { 6328 struct sge_wrq *wrq = (void *)eq; 6329 6330 atomic_readandclear_int(&eq->equiq); 6331 taskqueue_enqueue(sc->tq[eq->port_id], &wrq->wrq_tx_task); 6332 } 6333 6334 static void 6335 handle_eth_egr_update(struct adapter *sc, struct sge_eq *eq) 6336 { 6337 struct sge_txq *txq = (void *)eq; 6338 6339 MPASS(eq->type == EQ_ETH); 6340 6341 atomic_readandclear_int(&eq->equiq); 6342 if (mp_ring_is_idle(txq->r)) 6343 taskqueue_enqueue(sc->tq[eq->port_id], &txq->tx_reclaim_task); 6344 else 6345 mp_ring_check_drainage(txq->r, 64); 6346 } 6347 6348 static int 6349 handle_sge_egr_update(struct sge_iq *iq, const struct rss_header *rss, 6350 struct mbuf *m) 6351 { 6352 const struct cpl_sge_egr_update *cpl = (const void *)(rss + 1); 6353 unsigned int qid = G_EGR_QID(ntohl(cpl->opcode_qid)); 6354 struct adapter *sc = iq->adapter; 6355 struct sge *s = &sc->sge; 6356 struct sge_eq *eq; 6357 static void (*h[])(struct adapter *, struct sge_eq 
*) = {NULL, 6358 &handle_wrq_egr_update, &handle_eth_egr_update, 6359 &handle_wrq_egr_update}; 6360 6361 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__, 6362 rss->opcode)); 6363 6364 eq = s->eqmap[qid - s->eq_start - s->eq_base]; 6365 (*h[eq->type])(sc, eq); 6366 6367 return (0); 6368 } 6369 6370 /* handle_fw_msg works for both fw4_msg and fw6_msg because this is valid */ 6371 CTASSERT(offsetof(struct cpl_fw4_msg, data) == \ 6372 offsetof(struct cpl_fw6_msg, data)); 6373 6374 static int 6375 handle_fw_msg(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 6376 { 6377 struct adapter *sc = iq->adapter; 6378 const struct cpl_fw6_msg *cpl = (const void *)(rss + 1); 6379 6380 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__, 6381 rss->opcode)); 6382 6383 if (cpl->type == FW_TYPE_RSSCPL || cpl->type == FW6_TYPE_RSSCPL) { 6384 const struct rss_header *rss2; 6385 6386 rss2 = (const struct rss_header *)&cpl->data[0]; 6387 return (t4_cpl_handler[rss2->opcode](iq, rss2, m)); 6388 } 6389 6390 return (t4_fw_msg_handler[cpl->type](sc, &cpl->data[0])); 6391 } 6392 6393 /** 6394 * t4_handle_wrerr_rpl - process a FW work request error message 6395 * @adap: the adapter 6396 * @rpl: start of the FW message 6397 */ 6398 static int 6399 t4_handle_wrerr_rpl(struct adapter *adap, const __be64 *rpl) 6400 { 6401 u8 opcode = *(const u8 *)rpl; 6402 const struct fw_error_cmd *e = (const void *)rpl; 6403 unsigned int i; 6404 6405 if (opcode != FW_ERROR_CMD) { 6406 log(LOG_ERR, 6407 "%s: Received WRERR_RPL message with opcode %#x\n", 6408 device_get_nameunit(adap->dev), opcode); 6409 return (EINVAL); 6410 } 6411 log(LOG_ERR, "%s: FW_ERROR (%s) ", device_get_nameunit(adap->dev), 6412 G_FW_ERROR_CMD_FATAL(be32toh(e->op_to_type)) ? "fatal" : 6413 "non-fatal"); 6414 switch (G_FW_ERROR_CMD_TYPE(be32toh(e->op_to_type))) { 6415 case FW_ERROR_TYPE_EXCEPTION: 6416 log(LOG_ERR, "exception info:\n"); 6417 for (i = 0; i < nitems(e->u.exception.info); i++) 6418 log(LOG_ERR, "%s%08x", i == 0 ? "\t" : " ", 6419 be32toh(e->u.exception.info[i])); 6420 log(LOG_ERR, "\n"); 6421 break; 6422 case FW_ERROR_TYPE_HWMODULE: 6423 log(LOG_ERR, "HW module regaddr %08x regval %08x\n", 6424 be32toh(e->u.hwmodule.regaddr), 6425 be32toh(e->u.hwmodule.regval)); 6426 break; 6427 case FW_ERROR_TYPE_WR: 6428 log(LOG_ERR, "WR cidx %d PF %d VF %d eqid %d hdr:\n", 6429 be16toh(e->u.wr.cidx), 6430 G_FW_ERROR_CMD_PFN(be16toh(e->u.wr.pfn_vfn)), 6431 G_FW_ERROR_CMD_VFN(be16toh(e->u.wr.pfn_vfn)), 6432 be32toh(e->u.wr.eqid)); 6433 for (i = 0; i < nitems(e->u.wr.wrhdr); i++) 6434 log(LOG_ERR, "%s%02x", i == 0 ? "\t" : " ", 6435 e->u.wr.wrhdr[i]); 6436 log(LOG_ERR, "\n"); 6437 break; 6438 case FW_ERROR_TYPE_ACL: 6439 log(LOG_ERR, "ACL cidx %d PF %d VF %d eqid %d %s", 6440 be16toh(e->u.acl.cidx), 6441 G_FW_ERROR_CMD_PFN(be16toh(e->u.acl.pfn_vfn)), 6442 G_FW_ERROR_CMD_VFN(be16toh(e->u.acl.pfn_vfn)), 6443 be32toh(e->u.acl.eqid), 6444 G_FW_ERROR_CMD_MV(be16toh(e->u.acl.mv_pkd)) ? 
"vlanid" : 6445 "MAC"); 6446 for (i = 0; i < nitems(e->u.acl.val); i++) 6447 log(LOG_ERR, " %02x", e->u.acl.val[i]); 6448 log(LOG_ERR, "\n"); 6449 break; 6450 default: 6451 log(LOG_ERR, "type %#x\n", 6452 G_FW_ERROR_CMD_TYPE(be32toh(e->op_to_type))); 6453 return (EINVAL); 6454 } 6455 return (0); 6456 } 6457 6458 static inline bool 6459 bufidx_used(struct adapter *sc, int idx) 6460 { 6461 struct rx_buf_info *rxb = &sc->sge.rx_buf_info[0]; 6462 int i; 6463 6464 for (i = 0; i < SW_ZONE_SIZES; i++, rxb++) { 6465 if (rxb->size1 > largest_rx_cluster) 6466 continue; 6467 if (rxb->hwidx1 == idx || rxb->hwidx2 == idx) 6468 return (true); 6469 } 6470 6471 return (false); 6472 } 6473 6474 static int 6475 sysctl_bufsizes(SYSCTL_HANDLER_ARGS) 6476 { 6477 struct adapter *sc = arg1; 6478 struct sge_params *sp = &sc->params.sge; 6479 int i, rc; 6480 struct sbuf sb; 6481 char c; 6482 6483 sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND); 6484 for (i = 0; i < SGE_FLBUF_SIZES; i++) { 6485 if (bufidx_used(sc, i)) 6486 c = '*'; 6487 else 6488 c = '\0'; 6489 6490 sbuf_printf(&sb, "%u%c ", sp->sge_fl_buffer_size[i], c); 6491 } 6492 sbuf_trim(&sb); 6493 sbuf_finish(&sb); 6494 rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); 6495 sbuf_delete(&sb); 6496 return (rc); 6497 } 6498 6499 #ifdef RATELIMIT 6500 #if defined(INET) || defined(INET6) 6501 /* 6502 * len16 for a txpkt WR with a GL. Includes the firmware work request header. 6503 */ 6504 static inline u_int 6505 txpkt_eo_len16(u_int nsegs, u_int immhdrs, u_int tso) 6506 { 6507 u_int n; 6508 6509 MPASS(immhdrs > 0); 6510 6511 n = roundup2(sizeof(struct fw_eth_tx_eo_wr) + 6512 sizeof(struct cpl_tx_pkt_core) + immhdrs, 16); 6513 if (__predict_false(nsegs == 0)) 6514 goto done; 6515 6516 nsegs--; /* first segment is part of ulptx_sgl */ 6517 n += sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1)); 6518 if (tso) 6519 n += sizeof(struct cpl_tx_pkt_lso_core); 6520 6521 done: 6522 return (howmany(n, 16)); 6523 } 6524 #endif 6525 6526 #define ETID_FLOWC_NPARAMS 6 6527 #define ETID_FLOWC_LEN (roundup2((sizeof(struct fw_flowc_wr) + \ 6528 ETID_FLOWC_NPARAMS * sizeof(struct fw_flowc_mnemval)), 16)) 6529 #define ETID_FLOWC_LEN16 (howmany(ETID_FLOWC_LEN, 16)) 6530 6531 #if defined(INET) || defined(INET6) 6532 static int 6533 send_etid_flowc_wr(struct cxgbe_rate_tag *cst, struct port_info *pi, 6534 struct vi_info *vi) 6535 { 6536 struct wrq_cookie cookie; 6537 u_int pfvf = pi->adapter->pf << S_FW_VIID_PFN; 6538 struct fw_flowc_wr *flowc; 6539 6540 mtx_assert(&cst->lock, MA_OWNED); 6541 MPASS((cst->flags & (EO_FLOWC_PENDING | EO_FLOWC_RPL_PENDING)) == 6542 EO_FLOWC_PENDING); 6543 6544 flowc = start_wrq_wr(&cst->eo_txq->wrq, ETID_FLOWC_LEN16, &cookie); 6545 if (__predict_false(flowc == NULL)) 6546 return (ENOMEM); 6547 6548 bzero(flowc, ETID_FLOWC_LEN); 6549 flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) | 6550 V_FW_FLOWC_WR_NPARAMS(ETID_FLOWC_NPARAMS) | V_FW_WR_COMPL(0)); 6551 flowc->flowid_len16 = htonl(V_FW_WR_LEN16(ETID_FLOWC_LEN16) | 6552 V_FW_WR_FLOWID(cst->etid)); 6553 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; 6554 flowc->mnemval[0].val = htobe32(pfvf); 6555 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; 6556 flowc->mnemval[1].val = htobe32(pi->tx_chan); 6557 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; 6558 flowc->mnemval[2].val = htobe32(pi->tx_chan); 6559 flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; 6560 flowc->mnemval[3].val = htobe32(cst->iqid); 6561 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_EOSTATE; 6562 
flowc->mnemval[4].val = htobe32(FW_FLOWC_MNEM_EOSTATE_ESTABLISHED); 6563 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS; 6564 flowc->mnemval[5].val = htobe32(cst->schedcl); 6565 6566 commit_wrq_wr(&cst->eo_txq->wrq, flowc, &cookie); 6567 6568 cst->flags &= ~EO_FLOWC_PENDING; 6569 cst->flags |= EO_FLOWC_RPL_PENDING; 6570 MPASS(cst->tx_credits >= ETID_FLOWC_LEN16); /* flowc is first WR. */ 6571 cst->tx_credits -= ETID_FLOWC_LEN16; 6572 6573 return (0); 6574 } 6575 #endif 6576 6577 #define ETID_FLUSH_LEN16 (howmany(sizeof (struct fw_flowc_wr), 16)) 6578 6579 void 6580 send_etid_flush_wr(struct cxgbe_rate_tag *cst) 6581 { 6582 struct fw_flowc_wr *flowc; 6583 struct wrq_cookie cookie; 6584 6585 mtx_assert(&cst->lock, MA_OWNED); 6586 6587 flowc = start_wrq_wr(&cst->eo_txq->wrq, ETID_FLUSH_LEN16, &cookie); 6588 if (__predict_false(flowc == NULL)) 6589 CXGBE_UNIMPLEMENTED(__func__); 6590 6591 bzero(flowc, ETID_FLUSH_LEN16 * 16); 6592 flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) | 6593 V_FW_FLOWC_WR_NPARAMS(0) | F_FW_WR_COMPL); 6594 flowc->flowid_len16 = htobe32(V_FW_WR_LEN16(ETID_FLUSH_LEN16) | 6595 V_FW_WR_FLOWID(cst->etid)); 6596 6597 commit_wrq_wr(&cst->eo_txq->wrq, flowc, &cookie); 6598 6599 cst->flags |= EO_FLUSH_RPL_PENDING; 6600 MPASS(cst->tx_credits >= ETID_FLUSH_LEN16); 6601 cst->tx_credits -= ETID_FLUSH_LEN16; 6602 cst->ncompl++; 6603 } 6604 6605 static void 6606 write_ethofld_wr(struct cxgbe_rate_tag *cst, struct fw_eth_tx_eo_wr *wr, 6607 struct mbuf *m0, int compl) 6608 { 6609 struct cpl_tx_pkt_core *cpl; 6610 uint64_t ctrl1; 6611 uint32_t ctrl; /* used in many unrelated places */ 6612 int len16, pktlen, nsegs, immhdrs; 6613 uintptr_t p; 6614 struct ulptx_sgl *usgl; 6615 struct sglist sg; 6616 struct sglist_seg segs[38]; /* XXX: find real limit. 
XXX: get off the stack */ 6617 6618 mtx_assert(&cst->lock, MA_OWNED); 6619 M_ASSERTPKTHDR(m0); 6620 KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 && 6621 m0->m_pkthdr.l4hlen > 0, 6622 ("%s: ethofld mbuf %p is missing header lengths", __func__, m0)); 6623 6624 len16 = mbuf_eo_len16(m0); 6625 nsegs = mbuf_eo_nsegs(m0); 6626 pktlen = m0->m_pkthdr.len; 6627 ctrl = sizeof(struct cpl_tx_pkt_core); 6628 if (needs_tso(m0)) 6629 ctrl += sizeof(struct cpl_tx_pkt_lso_core); 6630 immhdrs = m0->m_pkthdr.l2hlen + m0->m_pkthdr.l3hlen + m0->m_pkthdr.l4hlen; 6631 ctrl += immhdrs; 6632 6633 wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_EO_WR) | 6634 V_FW_ETH_TX_EO_WR_IMMDLEN(ctrl) | V_FW_WR_COMPL(!!compl)); 6635 wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(len16) | 6636 V_FW_WR_FLOWID(cst->etid)); 6637 wr->r3 = 0; 6638 if (needs_outer_udp_csum(m0)) { 6639 wr->u.udpseg.type = FW_ETH_TX_EO_TYPE_UDPSEG; 6640 wr->u.udpseg.ethlen = m0->m_pkthdr.l2hlen; 6641 wr->u.udpseg.iplen = htobe16(m0->m_pkthdr.l3hlen); 6642 wr->u.udpseg.udplen = m0->m_pkthdr.l4hlen; 6643 wr->u.udpseg.rtplen = 0; 6644 wr->u.udpseg.r4 = 0; 6645 wr->u.udpseg.mss = htobe16(pktlen - immhdrs); 6646 wr->u.udpseg.schedpktsize = wr->u.udpseg.mss; 6647 wr->u.udpseg.plen = htobe32(pktlen - immhdrs); 6648 cpl = (void *)(wr + 1); 6649 } else { 6650 MPASS(needs_outer_tcp_csum(m0)); 6651 wr->u.tcpseg.type = FW_ETH_TX_EO_TYPE_TCPSEG; 6652 wr->u.tcpseg.ethlen = m0->m_pkthdr.l2hlen; 6653 wr->u.tcpseg.iplen = htobe16(m0->m_pkthdr.l3hlen); 6654 wr->u.tcpseg.tcplen = m0->m_pkthdr.l4hlen; 6655 wr->u.tcpseg.tsclk_tsoff = mbuf_eo_tsclk_tsoff(m0); 6656 wr->u.tcpseg.r4 = 0; 6657 wr->u.tcpseg.r5 = 0; 6658 wr->u.tcpseg.plen = htobe32(pktlen - immhdrs); 6659 6660 if (needs_tso(m0)) { 6661 struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); 6662 6663 wr->u.tcpseg.mss = htobe16(m0->m_pkthdr.tso_segsz); 6664 6665 ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | 6666 F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE | 6667 V_LSO_ETHHDR_LEN((m0->m_pkthdr.l2hlen - 6668 ETHER_HDR_LEN) >> 2) | 6669 V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) | 6670 V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2); 6671 if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr)) 6672 ctrl |= F_LSO_IPV6; 6673 lso->lso_ctrl = htobe32(ctrl); 6674 lso->ipid_ofst = htobe16(0); 6675 lso->mss = htobe16(m0->m_pkthdr.tso_segsz); 6676 lso->seqno_offset = htobe32(0); 6677 lso->len = htobe32(pktlen); 6678 6679 cpl = (void *)(lso + 1); 6680 } else { 6681 wr->u.tcpseg.mss = htobe16(0xffff); 6682 cpl = (void *)(wr + 1); 6683 } 6684 } 6685 6686 /* Checksum offload must be requested for ethofld. 
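Frames on this path were built for a TCPSEG or UDPSEG work request, so the outer L4 checksum must have been requested; csum_to_ctrl supplies the matching TXPKT checksum control bits.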
*/ 6687 MPASS(needs_outer_l4_csum(m0)); 6688 ctrl1 = csum_to_ctrl(cst->adapter, m0); 6689 6690 /* VLAN tag insertion */ 6691 if (needs_vlan_insertion(m0)) { 6692 ctrl1 |= F_TXPKT_VLAN_VLD | 6693 V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag); 6694 } 6695 6696 /* CPL header */ 6697 cpl->ctrl0 = cst->ctrl0; 6698 cpl->pack = 0; 6699 cpl->len = htobe16(pktlen); 6700 cpl->ctrl1 = htobe64(ctrl1); 6701 6702 /* Copy Ethernet, IP & TCP/UDP hdrs as immediate data */ 6703 p = (uintptr_t)(cpl + 1); 6704 m_copydata(m0, 0, immhdrs, (void *)p); 6705 6706 /* SGL */ 6707 if (nsegs > 0) { 6708 int i, pad; 6709 6710 /* zero-pad upto next 16Byte boundary, if not 16Byte aligned */ 6711 p += immhdrs; 6712 pad = 16 - (immhdrs & 0xf); 6713 bzero((void *)p, pad); 6714 6715 usgl = (void *)(p + pad); 6716 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | 6717 V_ULPTX_NSGE(nsegs)); 6718 6719 sglist_init(&sg, nitems(segs), segs); 6720 for (; m0 != NULL; m0 = m0->m_next) { 6721 if (__predict_false(m0->m_len == 0)) 6722 continue; 6723 if (immhdrs >= m0->m_len) { 6724 immhdrs -= m0->m_len; 6725 continue; 6726 } 6727 if (m0->m_flags & M_EXTPG) 6728 sglist_append_mbuf_epg(&sg, m0, 6729 mtod(m0, vm_offset_t), m0->m_len); 6730 else 6731 sglist_append(&sg, mtod(m0, char *) + immhdrs, 6732 m0->m_len - immhdrs); 6733 immhdrs = 0; 6734 } 6735 MPASS(sg.sg_nseg == nsegs); 6736 6737 /* 6738 * Zero pad last 8B in case the WR doesn't end on a 16B 6739 * boundary. 6740 */ 6741 *(uint64_t *)((char *)wr + len16 * 16 - 8) = 0; 6742 6743 usgl->len0 = htobe32(segs[0].ss_len); 6744 usgl->addr0 = htobe64(segs[0].ss_paddr); 6745 for (i = 0; i < nsegs - 1; i++) { 6746 usgl->sge[i / 2].len[i & 1] = htobe32(segs[i + 1].ss_len); 6747 usgl->sge[i / 2].addr[i & 1] = htobe64(segs[i + 1].ss_paddr); 6748 } 6749 if (i & 1) 6750 usgl->sge[i / 2].len[1] = htobe32(0); 6751 } 6752 6753 } 6754 6755 static void 6756 ethofld_tx(struct cxgbe_rate_tag *cst) 6757 { 6758 struct mbuf *m; 6759 struct wrq_cookie cookie; 6760 int next_credits, compl; 6761 struct fw_eth_tx_eo_wr *wr; 6762 6763 mtx_assert(&cst->lock, MA_OWNED); 6764 6765 while ((m = mbufq_first(&cst->pending_tx)) != NULL) { 6766 M_ASSERTPKTHDR(m); 6767 6768 /* How many len16 credits do we need to send this mbuf. */ 6769 next_credits = mbuf_eo_len16(m); 6770 MPASS(next_credits > 0); 6771 if (next_credits > cst->tx_credits) { 6772 /* 6773 * Tx will make progress eventually because there is at 6774 * least one outstanding fw4_ack that will return 6775 * credits and kick the tx. 6776 */ 6777 MPASS(cst->ncompl > 0); 6778 return; 6779 } 6780 wr = start_wrq_wr(&cst->eo_txq->wrq, next_credits, &cookie); 6781 if (__predict_false(wr == NULL)) { 6782 /* XXX: wishful thinking, not a real assertion. */ 6783 MPASS(cst->ncompl > 0); 6784 return; 6785 } 6786 cst->tx_credits -= next_credits; 6787 cst->tx_nocompl += next_credits; 6788 compl = cst->ncompl == 0 || cst->tx_nocompl >= cst->tx_total / 2; 6789 ETHER_BPF_MTAP(cst->com.ifp, m); 6790 write_ethofld_wr(cst, wr, m, compl); 6791 commit_wrq_wr(&cst->eo_txq->wrq, wr, &cookie); 6792 if (compl) { 6793 cst->ncompl++; 6794 cst->tx_nocompl = 0; 6795 } 6796 (void) mbufq_dequeue(&cst->pending_tx); 6797 6798 /* 6799 * Drop the mbuf's reference on the tag now rather 6800 * than waiting until m_freem(). This ensures that 6801 * cxgbe_rate_tag_free gets called when the inp drops 6802 * its reference on the tag and there are no more 6803 * mbufs in the pending_tx queue and can flush any 6804 * pending requests. 
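 * (The flush referred to here is the FW_FLOWC_WR posted by
 * send_etid_flush_wr() after the kernel has freed the tag.)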
Otherwise if the last mbuf 6805 * doesn't request a completion the etid will never be 6806 * released. 6807 */ 6808 m->m_pkthdr.snd_tag = NULL; 6809 m->m_pkthdr.csum_flags &= ~CSUM_SND_TAG; 6810 m_snd_tag_rele(&cst->com); 6811 6812 mbufq_enqueue(&cst->pending_fwack, m); 6813 } 6814 } 6815 6816 #if defined(INET) || defined(INET6) 6817 static int 6818 ethofld_transmit(if_t ifp, struct mbuf *m0) 6819 { 6820 struct cxgbe_rate_tag *cst; 6821 int rc; 6822 6823 MPASS(m0->m_nextpkt == NULL); 6824 MPASS(m0->m_pkthdr.csum_flags & CSUM_SND_TAG); 6825 MPASS(m0->m_pkthdr.snd_tag != NULL); 6826 cst = mst_to_crt(m0->m_pkthdr.snd_tag); 6827 6828 mtx_lock(&cst->lock); 6829 MPASS(cst->flags & EO_SND_TAG_REF); 6830 6831 if (__predict_false(cst->flags & EO_FLOWC_PENDING)) { 6832 struct vi_info *vi = if_getsoftc(ifp); 6833 struct port_info *pi = vi->pi; 6834 struct adapter *sc = pi->adapter; 6835 const uint32_t rss_mask = vi->rss_size - 1; 6836 uint32_t rss_hash; 6837 6838 cst->eo_txq = &sc->sge.ofld_txq[vi->first_ofld_txq]; 6839 if (M_HASHTYPE_ISHASH(m0)) 6840 rss_hash = m0->m_pkthdr.flowid; 6841 else 6842 rss_hash = arc4random(); 6843 /* We assume RSS hashing */ 6844 cst->iqid = vi->rss[rss_hash & rss_mask]; 6845 cst->eo_txq += rss_hash % vi->nofldtxq; 6846 rc = send_etid_flowc_wr(cst, pi, vi); 6847 if (rc != 0) 6848 goto done; 6849 } 6850 6851 if (__predict_false(cst->plen + m0->m_pkthdr.len > eo_max_backlog)) { 6852 rc = ENOBUFS; 6853 goto done; 6854 } 6855 6856 mbufq_enqueue(&cst->pending_tx, m0); 6857 cst->plen += m0->m_pkthdr.len; 6858 6859 /* 6860 * Hold an extra reference on the tag while generating work 6861 * requests to ensure that we don't try to free the tag during 6862 * ethofld_tx() in case we are sending the final mbuf after 6863 * the inp was freed. 6864 */ 6865 m_snd_tag_ref(&cst->com); 6866 ethofld_tx(cst); 6867 mtx_unlock(&cst->lock); 6868 m_snd_tag_rele(&cst->com); 6869 return (0); 6870 6871 done: 6872 mtx_unlock(&cst->lock); 6873 return (rc); 6874 } 6875 #endif 6876 6877 static int 6878 ethofld_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m0) 6879 { 6880 struct adapter *sc = iq->adapter; 6881 const struct cpl_fw4_ack *cpl = (const void *)(rss + 1); 6882 struct mbuf *m; 6883 u_int etid = G_CPL_FW4_ACK_FLOWID(be32toh(OPCODE_TID(cpl))); 6884 struct cxgbe_rate_tag *cst; 6885 uint8_t credits = cpl->credits; 6886 6887 cst = lookup_etid(sc, etid); 6888 mtx_lock(&cst->lock); 6889 if (__predict_false(cst->flags & EO_FLOWC_RPL_PENDING)) { 6890 MPASS(credits >= ETID_FLOWC_LEN16); 6891 credits -= ETID_FLOWC_LEN16; 6892 cst->flags &= ~EO_FLOWC_RPL_PENDING; 6893 } 6894 6895 KASSERT(cst->ncompl > 0, 6896 ("%s: etid %u (%p) wasn't expecting completion.", 6897 __func__, etid, cst)); 6898 cst->ncompl--; 6899 6900 while (credits > 0) { 6901 m = mbufq_dequeue(&cst->pending_fwack); 6902 if (__predict_false(m == NULL)) { 6903 /* 6904 * The remaining credits are for the final flush that 6905 * was issued when the tag was freed by the kernel. 6906 */ 6907 MPASS((cst->flags & 6908 (EO_FLUSH_RPL_PENDING | EO_SND_TAG_REF)) == 6909 EO_FLUSH_RPL_PENDING); 6910 MPASS(credits == ETID_FLUSH_LEN16); 6911 MPASS(cst->tx_credits + cpl->credits == cst->tx_total); 6912 MPASS(cst->ncompl == 0); 6913 6914 cst->flags &= ~EO_FLUSH_RPL_PENDING; 6915 cst->tx_credits += cpl->credits; 6916 cxgbe_rate_tag_free_locked(cst); 6917 return (0); /* cst is gone. 
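The call above may have released the etid, the lock, and cst itself, so nothing touches cst after this point and there is no explicit unlock.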
*/ 6918 } 6919 KASSERT(m != NULL, 6920 ("%s: too many credits (%u, %u)", __func__, cpl->credits, 6921 credits)); 6922 KASSERT(credits >= mbuf_eo_len16(m), 6923 ("%s: too few credits (%u, %u, %u)", __func__, 6924 cpl->credits, credits, mbuf_eo_len16(m))); 6925 credits -= mbuf_eo_len16(m); 6926 cst->plen -= m->m_pkthdr.len; 6927 m_freem(m); 6928 } 6929 6930 cst->tx_credits += cpl->credits; 6931 MPASS(cst->tx_credits <= cst->tx_total); 6932 6933 if (cst->flags & EO_SND_TAG_REF) { 6934 /* 6935 * As with ethofld_transmit(), hold an extra reference 6936 * so that the tag is stable across ethold_tx(). 6937 */ 6938 m_snd_tag_ref(&cst->com); 6939 m = mbufq_first(&cst->pending_tx); 6940 if (m != NULL && cst->tx_credits >= mbuf_eo_len16(m)) 6941 ethofld_tx(cst); 6942 mtx_unlock(&cst->lock); 6943 m_snd_tag_rele(&cst->com); 6944 } else { 6945 /* 6946 * There shouldn't be any pending packets if the tag 6947 * was freed by the kernel since any pending packet 6948 * should hold a reference to the tag. 6949 */ 6950 MPASS(mbufq_first(&cst->pending_tx) == NULL); 6951 mtx_unlock(&cst->lock); 6952 } 6953 6954 return (0); 6955 } 6956 #endif 6957