xref: /freebsd/sys/dev/cxgbe/t4_sge.c (revision 271128b0698653294acf0ed3457d5871af5b3ef1)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2011 Chelsio Communications, Inc.
5  * All rights reserved.
6  * Written by: Navdeep Parhar <np@FreeBSD.org>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 #include <sys/cdefs.h>
31 #include "opt_inet.h"
32 #include "opt_inet6.h"
33 #include "opt_kern_tls.h"
34 #include "opt_ratelimit.h"
35 
36 #include <sys/types.h>
37 #include <sys/eventhandler.h>
38 #include <sys/mbuf.h>
39 #include <sys/socket.h>
40 #include <sys/kernel.h>
41 #include <sys/ktls.h>
42 #include <sys/malloc.h>
43 #include <sys/msan.h>
44 #include <sys/queue.h>
45 #include <sys/sbuf.h>
46 #include <sys/taskqueue.h>
47 #include <sys/time.h>
48 #include <sys/sglist.h>
49 #include <sys/sysctl.h>
50 #include <sys/smp.h>
51 #include <sys/socketvar.h>
52 #include <sys/counter.h>
53 #include <net/bpf.h>
54 #include <net/ethernet.h>
55 #include <net/if.h>
56 #include <net/if_vlan_var.h>
57 #include <net/if_vxlan.h>
58 #include <netinet/in.h>
59 #include <netinet/ip.h>
60 #include <netinet/ip6.h>
61 #include <netinet/tcp.h>
62 #include <netinet/udp.h>
63 #include <machine/in_cksum.h>
64 #include <machine/md_var.h>
65 #include <vm/vm.h>
66 #include <vm/pmap.h>
67 #ifdef DEV_NETMAP
68 #include <machine/bus.h>
69 #include <sys/selinfo.h>
70 #include <net/if_var.h>
71 #include <net/netmap.h>
72 #include <dev/netmap/netmap_kern.h>
73 #endif
74 
75 #include "common/common.h"
76 #include "common/t4_regs.h"
77 #include "common/t4_regs_values.h"
78 #include "common/t4_msg.h"
79 #include "t4_l2t.h"
80 #include "t4_mp_ring.h"
81 
82 #define RX_COPY_THRESHOLD MINCLSIZE
83 
84 /*
85  * Ethernet frames are DMA'd at this byte offset into the freelist buffer.
86  * 0-7 are valid values.
87  */
88 static int fl_pktshift = 0;
89 SYSCTL_INT(_hw_cxgbe, OID_AUTO, fl_pktshift, CTLFLAG_RDTUN, &fl_pktshift, 0,
90     "payload DMA offset in rx buffer (bytes)");
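/*
 * Note: every CTLFLAG_RDTUN knob in this file is a boot-time tunable with a
 * read-only sysctl view.  To change one, set it in /boot/loader.conf before
 * the driver loads, e.g. (values here are purely illustrative):
 *
 *   hw.cxgbe.fl_pktshift="2"
 *   hw.cxgbe.buffer_packing="1"
 */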
91 
92 /*
93  * Pad ethernet payload up to this boundary.
94  * -1: driver should figure out a good value.
95  *  0: disable padding.
96  *  Any power of 2 from 32 to 4096 (both inclusive) is also a valid value.
97  */
98 int fl_pad = -1;
99 SYSCTL_INT(_hw_cxgbe, OID_AUTO, fl_pad, CTLFLAG_RDTUN, &fl_pad, 0,
100     "payload pad boundary (bytes)");
101 
102 /*
103  * Status page length.
104  * -1: driver should figure out a good value.
105  *  64 or 128 are the only other valid values.
106  */
107 static int spg_len = -1;
108 SYSCTL_INT(_hw_cxgbe, OID_AUTO, spg_len, CTLFLAG_RDTUN, &spg_len, 0,
109     "status page size (bytes)");
110 
111 /*
112  * Congestion drops.
113  * -1: no congestion feedback (not recommended).
114  *  0: backpressure the channel instead of dropping packets right away.
115  *  1: no backpressure, drop packets for the congested queue immediately.
116  *  2: both backpressure and drop.
117  */
118 static int cong_drop = 0;
119 SYSCTL_INT(_hw_cxgbe, OID_AUTO, cong_drop, CTLFLAG_RDTUN, &cong_drop, 0,
120     "Congestion control for NIC RX queues (0 = backpressure, 1 = drop, 2 = both)");
121 #ifdef TCP_OFFLOAD
122 static int ofld_cong_drop = 0;
123 SYSCTL_INT(_hw_cxgbe, OID_AUTO, ofld_cong_drop, CTLFLAG_RDTUN, &ofld_cong_drop, 0,
124     "Congestion control for TOE RX queues (0 = backpressure, 1 = drop, 2 = both)");
125 #endif
126 
127 /*
128  * Deliver multiple frames in the same free list buffer if they fit.
129  * -1: let the driver decide whether to enable buffer packing or not.
130  *  0: disable buffer packing.
131  *  1: enable buffer packing.
132  */
133 static int buffer_packing = -1;
134 SYSCTL_INT(_hw_cxgbe, OID_AUTO, buffer_packing, CTLFLAG_RDTUN, &buffer_packing,
135     0, "Enable buffer packing");
136 
137 /*
138  * Start next frame in a packed buffer at this boundary.
139  * -1: driver should figure out a good value.
140  * T4: driver will ignore this and use the same value as fl_pad above.
141  * T5: 16, or a power of 2 from 64 to 4096 (both inclusive) is a valid value.
142  */
143 static int fl_pack = -1;
144 SYSCTL_INT(_hw_cxgbe, OID_AUTO, fl_pack, CTLFLAG_RDTUN, &fl_pack, 0,
145     "payload pack boundary (bytes)");
146 
147 /*
148  * Largest rx cluster size that the driver is allowed to allocate.
149  */
150 static int largest_rx_cluster = MJUM16BYTES;
151 SYSCTL_INT(_hw_cxgbe, OID_AUTO, largest_rx_cluster, CTLFLAG_RDTUN,
152     &largest_rx_cluster, 0, "Largest rx cluster (bytes)");
153 
154 /*
155  * Size of cluster allocation that's most likely to succeed.  The driver will
156  * fall back to this size if it fails to allocate clusters larger than this.
157  */
158 static int safest_rx_cluster = PAGE_SIZE;
159 SYSCTL_INT(_hw_cxgbe, OID_AUTO, safest_rx_cluster, CTLFLAG_RDTUN,
160     &safest_rx_cluster, 0, "Safe rx cluster (bytes)");
161 
162 #ifdef RATELIMIT
163 /*
164  * Knob to control TCP timestamp rewriting, and the granularity of the tick used
165  * for rewriting.  -1 and 0-3 are all valid values.
166  * -1: hardware should leave the TCP timestamps alone.
167  * 0: 1ms
168  * 1: 100us
169  * 2: 10us
170  * 3: 1us
171  */
172 static int tsclk = -1;
173 SYSCTL_INT(_hw_cxgbe, OID_AUTO, tsclk, CTLFLAG_RDTUN, &tsclk, 0,
174     "Control TCP timestamp rewriting when using pacing");
175 
176 static int eo_max_backlog = 1024 * 1024;
177 SYSCTL_INT(_hw_cxgbe, OID_AUTO, eo_max_backlog, CTLFLAG_RDTUN, &eo_max_backlog,
178     0, "Maximum backlog of ratelimited data per flow");
179 #endif
180 
181 /*
182  * The interrupt holdoff timers are multiplied by this value on T6+.
183  * 1 and 3-17 (both inclusive) are legal values.
184  */
185 static int tscale = 1;
186 SYSCTL_INT(_hw_cxgbe, OID_AUTO, tscale, CTLFLAG_RDTUN, &tscale, 0,
187     "Interrupt holdoff timer scale on T6+");
188 
189 /*
190  * Number of LRO entries in the lro_ctrl structure per rx queue.
191  */
192 static int lro_entries = TCP_LRO_ENTRIES;
193 SYSCTL_INT(_hw_cxgbe, OID_AUTO, lro_entries, CTLFLAG_RDTUN, &lro_entries, 0,
194     "Number of LRO entries per RX queue");
195 
196 /*
197  * This enables presorting of frames before they're fed into tcp_lro_rx.
198  */
199 static int lro_mbufs = 0;
200 SYSCTL_INT(_hw_cxgbe, OID_AUTO, lro_mbufs, CTLFLAG_RDTUN, &lro_mbufs, 0,
201     "Enable presorting of LRO frames");
202 
203 static counter_u64_t pullups;
204 SYSCTL_COUNTER_U64(_hw_cxgbe, OID_AUTO, pullups, CTLFLAG_RD, &pullups,
205     "Number of mbuf pullups performed");
206 
207 static counter_u64_t defrags;
208 SYSCTL_COUNTER_U64(_hw_cxgbe, OID_AUTO, defrags, CTLFLAG_RD, &defrags,
209     "Number of mbuf defrags performed");
210 
211 static int t4_tx_coalesce = 1;
212 SYSCTL_INT(_hw_cxgbe, OID_AUTO, tx_coalesce, CTLFLAG_RWTUN, &t4_tx_coalesce, 0,
213     "tx coalescing allowed");
214 
215 /*
216  * The driver will make aggressive attempts at tx coalescing if it sees this
217  * many packets eligible for coalescing in quick succession, with no more than
218  * the specified gap in between the eth_tx calls that delivered the packets.
219  */
220 static int t4_tx_coalesce_pkts = 32;
221 SYSCTL_INT(_hw_cxgbe, OID_AUTO, tx_coalesce_pkts, CTLFLAG_RWTUN,
222     &t4_tx_coalesce_pkts, 0,
223     "# of consecutive packets (1 - 255) that will trigger tx coalescing");
224 static int t4_tx_coalesce_gap = 5;
225 SYSCTL_INT(_hw_cxgbe, OID_AUTO, tx_coalesce_gap, CTLFLAG_RWTUN,
226     &t4_tx_coalesce_gap, 0, "tx gap (in microseconds)");
227 
228 static int service_iq(struct sge_iq *, int);
229 static int service_iq_fl(struct sge_iq *, int);
230 static struct mbuf *get_fl_payload(struct adapter *, struct sge_fl *, uint32_t);
231 static int eth_rx(struct adapter *, struct sge_rxq *, const struct iq_desc *,
232     u_int);
233 static inline void init_iq(struct sge_iq *, struct adapter *, int, int, int,
234     int, int, int);
235 static inline void init_fl(struct adapter *, struct sge_fl *, int, int, char *);
236 static inline void init_eq(struct adapter *, struct sge_eq *, int, int, uint8_t,
237     struct sge_iq *, char *);
238 static int alloc_iq_fl(struct vi_info *, struct sge_iq *, struct sge_fl *,
239     struct sysctl_ctx_list *, struct sysctl_oid *);
240 static void free_iq_fl(struct adapter *, struct sge_iq *, struct sge_fl *);
241 static void add_iq_sysctls(struct sysctl_ctx_list *, struct sysctl_oid *,
242     struct sge_iq *);
243 static void add_fl_sysctls(struct adapter *, struct sysctl_ctx_list *,
244     struct sysctl_oid *, struct sge_fl *);
245 static int alloc_iq_fl_hwq(struct vi_info *, struct sge_iq *, struct sge_fl *);
246 static int free_iq_fl_hwq(struct adapter *, struct sge_iq *, struct sge_fl *);
247 static int alloc_fwq(struct adapter *);
248 static void free_fwq(struct adapter *);
249 static int alloc_ctrlq(struct adapter *, int);
250 static void free_ctrlq(struct adapter *, int);
251 static int alloc_rxq(struct vi_info *, struct sge_rxq *, int, int, int);
252 static void free_rxq(struct vi_info *, struct sge_rxq *);
253 static void add_rxq_sysctls(struct sysctl_ctx_list *, struct sysctl_oid *,
254     struct sge_rxq *);
255 #ifdef TCP_OFFLOAD
256 static int alloc_ofld_rxq(struct vi_info *, struct sge_ofld_rxq *, int, int,
257     int);
258 static void free_ofld_rxq(struct vi_info *, struct sge_ofld_rxq *);
259 static void add_ofld_rxq_sysctls(struct sysctl_ctx_list *, struct sysctl_oid *,
260     struct sge_ofld_rxq *);
261 #endif
262 static int ctrl_eq_alloc(struct adapter *, struct sge_eq *);
263 static int eth_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *);
264 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
265 static int ofld_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *);
266 #endif
267 static int alloc_eq(struct adapter *, struct sge_eq *, struct sysctl_ctx_list *,
268     struct sysctl_oid *);
269 static void free_eq(struct adapter *, struct sge_eq *);
270 static void add_eq_sysctls(struct adapter *, struct sysctl_ctx_list *,
271     struct sysctl_oid *, struct sge_eq *);
272 static int alloc_eq_hwq(struct adapter *, struct vi_info *, struct sge_eq *);
273 static int free_eq_hwq(struct adapter *, struct vi_info *, struct sge_eq *);
274 static int alloc_wrq(struct adapter *, struct vi_info *, struct sge_wrq *,
275     struct sysctl_ctx_list *, struct sysctl_oid *);
276 static void free_wrq(struct adapter *, struct sge_wrq *);
277 static void add_wrq_sysctls(struct sysctl_ctx_list *, struct sysctl_oid *,
278     struct sge_wrq *);
279 static int alloc_txq(struct vi_info *, struct sge_txq *, int);
280 static void free_txq(struct vi_info *, struct sge_txq *);
281 static void add_txq_sysctls(struct vi_info *, struct sysctl_ctx_list *,
282     struct sysctl_oid *, struct sge_txq *);
283 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
284 static int alloc_ofld_txq(struct vi_info *, struct sge_ofld_txq *, int);
285 static void free_ofld_txq(struct vi_info *, struct sge_ofld_txq *);
286 static void add_ofld_txq_sysctls(struct sysctl_ctx_list *, struct sysctl_oid *,
287     struct sge_ofld_txq *);
288 #endif
289 static void oneseg_dma_callback(void *, bus_dma_segment_t *, int, int);
290 static inline void ring_fl_db(struct adapter *, struct sge_fl *);
291 static int refill_fl(struct adapter *, struct sge_fl *, int);
292 static void refill_sfl(void *);
293 static int find_refill_source(struct adapter *, int, bool);
294 static void add_fl_to_sfl(struct adapter *, struct sge_fl *);
295 
296 static inline void get_pkt_gl(struct mbuf *, struct sglist *);
297 static inline u_int txpkt_len16(u_int, const u_int);
298 static inline u_int txpkt_vm_len16(u_int, const u_int);
299 static inline void calculate_mbuf_len16(struct mbuf *, bool);
300 static inline u_int txpkts0_len16(u_int);
301 static inline u_int txpkts1_len16(void);
302 static u_int write_raw_wr(struct sge_txq *, void *, struct mbuf *, u_int);
303 static u_int write_txpkt_wr(struct adapter *, struct sge_txq *, struct mbuf *,
304     u_int);
305 static u_int write_txpkt_vm_wr(struct adapter *, struct sge_txq *,
306     struct mbuf *);
307 static int add_to_txpkts_vf(struct adapter *, struct sge_txq *, struct mbuf *,
308     int, bool *);
309 static int add_to_txpkts_pf(struct adapter *, struct sge_txq *, struct mbuf *,
310     int, bool *);
311 static u_int write_txpkts_wr(struct adapter *, struct sge_txq *);
312 static u_int write_txpkts_vm_wr(struct adapter *, struct sge_txq *);
313 static void write_gl_to_txd(struct sge_txq *, struct mbuf *, caddr_t *, int);
314 static inline void copy_to_txd(struct sge_eq *, caddr_t, caddr_t *, int);
315 static inline void ring_eq_db(struct adapter *, struct sge_eq *, u_int);
316 static inline uint16_t read_hw_cidx(struct sge_eq *);
317 static inline u_int reclaimable_tx_desc(struct sge_eq *);
318 static inline u_int total_available_tx_desc(struct sge_eq *);
319 static u_int reclaim_tx_descs(struct sge_txq *, u_int);
320 static void tx_reclaim(void *, int);
321 static __be64 get_flit(struct sglist_seg *, int, int);
322 static int handle_sge_egr_update(struct sge_iq *, const struct rss_header *,
323     struct mbuf *);
324 static int handle_fw_msg(struct sge_iq *, const struct rss_header *,
325     struct mbuf *);
326 static int t4_handle_wrerr_rpl(struct adapter *, const __be64 *);
327 static void wrq_tx_drain(void *, int);
328 static void drain_wrq_wr_list(struct adapter *, struct sge_wrq *);
329 
330 static int sysctl_bufsizes(SYSCTL_HANDLER_ARGS);
331 #ifdef RATELIMIT
332 static int ethofld_fw4_ack(struct sge_iq *, const struct rss_header *,
333     struct mbuf *);
334 #if defined(INET) || defined(INET6)
335 static inline u_int txpkt_eo_len16(u_int, u_int, u_int);
336 static int ethofld_transmit(if_t, struct mbuf *);
337 #endif
338 #endif
339 
340 static counter_u64_t extfree_refs;
341 static counter_u64_t extfree_rels;
342 
343 an_handler_t t4_an_handler;
344 fw_msg_handler_t t4_fw_msg_handler[NUM_FW6_TYPES];
345 cpl_handler_t t4_cpl_handler[NUM_CPL_CMDS];
346 cpl_handler_t set_tcb_rpl_handlers[NUM_CPL_COOKIES];
347 cpl_handler_t l2t_write_rpl_handlers[NUM_CPL_COOKIES];
348 cpl_handler_t act_open_rpl_handlers[NUM_CPL_COOKIES];
349 cpl_handler_t abort_rpl_rss_handlers[NUM_CPL_COOKIES];
350 cpl_handler_t fw4_ack_handlers[NUM_CPL_COOKIES];
351 
352 void
353 t4_register_an_handler(an_handler_t h)
354 {
355 	uintptr_t *loc;
356 
357 	MPASS(h == NULL || t4_an_handler == NULL);
358 
359 	loc = (uintptr_t *)&t4_an_handler;
360 	atomic_store_rel_ptr(loc, (uintptr_t)h);
361 }
362 
363 void
364 t4_register_fw_msg_handler(int type, fw_msg_handler_t h)
365 {
366 	uintptr_t *loc;
367 
368 	MPASS(type < nitems(t4_fw_msg_handler));
369 	MPASS(h == NULL || t4_fw_msg_handler[type] == NULL);
370 	/*
371 	 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
372 	 * handler dispatch table.  Reject any attempt to install a handler for
373 	 * this subtype.
374 	 */
375 	MPASS(type != FW_TYPE_RSSCPL);
376 	MPASS(type != FW6_TYPE_RSSCPL);
377 
378 	loc = (uintptr_t *)&t4_fw_msg_handler[type];
379 	atomic_store_rel_ptr(loc, (uintptr_t)h);
380 }
381 
382 void
383 t4_register_cpl_handler(int opcode, cpl_handler_t h)
384 {
385 	uintptr_t *loc;
386 
387 	MPASS(opcode < nitems(t4_cpl_handler));
388 	MPASS(h == NULL || t4_cpl_handler[opcode] == NULL);
389 
390 	loc = (uintptr_t *)&t4_cpl_handler[opcode];
391 	atomic_store_rel_ptr(loc, (uintptr_t)h);
392 }
393 
394 static int
395 set_tcb_rpl_handler(struct sge_iq *iq, const struct rss_header *rss,
396     struct mbuf *m)
397 {
398 	const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1);
399 	u_int tid;
400 	int cookie;
401 
402 	MPASS(m == NULL);
403 
404 	tid = GET_TID(cpl);
405 	if (is_hpftid(iq->adapter, tid) || is_ftid(iq->adapter, tid)) {
406 		/*
407 		 * The return code for filter-write is put in the CPL cookie so
408 		 * we have to rely on the hardware tid (is_ftid) to determine
409 		 * that this is a response to a filter.
410 		 */
411 		cookie = CPL_COOKIE_FILTER;
412 	} else {
413 		cookie = G_COOKIE(cpl->cookie);
414 	}
415 	MPASS(cookie > CPL_COOKIE_RESERVED);
416 	MPASS(cookie < nitems(set_tcb_rpl_handlers));
417 
418 	return (set_tcb_rpl_handlers[cookie](iq, rss, m));
419 }
420 
421 static int
422 l2t_write_rpl_handler(struct sge_iq *iq, const struct rss_header *rss,
423     struct mbuf *m)
424 {
425 	const struct cpl_l2t_write_rpl *rpl = (const void *)(rss + 1);
426 	unsigned int cookie;
427 
428 	MPASS(m == NULL);
429 
430 	cookie = GET_TID(rpl) & F_SYNC_WR ? CPL_COOKIE_TOM : CPL_COOKIE_FILTER;
431 	return (l2t_write_rpl_handlers[cookie](iq, rss, m));
432 }
433 
434 static int
435 act_open_rpl_handler(struct sge_iq *iq, const struct rss_header *rss,
436     struct mbuf *m)
437 {
438 	const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);
439 	u_int cookie = G_TID_COOKIE(G_AOPEN_ATID(be32toh(cpl->atid_status)));
440 
441 	MPASS(m == NULL);
442 	MPASS(cookie != CPL_COOKIE_RESERVED);
443 
444 	return (act_open_rpl_handlers[cookie](iq, rss, m));
445 }
446 
447 static int
448 abort_rpl_rss_handler(struct sge_iq *iq, const struct rss_header *rss,
449     struct mbuf *m)
450 {
451 	struct adapter *sc = iq->adapter;
452 	u_int cookie;
453 
454 	MPASS(m == NULL);
455 	if (is_hashfilter(sc))
456 		cookie = CPL_COOKIE_HASHFILTER;
457 	else
458 		cookie = CPL_COOKIE_TOM;
459 
460 	return (abort_rpl_rss_handlers[cookie](iq, rss, m));
461 }
462 
463 static int
464 fw4_ack_handler(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
465 {
466 	struct adapter *sc = iq->adapter;
467 	const struct cpl_fw4_ack *cpl = (const void *)(rss + 1);
468 	unsigned int tid = G_CPL_FW4_ACK_FLOWID(be32toh(OPCODE_TID(cpl)));
469 	u_int cookie;
470 
471 	MPASS(m == NULL);
472 	if (is_etid(sc, tid))
473 		cookie = CPL_COOKIE_ETHOFLD;
474 	else
475 		cookie = CPL_COOKIE_TOM;
476 
477 	return (fw4_ack_handlers[cookie](iq, rss, m));
478 }
479 
480 static void
481 t4_init_shared_cpl_handlers(void)
482 {
483 
484 	t4_register_cpl_handler(CPL_SET_TCB_RPL, set_tcb_rpl_handler);
485 	t4_register_cpl_handler(CPL_L2T_WRITE_RPL, l2t_write_rpl_handler);
486 	t4_register_cpl_handler(CPL_ACT_OPEN_RPL, act_open_rpl_handler);
487 	t4_register_cpl_handler(CPL_ABORT_RPL_RSS, abort_rpl_rss_handler);
488 	t4_register_cpl_handler(CPL_FW4_ACK, fw4_ack_handler);
489 }
490 
491 void
492 t4_register_shared_cpl_handler(int opcode, cpl_handler_t h, int cookie)
493 {
494 	uintptr_t *loc;
495 
496 	MPASS(opcode < nitems(t4_cpl_handler));
497 	MPASS(cookie > CPL_COOKIE_RESERVED);
498 	MPASS(cookie < NUM_CPL_COOKIES);
499 	MPASS(t4_cpl_handler[opcode] != NULL);
500 
501 	switch (opcode) {
502 	case CPL_SET_TCB_RPL:
503 		loc = (uintptr_t *)&set_tcb_rpl_handlers[cookie];
504 		break;
505 	case CPL_L2T_WRITE_RPL:
506 		loc = (uintptr_t *)&l2t_write_rpl_handlers[cookie];
507 		break;
508 	case CPL_ACT_OPEN_RPL:
509 		loc = (uintptr_t *)&act_open_rpl_handlers[cookie];
510 		break;
511 	case CPL_ABORT_RPL_RSS:
512 		loc = (uintptr_t *)&abort_rpl_rss_handlers[cookie];
513 		break;
514 	case CPL_FW4_ACK:
515 		loc = (uintptr_t *)&fw4_ack_handlers[cookie];
516 		break;
517 	default:
518 		MPASS(0);
519 		return;
520 	}
521 	MPASS(h == NULL || *loc == (uintptr_t)NULL);
522 	atomic_store_rel_ptr(loc, (uintptr_t)h);
523 }
524 
525 /*
526  * Called on MOD_LOAD.  Validates and calculates the SGE tunables.
527  */
528 void
529 t4_sge_modload(void)
530 {
531 
532 	if (fl_pktshift < 0 || fl_pktshift > 7) {
533 		printf("Invalid hw.cxgbe.fl_pktshift value (%d),"
534 		    " using 0 instead.\n", fl_pktshift);
535 		fl_pktshift = 0;
536 	}
537 
538 	if (spg_len != 64 && spg_len != 128) {
539 		int len;
540 
541 #if defined(__i386__) || defined(__amd64__)
542 		len = cpu_clflush_line_size > 64 ? 128 : 64;
543 #else
544 		len = 64;
545 #endif
546 		if (spg_len != -1) {
547 			printf("Invalid hw.cxgbe.spg_len value (%d),"
548 			    " using %d instead.\n", spg_len, len);
549 		}
550 		spg_len = len;
551 	}
552 
553 	if (cong_drop < -1 || cong_drop > 2) {
554 		printf("Invalid hw.cxgbe.cong_drop value (%d),"
555 		    " using 0 instead.\n", cong_drop);
556 		cong_drop = 0;
557 	}
558 #ifdef TCP_OFFLOAD
559 	if (ofld_cong_drop < -1 || ofld_cong_drop > 2) {
560 		printf("Invalid hw.cxgbe.ofld_cong_drop value (%d),"
561 		    " using 0 instead.\n", ofld_cong_drop);
562 		ofld_cong_drop = 0;
563 	}
564 #endif
565 
566 	if (tscale != 1 && (tscale < 3 || tscale > 17)) {
567 		printf("Invalid hw.cxgbe.tscale value (%d),"
568 		    " using 1 instead.\n", tscale);
569 		tscale = 1;
570 	}
571 
572 	if (largest_rx_cluster != MCLBYTES &&
573 #if MJUMPAGESIZE != MCLBYTES
574 	    largest_rx_cluster != MJUMPAGESIZE &&
575 #endif
576 	    largest_rx_cluster != MJUM9BYTES &&
577 	    largest_rx_cluster != MJUM16BYTES) {
578 		printf("Invalid hw.cxgbe.largest_rx_cluster value (%d),"
579 		    " using %d instead.\n", largest_rx_cluster, MJUM16BYTES);
580 		largest_rx_cluster = MJUM16BYTES;
581 	}
582 
583 	if (safest_rx_cluster != MCLBYTES &&
584 #if MJUMPAGESIZE != MCLBYTES
585 	    safest_rx_cluster != MJUMPAGESIZE &&
586 #endif
587 	    safest_rx_cluster != MJUM9BYTES &&
588 	    safest_rx_cluster != MJUM16BYTES) {
589 		printf("Invalid hw.cxgbe.safest_rx_cluster value (%d),"
590 		    " using %d instead.\n", safest_rx_cluster, MJUMPAGESIZE);
591 		safest_rx_cluster = MJUMPAGESIZE;
592 	}
593 
594 	extfree_refs = counter_u64_alloc(M_WAITOK);
595 	extfree_rels = counter_u64_alloc(M_WAITOK);
596 	pullups = counter_u64_alloc(M_WAITOK);
597 	defrags = counter_u64_alloc(M_WAITOK);
598 	counter_u64_zero(extfree_refs);
599 	counter_u64_zero(extfree_rels);
600 	counter_u64_zero(pullups);
601 	counter_u64_zero(defrags);
602 
603 	t4_init_shared_cpl_handlers();
604 	t4_register_cpl_handler(CPL_FW4_MSG, handle_fw_msg);
605 	t4_register_cpl_handler(CPL_FW6_MSG, handle_fw_msg);
606 	t4_register_cpl_handler(CPL_SGE_EGR_UPDATE, handle_sge_egr_update);
607 #ifdef RATELIMIT
608 	t4_register_shared_cpl_handler(CPL_FW4_ACK, ethofld_fw4_ack,
609 	    CPL_COOKIE_ETHOFLD);
610 #endif
611 	t4_register_fw_msg_handler(FW6_TYPE_CMD_RPL, t4_handle_fw_rpl);
612 	t4_register_fw_msg_handler(FW6_TYPE_WRERR_RPL, t4_handle_wrerr_rpl);
613 }
614 
615 void
616 t4_sge_modunload(void)
617 {
618 
619 	counter_u64_free(extfree_refs);
620 	counter_u64_free(extfree_rels);
621 	counter_u64_free(pullups);
622 	counter_u64_free(defrags);
623 }
624 
625 uint64_t
626 t4_sge_extfree_refs(void)
627 {
628 	uint64_t refs, rels;
629 
630 	rels = counter_u64_fetch(extfree_rels);
631 	refs = counter_u64_fetch(extfree_refs);
632 
633 	return (refs - rels);
634 }
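/*
 * The difference above is the number of rx clusters that are still lent out
 * with an external refcount (see get_scatter_segment/rxb_free below); a
 * caller would typically treat a nonzero value as "buffers still in flight."
 */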
635 
636 /* max 4096 */
637 #define MAX_PACK_BOUNDARY 512
638 
639 static inline void
640 setup_pad_and_pack_boundaries(struct adapter *sc)
641 {
642 	uint32_t v, m;
643 	int pad, pack, pad_shift;
644 
645 	pad_shift = chip_id(sc) > CHELSIO_T5 ? X_T6_INGPADBOUNDARY_SHIFT :
646 	    X_INGPADBOUNDARY_SHIFT;
647 	pad = fl_pad;
648 	if (fl_pad < (1 << pad_shift) ||
649 	    fl_pad > (1 << (pad_shift + M_INGPADBOUNDARY)) ||
650 	    !powerof2(fl_pad)) {
651 		/*
652 		 * If there is any chance that we might use buffer packing and
653 		 * the chip is a T4, then pick 64 as the pad/pack boundary.  Set
654 		 * it to the minimum allowed in all other cases.
655 		 */
656 		pad = is_t4(sc) && buffer_packing ? 64 : 1 << pad_shift;
657 
658 		/*
659 		 * For fl_pad = 0 we'll still write a reasonable value to the
660 		 * register but all the freelists will opt out of padding.
661 		 * We'll complain here only if the user tried to set it to a
662 		 * value greater than 0 that was invalid.
663 		 */
664 		if (fl_pad > 0) {
665 			device_printf(sc->dev, "Invalid hw.cxgbe.fl_pad value"
666 			    " (%d), using %d instead.\n", fl_pad, pad);
667 		}
668 	}
669 	m = V_INGPADBOUNDARY(M_INGPADBOUNDARY);
670 	v = V_INGPADBOUNDARY(ilog2(pad) - pad_shift);
671 	t4_set_reg_field(sc, A_SGE_CONTROL, m, v);
672 
673 	if (is_t4(sc)) {
674 		if (fl_pack != -1 && fl_pack != pad) {
675 			/* Complain but carry on. */
676 			device_printf(sc->dev, "hw.cxgbe.fl_pack (%d) ignored,"
677 			    " using %d instead.\n", fl_pack, pad);
678 		}
679 		return;
680 	}
681 
682 	pack = fl_pack;
683 	if (fl_pack < 16 || fl_pack == 32 || fl_pack > 4096 ||
684 	    !powerof2(fl_pack)) {
685 		if (sc->params.pci.mps > MAX_PACK_BOUNDARY)
686 			pack = MAX_PACK_BOUNDARY;
687 		else
688 			pack = max(sc->params.pci.mps, CACHE_LINE_SIZE);
689 		MPASS(powerof2(pack));
690 		if (pack < 16)
691 			pack = 16;
692 		if (pack == 32)
693 			pack = 64;
694 		if (pack > 4096)
695 			pack = 4096;
696 		if (fl_pack != -1) {
697 			device_printf(sc->dev, "Invalid hw.cxgbe.fl_pack value"
698 			    " (%d), using %d instead.\n", fl_pack, pack);
699 		}
700 	}
701 	m = V_INGPACKBOUNDARY(M_INGPACKBOUNDARY);
702 	if (pack == 16)
703 		v = V_INGPACKBOUNDARY(0);
704 	else
705 		v = V_INGPACKBOUNDARY(ilog2(pack) - 5);
706 
707 	MPASS(!is_t4(sc));	/* T4 doesn't have SGE_CONTROL2 */
708 	t4_set_reg_field(sc, A_SGE_CONTROL2, m, v);
709 }
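/*
 * Worked example for the pack boundary above, with assumed values: if the
 * tunable is left at -1 and the PCIe max payload size (mps) is 256, which is
 * under the 512B MAX_PACK_BOUNDARY cap, then pack = max(256, CACHE_LINE_SIZE)
 * = 256 on a 64B cache line system, and the register field is
 * ilog2(256) - 5 = 3.
 */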
710 
711 /*
712  * adap->params.vpd.cclk must be set up before this is called.
713  */
714 void
715 t4_tweak_chip_settings(struct adapter *sc)
716 {
717 	int i, reg;
718 	uint32_t v, m;
719 	int intr_timer[SGE_NTIMERS] = {1, 5, 10, 50, 100, 200};
720 	int timer_max = M_TIMERVALUE0 * 1000 / sc->params.vpd.cclk;
721 	int intr_pktcount[SGE_NCOUNTERS] = {1, 8, 16, 32}; /* 63 max */
722 	uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE);
723 	static int sw_buf_sizes[] = {
724 		MCLBYTES,
725 #if MJUMPAGESIZE != MCLBYTES
726 		MJUMPAGESIZE,
727 #endif
728 		MJUM9BYTES,
729 		MJUM16BYTES
730 	};
731 
732 	KASSERT(sc->flags & MASTER_PF,
733 	    ("%s: trying to change chip settings when not master.", __func__));
734 
735 	m = V_PKTSHIFT(M_PKTSHIFT) | F_RXPKTCPLMODE | F_EGRSTATUSPAGESIZE;
736 	v = V_PKTSHIFT(fl_pktshift) | F_RXPKTCPLMODE |
737 	    V_EGRSTATUSPAGESIZE(spg_len == 128);
738 	t4_set_reg_field(sc, A_SGE_CONTROL, m, v);
739 
740 	setup_pad_and_pack_boundaries(sc);
741 
742 	v = V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10) |
743 	    V_HOSTPAGESIZEPF1(PAGE_SHIFT - 10) |
744 	    V_HOSTPAGESIZEPF2(PAGE_SHIFT - 10) |
745 	    V_HOSTPAGESIZEPF3(PAGE_SHIFT - 10) |
746 	    V_HOSTPAGESIZEPF4(PAGE_SHIFT - 10) |
747 	    V_HOSTPAGESIZEPF5(PAGE_SHIFT - 10) |
748 	    V_HOSTPAGESIZEPF6(PAGE_SHIFT - 10) |
749 	    V_HOSTPAGESIZEPF7(PAGE_SHIFT - 10);
750 	t4_write_reg(sc, A_SGE_HOST_PAGE_SIZE, v);
751 
752 	t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE0, 4096);
753 	t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE1, 65536);
754 	reg = A_SGE_FL_BUFFER_SIZE2;
755 	for (i = 0; i < nitems(sw_buf_sizes); i++) {
756 		MPASS(reg <= A_SGE_FL_BUFFER_SIZE15);
757 		t4_write_reg(sc, reg, sw_buf_sizes[i]);
758 		reg += 4;
759 		MPASS(reg <= A_SGE_FL_BUFFER_SIZE15);
760 		t4_write_reg(sc, reg, sw_buf_sizes[i] - CL_METADATA_SIZE);
761 		reg += 4;
762 	}
763 
764 	v = V_THRESHOLD_0(intr_pktcount[0]) | V_THRESHOLD_1(intr_pktcount[1]) |
765 	    V_THRESHOLD_2(intr_pktcount[2]) | V_THRESHOLD_3(intr_pktcount[3]);
766 	t4_write_reg(sc, A_SGE_INGRESS_RX_THRESHOLD, v);
767 
768 	KASSERT(intr_timer[0] <= timer_max,
769 	    ("%s: not a single usable timer (%d, %d)", __func__, intr_timer[0],
770 	    timer_max));
771 	for (i = 1; i < nitems(intr_timer); i++) {
772 		KASSERT(intr_timer[i] >= intr_timer[i - 1],
773 		    ("%s: timers not listed in increasing order (%d)",
774 		    __func__, i));
775 
776 		while (intr_timer[i] > timer_max) {
777 			if (i == nitems(intr_timer) - 1) {
778 				intr_timer[i] = timer_max;
779 				break;
780 			}
781 			intr_timer[i] += intr_timer[i - 1];
782 			intr_timer[i] /= 2;
783 		}
784 	}
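	/*
	 * The loop above nudges any timer that exceeds timer_max down by
	 * repeatedly averaging it with its (smaller) left neighbor, which
	 * preserves the increasing order of the array; the last timer is
	 * simply clamped to timer_max.
	 */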
785 
786 	v = V_TIMERVALUE0(us_to_core_ticks(sc, intr_timer[0])) |
787 	    V_TIMERVALUE1(us_to_core_ticks(sc, intr_timer[1]));
788 	t4_write_reg(sc, A_SGE_TIMER_VALUE_0_AND_1, v);
789 	v = V_TIMERVALUE2(us_to_core_ticks(sc, intr_timer[2])) |
790 	    V_TIMERVALUE3(us_to_core_ticks(sc, intr_timer[3]));
791 	t4_write_reg(sc, A_SGE_TIMER_VALUE_2_AND_3, v);
792 	v = V_TIMERVALUE4(us_to_core_ticks(sc, intr_timer[4])) |
793 	    V_TIMERVALUE5(us_to_core_ticks(sc, intr_timer[5]));
794 	t4_write_reg(sc, A_SGE_TIMER_VALUE_4_AND_5, v);
795 
796 	if (chip_id(sc) >= CHELSIO_T6) {
797 		m = V_TSCALE(M_TSCALE);
798 		if (tscale == 1)
799 			v = 0;
800 		else
801 			v = V_TSCALE(tscale - 2);
802 		t4_set_reg_field(sc, A_SGE_ITP_CONTROL, m, v);
803 
804 		if (sc->debug_flags & DF_DISABLE_TCB_CACHE) {
805 			m = V_RDTHRESHOLD(M_RDTHRESHOLD) | F_WRTHRTHRESHEN |
806 			    V_WRTHRTHRESH(M_WRTHRTHRESH);
807 			t4_tp_pio_read(sc, &v, 1, A_TP_CMM_CONFIG, 1);
808 			v &= ~m;
809 			v |= V_RDTHRESHOLD(1) | F_WRTHRTHRESHEN |
810 			    V_WRTHRTHRESH(16);
811 			t4_tp_pio_write(sc, &v, 1, A_TP_CMM_CONFIG, 1);
812 		}
813 	}
814 
815 	/* 4K, 16K, 64K, 256K DDP "page sizes" for TDDP */
816 	v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6);
817 	t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, v);
818 
819 	/*
820 	 * 4K, 8K, 16K, 64K DDP "page sizes" for iSCSI DDP.  These have been
821 	 * chosen with MAXPHYS = 128K in mind.  The largest DDP buffer that we
822 	 * may have to deal with is MAXPHYS + 1 page.
823 	 */
824 	v = V_HPZ0(0) | V_HPZ1(1) | V_HPZ2(2) | V_HPZ3(4);
825 	t4_write_reg(sc, A_ULP_RX_ISCSI_PSZ, v);
826 
827 	/* We use multiple DDP page sizes both in plain-TOE and ISCSI modes. */
828 	m = v = F_TDDPTAGTCB | F_ISCSITAGTCB;
829 	t4_set_reg_field(sc, A_ULP_RX_CTL, m, v);
830 
831 	m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET |
832 	    F_RESETDDPOFFSET;
833 	v = V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET;
834 	t4_set_reg_field(sc, A_TP_PARA_REG5, m, v);
835 }
836 
837 /*
838  * SGE wants the buffer to be at least 64B and then a multiple of 16.  Its
839  * address must be 16B aligned.  If padding is in use the buffer's start and end
840  * need to be aligned to the pad boundary as well.  We'll just make sure that
841  * the size is a multiple of the pad boundary here; it is up to the buffer
842  * allocation code to make sure the start of the buffer is aligned.
843  */
844 static inline int
845 hwsz_ok(struct adapter *sc, int hwsz)
846 {
847 	int mask = fl_pad ? sc->params.sge.pad_boundary - 1 : 16 - 1;
848 
849 	return (hwsz >= 64 && (hwsz & mask) == 0);
850 }
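/*
 * Example: with padding enabled and a 64B pad boundary, hwsz_ok accepts 4096
 * (>= 64 and 64B-aligned) but rejects 4000; with padding disabled the size
 * only has to be a multiple of 16.
 */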
851 
852 /*
853  * Initialize the rx buffer sizes and figure out which zones the buffers will
854  * be allocated from.
855  */
856 void
857 t4_init_rx_buf_info(struct adapter *sc)
858 {
859 	struct sge *s = &sc->sge;
860 	struct sge_params *sp = &sc->params.sge;
861 	int i, j, n;
862 	static int sw_buf_sizes[] = {	/* Sorted by size */
863 		MCLBYTES,
864 #if MJUMPAGESIZE != MCLBYTES
865 		MJUMPAGESIZE,
866 #endif
867 		MJUM9BYTES,
868 		MJUM16BYTES
869 	};
870 	struct rx_buf_info *rxb;
871 
872 	s->safe_zidx = -1;
873 	rxb = &s->rx_buf_info[0];
874 	for (i = 0; i < SW_ZONE_SIZES; i++, rxb++) {
875 		rxb->size1 = sw_buf_sizes[i];
876 		rxb->zone = m_getzone(rxb->size1);
877 		rxb->type = m_gettype(rxb->size1);
878 		rxb->size2 = 0;
879 		rxb->hwidx1 = -1;
880 		rxb->hwidx2 = -1;
881 		for (j = 0; j < SGE_FLBUF_SIZES; j++) {
882 			int hwsize = sp->sge_fl_buffer_size[j];
883 
884 			if (!hwsz_ok(sc, hwsize))
885 				continue;
886 
887 			/* hwidx for size1 */
888 			if (rxb->hwidx1 == -1 && rxb->size1 == hwsize)
889 				rxb->hwidx1 = j;
890 
891 			/* hwidx for size2 (buffer packing) */
892 			if (rxb->size1 - CL_METADATA_SIZE < hwsize)
893 				continue;
894 			n = rxb->size1 - hwsize - CL_METADATA_SIZE;
895 			if (n == 0) {
896 				rxb->hwidx2 = j;
897 				rxb->size2 = hwsize;
898 				break;	/* stop looking */
899 			}
900 			if (rxb->hwidx2 != -1) {
901 				if (n < sp->sge_fl_buffer_size[rxb->hwidx2] -
902 				    hwsize - CL_METADATA_SIZE) {
903 					rxb->hwidx2 = j;
904 					rxb->size2 = hwsize;
905 				}
906 			} else if (n <= 2 * CL_METADATA_SIZE) {
907 				rxb->hwidx2 = j;
908 				rxb->size2 = hwsize;
909 			}
910 		}
911 		if (rxb->hwidx2 != -1)
912 			sc->flags |= BUF_PACKING_OK;
913 		if (s->safe_zidx == -1 && rxb->size1 == safest_rx_cluster)
914 			s->safe_zidx = i;
915 	}
916 }
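/*
 * Example of the size1/size2 split above: t4_tweak_chip_settings programs
 * both MJUMPAGESIZE and MJUMPAGESIZE - CL_METADATA_SIZE into the hardware
 * buffer size registers, so for that zone the loop finds an exact fit
 * (n == 0) and sets size2 = size1 - CL_METADATA_SIZE, leaving room for the
 * metadata at the end of the cluster when buffer packing is in use.
 */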
917 
918 /*
919  * Verify some basic SGE settings for the PF and VF driver, and other
920  * miscellaneous settings for the PF driver.
921  */
922 int
923 t4_verify_chip_settings(struct adapter *sc)
924 {
925 	struct sge_params *sp = &sc->params.sge;
926 	uint32_t m, v, r;
927 	int rc = 0;
928 	const uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE);
929 
930 	m = F_RXPKTCPLMODE;
931 	v = F_RXPKTCPLMODE;
932 	r = sp->sge_control;
933 	if ((r & m) != v) {
934 		device_printf(sc->dev, "invalid SGE_CONTROL(0x%x)\n", r);
935 		rc = EINVAL;
936 	}
937 
938 	/*
939 	 * If this changes then every single use of PAGE_SHIFT in the driver
940 	 * needs to be carefully reviewed for PAGE_SHIFT vs sp->page_shift.
941 	 */
942 	if (sp->page_shift != PAGE_SHIFT) {
943 		device_printf(sc->dev, "invalid SGE_HOST_PAGE_SIZE(0x%x)\n", r);
944 		rc = EINVAL;
945 	}
946 
947 	if (sc->flags & IS_VF)
948 		return (0);
949 
950 	v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6);
951 	r = t4_read_reg(sc, A_ULP_RX_TDDP_PSZ);
952 	if (r != v) {
953 		device_printf(sc->dev, "invalid ULP_RX_TDDP_PSZ(0x%x)\n", r);
954 		if (sc->vres.ddp.size != 0)
955 			rc = EINVAL;
956 	}
957 
958 	m = v = F_TDDPTAGTCB;
959 	r = t4_read_reg(sc, A_ULP_RX_CTL);
960 	if ((r & m) != v) {
961 		device_printf(sc->dev, "invalid ULP_RX_CTL(0x%x)\n", r);
962 		if (sc->vres.ddp.size != 0)
963 			rc = EINVAL;
964 	}
965 
966 	m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET |
967 	    F_RESETDDPOFFSET;
968 	v = V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET;
969 	r = t4_read_reg(sc, A_TP_PARA_REG5);
970 	if ((r & m) != v) {
971 		device_printf(sc->dev, "invalid TP_PARA_REG5(0x%x)\n", r);
972 		if (sc->vres.ddp.size != 0)
973 			rc = EINVAL;
974 	}
975 
976 	return (rc);
977 }
978 
979 int
980 t4_create_dma_tag(struct adapter *sc)
981 {
982 	int rc;
983 
984 	rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
985 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE,
986 	    BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL,
987 	    NULL, &sc->dmat);
988 	if (rc != 0) {
989 		device_printf(sc->dev,
990 		    "failed to create main DMA tag: %d\n", rc);
991 	}
992 
993 	return (rc);
994 }
995 
996 void
997 t4_sge_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx,
998     struct sysctl_oid_list *children)
999 {
1000 	struct sge_params *sp = &sc->params.sge;
1001 
1002 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "buffer_sizes",
1003 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
1004 	    sysctl_bufsizes, "A", "freelist buffer sizes");
1005 
1006 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pktshift", CTLFLAG_RD,
1007 	    NULL, sp->fl_pktshift, "payload DMA offset in rx buffer (bytes)");
1008 
1009 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pad", CTLFLAG_RD,
1010 	    NULL, sp->pad_boundary, "payload pad boundary (bytes)");
1011 
1012 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "spg_len", CTLFLAG_RD,
1013 	    NULL, sp->spg_len, "status page size (bytes)");
1014 
1015 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cong_drop", CTLFLAG_RD,
1016 	    NULL, cong_drop, "congestion drop setting");
1017 #ifdef TCP_OFFLOAD
1018 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ofld_cong_drop", CTLFLAG_RD,
1019 	    NULL, ofld_cong_drop, "congestion drop setting");
1020 #endif
1021 
1022 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pack", CTLFLAG_RD,
1023 	    NULL, sp->pack_boundary, "payload pack boundary (bytes)");
1024 }
1025 
1026 int
1027 t4_destroy_dma_tag(struct adapter *sc)
1028 {
1029 	if (sc->dmat)
1030 		bus_dma_tag_destroy(sc->dmat);
1031 
1032 	return (0);
1033 }
1034 
1035 /*
1036  * Allocate and initialize the firmware event queue, control queues, and special
1037  * purpose rx queues owned by the adapter.
1038  *
1039  * Returns errno on failure.  Resources allocated up to that point may still be
1040  * allocated.  Caller is responsible for cleanup in case this function fails.
1041  */
1042 int
1043 t4_setup_adapter_queues(struct adapter *sc)
1044 {
1045 	int rc, i;
1046 
1047 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
1048 
1049 	/*
1050 	 * Firmware event queue
1051 	 */
1052 	rc = alloc_fwq(sc);
1053 	if (rc != 0)
1054 		return (rc);
1055 
1056 	/*
1057 	 * That's all for the VF driver.
1058 	 */
1059 	if (sc->flags & IS_VF)
1060 		return (rc);
1061 
1062 	/*
1063 	 * XXX: General purpose rx queues, one per port.
1064 	 */
1065 
1066 	/*
1067 	 * Control queues, one per port.
1068 	 */
1069 	for_each_port(sc, i) {
1070 		rc = alloc_ctrlq(sc, i);
1071 		if (rc != 0)
1072 			return (rc);
1073 	}
1074 
1075 	return (rc);
1076 }
1077 
1078 /*
1079  * Idempotent
1080  */
1081 int
1082 t4_teardown_adapter_queues(struct adapter *sc)
1083 {
1084 	int i;
1085 
1086 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
1087 
1088 	if (sc->sge.ctrlq != NULL) {
1089 		MPASS(!(sc->flags & IS_VF));	/* VFs don't allocate ctrlq. */
1090 		for_each_port(sc, i)
1091 			free_ctrlq(sc, i);
1092 	}
1093 	free_fwq(sc);
1094 
1095 	return (0);
1096 }
1097 
1098 /* Maximum payload that could arrive with a single iq descriptor. */
1099 static inline int
1100 max_rx_payload(struct adapter *sc, if_t ifp, const bool ofld)
1101 {
1102 	int maxp;
1103 
1104 	/* large enough even when hw VLAN extraction is disabled */
1105 	maxp = sc->params.sge.fl_pktshift + ETHER_HDR_LEN +
1106 	    ETHER_VLAN_ENCAP_LEN + if_getmtu(ifp);
1107 	if (ofld && sc->tt.tls && sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS &&
1108 	    maxp < sc->params.tp.max_rx_pdu)
1109 		maxp = sc->params.tp.max_rx_pdu;
1110 	return (maxp);
1111 }
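/*
 * Example: with fl_pktshift = 0 and a 1500-byte MTU this works out to
 * 0 + 14 (Ethernet header) + 4 (VLAN tag) + 1500 = 1518 bytes, unless the
 * TOE TLS case above bumps it up to the maximum rx PDU.
 */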
1112 
1113 int
1114 t4_setup_vi_queues(struct vi_info *vi)
1115 {
1116 	int rc = 0, i, intr_idx;
1117 	struct sge_rxq *rxq;
1118 	struct sge_txq *txq;
1119 #ifdef TCP_OFFLOAD
1120 	struct sge_ofld_rxq *ofld_rxq;
1121 #endif
1122 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
1123 	struct sge_ofld_txq *ofld_txq;
1124 #endif
1125 #ifdef DEV_NETMAP
1126 	int saved_idx, iqidx;
1127 	struct sge_nm_rxq *nm_rxq;
1128 	struct sge_nm_txq *nm_txq;
1129 #endif
1130 	struct adapter *sc = vi->adapter;
1131 	if_t ifp = vi->ifp;
1132 	int maxp;
1133 
1134 	/* Interrupt vector to start from (when using multiple vectors) */
1135 	intr_idx = vi->first_intr;
1136 
1137 #ifdef DEV_NETMAP
1138 	saved_idx = intr_idx;
1139 	if (if_getcapabilities(ifp) & IFCAP_NETMAP) {
1140 
1141 		/* netmap is supported with direct interrupts only. */
1142 		MPASS(!forwarding_intr_to_fwq(sc));
1143 		MPASS(vi->first_intr >= 0);
1144 
1145 		/*
1146 		 * We don't have buffers to back the netmap rx queues
1147 		 * right now so we create the queues in a way that
1148 		 * doesn't set off any congestion signal in the chip.
1149 		 */
1150 		for_each_nm_rxq(vi, i, nm_rxq) {
1151 			rc = alloc_nm_rxq(vi, nm_rxq, intr_idx, i);
1152 			if (rc != 0)
1153 				goto done;
1154 			intr_idx++;
1155 		}
1156 
1157 		for_each_nm_txq(vi, i, nm_txq) {
1158 			iqidx = vi->first_nm_rxq + (i % vi->nnmrxq);
1159 			rc = alloc_nm_txq(vi, nm_txq, iqidx, i);
1160 			if (rc != 0)
1161 				goto done;
1162 		}
1163 	}
1164 
1165 	/* Normal rx queues and netmap rx queues share the same interrupts. */
1166 	intr_idx = saved_idx;
1167 #endif
1168 
1169 	/*
1170 	 * Allocate rx queues first because a default iqid is required when
1171 	 * creating a tx queue.
1172 	 */
1173 	maxp = max_rx_payload(sc, ifp, false);
1174 	for_each_rxq(vi, i, rxq) {
1175 		rc = alloc_rxq(vi, rxq, i, intr_idx, maxp);
1176 		if (rc != 0)
1177 			goto done;
1178 		if (!forwarding_intr_to_fwq(sc))
1179 			intr_idx++;
1180 	}
1181 #ifdef DEV_NETMAP
1182 	if (if_getcapabilities(ifp) & IFCAP_NETMAP)
1183 		intr_idx = saved_idx + max(vi->nrxq, vi->nnmrxq);
1184 #endif
1185 #ifdef TCP_OFFLOAD
1186 	maxp = max_rx_payload(sc, ifp, true);
1187 	for_each_ofld_rxq(vi, i, ofld_rxq) {
1188 		rc = alloc_ofld_rxq(vi, ofld_rxq, i, intr_idx, maxp);
1189 		if (rc != 0)
1190 			goto done;
1191 		if (!forwarding_intr_to_fwq(sc))
1192 			intr_idx++;
1193 	}
1194 #endif
1195 
1196 	/*
1197 	 * Now the tx queues.
1198 	 */
1199 	for_each_txq(vi, i, txq) {
1200 		rc = alloc_txq(vi, txq, i);
1201 		if (rc != 0)
1202 			goto done;
1203 	}
1204 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
1205 	for_each_ofld_txq(vi, i, ofld_txq) {
1206 		rc = alloc_ofld_txq(vi, ofld_txq, i);
1207 		if (rc != 0)
1208 			goto done;
1209 	}
1210 #endif
1211 done:
1212 	if (rc)
1213 		t4_teardown_vi_queues(vi);
1214 
1215 	return (rc);
1216 }
1217 
1218 /*
1219  * Idempotent
1220  */
1221 int
1222 t4_teardown_vi_queues(struct vi_info *vi)
1223 {
1224 	int i;
1225 	struct sge_rxq *rxq;
1226 	struct sge_txq *txq;
1227 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
1228 	struct sge_ofld_txq *ofld_txq;
1229 #endif
1230 #ifdef TCP_OFFLOAD
1231 	struct sge_ofld_rxq *ofld_rxq;
1232 #endif
1233 #ifdef DEV_NETMAP
1234 	struct sge_nm_rxq *nm_rxq;
1235 	struct sge_nm_txq *nm_txq;
1236 #endif
1237 
1238 #ifdef DEV_NETMAP
1239 	if (if_getcapabilities(vi->ifp) & IFCAP_NETMAP) {
1240 		for_each_nm_txq(vi, i, nm_txq) {
1241 			free_nm_txq(vi, nm_txq);
1242 		}
1243 
1244 		for_each_nm_rxq(vi, i, nm_rxq) {
1245 			free_nm_rxq(vi, nm_rxq);
1246 		}
1247 	}
1248 #endif
1249 
1250 	/*
1251 	 * Take down all the tx queues first, as they reference the rx queues
1252 	 * (for egress updates, etc.).
1253 	 */
1254 
1255 	for_each_txq(vi, i, txq) {
1256 		free_txq(vi, txq);
1257 	}
1258 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
1259 	for_each_ofld_txq(vi, i, ofld_txq) {
1260 		free_ofld_txq(vi, ofld_txq);
1261 	}
1262 #endif
1263 
1264 	/*
1265 	 * Then take down the rx queues.
1266 	 */
1267 
1268 	for_each_rxq(vi, i, rxq) {
1269 		free_rxq(vi, rxq);
1270 	}
1271 #ifdef TCP_OFFLOAD
1272 	for_each_ofld_rxq(vi, i, ofld_rxq) {
1273 		free_ofld_rxq(vi, ofld_rxq);
1274 	}
1275 #endif
1276 
1277 	return (0);
1278 }
1279 
1280 /*
1281  * Interrupt handler when the driver is using only 1 interrupt.  This is a very
1282  * unusual scenario.
1283  *
1284  * a) Deals with errors, if any.
1285  * b) Services firmware event queue, which is taking interrupts for all other
1286  *    queues.
1287  */
1288 void
1289 t4_intr_all(void *arg)
1290 {
1291 	struct adapter *sc = arg;
1292 	struct sge_iq *fwq = &sc->sge.fwq;
1293 
1294 	MPASS(sc->intr_count == 1);
1295 
1296 	if (sc->intr_type == INTR_INTX)
1297 		t4_write_reg(sc, MYPF_REG(A_PCIE_PF_CLI), 0);
1298 
1299 	t4_intr_err(arg);
1300 	t4_intr_evt(fwq);
1301 }
1302 
1303 /*
1304  * Interrupt handler for errors (installed directly when multiple interrupts are
1305  * being used, or called by t4_intr_all).
1306  */
1307 void
1308 t4_intr_err(void *arg)
1309 {
1310 	struct adapter *sc = arg;
1311 	uint32_t v;
1312 	const bool verbose = (sc->debug_flags & DF_VERBOSE_SLOWINTR) != 0;
1313 
1314 	if (atomic_load_int(&sc->error_flags) & ADAP_FATAL_ERR)
1315 		return;
1316 
1317 	v = t4_read_reg(sc, MYPF_REG(A_PL_PF_INT_CAUSE));
1318 	if (v & F_PFSW) {
1319 		sc->swintr++;
1320 		t4_write_reg(sc, MYPF_REG(A_PL_PF_INT_CAUSE), v);
1321 	}
1322 
1323 	if (t4_slow_intr_handler(sc, verbose))
1324 		t4_fatal_err(sc, false);
1325 }
1326 
1327 /*
1328  * Interrupt handler for iq-only queues.  The firmware event queue is the only
1329  * such queue right now.
1330  */
1331 void
1332 t4_intr_evt(void *arg)
1333 {
1334 	struct sge_iq *iq = arg;
1335 
1336 	if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) {
1337 		service_iq(iq, 0);
1338 		(void) atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE);
1339 	}
1340 }
1341 
1342 /*
1343  * Interrupt handler for iq+fl queues.
1344  */
1345 void
1346 t4_intr(void *arg)
1347 {
1348 	struct sge_iq *iq = arg;
1349 
1350 	if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) {
1351 		service_iq_fl(iq, 0);
1352 		(void) atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE);
1353 	}
1354 }
1355 
1356 #ifdef DEV_NETMAP
1357 /*
1358  * Interrupt handler for netmap rx queues.
1359  */
1360 void
1361 t4_nm_intr(void *arg)
1362 {
1363 	struct sge_nm_rxq *nm_rxq = arg;
1364 
1365 	if (atomic_cmpset_int(&nm_rxq->nm_state, NM_ON, NM_BUSY)) {
1366 		service_nm_rxq(nm_rxq);
1367 		(void) atomic_cmpset_int(&nm_rxq->nm_state, NM_BUSY, NM_ON);
1368 	}
1369 }
1370 
1371 /*
1372  * Interrupt handler for vectors shared between NIC and netmap rx queues.
1373  */
1374 void
1375 t4_vi_intr(void *arg)
1376 {
1377 	struct irq *irq = arg;
1378 
1379 	MPASS(irq->nm_rxq != NULL);
1380 	t4_nm_intr(irq->nm_rxq);
1381 
1382 	MPASS(irq->rxq != NULL);
1383 	t4_intr(irq->rxq);
1384 }
1385 #endif
1386 
1387 /*
1388  * Deals with interrupts on an iq-only (no freelist) queue.
1389  */
1390 static int
1391 service_iq(struct sge_iq *iq, int budget)
1392 {
1393 	struct sge_iq *q;
1394 	struct adapter *sc = iq->adapter;
1395 	struct iq_desc *d = &iq->desc[iq->cidx];
1396 	int ndescs = 0, limit;
1397 	int rsp_type;
1398 	uint32_t lq;
1399 	STAILQ_HEAD(, sge_iq) iql = STAILQ_HEAD_INITIALIZER(iql);
1400 
1401 	KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq));
1402 	KASSERT((iq->flags & IQ_HAS_FL) == 0,
1403 	    ("%s: called for iq %p with fl (iq->flags 0x%x)", __func__, iq,
1404 	    iq->flags));
1405 	MPASS((iq->flags & IQ_ADJ_CREDIT) == 0);
1406 	MPASS((iq->flags & IQ_LRO_ENABLED) == 0);
1407 
1408 	limit = budget ? budget : iq->qsize / 16;
1409 
1410 	/*
1411 	 * We always come back and check the descriptor ring for new indirect
1412 	 * interrupts and other responses after running a single handler.
1413 	 */
1414 	for (;;) {
1415 		while ((d->rsp.u.type_gen & F_RSPD_GEN) == iq->gen) {
1416 
1417 			rmb();
1418 
1419 			rsp_type = G_RSPD_TYPE(d->rsp.u.type_gen);
1420 			lq = be32toh(d->rsp.pldbuflen_qid);
1421 
1422 			switch (rsp_type) {
1423 			case X_RSPD_TYPE_FLBUF:
1424 				panic("%s: data for an iq (%p) with no freelist",
1425 				    __func__, iq);
1426 
1427 				/* NOTREACHED */
1428 
1429 			case X_RSPD_TYPE_CPL:
1430 				KASSERT(d->rss.opcode < NUM_CPL_CMDS,
1431 				    ("%s: bad opcode %02x.", __func__,
1432 				    d->rss.opcode));
1433 				t4_cpl_handler[d->rss.opcode](iq, &d->rss, NULL);
1434 				break;
1435 
1436 			case X_RSPD_TYPE_INTR:
1437 				/*
1438 				 * There are 1K interrupt-capable queues (qids 0
1439 				 * through 1023).  A response type indicating a
1440 				 * forwarded interrupt with a qid >= 1K is an
1441 				 * iWARP async notification.
1442 				 */
1443 				if (__predict_true(lq >= 1024)) {
1444 					t4_an_handler(iq, &d->rsp);
1445 					break;
1446 				}
1447 
1448 				q = sc->sge.iqmap[lq - sc->sge.iq_start -
1449 				    sc->sge.iq_base];
1450 				if (atomic_cmpset_int(&q->state, IQS_IDLE,
1451 				    IQS_BUSY)) {
1452 					if (service_iq_fl(q, q->qsize / 16) == 0) {
1453 						(void) atomic_cmpset_int(&q->state,
1454 						    IQS_BUSY, IQS_IDLE);
1455 					} else {
1456 						STAILQ_INSERT_TAIL(&iql, q,
1457 						    link);
1458 					}
1459 				}
1460 				break;
1461 
1462 			default:
1463 				KASSERT(0,
1464 				    ("%s: illegal response type %d on iq %p",
1465 				    __func__, rsp_type, iq));
1466 				log(LOG_ERR,
1467 				    "%s: illegal response type %d on iq %p",
1468 				    device_get_nameunit(sc->dev), rsp_type, iq);
1469 				break;
1470 			}
1471 
1472 			d++;
1473 			if (__predict_false(++iq->cidx == iq->sidx)) {
1474 				iq->cidx = 0;
1475 				iq->gen ^= F_RSPD_GEN;
1476 				d = &iq->desc[0];
1477 			}
1478 			if (__predict_false(++ndescs == limit)) {
1479 				t4_write_reg(sc, sc->sge_gts_reg,
1480 				    V_CIDXINC(ndescs) |
1481 				    V_INGRESSQID(iq->cntxt_id) |
1482 				    V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
1483 				ndescs = 0;
1484 
1485 				if (budget) {
1486 					return (EINPROGRESS);
1487 				}
1488 			}
1489 		}
1490 
1491 		if (STAILQ_EMPTY(&iql))
1492 			break;
1493 
1494 		/*
1495 		 * Process the head only, and send it to the back of the list if
1496 		 * it's still not done.
1497 		 */
1498 		q = STAILQ_FIRST(&iql);
1499 		STAILQ_REMOVE_HEAD(&iql, link);
1500 		if (service_iq_fl(q, q->qsize / 8) == 0)
1501 			(void) atomic_cmpset_int(&q->state, IQS_BUSY, IQS_IDLE);
1502 		else
1503 			STAILQ_INSERT_TAIL(&iql, q, link);
1504 	}
1505 
1506 	t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) |
1507 	    V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params));
1508 
1509 	return (0);
1510 }
1511 
1512 #if defined(INET) || defined(INET6)
1513 static inline int
1514 sort_before_lro(struct lro_ctrl *lro)
1515 {
1516 
1517 	return (lro->lro_mbuf_max != 0);
1518 }
1519 #endif
1520 
1521 #define CGBE_SHIFT_SCALE 10
1522 
1523 static inline uint64_t
1524 t4_tstmp_to_ns(struct adapter *sc, uint64_t lf)
1525 {
1526 	struct clock_sync *cur, dcur;
1527 	uint64_t hw_clocks;
1528 	uint64_t hw_clk_div;
1529 	sbintime_t sbt_cur_to_prev, sbt;
1530 	uint64_t hw_tstmp = lf & 0xfffffffffffffffULL;	/* 60b, not 64b. */
1531 	seqc_t gen;
1532 
1533 	for (;;) {
1534 		cur = &sc->cal_info[sc->cal_current];
1535 		gen = seqc_read(&cur->gen);
1536 		if (gen == 0)
1537 			return (0);
1538 		dcur = *cur;
1539 		if (seqc_consistent(&cur->gen, gen))
1540 			break;
1541 	}
1542 
1543 	/*
1544 	 * Our goal here is to have a result that is:
1545 	 *
1546 	 * (                             (cur_time - prev_time)   )
1547 	 * ((hw_tstmp - hw_prev) *  ----------------------------- ) + prev_time
1548 	 * (                             (hw_cur - hw_prev)       )
1549 	 *
1550 	 * With the constraints that we cannot use float and we
1551 	 * don't want to overflow the uint64_t numbers we are using.
1552 	 */
1553 	hw_clocks = hw_tstmp - dcur.hw_prev;
1554 	sbt_cur_to_prev = (dcur.sbt_cur - dcur.sbt_prev);
1555 	hw_clk_div = dcur.hw_cur - dcur.hw_prev;
1556 	sbt = hw_clocks * sbt_cur_to_prev / hw_clk_div + dcur.sbt_prev;
1557 	return (sbttons(sbt));
1558 }
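/*
 * A worked example of the interpolation with made-up calibration data: if
 * hw_prev = 1000, hw_cur = 3000, sbt_prev = 10s, and sbt_cur = 12s, then a
 * hardware stamp of 2000 sits halfway through the window and converts to
 * (2000 - 1000) * (12s - 10s) / (3000 - 1000) + 10s = 11s.
 */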
1559 
1560 static inline void
1561 move_to_next_rxbuf(struct sge_fl *fl)
1562 {
1563 
1564 	fl->rx_offset = 0;
1565 	if (__predict_false((++fl->cidx & 7) == 0)) {
1566 		uint16_t cidx = fl->cidx >> 3;
1567 
1568 		if (__predict_false(cidx == fl->sidx))
1569 			fl->cidx = cidx = 0;
1570 		fl->hw_cidx = cidx;
1571 	}
1572 }
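/*
 * Note on the arithmetic above: fl->cidx counts individual buffers and
 * carries 3 extra low-order bits; the hardware consumer index (hw_cidx)
 * only advances once every 8 buffers, which is why it is derived as
 * cidx >> 3.
 */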
1573 
1574 /*
1575  * Deals with interrupts on an iq+fl queue.
1576  */
1577 static int
1578 service_iq_fl(struct sge_iq *iq, int budget)
1579 {
1580 	struct sge_rxq *rxq = iq_to_rxq(iq);
1581 	struct sge_fl *fl;
1582 	struct adapter *sc = iq->adapter;
1583 	struct iq_desc *d = &iq->desc[iq->cidx];
1584 	int ndescs, limit;
1585 	int rsp_type, starved;
1586 	uint32_t lq;
1587 	uint16_t fl_hw_cidx;
1588 	struct mbuf *m0;
1589 #if defined(INET) || defined(INET6)
1590 	const struct timeval lro_timeout = {0, sc->lro_timeout};
1591 	struct lro_ctrl *lro = &rxq->lro;
1592 #endif
1593 
1594 	KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq));
1595 	MPASS(iq->flags & IQ_HAS_FL);
1596 
1597 	ndescs = 0;
1598 #if defined(INET) || defined(INET6)
1599 	if (iq->flags & IQ_ADJ_CREDIT) {
1600 		MPASS(sort_before_lro(lro));
1601 		iq->flags &= ~IQ_ADJ_CREDIT;
1602 		if ((d->rsp.u.type_gen & F_RSPD_GEN) != iq->gen) {
1603 			tcp_lro_flush_all(lro);
1604 			t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(1) |
1605 			    V_INGRESSQID((u32)iq->cntxt_id) |
1606 			    V_SEINTARM(iq->intr_params));
1607 			return (0);
1608 		}
1609 		ndescs = 1;
1610 	}
1611 #else
1612 	MPASS((iq->flags & IQ_ADJ_CREDIT) == 0);
1613 #endif
1614 
1615 	limit = budget ? budget : iq->qsize / 16;
1616 	fl = &rxq->fl;
1617 	fl_hw_cidx = fl->hw_cidx;	/* stable snapshot */
1618 	while ((d->rsp.u.type_gen & F_RSPD_GEN) == iq->gen) {
1619 
1620 		rmb();
1621 
1622 		m0 = NULL;
1623 		rsp_type = G_RSPD_TYPE(d->rsp.u.type_gen);
1624 		lq = be32toh(d->rsp.pldbuflen_qid);
1625 
1626 		switch (rsp_type) {
1627 		case X_RSPD_TYPE_FLBUF:
1628 			if (lq & F_RSPD_NEWBUF) {
1629 				if (fl->rx_offset > 0)
1630 					move_to_next_rxbuf(fl);
1631 				lq = G_RSPD_LEN(lq);
1632 			}
1633 			if (IDXDIFF(fl->hw_cidx, fl_hw_cidx, fl->sidx) > 4) {
1634 				FL_LOCK(fl);
1635 				refill_fl(sc, fl, 64);
1636 				FL_UNLOCK(fl);
1637 				fl_hw_cidx = fl->hw_cidx;
1638 			}
1639 
1640 			if (d->rss.opcode == CPL_RX_PKT) {
1641 				if (__predict_true(eth_rx(sc, rxq, d, lq) == 0))
1642 					break;
1643 				goto out;
1644 			}
1645 			m0 = get_fl_payload(sc, fl, lq);
1646 			if (__predict_false(m0 == NULL))
1647 				goto out;
1648 
1649 			/* fall through */
1650 
1651 		case X_RSPD_TYPE_CPL:
1652 			KASSERT(d->rss.opcode < NUM_CPL_CMDS,
1653 			    ("%s: bad opcode %02x.", __func__, d->rss.opcode));
1654 			t4_cpl_handler[d->rss.opcode](iq, &d->rss, m0);
1655 			break;
1656 
1657 		case X_RSPD_TYPE_INTR:
1658 
1659 			/*
1660 			 * There are 1K interrupt-capable queues (qids 0
1661 			 * through 1023).  A response type indicating a
1662 			 * forwarded interrupt with a qid >= 1K is an
1663 			 * iWARP async notification.  That is the only
1664 			 * acceptable indirect interrupt on this queue.
1665 			 */
1666 			if (__predict_false(lq < 1024)) {
1667 				panic("%s: indirect interrupt on iq_fl %p "
1668 				    "with qid %u", __func__, iq, lq);
1669 			}
1670 
1671 			t4_an_handler(iq, &d->rsp);
1672 			break;
1673 
1674 		default:
1675 			KASSERT(0, ("%s: illegal response type %d on iq %p",
1676 			    __func__, rsp_type, iq));
1677 			log(LOG_ERR, "%s: illegal response type %d on iq %p",
1678 			    device_get_nameunit(sc->dev), rsp_type, iq);
1679 			break;
1680 		}
1681 
1682 		d++;
1683 		if (__predict_false(++iq->cidx == iq->sidx)) {
1684 			iq->cidx = 0;
1685 			iq->gen ^= F_RSPD_GEN;
1686 			d = &iq->desc[0];
1687 		}
1688 		if (__predict_false(++ndescs == limit)) {
1689 			t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) |
1690 			    V_INGRESSQID(iq->cntxt_id) |
1691 			    V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
1692 
1693 #if defined(INET) || defined(INET6)
1694 			if (iq->flags & IQ_LRO_ENABLED &&
1695 			    !sort_before_lro(lro) &&
1696 			    sc->lro_timeout != 0) {
1697 				tcp_lro_flush_inactive(lro, &lro_timeout);
1698 			}
1699 #endif
1700 			if (budget)
1701 				return (EINPROGRESS);
1702 			ndescs = 0;
1703 		}
1704 	}
1705 out:
1706 #if defined(INET) || defined(INET6)
1707 	if (iq->flags & IQ_LRO_ENABLED) {
1708 		if (ndescs > 0 && lro->lro_mbuf_count > 8) {
1709 			MPASS(sort_before_lro(lro));
1710 			/* hold back one credit and don't flush LRO state */
1711 			iq->flags |= IQ_ADJ_CREDIT;
1712 			ndescs--;
1713 		} else {
1714 			tcp_lro_flush_all(lro);
1715 		}
1716 	}
1717 #endif
1718 
1719 	t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) |
1720 	    V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params));
1721 
1722 	FL_LOCK(fl);
1723 	starved = refill_fl(sc, fl, 64);
1724 	FL_UNLOCK(fl);
1725 	if (__predict_false(starved != 0))
1726 		add_fl_to_sfl(sc, fl);
1727 
1728 	return (0);
1729 }
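
/*
 * Editor's note: a standalone sketch (not part of the driver) of the
 * generation-bit protocol used by the service loop above.  The hardware
 * writes its current generation into each response descriptor; the driver
 * consumes entries while the bit matches iq->gen and flips its expectation
 * every time cidx wraps.  The queue contents below are made up.
 */
#if 0
#include <stdio.h>

#define QSIZE	4

int
main(void)
{
	/* Generation bits as hw might leave them midway through lap 2. */
	int desc_gen[QSIZE] = { 0, 0, 1, 1 };	/* slots 0-1 rewritten */
	int cidx = 2, gen = 1, consumed = 0;

	while (desc_gen[cidx] == gen) {
		consumed++;			/* process the descriptor */
		if (++cidx == QSIZE) {
			cidx = 0;
			gen ^= 1;		/* expect the other bit now */
		}
	}
	printf("consumed %d, cidx %d, gen %d\n", consumed, cidx, gen);
	return (0);
}
#endif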
1730 
1731 static inline struct cluster_metadata *
1732 cl_metadata(struct fl_sdesc *sd)
1733 {
1734 
1735 	return ((void *)(sd->cl + sd->moff));
1736 }
1737 
1738 static void
1739 rxb_free(struct mbuf *m)
1740 {
1741 	struct cluster_metadata *clm = m->m_ext.ext_arg1;
1742 
1743 	uma_zfree(clm->zone, clm->cl);
1744 	counter_u64_add(extfree_rels, 1);
1745 }
1746 
1747 /*
1748  * The mbuf returned comes from zone_mbuf and carries the payload in one of
1749  * these ways:
1750  * a) complete frame inside the mbuf
1751  * b) m_cljset (for clusters without metadata)
1752  * c) m_extaddref (cluster with metadata)
1753  */
1754 static struct mbuf *
1755 get_scatter_segment(struct adapter *sc, struct sge_fl *fl, int fr_offset,
1756     int remaining)
1757 {
1758 	struct mbuf *m;
1759 	struct fl_sdesc *sd = &fl->sdesc[fl->cidx];
1760 	struct rx_buf_info *rxb = &sc->sge.rx_buf_info[sd->zidx];
1761 	struct cluster_metadata *clm;
1762 	int len, blen;
1763 	caddr_t payload;
1764 
1765 	if (fl->flags & FL_BUF_PACKING) {
1766 		u_int l, pad;
1767 
1768 		blen = rxb->size2 - fl->rx_offset;	/* max possible in this buf */
1769 		len = min(remaining, blen);
1770 		payload = sd->cl + fl->rx_offset;
1771 
1772 		l = fr_offset + len;
1773 		pad = roundup2(l, fl->buf_boundary) - l;
1774 		if (fl->rx_offset + len + pad < rxb->size2)
1775 			blen = len + pad;
1776 		MPASS(fl->rx_offset + blen <= rxb->size2);
1777 	} else {
1778 		MPASS(fl->rx_offset == 0);	/* not packing */
1779 		blen = rxb->size1;
1780 		len = min(remaining, blen);
1781 		payload = sd->cl;
1782 	}
1783 
1784 	if (fr_offset == 0) {
1785 		m = m_gethdr(M_NOWAIT, MT_DATA);
1786 		if (__predict_false(m == NULL))
1787 			return (NULL);
1788 		m->m_pkthdr.len = remaining;
1789 	} else {
1790 		m = m_get(M_NOWAIT, MT_DATA);
1791 		if (__predict_false(m == NULL))
1792 			return (NULL);
1793 	}
1794 	m->m_len = len;
1795 	kmsan_mark(payload, len, KMSAN_STATE_INITED);
1796 
1797 	if (sc->sc_do_rxcopy && len < RX_COPY_THRESHOLD) {
1798 		/* copy data to mbuf */
1799 		bcopy(payload, mtod(m, caddr_t), len);
1800 		if (fl->flags & FL_BUF_PACKING) {
1801 			fl->rx_offset += blen;
1802 			MPASS(fl->rx_offset <= rxb->size2);
1803 			if (fl->rx_offset < rxb->size2)
1804 				return (m);	/* without advancing the cidx */
1805 		}
1806 	} else if (fl->flags & FL_BUF_PACKING) {
1807 		clm = cl_metadata(sd);
1808 		if (sd->nmbuf++ == 0) {
1809 			clm->refcount = 1;
1810 			clm->zone = rxb->zone;
1811 			clm->cl = sd->cl;
1812 			counter_u64_add(extfree_refs, 1);
1813 		}
1814 		m_extaddref(m, payload, blen, &clm->refcount, rxb_free, clm,
1815 		    NULL);
1816 
1817 		fl->rx_offset += blen;
1818 		MPASS(fl->rx_offset <= rxb->size2);
1819 		if (fl->rx_offset < rxb->size2)
1820 			return (m);	/* without advancing the cidx */
1821 	} else {
1822 		m_cljset(m, sd->cl, rxb->type);
1823 		sd->cl = NULL;	/* consumed, not a recycle candidate */
1824 	}
1825 
1826 	move_to_next_rxbuf(fl);
1827 
1828 	return (m);
1829 }
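
/*
 * Editor's note: a standalone sketch (not part of the driver) of the
 * buffer-packing pad arithmetic in get_scatter_segment() above: with
 * FL_BUF_PACKING the next payload must start on an fl->buf_boundary
 * aligned offset, so the consumed length is rounded up with roundup2().
 * The sizes below are made up.
 */
#if 0
#include <stdio.h>

/* Round x up to a multiple of y (y must be a power of 2). */
#define roundup2(x, y)	(((x) + ((y) - 1)) & ~((y) - 1))

int
main(void)
{
	unsigned int boundary = 64;		/* assumed fl->buf_boundary */
	unsigned int fr_offset = 0, len = 100;
	unsigned int l = fr_offset + len;
	unsigned int pad = roundup2(l, boundary) - l;

	/* 100 payload bytes consume 128 so the next frame stays aligned. */
	printf("len %u, pad %u, consumed %u\n", len, pad, len + pad);
	return (0);
}
#endif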
1830 
1831 static struct mbuf *
1832 get_fl_payload(struct adapter *sc, struct sge_fl *fl, const u_int plen)
1833 {
1834 	struct mbuf *m0, *m, **pnext;
1835 	u_int remaining;
1836 
1837 	if (__predict_false(fl->flags & FL_BUF_RESUME)) {
1838 		M_ASSERTPKTHDR(fl->m0);
1839 		MPASS(fl->m0->m_pkthdr.len == plen);
1840 		MPASS(fl->remaining < plen);
1841 
1842 		m0 = fl->m0;
1843 		pnext = fl->pnext;
1844 		remaining = fl->remaining;
1845 		fl->flags &= ~FL_BUF_RESUME;
1846 		goto get_segment;
1847 	}
1848 
1849 	/*
1850 	 * Payload starts at rx_offset in the current hw buffer.  Its length is
1851 	 * 'plen' and it may span multiple hw buffers.
1852 	 */
1853 
1854 	m0 = get_scatter_segment(sc, fl, 0, plen);
1855 	if (m0 == NULL)
1856 		return (NULL);
1857 	remaining = plen - m0->m_len;
1858 	pnext = &m0->m_next;
1859 	while (remaining > 0) {
1860 get_segment:
1861 		MPASS(fl->rx_offset == 0);
1862 		m = get_scatter_segment(sc, fl, plen - remaining, remaining);
1863 		if (__predict_false(m == NULL)) {
1864 			fl->m0 = m0;
1865 			fl->pnext = pnext;
1866 			fl->remaining = remaining;
1867 			fl->flags |= FL_BUF_RESUME;
1868 			return (NULL);
1869 		}
1870 		*pnext = m;
1871 		pnext = &m->m_next;
1872 		remaining -= m->m_len;
1873 	}
1874 	*pnext = NULL;
1875 
1876 	M_ASSERTPKTHDR(m0);
1877 	return (m0);
1878 }
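
/*
 * Editor's note: a standalone sketch (not part of the driver) of the
 * suspend/resume pattern in get_fl_payload() above: gather 'plen' bytes
 * from fixed-size buffers and, on an allocation failure partway through,
 * park the progress so a later call continues where this one left off.
 * All names and sizes here are hypothetical.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

#define BUFSZ	512

struct resume_state {
	bool valid;
	unsigned int remaining;		/* bytes still to gather */
};

/* Returns true when complete, false when the caller must resume later. */
static bool
gather(struct resume_state *rs, unsigned int plen, unsigned int fail_after)
{
	unsigned int remaining = rs->valid ? rs->remaining : plen;
	unsigned int segs = 0;

	rs->valid = false;
	while (remaining > 0) {
		if (segs++ == fail_after) {	/* simulated m_get() failure */
			rs->valid = true;
			rs->remaining = remaining;
			return (false);
		}
		remaining -= remaining < BUFSZ ? remaining : BUFSZ;
	}
	return (true);
}

int
main(void)
{
	struct resume_state rs = { false, 0 };

	if (!gather(&rs, 2000, 2))	/* fails after 2 segments */
		printf("suspended with %u bytes left\n", rs.remaining);
	if (gather(&rs, 2000, 100))	/* resumes and completes */
		printf("frame complete\n");
	return (0);
}
#endif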
1879 
1880 static int
1881 skip_scatter_segment(struct adapter *sc, struct sge_fl *fl, int fr_offset,
1882     int remaining)
1883 {
1884 	struct fl_sdesc *sd = &fl->sdesc[fl->cidx];
1885 	struct rx_buf_info *rxb = &sc->sge.rx_buf_info[sd->zidx];
1886 	int len, blen;
1887 
1888 	if (fl->flags & FL_BUF_PACKING) {
1889 		u_int l, pad;
1890 
1891 		blen = rxb->size2 - fl->rx_offset;	/* max possible in this buf */
1892 		len = min(remaining, blen);
1893 
1894 		l = fr_offset + len;
1895 		pad = roundup2(l, fl->buf_boundary) - l;
1896 		if (fl->rx_offset + len + pad < rxb->size2)
1897 			blen = len + pad;
1898 		fl->rx_offset += blen;
1899 		MPASS(fl->rx_offset <= rxb->size2);
1900 		if (fl->rx_offset < rxb->size2)
1901 			return (len);	/* without advancing the cidx */
1902 	} else {
1903 		MPASS(fl->rx_offset == 0);	/* not packing */
1904 		blen = rxb->size1;
1905 		len = min(remaining, blen);
1906 	}
1907 	move_to_next_rxbuf(fl);
1908 	return (len);
1909 }
1910 
1911 static inline void
1912 skip_fl_payload(struct adapter *sc, struct sge_fl *fl, int plen)
1913 {
1914 	int remaining, fr_offset, len;
1915 
1916 	fr_offset = 0;
1917 	remaining = plen;
1918 	while (remaining > 0) {
1919 		len = skip_scatter_segment(sc, fl, fr_offset, remaining);
1920 		fr_offset += len;
1921 		remaining -= len;
1922 	}
1923 }
1924 
1925 static inline int
1926 get_segment_len(struct adapter *sc, struct sge_fl *fl, int plen)
1927 {
1928 	int len;
1929 	struct fl_sdesc *sd = &fl->sdesc[fl->cidx];
1930 	struct rx_buf_info *rxb = &sc->sge.rx_buf_info[sd->zidx];
1931 
1932 	if (fl->flags & FL_BUF_PACKING)
1933 		len = rxb->size2 - fl->rx_offset;
1934 	else
1935 		len = rxb->size1;
1936 
1937 	return (min(plen, len));
1938 }
1939 
1940 static int
1941 eth_rx(struct adapter *sc, struct sge_rxq *rxq, const struct iq_desc *d,
1942     u_int plen)
1943 {
1944 	struct mbuf *m0;
1945 	if_t ifp = rxq->ifp;
1946 	struct sge_fl *fl = &rxq->fl;
1947 	struct vi_info *vi = if_getsoftc(ifp);
1948 	const struct cpl_rx_pkt *cpl;
1949 #if defined(INET) || defined(INET6)
1950 	struct lro_ctrl *lro = &rxq->lro;
1951 #endif
1952 	uint16_t err_vec, tnl_type, tnlhdr_len;
1953 	static const int sw_hashtype[4][2] = {
1954 		{M_HASHTYPE_NONE, M_HASHTYPE_NONE},
1955 		{M_HASHTYPE_RSS_IPV4, M_HASHTYPE_RSS_IPV6},
1956 		{M_HASHTYPE_RSS_TCP_IPV4, M_HASHTYPE_RSS_TCP_IPV6},
1957 		{M_HASHTYPE_RSS_UDP_IPV4, M_HASHTYPE_RSS_UDP_IPV6},
1958 	};
1959 	static const int sw_csum_flags[2][2] = {
1960 		{
1961 			/* IP, inner IP */
1962 			CSUM_ENCAP_VXLAN |
1963 			    CSUM_L3_CALC | CSUM_L3_VALID |
1964 			    CSUM_L4_CALC | CSUM_L4_VALID |
1965 			    CSUM_INNER_L3_CALC | CSUM_INNER_L3_VALID |
1966 			    CSUM_INNER_L4_CALC | CSUM_INNER_L4_VALID,
1967 
1968 			/* IP, inner IP6 */
1969 			CSUM_ENCAP_VXLAN |
1970 			    CSUM_L3_CALC | CSUM_L3_VALID |
1971 			    CSUM_L4_CALC | CSUM_L4_VALID |
1972 			    CSUM_INNER_L4_CALC | CSUM_INNER_L4_VALID,
1973 		},
1974 		{
1975 			/* IP6, inner IP */
1976 			CSUM_ENCAP_VXLAN |
1977 			    CSUM_L4_CALC | CSUM_L4_VALID |
1978 			    CSUM_INNER_L3_CALC | CSUM_INNER_L3_VALID |
1979 			    CSUM_INNER_L4_CALC | CSUM_INNER_L4_VALID,
1980 
1981 			/* IP6, inner IP6 */
1982 			CSUM_ENCAP_VXLAN |
1983 			    CSUM_L4_CALC | CSUM_L4_VALID |
1984 			    CSUM_INNER_L4_CALC | CSUM_INNER_L4_VALID,
1985 		},
1986 	};
1987 
1988 	MPASS(plen > sc->params.sge.fl_pktshift);
1989 	if (vi->pfil != NULL && PFIL_HOOKED_IN(vi->pfil) &&
1990 	    __predict_true((fl->flags & FL_BUF_RESUME) == 0)) {
1991 		struct fl_sdesc *sd = &fl->sdesc[fl->cidx];
1992 		caddr_t frame;
1993 		int rc, slen;
1994 
1995 		slen = get_segment_len(sc, fl, plen) -
1996 		    sc->params.sge.fl_pktshift;
1997 		frame = sd->cl + fl->rx_offset + sc->params.sge.fl_pktshift;
1998 		CURVNET_SET_QUIET(if_getvnet(ifp));
1999 		rc = pfil_mem_in(vi->pfil, frame, slen, ifp, &m0);
2000 		CURVNET_RESTORE();
2001 		if (rc == PFIL_DROPPED || rc == PFIL_CONSUMED) {
2002 			skip_fl_payload(sc, fl, plen);
2003 			return (0);
2004 		}
2005 		if (rc == PFIL_REALLOCED) {
2006 			skip_fl_payload(sc, fl, plen);
2007 			goto have_mbuf;
2008 		}
2009 	}
2010 
2011 	m0 = get_fl_payload(sc, fl, plen);
2012 	if (__predict_false(m0 == NULL))
2013 		return (ENOMEM);
2014 
2015 	m0->m_pkthdr.len -= sc->params.sge.fl_pktshift;
2016 	m0->m_len -= sc->params.sge.fl_pktshift;
2017 	m0->m_data += sc->params.sge.fl_pktshift;
2018 
2019 have_mbuf:
2020 	m0->m_pkthdr.rcvif = ifp;
2021 	M_HASHTYPE_SET(m0, sw_hashtype[d->rss.hash_type][d->rss.ipv6]);
2022 	m0->m_pkthdr.flowid = be32toh(d->rss.hash_val);
2023 
2024 	cpl = (const void *)(&d->rss + 1);
2025 	if (sc->params.tp.rx_pkt_encap) {
2026 		const uint16_t ev = be16toh(cpl->err_vec);
2027 
2028 		err_vec = G_T6_COMPR_RXERR_VEC(ev);
2029 		tnl_type = G_T6_RX_TNL_TYPE(ev);
2030 		tnlhdr_len = G_T6_RX_TNLHDR_LEN(ev);
2031 	} else {
2032 		err_vec = be16toh(cpl->err_vec);
2033 		tnl_type = 0;
2034 		tnlhdr_len = 0;
2035 	}
2036 	if (cpl->csum_calc && err_vec == 0) {
2037 		int ipv6 = !!(cpl->l2info & htobe32(F_RXF_IP6));
2038 
2039 		/* checksum(s) calculated and found to be correct. */
2040 
2041 		MPASS((cpl->l2info & htobe32(F_RXF_IP)) ^
2042 		    (cpl->l2info & htobe32(F_RXF_IP6)));
2043 		m0->m_pkthdr.csum_data = be16toh(cpl->csum);
2044 		if (tnl_type == 0) {
2045 			if (!ipv6 && if_getcapenable(ifp) & IFCAP_RXCSUM) {
2046 				m0->m_pkthdr.csum_flags = CSUM_L3_CALC |
2047 				    CSUM_L3_VALID | CSUM_L4_CALC |
2048 				    CSUM_L4_VALID;
2049 			} else if (ipv6 && if_getcapenable(ifp) & IFCAP_RXCSUM_IPV6) {
2050 				m0->m_pkthdr.csum_flags = CSUM_L4_CALC |
2051 				    CSUM_L4_VALID;
2052 			}
2053 			rxq->rxcsum++;
2054 		} else {
2055 			MPASS(tnl_type == RX_PKT_TNL_TYPE_VXLAN);
2056 
2057 			M_HASHTYPE_SETINNER(m0);
2058 			if (__predict_false(cpl->ip_frag)) {
2059 				/*
2060 				 * csum_data is for the inner frame (which is an
2061 				 * IP fragment) and is not 0xffff.  There is no
2062 				 * way to pass the inner csum_data to the stack.
2063 				 * We don't want the stack to use the inner
2064 				 * csum_data to validate the outer frame or it
2065 				 * will get rejected.  So we fix csum_data here
2066 				 * and let sw do the checksum of inner IP
2067 				 * fragments.
2068 				 *
2069 				 * XXX: Need 32b for csum_data2 in an rx mbuf.
2070 				 * Maybe stuff it into rcv_tstmp?
2071 				 */
2072 				m0->m_pkthdr.csum_data = 0xffff;
2073 				if (ipv6) {
2074 					m0->m_pkthdr.csum_flags = CSUM_L4_CALC |
2075 					    CSUM_L4_VALID;
2076 				} else {
2077 					m0->m_pkthdr.csum_flags = CSUM_L3_CALC |
2078 					    CSUM_L3_VALID | CSUM_L4_CALC |
2079 					    CSUM_L4_VALID;
2080 				}
2081 			} else {
2082 				int outer_ipv6;
2083 
2084 				MPASS(m0->m_pkthdr.csum_data == 0xffff);
2085 
2086 				outer_ipv6 = tnlhdr_len >=
2087 				    sizeof(struct ether_header) +
2088 				    sizeof(struct ip6_hdr);
2089 				m0->m_pkthdr.csum_flags =
2090 				    sw_csum_flags[outer_ipv6][ipv6];
2091 			}
2092 			rxq->vxlan_rxcsum++;
2093 		}
2094 	}
2095 
2096 	if (cpl->vlan_ex) {
2097 		if (sc->flags & IS_VF && sc->vlan_id) {
2098 			/*
2099 			 * HW is not set up correctly if the extracted vlan_id
2100 			 * does not match the VF's setting.
2101 			 */
2102 			MPASS(be16toh(cpl->vlan) == sc->vlan_id);
2103 		} else {
2104 			m0->m_pkthdr.ether_vtag = be16toh(cpl->vlan);
2105 			m0->m_flags |= M_VLANTAG;
2106 			rxq->vlan_extraction++;
2107 		}
2108 	}
2109 
2110 	if (rxq->iq.flags & IQ_RX_TIMESTAMP) {
2111 		/*
2112 	 * Fill up rcv_tstmp and set M_TSTMP only when
2113 	 * t4_tstmp_to_ns() returns a non-zero value.
2114 		 */
2115 		m0->m_pkthdr.rcv_tstmp = t4_tstmp_to_ns(sc,
2116 		    be64toh(d->rsp.u.last_flit));
2117 		if (m0->m_pkthdr.rcv_tstmp != 0)
2118 			m0->m_flags |= M_TSTMP;
2119 	}
2120 
2121 #ifdef NUMA
2122 	m0->m_pkthdr.numa_domain = if_getnumadomain(ifp);
2123 #endif
2124 #if defined(INET) || defined(INET6)
2125 	if (rxq->iq.flags & IQ_LRO_ENABLED && tnl_type == 0 &&
2126 	    (M_HASHTYPE_GET(m0) == M_HASHTYPE_RSS_TCP_IPV4 ||
2127 	    M_HASHTYPE_GET(m0) == M_HASHTYPE_RSS_TCP_IPV6)) {
2128 		if (sort_before_lro(lro)) {
2129 			tcp_lro_queue_mbuf(lro, m0);
2130 			return (0); /* queued for sort, then LRO */
2131 		}
2132 		if (tcp_lro_rx(lro, m0, 0) == 0)
2133 			return (0); /* queued for LRO */
2134 	}
2135 #endif
2136 	if_input(ifp, m0);
2137 
2138 	return (0);
2139 }
2140 
2141 /*
2142  * Must drain the wrq or make sure that someone else will.
2143  */
2144 static void
2145 wrq_tx_drain(void *arg, int n)
2146 {
2147 	struct sge_wrq *wrq = arg;
2148 	struct sge_eq *eq = &wrq->eq;
2149 
2150 	EQ_LOCK(eq);
2151 	if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list))
2152 		drain_wrq_wr_list(wrq->adapter, wrq);
2153 	EQ_UNLOCK(eq);
2154 }
2155 
2156 static void
2157 drain_wrq_wr_list(struct adapter *sc, struct sge_wrq *wrq)
2158 {
2159 	struct sge_eq *eq = &wrq->eq;
2160 	u_int available, dbdiff;	/* # of hardware descriptors */
2161 	u_int n;
2162 	struct wrqe *wr;
2163 	struct fw_eth_tx_pkt_wr *dst;	/* any fw WR struct will do */
2164 
2165 	EQ_LOCK_ASSERT_OWNED(eq);
2166 	MPASS(TAILQ_EMPTY(&wrq->incomplete_wrs));
2167 	wr = STAILQ_FIRST(&wrq->wr_list);
2168 	MPASS(wr != NULL);	/* Must be called with something useful to do */
2169 	MPASS(eq->pidx == eq->dbidx);
2170 	dbdiff = 0;
2171 
2172 	do {
2173 		eq->cidx = read_hw_cidx(eq);
2174 		if (eq->pidx == eq->cidx)
2175 			available = eq->sidx - 1;
2176 		else
2177 			available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1;
2178 
2179 		MPASS(wr->wrq == wrq);
2180 		n = howmany(wr->wr_len, EQ_ESIZE);
2181 		if (available < n)
2182 			break;
2183 
2184 		dst = (void *)&eq->desc[eq->pidx];
2185 		if (__predict_true(eq->sidx - eq->pidx > n)) {
2186 			/* Won't wrap, won't end exactly at the status page. */
2187 			bcopy(&wr->wr[0], dst, wr->wr_len);
2188 			eq->pidx += n;
2189 		} else {
2190 			int first_portion = (eq->sidx - eq->pidx) * EQ_ESIZE;
2191 
2192 			bcopy(&wr->wr[0], dst, first_portion);
2193 			if (wr->wr_len > first_portion) {
2194 				bcopy(&wr->wr[first_portion], &eq->desc[0],
2195 				    wr->wr_len - first_portion);
2196 			}
2197 			eq->pidx = n - (eq->sidx - eq->pidx);
2198 		}
2199 		wrq->tx_wrs_copied++;
2200 
2201 		if (available < eq->sidx / 4 &&
2202 		    atomic_cmpset_int(&eq->equiq, 0, 1)) {
2203 			/*
2204 			 * XXX: This is not 100% reliable with some
2205 			 * types of WRs.  But this is a very unusual
2206 			 * situation for an ofld/ctrl queue anyway.
2207 			 */
2208 			dst->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ |
2209 			    F_FW_WR_EQUEQ);
2210 		}
2211 
2212 		dbdiff += n;
2213 		if (dbdiff >= 16) {
2214 			ring_eq_db(sc, eq, dbdiff);
2215 			dbdiff = 0;
2216 		}
2217 
2218 		STAILQ_REMOVE_HEAD(&wrq->wr_list, link);
2219 		free_wrqe(wr);
2220 		MPASS(wrq->nwr_pending > 0);
2221 		wrq->nwr_pending--;
2222 		MPASS(wrq->ndesc_needed >= n);
2223 		wrq->ndesc_needed -= n;
2224 	} while ((wr = STAILQ_FIRST(&wrq->wr_list)) != NULL);
2225 
2226 	if (dbdiff)
2227 		ring_eq_db(sc, eq, dbdiff);
2228 }
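
/*
 * Editor's note: a standalone sketch (not part of the driver) of the
 * wrap-around copy in drain_wrq_wr_list() above: a record written at the
 * tail of a ring may need two copies, after which the producer index is
 * recomputed modulo the ring size.  Sizes below are made up.
 */
#if 0
#include <stdio.h>
#include <string.h>

#define ESIZE	8	/* stand-in for EQ_ESIZE */
#define SIDX	4	/* ring of 4 descriptors */

int
main(void)
{
	char ring[SIDX * ESIZE];
	const char rec[ESIZE * 3] = "abcdefghijklmnopqrstuvw";
	unsigned int pidx = 3, n = 3;	/* 3-descriptor record at index 3 */

	if (SIDX - pidx > n) {
		memcpy(&ring[pidx * ESIZE], rec, sizeof(rec));
		pidx += n;
	} else {
		unsigned int first = (SIDX - pidx) * ESIZE;

		memcpy(&ring[pidx * ESIZE], rec, first);
		memcpy(&ring[0], &rec[first], sizeof(rec) - first);
		pidx = n - (SIDX - pidx);	/* wrapped producer index */
	}
	printf("new pidx %u\n", pidx);		/* prints 2 */
	return (0);
}
#endif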
2229 
2230 /*
2231  * Doesn't fail.  Holds on to work requests it can't send right away.
2232  */
2233 void
2234 t4_wrq_tx_locked(struct adapter *sc, struct sge_wrq *wrq, struct wrqe *wr)
2235 {
2236 #ifdef INVARIANTS
2237 	struct sge_eq *eq = &wrq->eq;
2238 #endif
2239 
2240 	EQ_LOCK_ASSERT_OWNED(eq);
2241 	MPASS(wr != NULL);
2242 	MPASS(wr->wr_len > 0 && wr->wr_len <= SGE_MAX_WR_LEN);
2243 	MPASS((wr->wr_len & 0x7) == 0);
2244 
2245 	STAILQ_INSERT_TAIL(&wrq->wr_list, wr, link);
2246 	wrq->nwr_pending++;
2247 	wrq->ndesc_needed += howmany(wr->wr_len, EQ_ESIZE);
2248 
2249 	if (!TAILQ_EMPTY(&wrq->incomplete_wrs))
2250 		return;	/* commit_wrq_wr will drain wr_list as well. */
2251 
2252 	drain_wrq_wr_list(sc, wrq);
2253 
2254 	/* Doorbell must have caught up to the pidx. */
2255 	MPASS(eq->pidx == eq->dbidx);
2256 }
2257 
2258 void
2259 t4_update_fl_bufsize(if_t ifp)
2260 {
2261 	struct vi_info *vi = if_getsoftc(ifp);
2262 	struct adapter *sc = vi->adapter;
2263 	struct sge_rxq *rxq;
2264 #ifdef TCP_OFFLOAD
2265 	struct sge_ofld_rxq *ofld_rxq;
2266 #endif
2267 	struct sge_fl *fl;
2268 	int i, maxp;
2269 
2270 	maxp = max_rx_payload(sc, ifp, false);
2271 	for_each_rxq(vi, i, rxq) {
2272 		fl = &rxq->fl;
2273 
2274 		FL_LOCK(fl);
2275 		fl->zidx = find_refill_source(sc, maxp,
2276 		    fl->flags & FL_BUF_PACKING);
2277 		FL_UNLOCK(fl);
2278 	}
2279 #ifdef TCP_OFFLOAD
2280 	maxp = max_rx_payload(sc, ifp, true);
2281 	for_each_ofld_rxq(vi, i, ofld_rxq) {
2282 		fl = &ofld_rxq->fl;
2283 
2284 		FL_LOCK(fl);
2285 		fl->zidx = find_refill_source(sc, maxp,
2286 		    fl->flags & FL_BUF_PACKING);
2287 		FL_UNLOCK(fl);
2288 	}
2289 #endif
2290 }
2291 
2292 #ifdef RATELIMIT
2293 static inline int
2294 mbuf_eo_nsegs(struct mbuf *m)
2295 {
2296 
2297 	M_ASSERTPKTHDR(m);
2298 	return (m->m_pkthdr.PH_loc.eight[1]);
2299 }
2300 
2301 #if defined(INET) || defined(INET6)
2302 static inline void
2303 set_mbuf_eo_nsegs(struct mbuf *m, uint8_t nsegs)
2304 {
2305 
2306 	M_ASSERTPKTHDR(m);
2307 	m->m_pkthdr.PH_loc.eight[1] = nsegs;
2308 }
2309 #endif
2310 
2311 static inline int
2312 mbuf_eo_len16(struct mbuf *m)
2313 {
2314 	int n;
2315 
2316 	M_ASSERTPKTHDR(m);
2317 	n = m->m_pkthdr.PH_loc.eight[2];
2318 	MPASS(n > 0 && n <= SGE_MAX_WR_LEN / 16);
2319 
2320 	return (n);
2321 }
2322 
2323 #if defined(INET) || defined(INET6)
2324 static inline void
2325 set_mbuf_eo_len16(struct mbuf *m, uint8_t len16)
2326 {
2327 
2328 	M_ASSERTPKTHDR(m);
2329 	m->m_pkthdr.PH_loc.eight[2] = len16;
2330 }
2331 #endif
2332 
2333 static inline int
2334 mbuf_eo_tsclk_tsoff(struct mbuf *m)
2335 {
2336 
2337 	M_ASSERTPKTHDR(m);
2338 	return (m->m_pkthdr.PH_loc.eight[3]);
2339 }
2340 
2341 #if defined(INET) || defined(INET6)
2342 static inline void
2343 set_mbuf_eo_tsclk_tsoff(struct mbuf *m, uint8_t tsclk_tsoff)
2344 {
2345 
2346 	M_ASSERTPKTHDR(m);
2347 	m->m_pkthdr.PH_loc.eight[3] = tsclk_tsoff;
2348 }
2349 #endif
2350 
2351 static inline int
2352 needs_eo(struct m_snd_tag *mst)
2353 {
2354 
2355 	return (mst != NULL && mst->sw->type == IF_SND_TAG_TYPE_RATE_LIMIT);
2356 }
2357 #endif
2358 
2359 /*
2360  * Try to allocate an mbuf to contain a raw work request.  To make it
2361  * easy to construct the work request, allocate a single mbuf rather
2362  * than a chain.
2363  */
2364 struct mbuf *
2365 alloc_wr_mbuf(int len, int how)
2366 {
2367 	struct mbuf *m;
2368 
2369 	if (len <= MHLEN)
2370 		m = m_gethdr(how, MT_DATA);
2371 	else if (len <= MCLBYTES)
2372 		m = m_getcl(how, MT_DATA, M_PKTHDR);
2373 	else
2374 		m = NULL;
2375 	if (m == NULL)
2376 		return (NULL);
2377 	m->m_pkthdr.len = len;
2378 	m->m_len = len;
2379 	set_mbuf_cflags(m, MC_RAW_WR);
2380 	set_mbuf_len16(m, howmany(len, 16));
2381 	return (m);
2382 }
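
/*
 * Editor's note: a standalone sketch (not part of the driver) of the len16
 * bookkeeping in alloc_wr_mbuf() above.  Work request sizes are tracked in
 * 16-byte units; howmany() is the usual round-up division.
 */
#if 0
#include <stdio.h>

#define howmany(x, y)	(((x) + ((y) - 1)) / (y))

int
main(void)
{
	int wr_len = 72;	/* hypothetical work request size in bytes */

	printf("len16 = %d\n", howmany(wr_len, 16));	/* 5, i.e. 80 bytes */
	return (0);
}
#endif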
2383 
2384 static inline bool
2385 needs_hwcsum(struct mbuf *m)
2386 {
2387 	const uint32_t csum_flags = CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP |
2388 	    CSUM_IP_TSO | CSUM_INNER_IP | CSUM_INNER_IP_UDP |
2389 	    CSUM_INNER_IP_TCP | CSUM_INNER_IP_TSO | CSUM_IP6_UDP |
2390 	    CSUM_IP6_TCP | CSUM_IP6_TSO | CSUM_INNER_IP6_UDP |
2391 	    CSUM_INNER_IP6_TCP | CSUM_INNER_IP6_TSO;
2392 
2393 	M_ASSERTPKTHDR(m);
2394 
2395 	return (m->m_pkthdr.csum_flags & csum_flags);
2396 }
2397 
2398 static inline bool
2399 needs_tso(struct mbuf *m)
2400 {
2401 	const uint32_t csum_flags = CSUM_IP_TSO | CSUM_IP6_TSO |
2402 	    CSUM_INNER_IP_TSO | CSUM_INNER_IP6_TSO;
2403 
2404 	M_ASSERTPKTHDR(m);
2405 
2406 	return (m->m_pkthdr.csum_flags & csum_flags);
2407 }
2408 
2409 static inline bool
2410 needs_vxlan_csum(struct mbuf *m)
2411 {
2412 
2413 	M_ASSERTPKTHDR(m);
2414 
2415 	return (m->m_pkthdr.csum_flags & CSUM_ENCAP_VXLAN);
2416 }
2417 
2418 static inline bool
2419 needs_vxlan_tso(struct mbuf *m)
2420 {
2421 	const uint32_t csum_flags = CSUM_ENCAP_VXLAN | CSUM_INNER_IP_TSO |
2422 	    CSUM_INNER_IP6_TSO;
2423 
2424 	M_ASSERTPKTHDR(m);
2425 
2426 	return ((m->m_pkthdr.csum_flags & csum_flags) != 0 &&
2427 	    (m->m_pkthdr.csum_flags & csum_flags) != CSUM_ENCAP_VXLAN);
2428 }
2429 
2430 #if defined(INET) || defined(INET6)
2431 static inline bool
2432 needs_inner_tcp_csum(struct mbuf *m)
2433 {
2434 	const uint32_t csum_flags = CSUM_INNER_IP_TSO | CSUM_INNER_IP6_TSO;
2435 
2436 	M_ASSERTPKTHDR(m);
2437 
2438 	return (m->m_pkthdr.csum_flags & csum_flags);
2439 }
2440 #endif
2441 
2442 static inline bool
2443 needs_l3_csum(struct mbuf *m)
2444 {
2445 	const uint32_t csum_flags = CSUM_IP | CSUM_IP_TSO | CSUM_INNER_IP |
2446 	    CSUM_INNER_IP_TSO;
2447 
2448 	M_ASSERTPKTHDR(m);
2449 
2450 	return (m->m_pkthdr.csum_flags & csum_flags);
2451 }
2452 
2453 static inline bool
2454 needs_outer_tcp_csum(struct mbuf *m)
2455 {
2456 	const uint32_t csum_flags = CSUM_IP_TCP | CSUM_IP_TSO | CSUM_IP6_TCP |
2457 	    CSUM_IP6_TSO;
2458 
2459 	M_ASSERTPKTHDR(m);
2460 
2461 	return (m->m_pkthdr.csum_flags & csum_flags);
2462 }
2463 
2464 #ifdef RATELIMIT
2465 static inline bool
2466 needs_outer_l4_csum(struct mbuf *m)
2467 {
2468 	const uint32_t csum_flags = CSUM_IP_UDP | CSUM_IP_TCP | CSUM_IP_TSO |
2469 	    CSUM_IP6_UDP | CSUM_IP6_TCP | CSUM_IP6_TSO;
2470 
2471 	M_ASSERTPKTHDR(m);
2472 
2473 	return (m->m_pkthdr.csum_flags & csum_flags);
2474 }
2475 
2476 static inline bool
2477 needs_outer_udp_csum(struct mbuf *m)
2478 {
2479 	const uint32_t csum_flags = CSUM_IP_UDP | CSUM_IP6_UDP;
2480 
2481 	M_ASSERTPKTHDR(m);
2482 
2483 	return (m->m_pkthdr.csum_flags & csum_flags);
2484 }
2485 #endif
2486 
2487 static inline bool
2488 needs_vlan_insertion(struct mbuf *m)
2489 {
2490 
2491 	M_ASSERTPKTHDR(m);
2492 
2493 	return (m->m_flags & M_VLANTAG);
2494 }
2495 
2496 #if defined(INET) || defined(INET6)
2497 static void *
2498 m_advance(struct mbuf **pm, int *poffset, int len)
2499 {
2500 	struct mbuf *m = *pm;
2501 	int offset = *poffset;
2502 	uintptr_t p = 0;
2503 
2504 	MPASS(len > 0);
2505 
2506 	for (;;) {
2507 		if (offset + len < m->m_len) {
2508 			offset += len;
2509 			p = mtod(m, uintptr_t) + offset;
2510 			break;
2511 		}
2512 		len -= m->m_len - offset;
2513 		m = m->m_next;
2514 		offset = 0;
2515 		MPASS(m != NULL);
2516 	}
2517 	*poffset = offset;
2518 	*pm = m;
2519 	return ((void *)p);
2520 }
2521 #endif
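
/*
 * Editor's note: a standalone sketch (not part of the driver) of the
 * m_advance() pattern above: walk a chain of buffers to the location 'len'
 * bytes past the current (buf, offset) position.  The toy chain and names
 * are hypothetical.
 */
#if 0
#include <assert.h>
#include <stdio.h>

struct buf {
	struct buf *next;
	const char *data;
	int len;
};

static const char *
advance(struct buf **pb, int *poffset, int len)
{
	struct buf *b = *pb;
	int offset = *poffset;

	for (;;) {
		if (offset + len < b->len) {
			offset += len;
			break;
		}
		len -= b->len - offset;
		b = b->next;
		offset = 0;
		assert(b != NULL);
	}
	*pb = b;
	*poffset = offset;
	return (b->data + offset);
}

int
main(void)
{
	struct buf b2 = { NULL, "world", 5 };
	struct buf b1 = { &b2, "hello", 5 };
	struct buf *b = &b1;
	int offset = 0;

	/* Skip 7 bytes: lands at offset 2 of the second buffer ('r'). */
	printf("%c\n", *advance(&b, &offset, 7));
	return (0);
}
#endif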
2522 
2523 static inline int
2524 count_mbuf_ext_pgs(struct mbuf *m, int skip, vm_paddr_t *nextaddr)
2525 {
2526 	vm_paddr_t paddr;
2527 	int i, len, off, pglen, pgoff, seglen, segoff;
2528 	int nsegs = 0;
2529 
2530 	M_ASSERTEXTPG(m);
2531 	off = mtod(m, vm_offset_t);
2532 	len = m->m_len;
2533 	off += skip;
2534 	len -= skip;
2535 
2536 	if (m->m_epg_hdrlen != 0) {
2537 		if (off >= m->m_epg_hdrlen) {
2538 			off -= m->m_epg_hdrlen;
2539 		} else {
2540 			seglen = m->m_epg_hdrlen - off;
2541 			segoff = off;
2542 			seglen = min(seglen, len);
2543 			off = 0;
2544 			len -= seglen;
2545 			paddr = pmap_kextract(
2546 			    (vm_offset_t)&m->m_epg_hdr[segoff]);
2547 			if (*nextaddr != paddr)
2548 				nsegs++;
2549 			*nextaddr = paddr + seglen;
2550 		}
2551 	}
2552 	pgoff = m->m_epg_1st_off;
2553 	for (i = 0; i < m->m_epg_npgs && len > 0; i++) {
2554 		pglen = m_epg_pagelen(m, i, pgoff);
2555 		if (off >= pglen) {
2556 			off -= pglen;
2557 			pgoff = 0;
2558 			continue;
2559 		}
2560 		seglen = pglen - off;
2561 		segoff = pgoff + off;
2562 		off = 0;
2563 		seglen = min(seglen, len);
2564 		len -= seglen;
2565 		paddr = m->m_epg_pa[i] + segoff;
2566 		if (*nextaddr != paddr)
2567 			nsegs++;
2568 		*nextaddr = paddr + seglen;
2569 		pgoff = 0;
2570 	}
2571 	if (len != 0) {
2572 		seglen = min(len, m->m_epg_trllen - off);
2573 		len -= seglen;
2574 		paddr = pmap_kextract((vm_offset_t)&m->m_epg_trail[off]);
2575 		if (*nextaddr != paddr)
2576 			nsegs++;
2577 		*nextaddr = paddr + seglen;
2578 	}
2579 
2580 	return (nsegs);
2581 }
2582 
2583 
2584 /*
2585  * Can deal with empty mbufs in the chain that have m_len = 0, but the chain
2586  * must have at least one mbuf that's not empty.  It is possible for this
2587  * routine to return 0 if skip accounts for all the contents of the mbuf chain.
2588  */
2589 static inline int
2590 count_mbuf_nsegs(struct mbuf *m, int skip, uint8_t *cflags)
2591 {
2592 	vm_paddr_t nextaddr, paddr;
2593 	vm_offset_t va;
2594 	int len, nsegs;
2595 
2596 	M_ASSERTPKTHDR(m);
2597 	MPASS(m->m_pkthdr.len > 0);
2598 	MPASS(m->m_pkthdr.len >= skip);
2599 
2600 	nsegs = 0;
2601 	nextaddr = 0;
2602 	for (; m; m = m->m_next) {
2603 		len = m->m_len;
2604 		if (__predict_false(len == 0))
2605 			continue;
2606 		if (skip >= len) {
2607 			skip -= len;
2608 			continue;
2609 		}
2610 		if ((m->m_flags & M_EXTPG) != 0) {
2611 			*cflags |= MC_NOMAP;
2612 			nsegs += count_mbuf_ext_pgs(m, skip, &nextaddr);
2613 			skip = 0;
2614 			continue;
2615 		}
2616 		va = mtod(m, vm_offset_t) + skip;
2617 		len -= skip;
2618 		skip = 0;
2619 		paddr = pmap_kextract(va);
2620 		nsegs += sglist_count((void *)(uintptr_t)va, len);
2621 		if (paddr == nextaddr)
2622 			nsegs--;
2623 		nextaddr = pmap_kextract(va + len - 1) + 1;
2624 	}
2625 
2626 	return (nsegs);
2627 }
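
/*
 * Editor's note: a standalone sketch (not part of the driver) of the
 * coalescing rule in count_mbuf_nsegs() above: a buffer that starts at the
 * physical address where the previous one ended does not cost an extra DMA
 * segment.  The addresses below are made up.
 */
#if 0
#include <stdio.h>

struct seg {
	unsigned long addr;
	unsigned long len;
};

int
main(void)
{
	const struct seg segs[] = {
		{ 0x1000, 256 },
		{ 0x1100, 128 },	/* contiguous with the previous one */
		{ 0x8000, 512 },	/* not contiguous: new segment */
	};
	unsigned long nextaddr = 0;
	int i, nsegs = 0;

	for (i = 0; i < 3; i++) {
		if (segs[i].addr != nextaddr)
			nsegs++;
		nextaddr = segs[i].addr + segs[i].len;
	}
	printf("%d hardware segments\n", nsegs);	/* prints 2 */
	return (0);
}
#endif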
2628 
2629 /*
2630  * The maximum number of segments that can fit in a WR.
2631  */
2632 static int
2633 max_nsegs_allowed(struct mbuf *m, bool vm_wr)
2634 {
2635 
2636 	if (vm_wr) {
2637 		if (needs_tso(m))
2638 			return (TX_SGL_SEGS_VM_TSO);
2639 		return (TX_SGL_SEGS_VM);
2640 	}
2641 
2642 	if (needs_tso(m)) {
2643 		if (needs_vxlan_tso(m))
2644 			return (TX_SGL_SEGS_VXLAN_TSO);
2645 		else
2646 			return (TX_SGL_SEGS_TSO);
2647 	}
2648 
2649 	return (TX_SGL_SEGS);
2650 }
2651 
2652 static struct timeval txerr_ratecheck = {0};
2653 static const struct timeval txerr_interval = {3, 0};
2654 
2655 /*
2656  * Analyze the mbuf to determine its tx needs.  The mbuf passed in may change:
2657  * a) caller can assume it's been freed if this function returns with an error.
2658  * b) it may get defragged if the gather list is too long for the hardware.
2659  */
2660 int
2661 parse_pkt(struct mbuf **mp, bool vm_wr)
2662 {
2663 	struct mbuf *m0 = *mp, *m;
2664 	int rc, nsegs, defragged = 0;
2665 	struct ether_header *eh;
2666 #ifdef INET
2667 	void *l3hdr;
2668 #endif
2669 #if defined(INET) || defined(INET6)
2670 	int offset;
2671 	struct tcphdr *tcp;
2672 #endif
2673 #if defined(KERN_TLS) || defined(RATELIMIT)
2674 	struct m_snd_tag *mst;
2675 #endif
2676 	uint16_t eh_type;
2677 	uint8_t cflags;
2678 
2679 	cflags = 0;
2680 	M_ASSERTPKTHDR(m0);
2681 	if (__predict_false(m0->m_pkthdr.len < ETHER_HDR_LEN)) {
2682 		rc = EINVAL;
2683 fail:
2684 		m_freem(m0);
2685 		*mp = NULL;
2686 		return (rc);
2687 	}
2688 restart:
2689 	/*
2690 	 * First count the number of gather list segments in the payload.
2691 	 * Defrag the mbuf if nsegs exceeds the hardware limit.
2692 	 */
2693 	M_ASSERTPKTHDR(m0);
2694 	MPASS(m0->m_pkthdr.len > 0);
2695 	nsegs = count_mbuf_nsegs(m0, 0, &cflags);
2696 #if defined(KERN_TLS) || defined(RATELIMIT)
2697 	if (m0->m_pkthdr.csum_flags & CSUM_SND_TAG)
2698 		mst = m0->m_pkthdr.snd_tag;
2699 	else
2700 		mst = NULL;
2701 #endif
2702 #ifdef KERN_TLS
2703 	if (mst != NULL && mst->sw->type == IF_SND_TAG_TYPE_TLS) {
2704 		cflags |= MC_TLS;
2705 		set_mbuf_cflags(m0, cflags);
2706 		rc = t6_ktls_parse_pkt(m0);
2707 		if (rc != 0)
2708 			goto fail;
2709 		return (EINPROGRESS);
2710 	}
2711 #endif
2712 	if (nsegs > max_nsegs_allowed(m0, vm_wr)) {
2713 		if (defragged++ > 0) {
2714 			rc = EFBIG;
2715 			goto fail;
2716 		}
2717 		counter_u64_add(defrags, 1);
2718 		if ((m = m_defrag(m0, M_NOWAIT)) == NULL) {
2719 			rc = ENOMEM;
2720 			goto fail;
2721 		}
2722 		*mp = m0 = m;	/* update caller's copy after defrag */
2723 		goto restart;
2724 	}
2725 
2726 	if (__predict_false(nsegs > 2 && m0->m_pkthdr.len <= MHLEN &&
2727 	    !(cflags & MC_NOMAP))) {
2728 		counter_u64_add(pullups, 1);
2729 		m0 = m_pullup(m0, m0->m_pkthdr.len);
2730 		if (m0 == NULL) {
2731 			/* Should have left well enough alone. */
2732 			rc = EFBIG;
2733 			goto fail;
2734 		}
2735 		*mp = m0;	/* update caller's copy after pullup */
2736 		goto restart;
2737 	}
2738 	set_mbuf_nsegs(m0, nsegs);
2739 	set_mbuf_cflags(m0, cflags);
2740 	calculate_mbuf_len16(m0, vm_wr);
2741 
2742 #ifdef RATELIMIT
2743 	/*
2744 	 * Ethofld is limited to TCP and UDP for now, and only when L4 hw
2745 	 * checksumming is enabled.  needs_outer_l4_csum happens to check for
2746 	 * all the right things.
2747 	 */
2748 	if (__predict_false(needs_eo(mst) && !needs_outer_l4_csum(m0))) {
2749 		m_snd_tag_rele(m0->m_pkthdr.snd_tag);
2750 		m0->m_pkthdr.snd_tag = NULL;
2751 		m0->m_pkthdr.csum_flags &= ~CSUM_SND_TAG;
2752 		mst = NULL;
2753 	}
2754 #endif
2755 
2756 	if (!needs_hwcsum(m0)
2757 #ifdef RATELIMIT
2758 		 && !needs_eo(mst)
2759 #endif
2760 	)
2761 		return (0);
2762 
2763 	m = m0;
2764 	eh = mtod(m, struct ether_header *);
2765 	eh_type = ntohs(eh->ether_type);
2766 	if (eh_type == ETHERTYPE_VLAN) {
2767 		struct ether_vlan_header *evh = (void *)eh;
2768 
2769 		eh_type = ntohs(evh->evl_proto);
2770 		m0->m_pkthdr.l2hlen = sizeof(*evh);
2771 	} else
2772 		m0->m_pkthdr.l2hlen = sizeof(*eh);
2773 
2774 #if defined(INET) || defined(INET6)
2775 	offset = 0;
2776 #ifdef INET
2777 	l3hdr = m_advance(&m, &offset, m0->m_pkthdr.l2hlen);
2778 #else
2779 	m_advance(&m, &offset, m0->m_pkthdr.l2hlen);
2780 #endif
2781 #endif
2782 
2783 	switch (eh_type) {
2784 #ifdef INET6
2785 	case ETHERTYPE_IPV6:
2786 		m0->m_pkthdr.l3hlen = sizeof(struct ip6_hdr);
2787 		break;
2788 #endif
2789 #ifdef INET
2790 	case ETHERTYPE_IP:
2791 	{
2792 		struct ip *ip = l3hdr;
2793 
2794 		if (needs_vxlan_csum(m0)) {
2795 			/* Driver will do the outer IP hdr checksum. */
2796 			ip->ip_sum = 0;
2797 			if (needs_vxlan_tso(m0)) {
2798 				const uint16_t ipl = ip->ip_len;
2799 
2800 				ip->ip_len = 0;
2801 				ip->ip_sum = ~in_cksum_hdr(ip);
2802 				ip->ip_len = ipl;
2803 			} else
2804 				ip->ip_sum = in_cksum_hdr(ip);
2805 		}
2806 		m0->m_pkthdr.l3hlen = ip->ip_hl << 2;
2807 		break;
2808 	}
2809 #endif
2810 	default:
2811 		if (ratecheck(&txerr_ratecheck, &txerr_interval)) {
2812 			log(LOG_ERR, "%s: ethertype 0x%04x unknown.  "
2813 			    "if_cxgbe must be compiled with the same "
2814 			    "INET/INET6 options as the kernel.\n", __func__,
2815 			    eh_type);
2816 		}
2817 		rc = EINVAL;
2818 		goto fail;
2819 	}
2820 
2821 #if defined(INET) || defined(INET6)
2822 	if (needs_vxlan_csum(m0)) {
2823 		m0->m_pkthdr.l4hlen = sizeof(struct udphdr);
2824 		m0->m_pkthdr.l5hlen = sizeof(struct vxlan_header);
2825 
2826 		/* Inner headers. */
2827 		eh = m_advance(&m, &offset, m0->m_pkthdr.l3hlen +
2828 		    sizeof(struct udphdr) + sizeof(struct vxlan_header));
2829 		eh_type = ntohs(eh->ether_type);
2830 		if (eh_type == ETHERTYPE_VLAN) {
2831 			struct ether_vlan_header *evh = (void *)eh;
2832 
2833 			eh_type = ntohs(evh->evl_proto);
2834 			m0->m_pkthdr.inner_l2hlen = sizeof(*evh);
2835 		} else
2836 			m0->m_pkthdr.inner_l2hlen = sizeof(*eh);
2837 #ifdef INET
2838 		l3hdr = m_advance(&m, &offset, m0->m_pkthdr.inner_l2hlen);
2839 #else
2840 		m_advance(&m, &offset, m0->m_pkthdr.inner_l2hlen);
2841 #endif
2842 
2843 		switch (eh_type) {
2844 #ifdef INET6
2845 		case ETHERTYPE_IPV6:
2846 			m0->m_pkthdr.inner_l3hlen = sizeof(struct ip6_hdr);
2847 			break;
2848 #endif
2849 #ifdef INET
2850 		case ETHERTYPE_IP:
2851 		{
2852 			struct ip *ip = l3hdr;
2853 
2854 			m0->m_pkthdr.inner_l3hlen = ip->ip_hl << 2;
2855 			break;
2856 		}
2857 #endif
2858 		default:
2859 			if (ratecheck(&txerr_ratecheck, &txerr_interval)) {
2860 				log(LOG_ERR, "%s: VXLAN hw offload requested "
2861 				    "with unknown ethertype 0x%04x.  if_cxgbe "
2862 				    "must be compiled with the same INET/INET6 "
2863 				    "options as the kernel.\n", __func__,
2864 				    eh_type);
2865 			}
2866 			rc = EINVAL;
2867 			goto fail;
2868 		}
2869 		if (needs_inner_tcp_csum(m0)) {
2870 			tcp = m_advance(&m, &offset, m0->m_pkthdr.inner_l3hlen);
2871 			m0->m_pkthdr.inner_l4hlen = tcp->th_off * 4;
2872 		}
2873 		MPASS((m0->m_pkthdr.csum_flags & CSUM_SND_TAG) == 0);
2874 		m0->m_pkthdr.csum_flags &= CSUM_INNER_IP6_UDP |
2875 		    CSUM_INNER_IP6_TCP | CSUM_INNER_IP6_TSO | CSUM_INNER_IP |
2876 		    CSUM_INNER_IP_UDP | CSUM_INNER_IP_TCP | CSUM_INNER_IP_TSO |
2877 		    CSUM_ENCAP_VXLAN;
2878 	}
2879 
2880 	if (needs_outer_tcp_csum(m0)) {
2881 		tcp = m_advance(&m, &offset, m0->m_pkthdr.l3hlen);
2882 		m0->m_pkthdr.l4hlen = tcp->th_off * 4;
2883 #ifdef RATELIMIT
2884 		if (tsclk >= 0 && *(uint32_t *)(tcp + 1) == ntohl(0x0101080a)) {
2885 			set_mbuf_eo_tsclk_tsoff(m0,
2886 			    V_FW_ETH_TX_EO_WR_TSCLK(tsclk) |
2887 			    V_FW_ETH_TX_EO_WR_TSOFF(sizeof(*tcp) / 2 + 1));
2888 		} else
2889 			set_mbuf_eo_tsclk_tsoff(m0, 0);
2890 	} else if (needs_outer_udp_csum(m0)) {
2891 		m0->m_pkthdr.l4hlen = sizeof(struct udphdr);
2892 #endif
2893 	}
2894 #ifdef RATELIMIT
2895 	if (needs_eo(mst)) {
2896 		u_int immhdrs;
2897 
2898 		/* EO WRs have the headers in the WR and not the GL. */
2899 		immhdrs = m0->m_pkthdr.l2hlen + m0->m_pkthdr.l3hlen +
2900 		    m0->m_pkthdr.l4hlen;
2901 		cflags = 0;
2902 		nsegs = count_mbuf_nsegs(m0, immhdrs, &cflags);
2903 		MPASS(cflags == mbuf_cflags(m0));
2904 		set_mbuf_eo_nsegs(m0, nsegs);
2905 		set_mbuf_eo_len16(m0,
2906 		    txpkt_eo_len16(nsegs, immhdrs, needs_tso(m0)));
2907 		rc = ethofld_transmit(mst->ifp, m0);
2908 		if (rc != 0)
2909 			goto fail;
2910 		return (EINPROGRESS);
2911 	}
2912 #endif
2913 #endif
2914 	MPASS(m0 == *mp);
2915 	return (0);
2916 }
2917 
2918 void *
2919 start_wrq_wr(struct sge_wrq *wrq, int len16, struct wrq_cookie *cookie)
2920 {
2921 	struct sge_eq *eq = &wrq->eq;
2922 	struct adapter *sc = wrq->adapter;
2923 	int ndesc, available;
2924 	struct wrqe *wr;
2925 	void *w;
2926 
2927 	MPASS(len16 > 0);
2928 	ndesc = tx_len16_to_desc(len16);
2929 	MPASS(ndesc > 0 && ndesc <= SGE_MAX_WR_NDESC);
2930 
2931 	EQ_LOCK(eq);
2932 	if (__predict_false((eq->flags & EQ_HW_ALLOCATED) == 0)) {
2933 		EQ_UNLOCK(eq);
2934 		return (NULL);
2935 	}
2936 
2937 	if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list))
2938 		drain_wrq_wr_list(sc, wrq);
2939 
2940 	if (!STAILQ_EMPTY(&wrq->wr_list)) {
2941 slowpath:
2942 		EQ_UNLOCK(eq);
2943 		wr = alloc_wrqe(len16 * 16, wrq);
2944 		if (__predict_false(wr == NULL))
2945 			return (NULL);
2946 		cookie->pidx = -1;
2947 		cookie->ndesc = ndesc;
2948 		return (&wr->wr);
2949 	}
2950 
2951 	eq->cidx = read_hw_cidx(eq);
2952 	if (eq->pidx == eq->cidx)
2953 		available = eq->sidx - 1;
2954 	else
2955 		available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1;
2956 	if (available < ndesc)
2957 		goto slowpath;
2958 
2959 	cookie->pidx = eq->pidx;
2960 	cookie->ndesc = ndesc;
2961 	TAILQ_INSERT_TAIL(&wrq->incomplete_wrs, cookie, link);
2962 
2963 	w = &eq->desc[eq->pidx];
2964 	IDXINCR(eq->pidx, ndesc, eq->sidx);
2965 	if (__predict_false(cookie->pidx + ndesc > eq->sidx)) {
2966 		w = &wrq->ss[0];
2967 		wrq->ss_pidx = cookie->pidx;
2968 		wrq->ss_len = len16 * 16;
2969 	}
2970 
2971 	EQ_UNLOCK(eq);
2972 
2973 	return (w);
2974 }
2975 
2976 void
2977 commit_wrq_wr(struct sge_wrq *wrq, void *w, struct wrq_cookie *cookie)
2978 {
2979 	struct sge_eq *eq = &wrq->eq;
2980 	struct adapter *sc = wrq->adapter;
2981 	int ndesc, pidx;
2982 	struct wrq_cookie *prev, *next;
2983 
2984 	if (cookie->pidx == -1) {
2985 		struct wrqe *wr = __containerof(w, struct wrqe, wr);
2986 
2987 		t4_wrq_tx(sc, wr);
2988 		return;
2989 	}
2990 
2991 	if (__predict_false(w == &wrq->ss[0])) {
2992 		int n = (eq->sidx - wrq->ss_pidx) * EQ_ESIZE;
2993 
2994 		MPASS(wrq->ss_len > n);	/* WR had better wrap around. */
2995 		bcopy(&wrq->ss[0], &eq->desc[wrq->ss_pidx], n);
2996 		bcopy(&wrq->ss[n], &eq->desc[0], wrq->ss_len - n);
2997 		wrq->tx_wrs_ss++;
2998 	} else
2999 		wrq->tx_wrs_direct++;
3000 
3001 	EQ_LOCK(eq);
3002 	ndesc = cookie->ndesc;	/* Can be more than SGE_MAX_WR_NDESC here. */
3003 	pidx = cookie->pidx;
3004 	MPASS(pidx >= 0 && pidx < eq->sidx);
3005 	prev = TAILQ_PREV(cookie, wrq_incomplete_wrs, link);
3006 	next = TAILQ_NEXT(cookie, link);
3007 	if (prev == NULL) {
3008 		MPASS(pidx == eq->dbidx);
3009 		if (next == NULL || ndesc >= 16) {
3010 			int available;
3011 			struct fw_eth_tx_pkt_wr *dst;	/* any fw WR struct will do */
3012 
3013 			/*
3014 			 * Note that the WR via which we'll request tx updates
3015 			 * is at pidx and not eq->pidx, which has moved on
3016 			 * already.
3017 			 */
3018 			dst = (void *)&eq->desc[pidx];
3019 			available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1;
3020 			if (available < eq->sidx / 4 &&
3021 			    atomic_cmpset_int(&eq->equiq, 0, 1)) {
3022 				/*
3023 				 * XXX: This is not 100% reliable with some
3024 				 * types of WRs.  But this is a very unusual
3025 				 * situation for an ofld/ctrl queue anyway.
3026 				 */
3027 				dst->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ |
3028 				    F_FW_WR_EQUEQ);
3029 			}
3030 
3031 			if (__predict_true(eq->flags & EQ_HW_ALLOCATED))
3032 				ring_eq_db(wrq->adapter, eq, ndesc);
3033 			else
3034 				IDXINCR(eq->dbidx, ndesc, eq->sidx);
3035 		} else {
3036 			MPASS(IDXDIFF(next->pidx, pidx, eq->sidx) == ndesc);
3037 			next->pidx = pidx;
3038 			next->ndesc += ndesc;
3039 		}
3040 	} else {
3041 		MPASS(IDXDIFF(pidx, prev->pidx, eq->sidx) == prev->ndesc);
3042 		prev->ndesc += ndesc;
3043 	}
3044 	TAILQ_REMOVE(&wrq->incomplete_wrs, cookie, link);
3045 
3046 	if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list))
3047 		drain_wrq_wr_list(sc, wrq);
3048 
3049 #ifdef INVARIANTS
3050 	if (TAILQ_EMPTY(&wrq->incomplete_wrs)) {
3051 		/* Doorbell must have caught up to the pidx. */
3052 		MPASS(wrq->eq.pidx == wrq->eq.dbidx);
3053 	}
3054 #endif
3055 	EQ_UNLOCK(eq);
3056 }
3057 
3058 static u_int
3059 can_resume_eth_tx(struct mp_ring *r)
3060 {
3061 	struct sge_eq *eq = r->cookie;
3062 
3063 	return (total_available_tx_desc(eq) > eq->sidx / 8);
3064 }
3065 
3066 static inline bool
3067 cannot_use_txpkts(struct mbuf *m)
3068 {
3069 	/* maybe put a GL limit too, to avoid silliness? */
3070 
3071 	return (needs_tso(m) || (mbuf_cflags(m) & (MC_RAW_WR | MC_TLS)) != 0);
3072 }
3073 
3074 static inline int
3075 discard_tx(struct sge_eq *eq)
3076 {
3077 
3078 	return ((eq->flags & (EQ_ENABLED | EQ_QFLUSH)) != EQ_ENABLED);
3079 }
3080 
3081 static inline int
3082 wr_can_update_eq(void *p)
3083 {
3084 	struct fw_eth_tx_pkts_wr *wr = p;
3085 
3086 	switch (G_FW_WR_OP(be32toh(wr->op_pkd))) {
3087 	case FW_ULPTX_WR:
3088 	case FW_ETH_TX_PKT_WR:
3089 	case FW_ETH_TX_PKTS_WR:
3090 	case FW_ETH_TX_PKTS2_WR:
3091 	case FW_ETH_TX_PKT_VM_WR:
3092 	case FW_ETH_TX_PKTS_VM_WR:
3093 		return (1);
3094 	default:
3095 		return (0);
3096 	}
3097 }
3098 
3099 static inline void
3100 set_txupdate_flags(struct sge_txq *txq, u_int avail,
3101     struct fw_eth_tx_pkt_wr *wr)
3102 {
3103 	struct sge_eq *eq = &txq->eq;
3104 	struct txpkts *txp = &txq->txp;
3105 
3106 	if ((txp->npkt > 0 || avail < eq->sidx / 2) &&
3107 	    atomic_cmpset_int(&eq->equiq, 0, 1)) {
3108 		wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ | F_FW_WR_EQUIQ);
3109 		eq->equeqidx = eq->pidx;
3110 	} else if (IDXDIFF(eq->pidx, eq->equeqidx, eq->sidx) >= 32) {
3111 		wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ);
3112 		eq->equeqidx = eq->pidx;
3113 	}
3114 }
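
/*
 * Editor's note: a standalone sketch (not part of the driver) of the
 * circular-index arithmetic used by set_txupdate_flags() and throughout
 * this file.  The macro definitions below are assumptions modeled on the
 * driver's usage: IDXDIFF() is the distance from y forward to x around a
 * ring of the given size, and IDXINCR() advances an index with wrap.
 */
#if 0
#include <stdio.h>

#define IDXDIFF(x, y, size)	((x) >= (y) ? (x) - (y) : (size) - (y) + (x))
#define IDXINCR(idx, incr, size) do {					\
	(idx) = ((idx) + (incr)) % (size);				\
} while (0)

int
main(void)
{
	unsigned int sidx = 1024, pidx = 10, cidx = 1000;

	/* The producer is 34 slots ahead of the consumer, across the wrap. */
	printf("in flight: %u\n", IDXDIFF(pidx, cidx, sidx));

	IDXINCR(pidx, 20, sidx);
	printf("pidx after +20: %u\n", pidx);	/* 30 */
	return (0);
}
#endif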
3115 
3116 #if defined(__i386__) || defined(__amd64__)
3117 extern uint64_t tsc_freq;
3118 #endif
3119 
3120 static inline bool
3121 record_eth_tx_time(struct sge_txq *txq)
3122 {
3123 	const uint64_t cycles = get_cyclecount();
3124 	const uint64_t last_tx = txq->last_tx;
3125 #if defined(__i386__) || defined(__amd64__)
3126 	const uint64_t itg = tsc_freq * t4_tx_coalesce_gap / 1000000;
3127 #else
3128 	const uint64_t itg = 0;
3129 #endif
3130 
3131 	MPASS(cycles >= last_tx);
3132 	txq->last_tx = cycles;
3133 	return (cycles - last_tx < itg);
3134 }
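
/*
 * Editor's note: a standalone sketch (not part of the driver) of the
 * coalescing-gap test in record_eth_tx_time() above: a transmit counts as
 * "recent" when fewer cycles than the configured gap (microseconds) have
 * elapsed since the previous one.  The numbers below are made up.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	const uint64_t tsc_freq = 3000000000ULL;	/* 3 GHz, assumed */
	const uint64_t gap_us = 5;		/* cf. t4_tx_coalesce_gap */
	const uint64_t itg = tsc_freq * gap_us / 1000000;
	uint64_t last_tx = 1000000, now = 1010000;
	bool recent = now - last_tx < itg;

	printf("itg %llu cycles, recent %d\n",
	    (unsigned long long)itg, recent);	/* 15000 cycles, 1 */
	return (0);
}
#endif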
3135 
3136 /*
3137  * r->items[cidx] to r->items[pidx], with a wraparound at r->size, are ready to
3138  * be consumed.  Return the actual number consumed.  0 indicates a stall.
3139  */
3140 static u_int
3141 eth_tx(struct mp_ring *r, u_int cidx, u_int pidx, bool *coalescing)
3142 {
3143 	struct sge_txq *txq = r->cookie;
3144 	if_t ifp = txq->ifp;
3145 	struct sge_eq *eq = &txq->eq;
3146 	struct txpkts *txp = &txq->txp;
3147 	struct vi_info *vi = if_getsoftc(ifp);
3148 	struct adapter *sc = vi->adapter;
3149 	u_int total, remaining;		/* # of packets */
3150 	u_int n, avail, dbdiff;		/* # of hardware descriptors */
3151 	int i, rc;
3152 	struct mbuf *m0;
3153 	bool snd, recent_tx;
3154 	void *wr;	/* start of the last WR written to the ring */
3155 
3156 	TXQ_LOCK_ASSERT_OWNED(txq);
3157 	recent_tx = record_eth_tx_time(txq);
3158 
3159 	remaining = IDXDIFF(pidx, cidx, r->size);
3160 	if (__predict_false(discard_tx(eq))) {
3161 		for (i = 0; i < txp->npkt; i++)
3162 			m_freem(txp->mb[i]);
3163 		txp->npkt = 0;
3164 		while (cidx != pidx) {
3165 			m0 = r->items[cidx];
3166 			m_freem(m0);
3167 			if (++cidx == r->size)
3168 				cidx = 0;
3169 		}
3170 		reclaim_tx_descs(txq, eq->sidx);
3171 		*coalescing = false;
3172 		return (remaining);	/* emptied */
3173 	}
3174 
3175 	/* How many hardware descriptors do we have readily available? */
3176 	if (eq->pidx == eq->cidx)
3177 		avail = eq->sidx - 1;
3178 	else
3179 		avail = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1;
3180 
3181 	total = 0;
3182 	if (remaining == 0) {
3183 		txp->score = 0;
3184 		txq->txpkts_flush++;
3185 		goto send_txpkts;
3186 	}
3187 
3188 	dbdiff = 0;
3189 	MPASS(remaining > 0);
3190 	while (remaining > 0) {
3191 		m0 = r->items[cidx];
3192 		M_ASSERTPKTHDR(m0);
3193 		MPASS(m0->m_nextpkt == NULL);
3194 
3195 		if (avail < 2 * SGE_MAX_WR_NDESC)
3196 			avail += reclaim_tx_descs(txq, 64);
3197 
3198 		if (t4_tx_coalesce == 0 && txp->npkt == 0)
3199 			goto skip_coalescing;
3200 		if (cannot_use_txpkts(m0))
3201 			txp->score = 0;
3202 		else if (recent_tx) {
3203 			if (++txp->score == 0)
3204 				txp->score = UINT8_MAX;
3205 		} else
3206 			txp->score = 1;
3207 		if (txp->npkt > 0 || remaining > 1 ||
3208 		    txp->score >= t4_tx_coalesce_pkts ||
3209 		    atomic_load_int(&txq->eq.equiq) != 0) {
3210 			if (vi->flags & TX_USES_VM_WR)
3211 				rc = add_to_txpkts_vf(sc, txq, m0, avail, &snd);
3212 			else
3213 				rc = add_to_txpkts_pf(sc, txq, m0, avail, &snd);
3214 		} else {
3215 			snd = false;
3216 			rc = EINVAL;
3217 		}
3218 		if (snd) {
3219 			MPASS(txp->npkt > 0);
3220 			for (i = 0; i < txp->npkt; i++)
3221 				ETHER_BPF_MTAP(ifp, txp->mb[i]);
3222 			if (txp->npkt > 1) {
3223 				MPASS(avail >= tx_len16_to_desc(txp->len16));
3224 				if (vi->flags & TX_USES_VM_WR)
3225 					n = write_txpkts_vm_wr(sc, txq);
3226 				else
3227 					n = write_txpkts_wr(sc, txq);
3228 			} else {
3229 				MPASS(avail >=
3230 				    tx_len16_to_desc(mbuf_len16(txp->mb[0])));
3231 				if (vi->flags & TX_USES_VM_WR)
3232 					n = write_txpkt_vm_wr(sc, txq,
3233 					    txp->mb[0]);
3234 				else
3235 					n = write_txpkt_wr(sc, txq, txp->mb[0],
3236 					    avail);
3237 			}
3238 			MPASS(n <= SGE_MAX_WR_NDESC);
3239 			avail -= n;
3240 			dbdiff += n;
3241 			wr = &eq->desc[eq->pidx];
3242 			IDXINCR(eq->pidx, n, eq->sidx);
3243 			txp->npkt = 0;	/* emptied */
3244 		}
3245 		if (rc == 0) {
3246 			/* m0 was coalesced into txq->txpkts. */
3247 			goto next_mbuf;
3248 		}
3249 		if (rc == EAGAIN) {
3250 			/*
3251 			 * m0 is suitable for tx coalescing but could not be
3252 			 * combined with the existing txq->txpkts, which has now
3253 			 * been transmitted.  Start a new txpkts with m0.
3254 			 */
3255 			MPASS(snd);
3256 			MPASS(txp->npkt == 0);
3257 			continue;
3258 		}
3259 
3260 		MPASS(rc != 0 && rc != EAGAIN);
3261 		MPASS(txp->npkt == 0);
3262 skip_coalescing:
3263 		n = tx_len16_to_desc(mbuf_len16(m0));
3264 		if (__predict_false(avail < n)) {
3265 			avail += reclaim_tx_descs(txq, min(n, 32));
3266 			if (avail < n)
3267 				break;	/* out of descriptors */
3268 		}
3269 
3270 		wr = &eq->desc[eq->pidx];
3271 		if (mbuf_cflags(m0) & MC_RAW_WR) {
3272 			n = write_raw_wr(txq, wr, m0, avail);
3273 #ifdef KERN_TLS
3274 		} else if (mbuf_cflags(m0) & MC_TLS) {
3275 			ETHER_BPF_MTAP(ifp, m0);
3276 			n = t6_ktls_write_wr(txq, wr, m0, avail);
3277 #endif
3278 		} else {
3279 			ETHER_BPF_MTAP(ifp, m0);
3280 			if (vi->flags & TX_USES_VM_WR)
3281 				n = write_txpkt_vm_wr(sc, txq, m0);
3282 			else
3283 				n = write_txpkt_wr(sc, txq, m0, avail);
3284 		}
3285 		MPASS(n >= 1 && n <= avail);
3286 		if (!(mbuf_cflags(m0) & MC_TLS))
3287 			MPASS(n <= SGE_MAX_WR_NDESC);
3288 
3289 		avail -= n;
3290 		dbdiff += n;
3291 		IDXINCR(eq->pidx, n, eq->sidx);
3292 
3293 		if (dbdiff >= 512 / EQ_ESIZE) {	/* X_FETCHBURSTMAX_512B */
3294 			if (wr_can_update_eq(wr))
3295 				set_txupdate_flags(txq, avail, wr);
3296 			ring_eq_db(sc, eq, dbdiff);
3297 			avail += reclaim_tx_descs(txq, 32);
3298 			dbdiff = 0;
3299 		}
3300 next_mbuf:
3301 		total++;
3302 		remaining--;
3303 		if (__predict_false(++cidx == r->size))
3304 			cidx = 0;
3305 	}
3306 	if (dbdiff != 0) {
3307 		if (wr_can_update_eq(wr))
3308 			set_txupdate_flags(txq, avail, wr);
3309 		ring_eq_db(sc, eq, dbdiff);
3310 		reclaim_tx_descs(txq, 32);
3311 	} else if (eq->pidx == eq->cidx && txp->npkt > 0 &&
3312 	    atomic_load_int(&txq->eq.equiq) == 0) {
3313 		/*
3314 		 * If nothing was submitted to the chip for tx (it was coalesced
3315 		 * into txpkts instead) and there is no tx update outstanding
3316 		 * then we need to send txpkts now.
3317 		 */
3318 send_txpkts:
3319 		MPASS(txp->npkt > 0);
3320 		for (i = 0; i < txp->npkt; i++)
3321 			ETHER_BPF_MTAP(ifp, txp->mb[i]);
3322 		if (txp->npkt > 1) {
3323 			MPASS(avail >= tx_len16_to_desc(txp->len16));
3324 			if (vi->flags & TX_USES_VM_WR)
3325 				n = write_txpkts_vm_wr(sc, txq);
3326 			else
3327 				n = write_txpkts_wr(sc, txq);
3328 		} else {
3329 			MPASS(avail >=
3330 			    tx_len16_to_desc(mbuf_len16(txp->mb[0])));
3331 			if (vi->flags & TX_USES_VM_WR)
3332 				n = write_txpkt_vm_wr(sc, txq, txp->mb[0]);
3333 			else
3334 				n = write_txpkt_wr(sc, txq, txp->mb[0], avail);
3335 		}
3336 		MPASS(n <= SGE_MAX_WR_NDESC);
3337 		wr = &eq->desc[eq->pidx];
3338 		IDXINCR(eq->pidx, n, eq->sidx);
3339 		txp->npkt = 0;	/* emptied */
3340 
3341 		MPASS(wr_can_update_eq(wr));
3342 		set_txupdate_flags(txq, avail - n, wr);
3343 		ring_eq_db(sc, eq, n);
3344 		reclaim_tx_descs(txq, 32);
3345 	}
3346 	*coalescing = txp->npkt > 0;
3347 
3348 	return (total);
3349 }
3350 
3351 static inline void
3352 init_iq(struct sge_iq *iq, struct adapter *sc, int tmr_idx, int pktc_idx,
3353     int qsize, int intr_idx, int cong, int qtype)
3354 {
3355 
3356 	KASSERT(tmr_idx >= 0 && tmr_idx < SGE_NTIMERS,
3357 	    ("%s: bad tmr_idx %d", __func__, tmr_idx));
3358 	KASSERT(pktc_idx < SGE_NCOUNTERS,	/* -ve is ok, means don't use */
3359 	    ("%s: bad pktc_idx %d", __func__, pktc_idx));
3360 	KASSERT(intr_idx >= -1 && intr_idx < sc->intr_count,
3361 	    ("%s: bad intr_idx %d", __func__, intr_idx));
3362 	KASSERT(qtype == FW_IQ_IQTYPE_OTHER || qtype == FW_IQ_IQTYPE_NIC ||
3363 	    qtype == FW_IQ_IQTYPE_OFLD, ("%s: bad qtype %d", __func__, qtype));
3364 
3365 	iq->flags = 0;
3366 	iq->state = IQS_DISABLED;
3367 	iq->adapter = sc;
3368 	iq->qtype = qtype;
3369 	iq->intr_params = V_QINTR_TIMER_IDX(tmr_idx);
3370 	iq->intr_pktc_idx = SGE_NCOUNTERS - 1;
3371 	if (pktc_idx >= 0) {
3372 		iq->intr_params |= F_QINTR_CNT_EN;
3373 		iq->intr_pktc_idx = pktc_idx;
3374 	}
3375 	iq->qsize = roundup2(qsize, 16);	/* See FW_IQ_CMD/iqsize */
3376 	iq->sidx = iq->qsize - sc->params.sge.spg_len / IQ_ESIZE;
3377 	iq->intr_idx = intr_idx;
3378 	iq->cong_drop = cong;
3379 }
3380 
3381 static inline void
3382 init_fl(struct adapter *sc, struct sge_fl *fl, int qsize, int maxp, char *name)
3383 {
3384 	struct sge_params *sp = &sc->params.sge;
3385 
3386 	fl->qsize = qsize;
3387 	fl->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE;
3388 	strlcpy(fl->lockname, name, sizeof(fl->lockname));
3389 	mtx_init(&fl->fl_lock, fl->lockname, NULL, MTX_DEF);
3390 	if (sc->flags & BUF_PACKING_OK &&
3391 	    ((!is_t4(sc) && buffer_packing) ||	/* T5+: enabled unless 0 */
3392 	    (is_t4(sc) && buffer_packing == 1)))/* T4: disabled unless 1 */
3393 		fl->flags |= FL_BUF_PACKING;
3394 	fl->zidx = find_refill_source(sc, maxp, fl->flags & FL_BUF_PACKING);
3395 	fl->safe_zidx = sc->sge.safe_zidx;
3396 	if (fl->flags & FL_BUF_PACKING) {
3397 		fl->lowat = roundup2(sp->fl_starve_threshold2, 8);
3398 		fl->buf_boundary = sp->pack_boundary;
3399 	} else {
3400 		fl->lowat = roundup2(sp->fl_starve_threshold, 8);
3401 		fl->buf_boundary = 16;
3402 	}
3403 	if (fl_pad && fl->buf_boundary < sp->pad_boundary)
3404 		fl->buf_boundary = sp->pad_boundary;
3405 }
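
/*
 * Editor's note: a standalone sketch (not part of the driver) of the
 * buffer-packing policy in init_fl() above: on T5 and later, packing is
 * enabled unless the tunable is 0; on T4 it is disabled unless the tunable
 * is exactly 1.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

static bool
packing_enabled(bool is_t4, int buffer_packing)
{
	return ((!is_t4 && buffer_packing) || (is_t4 && buffer_packing == 1));
}

int
main(void)
{
	const int vals[] = { -1, 0, 1 };
	int i;

	for (i = 0; i < 3; i++) {
		printf("tunable %2d: T4 %d, T5+ %d\n", vals[i],
		    packing_enabled(true, vals[i]),
		    packing_enabled(false, vals[i]));
	}
	return (0);
}
#endif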
3406 
3407 static inline void
3408 init_eq(struct adapter *sc, struct sge_eq *eq, int eqtype, int qsize,
3409     uint8_t port_id, struct sge_iq *iq, char *name)
3410 {
3411 	KASSERT(eqtype >= EQ_CTRL && eqtype <= EQ_OFLD,
3412 	    ("%s: bad qtype %d", __func__, eqtype));
3413 
3414 	eq->type = eqtype;
3415 	eq->port_id = port_id;
3416 	eq->tx_chan = sc->port[port_id]->tx_chan;
3417 	eq->iq = iq;
3418 	eq->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE;
3419 	strlcpy(eq->lockname, name, sizeof(eq->lockname));
3420 	mtx_init(&eq->eq_lock, eq->lockname, NULL, MTX_DEF);
3421 }
3422 
3423 int
3424 alloc_ring(struct adapter *sc, size_t len, bus_dma_tag_t *tag,
3425     bus_dmamap_t *map, bus_addr_t *pa, void **va)
3426 {
3427 	int rc;
3428 
3429 	rc = bus_dma_tag_create(sc->dmat, 512, 0, BUS_SPACE_MAXADDR,
3430 	    BUS_SPACE_MAXADDR, NULL, NULL, len, 1, len, 0, NULL, NULL, tag);
3431 	if (rc != 0) {
3432 		CH_ERR(sc, "cannot allocate DMA tag: %d\n", rc);
3433 		goto done;
3434 	}
3435 
3436 	rc = bus_dmamem_alloc(*tag, va,
3437 	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, map);
3438 	if (rc != 0) {
3439 		CH_ERR(sc, "cannot allocate DMA memory: %d\n", rc);
3440 		goto done;
3441 	}
3442 
3443 	rc = bus_dmamap_load(*tag, *map, *va, len, oneseg_dma_callback, pa, 0);
3444 	if (rc != 0) {
3445 		CH_ERR(sc, "cannot load DMA map: %d\n", rc);
3446 		goto done;
3447 	}
3448 done:
3449 	if (rc)
3450 		free_ring(sc, *tag, *map, *pa, *va);
3451 
3452 	return (rc);
3453 }
3454 
3455 int
3456 free_ring(struct adapter *sc, bus_dma_tag_t tag, bus_dmamap_t map,
3457     bus_addr_t pa, void *va)
3458 {
3459 	if (pa)
3460 		bus_dmamap_unload(tag, map);
3461 	if (va)
3462 		bus_dmamem_free(tag, va, map);
3463 	if (tag)
3464 		bus_dma_tag_destroy(tag);
3465 
3466 	return (0);
3467 }
3468 
3469 /*
3470  * Allocates the software resources (mainly memory and sysctl nodes) for an
3471  * ingress queue and an optional freelist.
3472  *
3473  * Sets IQ_SW_ALLOCATED and returns 0 on success.
3474  */
3475 static int
3476 alloc_iq_fl(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl,
3477     struct sysctl_ctx_list *ctx, struct sysctl_oid *oid)
3478 {
3479 	int rc;
3480 	size_t len;
3481 	struct adapter *sc = vi->adapter;
3482 
3483 	MPASS(!(iq->flags & IQ_SW_ALLOCATED));
3484 
3485 	len = iq->qsize * IQ_ESIZE;
3486 	rc = alloc_ring(sc, len, &iq->desc_tag, &iq->desc_map, &iq->ba,
3487 	    (void **)&iq->desc);
3488 	if (rc != 0)
3489 		return (rc);
3490 
3491 	if (fl) {
3492 		len = fl->qsize * EQ_ESIZE;
3493 		rc = alloc_ring(sc, len, &fl->desc_tag, &fl->desc_map,
3494 		    &fl->ba, (void **)&fl->desc);
3495 		if (rc) {
3496 			free_ring(sc, iq->desc_tag, iq->desc_map, iq->ba,
3497 			    iq->desc);
3498 			return (rc);
3499 		}
3500 
3501 		/* Allocate space for one software descriptor per buffer. */
3502 		fl->sdesc = malloc(fl->sidx * 8 * sizeof(struct fl_sdesc),
3503 		    M_CXGBE, M_ZERO | M_WAITOK);
3504 
3505 		add_fl_sysctls(sc, ctx, oid, fl);
3506 		iq->flags |= IQ_HAS_FL;
3507 	}
3508 	add_iq_sysctls(ctx, oid, iq);
3509 	iq->flags |= IQ_SW_ALLOCATED;
3510 
3511 	return (0);
3512 }
3513 
3514 /*
3515  * Frees all software resources (memory and locks) associated with an ingress
3516  * queue and an optional freelist.
3517  */
3518 static void
3519 free_iq_fl(struct adapter *sc, struct sge_iq *iq, struct sge_fl *fl)
3520 {
3521 	MPASS(iq->flags & IQ_SW_ALLOCATED);
3522 
3523 	if (fl) {
3524 		MPASS(iq->flags & IQ_HAS_FL);
3525 		free_ring(sc, fl->desc_tag, fl->desc_map, fl->ba, fl->desc);
3526 		free_fl_buffers(sc, fl);
3527 		free(fl->sdesc, M_CXGBE);
3528 		mtx_destroy(&fl->fl_lock);
3529 		bzero(fl, sizeof(*fl));
3530 	}
3531 	free_ring(sc, iq->desc_tag, iq->desc_map, iq->ba, iq->desc);
3532 	bzero(iq, sizeof(*iq));
3533 }
3534 
3535 /*
3536  * Allocates a hardware ingress queue and an optional freelist that will be
3537  * associated with it.
3538  *
3539  * Returns errno on failure.  Resources allocated up to that point may still be
3540  * allocated.  Caller is responsible for cleanup in case this function fails.
3541  */
3542 static int
3543 alloc_iq_fl_hwq(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl)
3544 {
3545 	int rc, cntxt_id, cong_map;
3546 	struct fw_iq_cmd c;
3547 	struct adapter *sc = vi->adapter;
3548 	struct port_info *pi = vi->pi;
3549 	__be32 v = 0;
3550 
3551 	MPASS(!(iq->flags & IQ_HW_ALLOCATED));
3552 
3553 	bzero(&c, sizeof(c));
3554 	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
3555 	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) |
3556 	    V_FW_IQ_CMD_VFN(0));
3557 
3558 	c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
3559 	    FW_LEN16(c));
3560 
3561 	/* Special handling for firmware event queue */
3562 	if (iq == &sc->sge.fwq)
3563 		v |= F_FW_IQ_CMD_IQASYNCH;
3564 
3565 	if (iq->intr_idx < 0) {
3566 		/* Forwarded interrupts, all headed to fwq */
3567 		v |= F_FW_IQ_CMD_IQANDST;
3568 		v |= V_FW_IQ_CMD_IQANDSTINDEX(sc->sge.fwq.cntxt_id);
3569 	} else {
3570 		KASSERT(iq->intr_idx < sc->intr_count,
3571 		    ("%s: invalid direct intr_idx %d", __func__, iq->intr_idx));
3572 		v |= V_FW_IQ_CMD_IQANDSTINDEX(iq->intr_idx);
3573 	}
3574 
3575 	bzero(iq->desc, iq->qsize * IQ_ESIZE);
3576 	c.type_to_iqandstindex = htobe32(v |
3577 	    V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
3578 	    V_FW_IQ_CMD_VIID(vi->viid) |
3579 	    V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
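	/*
	 * IQESIZE is log2-encoded with a 16B base, so the value programmed
	 * below is ilog2(IQ_ESIZE) - 4.
	 */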
3580 	c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
3581 	    F_FW_IQ_CMD_IQGTSMODE |
3582 	    V_FW_IQ_CMD_IQINTCNTTHRESH(iq->intr_pktc_idx) |
3583 	    V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4));
3584 	c.iqsize = htobe16(iq->qsize);
3585 	c.iqaddr = htobe64(iq->ba);
3586 	c.iqns_to_fl0congen = htobe32(V_FW_IQ_CMD_IQTYPE(iq->qtype));
3587 	if (iq->cong_drop != -1) {
3588 		cong_map = iq->qtype == IQ_ETH ? pi->rx_e_chan_map : 0;
3589 		c.iqns_to_fl0congen |= htobe32(F_FW_IQ_CMD_IQFLINTCONGEN);
3590 	}
3591 
3592 	if (fl) {
3593 		bzero(fl->desc, fl->sidx * EQ_ESIZE + sc->params.sge.spg_len);
3594 		c.iqns_to_fl0congen |=
3595 		    htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) |
3596 			F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
3597 			(fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0) |
3598 			(fl->flags & FL_BUF_PACKING ? F_FW_IQ_CMD_FL0PACKEN :
3599 			    0));
3600 		if (iq->cong_drop != -1) {
3601 			c.iqns_to_fl0congen |=
3602 				htobe32(V_FW_IQ_CMD_FL0CNGCHMAP(cong_map) |
3603 				    F_FW_IQ_CMD_FL0CONGCIF |
3604 				    F_FW_IQ_CMD_FL0CONGEN);
3605 		}
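		/*
		 * The fetch burst fields are encoded differently on T6,
		 * hence the per-chip selection of the min/max values.
		 */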
3606 		c.fl0dcaen_to_fl0cidxfthresh =
3607 		    htobe16(V_FW_IQ_CMD_FL0FBMIN(chip_id(sc) <= CHELSIO_T5 ?
3608 			X_FETCHBURSTMIN_128B : X_FETCHBURSTMIN_64B_T6) |
3609 			V_FW_IQ_CMD_FL0FBMAX(chip_id(sc) <= CHELSIO_T5 ?
3610 			X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B));
3611 		c.fl0size = htobe16(fl->qsize);
3612 		c.fl0addr = htobe64(fl->ba);
3613 	}
3614 
3615 	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
3616 	if (rc != 0) {
3617 		CH_ERR(sc, "failed to create hw ingress queue: %d\n", rc);
3618 		return (rc);
3619 	}
3620 
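	/*
	 * The generation bit in each response flips on every pass through
	 * the ring; the first pass starts with it set.
	 */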
3621 	iq->cidx = 0;
3622 	iq->gen = F_RSPD_GEN;
3623 	iq->cntxt_id = be16toh(c.iqid);
3624 	iq->abs_id = be16toh(c.physiqid);
3625 
3626 	cntxt_id = iq->cntxt_id - sc->sge.iq_start;
3627 	if (cntxt_id >= sc->sge.iqmap_sz) {
3628 		panic("%s: iq->cntxt_id (%d) more than the max (%d)", __func__,
3629 		    cntxt_id, sc->sge.iqmap_sz - 1);
3630 	}
3631 	sc->sge.iqmap[cntxt_id] = iq;
3632 
3633 	if (fl) {
3634 		u_int qid;
3635 #ifdef INVARIANTS
3636 		int i;
3637 
3638 		MPASS(!(fl->flags & FL_BUF_RESUME));
3639 		for (i = 0; i < fl->sidx * 8; i++)
3640 			MPASS(fl->sdesc[i].cl == NULL);
3641 #endif
3642 		fl->cntxt_id = be16toh(c.fl0id);
3643 		fl->pidx = fl->cidx = fl->hw_cidx = fl->dbidx = 0;
3644 		fl->rx_offset = 0;
3645 		fl->flags &= ~(FL_STARVING | FL_DOOMED);
3646 
3647 		cntxt_id = fl->cntxt_id - sc->sge.eq_start;
3648 		if (cntxt_id >= sc->sge.eqmap_sz) {
3649 			panic("%s: fl->cntxt_id (%d) more than the max (%d)",
3650 			    __func__, cntxt_id, sc->sge.eqmap_sz - 1);
3651 		}
3652 		sc->sge.eqmap[cntxt_id] = (void *)fl;
3653 
3654 		qid = fl->cntxt_id;
3655 		if (isset(&sc->doorbells, DOORBELL_UDB)) {
3656 			uint32_t s_qpp = sc->params.sge.eq_s_qpp;
3657 			uint32_t mask = (1 << s_qpp) - 1;
3658 			volatile uint8_t *udb;
3659 
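			/*
			 * Locate this freelist's user doorbell: qid selects
			 * the UDB page (2^s_qpp queues per page) and, if it
			 * fits, the UDBS_SEG_SIZE segment within that page,
			 * in which case the qid can be dropped from the
			 * doorbell writes (it is folded into dbval otherwise).
			 */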
3660 			udb = sc->udbs_base + UDBS_DB_OFFSET;
3661 			udb += (qid >> s_qpp) << PAGE_SHIFT;
3662 			qid &= mask;
3663 			if (qid < PAGE_SIZE / UDBS_SEG_SIZE) {
3664 				udb += qid << UDBS_SEG_SHIFT;
3665 				qid = 0;
3666 			}
3667 			fl->udb = (volatile void *)udb;
3668 		}
3669 		fl->dbval = V_QID(qid) | sc->chip_params->sge_fl_db;
3670 
3671 		FL_LOCK(fl);
3672 		/* Enough to make sure the SGE doesn't think it's starved */
3673 		refill_fl(sc, fl, fl->lowat);
3674 		FL_UNLOCK(fl);
3675 	}
3676 
3677 	if (chip_id(sc) >= CHELSIO_T5 && !(sc->flags & IS_VF) &&
3678 	    iq->cong_drop != -1) {
3679 		t4_sge_set_conm_context(sc, iq->cntxt_id, iq->cong_drop,
3680 		    cong_map);
3681 	}
3682 
3683 	/* Enable IQ interrupts */
3684 	atomic_store_rel_int(&iq->state, IQS_IDLE);
3685 	t4_write_reg(sc, sc->sge_gts_reg, V_SEINTARM(iq->intr_params) |
3686 	    V_INGRESSQID(iq->cntxt_id));
3687 
3688 	iq->flags |= IQ_HW_ALLOCATED;
3689 
3690 	return (0);
3691 }
3692 
3693 static int
3694 free_iq_fl_hwq(struct adapter *sc, struct sge_iq *iq, struct sge_fl *fl)
3695 {
3696 	int rc;
3697 
3698 	MPASS(iq->flags & IQ_HW_ALLOCATED);
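	/* A freelist id of 0xffff tells the firmware there is no such fl. */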
3699 	rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
3700 	    iq->cntxt_id, fl ? fl->cntxt_id : 0xffff, 0xffff);
3701 	if (rc != 0) {
3702 		CH_ERR(sc, "failed to free iq %p: %d\n", iq, rc);
3703 		return (rc);
3704 	}
3705 	iq->flags &= ~IQ_HW_ALLOCATED;
3706 
3707 	return (0);
3708 }
3709 
3710 static void
3711 add_iq_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *oid,
3712     struct sge_iq *iq)
3713 {
3714 	struct sysctl_oid_list *children;
3715 
3716 	if (ctx == NULL || oid == NULL)
3717 		return;
3718 
3719 	children = SYSCTL_CHILDREN(oid);
3720 	SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD, &iq->ba,
3721 	    "bus address of descriptor ring");
3722 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL,
3723 	    iq->qsize * IQ_ESIZE, "descriptor ring size in bytes");
3724 	SYSCTL_ADD_U16(ctx, children, OID_AUTO, "abs_id", CTLFLAG_RD,
3725 	    &iq->abs_id, 0, "absolute id of the queue");
3726 	SYSCTL_ADD_U16(ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
3727 	    &iq->cntxt_id, 0, "SGE context id of the queue");
3728 	SYSCTL_ADD_U16(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, &iq->cidx,
3729 	    0, "consumer index");
3730 }
3731 
3732 static void
3733 add_fl_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx,
3734     struct sysctl_oid *oid, struct sge_fl *fl)
3735 {
3736 	struct sysctl_oid_list *children;
3737 
3738 	if (ctx == NULL || oid == NULL)
3739 		return;
3740 
3741 	children = SYSCTL_CHILDREN(oid);
3742 	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fl",
3743 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "freelist");
3744 	children = SYSCTL_CHILDREN(oid);
3745 
3746 	SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD,
3747 	    &fl->ba, "bus address of descriptor ring");
3748 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL,
3749 	    fl->sidx * EQ_ESIZE + sc->params.sge.spg_len,
3750 	    "desc ring size in bytes");
3751 	SYSCTL_ADD_U16(ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
3752 	    &fl->cntxt_id, 0, "SGE context id of the freelist");
3753 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "padding", CTLFLAG_RD, NULL,
3754 	    fl_pad ? 1 : 0, "padding enabled");
3755 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "packing", CTLFLAG_RD, NULL,
3756 	    fl->flags & FL_BUF_PACKING ? 1 : 0, "packing enabled");
3757 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, &fl->cidx,
3758 	    0, "consumer index");
3759 	if (fl->flags & FL_BUF_PACKING) {
3760 		SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_offset",
3761 		    CTLFLAG_RD, &fl->rx_offset, 0, "packing rx offset");
3762 	}
3763 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, &fl->pidx,
3764 	    0, "producer index");
3765 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_allocated",
3766 	    CTLFLAG_RD, &fl->cl_allocated, "# of clusters allocated");
3767 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_recycled",
3768 	    CTLFLAG_RD, &fl->cl_recycled, "# of clusters recycled");
3769 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_fast_recycled",
3770 	    CTLFLAG_RD, &fl->cl_fast_recycled, "# of clusters recycled (fast)");
3771 }
3772 
3773 /*
3774  * Idempotent.
3775  */
3776 static int
3777 alloc_fwq(struct adapter *sc)
3778 {
3779 	int rc, intr_idx;
3780 	struct sge_iq *fwq = &sc->sge.fwq;
3781 	struct vi_info *vi = &sc->port[0]->vi[0];
3782 
3783 	if (!(fwq->flags & IQ_SW_ALLOCATED)) {
3784 		MPASS(!(fwq->flags & IQ_HW_ALLOCATED));
3785 
3786 		if (sc->flags & IS_VF)
3787 			intr_idx = 0;
3788 		else
3789 			intr_idx = sc->intr_count > 1 ? 1 : 0;
3790 		init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE, intr_idx, -1, IQ_OTHER);
3791 		rc = alloc_iq_fl(vi, fwq, NULL, &sc->ctx, sc->fwq_oid);
3792 		if (rc != 0) {
3793 			CH_ERR(sc, "failed to allocate fwq: %d\n", rc);
3794 			return (rc);
3795 		}
3796 		MPASS(fwq->flags & IQ_SW_ALLOCATED);
3797 	}
3798 
3799 	if (!(fwq->flags & IQ_HW_ALLOCATED)) {
3800 		MPASS(fwq->flags & IQ_SW_ALLOCATED);
3801 
3802 		rc = alloc_iq_fl_hwq(vi, fwq, NULL);
3803 		if (rc != 0) {
3804 			CH_ERR(sc, "failed to create hw fwq: %d\n", rc);
3805 			return (rc);
3806 		}
3807 		MPASS(fwq->flags & IQ_HW_ALLOCATED);
3808 	}
3809 
3810 	return (0);
3811 }
3812 
3813 /*
3814  * Idempotent.
3815  */
3816 static void
3817 free_fwq(struct adapter *sc)
3818 {
3819 	struct sge_iq *fwq = &sc->sge.fwq;
3820 
3821 	if (fwq->flags & IQ_HW_ALLOCATED) {
3822 		MPASS(fwq->flags & IQ_SW_ALLOCATED);
3823 		free_iq_fl_hwq(sc, fwq, NULL);
3824 		MPASS(!(fwq->flags & IQ_HW_ALLOCATED));
3825 	}
3826 
3827 	if (fwq->flags & IQ_SW_ALLOCATED) {
3828 		MPASS(!(fwq->flags & IQ_HW_ALLOCATED));
3829 		free_iq_fl(sc, fwq, NULL);
3830 		MPASS(!(fwq->flags & IQ_SW_ALLOCATED));
3831 	}
3832 }
3833 
3834 /*
3835  * Idempotent.
3836  */
3837 static int
3838 alloc_ctrlq(struct adapter *sc, int idx)
3839 {
3840 	int rc;
3841 	char name[16];
3842 	struct sysctl_oid *oid;
3843 	struct sge_wrq *ctrlq = &sc->sge.ctrlq[idx];
3844 
3845 	MPASS(idx < sc->params.nports);
3846 
3847 	if (!(ctrlq->eq.flags & EQ_SW_ALLOCATED)) {
3848 		MPASS(!(ctrlq->eq.flags & EQ_HW_ALLOCATED));
3849 
3850 		snprintf(name, sizeof(name), "%d", idx);
3851 		oid = SYSCTL_ADD_NODE(&sc->ctx, SYSCTL_CHILDREN(sc->ctrlq_oid),
3852 		    OID_AUTO, name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
3853 		    "ctrl queue");
3854 
3855 		snprintf(name, sizeof(name), "%s ctrlq%d",
3856 		    device_get_nameunit(sc->dev), idx);
3857 		init_eq(sc, &ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, idx,
3858 		    &sc->sge.fwq, name);
3859 		rc = alloc_wrq(sc, NULL, ctrlq, &sc->ctx, oid);
3860 		if (rc != 0) {
3861 			CH_ERR(sc, "failed to allocate ctrlq%d: %d\n", idx, rc);
3862 			sysctl_remove_oid(oid, 1, 1);
3863 			return (rc);
3864 		}
3865 		MPASS(ctrlq->eq.flags & EQ_SW_ALLOCATED);
3866 	}
3867 
3868 	if (!(ctrlq->eq.flags & EQ_HW_ALLOCATED)) {
3869 		MPASS(ctrlq->eq.flags & EQ_SW_ALLOCATED);
3870 		MPASS(ctrlq->nwr_pending == 0);
3871 		MPASS(ctrlq->ndesc_needed == 0);
3872 
3873 		rc = alloc_eq_hwq(sc, NULL, &ctrlq->eq);
3874 		if (rc != 0) {
3875 			CH_ERR(sc, "failed to create hw ctrlq%d: %d\n", idx, rc);
3876 			return (rc);
3877 		}
3878 		MPASS(ctrlq->eq.flags & EQ_HW_ALLOCATED);
3879 	}
3880 
3881 	return (0);
3882 }
3883 
3884 /*
3885  * Idempotent.
3886  */
3887 static void
3888 free_ctrlq(struct adapter *sc, int idx)
3889 {
3890 	struct sge_wrq *ctrlq = &sc->sge.ctrlq[idx];
3891 
3892 	if (ctrlq->eq.flags & EQ_HW_ALLOCATED) {
3893 		MPASS(ctrlq->eq.flags & EQ_SW_ALLOCATED);
3894 		free_eq_hwq(sc, NULL, &ctrlq->eq);
3895 		MPASS(!(ctrlq->eq.flags & EQ_HW_ALLOCATED));
3896 	}
3897 
3898 	if (ctrlq->eq.flags & EQ_SW_ALLOCATED) {
3899 		MPASS(!(ctrlq->eq.flags & EQ_HW_ALLOCATED));
3900 		free_wrq(sc, ctrlq);
3901 		MPASS(!(ctrlq->eq.flags & EQ_SW_ALLOCATED));
3902 	}
3903 }
3904 
3905 int
3906 t4_sge_set_conm_context(struct adapter *sc, int cntxt_id, int cong_drop,
3907     int cong_map)
3908 {
3909 	const int cng_ch_bits_log = sc->chip_params->cng_ch_bits_log;
3910 	uint32_t param, val;
3911 	uint16_t ch_map;
3912 	int cong_mode, rc, i;
3913 
3914 	if (chip_id(sc) < CHELSIO_T5)
3915 		return (ENOTSUP);
3916 
3917 	/* Convert the driver knob to the mode understood by the firmware. */
3918 	switch (cong_drop) {
3919 	case -1:
3920 		cong_mode = X_CONMCTXT_CNGTPMODE_DISABLE;
3921 		break;
3922 	case 0:
3923 		cong_mode = X_CONMCTXT_CNGTPMODE_CHANNEL;
3924 		break;
3925 	case 1:
3926 		cong_mode = X_CONMCTXT_CNGTPMODE_QUEUE;
3927 		break;
3928 	case 2:
3929 		cong_mode = X_CONMCTXT_CNGTPMODE_BOTH;
3930 		break;
3931 	default:
3932 		MPASS(0);
3933 		CH_ERR(sc, "cong_drop = %d is invalid (ingress queue %d).\n",
3934 		    cong_drop, cntxt_id);
3935 		return (EINVAL);
3936 	}
3937 
3938 	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
3939 	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
3940 	    V_FW_PARAMS_PARAM_YZ(cntxt_id);
3941 	val = V_CONMCTXT_CNGTPMODE(cong_mode);
3942 	if (cong_mode == X_CONMCTXT_CNGTPMODE_CHANNEL ||
3943 	    cong_mode == X_CONMCTXT_CNGTPMODE_BOTH) {
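		/*
		 * Spread each enabled channel into its slot in the
		 * congestion channel map; the slot width is chip-dependent
		 * (cng_ch_bits_log).
		 */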
3944 		for (i = 0, ch_map = 0; i < 4; i++) {
3945 			if (cong_map & (1 << i))
3946 				ch_map |= 1 << (i << cng_ch_bits_log);
3947 		}
3948 		val |= V_CONMCTXT_CNGCHMAP(ch_map);
3949 	}
3950 	rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
3951 	if (rc != 0) {
3952 		CH_ERR(sc, "failed to set congestion manager context "
3953 		    "for ingress queue %d: %d\n", cntxt_id, rc);
3954 	}
3955 
3956 	return (rc);
3957 }
3958 
3959 /*
3960  * Idempotent.
3961  */
3962 static int
3963 alloc_rxq(struct vi_info *vi, struct sge_rxq *rxq, int idx, int intr_idx,
3964     int maxp)
3965 {
3966 	int rc;
3967 	struct adapter *sc = vi->adapter;
3968 	if_t ifp = vi->ifp;
3969 	struct sysctl_oid *oid;
3970 	char name[16];
3971 
3972 	if (!(rxq->iq.flags & IQ_SW_ALLOCATED)) {
3973 		MPASS(!(rxq->iq.flags & IQ_HW_ALLOCATED));
3974 #if defined(INET) || defined(INET6)
3975 		rc = tcp_lro_init_args(&rxq->lro, ifp, lro_entries, lro_mbufs);
3976 		if (rc != 0)
3977 			return (rc);
3978 		MPASS(rxq->lro.ifp == ifp);	/* also indicates LRO init'ed */
3979 #endif
3980 		rxq->ifp = ifp;
3981 
3982 		snprintf(name, sizeof(name), "%d", idx);
3983 		oid = SYSCTL_ADD_NODE(&vi->ctx, SYSCTL_CHILDREN(vi->rxq_oid),
3984 		    OID_AUTO, name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
3985 		    "rx queue");
3986 
3987 		init_iq(&rxq->iq, sc, vi->tmr_idx, vi->pktc_idx, vi->qsize_rxq,
3988 		    intr_idx, cong_drop, IQ_ETH);
3989 #if defined(INET) || defined(INET6)
3990 		if (if_getcapenable(ifp) & IFCAP_LRO)
3991 			rxq->iq.flags |= IQ_LRO_ENABLED;
3992 #endif
3993 		if (if_getcapenable(ifp) & IFCAP_HWRXTSTMP)
3994 			rxq->iq.flags |= IQ_RX_TIMESTAMP;
3995 		snprintf(name, sizeof(name), "%s rxq%d-fl",
3996 		    device_get_nameunit(vi->dev), idx);
3997 		init_fl(sc, &rxq->fl, vi->qsize_rxq / 8, maxp, name);
3998 		rc = alloc_iq_fl(vi, &rxq->iq, &rxq->fl, &vi->ctx, oid);
3999 		if (rc != 0) {
4000 			CH_ERR(vi, "failed to allocate rxq%d: %d\n", idx, rc);
4001 			sysctl_remove_oid(oid, 1, 1);
4002 #if defined(INET) || defined(INET6)
4003 			tcp_lro_free(&rxq->lro);
4004 			rxq->lro.ifp = NULL;
4005 #endif
4006 			return (rc);
4007 		}
4008 		MPASS(rxq->iq.flags & IQ_SW_ALLOCATED);
4009 		add_rxq_sysctls(&vi->ctx, oid, rxq);
4010 	}
4011 
4012 	if (!(rxq->iq.flags & IQ_HW_ALLOCATED)) {
4013 		MPASS(rxq->iq.flags & IQ_SW_ALLOCATED);
4014 		rc = alloc_iq_fl_hwq(vi, &rxq->iq, &rxq->fl);
4015 		if (rc != 0) {
4016 			CH_ERR(vi, "failed to create hw rxq%d: %d\n", idx, rc);
4017 			return (rc);
4018 		}
4019 		MPASS(rxq->iq.flags & IQ_HW_ALLOCATED);
4020 
4021 		if (idx == 0)
4022 			sc->sge.iq_base = rxq->iq.abs_id - rxq->iq.cntxt_id;
4023 		else
4024 			KASSERT(rxq->iq.cntxt_id + sc->sge.iq_base == rxq->iq.abs_id,
4025 			    ("iq_base mismatch"));
4026 		KASSERT(sc->sge.iq_base == 0 || sc->flags & IS_VF,
4027 		    ("PF with non-zero iq_base"));
4028 
4029 		/*
4030 		 * The freelist is just barely above the starvation threshold
4031 		 * right now, fill it up a bit more.
4032 		 */
4033 		FL_LOCK(&rxq->fl);
4034 		refill_fl(sc, &rxq->fl, 128);
4035 		FL_UNLOCK(&rxq->fl);
4036 	}
4037 
4038 	return (0);
4039 }
4040 
4041 /*
4042  * Idempotent.
4043  */
4044 static void
4045 free_rxq(struct vi_info *vi, struct sge_rxq *rxq)
4046 {
4047 	if (rxq->iq.flags & IQ_HW_ALLOCATED) {
4048 		MPASS(rxq->iq.flags & IQ_SW_ALLOCATED);
4049 		free_iq_fl_hwq(vi->adapter, &rxq->iq, &rxq->fl);
4050 		MPASS(!(rxq->iq.flags & IQ_HW_ALLOCATED));
4051 	}
4052 
4053 	if (rxq->iq.flags & IQ_SW_ALLOCATED) {
4054 		MPASS(!(rxq->iq.flags & IQ_HW_ALLOCATED));
4055 #if defined(INET) || defined(INET6)
4056 		tcp_lro_free(&rxq->lro);
4057 #endif
4058 		free_iq_fl(vi->adapter, &rxq->iq, &rxq->fl);
4059 		MPASS(!(rxq->iq.flags & IQ_SW_ALLOCATED));
4060 		bzero(rxq, sizeof(*rxq));
4061 	}
4062 }
4063 
4064 static void
4065 add_rxq_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *oid,
4066     struct sge_rxq *rxq)
4067 {
4068 	struct sysctl_oid_list *children;
4069 
4070 	if (ctx == NULL || oid == NULL)
4071 		return;
4072 
4073 	children = SYSCTL_CHILDREN(oid);
4074 #if defined(INET) || defined(INET6)
4075 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "lro_queued", CTLFLAG_RD,
4076 	    &rxq->lro.lro_queued, 0, NULL);
4077 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "lro_flushed", CTLFLAG_RD,
4078 	    &rxq->lro.lro_flushed, 0, NULL);
4079 #endif
4080 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "rxcsum", CTLFLAG_RD,
4081 	    &rxq->rxcsum, "# of times hardware assisted with checksum");
4082 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "vlan_extraction", CTLFLAG_RD,
4083 	    &rxq->vlan_extraction, "# of times hardware extracted 802.1Q tag");
4084 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "vxlan_rxcsum", CTLFLAG_RD,
4085 	    &rxq->vxlan_rxcsum,
4086 	    "# of times hardware assisted with inner checksum (VXLAN)");
4087 }
4088 
4089 #ifdef TCP_OFFLOAD
4090 /*
4091  * Idempotent.
4092  */
4093 static int
4094 alloc_ofld_rxq(struct vi_info *vi, struct sge_ofld_rxq *ofld_rxq, int idx,
4095     int intr_idx, int maxp)
4096 {
4097 	int rc;
4098 	struct adapter *sc = vi->adapter;
4099 	struct sysctl_oid *oid;
4100 	char name[16];
4101 
4102 	if (!(ofld_rxq->iq.flags & IQ_SW_ALLOCATED)) {
4103 		MPASS(!(ofld_rxq->iq.flags & IQ_HW_ALLOCATED));
4104 
4105 		snprintf(name, sizeof(name), "%d", idx);
4106 		oid = SYSCTL_ADD_NODE(&vi->ctx,
4107 		    SYSCTL_CHILDREN(vi->ofld_rxq_oid), OID_AUTO, name,
4108 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "offload rx queue");
4109 
4110 		init_iq(&ofld_rxq->iq, sc, vi->ofld_tmr_idx, vi->ofld_pktc_idx,
4111 		    vi->qsize_rxq, intr_idx, ofld_cong_drop, IQ_OFLD);
4112 		snprintf(name, sizeof(name), "%s ofld_rxq%d-fl",
4113 		    device_get_nameunit(vi->dev), idx);
4114 		init_fl(sc, &ofld_rxq->fl, vi->qsize_rxq / 8, maxp, name);
4115 		rc = alloc_iq_fl(vi, &ofld_rxq->iq, &ofld_rxq->fl, &vi->ctx,
4116 		    oid);
4117 		if (rc != 0) {
4118 			CH_ERR(vi, "failed to allocate ofld_rxq%d: %d\n", idx,
4119 			    rc);
4120 			sysctl_remove_oid(oid, 1, 1);
4121 			return (rc);
4122 		}
4123 		MPASS(ofld_rxq->iq.flags & IQ_SW_ALLOCATED);
4124 		ofld_rxq->rx_iscsi_ddp_setup_ok = counter_u64_alloc(M_WAITOK);
4125 		ofld_rxq->rx_iscsi_ddp_setup_error =
4126 		    counter_u64_alloc(M_WAITOK);
4127 		ofld_rxq->ddp_buffer_alloc = counter_u64_alloc(M_WAITOK);
4128 		ofld_rxq->ddp_buffer_reuse = counter_u64_alloc(M_WAITOK);
4129 		ofld_rxq->ddp_buffer_free = counter_u64_alloc(M_WAITOK);
4130 		add_ofld_rxq_sysctls(&vi->ctx, oid, ofld_rxq);
4131 	}
4132 
4133 	if (!(ofld_rxq->iq.flags & IQ_HW_ALLOCATED)) {
4134 		MPASS(ofld_rxq->iq.flags & IQ_SW_ALLOCATED);
4135 		rc = alloc_iq_fl_hwq(vi, &ofld_rxq->iq, &ofld_rxq->fl);
4136 		if (rc != 0) {
4137 			CH_ERR(vi, "failed to create hw ofld_rxq%d: %d\n", idx,
4138 			    rc);
4139 			return (rc);
4140 		}
4141 		MPASS(ofld_rxq->iq.flags & IQ_HW_ALLOCATED);
4142 	}
4143 	return (rc);
4144 }
4145 
4146 /*
4147  * Idempotent.
4148  */
4149 static void
4150 free_ofld_rxq(struct vi_info *vi, struct sge_ofld_rxq *ofld_rxq)
4151 {
4152 	if (ofld_rxq->iq.flags & IQ_HW_ALLOCATED) {
4153 		MPASS(ofld_rxq->iq.flags & IQ_SW_ALLOCATED);
4154 		free_iq_fl_hwq(vi->adapter, &ofld_rxq->iq, &ofld_rxq->fl);
4155 		MPASS(!(ofld_rxq->iq.flags & IQ_HW_ALLOCATED));
4156 	}
4157 
4158 	if (ofld_rxq->iq.flags & IQ_SW_ALLOCATED) {
4159 		MPASS(!(ofld_rxq->iq.flags & IQ_HW_ALLOCATED));
4160 		free_iq_fl(vi->adapter, &ofld_rxq->iq, &ofld_rxq->fl);
4161 		MPASS(!(ofld_rxq->iq.flags & IQ_SW_ALLOCATED));
4162 		counter_u64_free(ofld_rxq->rx_iscsi_ddp_setup_ok);
4163 		counter_u64_free(ofld_rxq->rx_iscsi_ddp_setup_error);
4164 		counter_u64_free(ofld_rxq->ddp_buffer_alloc);
4165 		counter_u64_free(ofld_rxq->ddp_buffer_reuse);
4166 		counter_u64_free(ofld_rxq->ddp_buffer_free);
4167 		bzero(ofld_rxq, sizeof(*ofld_rxq));
4168 	}
4169 }
4170 
4171 static void
4172 add_ofld_rxq_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *oid,
4173     struct sge_ofld_rxq *ofld_rxq)
4174 {
4175 	struct sysctl_oid_list *children;
4176 
4177 	if (ctx == NULL || oid == NULL)
4178 		return;
4179 
4180 	children = SYSCTL_CHILDREN(oid);
4181 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "rx_aio_ddp_jobs",
4182 	    CTLFLAG_RD, &ofld_rxq->rx_aio_ddp_jobs, 0,
4183 	    "# of aio_read(2) jobs completed via DDP");
4184 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "rx_aio_ddp_octets",
4185 	    CTLFLAG_RD, &ofld_rxq->rx_aio_ddp_octets, 0,
4186 	    "# of octets placed directly for aio_read(2) jobs");
4187 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
4188 	    "rx_toe_tls_records", CTLFLAG_RD, &ofld_rxq->rx_toe_tls_records,
4189 	    "# of TOE TLS records received");
4190 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
4191 	    "rx_toe_tls_octets", CTLFLAG_RD, &ofld_rxq->rx_toe_tls_octets,
4192 	    "# of payload octets in received TOE TLS records");
4193 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
4194 	    "rx_toe_ddp_octets", CTLFLAG_RD, &ofld_rxq->rx_toe_ddp_octets,
4195 	    "# of payload octets received via TCP DDP");
4196 	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO,
4197 	    "ddp_buffer_alloc", CTLFLAG_RD, &ofld_rxq->ddp_buffer_alloc,
4198 	    "# of DDP RCV buffers allocated");
4199 	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO,
4200 	    "ddp_buffer_reuse", CTLFLAG_RD, &ofld_rxq->ddp_buffer_reuse,
4201 	    "# of DDP RCV buffers reused");
4202 	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO,
4203 	    "ddp_buffer_free", CTLFLAG_RD, &ofld_rxq->ddp_buffer_free,
4204 	    "# of DDP RCV buffers freed");
4205 
4206 	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "iscsi",
4207 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TOE iSCSI statistics");
4208 	children = SYSCTL_CHILDREN(oid);
4209 
4210 	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "ddp_setup_ok",
4211 	    CTLFLAG_RD, &ofld_rxq->rx_iscsi_ddp_setup_ok,
4212 	    "# of times DDP buffer was setup successfully.");
4213 	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "ddp_setup_error",
4214 	    CTLFLAG_RD, &ofld_rxq->rx_iscsi_ddp_setup_error,
4215 	    "# of times DDP buffer setup failed.");
4216 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "ddp_octets",
4217 	    CTLFLAG_RD, &ofld_rxq->rx_iscsi_ddp_octets, 0,
4218 	    "# of octets placed directly");
4219 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "ddp_pdus",
4220 	    CTLFLAG_RD, &ofld_rxq->rx_iscsi_ddp_pdus, 0,
4221 	    "# of PDUs with data placed directly.");
4222 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "fl_octets",
4223 	    CTLFLAG_RD, &ofld_rxq->rx_iscsi_fl_octets, 0,
4224 	    "# of data octets delivered in freelist");
4225 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "fl_pdus",
4226 	    CTLFLAG_RD, &ofld_rxq->rx_iscsi_fl_pdus, 0,
4227 	    "# of PDUs with data delivered in freelist");
4228 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "padding_errors",
4229 	    CTLFLAG_RD, &ofld_rxq->rx_iscsi_padding_errors, 0,
4230 	    "# of PDUs with invalid padding");
4231 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "header_digest_errors",
4232 	    CTLFLAG_RD, &ofld_rxq->rx_iscsi_header_digest_errors, 0,
4233 	    "# of PDUs with invalid header digests");
4234 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "data_digest_errors",
4235 	    CTLFLAG_RD, &ofld_rxq->rx_iscsi_data_digest_errors, 0,
4236 	    "# of PDUs with invalid data digests");
4237 }
4238 #endif
4239 
4240 /*
4241  * Returns a reasonable automatic cidx flush threshold for a given queue size.
4242  */
4243 static u_int
4244 qsize_to_fthresh(int qsize)
4245 {
4246 	u_int fthresh;
4247 
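	/* The threshold field is log2-encoded; clamp at the largest code. */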
4248 	fthresh = qsize == 0 ? 0 : order_base_2(qsize);
4249 	if (fthresh > X_CIDXFLUSHTHRESH_128)
4250 		fthresh = X_CIDXFLUSHTHRESH_128;
4251 
4252 	return (fthresh);
4253 }
4254 
4255 static int
4256 ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq)
4257 {
4258 	int rc, cntxt_id;
4259 	struct fw_eq_ctrl_cmd c;
4260 	int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;
4261 
4262 	bzero(&c, sizeof(c));
4263 
4264 	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
4265 	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(sc->pf) |
4266 	    V_FW_EQ_CTRL_CMD_VFN(0));
4267 	c.alloc_to_len16 = htobe32(F_FW_EQ_CTRL_CMD_ALLOC |
4268 	    F_FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
4269 	c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(eq->iqid));
4270 	c.physeqid_pkd = htobe32(0);
4271 	c.fetchszm_to_iqid =
4272 	    htobe32(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
4273 		V_FW_EQ_CTRL_CMD_PCIECHN(eq->tx_chan) |
4274 		F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(eq->iqid));
4275 	c.dcaen_to_eqsize =
4276 	    htobe32(V_FW_EQ_CTRL_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ?
4277 		X_FETCHBURSTMIN_64B : X_FETCHBURSTMIN_64B_T6) |
4278 		V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
4279 		V_FW_EQ_CTRL_CMD_CIDXFTHRESH(qsize_to_fthresh(qsize)) |
4280 		V_FW_EQ_CTRL_CMD_EQSIZE(qsize));
4281 	c.eqaddr = htobe64(eq->ba);
4282 
4283 	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
4284 	if (rc != 0) {
4285 		CH_ERR(sc, "failed to create hw ctrlq for tx_chan %d: %d\n",
4286 		    eq->tx_chan, rc);
4287 		return (rc);
4288 	}
4289 
4290 	eq->cntxt_id = G_FW_EQ_CTRL_CMD_EQID(be32toh(c.cmpliqid_eqid));
4291 	eq->abs_id = G_FW_EQ_CTRL_CMD_PHYSEQID(be32toh(c.physeqid_pkd));
4292 	cntxt_id = eq->cntxt_id - sc->sge.eq_start;
4293 	if (cntxt_id >= sc->sge.eqmap_sz)
4294 	    panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
4295 		cntxt_id, sc->sge.eqmap_sz - 1);
4296 	sc->sge.eqmap[cntxt_id] = eq;
4297 
4298 	return (rc);
4299 }
4300 
4301 static int
4302 eth_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
4303 {
4304 	int rc, cntxt_id;
4305 	struct fw_eq_eth_cmd c;
4306 	int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;
4307 
4308 	bzero(&c, sizeof(c));
4309 
4310 	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
4311 	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
4312 	    V_FW_EQ_ETH_CMD_VFN(0));
4313 	c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC |
4314 	    F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
4315 	c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE |
4316 	    F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid));
4317 	c.fetchszm_to_iqid =
4318 	    htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
4319 		V_FW_EQ_ETH_CMD_PCIECHN(eq->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
4320 		V_FW_EQ_ETH_CMD_IQID(eq->iqid));
4321 	c.dcaen_to_eqsize =
4322 	    htobe32(V_FW_EQ_ETH_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ?
4323 		X_FETCHBURSTMIN_64B : X_FETCHBURSTMIN_64B_T6) |
4324 		V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
4325 		V_FW_EQ_ETH_CMD_EQSIZE(qsize));
4326 	c.eqaddr = htobe64(eq->ba);
4327 
4328 	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
4329 	if (rc != 0) {
4330 		device_printf(vi->dev,
4331 		    "failed to create Ethernet egress queue: %d\n", rc);
4332 		return (rc);
4333 	}
4334 
4335 	eq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd));
4336 	eq->abs_id = G_FW_EQ_ETH_CMD_PHYSEQID(be32toh(c.physeqid_pkd));
4337 	cntxt_id = eq->cntxt_id - sc->sge.eq_start;
4338 	if (cntxt_id >= sc->sge.eqmap_sz)
4339 	    panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
4340 		cntxt_id, sc->sge.eqmap_sz - 1);
4341 	sc->sge.eqmap[cntxt_id] = eq;
4342 
4343 	return (rc);
4344 }
4345 
4346 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
4347 static int
4348 ofld_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
4349 {
4350 	int rc, cntxt_id;
4351 	struct fw_eq_ofld_cmd c;
4352 	int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;
4353 
4354 	bzero(&c, sizeof(c));
4355 
4356 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST |
4357 	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(sc->pf) |
4358 	    V_FW_EQ_OFLD_CMD_VFN(0));
4359 	c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_ALLOC |
4360 	    F_FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
4361 	c.fetchszm_to_iqid =
4362 		htonl(V_FW_EQ_OFLD_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
4363 		    V_FW_EQ_OFLD_CMD_PCIECHN(eq->tx_chan) |
4364 		    F_FW_EQ_OFLD_CMD_FETCHRO | V_FW_EQ_OFLD_CMD_IQID(eq->iqid));
4365 	c.dcaen_to_eqsize =
4366 	    htobe32(V_FW_EQ_OFLD_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ?
4367 		X_FETCHBURSTMIN_64B : X_FETCHBURSTMIN_64B_T6) |
4368 		V_FW_EQ_OFLD_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
4369 		V_FW_EQ_OFLD_CMD_CIDXFTHRESH(qsize_to_fthresh(qsize)) |
4370 		V_FW_EQ_OFLD_CMD_EQSIZE(qsize));
4371 	c.eqaddr = htobe64(eq->ba);
4372 
4373 	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
4374 	if (rc != 0) {
4375 		device_printf(vi->dev,
4376 		    "failed to create egress queue for TCP offload: %d\n", rc);
4377 		return (rc);
4378 	}
4379 
4380 	eq->cntxt_id = G_FW_EQ_OFLD_CMD_EQID(be32toh(c.eqid_pkd));
4381 	eq->abs_id = G_FW_EQ_OFLD_CMD_PHYSEQID(be32toh(c.physeqid_pkd));
4382 	cntxt_id = eq->cntxt_id - sc->sge.eq_start;
4383 	if (cntxt_id >= sc->sge.eqmap_sz)
4384 	    panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
4385 		cntxt_id, sc->sge.eqmap_sz - 1);
4386 	sc->sge.eqmap[cntxt_id] = eq;
4387 
4388 	return (rc);
4389 }
4390 #endif
4391 
4392 /* SW only */
4393 static int
4394 alloc_eq(struct adapter *sc, struct sge_eq *eq, struct sysctl_ctx_list *ctx,
4395     struct sysctl_oid *oid)
4396 {
4397 	int rc, qsize;
4398 	size_t len;
4399 
4400 	MPASS(!(eq->flags & EQ_SW_ALLOCATED));
4401 
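	/* The ring also holds the status page that follows the descriptors. */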
4402 	qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;
4403 	len = qsize * EQ_ESIZE;
4404 	rc = alloc_ring(sc, len, &eq->desc_tag, &eq->desc_map, &eq->ba,
4405 	    (void **)&eq->desc);
4406 	if (rc)
4407 		return (rc);
4408 	if (ctx != NULL && oid != NULL)
4409 		add_eq_sysctls(sc, ctx, oid, eq);
4410 	eq->flags |= EQ_SW_ALLOCATED;
4411 
4412 	return (0);
4413 }
4414 
4415 /* SW only */
4416 static void
4417 free_eq(struct adapter *sc, struct sge_eq *eq)
4418 {
4419 	MPASS(eq->flags & EQ_SW_ALLOCATED);
4420 	if (eq->type == EQ_ETH)
4421 		MPASS(eq->pidx == eq->cidx);
4422 
4423 	free_ring(sc, eq->desc_tag, eq->desc_map, eq->ba, eq->desc);
4424 	mtx_destroy(&eq->eq_lock);
4425 	bzero(eq, sizeof(*eq));
4426 }
4427 
4428 static void
4429 add_eq_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx,
4430     struct sysctl_oid *oid, struct sge_eq *eq)
4431 {
4432 	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
4433 
4434 	SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD, &eq->ba,
4435 	    "bus address of descriptor ring");
4436 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL,
4437 	    eq->sidx * EQ_ESIZE + sc->params.sge.spg_len,
4438 	    "desc ring size in bytes");
4439 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "abs_id", CTLFLAG_RD,
4440 	    &eq->abs_id, 0, "absolute id of the queue");
4441 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
4442 	    &eq->cntxt_id, 0, "SGE context id of the queue");
4443 	SYSCTL_ADD_U16(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, &eq->cidx,
4444 	    0, "consumer index");
4445 	SYSCTL_ADD_U16(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, &eq->pidx,
4446 	    0, "producer index");
4447 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sidx", CTLFLAG_RD, NULL,
4448 	    eq->sidx, "status page index");
4449 }
4450 
4451 static int
4452 alloc_eq_hwq(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
4453 {
4454 	int rc;
4455 
4456 	MPASS(!(eq->flags & EQ_HW_ALLOCATED));
4457 
4458 	eq->iqid = eq->iq->cntxt_id;
4459 	eq->pidx = eq->cidx = eq->dbidx = 0;
4460 	/* Note that equeqidx is not used with sge_wrq (OFLD/CTRL) queues. */
4461 	eq->equeqidx = 0;
4462 	eq->doorbells = sc->doorbells;
4463 	bzero(eq->desc, eq->sidx * EQ_ESIZE + sc->params.sge.spg_len);
4464 
4465 	switch (eq->type) {
4466 	case EQ_CTRL:
4467 		rc = ctrl_eq_alloc(sc, eq);
4468 		break;
4469 
4470 	case EQ_ETH:
4471 		rc = eth_eq_alloc(sc, vi, eq);
4472 		break;
4473 
4474 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
4475 	case EQ_OFLD:
4476 		rc = ofld_eq_alloc(sc, vi, eq);
4477 		break;
4478 #endif
4479 
4480 	default:
4481 		panic("%s: invalid eq type %d.", __func__, eq->type);
4482 	}
4483 	if (rc != 0) {
4484 		CH_ERR(sc, "failed to allocate egress queue(%d): %d\n",
4485 		    eq->type, rc);
4486 		return (rc);
4487 	}
4488 
4489 	if (isset(&eq->doorbells, DOORBELL_UDB) ||
4490 	    isset(&eq->doorbells, DOORBELL_UDBWC) ||
4491 	    isset(&eq->doorbells, DOORBELL_WCWR)) {
4492 		uint32_t s_qpp = sc->params.sge.eq_s_qpp;
4493 		uint32_t mask = (1 << s_qpp) - 1;
4494 		volatile uint8_t *udb;
4495 
4496 		udb = sc->udbs_base + UDBS_DB_OFFSET;
4497 		udb += (eq->cntxt_id >> s_qpp) << PAGE_SHIFT;	/* pg offset */
4498 		eq->udb_qid = eq->cntxt_id & mask;		/* id in page */
4499 		if (eq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE)
4500 			clrbit(&eq->doorbells, DOORBELL_WCWR);
4501 		else {
4502 			udb += eq->udb_qid << UDBS_SEG_SHIFT;	/* seg offset */
4503 			eq->udb_qid = 0;
4504 		}
4505 		eq->udb = (volatile void *)udb;
4506 	}
4507 
4508 	eq->flags |= EQ_HW_ALLOCATED;
4509 	return (0);
4510 }
4511 
4512 static int
4513 free_eq_hwq(struct adapter *sc, struct vi_info *vi __unused, struct sge_eq *eq)
4514 {
4515 	int rc;
4516 
4517 	MPASS(eq->flags & EQ_HW_ALLOCATED);
4518 
4519 	switch (eq->type) {
4520 	case EQ_CTRL:
4521 		rc = -t4_ctrl_eq_free(sc, sc->mbox, sc->pf, 0, eq->cntxt_id);
4522 		break;
4523 	case EQ_ETH:
4524 		rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, eq->cntxt_id);
4525 		break;
4526 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
4527 	case EQ_OFLD:
4528 		rc = -t4_ofld_eq_free(sc, sc->mbox, sc->pf, 0, eq->cntxt_id);
4529 		break;
4530 #endif
4531 	default:
4532 		panic("%s: invalid eq type %d.", __func__, eq->type);
4533 	}
4534 	if (rc != 0) {
4535 		CH_ERR(sc, "failed to free eq (type %d): %d\n", eq->type, rc);
4536 		return (rc);
4537 	}
4538 	eq->flags &= ~EQ_HW_ALLOCATED;
4539 
4540 	return (0);
4541 }
4542 
4543 static int
4544 alloc_wrq(struct adapter *sc, struct vi_info *vi, struct sge_wrq *wrq,
4545     struct sysctl_ctx_list *ctx, struct sysctl_oid *oid)
4546 {
4547 	struct sge_eq *eq = &wrq->eq;
4548 	int rc;
4549 
4550 	MPASS(!(eq->flags & EQ_SW_ALLOCATED));
4551 
4552 	rc = alloc_eq(sc, eq, ctx, oid);
4553 	if (rc)
4554 		return (rc);
4555 	MPASS(eq->flags & EQ_SW_ALLOCATED);
4556 	/* Can't fail after this. */
4557 
4558 	wrq->adapter = sc;
4559 	TASK_INIT(&wrq->wrq_tx_task, 0, wrq_tx_drain, wrq);
4560 	TAILQ_INIT(&wrq->incomplete_wrs);
4561 	STAILQ_INIT(&wrq->wr_list);
4562 	wrq->nwr_pending = 0;
4563 	wrq->ndesc_needed = 0;
4564 	add_wrq_sysctls(ctx, oid, wrq);
4565 
4566 	return (0);
4567 }
4568 
4569 static void
4570 free_wrq(struct adapter *sc, struct sge_wrq *wrq)
4571 {
4572 	free_eq(sc, &wrq->eq);
4573 	MPASS(wrq->nwr_pending == 0);
4574 	MPASS(wrq->ndesc_needed == 0);
4575 	MPASS(TAILQ_EMPTY(&wrq->incomplete_wrs));
4576 	MPASS(STAILQ_EMPTY(&wrq->wr_list));
4577 	bzero(wrq, sizeof(*wrq));
4578 }
4579 
4580 static void
4581 add_wrq_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *oid,
4582     struct sge_wrq *wrq)
4583 {
4584 	struct sysctl_oid_list *children;
4585 
4586 	if (ctx == NULL || oid == NULL)
4587 		return;
4588 
4589 	children = SYSCTL_CHILDREN(oid);
4590 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_direct", CTLFLAG_RD,
4591 	    &wrq->tx_wrs_direct, "# of work requests (direct)");
4592 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_copied", CTLFLAG_RD,
4593 	    &wrq->tx_wrs_copied, "# of work requests (copied)");
4594 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_sspace", CTLFLAG_RD,
4595 	    &wrq->tx_wrs_ss, "# of work requests (copied from scratch space)");
4596 }
4597 
4598 /*
4599  * Idempotent.
4600  */
4601 static int
4602 alloc_txq(struct vi_info *vi, struct sge_txq *txq, int idx)
4603 {
4604 	int rc, iqidx;
4605 	struct port_info *pi = vi->pi;
4606 	struct adapter *sc = vi->adapter;
4607 	struct sge_eq *eq = &txq->eq;
4608 	struct txpkts *txp;
4609 	char name[16];
4610 	struct sysctl_oid *oid;
4611 
4612 	if (!(eq->flags & EQ_SW_ALLOCATED)) {
4613 		MPASS(!(eq->flags & EQ_HW_ALLOCATED));
4614 
4615 		snprintf(name, sizeof(name), "%d", idx);
4616 		oid = SYSCTL_ADD_NODE(&vi->ctx, SYSCTL_CHILDREN(vi->txq_oid),
4617 		    OID_AUTO, name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
4618 		    "tx queue");
4619 
4620 		iqidx = vi->first_rxq + (idx % vi->nrxq);
4621 		snprintf(name, sizeof(name), "%s txq%d",
4622 		    device_get_nameunit(vi->dev), idx);
4623 		init_eq(sc, &txq->eq, EQ_ETH, vi->qsize_txq, pi->port_id,
4624 		    &sc->sge.rxq[iqidx].iq, name);
4625 
4626 		rc = mp_ring_alloc(&txq->r, eq->sidx, txq, eth_tx,
4627 		    can_resume_eth_tx, M_CXGBE, &eq->eq_lock, M_WAITOK);
4628 		if (rc != 0) {
4629 			CH_ERR(vi, "failed to allocate mp_ring for txq%d: %d\n",
4630 			    idx, rc);
4631 failed:
4632 			sysctl_remove_oid(oid, 1, 1);
4633 			return (rc);
4634 		}
4635 
4636 		rc = alloc_eq(sc, eq, &vi->ctx, oid);
4637 		if (rc) {
4638 			CH_ERR(vi, "failed to allocate txq%d: %d\n", idx, rc);
4639 			mp_ring_free(txq->r);
4640 			goto failed;
4641 		}
4642 		MPASS(eq->flags & EQ_SW_ALLOCATED);
4643 		/* Can't fail after this point. */
4644 
4645 		TASK_INIT(&txq->tx_reclaim_task, 0, tx_reclaim, eq);
4646 		txq->ifp = vi->ifp;
4647 		txq->gl = sglist_alloc(TX_SGL_SEGS, M_WAITOK);
4648 		txq->sdesc = malloc(eq->sidx * sizeof(struct tx_sdesc), M_CXGBE,
4649 		    M_ZERO | M_WAITOK);
4650 
4651 		add_txq_sysctls(vi, &vi->ctx, oid, txq);
4652 	}
4653 
4654 	if (!(eq->flags & EQ_HW_ALLOCATED)) {
4655 		MPASS(eq->flags & EQ_SW_ALLOCATED);
4656 		rc = alloc_eq_hwq(sc, vi, eq);
4657 		if (rc != 0) {
4658 			CH_ERR(vi, "failed to create hw txq%d: %d\n", idx, rc);
4659 			return (rc);
4660 		}
4661 		MPASS(eq->flags & EQ_HW_ALLOCATED);
4662 		/* Can't fail after this point. */
4663 
4664 		if (idx == 0)
4665 			sc->sge.eq_base = eq->abs_id - eq->cntxt_id;
4666 		else
4667 			KASSERT(eq->cntxt_id + sc->sge.eq_base == eq->abs_id,
4668 			    ("eq_base mismatch"));
4669 		KASSERT(sc->sge.eq_base == 0 || sc->flags & IS_VF,
4670 		    ("PF with non-zero eq_base"));
4671 
4672 		txp = &txq->txp;
4673 		MPASS(nitems(txp->mb) >= sc->params.max_pkts_per_eth_tx_pkts_wr);
4674 		txq->txp.max_npkt = min(nitems(txp->mb),
4675 		    sc->params.max_pkts_per_eth_tx_pkts_wr);
4676 		if (vi->flags & TX_USES_VM_WR && !(sc->flags & IS_VF))
4677 			txq->txp.max_npkt--;
4678 
4679 		if (vi->flags & TX_USES_VM_WR)
4680 			txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
4681 			    V_TXPKT_INTF(pi->tx_chan));
4682 		else
4683 			txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
4684 			    V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) |
4685 			    V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld));
4686 
4687 		txq->tc_idx = -1;
4688 	}
4689 
4690 	return (0);
4691 }
4692 
4693 /*
4694  * Idempotent.
4695  */
4696 static void
4697 free_txq(struct vi_info *vi, struct sge_txq *txq)
4698 {
4699 	struct adapter *sc = vi->adapter;
4700 	struct sge_eq *eq = &txq->eq;
4701 
4702 	if (eq->flags & EQ_HW_ALLOCATED) {
4703 		MPASS(eq->flags & EQ_SW_ALLOCATED);
4704 		free_eq_hwq(sc, NULL, eq);
4705 		MPASS(!(eq->flags & EQ_HW_ALLOCATED));
4706 	}
4707 
4708 	if (eq->flags & EQ_SW_ALLOCATED) {
4709 		MPASS(!(eq->flags & EQ_HW_ALLOCATED));
4710 		sglist_free(txq->gl);
4711 		free(txq->sdesc, M_CXGBE);
4712 		mp_ring_free(txq->r);
4713 		free_eq(sc, eq);
4714 		MPASS(!(eq->flags & EQ_SW_ALLOCATED));
4715 		bzero(txq, sizeof(*txq));
4716 	}
4717 }
4718 
4719 static void
4720 add_txq_sysctls(struct vi_info *vi, struct sysctl_ctx_list *ctx,
4721     struct sysctl_oid *oid, struct sge_txq *txq)
4722 {
4723 	struct adapter *sc;
4724 	struct sysctl_oid_list *children;
4725 
4726 	if (ctx == NULL || oid == NULL)
4727 		return;
4728 
4729 	sc = vi->adapter;
4730 	children = SYSCTL_CHILDREN(oid);
4731 
4732 	mp_ring_sysctls(txq->r, ctx, children);
4733 
4734 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tc",
4735 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, txq - sc->sge.txq,
4736 	    sysctl_tc, "I", "traffic class (-1 means none)");
4737 
4738 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "txcsum", CTLFLAG_RD,
4739 	    &txq->txcsum, "# of times hardware assisted with checksum");
4740 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "vlan_insertion", CTLFLAG_RD,
4741 	    &txq->vlan_insertion, "# of times hardware inserted 802.1Q tag");
4742 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tso_wrs", CTLFLAG_RD,
4743 	    &txq->tso_wrs, "# of TSO work requests");
4744 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "imm_wrs", CTLFLAG_RD,
4745 	    &txq->imm_wrs, "# of work requests with immediate data");
4746 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "sgl_wrs", CTLFLAG_RD,
4747 	    &txq->sgl_wrs, "# of work requests with direct SGL");
4748 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "txpkt_wrs", CTLFLAG_RD,
4749 	    &txq->txpkt_wrs, "# of txpkt work requests (one pkt/WR)");
4750 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "txpkts0_wrs", CTLFLAG_RD,
4751 	    &txq->txpkts0_wrs, "# of txpkts (type 0) work requests");
4752 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "txpkts1_wrs", CTLFLAG_RD,
4753 	    &txq->txpkts1_wrs, "# of txpkts (type 1) work requests");
4754 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "txpkts0_pkts", CTLFLAG_RD,
4755 	    &txq->txpkts0_pkts,
4756 	    "# of frames tx'd using type0 txpkts work requests");
4757 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "txpkts1_pkts", CTLFLAG_RD,
4758 	    &txq->txpkts1_pkts,
4759 	    "# of frames tx'd using type1 txpkts work requests");
4760 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "txpkts_flush", CTLFLAG_RD,
4761 	    &txq->txpkts_flush,
4762 	    "# of times txpkts had to be flushed out by an egress-update");
4763 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "raw_wrs", CTLFLAG_RD,
4764 	    &txq->raw_wrs, "# of raw work requests (non-packets)");
4765 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "vxlan_tso_wrs", CTLFLAG_RD,
4766 	    &txq->vxlan_tso_wrs, "# of VXLAN TSO work requests");
4767 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "vxlan_txcsum", CTLFLAG_RD,
4768 	    &txq->vxlan_txcsum,
4769 	    "# of times hardware assisted with inner checksums (VXLAN)");
4770 
4771 #ifdef KERN_TLS
4772 	if (is_ktls(sc)) {
4773 		SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_records",
4774 		    CTLFLAG_RD, &txq->kern_tls_records,
4775 		    "# of NIC TLS records transmitted");
4776 		SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_short",
4777 		    CTLFLAG_RD, &txq->kern_tls_short,
4778 		    "# of short NIC TLS records transmitted");
4779 		SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_partial",
4780 		    CTLFLAG_RD, &txq->kern_tls_partial,
4781 		    "# of partial NIC TLS records transmitted");
4782 		SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_full",
4783 		    CTLFLAG_RD, &txq->kern_tls_full,
4784 		    "# of full NIC TLS records transmitted");
4785 		SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_octets",
4786 		    CTLFLAG_RD, &txq->kern_tls_octets,
4787 		    "# of payload octets in transmitted NIC TLS records");
4788 		SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_waste",
4789 		    CTLFLAG_RD, &txq->kern_tls_waste,
4790 		    "# of octets DMAd but not transmitted in NIC TLS records");
4791 		SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_options",
4792 		    CTLFLAG_RD, &txq->kern_tls_options,
4793 		    "# of NIC TLS options-only packets transmitted");
4794 		SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_header",
4795 		    CTLFLAG_RD, &txq->kern_tls_header,
4796 		    "# of NIC TLS header-only packets transmitted");
4797 		SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_fin",
4798 		    CTLFLAG_RD, &txq->kern_tls_fin,
4799 		    "# of NIC TLS FIN-only packets transmitted");
4800 		SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_fin_short",
4801 		    CTLFLAG_RD, &txq->kern_tls_fin_short,
4802 		    "# of NIC TLS padded FIN packets on short TLS records");
4803 		SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_cbc",
4804 		    CTLFLAG_RD, &txq->kern_tls_cbc,
4805 		    "# of NIC TLS sessions using AES-CBC");
4806 		SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_gcm",
4807 		    CTLFLAG_RD, &txq->kern_tls_gcm,
4808 		    "# of NIC TLS sessions using AES-GCM");
4809 	}
4810 #endif
4811 }
4812 
4813 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
4814 /*
4815  * Idempotent.
4816  */
4817 static int
4818 alloc_ofld_txq(struct vi_info *vi, struct sge_ofld_txq *ofld_txq, int idx)
4819 {
4820 	struct sysctl_oid *oid;
4821 	struct port_info *pi = vi->pi;
4822 	struct adapter *sc = vi->adapter;
4823 	struct sge_eq *eq = &ofld_txq->wrq.eq;
4824 	int rc, iqidx;
4825 	char name[16];
4826 
4827 	MPASS(idx >= 0);
4828 	MPASS(idx < vi->nofldtxq);
4829 
4830 	if (!(eq->flags & EQ_SW_ALLOCATED)) {
4831 		snprintf(name, sizeof(name), "%d", idx);
4832 		oid = SYSCTL_ADD_NODE(&vi->ctx,
4833 		    SYSCTL_CHILDREN(vi->ofld_txq_oid), OID_AUTO, name,
4834 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "offload tx queue");
4835 
4836 		snprintf(name, sizeof(name), "%s ofld_txq%d",
4837 		    device_get_nameunit(vi->dev), idx);
4838 		if (vi->nofldrxq > 0) {
4839 			iqidx = vi->first_ofld_rxq + (idx % vi->nofldrxq);
4840 			init_eq(sc, eq, EQ_OFLD, vi->qsize_txq, pi->port_id,
4841 			    &sc->sge.ofld_rxq[iqidx].iq, name);
4842 		} else {
4843 			iqidx = vi->first_rxq + (idx % vi->nrxq);
4844 			init_eq(sc, eq, EQ_OFLD, vi->qsize_txq, pi->port_id,
4845 			    &sc->sge.rxq[iqidx].iq, name);
4846 		}
4847 
4848 		rc = alloc_wrq(sc, vi, &ofld_txq->wrq, &vi->ctx, oid);
4849 		if (rc != 0) {
4850 			CH_ERR(vi, "failed to allocate ofld_txq%d: %d\n", idx,
4851 			    rc);
4852 			sysctl_remove_oid(oid, 1, 1);
4853 			return (rc);
4854 		}
4855 		MPASS(eq->flags & EQ_SW_ALLOCATED);
4856 		/* Can't fail after this point. */
4857 
4858 		ofld_txq->tx_iscsi_pdus = counter_u64_alloc(M_WAITOK);
4859 		ofld_txq->tx_iscsi_octets = counter_u64_alloc(M_WAITOK);
4860 		ofld_txq->tx_iscsi_iso_wrs = counter_u64_alloc(M_WAITOK);
4861 		ofld_txq->tx_aio_jobs = counter_u64_alloc(M_WAITOK);
4862 		ofld_txq->tx_aio_octets = counter_u64_alloc(M_WAITOK);
4863 		ofld_txq->tx_toe_tls_records = counter_u64_alloc(M_WAITOK);
4864 		ofld_txq->tx_toe_tls_octets = counter_u64_alloc(M_WAITOK);
4865 		add_ofld_txq_sysctls(&vi->ctx, oid, ofld_txq);
4866 	}
4867 
4868 	if (!(eq->flags & EQ_HW_ALLOCATED)) {
4869 		MPASS(eq->flags & EQ_SW_ALLOCATED);
4870 		MPASS(ofld_txq->wrq.nwr_pending == 0);
4871 		MPASS(ofld_txq->wrq.ndesc_needed == 0);
4872 		rc = alloc_eq_hwq(sc, vi, eq);
4873 		if (rc != 0) {
4874 			CH_ERR(vi, "failed to create hw ofld_txq%d: %d\n", idx,
4875 			    rc);
4876 			return (rc);
4877 		}
4878 		MPASS(eq->flags & EQ_HW_ALLOCATED);
4879 	}
4880 
4881 	return (0);
4882 }
4883 
4884 /*
4885  * Idempotent.
4886  */
4887 static void
4888 free_ofld_txq(struct vi_info *vi, struct sge_ofld_txq *ofld_txq)
4889 {
4890 	struct adapter *sc = vi->adapter;
4891 	struct sge_eq *eq = &ofld_txq->wrq.eq;
4892 
4893 	if (eq->flags & EQ_HW_ALLOCATED) {
4894 		MPASS(eq->flags & EQ_SW_ALLOCATED);
4895 		free_eq_hwq(sc, NULL, eq);
4896 		MPASS(!(eq->flags & EQ_HW_ALLOCATED));
4897 	}
4898 
4899 	if (eq->flags & EQ_SW_ALLOCATED) {
4900 		MPASS(!(eq->flags & EQ_HW_ALLOCATED));
4901 		counter_u64_free(ofld_txq->tx_iscsi_pdus);
4902 		counter_u64_free(ofld_txq->tx_iscsi_octets);
4903 		counter_u64_free(ofld_txq->tx_iscsi_iso_wrs);
4904 		counter_u64_free(ofld_txq->tx_aio_jobs);
4905 		counter_u64_free(ofld_txq->tx_aio_octets);
4906 		counter_u64_free(ofld_txq->tx_toe_tls_records);
4907 		counter_u64_free(ofld_txq->tx_toe_tls_octets);
4908 		free_wrq(sc, &ofld_txq->wrq);
4909 		MPASS(!(eq->flags & EQ_SW_ALLOCATED));
4910 		bzero(ofld_txq, sizeof(*ofld_txq));
4911 	}
4912 }
4913 
4914 static void
4915 add_ofld_txq_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *oid,
4916     struct sge_ofld_txq *ofld_txq)
4917 {
4918 	struct sysctl_oid_list *children;
4919 
4920 	if (ctx == NULL || oid == NULL)
4921 		return;
4922 
4923 	children = SYSCTL_CHILDREN(oid);
4924 	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "tx_iscsi_pdus",
4925 	    CTLFLAG_RD, &ofld_txq->tx_iscsi_pdus,
4926 	    "# of iSCSI PDUs transmitted");
4927 	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "tx_iscsi_octets",
4928 	    CTLFLAG_RD, &ofld_txq->tx_iscsi_octets,
4929 	    "# of payload octets in transmitted iSCSI PDUs");
4930 	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "tx_iscsi_iso_wrs",
4931 	    CTLFLAG_RD, &ofld_txq->tx_iscsi_iso_wrs,
4932 	    "# of iSCSI segmentation offload work requests");
4933 	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "tx_aio_jobs",
4934 	    CTLFLAG_RD, &ofld_txq->tx_aio_jobs,
4935 	    "# of zero-copy aio_write(2) jobs transmitted");
4936 	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "tx_aio_octets",
4937 	    CTLFLAG_RD, &ofld_txq->tx_aio_octets,
4938 	    "# of payload octets in transmitted zero-copy aio_write(2) jobs");
4939 	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "tx_toe_tls_records",
4940 	    CTLFLAG_RD, &ofld_txq->tx_toe_tls_records,
4941 	    "# of TOE TLS records transmitted");
4942 	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "tx_toe_tls_octets",
4943 	    CTLFLAG_RD, &ofld_txq->tx_toe_tls_octets,
4944 	    "# of payload octets in transmitted TOE TLS records");
4945 }
4946 #endif
4947 
4948 static void
4949 oneseg_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
4950 {
4951 	bus_addr_t *ba = arg;
4952 
4953 	KASSERT(nseg == 1,
4954 	    ("%s meant for single segment mappings only.", __func__));
4955 
4956 	*ba = error ? 0 : segs->ds_addr;
4957 }
4958 
4959 static inline void
4960 ring_fl_db(struct adapter *sc, struct sge_fl *fl)
4961 {
4962 	uint32_t n, v;
4963 
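	/* # of hardware descriptors (each covering 8 buffers) to post. */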
4964 	n = IDXDIFF(fl->pidx >> 3, fl->dbidx, fl->sidx);
4965 	MPASS(n > 0);
4966 
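	/* Make the descriptor updates visible before ringing the doorbell. */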
4967 	wmb();
4968 	v = fl->dbval | V_PIDX(n);
4969 	if (fl->udb)
4970 		*fl->udb = htole32(v);
4971 	else
4972 		t4_write_reg(sc, sc->sge_kdoorbell_reg, v);
4973 	IDXINCR(fl->dbidx, n, fl->sidx);
4974 }
4975 
4976 /*
4977  * Fills up the freelist by allocating up to 'n' buffers.  Buffers that are
4978  * recycled do not count towards this allocation budget.
4979  *
4980  * Returns non-zero to indicate that this freelist should be added to the list
4981  * of starving freelists.
4982  */
4983 static int
4984 refill_fl(struct adapter *sc, struct sge_fl *fl, int n)
4985 {
4986 	__be64 *d;
4987 	struct fl_sdesc *sd;
4988 	uintptr_t pa;
4989 	caddr_t cl;
4990 	struct rx_buf_info *rxb;
4991 	struct cluster_metadata *clm;
4992 	uint16_t max_pidx, zidx = fl->zidx;
4993 	uint16_t hw_cidx = fl->hw_cidx;		/* stable snapshot */
4994 
4995 	FL_LOCK_ASSERT_OWNED(fl);
4996 
4997 	/*
4998 	 * We always stop at the beginning of the hardware descriptor that's just
4999 	 * before the one with the hw cidx.  This is to avoid hw pidx = hw cidx,
5000 	 * which would mean an empty freelist to the chip.
5001 	 */
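	/*
	 * fl->pidx counts individual buffer descriptors while hw_cidx, dbidx,
	 * and sidx are in hardware units of 8 descriptors, hence the "* 8"
	 * and ">> 3" conversions below.
	 */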
5002 	max_pidx = __predict_false(hw_cidx == 0) ? fl->sidx - 1 : hw_cidx - 1;
5003 	if (fl->pidx == max_pidx * 8)
5004 		return (0);
5005 
5006 	d = &fl->desc[fl->pidx];
5007 	sd = &fl->sdesc[fl->pidx];
5008 	rxb = &sc->sge.rx_buf_info[zidx];
5009 
5010 	while (n > 0) {
5011 
5012 		if (sd->cl != NULL) {
5013 
5014 			if (sd->nmbuf == 0) {
5015 				/*
5016 				 * Fast recycle without involving any atomics on
5017 				 * the cluster's metadata (if the cluster has
5018 				 * metadata).  This happens when all frames
5019 				 * received in the cluster were small enough to
5020 				 * fit within a single mbuf each.
5021 				 */
5022 				fl->cl_fast_recycled++;
5023 				goto recycled;
5024 			}
5025 
5026 			/*
5027 			 * Cluster is guaranteed to have metadata.  Clusters
5028 			 * without metadata always take the fast recycle path
5029 			 * when they're recycled.
5030 			 */
5031 			clm = cl_metadata(sd);
5032 			MPASS(clm != NULL);
5033 
5034 			if (atomic_fetchadd_int(&clm->refcount, -1) == 1) {
5035 				fl->cl_recycled++;
5036 				counter_u64_add(extfree_rels, 1);
5037 				goto recycled;
5038 			}
5039 			sd->cl = NULL;	/* gave up my reference */
5040 		}
5041 		MPASS(sd->cl == NULL);
5042 		cl = uma_zalloc(rxb->zone, M_NOWAIT);
5043 		if (__predict_false(cl == NULL)) {
5044 			if (zidx != fl->safe_zidx) {
5045 				zidx = fl->safe_zidx;
5046 				rxb = &sc->sge.rx_buf_info[zidx];
5047 				cl = uma_zalloc(rxb->zone, M_NOWAIT);
5048 			}
5049 			if (cl == NULL)
5050 				break;
5051 		}
5052 		fl->cl_allocated++;
5053 		n--;
5054 
5055 		pa = pmap_kextract((vm_offset_t)cl);
5056 		sd->cl = cl;
5057 		sd->zidx = zidx;
5058 
5059 		if (fl->flags & FL_BUF_PACKING) {
5060 			*d = htobe64(pa | rxb->hwidx2);
5061 			sd->moff = rxb->size2;
5062 		} else {
5063 			*d = htobe64(pa | rxb->hwidx1);
5064 			sd->moff = 0;
5065 		}
5066 recycled:
5067 		sd->nmbuf = 0;
5068 		d++;
5069 		sd++;
5070 		if (__predict_false((++fl->pidx & 7) == 0)) {
5071 			uint16_t pidx = fl->pidx >> 3;
5072 
5073 			if (__predict_false(pidx == fl->sidx)) {
5074 				fl->pidx = 0;
5075 				pidx = 0;
5076 				sd = fl->sdesc;
5077 				d = fl->desc;
5078 			}
5079 			if (n < 8 || pidx == max_pidx)
5080 				break;
5081 
5082 			if (IDXDIFF(pidx, fl->dbidx, fl->sidx) >= 4)
5083 				ring_fl_db(sc, fl);
5084 		}
5085 	}
5086 
5087 	if ((fl->pidx >> 3) != fl->dbidx)
5088 		ring_fl_db(sc, fl);
5089 
5090 	return (FL_RUNNING_LOW(fl) && !(fl->flags & FL_STARVING));
5091 }
5092 
5093 /*
5094  * Attempt to refill all starving freelists.
5095  */
5096 static void
5097 refill_sfl(void *arg)
5098 {
5099 	struct adapter *sc = arg;
5100 	struct sge_fl *fl, *fl_temp;
5101 
5102 	mtx_assert(&sc->sfl_lock, MA_OWNED);
5103 	TAILQ_FOREACH_SAFE(fl, &sc->sfl, link, fl_temp) {
5104 		FL_LOCK(fl);
5105 		refill_fl(sc, fl, 64);
5106 		if (FL_NOT_RUNNING_LOW(fl) || fl->flags & FL_DOOMED) {
5107 			TAILQ_REMOVE(&sc->sfl, fl, link);
5108 			fl->flags &= ~FL_STARVING;
5109 		}
5110 		FL_UNLOCK(fl);
5111 	}
5112 
5113 	if (!TAILQ_EMPTY(&sc->sfl))
5114 		callout_schedule(&sc->sfl_callout, hz / 5);
5115 }
5116 
5117 /*
5118  * Release the driver's reference on all buffers in the given freelist.  Buffers
5119  * with kernel references cannot be freed and will prevent the driver from being
5120  * unloaded safely.
5121  */
5122 void
5123 free_fl_buffers(struct adapter *sc, struct sge_fl *fl)
5124 {
5125 	struct fl_sdesc *sd;
5126 	struct cluster_metadata *clm;
5127 	int i;
5128 
5129 	sd = fl->sdesc;
5130 	for (i = 0; i < fl->sidx * 8; i++, sd++) {
5131 		if (sd->cl == NULL)
5132 			continue;
5133 
5134 		if (sd->nmbuf == 0)
5135 			uma_zfree(sc->sge.rx_buf_info[sd->zidx].zone, sd->cl);
5136 		else if (fl->flags & FL_BUF_PACKING) {
5137 			clm = cl_metadata(sd);
5138 			if (atomic_fetchadd_int(&clm->refcount, -1) == 1) {
5139 				uma_zfree(sc->sge.rx_buf_info[sd->zidx].zone,
5140 				    sd->cl);
5141 				counter_u64_add(extfree_rels, 1);
5142 			}
5143 		}
5144 		sd->cl = NULL;
5145 	}
5146 
5147 	if (fl->flags & FL_BUF_RESUME) {
5148 		m_freem(fl->m0);
5149 		fl->flags &= ~FL_BUF_RESUME;
5150 	}
5151 }
5152 
5153 static inline void
5154 get_pkt_gl(struct mbuf *m, struct sglist *gl)
5155 {
5156 	int rc;
5157 
5158 	M_ASSERTPKTHDR(m);
5159 
5160 	sglist_reset(gl);
5161 	rc = sglist_append_mbuf(gl, m);
5162 	if (__predict_false(rc != 0)) {
5163 		panic("%s: mbuf %p (%d segs) was vetted earlier but now fails "
5164 		    "with %d.", __func__, m, mbuf_nsegs(m), rc);
5165 	}
5166 
5167 	KASSERT(gl->sg_nseg == mbuf_nsegs(m),
5168 	    ("%s: nsegs changed for mbuf %p from %d to %d", __func__, m,
5169 	    mbuf_nsegs(m), gl->sg_nseg));
5170 #if 0	/* vm_wr not readily available here. */
5171 	KASSERT(gl->sg_nseg > 0 && gl->sg_nseg <= max_nsegs_allowed(m, vm_wr),
5172 	    ("%s: %d segments, should have been 1 <= nsegs <= %d", __func__,
5173 		gl->sg_nseg, max_nsegs_allowed(m, vm_wr)));
5174 #endif
5175 }
5176 
5177 /*
5178  * len16 for a txpkt WR with a GL.  Includes the firmware work request header.
5179  */
5180 static inline u_int
5181 txpkt_len16(u_int nsegs, const u_int extra)
5182 {
5183 	u_int n;
5184 
5185 	MPASS(nsegs > 0);
5186 
5187 	nsegs--; /* first segment is part of ulptx_sgl */
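	/*
	 * Each pair of remaining segments needs 3 flits (24B): one flit
	 * for the two lengths plus one flit for each address.  An odd
	 * final segment needs 2 flits.
	 */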
5188 	n = extra + sizeof(struct fw_eth_tx_pkt_wr) +
5189 	    sizeof(struct cpl_tx_pkt_core) +
5190 	    sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1));
5191 
5192 	return (howmany(n, 16));
5193 }
5194 
5195 /*
5196  * len16 for a txpkt_vm WR with a GL.  Includes the firmware work
5197  * request header.
5198  */
5199 static inline u_int
5200 txpkt_vm_len16(u_int nsegs, const u_int extra)
5201 {
5202 	u_int n;
5203 
5204 	MPASS(nsegs > 0);
5205 
5206 	nsegs--; /* first segment is part of ulptx_sgl */
5207 	n = extra + sizeof(struct fw_eth_tx_pkt_vm_wr) +
5208 	    sizeof(struct cpl_tx_pkt_core) +
5209 	    sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1));
5210 
5211 	return (howmany(n, 16));
5212 }
5213 
5214 static inline void
5215 calculate_mbuf_len16(struct mbuf *m, bool vm_wr)
5216 {
5217 	const int lso = sizeof(struct cpl_tx_pkt_lso_core);
5218 	const int tnl_lso = sizeof(struct cpl_tx_tnl_lso);
5219 
5220 	if (vm_wr) {
5221 		if (needs_tso(m))
5222 			set_mbuf_len16(m, txpkt_vm_len16(mbuf_nsegs(m), lso));
5223 		else
5224 			set_mbuf_len16(m, txpkt_vm_len16(mbuf_nsegs(m), 0));
5225 		return;
5226 	}
5227 
5228 	if (needs_tso(m)) {
5229 		if (needs_vxlan_tso(m))
5230 			set_mbuf_len16(m, txpkt_len16(mbuf_nsegs(m), tnl_lso));
5231 		else
5232 			set_mbuf_len16(m, txpkt_len16(mbuf_nsegs(m), lso));
5233 	} else
5234 		set_mbuf_len16(m, txpkt_len16(mbuf_nsegs(m), 0));
5235 }
5236 
5237 /*
5238  * len16 for a txpkts type 0 WR with a GL.  Does not include the firmware work
5239  * request header.
5240  */
5241 static inline u_int
5242 txpkts0_len16(u_int nsegs)
5243 {
5244 	u_int n;
5245 
5246 	MPASS(nsegs > 0);
5247 
5248 	nsegs--; /* first segment is part of ulptx_sgl */
5249 	n = sizeof(struct ulp_txpkt) + sizeof(struct ulptx_idata) +
5250 	    sizeof(struct cpl_tx_pkt_core) + sizeof(struct ulptx_sgl) +
5251 	    8 * ((3 * nsegs) / 2 + (nsegs & 1));
5252 
5253 	return (howmany(n, 16));
5254 }
5255 
5256 /*
5257  * len16 for a txpkts type 1 WR with a GL.  Does not include the firmware work
5258  * request header.
5259  */
5260 static inline u_int
5261 txpkts1_len16(void)
5262 {
5263 	u_int n;
5264 
5265 	n = sizeof(struct cpl_tx_pkt_core) + sizeof(struct ulptx_sgl);
5266 
5267 	return (howmany(n, 16));
5268 }
5269 
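/*
 * Max bytes of immediate payload that fit in 'ndesc' tx descriptors after
 * the work request and CPL headers are accounted for.
 */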
5270 static inline u_int
5271 imm_payload(u_int ndesc)
5272 {
5273 	u_int n;
5274 
5275 	n = ndesc * EQ_ESIZE - sizeof(struct fw_eth_tx_pkt_wr) -
5276 	    sizeof(struct cpl_tx_pkt_core);
5277 
5278 	return (n);
5279 }
5280 
5281 static inline uint64_t
5282 csum_to_ctrl(struct adapter *sc, struct mbuf *m)
5283 {
5284 	uint64_t ctrl;
5285 	int csum_type, l2hlen, l3hlen;
5286 	int x, y;
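	/*
	 * Indexed by [x][y]: x is the L4 protocol (TCP, UDP, neither) and
	 * y is the IP version (IPv4, IPv6), as selected below.
	 */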
5287 	static const int csum_types[3][2] = {
5288 		{TX_CSUM_TCPIP, TX_CSUM_TCPIP6},
5289 		{TX_CSUM_UDPIP, TX_CSUM_UDPIP6},
5290 		{TX_CSUM_IP, 0}
5291 	};
5292 
5293 	M_ASSERTPKTHDR(m);
5294 
5295 	if (!needs_hwcsum(m))
5296 		return (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS);
5297 
5298 	MPASS(m->m_pkthdr.l2hlen >= ETHER_HDR_LEN);
5299 	MPASS(m->m_pkthdr.l3hlen >= sizeof(struct ip));
5300 
5301 	if (needs_vxlan_csum(m)) {
5302 		MPASS(m->m_pkthdr.l4hlen > 0);
5303 		MPASS(m->m_pkthdr.l5hlen > 0);
5304 		MPASS(m->m_pkthdr.inner_l2hlen >= ETHER_HDR_LEN);
5305 		MPASS(m->m_pkthdr.inner_l3hlen >= sizeof(struct ip));
5306 
5307 		l2hlen = m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen +
5308 		    m->m_pkthdr.l4hlen + m->m_pkthdr.l5hlen +
5309 		    m->m_pkthdr.inner_l2hlen - ETHER_HDR_LEN;
5310 		l3hlen = m->m_pkthdr.inner_l3hlen;
5311 	} else {
5312 		l2hlen = m->m_pkthdr.l2hlen - ETHER_HDR_LEN;
5313 		l3hlen = m->m_pkthdr.l3hlen;
5314 	}
5315 
5316 	ctrl = 0;
5317 	if (!needs_l3_csum(m))
5318 		ctrl |= F_TXPKT_IPCSUM_DIS;
5319 
5320 	if (m->m_pkthdr.csum_flags & (CSUM_IP_TCP | CSUM_INNER_IP_TCP |
5321 	    CSUM_IP6_TCP | CSUM_INNER_IP6_TCP))
5322 		x = 0;	/* TCP */
5323 	else if (m->m_pkthdr.csum_flags & (CSUM_IP_UDP | CSUM_INNER_IP_UDP |
5324 	    CSUM_IP6_UDP | CSUM_INNER_IP6_UDP))
5325 		x = 1;	/* UDP */
5326 	else
5327 		x = 2;
5328 
5329 	if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_IP_TCP | CSUM_IP_UDP |
5330 	    CSUM_INNER_IP | CSUM_INNER_IP_TCP | CSUM_INNER_IP_UDP))
5331 		y = 0;	/* IPv4 */
5332 	else {
5333 		MPASS(m->m_pkthdr.csum_flags & (CSUM_IP6_TCP | CSUM_IP6_UDP |
5334 		    CSUM_INNER_IP6_TCP | CSUM_INNER_IP6_UDP));
5335 		y = 1;	/* IPv6 */
5336 	}
5337 	/*
5338 	 * needs_hwcsum returned true earlier so there must be some kind of
5339 	 * checksum to calculate.
5340 	 */
5341 	csum_type = csum_types[x][y];
5342 	MPASS(csum_type != 0);
5343 	if (csum_type == TX_CSUM_IP)
5344 		ctrl |= F_TXPKT_L4CSUM_DIS;
5345 	ctrl |= V_TXPKT_CSUM_TYPE(csum_type) | V_TXPKT_IPHDR_LEN(l3hlen);
5346 	if (chip_id(sc) <= CHELSIO_T5)
5347 		ctrl |= V_TXPKT_ETHHDR_LEN(l2hlen);
5348 	else
5349 		ctrl |= V_T6_TXPKT_ETHHDR_LEN(l2hlen);
5350 
5351 	return (ctrl);
5352 }
5353 
5354 static inline void *
5355 write_lso_cpl(void *cpl, struct mbuf *m0)
5356 {
5357 	struct cpl_tx_pkt_lso_core *lso;
5358 	uint32_t ctrl;
5359 
5360 	KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 &&
5361 	    m0->m_pkthdr.l4hlen > 0,
5362 	    ("%s: mbuf %p needs TSO but missing header lengths",
5363 		__func__, m0));
5364 
5365 	ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) |
5366 	    F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE |
5367 	    V_LSO_ETHHDR_LEN((m0->m_pkthdr.l2hlen - ETHER_HDR_LEN) >> 2) |
5368 	    V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) |
5369 	    V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2);
5370 	if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr))
5371 		ctrl |= F_LSO_IPV6;
5372 
5373 	lso = cpl;
5374 	lso->lso_ctrl = htobe32(ctrl);
5375 	lso->ipid_ofst = htobe16(0);
5376 	lso->mss = htobe16(m0->m_pkthdr.tso_segsz);
5377 	lso->seqno_offset = htobe32(0);
5378 	lso->len = htobe32(m0->m_pkthdr.len);
5379 
5380 	return (lso + 1);
5381 }
5382 
5383 static void *
5384 write_tnl_lso_cpl(void *cpl, struct mbuf *m0)
5385 {
5386 	struct cpl_tx_tnl_lso *tnl_lso = cpl;
5387 	uint32_t ctrl;
5388 
5389 	KASSERT(m0->m_pkthdr.inner_l2hlen > 0 &&
5390 	    m0->m_pkthdr.inner_l3hlen > 0 && m0->m_pkthdr.inner_l4hlen > 0 &&
5391 	    m0->m_pkthdr.inner_l5hlen > 0,
5392 	    ("%s: mbuf %p needs VXLAN_TSO but missing inner header lengths",
5393 		__func__, m0));
5394 	KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 &&
5395 	    m0->m_pkthdr.l4hlen > 0 && m0->m_pkthdr.l5hlen > 0,
5396 	    ("%s: mbuf %p needs VXLAN_TSO but missing outer header lengths",
5397 		__func__, m0));
5398 
5399 	/* Outer headers. */
5400 	ctrl = V_CPL_TX_TNL_LSO_OPCODE(CPL_TX_TNL_LSO) |
5401 	    F_CPL_TX_TNL_LSO_FIRST | F_CPL_TX_TNL_LSO_LAST |
5402 	    V_CPL_TX_TNL_LSO_ETHHDRLENOUT(
5403 		(m0->m_pkthdr.l2hlen - ETHER_HDR_LEN) >> 2) |
5404 	    V_CPL_TX_TNL_LSO_IPHDRLENOUT(m0->m_pkthdr.l3hlen >> 2) |
5405 	    F_CPL_TX_TNL_LSO_IPLENSETOUT;
5406 	if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr))
5407 		ctrl |= F_CPL_TX_TNL_LSO_IPV6OUT;
5408 	else {
5409 		ctrl |= F_CPL_TX_TNL_LSO_IPHDRCHKOUT |
5410 		    F_CPL_TX_TNL_LSO_IPIDINCOUT;
5411 	}
5412 	tnl_lso->op_to_IpIdSplitOut = htobe32(ctrl);
5413 	tnl_lso->IpIdOffsetOut = 0;
5414 	tnl_lso->UdpLenSetOut_to_TnlHdrLen =
5415 		htobe16(F_CPL_TX_TNL_LSO_UDPCHKCLROUT |
5416 		    F_CPL_TX_TNL_LSO_UDPLENSETOUT |
5417 		    V_CPL_TX_TNL_LSO_TNLHDRLEN(m0->m_pkthdr.l2hlen +
5418 			m0->m_pkthdr.l3hlen + m0->m_pkthdr.l4hlen +
5419 			m0->m_pkthdr.l5hlen) |
5420 		    V_CPL_TX_TNL_LSO_TNLTYPE(TX_TNL_TYPE_VXLAN));
5421 	tnl_lso->r1 = 0;
5422 
5423 	/* Inner headers. */
5424 	ctrl = V_CPL_TX_TNL_LSO_ETHHDRLEN(
5425 	    (m0->m_pkthdr.inner_l2hlen - ETHER_HDR_LEN) >> 2) |
5426 	    V_CPL_TX_TNL_LSO_IPHDRLEN(m0->m_pkthdr.inner_l3hlen >> 2) |
5427 	    V_CPL_TX_TNL_LSO_TCPHDRLEN(m0->m_pkthdr.inner_l4hlen >> 2);
5428 	if (m0->m_pkthdr.inner_l3hlen == sizeof(struct ip6_hdr))
5429 		ctrl |= F_CPL_TX_TNL_LSO_IPV6;
5430 	tnl_lso->Flow_to_TcpHdrLen = htobe32(ctrl);
5431 	tnl_lso->IpIdOffset = 0;
5432 	tnl_lso->IpIdSplit_to_Mss =
5433 	    htobe16(V_CPL_TX_TNL_LSO_MSS(m0->m_pkthdr.tso_segsz));
5434 	tnl_lso->TCPSeqOffset = 0;
5435 	tnl_lso->EthLenOffset_Size =
5436 	    htobe32(V_CPL_TX_TNL_LSO_SIZE(m0->m_pkthdr.len));
5437 
5438 	return (tnl_lso + 1);
5439 }
5440 
5441 #define VM_TX_L2HDR_LEN	16	/* ethmacdst to vlantci */
5442 
5443 /*
5444  * Write a VM txpkt WR for this packet to the hardware descriptors, update the
5445  * software descriptor, and advance the pidx.  It is guaranteed that enough
5446  * descriptors are available.
5447  *
5448  * The return value is the # of hardware descriptors used.
5449  */
5450 static u_int
5451 write_txpkt_vm_wr(struct adapter *sc, struct sge_txq *txq, struct mbuf *m0)
5452 {
5453 	struct sge_eq *eq;
5454 	struct fw_eth_tx_pkt_vm_wr *wr;
5455 	struct tx_sdesc *txsd;
5456 	struct cpl_tx_pkt_core *cpl;
5457 	uint32_t ctrl;	/* used in many unrelated places */
5458 	uint64_t ctrl1;
5459 	int len16, ndesc, pktlen;
5460 	caddr_t dst;
5461 
5462 	TXQ_LOCK_ASSERT_OWNED(txq);
5463 	M_ASSERTPKTHDR(m0);
5464 
5465 	len16 = mbuf_len16(m0);
5466 	pktlen = m0->m_pkthdr.len;
5467 	ctrl = sizeof(struct cpl_tx_pkt_core);
5468 	if (needs_tso(m0))
5469 		ctrl += sizeof(struct cpl_tx_pkt_lso_core);
5470 	ndesc = tx_len16_to_desc(len16);
5471 
5472 	/* Firmware work request header */
5473 	eq = &txq->eq;
5474 	wr = (void *)&eq->desc[eq->pidx];
5475 	wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_VM_WR) |
5476 	    V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl));
5477 
5478 	ctrl = V_FW_WR_LEN16(len16);
5479 	wr->equiq_to_len16 = htobe32(ctrl);
5480 	wr->r3[0] = 0;
5481 	wr->r3[1] = 0;
5482 
5483 	/*
5484 	 * Copy over ethmacdst, ethmacsrc, ethtype, and vlantci.
5485 	 * vlantci is ignored unless the ethtype is 0x8100, so it's
5486 	 * simpler to always copy it rather than making it
5487 	 * conditional.  Also, it seems that we do not have to set
5488 	 * vlantci or fake the ethtype when doing VLAN tag insertion.
5489 	 */
5490 	m_copydata(m0, 0, VM_TX_L2HDR_LEN, wr->ethmacdst);
5491 
5492 	if (needs_tso(m0)) {
5493 		cpl = write_lso_cpl(wr + 1, m0);
5494 		txq->tso_wrs++;
5495 	} else
5496 		cpl = (void *)(wr + 1);
5497 
5498 	/* Checksum offload */
5499 	ctrl1 = csum_to_ctrl(sc, m0);
5500 	if (ctrl1 != (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS))
5501 		txq->txcsum++;	/* some hardware assistance provided */
5502 
5503 	/* VLAN tag insertion */
5504 	if (needs_vlan_insertion(m0)) {
5505 		ctrl1 |= F_TXPKT_VLAN_VLD |
5506 		    V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag);
5507 		txq->vlan_insertion++;
5508 	} else if (sc->vlan_id)
5509 		ctrl1 |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(sc->vlan_id);
5510 
5511 	/* CPL header */
5512 	cpl->ctrl0 = txq->cpl_ctrl0;
5513 	cpl->pack = 0;
5514 	cpl->len = htobe16(pktlen);
5515 	cpl->ctrl1 = htobe64(ctrl1);
5516 
5517 	/* SGL */
5518 	dst = (void *)(cpl + 1);
5519 
5520 	/*
5521 	 * A packet using TSO will use up an entire descriptor for the
5522 	 * firmware work request header, LSO CPL, and TX_PKT_XT CPL.
5523 	 * If this descriptor is the last descriptor in the ring, wrap
5524 	 * around to the front of the ring explicitly for the start of
5525 	 * the sgl.
5526 	 */
5527 	if (dst == (void *)&eq->desc[eq->sidx]) {
5528 		dst = (void *)&eq->desc[0];
5529 		write_gl_to_txd(txq, m0, &dst, 0);
5530 	} else
5531 		write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx);
5532 	txq->sgl_wrs++;
5533 	txq->txpkt_wrs++;
5534 
5535 	txsd = &txq->sdesc[eq->pidx];
5536 	txsd->m = m0;
5537 	txsd->desc_used = ndesc;
5538 
5539 	return (ndesc);
5540 }
5541 
5542 /*
5543  * Write a raw WR to the hardware descriptors, update the software
5544  * descriptor, and advance the pidx.  It is guaranteed that enough
5545  * descriptors are available.
5546  *
5547  * The return value is the # of hardware descriptors used.
5548  */
5549 static u_int
5550 write_raw_wr(struct sge_txq *txq, void *wr, struct mbuf *m0, u_int available)
5551 {
5552 	struct sge_eq *eq = &txq->eq;
5553 	struct tx_sdesc *txsd;
5554 	struct mbuf *m;
5555 	caddr_t dst;
5556 	int len16, ndesc;
5557 
5558 	len16 = mbuf_len16(m0);
5559 	ndesc = tx_len16_to_desc(len16);
5560 	MPASS(ndesc <= available);
5561 
5562 	dst = wr;
5563 	for (m = m0; m != NULL; m = m->m_next)
5564 		copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len);
5565 
5566 	txq->raw_wrs++;
5567 
5568 	txsd = &txq->sdesc[eq->pidx];
5569 	txsd->m = m0;
5570 	txsd->desc_used = ndesc;
5571 
5572 	return (ndesc);
5573 }
5574 
5575 /*
5576  * Write a txpkt WR for this packet to the hardware descriptors, update the
5577  * software descriptor, and advance the pidx.  It is guaranteed that enough
5578  * descriptors are available.
5579  *
5580  * The return value is the # of hardware descriptors used.
5581  */
5582 static u_int
5583 write_txpkt_wr(struct adapter *sc, struct sge_txq *txq, struct mbuf *m0,
5584     u_int available)
5585 {
5586 	struct sge_eq *eq;
5587 	struct fw_eth_tx_pkt_wr *wr;
5588 	struct tx_sdesc *txsd;
5589 	struct cpl_tx_pkt_core *cpl;
5590 	uint32_t ctrl;	/* used in many unrelated places */
5591 	uint64_t ctrl1;
5592 	int len16, ndesc, pktlen, nsegs;
5593 	caddr_t dst;
5594 
5595 	TXQ_LOCK_ASSERT_OWNED(txq);
5596 	M_ASSERTPKTHDR(m0);
5597 
5598 	len16 = mbuf_len16(m0);
5599 	nsegs = mbuf_nsegs(m0);
5600 	pktlen = m0->m_pkthdr.len;
5601 	ctrl = sizeof(struct cpl_tx_pkt_core);
5602 	if (needs_tso(m0)) {
5603 		if (needs_vxlan_tso(m0))
5604 			ctrl += sizeof(struct cpl_tx_tnl_lso);
5605 		else
5606 			ctrl += sizeof(struct cpl_tx_pkt_lso_core);
5607 	} else if (!(mbuf_cflags(m0) & MC_NOMAP) && pktlen <= imm_payload(2) &&
5608 	    available >= 2) {
5609 		/* Immediate data.  Recalculate len16 and set nsegs to 0. */
5610 		ctrl += pktlen;
5611 		len16 = howmany(sizeof(struct fw_eth_tx_pkt_wr) +
5612 		    sizeof(struct cpl_tx_pkt_core) + pktlen, 16);
5613 		nsegs = 0;
5614 	}
5615 	ndesc = tx_len16_to_desc(len16);
5616 	MPASS(ndesc <= available);
5617 
5618 	/* Firmware work request header */
5619 	eq = &txq->eq;
5620 	wr = (void *)&eq->desc[eq->pidx];
5621 	wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
5622 	    V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl));
5623 
5624 	ctrl = V_FW_WR_LEN16(len16);
5625 	wr->equiq_to_len16 = htobe32(ctrl);
5626 	wr->r3 = 0;
5627 
5628 	if (needs_tso(m0)) {
5629 		if (needs_vxlan_tso(m0)) {
5630 			cpl = write_tnl_lso_cpl(wr + 1, m0);
5631 			txq->vxlan_tso_wrs++;
5632 		} else {
5633 			cpl = write_lso_cpl(wr + 1, m0);
5634 			txq->tso_wrs++;
5635 		}
5636 	} else
5637 		cpl = (void *)(wr + 1);
5638 
5639 	/* Checksum offload */
5640 	ctrl1 = csum_to_ctrl(sc, m0);
5641 	if (ctrl1 != (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS)) {
5642 		/* some hardware assistance provided */
5643 		if (needs_vxlan_csum(m0))
5644 			txq->vxlan_txcsum++;
5645 		else
5646 			txq->txcsum++;
5647 	}
5648 
5649 	/* VLAN tag insertion */
5650 	if (needs_vlan_insertion(m0)) {
5651 		ctrl1 |= F_TXPKT_VLAN_VLD |
5652 		    V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag);
5653 		txq->vlan_insertion++;
5654 	}
5655 
5656 	/* CPL header */
5657 	cpl->ctrl0 = txq->cpl_ctrl0;
5658 	cpl->pack = 0;
5659 	cpl->len = htobe16(pktlen);
5660 	cpl->ctrl1 = htobe64(ctrl1);
5661 
5662 	/* SGL */
5663 	dst = (void *)(cpl + 1);
5664 	if (__predict_false((uintptr_t)dst == (uintptr_t)&eq->desc[eq->sidx]))
5665 		dst = (caddr_t)&eq->desc[0];
5666 	if (nsegs > 0) {
5667 
5668 		write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx);
5669 		txq->sgl_wrs++;
5670 	} else {
5671 		struct mbuf *m;
5672 
5673 		for (m = m0; m != NULL; m = m->m_next) {
5674 			copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len);
5675 #ifdef INVARIANTS
5676 			pktlen -= m->m_len;
5677 #endif
5678 		}
5679 #ifdef INVARIANTS
5680 		KASSERT(pktlen == 0, ("%s: %d bytes left.", __func__, pktlen));
5681 #endif
5682 		txq->imm_wrs++;
5683 	}
5684 
5685 	txq->txpkt_wrs++;
5686 
5687 	txsd = &txq->sdesc[eq->pidx];
5688 	txsd->m = m0;
5689 	txsd->desc_used = ndesc;
5690 
5691 	return (ndesc);
5692 }
5693 
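/*
 * Returns true if m's L2 header differs from the one saved in txp, in which
 * case the packet cannot join the coalesced work request being built.
 */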
5694 static inline bool
5695 cmp_l2hdr(struct txpkts *txp, struct mbuf *m)
5696 {
5697 	int len;
5698 
5699 	MPASS(txp->npkt > 0);
5700 	MPASS(m->m_len >= VM_TX_L2HDR_LEN);
5701 
5702 	if (txp->ethtype == be16toh(ETHERTYPE_VLAN))
5703 		len = VM_TX_L2HDR_LEN;
5704 	else
5705 		len = sizeof(struct ether_header);
5706 
5707 	return (memcmp(m->m_data, &txp->ethmacdst[0], len) != 0);
5708 }
5709 
5710 static inline void
5711 save_l2hdr(struct txpkts *txp, struct mbuf *m)
5712 {
5713 	MPASS(m->m_len >= VM_TX_L2HDR_LEN);
5714 
5715 	memcpy(&txp->ethmacdst[0], mtod(m, const void *), VM_TX_L2HDR_LEN);
5716 }
5717 
5718 static int
5719 add_to_txpkts_vf(struct adapter *sc, struct sge_txq *txq, struct mbuf *m,
5720     int avail, bool *send)
5721 {
5722 	struct txpkts *txp = &txq->txp;
5723 
5724 	/* Cannot have TSO and coalesce at the same time. */
5725 	if (cannot_use_txpkts(m)) {
5726 cannot_coalesce:
5727 		*send = txp->npkt > 0;
5728 		return (EINVAL);
5729 	}
5730 
5731 	/* VF allows coalescing of type 1 (1 GL) only */
5732 	if (mbuf_nsegs(m) > 1)
5733 		goto cannot_coalesce;
5734 
5735 	*send = false;
5736 	if (txp->npkt > 0) {
5737 		MPASS(tx_len16_to_desc(txp->len16) <= avail);
5738 		MPASS(txp->npkt < txp->max_npkt);
5739 		MPASS(txp->wr_type == 1);	/* VF supports type 1 only */
5740 
5741 		if (tx_len16_to_desc(txp->len16 + txpkts1_len16()) > avail) {
5742 retry_after_send:
5743 			*send = true;
5744 			return (EAGAIN);
5745 		}
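		/* plen is a 16-bit field in the work request. */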
5746 		if (m->m_pkthdr.len + txp->plen > 65535)
5747 			goto retry_after_send;
5748 		if (cmp_l2hdr(txp, m))
5749 			goto retry_after_send;
5750 
5751 		txp->len16 += txpkts1_len16();
5752 		txp->plen += m->m_pkthdr.len;
5753 		txp->mb[txp->npkt++] = m;
5754 		if (txp->npkt == txp->max_npkt)
5755 			*send = true;
5756 	} else {
5757 		txp->len16 = howmany(sizeof(struct fw_eth_tx_pkts_vm_wr), 16) +
5758 		    txpkts1_len16();
5759 		if (tx_len16_to_desc(txp->len16) > avail)
5760 			goto cannot_coalesce;
5761 		txp->npkt = 1;
5762 		txp->wr_type = 1;
5763 		txp->plen = m->m_pkthdr.len;
5764 		txp->mb[0] = m;
5765 		save_l2hdr(txp, m);
5766 	}
5767 	return (0);
5768 }
5769 
5770 static int
5771 add_to_txpkts_pf(struct adapter *sc, struct sge_txq *txq, struct mbuf *m,
5772     int avail, bool *send)
5773 {
5774 	struct txpkts *txp = &txq->txp;
5775 	int nsegs;
5776 
5777 	MPASS(!(sc->flags & IS_VF));
5778 
5779 	/* Cannot have TSO and coalesce at the same time. */
5780 	if (cannot_use_txpkts(m)) {
5781 cannot_coalesce:
5782 		*send = txp->npkt > 0;
5783 		return (EINVAL);
5784 	}
5785 
5786 	*send = false;
5787 	nsegs = mbuf_nsegs(m);
5788 	if (txp->npkt == 0) {
5789 		if (m->m_pkthdr.len > 65535)
5790 			goto cannot_coalesce;
5791 		if (nsegs > 1) {
5792 			txp->wr_type = 0;
5793 			txp->len16 =
5794 			    howmany(sizeof(struct fw_eth_tx_pkts_wr), 16) +
5795 			    txpkts0_len16(nsegs);
5796 		} else {
5797 			txp->wr_type = 1;
5798 			txp->len16 =
5799 			    howmany(sizeof(struct fw_eth_tx_pkts_wr), 16) +
5800 			    txpkts1_len16();
5801 		}
5802 		if (tx_len16_to_desc(txp->len16) > avail)
5803 			goto cannot_coalesce;
5804 		txp->npkt = 1;
5805 		txp->plen = m->m_pkthdr.len;
5806 		txp->mb[0] = m;
5807 	} else {
5808 		MPASS(tx_len16_to_desc(txp->len16) <= avail);
5809 		MPASS(txp->npkt < txp->max_npkt);
5810 
5811 		if (m->m_pkthdr.len + txp->plen > 65535) {
5812 retry_after_send:
5813 			*send = true;
5814 			return (EAGAIN);
5815 		}
5816 
5817 		MPASS(txp->wr_type == 0 || txp->wr_type == 1);
5818 		if (txp->wr_type == 0) {
5819 			if (tx_len16_to_desc(txp->len16 +
5820 			    txpkts0_len16(nsegs)) > min(avail, SGE_MAX_WR_NDESC))
5821 				goto retry_after_send;
5822 			txp->len16 += txpkts0_len16(nsegs);
5823 		} else {
5824 			if (nsegs != 1)
5825 				goto retry_after_send;
5826 			if (tx_len16_to_desc(txp->len16 + txpkts1_len16()) >
5827 			    avail)
5828 				goto retry_after_send;
5829 			txp->len16 += txpkts1_len16();
5830 		}
5831 
5832 		txp->plen += m->m_pkthdr.len;
5833 		txp->mb[txp->npkt++] = m;
5834 		if (txp->npkt == txp->max_npkt)
5835 			*send = true;
5836 	}
5837 	return (0);
5838 }
5839 
5840 /*
5841  * Write a txpkts WR for the packets in txp to the hardware descriptors, update
5842  * the software descriptor, and advance the pidx.  It is guaranteed that enough
5843  * descriptors are available.
5844  *
5845  * The return value is the # of hardware descriptors used.
5846  */
5847 static u_int
5848 write_txpkts_wr(struct adapter *sc, struct sge_txq *txq)
5849 {
5850 	const struct txpkts *txp = &txq->txp;
5851 	struct sge_eq *eq = &txq->eq;
5852 	struct fw_eth_tx_pkts_wr *wr;
5853 	struct tx_sdesc *txsd;
5854 	struct cpl_tx_pkt_core *cpl;
5855 	uint64_t ctrl1;
5856 	int ndesc, i, checkwrap;
5857 	struct mbuf *m, *last;
5858 	void *flitp;
5859 
5860 	TXQ_LOCK_ASSERT_OWNED(txq);
5861 	MPASS(txp->npkt > 0);
5862 	MPASS(txp->len16 <= howmany(SGE_MAX_WR_LEN, 16));
5863 
5864 	wr = (void *)&eq->desc[eq->pidx];
5865 	wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR));
5866 	wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(txp->len16));
5867 	wr->plen = htobe16(txp->plen);
5868 	wr->npkt = txp->npkt;
5869 	wr->r3 = 0;
5870 	wr->type = txp->wr_type;
5871 	flitp = wr + 1;
5872 
5873 	/*
5874 	 * At this point we are 16B into a hardware descriptor.  If checkwrap is
5875 	 * set then we know the WR is going to wrap around somewhere.  We'll
5876 	 * check for that at appropriate points.
5877 	 */
5878 	ndesc = tx_len16_to_desc(txp->len16);
5879 	last = NULL;
5880 	checkwrap = eq->sidx - ndesc < eq->pidx;
5881 	for (i = 0; i < txp->npkt; i++) {
5882 		m = txp->mb[i];
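		/*
		 * Type 0 wraps each packet's CPL in a ULP_TX_PKT master
		 * command and subcommand (required for multi-segment
		 * packets); type 1 writes the CPL directly and handles
		 * single-segment packets only.
		 */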
5883 		if (txp->wr_type == 0) {
5884 			struct ulp_txpkt *ulpmc;
5885 			struct ulptx_idata *ulpsc;
5886 
5887 			/* ULP master command */
5888 			ulpmc = flitp;
5889 			ulpmc->cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
5890 			    V_ULP_TXPKT_DEST(0) | V_ULP_TXPKT_FID(eq->iqid));
5891 			ulpmc->len = htobe32(txpkts0_len16(mbuf_nsegs(m)));
5892 
5893 			/* ULP subcommand */
5894 			ulpsc = (void *)(ulpmc + 1);
5895 			ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
5896 			    F_ULP_TX_SC_MORE);
5897 			ulpsc->len = htobe32(sizeof(struct cpl_tx_pkt_core));
5898 
5899 			cpl = (void *)(ulpsc + 1);
5900 			if (checkwrap &&
5901 			    (uintptr_t)cpl == (uintptr_t)&eq->desc[eq->sidx])
5902 				cpl = (void *)&eq->desc[0];
5903 		} else {
5904 			cpl = flitp;
5905 		}
5906 
5907 		/* Checksum offload */
5908 		ctrl1 = csum_to_ctrl(sc, m);
5909 		if (ctrl1 != (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS)) {
5910 			/* some hardware assistance provided */
5911 			if (needs_vxlan_csum(m))
5912 				txq->vxlan_txcsum++;
5913 			else
5914 				txq->txcsum++;
5915 		}
5916 
5917 		/* VLAN tag insertion */
5918 		if (needs_vlan_insertion(m)) {
5919 			ctrl1 |= F_TXPKT_VLAN_VLD |
5920 			    V_TXPKT_VLAN(m->m_pkthdr.ether_vtag);
5921 			txq->vlan_insertion++;
5922 		}
5923 
5924 		/* CPL header */
5925 		cpl->ctrl0 = txq->cpl_ctrl0;
5926 		cpl->pack = 0;
5927 		cpl->len = htobe16(m->m_pkthdr.len);
5928 		cpl->ctrl1 = htobe64(ctrl1);
5929 
5930 		flitp = cpl + 1;
5931 		if (checkwrap &&
5932 		    (uintptr_t)flitp == (uintptr_t)&eq->desc[eq->sidx])
5933 			flitp = (void *)&eq->desc[0];
5934 
5935 		write_gl_to_txd(txq, m, (caddr_t *)(&flitp), checkwrap);
5936 
5937 		if (last != NULL)
5938 			last->m_nextpkt = m;
5939 		last = m;
5940 	}
5941 
5942 	txq->sgl_wrs++;
5943 	if (txp->wr_type == 0) {
5944 		txq->txpkts0_pkts += txp->npkt;
5945 		txq->txpkts0_wrs++;
5946 	} else {
5947 		txq->txpkts1_pkts += txp->npkt;
5948 		txq->txpkts1_wrs++;
5949 	}
5950 
5951 	txsd = &txq->sdesc[eq->pidx];
5952 	txsd->m = txp->mb[0];
5953 	txsd->desc_used = ndesc;
5954 
5955 	return (ndesc);
5956 }
5957 
5958 static u_int
5959 write_txpkts_vm_wr(struct adapter *sc, struct sge_txq *txq)
5960 {
5961 	const struct txpkts *txp = &txq->txp;
5962 	struct sge_eq *eq = &txq->eq;
5963 	struct fw_eth_tx_pkts_vm_wr *wr;
5964 	struct tx_sdesc *txsd;
5965 	struct cpl_tx_pkt_core *cpl;
5966 	uint64_t ctrl1;
5967 	int ndesc, i;
5968 	struct mbuf *m, *last;
5969 	void *flitp;
5970 
5971 	TXQ_LOCK_ASSERT_OWNED(txq);
5972 	MPASS(txp->npkt > 0);
5973 	MPASS(txp->wr_type == 1);	/* VF supports type 1 only */
5974 	MPASS(txp->mb[0] != NULL);
5975 	MPASS(txp->len16 <= howmany(SGE_MAX_WR_LEN, 16));
5976 
5977 	wr = (void *)&eq->desc[eq->pidx];
5978 	wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_VM_WR));
5979 	wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(txp->len16));
5980 	wr->r3 = 0;
5981 	wr->plen = htobe16(txp->plen);
5982 	wr->npkt = txp->npkt;
5983 	wr->r4 = 0;
5984 	memcpy(&wr->ethmacdst[0], &txp->ethmacdst[0], 16);
5985 	flitp = wr + 1;
5986 
5987 	/*
5988 	 * At this point we are 32B into a hardware descriptor.  Each mbuf in
5989 	 * the WR will take 32B so we check for the end of the descriptor ring
5990 	 * before writing odd mbufs (mb[1], 3, 5, ..)
5991 	 */
5992 	ndesc = tx_len16_to_desc(txp->len16);
5993 	last = NULL;
5994 	for (i = 0; i < txp->npkt; i++) {
5995 		m = txp->mb[i];
5996 		if (i & 1 && (uintptr_t)flitp == (uintptr_t)&eq->desc[eq->sidx])
5997 			flitp = &eq->desc[0];
5998 		cpl = flitp;
5999 
6000 		/* Checksum offload */
6001 		ctrl1 = csum_to_ctrl(sc, m);
6002 		if (ctrl1 != (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS))
6003 			txq->txcsum++;	/* some hardware assistance provided */
6004 
6005 		/* VLAN tag insertion */
6006 		if (needs_vlan_insertion(m)) {
6007 			ctrl1 |= F_TXPKT_VLAN_VLD |
6008 			    V_TXPKT_VLAN(m->m_pkthdr.ether_vtag);
6009 			txq->vlan_insertion++;
6010 		} else if (sc->vlan_id)
6011 			ctrl1 |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(sc->vlan_id);
6012 
6013 		/* CPL header */
6014 		cpl->ctrl0 = txq->cpl_ctrl0;
6015 		cpl->pack = 0;
6016 		cpl->len = htobe16(m->m_pkthdr.len);
6017 		cpl->ctrl1 = htobe64(ctrl1);
6018 
6019 		flitp = cpl + 1;
6020 		MPASS(mbuf_nsegs(m) == 1);
6021 		write_gl_to_txd(txq, m, (caddr_t *)(&flitp), 0);
6022 
6023 		if (last != NULL)
6024 			last->m_nextpkt = m;
6025 		last = m;
6026 	}
6027 
6028 	txq->sgl_wrs++;
6029 	txq->txpkts1_pkts += txp->npkt;
6030 	txq->txpkts1_wrs++;
6031 
6032 	txsd = &txq->sdesc[eq->pidx];
6033 	txsd->m = txp->mb[0];
6034 	txsd->desc_used = ndesc;
6035 
6036 	return (ndesc);
6037 }
6038 
6039 /*
6040  * If the SGL ends on an address that is not 16 byte aligned, this function will
6041  * add a 0 filled flit at the end.
6042  */
6043 static void
6044 write_gl_to_txd(struct sge_txq *txq, struct mbuf *m, caddr_t *to, int checkwrap)
6045 {
6046 	struct sge_eq *eq = &txq->eq;
6047 	struct sglist *gl = txq->gl;
6048 	struct sglist_seg *seg;
6049 	__be64 *flitp, *wrap;
6050 	struct ulptx_sgl *usgl;
6051 	int i, nflits, nsegs;
6052 
6053 	KASSERT(((uintptr_t)(*to) & 0xf) == 0,
6054 	    ("%s: SGL must start at a 16 byte boundary: %p", __func__, *to));
6055 	MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]);
6056 	MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]);
6057 
6058 	get_pkt_gl(m, gl);
6059 	nsegs = gl->sg_nseg;
6060 	MPASS(nsegs > 0);
6061 
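	/*
	 * 2 flits for the ulptx_sgl header (cmd/nsge, len0, addr0), then
	 * 3 flits per pair of remaining segments and 2 for an odd last one.
	 */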
6062 	nflits = (3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1) + 2;
6063 	flitp = (__be64 *)(*to);
6064 	wrap = (__be64 *)(&eq->desc[eq->sidx]);
6065 	seg = &gl->sg_segs[0];
6066 	usgl = (void *)flitp;
6067 
6068 	/*
6069 	 * We start at a 16 byte boundary somewhere inside the tx descriptor
6070 	 * ring, so we're at least 16 bytes away from the status page.  There is
6071 	 * no chance of a wrap around in the middle of usgl (which is 16 bytes).
6072 	 */
6073 
6074 	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
6075 	    V_ULPTX_NSGE(nsegs));
6076 	usgl->len0 = htobe32(seg->ss_len);
6077 	usgl->addr0 = htobe64(seg->ss_paddr);
6078 	seg++;
6079 
6080 	if (checkwrap == 0 || (uintptr_t)(flitp + nflits) <= (uintptr_t)wrap) {
6081 
6082 		/* Won't wrap around at all */
6083 
6084 		for (i = 0; i < nsegs - 1; i++, seg++) {
6085 			usgl->sge[i / 2].len[i & 1] = htobe32(seg->ss_len);
6086 			usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ss_paddr);
6087 		}
6088 		if (i & 1)
6089 			usgl->sge[i / 2].len[1] = htobe32(0);
6090 		flitp += nflits;
6091 	} else {
6092 
6093 		/* Will wrap somewhere in the rest of the SGL */
6094 
6095 		/* 2 flits already written, write the rest flit by flit */
6096 		flitp = (void *)(usgl + 1);
6097 		for (i = 0; i < nflits - 2; i++) {
6098 			if (flitp == wrap)
6099 				flitp = (void *)eq->desc;
6100 			*flitp++ = get_flit(seg, nsegs - 1, i);
6101 		}
6102 	}
6103 
6104 	if (nflits & 1) {
6105 		MPASS(((uintptr_t)flitp) & 0xf);
6106 		*flitp++ = 0;
6107 	}
6108 
6109 	MPASS((((uintptr_t)flitp) & 0xf) == 0);
6110 	if (__predict_false(flitp == wrap))
6111 		*to = (void *)eq->desc;
6112 	else
6113 		*to = (void *)flitp;
6114 }
6115 
6116 static inline void
6117 copy_to_txd(struct sge_eq *eq, caddr_t from, caddr_t *to, int len)
6118 {
6119 
6120 	MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]);
6121 	MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]);
6122 
6123 	if (__predict_true((uintptr_t)(*to) + len <=
6124 	    (uintptr_t)&eq->desc[eq->sidx])) {
6125 		bcopy(from, *to, len);
6126 		(*to) += len;
6127 	} else {
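		/* The copy straddles the end of the ring; do it in two parts. */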
6128 		int portion = (uintptr_t)&eq->desc[eq->sidx] - (uintptr_t)(*to);
6129 
6130 		bcopy(from, *to, portion);
6131 		from += portion;
6132 		portion = len - portion;	/* remaining */
6133 		bcopy(from, (void *)eq->desc, portion);
6134 		(*to) = (caddr_t)eq->desc + portion;
6135 	}
6136 }
6137 
6138 static inline void
6139 ring_eq_db(struct adapter *sc, struct sge_eq *eq, u_int n)
6140 {
6141 	u_int db;
6142 
6143 	MPASS(n > 0);
6144 
6145 	db = eq->doorbells;
6146 	if (n > 1)
6147 		clrbit(&db, DOORBELL_WCWR);
6148 	wmb();
6149 
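	/*
	 * Use the lowest-numbered doorbell mechanism still set in db.
	 * DOORBELL_WCWR was cleared above for n > 1 because it copies
	 * exactly one descriptor.
	 */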
6150 	switch (ffs(db) - 1) {
6151 	case DOORBELL_UDB:
6152 		*eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(n));
6153 		break;
6154 
6155 	case DOORBELL_WCWR: {
6156 		volatile uint64_t *dst, *src;
6157 		int i;
6158 
6159 		/*
6160 		 * Queues whose 128B doorbell segment fits in the page do not
6161 		 * use relative qid (udb_qid is always 0).  Only queues with
6162 		 * doorbell segments can do WCWR.
6163 		 */
6164 		KASSERT(eq->udb_qid == 0 && n == 1,
6165 		    ("%s: inappropriate doorbell (0x%x, %d, %d) for eq %p",
6166 		    __func__, eq->doorbells, n, eq->dbidx, eq));
6167 
6168 		dst = (volatile void *)((uintptr_t)eq->udb + UDBS_WR_OFFSET -
6169 		    UDBS_DB_OFFSET);
6170 		i = eq->dbidx;
6171 		src = (void *)&eq->desc[i];
6172 		while (src != (void *)&eq->desc[i + 1])
6173 			*dst++ = *src++;
6174 		wmb();
6175 		break;
6176 	}
6177 
6178 	case DOORBELL_UDBWC:
6179 		*eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(n));
6180 		wmb();
6181 		break;
6182 
6183 	case DOORBELL_KDB:
6184 		t4_write_reg(sc, sc->sge_kdoorbell_reg,
6185 		    V_QID(eq->cntxt_id) | V_PIDX(n));
6186 		break;
6187 	}
6188 
6189 	IDXINCR(eq->dbidx, n, eq->sidx);
6190 }
6191 
6192 static inline u_int
6193 reclaimable_tx_desc(struct sge_eq *eq)
6194 {
6195 	uint16_t hw_cidx;
6196 
6197 	hw_cidx = read_hw_cidx(eq);
6198 	return (IDXDIFF(hw_cidx, eq->cidx, eq->sidx));
6199 }
6200 
6201 static inline u_int
6202 total_available_tx_desc(struct sge_eq *eq)
6203 {
6204 	uint16_t hw_cidx, pidx;
6205 
6206 	hw_cidx = read_hw_cidx(eq);
6207 	pidx = eq->pidx;
6208 
6209 	if (pidx == hw_cidx)
6210 		return (eq->sidx - 1);
6211 	else
6212 		return (IDXDIFF(hw_cidx, pidx, eq->sidx) - 1);
6213 }
6214 
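/*
 * The queue's status page (which carries the hardware cidx) lives just past
 * the last descriptor in the ring.
 */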
6215 static inline uint16_t
6216 read_hw_cidx(struct sge_eq *eq)
6217 {
6218 	struct sge_qstat *spg = (void *)&eq->desc[eq->sidx];
6219 	uint16_t cidx = spg->cidx;	/* stable snapshot */
6220 
6221 	return (be16toh(cidx));
6222 }
6223 
6224 /*
6225  * Reclaim 'n' descriptors approximately.
6226  */
6227 static u_int
6228 reclaim_tx_descs(struct sge_txq *txq, u_int n)
6229 {
6230 	struct tx_sdesc *txsd;
6231 	struct sge_eq *eq = &txq->eq;
6232 	u_int can_reclaim, reclaimed;
6233 
6234 	TXQ_LOCK_ASSERT_OWNED(txq);
6235 	MPASS(n > 0);
6236 
6237 	reclaimed = 0;
6238 	can_reclaim = reclaimable_tx_desc(eq);
6239 	while (can_reclaim && reclaimed < n) {
6240 		int ndesc;
6241 		struct mbuf *m, *nextpkt;
6242 
6243 		txsd = &txq->sdesc[eq->cidx];
6244 		ndesc = txsd->desc_used;
6245 
6246 		/* Firmware doesn't return "partial" credits. */
6247 		KASSERT(can_reclaim >= ndesc,
6248 		    ("%s: unexpected number of credits: %d, %d",
6249 		    __func__, can_reclaim, ndesc));
6250 		KASSERT(ndesc != 0,
6251 		    ("%s: descriptor with no credits: cidx %d",
6252 		    __func__, eq->cidx));
6253 
6254 		for (m = txsd->m; m != NULL; m = nextpkt) {
6255 			nextpkt = m->m_nextpkt;
6256 			m->m_nextpkt = NULL;
6257 			m_freem(m);
6258 		}
6259 		reclaimed += ndesc;
6260 		can_reclaim -= ndesc;
6261 		IDXINCR(eq->cidx, ndesc, eq->sidx);
6262 	}
6263 
6264 	return (reclaimed);
6265 }
6266 
6267 static void
6268 tx_reclaim(void *arg, int n)
6269 {
6270 	struct sge_txq *txq = arg;
6271 	struct sge_eq *eq = &txq->eq;
6272 
6273 	do {
6274 		if (TXQ_TRYLOCK(txq) == 0)
6275 			break;
6276 		n = reclaim_tx_descs(txq, 32);
6277 		if (eq->cidx == eq->pidx)
6278 			eq->equeqidx = eq->pidx;
6279 		TXQ_UNLOCK(txq);
6280 	} while (n > 0);
6281 }
6282 
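/*
 * Returns flit 'idx' of the SGL that follows the ulptx_sgl header.  Trailing
 * segments are laid out in groups of 3 flits: one flit carrying two segment
 * lengths followed by the two corresponding addresses.
 */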
6283 static __be64
6284 get_flit(struct sglist_seg *segs, int nsegs, int idx)
6285 {
6286 	int i = (idx / 3) * 2;
6287 
6288 	switch (idx % 3) {
6289 	case 0: {
6290 		uint64_t rc;
6291 
6292 		rc = (uint64_t)segs[i].ss_len << 32;
6293 		if (i + 1 < nsegs)
6294 			rc |= (uint64_t)(segs[i + 1].ss_len);
6295 
6296 		return (htobe64(rc));
6297 	}
6298 	case 1:
6299 		return (htobe64(segs[i].ss_paddr));
6300 	case 2:
6301 		return (htobe64(segs[i + 1].ss_paddr));
6302 	}
6303 
6304 	return (0);
6305 }
6306 
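/*
 * Returns the index of the rx buffer zone that best fits buffers of up to
 * 'maxp' bytes, or the largest usable zone if none is big enough; -1 if no
 * zone is usable at all.
 */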
6307 static int
6308 find_refill_source(struct adapter *sc, int maxp, bool packing)
6309 {
6310 	int i, zidx = -1;
6311 	struct rx_buf_info *rxb = &sc->sge.rx_buf_info[0];
6312 
6313 	if (packing) {
6314 		for (i = 0; i < SW_ZONE_SIZES; i++, rxb++) {
6315 			if (rxb->hwidx2 == -1)
6316 				continue;
6317 			if (rxb->size1 < PAGE_SIZE &&
6318 			    rxb->size1 < largest_rx_cluster)
6319 				continue;
6320 			if (rxb->size1 > largest_rx_cluster)
6321 				break;
6322 			MPASS(rxb->size1 - rxb->size2 >= CL_METADATA_SIZE);
6323 			if (rxb->size2 >= maxp)
6324 				return (i);
6325 			zidx = i;
6326 		}
6327 	} else {
6328 		for (i = 0; i < SW_ZONE_SIZES; i++, rxb++) {
6329 			if (rxb->hwidx1 == -1)
6330 				continue;
6331 			if (rxb->size1 > largest_rx_cluster)
6332 				break;
6333 			if (rxb->size1 >= maxp)
6334 				return (i);
6335 			zidx = i;
6336 		}
6337 	}
6338 
6339 	return (zidx);
6340 }
6341 
6342 static void
6343 add_fl_to_sfl(struct adapter *sc, struct sge_fl *fl)
6344 {
6345 	mtx_lock(&sc->sfl_lock);
6346 	FL_LOCK(fl);
6347 	if ((fl->flags & FL_DOOMED) == 0) {
6348 		fl->flags |= FL_STARVING;
6349 		TAILQ_INSERT_TAIL(&sc->sfl, fl, link);
6350 		callout_reset(&sc->sfl_callout, hz / 5, refill_sfl, sc);
6351 	}
6352 	FL_UNLOCK(fl);
6353 	mtx_unlock(&sc->sfl_lock);
6354 }
6355 
6356 static void
6357 handle_wrq_egr_update(struct adapter *sc, struct sge_eq *eq)
6358 {
6359 	struct sge_wrq *wrq = (void *)eq;
6360 
6361 	atomic_readandclear_int(&eq->equiq);
6362 	taskqueue_enqueue(sc->tq[eq->port_id], &wrq->wrq_tx_task);
6363 }
6364 
6365 static void
6366 handle_eth_egr_update(struct adapter *sc, struct sge_eq *eq)
6367 {
6368 	struct sge_txq *txq = (void *)eq;
6369 
6370 	MPASS(eq->type == EQ_ETH);
6371 
6372 	atomic_readandclear_int(&eq->equiq);
6373 	if (mp_ring_is_idle(txq->r))
6374 		taskqueue_enqueue(sc->tq[eq->port_id], &txq->tx_reclaim_task);
6375 	else
6376 		mp_ring_check_drainage(txq->r, 64);
6377 }
6378 
6379 static int
6380 handle_sge_egr_update(struct sge_iq *iq, const struct rss_header *rss,
6381     struct mbuf *m)
6382 {
6383 	const struct cpl_sge_egr_update *cpl = (const void *)(rss + 1);
6384 	unsigned int qid = G_EGR_QID(ntohl(cpl->opcode_qid));
6385 	struct adapter *sc = iq->adapter;
6386 	struct sge *s = &sc->sge;
6387 	struct sge_eq *eq;
6388 	static void (*h[])(struct adapter *, struct sge_eq *) = {NULL,
6389 		&handle_wrq_egr_update, &handle_eth_egr_update,
6390 		&handle_wrq_egr_update};
6391 
6392 	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
6393 	    rss->opcode));
6394 
6395 	eq = s->eqmap[qid - s->eq_start - s->eq_base];
6396 	(*h[eq->type])(sc, eq);
6397 
6398 	return (0);
6399 }
6400 
6401 /* handle_fw_msg works for both fw4_msg and fw6_msg because the data field is at the same offset in both, as the CTASSERT below verifies. */
6402 CTASSERT(offsetof(struct cpl_fw4_msg, data) == \
6403     offsetof(struct cpl_fw6_msg, data));
6404 
6405 static int
6406 handle_fw_msg(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
6407 {
6408 	struct adapter *sc = iq->adapter;
6409 	const struct cpl_fw6_msg *cpl = (const void *)(rss + 1);
6410 
6411 	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
6412 	    rss->opcode));
6413 
6414 	if (cpl->type == FW_TYPE_RSSCPL || cpl->type == FW6_TYPE_RSSCPL) {
6415 		const struct rss_header *rss2;
6416 
6417 		rss2 = (const struct rss_header *)&cpl->data[0];
6418 		return (t4_cpl_handler[rss2->opcode](iq, rss2, m));
6419 	}
6420 
6421 	return (t4_fw_msg_handler[cpl->type](sc, &cpl->data[0]));
6422 }
6423 
6424 /**
6425  *	t4_handle_wrerr_rpl - process a FW work request error message
6426  *	@adap: the adapter
6427  *	@rpl: start of the FW message
6428  */
6429 static int
6430 t4_handle_wrerr_rpl(struct adapter *adap, const __be64 *rpl)
6431 {
6432 	u8 opcode = *(const u8 *)rpl;
6433 	const struct fw_error_cmd *e = (const void *)rpl;
6434 	unsigned int i;
6435 
6436 	if (opcode != FW_ERROR_CMD) {
6437 		log(LOG_ERR,
6438 		    "%s: Received WRERR_RPL message with opcode %#x\n",
6439 		    device_get_nameunit(adap->dev), opcode);
6440 		return (EINVAL);
6441 	}
6442 	log(LOG_ERR, "%s: FW_ERROR (%s) ", device_get_nameunit(adap->dev),
6443 	    G_FW_ERROR_CMD_FATAL(be32toh(e->op_to_type)) ? "fatal" :
6444 	    "non-fatal");
6445 	switch (G_FW_ERROR_CMD_TYPE(be32toh(e->op_to_type))) {
6446 	case FW_ERROR_TYPE_EXCEPTION:
6447 		log(LOG_ERR, "exception info:\n");
6448 		for (i = 0; i < nitems(e->u.exception.info); i++)
6449 			log(LOG_ERR, "%s%08x", i == 0 ? "\t" : " ",
6450 			    be32toh(e->u.exception.info[i]));
6451 		log(LOG_ERR, "\n");
6452 		break;
6453 	case FW_ERROR_TYPE_HWMODULE:
6454 		log(LOG_ERR, "HW module regaddr %08x regval %08x\n",
6455 		    be32toh(e->u.hwmodule.regaddr),
6456 		    be32toh(e->u.hwmodule.regval));
6457 		break;
6458 	case FW_ERROR_TYPE_WR:
6459 		log(LOG_ERR, "WR cidx %d PF %d VF %d eqid %d hdr:\n",
6460 		    be16toh(e->u.wr.cidx),
6461 		    G_FW_ERROR_CMD_PFN(be16toh(e->u.wr.pfn_vfn)),
6462 		    G_FW_ERROR_CMD_VFN(be16toh(e->u.wr.pfn_vfn)),
6463 		    be32toh(e->u.wr.eqid));
6464 		for (i = 0; i < nitems(e->u.wr.wrhdr); i++)
6465 			log(LOG_ERR, "%s%02x", i == 0 ? "\t" : " ",
6466 			    e->u.wr.wrhdr[i]);
6467 		log(LOG_ERR, "\n");
6468 		break;
6469 	case FW_ERROR_TYPE_ACL:
6470 		log(LOG_ERR, "ACL cidx %d PF %d VF %d eqid %d %s",
6471 		    be16toh(e->u.acl.cidx),
6472 		    G_FW_ERROR_CMD_PFN(be16toh(e->u.acl.pfn_vfn)),
6473 		    G_FW_ERROR_CMD_VFN(be16toh(e->u.acl.pfn_vfn)),
6474 		    be32toh(e->u.acl.eqid),
6475 		    G_FW_ERROR_CMD_MV(be16toh(e->u.acl.mv_pkd)) ? "vlanid" :
6476 		    "MAC");
6477 		for (i = 0; i < nitems(e->u.acl.val); i++)
6478 			log(LOG_ERR, " %02x", e->u.acl.val[i]);
6479 		log(LOG_ERR, "\n");
6480 		break;
6481 	default:
6482 		log(LOG_ERR, "type %#x\n",
6483 		    G_FW_ERROR_CMD_TYPE(be32toh(e->op_to_type)));
6484 		return (EINVAL);
6485 	}
6486 	return (0);
6487 }
6488 
6489 static inline bool
6490 bufidx_used(struct adapter *sc, int idx)
6491 {
6492 	struct rx_buf_info *rxb = &sc->sge.rx_buf_info[0];
6493 	int i;
6494 
6495 	for (i = 0; i < SW_ZONE_SIZES; i++, rxb++) {
6496 		if (rxb->size1 > largest_rx_cluster)
6497 			continue;
6498 		if (rxb->hwidx1 == idx || rxb->hwidx2 == idx)
6499 			return (true);
6500 	}
6501 
6502 	return (false);
6503 }
6504 
6505 static int
6506 sysctl_bufsizes(SYSCTL_HANDLER_ARGS)
6507 {
6508 	struct adapter *sc = arg1;
6509 	struct sge_params *sp = &sc->params.sge;
6510 	int i, rc;
6511 	struct sbuf sb;
6512 	char c;
6513 
6514 	sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND);
6515 	for (i = 0; i < SGE_FLBUF_SIZES; i++) {
6516 		if (bufidx_used(sc, i))
6517 			c = '*';
6518 		else
6519 			c = '\0';
6520 
6521 		sbuf_printf(&sb, "%u%c ", sp->sge_fl_buffer_size[i], c);
6522 	}
6523 	sbuf_trim(&sb);
6524 	sbuf_finish(&sb);
6525 	rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
6526 	sbuf_delete(&sb);
6527 	return (rc);
6528 }
6529 
6530 #ifdef RATELIMIT
6531 #if defined(INET) || defined(INET6)
6532 /*
6533  * len16 for a txpkt WR with a GL.  Includes the firmware work request header.
6534  */
6535 static inline u_int
6536 txpkt_eo_len16(u_int nsegs, u_int immhdrs, u_int tso)
6537 {
6538 	u_int n;
6539 
6540 	MPASS(immhdrs > 0);
6541 
6542 	n = roundup2(sizeof(struct fw_eth_tx_eo_wr) +
6543 	    sizeof(struct cpl_tx_pkt_core) + immhdrs, 16);
6544 	if (__predict_false(nsegs == 0))
6545 		goto done;
6546 
6547 	nsegs--; /* first segment is part of ulptx_sgl */
6548 	n += sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1));
6549 	if (tso)
6550 		n += sizeof(struct cpl_tx_pkt_lso_core);
6551 
6552 done:
6553 	return (howmany(n, 16));
6554 }
6555 #endif
6556 
6557 #define ETID_FLOWC_NPARAMS 6
6558 #define ETID_FLOWC_LEN (roundup2((sizeof(struct fw_flowc_wr) + \
6559     ETID_FLOWC_NPARAMS * sizeof(struct fw_flowc_mnemval)), 16))
6560 #define ETID_FLOWC_LEN16 (howmany(ETID_FLOWC_LEN, 16))
6561 
6562 #if defined(INET) || defined(INET6)
6563 static int
6564 send_etid_flowc_wr(struct cxgbe_rate_tag *cst, struct port_info *pi,
6565     struct vi_info *vi)
6566 {
6567 	struct wrq_cookie cookie;
6568 	u_int pfvf = pi->adapter->pf << S_FW_VIID_PFN;
6569 	struct fw_flowc_wr *flowc;
6570 
6571 	mtx_assert(&cst->lock, MA_OWNED);
6572 	MPASS((cst->flags & (EO_FLOWC_PENDING | EO_FLOWC_RPL_PENDING)) ==
6573 	    EO_FLOWC_PENDING);
6574 
6575 	flowc = start_wrq_wr(&cst->eo_txq->wrq, ETID_FLOWC_LEN16, &cookie);
6576 	if (__predict_false(flowc == NULL))
6577 		return (ENOMEM);
6578 
6579 	bzero(flowc, ETID_FLOWC_LEN);
6580 	flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
6581 	    V_FW_FLOWC_WR_NPARAMS(ETID_FLOWC_NPARAMS) | V_FW_WR_COMPL(0));
6582 	flowc->flowid_len16 = htonl(V_FW_WR_LEN16(ETID_FLOWC_LEN16) |
6583 	    V_FW_WR_FLOWID(cst->etid));
6584 	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
6585 	flowc->mnemval[0].val = htobe32(pfvf);
6586 	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
6587 	flowc->mnemval[1].val = htobe32(pi->tx_chan);
6588 	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
6589 	flowc->mnemval[2].val = htobe32(pi->tx_chan);
6590 	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
6591 	flowc->mnemval[3].val = htobe32(cst->iqid);
6592 	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_EOSTATE;
6593 	flowc->mnemval[4].val = htobe32(FW_FLOWC_MNEM_EOSTATE_ESTABLISHED);
6594 	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS;
6595 	flowc->mnemval[5].val = htobe32(cst->schedcl);
6596 
6597 	commit_wrq_wr(&cst->eo_txq->wrq, flowc, &cookie);
6598 
6599 	cst->flags &= ~EO_FLOWC_PENDING;
6600 	cst->flags |= EO_FLOWC_RPL_PENDING;
6601 	MPASS(cst->tx_credits >= ETID_FLOWC_LEN16);	/* flowc is first WR. */
6602 	cst->tx_credits -= ETID_FLOWC_LEN16;
6603 
6604 	return (0);
6605 }
6606 #endif
6607 
6608 #define ETID_FLUSH_LEN16 (howmany(sizeof (struct fw_flowc_wr), 16))
6609 
6610 void
6611 send_etid_flush_wr(struct cxgbe_rate_tag *cst)
6612 {
6613 	struct fw_flowc_wr *flowc;
6614 	struct wrq_cookie cookie;
6615 
6616 	mtx_assert(&cst->lock, MA_OWNED);
6617 
6618 	flowc = start_wrq_wr(&cst->eo_txq->wrq, ETID_FLUSH_LEN16, &cookie);
6619 	if (__predict_false(flowc == NULL))
6620 		CXGBE_UNIMPLEMENTED(__func__);
6621 
6622 	bzero(flowc, ETID_FLUSH_LEN16 * 16);
6623 	flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
6624 	    V_FW_FLOWC_WR_NPARAMS(0) | F_FW_WR_COMPL);
6625 	flowc->flowid_len16 = htobe32(V_FW_WR_LEN16(ETID_FLUSH_LEN16) |
6626 	    V_FW_WR_FLOWID(cst->etid));
6627 
6628 	commit_wrq_wr(&cst->eo_txq->wrq, flowc, &cookie);
6629 
6630 	cst->flags |= EO_FLUSH_RPL_PENDING;
6631 	MPASS(cst->tx_credits >= ETID_FLUSH_LEN16);
6632 	cst->tx_credits -= ETID_FLUSH_LEN16;
6633 	cst->ncompl++;
6634 }
6635 
6636 static void
6637 write_ethofld_wr(struct cxgbe_rate_tag *cst, struct fw_eth_tx_eo_wr *wr,
6638     struct mbuf *m0, int compl)
6639 {
6640 	struct cpl_tx_pkt_core *cpl;
6641 	uint64_t ctrl1;
6642 	uint32_t ctrl;	/* used in many unrelated places */
6643 	int len16, pktlen, nsegs, immhdrs;
6644 	uintptr_t p;
6645 	struct ulptx_sgl *usgl;
6646 	struct sglist sg;
6647 	struct sglist_seg segs[38];	/* XXX: find real limit.  XXX: get off the stack */
6648 
6649 	mtx_assert(&cst->lock, MA_OWNED);
6650 	M_ASSERTPKTHDR(m0);
6651 	KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 &&
6652 	    m0->m_pkthdr.l4hlen > 0,
6653 	    ("%s: ethofld mbuf %p is missing header lengths", __func__, m0));
6654 
6655 	len16 = mbuf_eo_len16(m0);
6656 	nsegs = mbuf_eo_nsegs(m0);
6657 	pktlen = m0->m_pkthdr.len;
6658 	ctrl = sizeof(struct cpl_tx_pkt_core);
6659 	if (needs_tso(m0))
6660 		ctrl += sizeof(struct cpl_tx_pkt_lso_core);
6661 	immhdrs = m0->m_pkthdr.l2hlen + m0->m_pkthdr.l3hlen + m0->m_pkthdr.l4hlen;
6662 	ctrl += immhdrs;
6663 
6664 	wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_EO_WR) |
6665 	    V_FW_ETH_TX_EO_WR_IMMDLEN(ctrl) | V_FW_WR_COMPL(!!compl));
6666 	wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(len16) |
6667 	    V_FW_WR_FLOWID(cst->etid));
6668 	wr->r3 = 0;
6669 	if (needs_outer_udp_csum(m0)) {
6670 		wr->u.udpseg.type = FW_ETH_TX_EO_TYPE_UDPSEG;
6671 		wr->u.udpseg.ethlen = m0->m_pkthdr.l2hlen;
6672 		wr->u.udpseg.iplen = htobe16(m0->m_pkthdr.l3hlen);
6673 		wr->u.udpseg.udplen = m0->m_pkthdr.l4hlen;
6674 		wr->u.udpseg.rtplen = 0;
6675 		wr->u.udpseg.r4 = 0;
6676 		wr->u.udpseg.mss = htobe16(pktlen - immhdrs);
6677 		wr->u.udpseg.schedpktsize = wr->u.udpseg.mss;
6678 		wr->u.udpseg.plen = htobe32(pktlen - immhdrs);
6679 		cpl = (void *)(wr + 1);
6680 	} else {
6681 		MPASS(needs_outer_tcp_csum(m0));
6682 		wr->u.tcpseg.type = FW_ETH_TX_EO_TYPE_TCPSEG;
6683 		wr->u.tcpseg.ethlen = m0->m_pkthdr.l2hlen;
6684 		wr->u.tcpseg.iplen = htobe16(m0->m_pkthdr.l3hlen);
6685 		wr->u.tcpseg.tcplen = m0->m_pkthdr.l4hlen;
6686 		wr->u.tcpseg.tsclk_tsoff = mbuf_eo_tsclk_tsoff(m0);
6687 		wr->u.tcpseg.r4 = 0;
6688 		wr->u.tcpseg.r5 = 0;
6689 		wr->u.tcpseg.plen = htobe32(pktlen - immhdrs);
6690 
6691 		if (needs_tso(m0)) {
6692 			struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
6693 
6694 			wr->u.tcpseg.mss = htobe16(m0->m_pkthdr.tso_segsz);
6695 
6696 			ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) |
6697 			    F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE |
6698 			    V_LSO_ETHHDR_LEN((m0->m_pkthdr.l2hlen -
6699 				ETHER_HDR_LEN) >> 2) |
6700 			    V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) |
6701 			    V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2);
6702 			if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr))
6703 				ctrl |= F_LSO_IPV6;
6704 			lso->lso_ctrl = htobe32(ctrl);
6705 			lso->ipid_ofst = htobe16(0);
6706 			lso->mss = htobe16(m0->m_pkthdr.tso_segsz);
6707 			lso->seqno_offset = htobe32(0);
6708 			lso->len = htobe32(pktlen);
6709 
6710 			cpl = (void *)(lso + 1);
6711 		} else {
6712 			wr->u.tcpseg.mss = htobe16(0xffff);
6713 			cpl = (void *)(wr + 1);
6714 		}
6715 	}
6716 
6717 	/* Checksum offload must be requested for ethofld. */
6718 	MPASS(needs_outer_l4_csum(m0));
6719 	ctrl1 = csum_to_ctrl(cst->adapter, m0);
6720 
6721 	/* VLAN tag insertion */
6722 	if (needs_vlan_insertion(m0)) {
6723 		ctrl1 |= F_TXPKT_VLAN_VLD |
6724 		    V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag);
6725 	}
6726 
6727 	/* CPL header */
6728 	cpl->ctrl0 = cst->ctrl0;
6729 	cpl->pack = 0;
6730 	cpl->len = htobe16(pktlen);
6731 	cpl->ctrl1 = htobe64(ctrl1);
6732 
6733 	/* Copy Ethernet, IP & TCP/UDP hdrs as immediate data */
6734 	p = (uintptr_t)(cpl + 1);
6735 	m_copydata(m0, 0, immhdrs, (void *)p);
6736 
6737 	/* SGL */
6738 	if (nsegs > 0) {
6739 		int i, pad;
6740 
6741 		/* Zero-pad up to the next 16-byte boundary, if not 16-byte aligned. */
6742 		p += immhdrs;
6743 		pad = 16 - (immhdrs & 0xf);
6744 		bzero((void *)p, pad);
6745 
6746 		usgl = (void *)(p + pad);
6747 		usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
6748 		    V_ULPTX_NSGE(nsegs));
6749 
6750 		sglist_init(&sg, nitems(segs), segs);
6751 		for (; m0 != NULL; m0 = m0->m_next) {
6752 			if (__predict_false(m0->m_len == 0))
6753 				continue;
6754 			if (immhdrs >= m0->m_len) {
6755 				immhdrs -= m0->m_len;
6756 				continue;
6757 			}
6758 			if (m0->m_flags & M_EXTPG)
6759 				sglist_append_mbuf_epg(&sg, m0,
6760 				    mtod(m0, vm_offset_t), m0->m_len);
6761 			else
6762 				sglist_append(&sg, mtod(m0, char *) + immhdrs,
6763 				    m0->m_len - immhdrs);
6764 			immhdrs = 0;
6765 		}
6766 		MPASS(sg.sg_nseg == nsegs);
6767 
6768 		/*
6769 		 * Zero pad last 8B in case the WR doesn't end on a 16B
6770 		 * boundary.
6771 		 */
6772 		*(uint64_t *)((char *)wr + len16 * 16 - 8) = 0;
6773 
6774 		usgl->len0 = htobe32(segs[0].ss_len);
6775 		usgl->addr0 = htobe64(segs[0].ss_paddr);
6776 		for (i = 0; i < nsegs - 1; i++) {
6777 			usgl->sge[i / 2].len[i & 1] = htobe32(segs[i + 1].ss_len);
6778 			usgl->sge[i / 2].addr[i & 1] = htobe64(segs[i + 1].ss_paddr);
6779 		}
6780 		if (i & 1)
6781 			usgl->sge[i / 2].len[1] = htobe32(0);
6782 	}
6783 
6784 }
6785 
6786 static void
6787 ethofld_tx(struct cxgbe_rate_tag *cst)
6788 {
6789 	struct mbuf *m;
6790 	struct wrq_cookie cookie;
6791 	int next_credits, compl;
6792 	struct fw_eth_tx_eo_wr *wr;
6793 
6794 	mtx_assert(&cst->lock, MA_OWNED);
6795 
6796 	while ((m = mbufq_first(&cst->pending_tx)) != NULL) {
6797 		M_ASSERTPKTHDR(m);
6798 
6799 		/* How many len16 credits are needed to send this mbuf? */
6800 		next_credits = mbuf_eo_len16(m);
6801 		MPASS(next_credits > 0);
6802 		if (next_credits > cst->tx_credits) {
6803 			/*
6804 			 * Tx will make progress eventually because there is at
6805 			 * least one outstanding fw4_ack that will return
6806 			 * credits and kick the tx.
6807 			 */
6808 			MPASS(cst->ncompl > 0);
6809 			return;
6810 		}
6811 		wr = start_wrq_wr(&cst->eo_txq->wrq, next_credits, &cookie);
6812 		if (__predict_false(wr == NULL)) {
6813 			/* XXX: wishful thinking, not a real assertion. */
6814 			MPASS(cst->ncompl > 0);
6815 			return;
6816 		}
6817 		cst->tx_credits -= next_credits;
6818 		cst->tx_nocompl += next_credits;
6819 		compl = cst->ncompl == 0 || cst->tx_nocompl >= cst->tx_total / 2;
6820 		ETHER_BPF_MTAP(cst->com.ifp, m);
6821 		write_ethofld_wr(cst, wr, m, compl);
6822 		commit_wrq_wr(&cst->eo_txq->wrq, wr, &cookie);
6823 		if (compl) {
6824 			cst->ncompl++;
6825 			cst->tx_nocompl = 0;
6826 		}
6827 		(void) mbufq_dequeue(&cst->pending_tx);
6828 
6829 		/*
6830 		 * Drop the mbuf's reference on the tag now rather
6831 		 * than waiting until m_freem().  This ensures that
6832 		 * cxgbe_rate_tag_free gets called when the inp drops
6833 		 * its reference on the tag, there are no more mbufs
6834 		 * in the pending_tx queue, and any pending requests
6835 		 * can be flushed.  Otherwise, if the last mbuf
6836 		 * doesn't request a completion, the etid will never
6837 		 * be released.
6838 		 */
6839 		m->m_pkthdr.snd_tag = NULL;
6840 		m->m_pkthdr.csum_flags &= ~CSUM_SND_TAG;
6841 		m_snd_tag_rele(&cst->com);
6842 
6843 		mbufq_enqueue(&cst->pending_fwack, m);
6844 	}
6845 }
6846 
6847 #if defined(INET) || defined(INET6)
6848 static int
6849 ethofld_transmit(if_t ifp, struct mbuf *m0)
6850 {
6851 	struct cxgbe_rate_tag *cst;
6852 	int rc;
6853 
6854 	MPASS(m0->m_nextpkt == NULL);
6855 	MPASS(m0->m_pkthdr.csum_flags & CSUM_SND_TAG);
6856 	MPASS(m0->m_pkthdr.snd_tag != NULL);
6857 	cst = mst_to_crt(m0->m_pkthdr.snd_tag);
6858 
6859 	mtx_lock(&cst->lock);
6860 	MPASS(cst->flags & EO_SND_TAG_REF);
6861 
6862 	if (__predict_false(cst->flags & EO_FLOWC_PENDING)) {
6863 		struct vi_info *vi = if_getsoftc(ifp);
6864 		struct port_info *pi = vi->pi;
6865 		struct adapter *sc = pi->adapter;
6866 		const uint32_t rss_mask = vi->rss_size - 1;
6867 		uint32_t rss_hash;
6868 
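		/*
		 * First packet on this tag: derive the ingress queue that
		 * will take this flow's fw4_ack replies and the offload
		 * txq from the packet's RSS hash (or a random hash if the
		 * mbuf has none) before sending the flowc WR.
		 */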
6869 		cst->eo_txq = &sc->sge.ofld_txq[vi->first_ofld_txq];
6870 		if (M_HASHTYPE_ISHASH(m0))
6871 			rss_hash = m0->m_pkthdr.flowid;
6872 		else
6873 			rss_hash = arc4random();
6874 		/* We assume RSS hashing */
6875 		cst->iqid = vi->rss[rss_hash & rss_mask];
6876 		cst->eo_txq += rss_hash % vi->nofldtxq;
6877 		rc = send_etid_flowc_wr(cst, pi, vi);
6878 		if (rc != 0)
6879 			goto done;
6880 	}
6881 
6882 	if (__predict_false(cst->plen + m0->m_pkthdr.len > eo_max_backlog)) {
6883 		rc = ENOBUFS;
6884 		goto done;
6885 	}
6886 
6887 	mbufq_enqueue(&cst->pending_tx, m0);
6888 	cst->plen += m0->m_pkthdr.len;
6889 
6890 	/*
6891 	 * Hold an extra reference on the tag while generating work
6892 	 * requests to ensure that we don't try to free the tag during
6893 	 * ethofld_tx() in case we are sending the final mbuf after
6894 	 * the inp was freed.
6895 	 */
6896 	m_snd_tag_ref(&cst->com);
6897 	ethofld_tx(cst);
6898 	mtx_unlock(&cst->lock);
6899 	m_snd_tag_rele(&cst->com);
6900 	return (0);
6901 
6902 done:
6903 	mtx_unlock(&cst->lock);
6904 	return (rc);
6905 }
6906 #endif
6907 
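/*
 * Handle a fw4_ack CPL for an ethofld tag: reclaim tx credits, free the
 * mbufs whose WRs have completed, and restart transmission if the
 * pending backlog can now make progress.
 */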
6908 static int
6909 ethofld_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m0)
6910 {
6911 	struct adapter *sc = iq->adapter;
6912 	const struct cpl_fw4_ack *cpl = (const void *)(rss + 1);
6913 	struct mbuf *m;
6914 	u_int etid = G_CPL_FW4_ACK_FLOWID(be32toh(OPCODE_TID(cpl)));
6915 	struct cxgbe_rate_tag *cst;
6916 	uint8_t credits = cpl->credits;
6917 
6918 	cst = lookup_etid(sc, etid);
6919 	mtx_lock(&cst->lock);
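	/*
	 * The ack that completes the flowc WR also returns the flowc's
	 * credits; peel those off before matching the remainder against
	 * the mbufs in pending_fwack, which tracks data WRs only.
	 */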
6920 	if (__predict_false(cst->flags & EO_FLOWC_RPL_PENDING)) {
6921 		MPASS(credits >= ETID_FLOWC_LEN16);
6922 		credits -= ETID_FLOWC_LEN16;
6923 		cst->flags &= ~EO_FLOWC_RPL_PENDING;
6924 	}
6925 
6926 	KASSERT(cst->ncompl > 0,
6927 	    ("%s: etid %u (%p) wasn't expecting completion.",
6928 	    __func__, etid, cst));
6929 	cst->ncompl--;
6930 
6931 	while (credits > 0) {
6932 		m = mbufq_dequeue(&cst->pending_fwack);
6933 		if (__predict_false(m == NULL)) {
6934 			/*
6935 			 * The remaining credits are for the final flush that
6936 			 * was issued when the tag was freed by the kernel.
6937 			 */
6938 			MPASS((cst->flags &
6939 			    (EO_FLUSH_RPL_PENDING | EO_SND_TAG_REF)) ==
6940 			    EO_FLUSH_RPL_PENDING);
6941 			MPASS(credits == ETID_FLUSH_LEN16);
6942 			MPASS(cst->tx_credits + cpl->credits == cst->tx_total);
6943 			MPASS(cst->ncompl == 0);
6944 
6945 			cst->flags &= ~EO_FLUSH_RPL_PENDING;
6946 			cst->tx_credits += cpl->credits;
6947 			cxgbe_rate_tag_free_locked(cst);
6948 			return (0);	/* cst is gone. */
6949 		}
6950 		KASSERT(m != NULL,
6951 		    ("%s: too many credits (%u, %u)", __func__, cpl->credits,
6952 		    credits));
6953 		KASSERT(credits >= mbuf_eo_len16(m),
6954 		    ("%s: too few credits (%u, %u, %u)", __func__,
6955 		    cpl->credits, credits, mbuf_eo_len16(m)));
6956 		credits -= mbuf_eo_len16(m);
6957 		cst->plen -= m->m_pkthdr.len;
6958 		m_freem(m);
6959 	}
6960 
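	/*
	 * Every acknowledged WR above was matched with a queued mbuf;
	 * return the CPL's full credit count, including any flowc
	 * credits peeled off earlier, to the tag's budget.
	 */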
6961 	cst->tx_credits += cpl->credits;
6962 	MPASS(cst->tx_credits <= cst->tx_total);
6963 
6964 	if (cst->flags & EO_SND_TAG_REF) {
6965 		/*
6966 		 * As with ethofld_transmit(), hold an extra reference
6967 		 * so that the tag is stable across ethofld_tx().
6968 		 */
6969 		m_snd_tag_ref(&cst->com);
6970 		m = mbufq_first(&cst->pending_tx);
6971 		if (m != NULL && cst->tx_credits >= mbuf_eo_len16(m))
6972 			ethofld_tx(cst);
6973 		mtx_unlock(&cst->lock);
6974 		m_snd_tag_rele(&cst->com);
6975 	} else {
6976 		/*
6977 		 * There shouldn't be any pending packets if the tag
6978 		 * was freed by the kernel since any pending packet
6979 		 * should hold a reference to the tag.
6980 		 */
6981 		MPASS(mbufq_first(&cst->pending_tx) == NULL);
6982 		mtx_unlock(&cst->lock);
6983 	}
6984 
6985 	return (0);
6986 }
6987 #endif
6988