xref: /freebsd/sys/dev/cxgbe/tom/t4_tom.h (revision 67ca7330cf34a789afbbff9ae7e4cdc4a4917ae3)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2012, 2015 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#ifndef __T4_TOM_H__
#define __T4_TOM_H__
#include <sys/vmem.h>
#include "common/t4_hw.h"
#include "tom/t4_tls.h"

#define LISTEN_HASH_SIZE 32

/*
 * Min receive window.  We want it to be large enough to accommodate receive
 * coalescing, handle jumbo frames, and not trigger sender SWS avoidance.
 */
#define MIN_RCV_WND (24 * 1024U)

/*
 * Max receive window supported by HW in bytes.  Only a small part of it can
 * be set through option0, the rest needs to be set through RX_DATA_ACK.
 */
#define MAX_RCV_WND ((1U << 27) - 1)
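
/*
 * Illustrative sketch only (not part of the driver): a receive window is
 * expected to stay within the two limits above.  The helper name is
 * hypothetical; the real value comes from select_rcv_wnd() declared below.
 */
static inline uint32_t
example_clamp_rcv_wnd(uint32_t wnd)
{

	if (wnd < MIN_RCV_WND)
		return (MIN_RCV_WND);
	if (wnd > MAX_RCV_WND)
		return (MAX_RCV_WND);
	return (wnd);
}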

#define	DDP_RSVD_WIN (16 * 1024U)
#define	SB_DDP_INDICATE	SB_IN_TOE	/* soreceive must respond to indicate */

#define USE_DDP_RX_FLOW_CONTROL

#define PPOD_SZ(n)	((n) * sizeof(struct pagepod))
#define PPOD_SIZE	(PPOD_SZ(1))

/* TOE PCB flags */
enum {
	TPF_ATTACHED	   = (1 << 0),	/* a tcpcb refers to this toepcb */
	TPF_FLOWC_WR_SENT  = (1 << 1),	/* firmware flow context WR sent */
	TPF_TX_DATA_SENT   = (1 << 2),	/* some data sent */
	TPF_TX_SUSPENDED   = (1 << 3),	/* tx suspended for lack of resources */
	TPF_SEND_FIN	   = (1 << 4),	/* send FIN after all pending data */
	TPF_FIN_SENT	   = (1 << 5),	/* FIN has been sent */
	TPF_ABORT_SHUTDOWN = (1 << 6),	/* connection abort is in progress */
	TPF_CPL_PENDING    = (1 << 7),	/* haven't received the last CPL */
	TPF_SYNQE	   = (1 << 8),	/* synq_entry, not really a toepcb */
	TPF_SYNQE_EXPANDED = (1 << 9),	/* toepcb ready, tid context updated */
	TPF_FORCE_CREDITS  = (1 << 10), /* always send credits */
};
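
/*
 * Illustrative sketch only: the TPF_* values are independent bits in
 * toepcb->flags and are combined and tested with ordinary bit operations.
 * The helper name is hypothetical.
 */
static inline int
example_tpf_fin_pending(u_int flags)
{

	/* A FIN has been requested but not yet sent. */
	return ((flags & (TPF_SEND_FIN | TPF_FIN_SENT)) == TPF_SEND_FIN);
}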

enum {
	DDP_OK		= (1 << 0),	/* OK to turn on DDP */
	DDP_SC_REQ	= (1 << 1),	/* state change (on/off) requested */
	DDP_ON		= (1 << 2),	/* DDP is turned on */
	DDP_BUF0_ACTIVE	= (1 << 3),	/* buffer 0 in use (not invalidated) */
	DDP_BUF1_ACTIVE	= (1 << 4),	/* buffer 1 in use (not invalidated) */
	DDP_TASK_ACTIVE = (1 << 5),	/* requeue task is queued / running */
	DDP_DEAD	= (1 << 6),	/* toepcb is shutting down */
};
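
/*
 * Illustrative sketch only: DDP_BUF0_ACTIVE and DDP_BUF1_ACTIVE track the two
 * hardware DDP buffers; this maps a buffer index back to its flag.  The
 * helper name is hypothetical.
 */
static inline int
example_ddp_buf_active(u_int flags, int db_idx)
{

	return ((flags & (db_idx == 0 ? DDP_BUF0_ACTIVE : DDP_BUF1_ACTIVE)) != 0);
}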

struct sockopt;
struct offload_settings;

struct ofld_tx_sdesc {
	uint32_t plen;		/* payload length */
	uint8_t tx_credits;	/* firmware tx credits (unit is 16B) */
	void *iv_buffer;	/* optional buffer holding IVs for TLS */
};
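
/*
 * Illustrative sketch only: tx_credits above is in 16-byte units, so a work
 * request of 'len' bytes costs the following number of credits.  The helper
 * name is hypothetical; the real accounting lives in t4_cpl_io.c.
 */
static inline u_int
example_wr_len_to_credits(u_int len)
{

	return ((len + 15) / 16);
}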

struct ppod_region {
	u_int pr_start;
	u_int pr_len;
	u_int pr_page_shift[4];
	uint32_t pr_tag_mask;		/* hardware tagmask for this region. */
	uint32_t pr_invalid_bit;	/* OR with this to invalidate tag. */
	uint32_t pr_alias_mask;		/* AND with tag to get alias bits. */
	u_int pr_alias_shift;		/* shift this much for first alias bit. */
	vmem_t *pr_arena;
};
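
/*
 * Illustrative sketch only, following the field comments above: a tag handed
 * out from a ppod_region is invalidated by OR'ing in pr_invalid_bit.  The
 * helper name is hypothetical.
 */
static inline uint32_t
example_invalidate_ppod_tag(struct ppod_region *pr, uint32_t tag)
{

	return (tag | pr->pr_invalid_bit);
}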

struct ppod_reservation {
	struct ppod_region *prsv_pr;
	uint32_t prsv_tag;		/* Full tag: pgsz, alias, tag, color */
	u_int prsv_nppods;
};

struct pageset {
	TAILQ_ENTRY(pageset) link;
	vm_page_t *pages;
	int npages;
	int flags;
	int offset;		/* offset in first page */
	int len;
	struct ppod_reservation prsv;
	struct vmspace *vm;
	vm_offset_t start;
	u_int vm_timestamp;
};

TAILQ_HEAD(pagesetq, pageset);

#define	PS_WIRED		0x0001	/* Pages wired rather than held. */
#define	PS_PPODS_WRITTEN	0x0002	/* Page pods written to the card. */

#define	EXT_FLAG_AIOTX		EXT_FLAG_VENDOR1

#define	IS_AIOTX_MBUF(m)						\
	((m)->m_flags & M_EXT && (m)->m_ext.ext_flags & EXT_FLAG_AIOTX)

struct ddp_buffer {
	struct pageset *ps;

	struct kaiocb *job;
	int cancel_pending;
};

struct ddp_pcb {
	u_int flags;
	struct ddp_buffer db[2];
	TAILQ_HEAD(, pageset) cached_pagesets;
	TAILQ_HEAD(, kaiocb) aiojobq;
	u_int waiting_count;
	u_int active_count;
	u_int cached_count;
	int active_id;	/* the currently active DDP buffer */
	struct task requeue_task;
	struct kaiocb *queueing;
	struct mtx lock;
};
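
/*
 * Illustrative sketch only: with exactly two DDP buffers in db[], the index of
 * the buffer that is not currently active is active_id ^ 1.  This assumes a
 * buffer is active (active_id is 0 or 1); the helper name is hypothetical.
 */
static inline int
example_other_ddp_buf(struct ddp_pcb *ddp)
{

	return (ddp->active_id ^ 1);
}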

struct aiotx_buffer {
	struct pageset ps;
	struct kaiocb *job;
	int refcount;
};

struct toepcb {
	TAILQ_ENTRY(toepcb) link; /* toep_list */
	u_int flags;		/* miscellaneous flags */
	int refcount;
	struct tom_data *td;
	struct inpcb *inp;	/* backpointer to host stack's PCB */
	struct vnet *vnet;
	struct vi_info *vi;	/* virtual interface */
	struct sge_wrq *ofld_txq;
	struct sge_ofld_rxq *ofld_rxq;
	struct sge_wrq *ctrlq;
	struct l2t_entry *l2te;	/* L2 table entry used by this connection */
	struct clip_entry *ce;	/* CLIP table entry used by this tid */
	int tid;		/* Connection identifier */
	int tc_idx;		/* traffic class that this tid is bound to */

	/* tx credit handling */
	u_int tx_total;		/* total tx WR credits (in 16B units) */
	u_int tx_credits;	/* tx WR credits (in 16B units) available */
	u_int tx_nocompl;	/* tx WR credits since last compl request */
	u_int plen_nocompl;	/* payload since last compl request */

	int opt0_rcv_bufsize;	/* XXX: save full opt0/opt2 for later? */

	u_int ulp_mode;	/* ULP mode */
	void *ulpcb;
	void *ulpcb2;
	struct mbufq ulp_pduq;	/* PDUs waiting to be sent out. */
	struct mbufq ulp_pdu_reclaimq;

	struct ddp_pcb ddp;
	struct tls_ofld_info tls;

	TAILQ_HEAD(, kaiocb) aiotx_jobq;
	struct task aiotx_task;
	bool aiotx_task_active;

	/* Tx software descriptor */
	uint8_t txsd_total;
	uint8_t txsd_pidx;
	uint8_t txsd_cidx;
	uint8_t txsd_avail;
	struct ofld_tx_sdesc txsd[];
};

#define	DDP_LOCK(toep)		mtx_lock(&(toep)->ddp.lock)
#define	DDP_UNLOCK(toep)	mtx_unlock(&(toep)->ddp.lock)
#define	DDP_ASSERT_LOCKED(toep)	mtx_assert(&(toep)->ddp.lock, MA_OWNED)
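
/*
 * Illustrative sketch only: the DDP_* macros above serialize access to the
 * per-connection DDP state, e.g. around a read of the active buffer id.  The
 * helper name is hypothetical.
 */
static inline int
example_read_active_ddp_buf(struct toepcb *toep)
{
	int db_idx;

	DDP_LOCK(toep);
	db_idx = toep->ddp.active_id;
	DDP_UNLOCK(toep);

	return (db_idx);
}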

struct flowc_tx_params {
	uint32_t snd_nxt;
	uint32_t rcv_nxt;
	unsigned int snd_space;
	unsigned int mss;
};

/*
 * Compressed state for a listener's embryonic connections.
 */
struct synq_entry {
	struct listen_ctx *lctx;	/* backpointer to listen ctx */
	struct mbuf *syn;
	int flags;			/* same as toepcb's flags (TPF_*) */
	volatile int ok_to_respond;
	volatile u_int refcnt;
	int tid;
	uint32_t iss;
	uint32_t irs;
	uint32_t ts;
	uint16_t txqid;
	uint16_t rxqid;
	uint16_t l2e_idx;
	uint16_t ulp_mode;
	uint16_t rcv_bufsize;
	__be16 tcp_opt; /* from cpl_pass_establish */
	struct toepcb *toep;
};

/* listen_ctx flags */
#define LCTX_RPL_PENDING 1	/* waiting for a CPL_PASS_OPEN_RPL */

struct listen_ctx {
	LIST_ENTRY(listen_ctx) link;	/* listen hash linkage */
	volatile int refcount;
	int stid;
	struct stid_region stid_region;
	int flags;
	struct inpcb *inp;		/* listening socket's inp */
	struct vnet *vnet;
	struct sge_wrq *ctrlq;
	struct sge_ofld_rxq *ofld_rxq;
	struct clip_entry *ce;
};

/* tcb_histent flags */
#define TE_RPL_PENDING	1
#define TE_ACTIVE	2

/* Bits in one 8-bit tcb_histent sample. */
#define TS_RTO			(1 << 0)
#define TS_DUPACKS		(1 << 1)
#define TS_FASTREXMT		(1 << 2)
#define TS_SND_BACKLOGGED	(1 << 3)
#define TS_CWND_LIMITED		(1 << 4)
#define TS_ECN_ECE		(1 << 5)
#define TS_ECN_CWR		(1 << 6)
#define TS_RESERVED		(1 << 7)	/* Unused. */
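
/*
 * Illustrative sketch only: each 8-bit sample is simply an OR of the TS_*
 * bits observed for that interval.  The helper name and parameters are
 * hypothetical.
 */
static inline uint8_t
example_tcb_sample(bool cwnd_limited, bool ece)
{
	uint8_t sample = 0;

	if (cwnd_limited)
		sample |= TS_CWND_LIMITED;
	if (ece)
		sample |= TS_ECN_ECE;

	return (sample);
}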

struct tcb_histent {
	struct mtx te_lock;
	struct callout te_callout;
	uint64_t te_tcb[TCB_SIZE / sizeof(uint64_t)];
	struct adapter *te_adapter;
	u_int te_flags;
	u_int te_tid;
	uint8_t te_pidx;
	uint8_t te_sample[100];
};
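
/*
 * Illustrative sketch only (an assumption about usage; the real update lives
 * in t4_tom.c): te_sample looks like a ring of 8-bit TS_* samples with
 * te_pidx as the producer index, presumably updated with te_lock held.  The
 * helper name is hypothetical.
 */
static inline void
example_record_tcb_sample(struct tcb_histent *te, uint8_t sample)
{

	te->te_sample[te->te_pidx] = sample;
	te->te_pidx = (te->te_pidx + 1) % sizeof(te->te_sample);
}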

struct tom_data {
	struct toedev tod;

	/* toepcb's associated with this TOE device */
	struct mtx toep_list_lock;
	TAILQ_HEAD(, toepcb) toep_list;

	struct mtx lctx_hash_lock;
	LIST_HEAD(, listen_ctx) *listen_hash;
	u_long listen_mask;
	int lctx_count;		/* # of lctx in the hash table */

	struct ppod_region pr;

	struct rwlock tcb_history_lock __aligned(CACHE_LINE_SIZE);
	struct tcb_histent **tcb_history;
	int dupack_threshold;

	/* WRs that will not be sent to the chip because L2 resolution failed */
	struct mtx unsent_wr_lock;
	STAILQ_HEAD(, wrqe) unsent_wr_list;
	struct task reclaim_wr_resources;
};

static inline struct tom_data *
tod_td(struct toedev *tod)
{

	return (__containerof(tod, struct tom_data, tod));
}

static inline struct adapter *
td_adapter(struct tom_data *td)
{

	return (td->tod.tod_softc);
}
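
/*
 * Illustrative sketch only: the two accessors above compose, so going
 * straight from a struct toedev to its adapter looks like this.  The helper
 * name is hypothetical.
 */
static inline struct adapter *
example_tod_adapter(struct toedev *tod)
{

	return (td_adapter(tod_td(tod)));
}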

static inline void
set_mbuf_ulp_submode(struct mbuf *m, uint8_t ulp_submode)
{

	M_ASSERTPKTHDR(m);
	m->m_pkthdr.PH_per.eight[0] = ulp_submode;
}

static inline uint8_t
mbuf_ulp_submode(struct mbuf *m)
{

	M_ASSERTPKTHDR(m);
	return (m->m_pkthdr.PH_per.eight[0]);
}
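
/*
 * Illustrative sketch only: the pair of helpers above stash the ULP submode
 * in the mbuf packet header and recover it later, e.g. to propagate it from
 * one mbuf to another.  The helper name is hypothetical.
 */
static inline void
example_copy_ulp_submode(struct mbuf *dst, struct mbuf *src)
{

	set_mbuf_ulp_submode(dst, mbuf_ulp_submode(src));
}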

/* t4_tom.c */
struct toepcb *alloc_toepcb(struct vi_info *, int, int, int);
struct toepcb *hold_toepcb(struct toepcb *);
void free_toepcb(struct toepcb *);
void offload_socket(struct socket *, struct toepcb *);
void undo_offload_socket(struct socket *);
void final_cpl_received(struct toepcb *);
void insert_tid(struct adapter *, int, void *, int);
void *lookup_tid(struct adapter *, int);
void update_tid(struct adapter *, int, void *);
void remove_tid(struct adapter *, int, int);
int find_best_mtu_idx(struct adapter *, struct in_conninfo *,
    struct offload_settings *);
u_long select_rcv_wnd(struct socket *);
int select_rcv_wscale(void);
uint64_t calc_opt0(struct socket *, struct vi_info *, struct l2t_entry *,
    int, int, int, int, struct offload_settings *);
uint64_t select_ntuple(struct vi_info *, struct l2t_entry *);
int select_ulp_mode(struct socket *, struct adapter *,
    struct offload_settings *);
void set_ulp_mode(struct toepcb *, int);
int negative_advice(int);
int add_tid_to_history(struct adapter *, u_int);

/* t4_connect.c */
void t4_init_connect_cpl_handlers(void);
void t4_uninit_connect_cpl_handlers(void);
int t4_connect(struct toedev *, struct socket *, struct rtentry *,
    struct sockaddr *);
void act_open_failure_cleanup(struct adapter *, u_int, u_int);

/* t4_listen.c */
void t4_init_listen_cpl_handlers(void);
void t4_uninit_listen_cpl_handlers(void);
int t4_listen_start(struct toedev *, struct tcpcb *);
int t4_listen_stop(struct toedev *, struct tcpcb *);
void t4_syncache_added(struct toedev *, void *);
void t4_syncache_removed(struct toedev *, void *);
int t4_syncache_respond(struct toedev *, void *, struct mbuf *);
int do_abort_req_synqe(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
int do_abort_rpl_synqe(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
void t4_offload_socket(struct toedev *, void *, struct socket *);
void synack_failure_cleanup(struct adapter *, int);

/* t4_cpl_io.c */
void aiotx_init_toep(struct toepcb *);
int t4_aio_queue_aiotx(struct socket *, struct kaiocb *);
void t4_init_cpl_io_handlers(void);
void t4_uninit_cpl_io_handlers(void);
void send_abort_rpl(struct adapter *, struct sge_wrq *, int, int);
void send_flowc_wr(struct toepcb *, struct flowc_tx_params *);
void send_reset(struct adapter *, struct toepcb *, uint32_t);
int send_rx_credits(struct adapter *, struct toepcb *, int);
void send_rx_modulate(struct adapter *, struct toepcb *);
void make_established(struct toepcb *, uint32_t, uint32_t, uint16_t);
int t4_close_conn(struct adapter *, struct toepcb *);
void t4_rcvd(struct toedev *, struct tcpcb *);
void t4_rcvd_locked(struct toedev *, struct tcpcb *);
int t4_tod_output(struct toedev *, struct tcpcb *);
int t4_send_fin(struct toedev *, struct tcpcb *);
int t4_send_rst(struct toedev *, struct tcpcb *);
void t4_set_tcb_field(struct adapter *, struct sge_wrq *, struct toepcb *,
    uint16_t, uint64_t, uint64_t, int, int);
void t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop);
void t4_push_pdus(struct adapter *sc, struct toepcb *toep, int drop);

/* t4_ddp.c */
int t4_init_ppod_region(struct ppod_region *, struct t4_range *, u_int,
    const char *);
void t4_free_ppod_region(struct ppod_region *);
int t4_alloc_page_pods_for_ps(struct ppod_region *, struct pageset *);
int t4_alloc_page_pods_for_buf(struct ppod_region *, vm_offset_t, int,
    struct ppod_reservation *);
int t4_write_page_pods_for_ps(struct adapter *, struct sge_wrq *, int,
    struct pageset *);
int t4_write_page_pods_for_buf(struct adapter *, struct sge_wrq *, int tid,
    struct ppod_reservation *, vm_offset_t, int);
void t4_free_page_pods(struct ppod_reservation *);
int t4_soreceive_ddp(struct socket *, struct sockaddr **, struct uio *,
    struct mbuf **, struct mbuf **, int *);
int t4_aio_queue_ddp(struct socket *, struct kaiocb *);
void t4_ddp_mod_load(void);
void t4_ddp_mod_unload(void);
void ddp_assert_empty(struct toepcb *);
void ddp_init_toep(struct toepcb *);
void ddp_uninit_toep(struct toepcb *);
void ddp_queue_toep(struct toepcb *);
void release_ddp_resources(struct toepcb *toep);
void handle_ddp_close(struct toepcb *, struct tcpcb *, uint32_t);
void handle_ddp_indicate(struct toepcb *);
void insert_ddp_data(struct toepcb *, uint32_t);
const struct offload_settings *lookup_offload_policy(struct adapter *, int,
    struct mbuf *, uint16_t, struct inpcb *);

/* t4_tls.c */
bool can_tls_offload(struct adapter *);
int t4_ctloutput_tls(struct socket *, struct sockopt *);
void t4_push_tls_records(struct adapter *, struct toepcb *, int);
void t4_tls_mod_load(void);
void t4_tls_mod_unload(void);
void tls_establish(struct toepcb *);
void tls_init_toep(struct toepcb *);
int tls_rx_key(struct toepcb *);
void tls_stop_handshake_timer(struct toepcb *);
int tls_tx_key(struct toepcb *);
void tls_uninit_toep(struct toepcb *);

#endif