/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2016 Chelsio Communications, Inc.
 */

#ifndef __CXGBIT_H__
#define __CXGBIT_H__

#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/completion.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/inet.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/timer.h>
#include <linux/io.h>

#include <asm/byteorder.h>

#include <net/net_namespace.h>

#include <target/iscsi/iscsi_transport.h>
#include <iscsi_target_parameters.h>
#include <iscsi_target_login.h>

#include "t4_regs.h"
#include "t4_msg.h"
#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "l2t.h"
#include "libcxgb_ppm.h"
#include "cxgbit_lro.h"

extern struct mutex cdev_list_lock;
extern struct list_head cdev_list_head;
struct cxgbit_np;

struct cxgbit_sock;

struct cxgbit_cmd {
	struct scatterlist sg;
	struct cxgbi_task_tag_info ttinfo;
	bool setup_ddp;
	bool release;
};

#define CXGBIT_MAX_ISO_PAYLOAD	\
	min_t(u32, MAX_SKB_FRAGS * PAGE_SIZE, 65535)
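/*
 * An ISO burst is built in a single skb, so its payload is capped at the
 * smaller of the skb page-fragment capacity (MAX_SKB_FRAGS * PAGE_SIZE)
 * and 65535, the largest value a 16-bit length field can carry.
 */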

struct cxgbit_iso_info {
	u8 flags;
	u32 mpdu;
	u32 len;
	u32 burst_len;
};

enum cxgbit_skcb_flags {
	SKCBF_TX_NEED_HDR	= (1 << 0), /* packet needs a header */
	SKCBF_TX_FLAG_COMPL	= (1 << 1), /* wr completion flag */
	SKCBF_TX_ISO		= (1 << 2), /* iso cpl in tx skb */
	SKCBF_RX_LRO		= (1 << 3), /* lro skb */
};

struct cxgbit_skb_rx_cb {
	u8 opcode;
	void *pdu_cb;
	void (*backlog_fn)(struct cxgbit_sock *, struct sk_buff *);
};

struct cxgbit_skb_tx_cb {
	u8 submode;
	u32 extra_len;
};

union cxgbit_skb_cb {
	struct {
		u8 flags;
		union {
			struct cxgbit_skb_tx_cb tx;
			struct cxgbit_skb_rx_cb rx;
		};
	};

	struct {
		/* This member must be first. */
		struct l2t_skb_cb l2t;
		struct sk_buff *wr_next;
	};
};

#define CXGBIT_SKB_CB(skb)	((union cxgbit_skb_cb *)&((skb)->cb[0]))
#define cxgbit_skcb_flags(skb)		(CXGBIT_SKB_CB(skb)->flags)
#define cxgbit_skcb_submode(skb)	(CXGBIT_SKB_CB(skb)->tx.submode)
#define cxgbit_skcb_tx_wr_next(skb)	(CXGBIT_SKB_CB(skb)->wr_next)
#define cxgbit_skcb_tx_extralen(skb)	(CXGBIT_SKB_CB(skb)->tx.extra_len)
#define cxgbit_skcb_rx_opcode(skb)	(CXGBIT_SKB_CB(skb)->rx.opcode)
#define cxgbit_skcb_rx_backlog_fn(skb)	(CXGBIT_SKB_CB(skb)->rx.backlog_fn)
#define cxgbit_rx_pdu_cb(skb)		(CXGBIT_SKB_CB(skb)->rx.pdu_cb)

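/*
 * The macros above overlay union cxgbit_skb_cb on skb->cb. A minimal
 * sketch of how a TX path might tag an skb before handing it to the
 * driver (illustrative only; the real producers live in cxgbit_target.c):
 *
 *	cxgbit_skcb_flags(skb) |= SKCBF_TX_NEED_HDR;
 *	cxgbit_skcb_submode(skb) = csk->submode;
 *	cxgbit_skcb_tx_extralen(skb) = extra_len;
 *
 * Keeping the l2t member first lets the shared L2T code reuse the same
 * cb area while the skb sits on the pending-WR list via wr_next.
 */
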
static inline void *cplhdr(struct sk_buff *skb)
{
	return skb->data;
}

enum cxgbit_cdev_flags {
	CDEV_STATE_UP = 0,
	CDEV_ISO_ENABLE,
	CDEV_DDP_ENABLE,
};

#define NP_INFO_HASH_SIZE 32

struct np_info {
	struct np_info *next;
	struct cxgbit_np *cnp;
	unsigned int stid;
};

struct cxgbit_list_head {
	struct list_head list;
	/* device lock */
	spinlock_t lock;
};

struct cxgbit_device {
	struct list_head list;
	struct cxgb4_lld_info lldi;
	struct np_info *np_hash_tab[NP_INFO_HASH_SIZE];
	/* np lock */
	spinlock_t np_lock;
	u8 selectq[MAX_NPORTS][2];
	struct cxgbit_list_head cskq;
	u32 mdsl;
	struct kref kref;
	unsigned long flags;
};

struct cxgbit_wr_wait {
	struct completion completion;
	int ret;
};

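/*
 * Connection states. A rough lifecycle sketch (the exact transitions are
 * driven by CPL events in cxgbit_cm.c):
 *
 *	IDLE/LISTEN -> CONNECTING -> ESTABLISHED
 *	ESTABLISHED -> CLOSING -> MORIBUND -> DEAD	(orderly close)
 *	ESTABLISHED -> ABORTING -> DEAD			(abortive close)
 */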
enum cxgbit_csk_state {
	CSK_STATE_IDLE = 0,
	CSK_STATE_LISTEN,
	CSK_STATE_CONNECTING,
	CSK_STATE_ESTABLISHED,
	CSK_STATE_ABORTING,
	CSK_STATE_CLOSING,
	CSK_STATE_MORIBUND,
	CSK_STATE_DEAD,
};

enum cxgbit_csk_flags {
	CSK_TX_DATA_SENT = 0,
	CSK_LOGIN_PDU_DONE,
	CSK_LOGIN_DONE,
	CSK_DDP_ENABLE,
	CSK_ABORT_RPL_WAIT,
};

struct cxgbit_sock_common {
	struct cxgbit_device *cdev;
	struct sockaddr_storage local_addr;
	struct sockaddr_storage remote_addr;
	struct cxgbit_wr_wait wr_wait;
	enum cxgbit_csk_state state;
	unsigned long flags;
};

struct cxgbit_np {
	struct cxgbit_sock_common com;
	wait_queue_head_t accept_wait;
	struct iscsi_np *np;
	struct completion accept_comp;
	struct list_head np_accept_list;
	/* np accept lock */
	spinlock_t np_accept_lock;
	struct kref kref;
	unsigned int stid;
};

struct cxgbit_sock {
	struct cxgbit_sock_common com;
	struct cxgbit_np *cnp;
	struct iscsit_conn *conn;
	struct l2t_entry *l2t;
	struct dst_entry *dst;
	struct list_head list;
	struct sk_buff_head rxq;
	struct sk_buff_head txq;
	struct sk_buff_head ppodq;
	struct sk_buff_head backlogq;
	struct sk_buff_head skbq;
	struct sk_buff *wr_pending_head;
	struct sk_buff *wr_pending_tail;
	struct sk_buff *skb;
	struct sk_buff *lro_skb;
	struct sk_buff *lro_hskb;
	struct list_head accept_node;
	/* socket lock */
	spinlock_t lock;
	wait_queue_head_t waitq;
	bool lock_owner;
	struct kref kref;
	u32 max_iso_npdu;
	u32 wr_cred;
	u32 wr_una_cred;
	u32 wr_max_cred;
	u32 snd_una;
	u32 tid;
	u32 snd_nxt;
	u32 rcv_nxt;
	u32 smac_idx;
	u32 tx_chan;
	u32 mtu;
	u32 write_seq;
	u32 rx_credits;
	u32 snd_win;
	u32 rcv_win;
	u16 mss;
	u16 emss;
	u16 plen;
	u16 rss_qid;
	u16 txq_idx;
	u16 ctrlq_idx;
	u8 tos;
	u8 port_id;
#define CXGBIT_SUBMODE_HCRC 0x1
#define CXGBIT_SUBMODE_DCRC 0x2
	u8 submode;
#ifdef CONFIG_CHELSIO_T4_DCB
	u8 dcb_priority;
#endif
	u8 snd_wscale;
};
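
/*
 * Informal note on the fields above: wr_cred/wr_una_cred/wr_max_cred
 * track work-request credits granted by the hardware TX queue, while
 * snd_una/snd_nxt/rcv_nxt mirror the TCP sequence state of the offloaded
 * connection; the authoritative users are cxgbit_cm.c and
 * cxgbit_target.c.
 */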

void _cxgbit_free_cdev(struct kref *kref);
void _cxgbit_free_csk(struct kref *kref);
void _cxgbit_free_cnp(struct kref *kref);

static inline void cxgbit_get_cdev(struct cxgbit_device *cdev)
{
	kref_get(&cdev->kref);
}

static inline void cxgbit_put_cdev(struct cxgbit_device *cdev)
{
	kref_put(&cdev->kref, _cxgbit_free_cdev);
}

static inline void cxgbit_get_csk(struct cxgbit_sock *csk)
{
	kref_get(&csk->kref);
}

static inline void cxgbit_put_csk(struct cxgbit_sock *csk)
{
	kref_put(&csk->kref, _cxgbit_free_csk);
}

static inline void cxgbit_get_cnp(struct cxgbit_np *cnp)
{
	kref_get(&cnp->kref);
}

static inline void cxgbit_put_cnp(struct cxgbit_np *cnp)
{
	kref_put(&cnp->kref, _cxgbit_free_cnp);
}
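
/*
 * Each get/put pair above pins one of the three refcounted objects so it
 * cannot be freed mid-operation. A minimal sketch (hypothetical caller):
 *
 *	cxgbit_get_csk(csk);	// hold a reference across async work
 *	...
 *	cxgbit_put_csk(csk);	// last put invokes _cxgbit_free_csk()
 */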

static inline void cxgbit_sock_reset_wr_list(struct cxgbit_sock *csk)
{
	csk->wr_pending_tail = NULL;
	csk->wr_pending_head = NULL;
}

static inline struct sk_buff *cxgbit_sock_peek_wr(const struct cxgbit_sock *csk)
{
	return csk->wr_pending_head;
}

static inline void
cxgbit_sock_enqueue_wr(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	cxgbit_skcb_tx_wr_next(skb) = NULL;

	skb_get(skb);

	if (!csk->wr_pending_head)
		csk->wr_pending_head = skb;
	else
		cxgbit_skcb_tx_wr_next(csk->wr_pending_tail) = skb;
	csk->wr_pending_tail = skb;
}

static inline struct sk_buff *cxgbit_sock_dequeue_wr(struct cxgbit_sock *csk)
{
	struct sk_buff *skb = csk->wr_pending_head;

	if (likely(skb)) {
		csk->wr_pending_head = cxgbit_skcb_tx_wr_next(skb);
		cxgbit_skcb_tx_wr_next(skb) = NULL;
	}
	return skb;
}
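
/*
 * Pending work requests form a singly linked list threaded through each
 * skb's cb via cxgbit_skcb_tx_wr_next(). A sketch of the intended
 * pattern (illustrative; the real flow lives in cxgbit_cm.c):
 *
 *	cxgbit_sock_enqueue_wr(csk, skb);	// before posting the WR
 *	...
 *	// on a firmware ACK, retire completed WRs:
 *	while ((skb = cxgbit_sock_dequeue_wr(csk)))
 *		kfree_skb(skb);
 *
 * Note that enqueue takes its own reference with skb_get(), so the
 * dequeuing side is responsible for dropping it.
 */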

typedef void (*cxgbit_cplhandler_func)(struct cxgbit_device *,
				       struct sk_buff *);

int cxgbit_setup_np(struct iscsi_np *, struct sockaddr_storage *);
int cxgbit_setup_conn_digest(struct cxgbit_sock *);
int cxgbit_accept_np(struct iscsi_np *, struct iscsit_conn *);
void cxgbit_free_np(struct iscsi_np *);
void cxgbit_abort_conn(struct cxgbit_sock *csk);
void cxgbit_free_conn(struct iscsit_conn *);
extern cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS];
int cxgbit_get_login_rx(struct iscsit_conn *, struct iscsi_login *);
int cxgbit_rx_data_ack(struct cxgbit_sock *);
int cxgbit_l2t_send(struct cxgbit_device *, struct sk_buff *,
		    struct l2t_entry *);
void cxgbit_push_tx_frames(struct cxgbit_sock *);
int cxgbit_put_login_tx(struct iscsit_conn *, struct iscsi_login *, u32);
int cxgbit_xmit_pdu(struct iscsit_conn *, struct iscsit_cmd *,
		    struct iscsi_datain_req *, const void *, u32);
void cxgbit_get_r2t_ttt(struct iscsit_conn *, struct iscsit_cmd *,
			struct iscsi_r2t *);
u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *);
int cxgbit_ofld_send(struct cxgbit_device *, struct sk_buff *);
void cxgbit_get_rx_pdu(struct iscsit_conn *);
int cxgbit_validate_params(struct iscsit_conn *);
struct cxgbit_device *cxgbit_find_device(struct net_device *, u8 *);

/* DDP */
int cxgbit_ddp_init(struct cxgbit_device *);
int cxgbit_setup_conn_pgidx(struct cxgbit_sock *, u32);
int cxgbit_reserve_ttt(struct cxgbit_sock *, struct iscsit_cmd *);
void cxgbit_unmap_cmd(struct iscsit_conn *, struct iscsit_cmd *);

static inline
struct cxgbi_ppm *cdev2ppm(struct cxgbit_device *cdev)
{
	return (struct cxgbi_ppm *)(*cdev->lldi.iscsi_ppm);
}
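
/*
 * cdev2ppm() returns the page-pod manager (libcxgb_ppm) instance for this
 * adapter; cxgbit_ddp_init() and the tag helpers above use it to map
 * iSCSI tags onto DDP page pods.
 */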
#endif /* __CXGBIT_H__ */