/*
 * Copyright (c) 2018-2019 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef __ECORE_IWARP_H__
#define __ECORE_IWARP_H__

enum ecore_iwarp_qp_state {
	ECORE_IWARP_QP_STATE_IDLE,
	ECORE_IWARP_QP_STATE_RTS,
	ECORE_IWARP_QP_STATE_TERMINATE,
	ECORE_IWARP_QP_STATE_CLOSING,
	ECORE_IWARP_QP_STATE_ERROR,
};

enum ecore_iwarp_listener_state {
	ECORE_IWARP_LISTENER_STATE_ACTIVE,
	ECORE_IWARP_LISTENER_STATE_UNPAUSE,
	ECORE_IWARP_LISTENER_STATE_PAUSE,
	ECORE_IWARP_LISTENER_STATE_DESTROYING,
};

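/* Map a RoCE QP state to the closest equivalent iWARP QP state. */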
enum ecore_iwarp_qp_state
ecore_roce2iwarp_state(enum ecore_roce_qp_state state);

#ifdef CONFIG_ECORE_IWARP

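/* Number of endpoint structures pre-allocated for passive connections,
 * sized to the maximum listener backlog.
 */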
#define ECORE_IWARP_PREALLOC_CNT	ECORE_IWARP_MAX_LIS_BACKLOG

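/* Buffer counts and maximum packet size for the LL2 connection that is
 * presumably used to intercept TCP SYN packets for passive connection
 * establishment, followed by the sizes for the out-of-order (OOO) LL2
 * connection.
 */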
#define ECORE_IWARP_LL2_SYN_TX_SIZE	(128)
#define ECORE_IWARP_LL2_SYN_RX_SIZE	(256)
#define ECORE_IWARP_MAX_SYN_PKT_SIZE	(128)

#define ECORE_IWARP_LL2_OOO_DEF_TX_SIZE	(256)
#define ECORE_MAX_OOO			(16)
#define ECORE_IWARP_LL2_OOO_MAX_RX_SIZE	(16384)

#define ECORE_IWARP_HANDLE_INVAL	(0xff)

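/* A single LL2 buffer: virtual address, DMA address and size, optionally
 * chained to a piggy-backed buffer.
 */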
struct ecore_iwarp_ll2_buff {
	struct ecore_iwarp_ll2_buff	*piggy_buf;
	void				*data;
	dma_addr_t			data_phys_addr;
	u32				buff_size;
};

struct ecore_iwarp_ll2_mpa_buf {
	osal_list_entry_t		list_entry;
	struct ecore_iwarp_ll2_buff	*ll2_buf;
	struct unaligned_opaque_data	data;
	u16				tcp_payload_len;
	u8				placement_offset;
};

/* In some cases an FPDU will arrive with only one byte of the header; in this
 * case fpdu_length will be partial (it contains only the higher byte) and
 * incomplete_bytes will hold the invalid value below.
 */
#define ECORE_IWARP_INVALID_INCOMPLETE_BYTES 0xffff

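/* Reassembly state for a partially received FPDU: the saved packet header
 * and the MPA fragment accumulated so far.
 */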
struct ecore_iwarp_fpdu {
	struct ecore_iwarp_ll2_buff	*mpa_buf;
	dma_addr_t			pkt_hdr;
	u8				pkt_hdr_size;
	dma_addr_t			mpa_frag;
	void				*mpa_frag_virt;
	u16				mpa_frag_len;
	u16				fpdu_length;
	u16				incomplete_bytes;
};

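/* Per-hwfn iWARP state: listener/endpoint lists, LL2 connection handles,
 * MPA negotiation settings and unaligned-MPA bookkeeping.
 */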
struct ecore_iwarp_info {
	osal_list_t			listen_list;	/* ecore_iwarp_listener */
	osal_list_t			ep_list;	/* ecore_iwarp_ep */
	osal_list_t			ep_free_list;	/* pre-allocated ep's */
	osal_list_t			mpa_buf_list;	/* list of mpa_bufs */
	osal_list_t			mpa_buf_pending_list;
	osal_spinlock_t			iw_lock;
	osal_spinlock_t			qp_lock;	/* for teardown races */
	struct iwarp_rxmit_stats_drv	stats;
	u32				rcv_wnd_scale;
	u16				rcv_wnd_size;
	u16				max_mtu;
	u16				num_ooo_rx_bufs;
	u8				mac_addr[ETH_ALEN];
	u8				crc_needed;
	u8				tcp_flags;
	u8				ll2_syn_handle;
	u8				ll2_ooo_handle;
	u8				ll2_mpa_handle;
	u8				peer2peer;
	u8				_pad;
	enum mpa_negotiation_mode	mpa_rev;
	enum mpa_rtr_type		rtr_type;
	struct ecore_iwarp_fpdu		*partial_fpdus;
	struct ecore_iwarp_ll2_mpa_buf	*mpa_bufs;
	u8				*mpa_intermediate_buf;
	u16				max_num_partial_fpdus;

	/* MPA statistics */
	u64				unalign_rx_comp;
};

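/* Connection-establishment states of an iWARP endpoint (ecore_iwarp_ep). */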
enum ecore_iwarp_ep_state {
	ECORE_IWARP_EP_INIT,
	ECORE_IWARP_EP_MPA_REQ_RCVD,
	ECORE_IWARP_EP_MPA_OFFLOADED,
	ECORE_IWARP_EP_ESTABLISHED,
	ECORE_IWARP_EP_CLOSED,
	ECORE_IWARP_EP_ABORTING
};

union async_output {
	struct iwarp_eqe_data_mpa_async_completion mpa_response;
	struct iwarp_eqe_data_tcp_async_completion mpa_request;
};

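/* DMA-able memory carved out per endpoint: MPA private data buffers (in and
 * out) plus the firmware's asynchronous completion output.
 */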
#define ECORE_MAX_PRIV_DATA_LEN (512)
struct ecore_iwarp_ep_memory {
	u8			in_pdata[ECORE_MAX_PRIV_DATA_LEN];
	u8			out_pdata[ECORE_MAX_PRIV_DATA_LEN];
	union async_output	async_output;
};

/* An endpoint represents a TCP connection. The connection may or may not be
 * associated with a QP (if it is not, qp == NULL).
 */
struct ecore_iwarp_ep {
	osal_list_entry_t		list_entry;
	int				sig;
	struct ecore_rdma_qp		*qp;
	enum ecore_iwarp_ep_state	state;

	/* This contains the entire buffer required for the ep memories. It is
	 * the only allocation actually made and freed; the rest are pointers
	 * into this buffer.
	 */
	struct ecore_iwarp_ep_memory	*ep_buffer_virt;
	dma_addr_t			ep_buffer_phys;

	struct ecore_iwarp_cm_info	cm_info;
	struct ecore_iwarp_listener	*listener;
	enum tcp_connect_mode		connect_mode;
	enum mpa_rtr_type		rtr_type;
	enum mpa_negotiation_mode	mpa_rev;
	u32				tcp_cid;
	u32				cid;
	u8				remote_mac_addr[6];
	u8				local_mac_addr[6];
	u16				mss;
	bool				mpa_reply_processed;

	/* The event_cb function is called for asynchronous events associated
	 * with the ep. It is initialized at different entry points depending
	 * on whether the ep is the active or the passive side of the TCP
	 * connection. The cb_context is passed to the event_cb function.
	 */
	iwarp_event_handler		event_cb;
	void				*cb_context;

	/* For the passive side - SYN packet related data */
	struct ecore_iwarp_ll2_buff	*syn;
	u16				syn_ip_payload_length;
	dma_addr_t			syn_phy_addr;
};

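/* A listening endpoint: accepts incoming connection requests that match the
 * given IP address, port and VLAN, and keeps the endpoints created from it
 * on ep_list.
 */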
struct ecore_iwarp_listener {
	osal_list_entry_t	list_entry;

	/* The event_cb function is called for connection requests.
	 * The cb_context is passed to the event_cb function.
	 */
	iwarp_event_handler	event_cb;
	void			*cb_context;
	osal_list_t		ep_list;
	osal_spinlock_t		lock;
	u32			max_backlog;
	u8			ip_version;
	u32			ip_addr[4];
	u16			port;
	u16			vlan;
	bool			drop;
	bool			done;
	enum ecore_iwarp_listener_state	state;
};

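/* iWARP setup/teardown and QP management entry points, available when
 * CONFIG_ECORE_IWARP is defined.
 */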
enum _ecore_status_t
ecore_iwarp_alloc(struct ecore_hwfn *p_hwfn);

enum _ecore_status_t
ecore_iwarp_setup(struct ecore_hwfn *p_hwfn,
		  struct ecore_rdma_start_in_params *params);

void
ecore_iwarp_init_fw_ramrod(struct ecore_hwfn *p_hwfn,
			   struct iwarp_init_func_ramrod_data *p_ramrod);

enum _ecore_status_t
ecore_iwarp_stop(struct ecore_hwfn *p_hwfn);

void
ecore_iwarp_resc_free(struct ecore_hwfn *p_hwfn);

void
ecore_iwarp_init_devinfo(struct ecore_hwfn *p_hwfn);

enum _ecore_status_t
ecore_iwarp_init_hw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);

enum _ecore_status_t
ecore_iwarp_create_qp(struct ecore_hwfn *p_hwfn,
		      struct ecore_rdma_qp *qp,
		      struct ecore_rdma_create_qp_out_params *out_params);

enum _ecore_status_t
ecore_iwarp_modify_qp(struct ecore_hwfn *p_hwfn,
		      struct ecore_rdma_qp *qp,
		      enum ecore_iwarp_qp_state new_state,
		      bool internal);

enum _ecore_status_t
ecore_iwarp_destroy_qp(struct ecore_hwfn *p_hwfn,
		       struct ecore_rdma_qp *qp);

enum _ecore_status_t
ecore_iwarp_fw_destroy(struct ecore_hwfn *p_hwfn,
		       struct ecore_rdma_qp *qp);

enum _ecore_status_t
ecore_iwarp_query_qp(struct ecore_rdma_qp *qp,
		     struct ecore_rdma_query_qp_out_params *out_params);

#else

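/* CONFIG_ECORE_IWARP is not defined: provide no-op stubs that report
 * success so callers need no conditional compilation.
 */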
static OSAL_INLINE enum _ecore_status_t
ecore_iwarp_alloc(struct ecore_hwfn OSAL_UNUSED *p_hwfn)
{
	return ECORE_SUCCESS;
}

static OSAL_INLINE enum _ecore_status_t
ecore_iwarp_setup(struct ecore_hwfn OSAL_UNUSED *p_hwfn,
		  struct ecore_rdma_start_in_params OSAL_UNUSED *params)
{
	return ECORE_SUCCESS;
}

static OSAL_INLINE void
ecore_iwarp_init_fw_ramrod(struct ecore_hwfn OSAL_UNUSED *p_hwfn,
			   struct iwarp_init_func_ramrod_data OSAL_UNUSED *p_ramrod)
{
}

static OSAL_INLINE enum _ecore_status_t
ecore_iwarp_stop(struct ecore_hwfn OSAL_UNUSED *p_hwfn)
{
	return ECORE_SUCCESS;
}

static OSAL_INLINE void
ecore_iwarp_resc_free(struct ecore_hwfn OSAL_UNUSED *p_hwfn)
{
}

static OSAL_INLINE void
ecore_iwarp_init_devinfo(struct ecore_hwfn OSAL_UNUSED *p_hwfn)
{
}

static OSAL_INLINE enum _ecore_status_t
ecore_iwarp_init_hw(struct ecore_hwfn OSAL_UNUSED *p_hwfn,
		    struct ecore_ptt OSAL_UNUSED *p_ptt)
{
	return ECORE_SUCCESS;
}

static OSAL_INLINE enum _ecore_status_t
ecore_iwarp_create_qp(struct ecore_hwfn OSAL_UNUSED *p_hwfn,
		      struct ecore_rdma_qp OSAL_UNUSED *qp,
		      struct ecore_rdma_create_qp_out_params OSAL_UNUSED *out_params)
{
	return ECORE_SUCCESS;
}

static OSAL_INLINE enum _ecore_status_t
ecore_iwarp_modify_qp(struct ecore_hwfn OSAL_UNUSED *p_hwfn,
		      struct ecore_rdma_qp OSAL_UNUSED *qp,
		      enum ecore_iwarp_qp_state OSAL_UNUSED new_state,
		      bool OSAL_UNUSED internal)
{
	return ECORE_SUCCESS;
}

static OSAL_INLINE enum _ecore_status_t
ecore_iwarp_destroy_qp(struct ecore_hwfn OSAL_UNUSED *p_hwfn,
		       struct ecore_rdma_qp OSAL_UNUSED *qp)
{
	return ECORE_SUCCESS;
}

static OSAL_INLINE enum _ecore_status_t
ecore_iwarp_fw_destroy(struct ecore_hwfn OSAL_UNUSED *p_hwfn,
		       struct ecore_rdma_qp OSAL_UNUSED *qp)
{
	return ECORE_SUCCESS;
}

static OSAL_INLINE enum _ecore_status_t
ecore_iwarp_query_qp(struct ecore_rdma_qp OSAL_UNUSED *qp,
		     struct ecore_rdma_query_qp_out_params OSAL_UNUSED *out_params)
{
	return ECORE_SUCCESS;
}

#endif /* CONFIG_ECORE_IWARP */
#endif /* __ECORE_IWARP_H__ */