/*
 * Copyright (c) 2018-2019 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __ECORE_IWARP_H__
#define __ECORE_IWARP_H__

enum ecore_iwarp_qp_state {
	ECORE_IWARP_QP_STATE_IDLE,
	ECORE_IWARP_QP_STATE_RTS,
	ECORE_IWARP_QP_STATE_TERMINATE,
	ECORE_IWARP_QP_STATE_CLOSING,
	ECORE_IWARP_QP_STATE_ERROR,
};

enum ecore_iwarp_listener_state {
	ECORE_IWARP_LISTENER_STATE_ACTIVE,
	ECORE_IWARP_LISTENER_STATE_UNPAUSE,
	ECORE_IWARP_LISTENER_STATE_PAUSE,
	ECORE_IWARP_LISTENER_STATE_DESTROYING,
};

enum ecore_iwarp_qp_state
ecore_roce2iwarp_state(enum ecore_roce_qp_state state);

#ifdef CONFIG_ECORE_IWARP

#define ECORE_IWARP_PREALLOC_CNT	ECORE_IWARP_MAX_LIS_BACKLOG

#define ECORE_IWARP_LL2_SYN_TX_SIZE	(128)
#define ECORE_IWARP_LL2_SYN_RX_SIZE	(256)
#define ECORE_IWARP_MAX_SYN_PKT_SIZE	(128)

#define ECORE_IWARP_LL2_OOO_DEF_TX_SIZE	(256)
#define ECORE_MAX_OOO			(16)
#define ECORE_IWARP_LL2_OOO_MAX_RX_SIZE	(16384)

#define ECORE_IWARP_HANDLE_INVAL	(0xff)

struct ecore_iwarp_ll2_buff {
	struct ecore_iwarp_ll2_buff	*piggy_buf;
	void				*data;
	dma_addr_t			data_phys_addr;
	u32				buff_size;
};

struct ecore_iwarp_ll2_mpa_buf {
	osal_list_entry_t		list_entry;
	struct ecore_iwarp_ll2_buff	*ll2_buf;
	struct unaligned_opaque_data	data;
	u16				tcp_payload_len;
	u8				placement_offset;
};

/* In some cases an fpdu will arrive with only one byte of the header; in that
 * case fpdu_length will be partial (contain only the higher byte) and
 * incomplete_bytes will hold the invalid value below.
 */
#define ECORE_IWARP_INVALID_INCOMPLETE_BYTES 0xffff

struct ecore_iwarp_fpdu {
	struct ecore_iwarp_ll2_buff	*mpa_buf;
	dma_addr_t			pkt_hdr;
	u8				pkt_hdr_size;
	dma_addr_t			mpa_frag;
	void				*mpa_frag_virt;
	u16				mpa_frag_len;
	u16				fpdu_length;
	u16				incomplete_bytes;
};
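
/* Illustrative only: a consumer of the partial-fpdu tracking above would treat
 * ECORE_IWARP_INVALID_INCOMPLETE_BYTES as "header not fully received yet".
 * A minimal sketch, using a hypothetical helper name that is not part of this
 * API:
 *
 *	static bool
 *	ecore_iwarp_fpdu_hdr_incomplete(const struct ecore_iwarp_fpdu *fpdu)
 *	{
 *		return (fpdu->incomplete_bytes ==
 *			ECORE_IWARP_INVALID_INCOMPLETE_BYTES);
 *	}
 */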

struct ecore_iwarp_info {
	osal_list_t			listen_list; /* ecore_iwarp_listener */
	osal_list_t			ep_list;     /* ecore_iwarp_ep */
	osal_list_t			ep_free_list; /* pre-allocated ep's */
	osal_list_t			mpa_buf_list; /* list of mpa_bufs */
	osal_list_t			mpa_buf_pending_list;
	osal_spinlock_t			iw_lock;
	osal_spinlock_t			qp_lock; /* for teardown races */
	struct iwarp_rxmit_stats_drv	stats;
	u32				rcv_wnd_scale;
	u16				rcv_wnd_size;
	u16				max_mtu;
	u16				num_ooo_rx_bufs;
	u8				mac_addr[ETH_ALEN];
	u8				crc_needed;
	u8				tcp_flags;
	u8				ll2_syn_handle;
	u8				ll2_ooo_handle;
	u8				ll2_mpa_handle;
	u8				peer2peer;
	u8				_pad;
	enum mpa_negotiation_mode	mpa_rev;
	enum mpa_rtr_type		rtr_type;
	struct ecore_iwarp_fpdu		*partial_fpdus;
	struct ecore_iwarp_ll2_mpa_buf	*mpa_bufs;
	u8				*mpa_intermediate_buf;
	u16				max_num_partial_fpdus;

	/* MPA statistics */
	u64				unalign_rx_comp;
};

enum ecore_iwarp_ep_state {
	ECORE_IWARP_EP_INIT,
	ECORE_IWARP_EP_MPA_REQ_RCVD,
	ECORE_IWARP_EP_MPA_OFFLOADED,
	ECORE_IWARP_EP_ESTABLISHED,
	ECORE_IWARP_EP_CLOSED,
	ECORE_IWARP_EP_ABORTING
};

union async_output {
	struct iwarp_eqe_data_mpa_async_completion mpa_response;
	struct iwarp_eqe_data_tcp_async_completion mpa_request;
};

#define ECORE_MAX_PRIV_DATA_LEN (512)
struct ecore_iwarp_ep_memory {
	u8			in_pdata[ECORE_MAX_PRIV_DATA_LEN];
	u8			out_pdata[ECORE_MAX_PRIV_DATA_LEN];
	union async_output	async_output;
};

/* The endpoint structure represents a TCP connection. The connection may or
 * may not be associated with a QP (if not, qp == NULL).
 */
struct ecore_iwarp_ep {
	osal_list_entry_t		list_entry;
	int				sig;
	struct ecore_rdma_qp		*qp;
	enum ecore_iwarp_ep_state	state;

	/* This contains the entire buffer required for the ep memories. It is
	 * the only allocation actually made and freed; the rest are pointers
	 * into this buffer.
	 */
	struct ecore_iwarp_ep_memory	*ep_buffer_virt;
	dma_addr_t			ep_buffer_phys;

	struct ecore_iwarp_cm_info	cm_info;
	struct ecore_iwarp_listener	*listener;
	enum tcp_connect_mode		connect_mode;
	enum mpa_rtr_type		rtr_type;
	enum mpa_negotiation_mode	mpa_rev;
	u32				tcp_cid;
	u32				cid;
	u8				remote_mac_addr[6];
	u8				local_mac_addr[6];
	u16				mss;
	bool				mpa_reply_processed;

	/* The event_cb function is called for asynchronous events associated
	 * with the ep. It is initialized at different entry points, depending
	 * on whether the ep is on the active or passive side of the TCP
	 * connection. The cb_context is passed to the event_cb function; see
	 * the illustrative note after this structure.
	 */
	iwarp_event_handler		event_cb;
	void				*cb_context;

	/* For Passive side - syn packet related data */
	struct ecore_iwarp_ll2_buff	*syn;
	u16				syn_ip_payload_length;
	dma_addr_t			syn_phy_addr;
};
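
/* Illustrative only: an ep may exist before MPA negotiation binds it to a QP
 * (qp == NULL, as noted above), and asynchronous notifications reach the
 * owner through event_cb with cb_context as its first argument. A minimal
 * dispatch sketch; the helper name and the event parameter type (expected to
 * come from the companion RDMA API header) are assumptions:
 *
 *	static void
 *	ecore_iwarp_notify_owner(struct ecore_iwarp_ep *ep,
 *				 struct ecore_iwarp_cm_event_params *params)
 *	{
 *		if (ep->event_cb != NULL)
 *			ep->event_cb(ep->cb_context, params);
 *	}
 */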

struct ecore_iwarp_listener {
	osal_list_entry_t	list_entry;

	/* The event_cb function is called for connection requests.
	 * The cb_context is passed to the event_cb function; see the
	 * illustrative lookup sketch after this structure.
	 */
	iwarp_event_handler	event_cb;
	void			*cb_context;
	osal_list_t		ep_list;
	osal_spinlock_t		lock;
	u32			max_backlog;
	u8			ip_version;
	u32			ip_addr[4];
	u16			port;
	u16			vlan;
	bool			drop;
	bool			done;
	enum ecore_iwarp_listener_state	state;
};
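
/* Illustrative only: an incoming connection request is matched to a listener
 * by address, port and VLAN before the listener's event_cb is invoked with
 * its cb_context. A minimal matching sketch (hypothetical helper; a real
 * lookup would also take ip_version and the listener state into account):
 *
 *	static bool
 *	ecore_iwarp_listener_matches(const struct ecore_iwarp_listener *l,
 *				     const u32 ip_addr[4], u16 port, u16 vlan)
 *	{
 *		return (l->port == port && l->vlan == vlan &&
 *			!memcmp(l->ip_addr, ip_addr, sizeof(l->ip_addr)));
 *	}
 */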

enum _ecore_status_t
ecore_iwarp_alloc(struct ecore_hwfn *p_hwfn);

enum _ecore_status_t
ecore_iwarp_setup(struct ecore_hwfn *p_hwfn,
		  struct ecore_rdma_start_in_params *params);

void
ecore_iwarp_init_fw_ramrod(struct ecore_hwfn *p_hwfn,
			   struct iwarp_init_func_ramrod_data *p_ramrod);

enum _ecore_status_t
ecore_iwarp_stop(struct ecore_hwfn *p_hwfn);

void
ecore_iwarp_resc_free(struct ecore_hwfn *p_hwfn);

void
ecore_iwarp_init_devinfo(struct ecore_hwfn *p_hwfn);

enum _ecore_status_t
ecore_iwarp_init_hw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);

enum _ecore_status_t
ecore_iwarp_create_qp(struct ecore_hwfn *p_hwfn,
		      struct ecore_rdma_qp *qp,
		      struct ecore_rdma_create_qp_out_params *out_params);

enum _ecore_status_t
ecore_iwarp_modify_qp(struct ecore_hwfn *p_hwfn,
		      struct ecore_rdma_qp *qp,
		      enum ecore_iwarp_qp_state new_state,
		      bool internal);

enum _ecore_status_t
ecore_iwarp_destroy_qp(struct ecore_hwfn *p_hwfn,
		       struct ecore_rdma_qp *qp);

enum _ecore_status_t
ecore_iwarp_fw_destroy(struct ecore_hwfn *p_hwfn,
		       struct ecore_rdma_qp *qp);

enum _ecore_status_t
ecore_iwarp_query_qp(struct ecore_rdma_qp *qp,
		     struct ecore_rdma_query_qp_out_params *out_params);
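
/* A minimal sketch of the presumed call order, inferred from the names above
 * (illustrative only; "p_hwfn", "p_ptt" and "params" are assumed to come from
 * the surrounding RDMA start path, and error handling is elided):
 *
 *	if (ecore_iwarp_alloc(p_hwfn) == ECORE_SUCCESS)
 *		(void)ecore_iwarp_setup(p_hwfn, params);
 *
 * and on teardown:
 *
 *	(void)ecore_iwarp_stop(p_hwfn);
 *	ecore_iwarp_resc_free(p_hwfn);
 */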

#else

static OSAL_INLINE enum _ecore_status_t
ecore_iwarp_alloc(struct ecore_hwfn OSAL_UNUSED *p_hwfn)
{
	return ECORE_SUCCESS;
}

static OSAL_INLINE enum _ecore_status_t
ecore_iwarp_setup(struct ecore_hwfn OSAL_UNUSED *p_hwfn,
		  struct ecore_rdma_start_in_params OSAL_UNUSED *params)
{
	return ECORE_SUCCESS;
}

static OSAL_INLINE void
ecore_iwarp_init_fw_ramrod(struct ecore_hwfn OSAL_UNUSED *p_hwfn,
			   struct iwarp_init_func_ramrod_data OSAL_UNUSED *p_ramrod)
{
}

static OSAL_INLINE enum _ecore_status_t
ecore_iwarp_stop(struct ecore_hwfn OSAL_UNUSED *p_hwfn)
{
	return ECORE_SUCCESS;
}

static OSAL_INLINE void
ecore_iwarp_resc_free(struct ecore_hwfn OSAL_UNUSED *p_hwfn)
{
}

static OSAL_INLINE void
ecore_iwarp_init_devinfo(struct ecore_hwfn OSAL_UNUSED *p_hwfn)
{
}

static OSAL_INLINE enum _ecore_status_t
ecore_iwarp_init_hw(struct ecore_hwfn OSAL_UNUSED *p_hwfn,
		    struct ecore_ptt OSAL_UNUSED *p_ptt)
{
	return ECORE_SUCCESS;
}

static OSAL_INLINE enum _ecore_status_t
ecore_iwarp_create_qp(struct ecore_hwfn OSAL_UNUSED *p_hwfn,
		      struct ecore_rdma_qp OSAL_UNUSED *qp,
		      struct ecore_rdma_create_qp_out_params OSAL_UNUSED *out_params)
{
	return ECORE_SUCCESS;
}

static OSAL_INLINE enum _ecore_status_t
ecore_iwarp_modify_qp(struct ecore_hwfn OSAL_UNUSED *p_hwfn,
		      struct ecore_rdma_qp OSAL_UNUSED *qp,
		      enum ecore_iwarp_qp_state OSAL_UNUSED new_state,
		      bool OSAL_UNUSED internal)
{
	return ECORE_SUCCESS;
}

static OSAL_INLINE enum _ecore_status_t
ecore_iwarp_destroy_qp(struct ecore_hwfn OSAL_UNUSED *p_hwfn,
		       struct ecore_rdma_qp OSAL_UNUSED *qp)
{
	return ECORE_SUCCESS;
}

static OSAL_INLINE enum _ecore_status_t
ecore_iwarp_fw_destroy(struct ecore_hwfn OSAL_UNUSED *p_hwfn,
		       struct ecore_rdma_qp OSAL_UNUSED *qp)
{
	return ECORE_SUCCESS;
}

static OSAL_INLINE enum _ecore_status_t
ecore_iwarp_query_qp(struct ecore_rdma_qp OSAL_UNUSED *qp,
		     struct ecore_rdma_query_qp_out_params OSAL_UNUSED *out_params)
{
	return ECORE_SUCCESS;
}

#endif /* CONFIG_ECORE_IWARP */
#endif /* __ECORE_IWARP_H__ */