/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifndef __ECORE_RDMA_API_H__
#define __ECORE_RDMA_API_H__

#ifndef ETH_ALEN
#define ETH_ALEN 6
#endif

enum ecore_roce_ll2_tx_dest
{
	ECORE_ROCE_LL2_TX_DEST_NW /* Light L2 TX Destination to the Network */,
	ECORE_ROCE_LL2_TX_DEST_LB /* Light L2 TX Destination to the Loopback */,
	ECORE_ROCE_LL2_TX_DEST_MAX
};

/* HW/FW RoCE Limitations (external; for internal limitations see ecore_roce.h) */
/* CNQ size limitation:
 * The CNQ size should be set to twice the number of CQs, since for each CQ one
 * element may be inserted into the CNQ and another element is used per CQ to
 * accommodate a possible race in the arm mechanism.
 * The FW supports a CNQ of at most 64k-1 elements, which can become an issue:
 * note that the number of QPs can reach 32k, giving 64k CQs and 128k CNQ
 * elements. Fortunately, the FW can buffer CNQ elements to avoid an overflow,
 * at the expense of performance.
 */
#define ECORE_RDMA_MAX_CNQ_SIZE               (0xFFFF) /* 2^16 - 1 */
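
/*
 * A minimal sizing sketch following the rule above: twice the number of
 * CQs, clamped to the FW limit.  The variable names are illustrative, not
 * part of this API:
 *
 *	u32 num_cqs = 2 * num_qps;	(e.g. one send and one recv CQ per QP)
 *	u32 cnq_size = 2 * num_cqs;
 *	if (cnq_size > ECORE_RDMA_MAX_CNQ_SIZE)
 *		cnq_size = ECORE_RDMA_MAX_CNQ_SIZE;
 */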

/* rdma interface */
enum ecore_rdma_tid_type
{
	ECORE_RDMA_TID_REGISTERED_MR,
	ECORE_RDMA_TID_FMR,
	ECORE_RDMA_TID_MW_TYPE1,
	ECORE_RDMA_TID_MW_TYPE2A
};

enum ecore_roce_qp_state {
	ECORE_ROCE_QP_STATE_RESET, /* Reset */
	ECORE_ROCE_QP_STATE_INIT,  /* Initialized */
	ECORE_ROCE_QP_STATE_RTR,   /* Ready to Receive */
	ECORE_ROCE_QP_STATE_RTS,   /* Ready to Send */
	ECORE_ROCE_QP_STATE_SQD,   /* Send Queue Draining */
	ECORE_ROCE_QP_STATE_ERR,   /* Error */
	ECORE_ROCE_QP_STATE_SQE    /* Send Queue Error */
};

typedef
void (*affiliated_event_t)(void	*context,
			   u8	fw_event_code,
			   void	*fw_handle);

typedef
void (*unaffiliated_event_t)(void *context,
			     u8   event_code);

struct ecore_rdma_events {
	void			*context;
	affiliated_event_t	affiliated_event;
	unaffiliated_event_t	unaffiliated_event;
};
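
/*
 * A hedged sketch of wiring up the callbacks; the handler names and the
 * 'softc' context below are illustrative, not part of this API:
 *
 *	static void my_affiliated_ev(void *context, u8 fw_event_code,
 *				     void *fw_handle) { ... }
 *	static void my_unaffiliated_ev(void *context, u8 event_code) { ... }
 *
 *	struct ecore_rdma_events events = {
 *		.context            = softc,
 *		.affiliated_event   = my_affiliated_ev,
 *		.unaffiliated_event = my_unaffiliated_ev,
 *	};
 */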

struct ecore_rdma_device {
	/* Vendor specific information */
	u32	vendor_id;
	u32	vendor_part_id;
	u32	hw_ver;
	u64	fw_ver;

	u64	node_guid; /* node GUID */
	u64	sys_image_guid; /* System image GUID */

	u8	max_cnq;
	u8	max_sge; /* The maximum number of scatter/gather entries
			  * per Work Request supported
			  */
	u8	max_srq_sge; /* The maximum number of scatter/gather entries
			      * per Work Request supported for SRQ
			      */
	u16	max_inline;
	u32	max_wqe; /* The maximum number of outstanding work
			  * requests on any Work Queue supported
			  */
	u32	max_srq_wqe; /* The maximum number of outstanding work
			      * requests on any Work Queue supported for SRQ
			      */
	u8	max_qp_resp_rd_atomic_resc; /* The maximum number of RDMA Reads
					     * & atomic operations that can be
					     * outstanding per QP
					     */

	u8	max_qp_req_rd_atomic_resc; /* The maximum depth per QP for
					    * initiation of RDMA Read
					    * & atomic operations
					    */
	u64	max_dev_resp_rd_atomic_resc;
	u32	max_cq;
	u32	max_qp;
	u32	max_srq; /* Maximum number of SRQs */
	u32	max_mr; /* Maximum number of MRs supported by this device */
	u64	max_mr_size; /* Size (in bytes) of the largest contiguous memory
			      * block that can be registered by this device
			      */
	u32	max_cqe;
	u32	max_mw; /* The maximum number of memory windows supported */
	u32	max_fmr;
	u32	max_mr_mw_fmr_pbl;
	u64	max_mr_mw_fmr_size;
	u32	max_pd; /* The maximum number of protection domains supported */
	u32	max_ah;
	u8	max_pkey;
	u16	max_srq_wr; /* Maximum number of WRs per SRQ */
	u8	max_stats_queues; /* Maximum number of statistics queues */
	u32	dev_caps;

	/* Ability to support RNR-NAK generation */
#define ECORE_RDMA_DEV_CAP_RNR_NAK_MASK				0x1
#define ECORE_RDMA_DEV_CAP_RNR_NAK_SHIFT			0
	/* Ability to support shutdown port */
#define ECORE_RDMA_DEV_CAP_SHUTDOWN_PORT_MASK			0x1
#define ECORE_RDMA_DEV_CAP_SHUTDOWN_PORT_SHIFT			1
	/* Ability to support port active event */
#define ECORE_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_MASK		0x1
#define ECORE_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_SHIFT		2
	/* Ability to support port change event */
#define ECORE_RDMA_DEV_CAP_PORT_CHANGE_EVENT_MASK		0x1
#define ECORE_RDMA_DEV_CAP_PORT_CHANGE_EVENT_SHIFT		3
	/* Ability to support system image GUID */
#define ECORE_RDMA_DEV_CAP_SYS_IMAGE_MASK			0x1
#define ECORE_RDMA_DEV_CAP_SYS_IMAGE_SHIFT			4
	/* Ability to support a bad P_Key counter */
#define ECORE_RDMA_DEV_CAP_BAD_PKEY_CNT_MASK			0x1
#define ECORE_RDMA_DEV_CAP_BAD_PKEY_CNT_SHIFT			5
	/* Ability to support atomic operations */
#define ECORE_RDMA_DEV_CAP_ATOMIC_OP_MASK			0x1
#define ECORE_RDMA_DEV_CAP_ATOMIC_OP_SHIFT			6
#define ECORE_RDMA_DEV_CAP_RESIZE_CQ_MASK			0x1
#define ECORE_RDMA_DEV_CAP_RESIZE_CQ_SHIFT			7
	/* Ability to support modifying the maximum number of
	 * outstanding work requests per QP
	 */
#define ECORE_RDMA_DEV_CAP_RESIZE_MAX_WR_MASK			0x1
#define ECORE_RDMA_DEV_CAP_RESIZE_MAX_WR_SHIFT			8
	/* Ability to support automatic path migration */
#define ECORE_RDMA_DEV_CAP_AUTO_PATH_MIG_MASK			0x1
#define ECORE_RDMA_DEV_CAP_AUTO_PATH_MIG_SHIFT			9
	/* Ability to support the base memory management extensions */
#define ECORE_RDMA_DEV_CAP_BASE_MEMORY_EXT_MASK			0x1
#define ECORE_RDMA_DEV_CAP_BASE_MEMORY_EXT_SHIFT		10
#define ECORE_RDMA_DEV_CAP_BASE_QUEUE_EXT_MASK			0x1
#define ECORE_RDMA_DEV_CAP_BASE_QUEUE_EXT_SHIFT			11
	/* Ability to support multiple page sizes per memory region */
#define ECORE_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_MASK		0x1
#define ECORE_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_SHIFT		12
	/* Ability to support a block list physical buffer list */
#define ECORE_RDMA_DEV_CAP_BLOCK_MODE_MASK			0x1
#define ECORE_RDMA_DEV_CAP_BLOCK_MODE_SHIFT			13
	/* Ability to support zero based virtual addresses */
#define ECORE_RDMA_DEV_CAP_ZBVA_MASK				0x1
#define ECORE_RDMA_DEV_CAP_ZBVA_SHIFT				14
	/* Ability to support local invalidate fencing */
#define ECORE_RDMA_DEV_CAP_LOCAL_INV_FENCE_MASK			0x1
#define ECORE_RDMA_DEV_CAP_LOCAL_INV_FENCE_SHIFT		15
	/* Ability to support loopback on QP */
#define ECORE_RDMA_DEV_CAP_LB_INDICATOR_MASK			0x1
#define ECORE_RDMA_DEV_CAP_LB_INDICATOR_SHIFT			16
	u64	page_size_caps;
	u8	dev_ack_delay;
	u32	reserved_lkey; /* Value of reserved L_key */
	u32	bad_pkey_counter; /* Bad P_key counter support indicator */
	struct ecore_rdma_events events;
};
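
/*
 * dev_caps is a bitmask queried with the MASK/SHIFT pairs above.  A sketch,
 * assuming 'rdma_cxt' is a valid RDMA context (see ecore_rdma_query_device
 * below):
 *
 *	struct ecore_rdma_device *dev = ecore_rdma_query_device(rdma_cxt);
 *	bool atomics_supported =
 *	    (dev->dev_caps >> ECORE_RDMA_DEV_CAP_ATOMIC_OP_SHIFT) &
 *	    ECORE_RDMA_DEV_CAP_ATOMIC_OP_MASK;
 */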

enum ecore_port_state {
	ECORE_RDMA_PORT_UP,
	ECORE_RDMA_PORT_DOWN,
};

enum ecore_roce_capability {
	ECORE_ROCE_V1	= 1 << 0,
	ECORE_ROCE_V2	= 1 << 1,
};

struct ecore_rdma_port {
	enum ecore_port_state port_state;
	int	link_speed;
	u64	max_msg_size;
	u8	source_gid_table_len;
	void	*source_gid_table_ptr;
	u8	pkey_table_len;
	void	*pkey_table_ptr;
	u32	pkey_bad_counter;
	enum ecore_roce_capability capability;
};

struct ecore_rdma_cnq_params
{
	u8  num_pbl_pages; /* Number of pages in the PBL allocated
			    * for this queue
			    */
	u64 pbl_ptr; /* Address of the first entry of the queue PBL */
};

/* The CQ Mode affects the CQ doorbell transaction size.
 * 64/32 bit machines should configure to 32/16 bits respectively.
 */
enum ecore_rdma_cq_mode {
	ECORE_RDMA_CQ_MODE_16_BITS,
	ECORE_RDMA_CQ_MODE_32_BITS,
};
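
/*
 * Following the note above, a hypothetical compile-time selection (the
 * 'roce_params' variable refers to a struct ecore_roce_params, defined
 * below):
 *
 *	#ifdef __LP64__
 *		roce_params.cq_mode = ECORE_RDMA_CQ_MODE_32_BITS;
 *	#else
 *		roce_params.cq_mode = ECORE_RDMA_CQ_MODE_16_BITS;
 *	#endif
 */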

struct ecore_roce_dcqcn_params {
	u8	notification_point;
	u8	reaction_point;

	/* fields for notification point */
	u32	cnp_send_timeout;

	/* fields for reaction point */
	u32	rl_bc_rate;  /* Byte Counter Limit. */
	u16	rl_max_rate; /* Maximum rate in 1.6 Mbps resolution */
	u16	rl_r_ai;     /* Active increase rate */
	u16	rl_r_hai;    /* Hyper active increase rate */
	u16	dcqcn_g;     /* Alpha update gain in 1/64K resolution */
	u32	dcqcn_k_us;  /* Alpha update interval */
	u32	dcqcn_timeout_us;
};

#ifdef CONFIG_ECORE_IWARP

#define ECORE_MPA_RTR_TYPE_NONE		0 /* No RTR type */
#define ECORE_MPA_RTR_TYPE_ZERO_SEND	(1 << 0)
#define ECORE_MPA_RTR_TYPE_ZERO_WRITE	(1 << 1)
#define ECORE_MPA_RTR_TYPE_ZERO_READ	(1 << 2)

enum ecore_mpa_rev {
	ECORE_MPA_REV1,
	ECORE_MPA_REV2,
};

struct ecore_iwarp_params {
	u32				rcv_wnd_size;
	u16				ooo_num_rx_bufs;
#define ECORE_IWARP_TS_EN (1 << 0)
#define ECORE_IWARP_DA_EN (1 << 1)
	u8				flags;
	u8				crc_needed;
	enum ecore_mpa_rev		mpa_rev;
	u8				mpa_rtr;
	u8				mpa_peer2peer;
};

#endif

struct ecore_roce_params {
	enum ecore_rdma_cq_mode		cq_mode;
	struct ecore_roce_dcqcn_params	dcqcn_params;
	u8				ll2_handle; /* required for UD QPs */
};

struct ecore_rdma_start_in_params {
	struct ecore_rdma_events	*events;
	struct ecore_rdma_cnq_params	cnq_pbl_list[128];
	u8				desired_cnq;
	u16				max_mtu;
	u8				mac_addr[ETH_ALEN];
#ifdef CONFIG_ECORE_IWARP
	struct ecore_iwarp_params	iwarp;
#endif
	struct ecore_roce_params	roce;
};

struct ecore_rdma_add_user_out_params {
	/* output variables (given to miniport) */
	u16	dpi;
	u64	dpi_addr;
	u64	dpi_phys_addr;
	u32	dpi_size;
	u16	wid_count;
};

/* Returns the CQ CID or zero in case of failure */
struct ecore_rdma_create_cq_in_params {
	/* input variables (given by miniport) */
	u32	cq_handle_lo; /* CQ handle to be written in CNQ */
	u32	cq_handle_hi;
	u32	cq_size;
	u16	dpi;
	bool	pbl_two_level;
	u64	pbl_ptr;
	u16	pbl_num_pages;
	u8	pbl_page_size_log; /* for the pages that contain the
				    * pointers to the CQ pages
				    */
	u8	cnq_id;
	u16	int_timeout;
};
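
/*
 * The 64-bit CQ handle is split across cq_handle_lo/hi, e.g. to get a
 * driver CQ pointer back from the CNQ (hypothetical 'my_cq' pointer):
 *
 *	params.cq_handle_lo = (u32)(u64)(uintptr_t)my_cq;
 *	params.cq_handle_hi = (u32)((u64)(uintptr_t)my_cq >> 32);
 */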

struct ecore_rdma_resize_cq_in_params {
	/* input variables (given by miniport) */

	u16	icid;
	u32	cq_size;
	bool	pbl_two_level;
	u64	pbl_ptr;
	u16	pbl_num_pages;
	u8	pbl_page_size_log; /* for the pages that contain the
				    * pointers to the CQ pages
				    */
};

enum roce_mode
{
	ROCE_V1,
	ROCE_V2_IPV4,
	ROCE_V2_IPV6,
	MAX_ROCE_MODE
};

struct ecore_rdma_create_qp_in_params {
	/* input variables (given by miniport) */
	u32	qp_handle_lo; /* QP handle to be written in CQE */
	u32	qp_handle_hi;
	u32	qp_handle_async_lo; /* QP handle to be written in async event */
	u32	qp_handle_async_hi;
	bool	use_srq;
	bool	signal_all;
	bool	fmr_and_reserved_lkey;
	u16	pd;
	u16	dpi;
	u16	sq_cq_id;
	u16	sq_num_pages;
	u64	sq_pbl_ptr;	/* Not relevant for iWARP */
	u8	max_sq_sges;
	u16	rq_cq_id;
	u16	rq_num_pages;
	u64	rq_pbl_ptr;	/* Not relevant for iWARP */
	u16	srq_id;
	u8	stats_queue;
};

struct ecore_rdma_create_qp_out_params {
	/* output variables (given to miniport) */
	u32		qp_id;
	u16		icid;
	void		*rq_pbl_virt;
	dma_addr_t	rq_pbl_phys;
	void		*sq_pbl_virt;
	dma_addr_t	sq_pbl_phys;
};

struct ecore_rdma_destroy_cq_in_params {
	/* input variables (given by miniport) */
	u16 icid;
};

struct ecore_rdma_destroy_cq_out_params {
	/* output variables, provided to the upper layer */

	/* Sequence number of completion notification sent for the CQ on
	 * the associated CNQ
	 */
	u16	num_cq_notif;
};

/* An ECORE GID can be used as an IPv4/6 address in RoCE v2 */
union ecore_gid {
	u8 bytes[16];
	u16 words[8];
	u32 dwords[4];
	u64 qwords[2];
	u32 ipv4_addr;
};
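
/*
 * For ROCE_V2_IPV4 (see enum roce_mode above), a hedged sketch of carrying
 * an IPv4 address in the GID; zeroing the rest of the union and the byte
 * order of 'ipv4_be' are assumptions here:
 *
 *	union ecore_gid gid;
 *	memset(&gid, 0, sizeof(gid));
 *	gid.ipv4_addr = ipv4_be;
 */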

struct ecore_rdma_modify_qp_in_params {
	/* input variables (given by miniport) */
	u32		modify_flags;
#define ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK               0x1
#define ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT              0
#define ECORE_ROCE_MODIFY_QP_VALID_PKEY_MASK                    0x1
#define ECORE_ROCE_MODIFY_QP_VALID_PKEY_SHIFT                   1
#define ECORE_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_MASK             0x1
#define ECORE_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_SHIFT            2
#define ECORE_ROCE_MODIFY_QP_VALID_DEST_QP_MASK                 0x1
#define ECORE_ROCE_MODIFY_QP_VALID_DEST_QP_SHIFT                3
#define ECORE_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_MASK          0x1
#define ECORE_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_SHIFT         4
#define ECORE_ROCE_MODIFY_QP_VALID_RQ_PSN_MASK                  0x1
#define ECORE_ROCE_MODIFY_QP_VALID_RQ_PSN_SHIFT                 5
#define ECORE_ROCE_MODIFY_QP_VALID_SQ_PSN_MASK                  0x1
#define ECORE_ROCE_MODIFY_QP_VALID_SQ_PSN_SHIFT                 6
#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_MASK       0x1
#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_SHIFT      7
#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_MASK      0x1
#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_SHIFT     8
#define ECORE_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_MASK             0x1
#define ECORE_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_SHIFT            9
#define ECORE_ROCE_MODIFY_QP_VALID_RETRY_CNT_MASK               0x1
#define ECORE_ROCE_MODIFY_QP_VALID_RETRY_CNT_SHIFT              10
#define ECORE_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_MASK           0x1
#define ECORE_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_SHIFT          11
#define ECORE_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_MASK       0x1
#define ECORE_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_SHIFT      12
#define ECORE_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_MASK     0x1
#define ECORE_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_SHIFT    13
#define ECORE_ROCE_MODIFY_QP_VALID_ROCE_MODE_MASK               0x1
#define ECORE_ROCE_MODIFY_QP_VALID_ROCE_MODE_SHIFT              14

	enum ecore_roce_qp_state	new_state;
	u16		pkey;
	bool		incoming_rdma_read_en;
	bool		incoming_rdma_write_en;
	bool		incoming_atomic_en;
	bool		e2e_flow_control_en;
	u32		dest_qp;
	u16		mtu;
	u8		traffic_class_tos; /* IPv6/GRH tc; IPv4 TOS */
	u8		hop_limit_ttl; /* IPv6/GRH hop limit; IPv4 TTL */
	u32		flow_label; /* ignored in IPv4 */
	union ecore_gid	sgid; /* GRH SGID; IPv4/6 Source IP */
	union ecore_gid	dgid; /* GRH DGID; IPv4/6 Destination IP */
	u16		udp_src_port; /* RoCEv2 only */

	u16		vlan_id;

	u32		rq_psn;
	u32		sq_psn;
	u8		max_rd_atomic_resp;
	u8		max_rd_atomic_req;
	u32		ack_timeout;
	u8		retry_cnt;
	u8		rnr_retry_cnt;
	u8		min_rnr_nak_timer;
	bool		sqd_async;
	u8		remote_mac_addr[6];
	u8		local_mac_addr[6];
	bool		use_local_mac;
	enum roce_mode	roce_mode;
};
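
/*
 * modify_flags selects which of the fields below are applied, using the
 * MASK/SHIFT pairs above.  A hedged sketch of moving a QP to RTR while
 * setting the RQ PSN ('initial_psn' is a placeholder):
 *
 *	struct ecore_rdma_modify_qp_in_params p = {0};
 *
 *	p.modify_flags |= ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK <<
 *	    ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT;
 *	p.new_state = ECORE_ROCE_QP_STATE_RTR;
 *
 *	p.modify_flags |= ECORE_ROCE_MODIFY_QP_VALID_RQ_PSN_MASK <<
 *	    ECORE_ROCE_MODIFY_QP_VALID_RQ_PSN_SHIFT;
 *	p.rq_psn = initial_psn;
 */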

struct ecore_rdma_query_qp_out_params {
	/* output variables (given to miniport) */
	enum ecore_roce_qp_state	state;
	u32		rq_psn; /* responder */
	u32		sq_psn; /* requester */
	bool		draining; /* send queue is draining */
	u16		mtu;
	u32		dest_qp;
	bool		incoming_rdma_read_en;
	bool		incoming_rdma_write_en;
	bool		incoming_atomic_en;
	bool		e2e_flow_control_en;
	union ecore_gid sgid; /* GRH SGID; IPv4/6 Source IP */
	union ecore_gid dgid; /* GRH DGID; IPv4/6 Destination IP */
	u32		flow_label; /* ignored in IPv4 */
	u8		hop_limit_ttl; /* IPv6/GRH hop limit; IPv4 TTL */
	u8		traffic_class_tos; /* IPv6/GRH tc; IPv4 TOS */
	u32		timeout;
	u8		rnr_retry;
	u8		retry_cnt;
	u8		min_rnr_nak_timer;
	u16		pkey_index;
	u8		max_rd_atomic;
	u8		max_dest_rd_atomic;
	bool		sqd_async;
};

struct ecore_rdma_register_tid_in_params {
	/* input variables (given by miniport) */
	u32	itid; /* index only, 18 bits long, lkey = itid << 8 | key */
	enum ecore_rdma_tid_type tid_type;
	u8	key;
	u16	pd;
	bool	local_read;
	bool	local_write;
	bool	remote_read;
	bool	remote_write;
	bool	remote_atomic;
	bool	mw_bind;
	u64	pbl_ptr;
	bool	pbl_two_level;
	u8	pbl_page_size_log; /* for the pages that contain the pointers
				    * to the MR pages
				    */
	u8	page_size_log; /* for the MR pages */
	u32	fbo;
	u64	length; /* only lower 40 bits are valid */
	u64	vaddr;
	bool	zbva;
	bool	phy_mr;
	bool	dma_mr;

	/* DIF related fields */
	bool	dif_enabled;
	u64	dif_error_addr;
	u64	dif_runt_addr;
};
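
/*
 * Per the itid comment above, the key the host stack sees combines the
 * 18-bit itid with the 8-bit key; a sketch:
 *
 *	u32 lkey = (params.itid << 8) | params.key;
 */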

struct ecore_rdma_create_srq_in_params {
	u64 pbl_base_addr;
	u64 prod_pair_addr;
	u16 num_pages;
	u16 pd_id;
	u16 page_size;
};

struct ecore_rdma_create_srq_out_params {
	u16 srq_id;
};

struct ecore_rdma_destroy_srq_in_params {
	u16 srq_id;
};

struct ecore_rdma_modify_srq_in_params {
	u32 wqe_limit;
	u16 srq_id;
};

struct ecore_rdma_resize_cq_out_params {
	/* output variables, provided to the upper layer */
	u32 prod; /* CQ producer value on old PBL */
	u32 cons; /* CQ consumer value on old PBL */
};

struct ecore_rdma_resize_cnq_in_params {
	/* input variables (given by miniport) */
	u32	cnq_id;
	u32	pbl_page_size_log; /* for the pages that contain the
				    * pointers to the CNQ pages
				    */
	u64	pbl_ptr;
};

struct ecore_rdma_stats_out_params {
	u64	sent_bytes;
	u64	sent_pkts;
	u64	rcv_bytes;
	u64	rcv_pkts;

	/* RoCE only */
	u64	icrc_errors;		/* wraps at 32 bits */
	u64	retransmit_events;	/* wraps at 32 bits */
	u64	silent_drops;		/* wraps at 16 bits */
	u64	rnr_nacks_sent;		/* wraps at 16 bits */

	/* iWARP only */
	u64	iwarp_tx_fast_rxmit_cnt;
	u64	iwarp_tx_slow_start_cnt;
	u64	unalign_rx_comp;
};
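
/*
 * Several of the counters above wrap at fewer than 64 bits, so deltas
 * between two samples should be taken modulo the wrap width.  A sketch for
 * a counter that wraps at 32 bits ('prev' and 'cur' are successive
 * samples):
 *
 *	u64 delta = (cur - prev) & 0xFFFFFFFFULL;
 */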

struct ecore_rdma_counters_out_params {
	u64	pd_count;
	u64	max_pd;
	u64	dpi_count;
	u64	max_dpi;
	u64	cq_count;
	u64	max_cq;
	u64	qp_count;
	u64	max_qp;
	u64	tid_count;
	u64	max_tid;
};

enum _ecore_status_t
ecore_rdma_add_user(void *rdma_cxt,
		    struct ecore_rdma_add_user_out_params *out_params);

enum _ecore_status_t
ecore_rdma_alloc_pd(void *rdma_cxt,
		    u16	*pd);

enum _ecore_status_t
ecore_rdma_alloc_tid(void *rdma_cxt,
		     u32 *tid);

enum _ecore_status_t
ecore_rdma_create_cq(void *rdma_cxt,
		     struct ecore_rdma_create_cq_in_params *params,
		     u16 *icid);

/* Returns a pointer to the responder's CID, which is also a pointer to the
 * ecore_qp_params struct. Returns NULL in case of failure.
 */
struct ecore_rdma_qp*
ecore_rdma_create_qp(void *rdma_cxt,
		     struct ecore_rdma_create_qp_in_params  *in_params,
		     struct ecore_rdma_create_qp_out_params *out_params);
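
/*
 * A hedged usage sketch; 'in_params' is assumed to be fully initialized as
 * described in struct ecore_rdma_create_qp_in_params:
 *
 *	struct ecore_rdma_create_qp_out_params out = {0};
 *	struct ecore_rdma_qp *qp;
 *
 *	qp = ecore_rdma_create_qp(rdma_cxt, &in_params, &out);
 *	if (qp == NULL)
 *		... handle failure ...
 */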

enum _ecore_status_t
ecore_roce_create_ud_qp(void *rdma_cxt,
			struct ecore_rdma_create_qp_out_params *out_params);

enum _ecore_status_t
ecore_rdma_deregister_tid(void *rdma_cxt,
			  u32		tid);

enum _ecore_status_t
ecore_rdma_destroy_cq(void *rdma_cxt,
		      struct ecore_rdma_destroy_cq_in_params  *in_params,
		      struct ecore_rdma_destroy_cq_out_params *out_params);

enum _ecore_status_t
ecore_rdma_destroy_qp(void *rdma_cxt,
		      struct ecore_rdma_qp *qp);

enum _ecore_status_t
ecore_roce_destroy_ud_qp(void *rdma_cxt, u16 cid);

void
ecore_rdma_free_pd(void *rdma_cxt,
		   u16	pd);

void
ecore_rdma_free_tid(void *rdma_cxt,
		    u32	tid);

enum _ecore_status_t
ecore_rdma_modify_qp(void *rdma_cxt,
		     struct ecore_rdma_qp *qp,
		     struct ecore_rdma_modify_qp_in_params *params);

struct ecore_rdma_device*
ecore_rdma_query_device(void *rdma_cxt);

struct ecore_rdma_port*
ecore_rdma_query_port(void *rdma_cxt);

enum _ecore_status_t
ecore_rdma_query_qp(void *rdma_cxt,
		    struct ecore_rdma_qp		  *qp,
		    struct ecore_rdma_query_qp_out_params *out_params);

enum _ecore_status_t
ecore_rdma_register_tid(void *rdma_cxt,
			struct ecore_rdma_register_tid_in_params *params);

void ecore_rdma_remove_user(void *rdma_cxt,
			    u16		dpi);

enum _ecore_status_t
ecore_rdma_resize_cnq(void *rdma_cxt,
		      struct ecore_rdma_resize_cnq_in_params *in_params);

enum _ecore_status_t
ecore_rdma_resize_cq(void *rdma_cxt,
		     struct ecore_rdma_resize_cq_in_params  *in_params,
		     struct ecore_rdma_resize_cq_out_params *out_params);

/* Before calling rdma_start, the upper layer (VBD/qed) should fill in the
 * page size and MTU in the hwfn context.
 */
enum _ecore_status_t
ecore_rdma_start(void *p_hwfn,
		 struct ecore_rdma_start_in_params *params);
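
/*
 * A hedged start-up sketch; per the note above, the page size and MTU are
 * assumed to already be set in the hwfn context, and 'params' to be a
 * populated struct ecore_rdma_start_in_params:
 *
 *	enum _ecore_status_t rc = ecore_rdma_start(p_hwfn, &params);
 *	if (rc != ECORE_SUCCESS)
 *		... handle failure ...
 */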

enum _ecore_status_t
ecore_rdma_stop(void *rdma_cxt);

enum _ecore_status_t
ecore_rdma_query_stats(void *rdma_cxt, u8 stats_queue,
		       struct ecore_rdma_stats_out_params *out_parms);

enum _ecore_status_t
ecore_rdma_query_counters(void *rdma_cxt,
			  struct ecore_rdma_counters_out_params *out_parms);

u32 ecore_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id);

u32 ecore_rdma_query_cau_timer_res(void *p_hwfn);

void ecore_rdma_cnq_prod_update(void *rdma_cxt, u8 cnq_index, u16 prod);

void ecore_rdma_resc_free(struct ecore_hwfn *p_hwfn);

#ifdef CONFIG_ECORE_IWARP

/* iWARP API */

enum ecore_iwarp_event_type {
	ECORE_IWARP_EVENT_MPA_REQUEST, /* Passive side request received */
	ECORE_IWARP_EVENT_PASSIVE_COMPLETE, /* Passive side established
					     * (ack on MPA response)
					     */
	ECORE_IWARP_EVENT_ACTIVE_COMPLETE, /* Active side reply received */
	ECORE_IWARP_EVENT_DISCONNECT,
	ECORE_IWARP_EVENT_CLOSE,
	ECORE_IWARP_EVENT_IRQ_FULL,
	ECORE_IWARP_EVENT_RQ_EMPTY,
	ECORE_IWARP_EVENT_LLP_TIMEOUT,
	ECORE_IWARP_EVENT_REMOTE_PROTECTION_ERROR,
	ECORE_IWARP_EVENT_CQ_OVERFLOW,
	ECORE_IWARP_EVENT_QP_CATASTROPHIC,
	ECORE_IWARP_EVENT_ACTIVE_MPA_REPLY,
	ECORE_IWARP_EVENT_LOCAL_ACCESS_ERROR,
	ECORE_IWARP_EVENT_REMOTE_OPERATION_ERROR,
	ECORE_IWARP_EVENT_TERMINATE_RECEIVED
};

enum ecore_tcp_ip_version
{
	ECORE_TCP_IPV4,
	ECORE_TCP_IPV6,
};

struct ecore_iwarp_cm_info {
	enum ecore_tcp_ip_version ip_version;
	u32 remote_ip[4];
	u32 local_ip[4];
	u16 remote_port;
	u16 local_port;
	u16 vlan;
	const void *private_data;
	u16 private_data_len;
	u8 ord;
	u8 ird;
};

struct ecore_iwarp_cm_event_params {
	enum ecore_iwarp_event_type event;
	const struct ecore_iwarp_cm_info *cm_info;
	void *ep_context; /* To be passed to accept call */
	int status;
};

typedef int (*iwarp_event_handler)(void *context,
				   struct ecore_iwarp_cm_event_params *event);

/* Active side connect flow:
 * The upper layer driver calls ecore_iwarp_connect.
 * The function is blocking, i.e. it returns after the TCP connection is
 * established.
 * Once the MPA connection is established, the
 * ECORE_IWARP_EVENT_ACTIVE_COMPLETE event is passed to the upper layer
 * driver using the event_cb provided in ecore_iwarp_connect_in. Information
 * about the established connection is provided in the event data. (See the
 * sketch following these structures.)
 */
struct ecore_iwarp_connect_in {
	iwarp_event_handler event_cb;
	void *cb_context;
	struct ecore_rdma_qp *qp;
	struct ecore_iwarp_cm_info cm_info;
	u16 mss;
	u8 remote_mac_addr[6];
	u8 local_mac_addr[6];
};

struct ecore_iwarp_connect_out {
	void *ep_context;
};
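
/*
 * A hedged sketch of the active side flow described above; the handler and
 * context names are illustrative:
 *
 *	static int my_ev_cb(void *context,
 *			    struct ecore_iwarp_cm_event_params *ev)
 *	{
 *		if (ev->event == ECORE_IWARP_EVENT_ACTIVE_COMPLETE)
 *			... connection established; ev->status has result ...
 *		return (0);
 *	}
 *
 *	struct ecore_iwarp_connect_in in = {0};
 *	struct ecore_iwarp_connect_out out = {0};
 *	enum _ecore_status_t rc;
 *
 *	in.event_cb   = my_ev_cb;
 *	in.cb_context = my_ctx;
 *	in.qp         = qp;
 *	... fill in.cm_info, in.mss and the MAC addresses ...
 *	rc = ecore_iwarp_connect(rdma_cxt, &in, &out);
 */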

/* Passive side connect flow:
 * The upper layer driver calls ecore_iwarp_create_listen.
 * Once a SYN packet arrives that matches an IP/port being listened on, ecore
 * offloads the TCP connection. After an MPA Request is received on the
 * offloaded connection, the ECORE_IWARP_EVENT_MPA_REQUEST event is sent to
 * the upper layer driver using the event_cb passed below, with the event
 * data placed in the event parameter. After the upper layer driver processes
 * the event, ecore_iwarp_accept or ecore_iwarp_reject should be called to
 * continue MPA negotiation. Once negotiation is complete, the
 * ECORE_IWARP_EVENT_PASSIVE_COMPLETE event is passed to the event_cb
 * originally provided in the ecore_iwarp_listen_in structure. (See the
 * sketch following these structures.)
 */
struct ecore_iwarp_listen_in {
	iwarp_event_handler event_cb; /* Callback func for delivering events */
	void *cb_context; /* passed to event_cb */
	u32 max_backlog; /* Max num of pending incoming connection requests */
	enum ecore_tcp_ip_version ip_version;
	u32 ip_addr[4];
	u16 port;
	u16 vlan;
};

struct ecore_iwarp_listen_out {
	void *handle; /* to be sent to destroy */
};
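
/*
 * A hedged sketch of the passive side flow described above; names are
 * illustrative.  Listening is set up once:
 *
 *	struct ecore_iwarp_listen_in lin = {0};
 *	struct ecore_iwarp_listen_out lout = {0};
 *	enum _ecore_status_t rc;
 *
 *	lin.event_cb    = my_ev_cb;
 *	lin.cb_context  = my_ctx;
 *	lin.max_backlog = 8;
 *	lin.ip_version  = ECORE_TCP_IPV4;
 *	... fill lin.ip_addr, lin.port, lin.vlan ...
 *	rc = ecore_iwarp_create_listen(rdma_cxt, &lin, &lout);
 *
 * and in my_ev_cb, an ECORE_IWARP_EVENT_MPA_REQUEST event is answered with
 * ecore_iwarp_accept (or ecore_iwarp_reject), passing along ep_context from
 * the event data:
 *
 *	struct ecore_iwarp_accept_in ain = {0};
 *
 *	ain.ep_context = ev->ep_context;
 *	ain.cb_context = my_ctx;
 *	ain.qp         = qp;
 *	rc = ecore_iwarp_accept(rdma_cxt, &ain);
 */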

struct ecore_iwarp_accept_in {
	void *ep_context; /* From event data of ECORE_IWARP_EVENT_MPA_REQUEST */
	void *cb_context; /* context to be passed to event_cb */
	struct ecore_rdma_qp *qp;
	const void *private_data;
	u16 private_data_len;
	u8 ord;
	u8 ird;
};

struct ecore_iwarp_reject_in {
	void *ep_context; /* From event data of ECORE_IWARP_EVENT_MPA_REQUEST */
	void *cb_context; /* context to be passed to event_cb */
	const void *private_data;
	u16 private_data_len;
};

struct ecore_iwarp_send_rtr_in {
	void *ep_context;
};

struct ecore_iwarp_tcp_abort_in {
	void *ep_context;
};

enum _ecore_status_t
ecore_iwarp_connect(void *rdma_cxt,
		    struct ecore_iwarp_connect_in *iparams,
		    struct ecore_iwarp_connect_out *oparams);

enum _ecore_status_t
ecore_iwarp_create_listen(void *rdma_cxt,
			  struct ecore_iwarp_listen_in *iparams,
			  struct ecore_iwarp_listen_out *oparams);

enum _ecore_status_t
ecore_iwarp_accept(void *rdma_cxt,
		   struct ecore_iwarp_accept_in *iparams);

enum _ecore_status_t
ecore_iwarp_reject(void *rdma_cxt,
		   struct ecore_iwarp_reject_in *iparams);

enum _ecore_status_t
ecore_iwarp_destroy_listen(void *rdma_cxt, void *handle);

enum _ecore_status_t
ecore_iwarp_send_rtr(void *rdma_cxt, struct ecore_iwarp_send_rtr_in *iparams);

enum _ecore_status_t
ecore_iwarp_tcp_abort(void *rdma_cxt, struct ecore_iwarp_tcp_abort_in *iparams);

#endif /* CONFIG_ECORE_IWARP */

#endif