/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, v.1, (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2014-2017 Cavium, Inc.
 *
 * The contents of this file are subject to the terms of the Common Development
 * and Distribution License, v.1, (the "License").
 *
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the License at
 * http://opensource.org/licenses/CDDL-1.0
 *
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __ECORE_RDMA_API_H__
#define __ECORE_RDMA_API_H__

#ifndef LINUX_REMOVE
#define ETH_ALEN 6
#endif

#ifndef __EXTRACT__LINUX__

enum ecore_roce_ll2_tx_dest
{
	ECORE_ROCE_LL2_TX_DEST_NW /* Light L2 TX Destination to the Network */,
	ECORE_ROCE_LL2_TX_DEST_LB /* Light L2 TX Destination to the Loopback */,
	ECORE_ROCE_LL2_TX_DEST_MAX
};

/* HW/FW RoCE Limitations (external. For internal see ecore_roce.h) */
/* CNQ size limitation
 * The CNQ size should be set as twice the number of CQs, since for each CQ one
 * element may be inserted into the CNQ and another element is used per CQ to
 * accommodate a possible race in the arm mechanism.
 * The FW supports a CNQ of 64k-1 and this apparently causes an issue - notice
 * that the number of QPs can reach 32k, giving 64k CQs and 128k CNQ elements.
 * Luckily the FW can buffer CNQ elements to avoid an overflow, at the expense
 * of performance.
 */
#define ECORE_RDMA_MAX_CNQ_SIZE               (0xFFFF) /* 2^16 - 1 */

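/*
 * Sizing sketch (illustrative only; "num_cqs" is a hypothetical caller
 * variable, not part of this API): per the limitation above, a driver
 * would provision two CNQ elements per CQ and clamp to the FW maximum.
 *
 *	u32 cnq_size = 2 * num_cqs;
 *	if (cnq_size > ECORE_RDMA_MAX_CNQ_SIZE)
 *		cnq_size = ECORE_RDMA_MAX_CNQ_SIZE;
 */
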
/* rdma interface */
enum ecore_rdma_tid_type
{
	ECORE_RDMA_TID_REGISTERED_MR,
	ECORE_RDMA_TID_FMR,
	ECORE_RDMA_TID_MW_TYPE1,
	ECORE_RDMA_TID_MW_TYPE2A
};

enum ecore_roce_qp_state {
	ECORE_ROCE_QP_STATE_RESET, /* Reset */
	ECORE_ROCE_QP_STATE_INIT,  /* Initialized */
	ECORE_ROCE_QP_STATE_RTR,   /* Ready to Receive */
	ECORE_ROCE_QP_STATE_RTS,   /* Ready to Send */
	ECORE_ROCE_QP_STATE_SQD,   /* Send Queue Draining */
	ECORE_ROCE_QP_STATE_ERR,   /* Error */
	ECORE_ROCE_QP_STATE_SQE    /* Send Queue Error */
};

typedef
void (*affiliated_event_t)(void	*context,
			   u8	fw_event_code,
			   void	*fw_handle);

typedef
void (*unaffiliated_event_t)(void *context,
			     u8   event_code);

struct ecore_rdma_events {
	void			*context;
	affiliated_event_t	affiliated_event;
	unaffiliated_event_t	unaffiliated_event;
};

struct ecore_rdma_device {
	/* Vendor specific information */
	u32	vendor_id;
	u32	vendor_part_id;
	u32	hw_ver;
	u64	fw_ver;

	u64	node_guid; /* node GUID */
	u64	sys_image_guid; /* System image GUID */

	u8	max_cnq;
	u8	max_sge; /* The maximum number of scatter/gather entries
			  * per Work Request supported
			  */
	u8	max_srq_sge; /* The maximum number of scatter/gather entries
			      * per Work Request supported for SRQ
			      */
	u16	max_inline;
	u32	max_wqe; /* The maximum number of outstanding work
			  * requests on any Work Queue supported
			  */
	u32	max_srq_wqe; /* The maximum number of outstanding work
			      * requests on any Work Queue supported for SRQ
			      */
	u8	max_qp_resp_rd_atomic_resc; /* The maximum number of RDMA Reads
					     * & atomic operations that can be
					     * outstanding per QP
					     */

	u8	max_qp_req_rd_atomic_resc; /* The maximum depth per QP for
					    * initiation of RDMA Read
					    * & atomic operations
					    */
	u64	max_dev_resp_rd_atomic_resc;
	u32	max_cq;
	u32	max_qp;
	u32	max_srq; /* Maximum number of SRQs */
	u32	max_mr; /* Maximum number of MRs supported by this device */
	u64	max_mr_size; /* Size (in bytes) of the largest contiguous memory
			      * block that can be registered by this device
			      */
	u32	max_cqe;
	u32	max_mw; /* The maximum number of memory windows supported */
	u32	max_fmr;
	u32	max_mr_mw_fmr_pbl;
	u64	max_mr_mw_fmr_size;
	u32	max_pd; /* The maximum number of protection domains supported */
	u32	max_ah;
	u8	max_pkey;
	u16	max_srq_wr; /* Maximum number of WRs per SRQ */
	u8	max_stats_queues; /* Maximum number of statistics queues */
	u32	dev_caps;

	/* Ability to support RNR-NAK generation */

#define ECORE_RDMA_DEV_CAP_RNR_NAK_MASK				0x1
#define ECORE_RDMA_DEV_CAP_RNR_NAK_SHIFT			0
	/* Ability to support shutdown port */
#define ECORE_RDMA_DEV_CAP_SHUTDOWN_PORT_MASK			0x1
#define ECORE_RDMA_DEV_CAP_SHUTDOWN_PORT_SHIFT			1
	/* Ability to support port active event */
#define ECORE_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_MASK		0x1
#define ECORE_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_SHIFT		2
	/* Ability to support port change event */
#define ECORE_RDMA_DEV_CAP_PORT_CHANGE_EVENT_MASK		0x1
#define ECORE_RDMA_DEV_CAP_PORT_CHANGE_EVENT_SHIFT		3
	/* Ability to support system image GUID */
#define ECORE_RDMA_DEV_CAP_SYS_IMAGE_MASK			0x1
#define ECORE_RDMA_DEV_CAP_SYS_IMAGE_SHIFT			4
	/* Ability to support a bad P_Key counter */
#define ECORE_RDMA_DEV_CAP_BAD_PKEY_CNT_MASK			0x1
#define ECORE_RDMA_DEV_CAP_BAD_PKEY_CNT_SHIFT			5
	/* Ability to support atomic operations */
#define ECORE_RDMA_DEV_CAP_ATOMIC_OP_MASK			0x1
#define ECORE_RDMA_DEV_CAP_ATOMIC_OP_SHIFT			6
#define ECORE_RDMA_DEV_CAP_RESIZE_CQ_MASK			0x1
#define ECORE_RDMA_DEV_CAP_RESIZE_CQ_SHIFT			7
	/* Ability to support modifying the maximum number of
	 * outstanding work requests per QP
	 */
#define ECORE_RDMA_DEV_CAP_RESIZE_MAX_WR_MASK			0x1
#define ECORE_RDMA_DEV_CAP_RESIZE_MAX_WR_SHIFT			8
	/* Ability to support automatic path migration */
#define ECORE_RDMA_DEV_CAP_AUTO_PATH_MIG_MASK			0x1
#define ECORE_RDMA_DEV_CAP_AUTO_PATH_MIG_SHIFT			9
	/* Ability to support the base memory management extensions */
#define ECORE_RDMA_DEV_CAP_BASE_MEMORY_EXT_MASK			0x1
#define ECORE_RDMA_DEV_CAP_BASE_MEMORY_EXT_SHIFT		10
#define ECORE_RDMA_DEV_CAP_BASE_QUEUE_EXT_MASK			0x1
#define ECORE_RDMA_DEV_CAP_BASE_QUEUE_EXT_SHIFT			11
	/* Ability to support multiple page sizes per memory region */
#define ECORE_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_MASK		0x1
#define ECORE_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_SHIFT		12
	/* Ability to support block list physical buffer lists */
#define ECORE_RDMA_DEV_CAP_BLOCK_MODE_MASK			0x1
#define ECORE_RDMA_DEV_CAP_BLOCK_MODE_SHIFT			13
	/* Ability to support zero based virtual addresses */
#define ECORE_RDMA_DEV_CAP_ZBVA_MASK				0x1
#define ECORE_RDMA_DEV_CAP_ZBVA_SHIFT				14
	/* Ability to support local invalidate fencing */
#define ECORE_RDMA_DEV_CAP_LOCAL_INV_FENCE_MASK			0x1
#define ECORE_RDMA_DEV_CAP_LOCAL_INV_FENCE_SHIFT		15
	/* Ability to support loopback on a QP */
#define ECORE_RDMA_DEV_CAP_LB_INDICATOR_MASK			0x1
#define ECORE_RDMA_DEV_CAP_LB_INDICATOR_SHIFT			16
	u64	page_size_caps;
	u8	dev_ack_delay;
	u32	reserved_lkey; /* Value of reserved L_key */
	u32	bad_pkey_counter; /* Bad P_key counter support indicator */
	struct ecore_rdma_events events;
};
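
/*
 * Usage sketch (illustrative only; "rdma_cxt" is a hypothetical caller
 * handle): the dev_caps word is decoded with the MASK/SHIFT pairs above.
 * For example, checking whether the device can generate RNR-NAKs:
 *
 *	struct ecore_rdma_device *dev = ecore_rdma_query_device(rdma_cxt);
 *	if ((dev->dev_caps >> ECORE_RDMA_DEV_CAP_RNR_NAK_SHIFT) &
 *	    ECORE_RDMA_DEV_CAP_RNR_NAK_MASK) {
 *		... device supports RNR-NAK generation ...
 *	}
 */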

enum ecore_port_state {
	ECORE_RDMA_PORT_UP,
	ECORE_RDMA_PORT_DOWN,
};

enum ecore_roce_capability {
	ECORE_ROCE_V1	= 1 << 0,
	ECORE_ROCE_V2	= 1 << 1,
};

struct ecore_rdma_port {
	enum ecore_port_state port_state;
	int	link_speed;
	u64	max_msg_size;
	u8	source_gid_table_len;
	void	*source_gid_table_ptr;
	u8	pkey_table_len;
	void	*pkey_table_ptr;
	u32	pkey_bad_counter;
	enum ecore_roce_capability capability;
};

struct ecore_rdma_cnq_params
{
	u8  num_pbl_pages; /* Number of pages in the PBL allocated
			    * for this queue
			    */
	u64 pbl_ptr; /* Address of the first entry of the queue PBL */
};

/* The CQ Mode affects the CQ doorbell transaction size.
 * 64/32 bit machines should configure to 32/16 bits respectively.
 */
enum ecore_rdma_cq_mode {
	ECORE_RDMA_CQ_MODE_16_BITS,
	ECORE_RDMA_CQ_MODE_32_BITS,
};
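
/*
 * Selection sketch (illustrative only): following the comment above, a
 * 64-bit build would pick the 32-bit doorbell mode and a 32-bit build
 * the 16-bit mode, e.g.:
 *
 *	enum ecore_rdma_cq_mode cq_mode = (sizeof (void *) == 8) ?
 *	    ECORE_RDMA_CQ_MODE_32_BITS : ECORE_RDMA_CQ_MODE_16_BITS;
 */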

struct ecore_roce_dcqcn_params {
	u8	notification_point;
	u8	reaction_point;

	/* fields for notification point */
	u32	cnp_send_timeout;

	/* fields for reaction point */
	u32	rl_bc_rate;  /* Byte Counter Limit. */
	u16	rl_max_rate; /* Maximum rate in 1.6 Mbps resolution */
	u16	rl_r_ai;     /* Active increase rate */
	u16	rl_r_hai;    /* Hyper active increase rate */
	u16	dcqcn_g;     /* Alpha update gain in 1/64K resolution */
	u32	dcqcn_k_us;  /* Alpha update interval */
	u32	dcqcn_timeout_us;
};

#ifdef CONFIG_ECORE_IWARP

#define ECORE_MPA_RTR_TYPE_NONE		0 /* No RTR type */
#define ECORE_MPA_RTR_TYPE_ZERO_SEND	(1 << 0)
#define ECORE_MPA_RTR_TYPE_ZERO_WRITE	(1 << 1)
#define ECORE_MPA_RTR_TYPE_ZERO_READ	(1 << 2)

enum ecore_mpa_rev {
	ECORE_MPA_REV1,
	ECORE_MPA_REV2,
};

struct ecore_iwarp_params {
	u32				rcv_wnd_size;
	u16				ooo_num_rx_bufs;
#define ECORE_IWARP_TS_EN (1 << 0)
#define ECORE_IWARP_DA_EN (1 << 1)
	u8				flags;
	u8				crc_needed;
	enum ecore_mpa_rev		mpa_rev;
	u8				mpa_rtr;
	u8				mpa_peer2peer;
};

#endif

struct ecore_roce_params {
	enum ecore_rdma_cq_mode		cq_mode;
	struct ecore_roce_dcqcn_params	dcqcn_params;
	u8				ll2_handle; /* required for UD QPs */
};

struct ecore_rdma_start_in_params {
	struct ecore_rdma_events	*events;
	struct ecore_rdma_cnq_params	cnq_pbl_list[128];
	u8				desired_cnq;
	u16				max_mtu;
	u8				mac_addr[ETH_ALEN];
#ifdef CONFIG_ECORE_IWARP
	struct ecore_iwarp_params	iwarp;
#endif
	struct ecore_roce_params	roce;
};

struct ecore_rdma_add_user_out_params {
	/* output variables (given to miniport) */
	u16	dpi;
	u64	dpi_addr;
	u64	dpi_phys_addr;
	u32	dpi_size;
	u16	wid_count;
};

/* Returns the CQ CID or zero in case of failure */
struct ecore_rdma_create_cq_in_params {
	/* input variables (given by miniport) */
	u32	cq_handle_lo; /* CQ handle to be written in CNQ */
	u32	cq_handle_hi;
	u32	cq_size;
	u16	dpi;
	bool	pbl_two_level;
	u64	pbl_ptr;
	u16	pbl_num_pages;
	u8	pbl_page_size_log; /* for the pages that contain the
				    * pointers to the CQ pages
				    */
	u8	cnq_id;
	u16	int_timeout;
};

#endif

struct ecore_rdma_resize_cq_in_params {
	/* input variables (given by miniport) */

	u16	icid;
	u32	cq_size;
	bool	pbl_two_level;
	u64	pbl_ptr;
	u16	pbl_num_pages;
	u8	pbl_page_size_log; /* for the pages that contain the
				    * pointers to the CQ pages
				    */
};

#ifndef __EXTRACT__LINUX__

enum roce_mode
{
	ROCE_V1,
	ROCE_V2_IPV4,
	ROCE_V2_IPV6,
	MAX_ROCE_MODE
};

struct ecore_rdma_create_qp_in_params {
	/* input variables (given by miniport) */
	u32	qp_handle_lo; /* QP handle to be written in CQE */
	u32	qp_handle_hi;
	u32	qp_handle_async_lo; /* QP handle to be written in async event */
	u32	qp_handle_async_hi;
	bool	use_srq;
	bool	signal_all;
	bool	fmr_and_reserved_lkey;
	u16	pd;
	u16	dpi;
	u16	sq_cq_id;
	u16	sq_num_pages;
	u64	sq_pbl_ptr;	/* Not relevant for iWARP */
	u8	max_sq_sges;
	u16	rq_cq_id;
	u16	rq_num_pages;
	u64	rq_pbl_ptr;	/* Not relevant for iWARP */
	u16	srq_id;
	u8	stats_queue;
};

struct ecore_rdma_create_qp_out_params {
	/* output variables (given to miniport) */
	u32		qp_id;
	u16		icid;
	void		*rq_pbl_virt;
	dma_addr_t	rq_pbl_phys;
	void		*sq_pbl_virt;
	dma_addr_t	sq_pbl_phys;
};

struct ecore_rdma_destroy_cq_in_params {
	/* input variables (given by miniport) */
	u16 icid;
};

struct ecore_rdma_destroy_cq_out_params {
	/* output variables, provided to the upper layer */

	/* Sequence number of completion notification sent for the CQ on
	 * the associated CNQ
	 */
	u16	num_cq_notif;
};

/* An ECORE GID can be used as an IPv4/6 address in RoCE v2 */
union ecore_gid {
	u8 bytes[16];
	u16 words[8];
	u32 dwords[4];
	u64 qwords[2];
	u32 ipv4_addr;
};

struct ecore_rdma_modify_qp_in_params {
	/* input variables (given by miniport) */
	u32		modify_flags;
#define ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK               0x1
#define ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT              0
#define ECORE_ROCE_MODIFY_QP_VALID_PKEY_MASK                    0x1
#define ECORE_ROCE_MODIFY_QP_VALID_PKEY_SHIFT                   1
#define ECORE_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_MASK             0x1
#define ECORE_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_SHIFT            2
#define ECORE_ROCE_MODIFY_QP_VALID_DEST_QP_MASK                 0x1
#define ECORE_ROCE_MODIFY_QP_VALID_DEST_QP_SHIFT                3
#define ECORE_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_MASK          0x1
#define ECORE_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_SHIFT         4
#define ECORE_ROCE_MODIFY_QP_VALID_RQ_PSN_MASK                  0x1
#define ECORE_ROCE_MODIFY_QP_VALID_RQ_PSN_SHIFT                 5
#define ECORE_ROCE_MODIFY_QP_VALID_SQ_PSN_MASK                  0x1
#define ECORE_ROCE_MODIFY_QP_VALID_SQ_PSN_SHIFT                 6
#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_MASK       0x1
#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_SHIFT      7
#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_MASK      0x1
#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_SHIFT     8
#define ECORE_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_MASK             0x1
#define ECORE_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_SHIFT            9
#define ECORE_ROCE_MODIFY_QP_VALID_RETRY_CNT_MASK               0x1
#define ECORE_ROCE_MODIFY_QP_VALID_RETRY_CNT_SHIFT              10
#define ECORE_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_MASK           0x1
#define ECORE_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_SHIFT          11
#define ECORE_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_MASK       0x1
#define ECORE_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_SHIFT      12
#define ECORE_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_MASK     0x1
#define ECORE_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_SHIFT    13
#define ECORE_ROCE_MODIFY_QP_VALID_ROCE_MODE_MASK               0x1
#define ECORE_ROCE_MODIFY_QP_VALID_ROCE_MODE_SHIFT              14

	enum ecore_roce_qp_state	new_state;
	u16		pkey;
	bool		incoming_rdma_read_en;
	bool		incoming_rdma_write_en;
	bool		incoming_atomic_en;
	bool		e2e_flow_control_en;
	u32		dest_qp;
	u16		mtu;
	u8		traffic_class_tos; /* IPv6/GRH tc; IPv4 TOS */
	u8		hop_limit_ttl; /* IPv6/GRH hop limit; IPv4 TTL */
	u32		flow_label; /* ignored in IPv4 */
	union ecore_gid	sgid; /* GRH SGID; IPv4/6 Source IP */
	union ecore_gid	dgid; /* GRH DGID; IPv4/6 Destination IP */
	u16		udp_src_port; /* RoCEv2 only */

	u16		vlan_id;

	u32		rq_psn;
	u32		sq_psn;
	u8		max_rd_atomic_resp;
	u8		max_rd_atomic_req;
	u32		ack_timeout;
	u8		retry_cnt;
	u8		rnr_retry_cnt;
	u8		min_rnr_nak_timer;
	bool		sqd_async;
	u8		remote_mac_addr[6];
	u8		local_mac_addr[6];
	bool		use_local_mac;
	enum roce_mode	roce_mode;
};
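
/*
 * Usage sketch (illustrative only; the PSN value is hypothetical):
 * modify_flags marks which of the fields below are valid, using the
 * MASK/SHIFT pairs above. For example, moving a QP to RTR while
 * supplying the receive PSN:
 *
 *	struct ecore_rdma_modify_qp_in_params p = { 0 };
 *
 *	p.new_state = ECORE_ROCE_QP_STATE_RTR;
 *	p.rq_psn = 0x1000;
 *	p.modify_flags =
 *	    (ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK <<
 *	    ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT) |
 *	    (ECORE_ROCE_MODIFY_QP_VALID_RQ_PSN_MASK <<
 *	    ECORE_ROCE_MODIFY_QP_VALID_RQ_PSN_SHIFT);
 *	(void) ecore_rdma_modify_qp(rdma_cxt, qp, &p);
 */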

struct ecore_rdma_query_qp_out_params {
	/* output variables (given to miniport) */
	enum ecore_roce_qp_state	state;
	u32		rq_psn; /* responder */
	u32		sq_psn; /* requester */
	bool		draining; /* send queue is draining */
	u16		mtu;
	u32		dest_qp;
	bool		incoming_rdma_read_en;
	bool		incoming_rdma_write_en;
	bool		incoming_atomic_en;
	bool		e2e_flow_control_en;
	union ecore_gid sgid; /* GRH SGID; IPv4/6 Source IP */
	union ecore_gid dgid; /* GRH DGID; IPv4/6 Destination IP */
	u32		flow_label; /* ignored in IPv4 */
	u8		hop_limit_ttl; /* IPv6/GRH hop limit; IPv4 TTL */
	u8		traffic_class_tos; /* IPv6/GRH tc; IPv4 TOS */
	u32		timeout;
	u8		rnr_retry;
	u8		retry_cnt;
	u8		min_rnr_nak_timer;
	u16		pkey_index;
	u8		max_rd_atomic;
	u8		max_dest_rd_atomic;
	bool		sqd_async;
};

struct ecore_rdma_register_tid_in_params {
	/* input variables (given by miniport) */
	u32	itid; /* index only, 18 bits long, lkey = itid << 8 | key */
	enum ecore_rdma_tid_type tid_type;
	u8	key;
	u16	pd;
	bool	local_read;
	bool	local_write;
	bool	remote_read;
	bool	remote_write;
	bool	remote_atomic;
	bool	mw_bind;
	u64	pbl_ptr;
	bool	pbl_two_level;
	u8	pbl_page_size_log; /* for the pages that contain the pointers
				    * to the MR pages
				    */
	u8	page_size_log; /* for the MR pages */
	u32	fbo;
	u64	length; /* only lower 40 bits are valid */
	u64	vaddr;
	bool	zbva;
	bool	phy_mr;
	bool	dma_mr;

	/* DIF related fields */
	bool	dif_enabled;
	u64	dif_error_addr;
	u64	dif_runt_addr;
};
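
/*
 * Key-derivation sketch (illustrative only), following the itid comment
 * above: the lkey combines the 18-bit TID index with the 8-bit key byte.
 *
 *	u32 lkey = (params->itid << 8) | params->key;
 */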

struct ecore_rdma_create_srq_in_params {
	u64 pbl_base_addr;
	u64 prod_pair_addr;
	u16 num_pages;
	u16 pd_id;
	u16 page_size;
};

struct ecore_rdma_create_srq_out_params {
	u16 srq_id;
};

struct ecore_rdma_destroy_srq_in_params {
	u16 srq_id;
};

struct ecore_rdma_modify_srq_in_params {
	u32 wqe_limit;
	u16 srq_id;
};
#endif

struct ecore_rdma_resize_cq_out_params {
	/* output variables, provided to the upper layer */
	u32 prod; /* CQ producer value on old PBL */
	u32 cons; /* CQ consumer value on old PBL */
};

struct ecore_rdma_resize_cnq_in_params {
	/* input variables (given by miniport) */
	u32	cnq_id;
	u32	pbl_page_size_log; /* for the pages that contain the
				    * pointers to the CNQ pages
				    */
	u64	pbl_ptr;
};

#ifndef __EXTRACT__LINUX__
struct ecore_rdma_stats_out_params {
	u64	sent_bytes;
	u64	sent_pkts;
	u64	rcv_bytes;
	u64	rcv_pkts;

	/* RoCE only */
	u64	icrc_errors;		/* wraps at 32 bits */
	u64	retransmit_events;	/* wraps at 32 bits */
	u64	silent_drops;		/* wraps at 16 bits */
	u64	rnr_nacks_sent;		/* wraps at 16 bits */

	/* iWARP only */
	u64	iwarp_tx_fast_rxmit_cnt;
	u64	iwarp_tx_slow_start_cnt;
	u64	unalign_rx_comp;
};

struct ecore_rdma_counters_out_params {
	u64	pd_count;
	u64	max_pd;
	u64	dpi_count;
	u64	max_dpi;
	u64	cq_count;
	u64	max_cq;
	u64	qp_count;
	u64	max_qp;
	u64	tid_count;
	u64	max_tid;
};
#endif

enum _ecore_status_t
ecore_rdma_add_user(void *rdma_cxt,
		    struct ecore_rdma_add_user_out_params *out_params);

enum _ecore_status_t
ecore_rdma_alloc_pd(void *rdma_cxt,
		    u16	*pd);

enum _ecore_status_t
ecore_rdma_alloc_tid(void *rdma_cxt,
		     u32 *tid);

enum _ecore_status_t
ecore_rdma_create_cq(void *rdma_cxt,
		     struct ecore_rdma_create_cq_in_params *params,
		     u16 *icid);

/* Returns a pointer to the responder's CID, which is also a pointer to the
 * ecore_qp_params struct. Returns NULL in case of failure.
 */
struct ecore_rdma_qp*
ecore_rdma_create_qp(void *rdma_cxt,
		     struct ecore_rdma_create_qp_in_params  *in_params,
		     struct ecore_rdma_create_qp_out_params *out_params);
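
/*
 * Call sketch (illustrative only): since create returns a pointer rather
 * than a status code, callers must check for NULL, e.g.:
 *
 *	struct ecore_rdma_qp *qp;
 *
 *	qp = ecore_rdma_create_qp(rdma_cxt, &in_params, &out_params);
 *	if (qp == NULL) {
 *		... creation failed, clean up and bail out ...
 *	}
 */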

enum _ecore_status_t
ecore_roce_create_ud_qp(void *rdma_cxt,
			struct ecore_rdma_create_qp_out_params *out_params);

enum _ecore_status_t
ecore_rdma_deregister_tid(void *rdma_cxt,
			  u32		tid);

enum _ecore_status_t
ecore_rdma_destroy_cq(void *rdma_cxt,
		      struct ecore_rdma_destroy_cq_in_params  *in_params,
		      struct ecore_rdma_destroy_cq_out_params *out_params);

enum _ecore_status_t
ecore_rdma_destroy_qp(void *rdma_cxt,
		      struct ecore_rdma_qp *qp);

enum _ecore_status_t
ecore_roce_destroy_ud_qp(void *rdma_cxt, u16 cid);

void
ecore_rdma_free_pd(void *rdma_cxt,
		   u16	pd);

void
ecore_rdma_free_tid(void *rdma_cxt,
		    u32	tid);

enum _ecore_status_t
ecore_rdma_modify_qp(void *rdma_cxt,
		     struct ecore_rdma_qp *qp,
		     struct ecore_rdma_modify_qp_in_params *params);

struct ecore_rdma_device*
ecore_rdma_query_device(void *rdma_cxt);

struct ecore_rdma_port*
ecore_rdma_query_port(void *rdma_cxt);

enum _ecore_status_t
ecore_rdma_query_qp(void *rdma_cxt,
		    struct ecore_rdma_qp		  *qp,
		    struct ecore_rdma_query_qp_out_params *out_params);

enum _ecore_status_t
ecore_rdma_register_tid(void *rdma_cxt,
			struct ecore_rdma_register_tid_in_params *params);

void ecore_rdma_remove_user(void *rdma_cxt,
			    u16		dpi);

enum _ecore_status_t
ecore_rdma_resize_cnq(void *rdma_cxt,
		      struct ecore_rdma_resize_cnq_in_params *in_params);

/* Returns the CQ CID or zero in case of failure */
enum _ecore_status_t
ecore_rdma_resize_cq(void *rdma_cxt,
		     struct ecore_rdma_resize_cq_in_params  *in_params,
		     struct ecore_rdma_resize_cq_out_params *out_params);

/* Before calling rdma_start, the upper layer (VBD/qed) should fill in the
 * page size and MTU in the hwfn context.
 */
enum _ecore_status_t
ecore_rdma_start(void *p_hwfn,
		 struct ecore_rdma_start_in_params *params);
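
/*
 * Start-up sketch (illustrative only; the CNQ count, PBL address and event
 * structure are hypothetical): the caller describes its CNQs, MTU and MAC
 * before starting the RDMA engine.
 *
 *	struct ecore_rdma_start_in_params sp = { 0 };
 *
 *	sp.events = &my_events;
 *	sp.desired_cnq = 1;
 *	sp.cnq_pbl_list[0].num_pbl_pages = 1;
 *	sp.cnq_pbl_list[0].pbl_ptr = cnq_pbl_phys;
 *	sp.max_mtu = 1500;
 *	(copy the port MAC into sp.mac_addr, ETH_ALEN bytes)
 *	rc = ecore_rdma_start(p_hwfn, &sp);
 */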

enum _ecore_status_t
ecore_rdma_stop(void *rdma_cxt);

enum _ecore_status_t
ecore_rdma_query_stats(void *rdma_cxt, u8 stats_queue,
		       struct ecore_rdma_stats_out_params *out_params);

enum _ecore_status_t
ecore_rdma_query_counters(void *rdma_cxt,
			  struct ecore_rdma_counters_out_params *out_params);

u32 ecore_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id);

u32 ecore_rdma_query_cau_timer_res(void *p_hwfn);

void ecore_rdma_cnq_prod_update(void *rdma_cxt, u8 cnq_index, u16 prod);

void ecore_rdma_resc_free(struct ecore_hwfn *p_hwfn);

#ifdef CONFIG_ECORE_IWARP

/* iWARP API */

#ifndef __EXTRACT__LINUX__

enum ecore_iwarp_event_type {
	ECORE_IWARP_EVENT_MPA_REQUEST, /* Passive side request received */
	ECORE_IWARP_EVENT_PASSIVE_COMPLETE, /* Passive side established
					     * (ACK on MPA response)
					     */
	ECORE_IWARP_EVENT_ACTIVE_COMPLETE, /* Active side reply received */
	ECORE_IWARP_EVENT_DISCONNECT,
	ECORE_IWARP_EVENT_CLOSE,
	ECORE_IWARP_EVENT_IRQ_FULL,
	ECORE_IWARP_EVENT_RQ_EMPTY,
	ECORE_IWARP_EVENT_LLP_TIMEOUT,
	ECORE_IWARP_EVENT_REMOTE_PROTECTION_ERROR,
	ECORE_IWARP_EVENT_CQ_OVERFLOW,
	ECORE_IWARP_EVENT_QP_CATASTROPHIC,
	ECORE_IWARP_EVENT_ACTIVE_MPA_REPLY,
	ECORE_IWARP_EVENT_LOCAL_ACCESS_ERROR,
	ECORE_IWARP_EVENT_REMOTE_OPERATION_ERROR,
	ECORE_IWARP_EVENT_TERMINATE_RECEIVED
};

enum ecore_tcp_ip_version
{
	ECORE_TCP_IPV4,
	ECORE_TCP_IPV6,
};

struct ecore_iwarp_cm_info {
	enum ecore_tcp_ip_version ip_version;
	u32 remote_ip[4];
	u32 local_ip[4];
	u16 remote_port;
	u16 local_port;
	u16 vlan;
	const void *private_data;
	u16 private_data_len;
	u8 ord;
	u8 ird;
};

struct ecore_iwarp_cm_event_params {
	enum ecore_iwarp_event_type event;
	const struct ecore_iwarp_cm_info *cm_info;
	void *ep_context; /* To be passed to accept call */
	int status;
};

typedef int (*iwarp_event_handler)(void *context,
				   struct ecore_iwarp_cm_event_params *event);

/* Active Side Connect Flow:
 * The upper layer driver calls ecore_iwarp_connect. The function is
 * blocking: it returns after the TCP connection is established. After the
 * MPA connection is established, the ECORE_IWARP_EVENT_ACTIVE_COMPLETE
 * event is passed to the upper layer driver using the event_cb passed in
 * ecore_iwarp_connect_in. Information about the established connection is
 * provided in the event data.
 */
struct ecore_iwarp_connect_in {
	iwarp_event_handler event_cb;
	void *cb_context;
	struct ecore_rdma_qp *qp;
	struct ecore_iwarp_cm_info cm_info;
	u16 mss;
	u8 remote_mac_addr[6];
	u8 local_mac_addr[6];
};

struct ecore_iwarp_connect_out {
	void *ep_context;
};
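
/*
 * Active-side sketch (illustrative only; "my_event_cb", "my_ctx" and the
 * address fields are hypothetical): connect blocks until TCP is up, then
 * MPA completion arrives via the callback.
 *
 *	struct ecore_iwarp_connect_in ci = { 0 };
 *	struct ecore_iwarp_connect_out co;
 *
 *	ci.event_cb = my_event_cb;
 *	ci.cb_context = my_ctx;
 *	ci.qp = qp;
 *	ci.cm_info.ip_version = ECORE_TCP_IPV4;
 *	(fill cm_info addresses/ports, mss and the MAC addresses)
 *	rc = ecore_iwarp_connect(rdma_cxt, &ci, &co);
 *	(on success, co.ep_context identifies the endpoint)
 */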

/* Passive side connect flow:
 * The upper layer driver calls ecore_iwarp_create_listen. Once a SYN packet
 * arrives that matches an IP/port being listened on, ecore will offload the
 * TCP connection. After an MPA Request is received on the offloaded
 * connection, the ECORE_IWARP_EVENT_MPA_REQUEST event is sent to the upper
 * layer driver using the event_cb passed below, with the event data placed
 * in the event parameter. After the upper layer driver processes the event,
 * ecore_iwarp_accept or ecore_iwarp_reject should be called to continue MPA
 * negotiation. Once negotiation is complete, the
 * ECORE_IWARP_EVENT_PASSIVE_COMPLETE event is passed to the event_cb passed
 * originally in the ecore_iwarp_listen_in structure.
 */
struct ecore_iwarp_listen_in {
	iwarp_event_handler event_cb; /* Callback func for delivering events */
	void *cb_context; /* passed to event_cb */
	u32 max_backlog; /* Max num of pending incoming connection requests */
	enum ecore_tcp_ip_version ip_version;
	u32 ip_addr[4];
	u16 port;
	u16 vlan;
};

struct ecore_iwarp_listen_out {
	void *handle; /* to be sent to destroy */
};
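
/*
 * Passive-side sketch (illustrative only; the callback and address values
 * are hypothetical): create the listener, then accept or reject from the
 * ECORE_IWARP_EVENT_MPA_REQUEST callback using the delivered ep_context.
 *
 *	struct ecore_iwarp_listen_in li = { 0 };
 *	struct ecore_iwarp_listen_out lo;
 *
 *	li.event_cb = my_event_cb;
 *	li.cb_context = my_ctx;
 *	li.max_backlog = 8;
 *	li.ip_version = ECORE_TCP_IPV4;
 *	li.ip_addr[0] = my_ipv4_addr;
 *	li.port = 4242;
 *	rc = ecore_iwarp_create_listen(rdma_cxt, &li, &lo);
 *	(later: ecore_iwarp_destroy_listen(rdma_cxt, lo.handle))
 */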

struct ecore_iwarp_accept_in {
	void *ep_context; /* From event data of ECORE_IWARP_EVENT_MPA_REQUEST */
	void *cb_context; /* context to be passed to event_cb */
	struct ecore_rdma_qp *qp;
	const void *private_data;
	u16 private_data_len;
	u8 ord;
	u8 ird;
};

struct ecore_iwarp_reject_in {
	void *ep_context; /* From event data of ECORE_IWARP_EVENT_MPA_REQUEST */
	void *cb_context; /* context to be passed to event_cb */
	const void *private_data;
	u16 private_data_len;
};

struct ecore_iwarp_send_rtr_in {
	void *ep_context;
};

struct ecore_iwarp_tcp_abort_in {
	void *ep_context;
};

#endif

enum _ecore_status_t
ecore_iwarp_connect(void *rdma_cxt,
		    struct ecore_iwarp_connect_in *iparams,
		    struct ecore_iwarp_connect_out *oparams);

enum _ecore_status_t
ecore_iwarp_create_listen(void *rdma_cxt,
			  struct ecore_iwarp_listen_in *iparams,
			  struct ecore_iwarp_listen_out *oparams);

enum _ecore_status_t
ecore_iwarp_accept(void *rdma_cxt,
		   struct ecore_iwarp_accept_in *iparams);

enum _ecore_status_t
ecore_iwarp_reject(void *rdma_cxt,
		   struct ecore_iwarp_reject_in *iparams);

enum _ecore_status_t
ecore_iwarp_destroy_listen(void *rdma_cxt, void *handle);

enum _ecore_status_t
ecore_iwarp_send_rtr(void *rdma_cxt, struct ecore_iwarp_send_rtr_in *iparams);

enum _ecore_status_t
ecore_iwarp_tcp_abort(void *rdma_cxt, struct ecore_iwarp_tcp_abort_in *iparams);

#endif /* CONFIG_ECORE_IWARP */

#endif /* __ECORE_RDMA_API_H__ */