/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_QP_H
#define MLX5_QP_H

#include <linux/mlx5/device.h>
#include <linux/mlx5/driver.h>

#define MLX5_INVALID_LKEY	0x100
#define MLX5_SIG_WQE_SIZE	(MLX5_SEND_WQE_BB * 5)
#define MLX5_DIF_SIZE		8
#define MLX5_STRIDE_BLOCK_OP	0x400
#define MLX5_CPY_GRD_MASK	0xc0
#define MLX5_CPY_APP_MASK	0x30
#define MLX5_CPY_REF_MASK	0x0f
#define MLX5_BSF_INC_REFTAG	(1 << 6)
#define MLX5_BSF_INL_VALID	(1 << 15)
#define MLX5_BSF_REFRESH_DIF	(1 << 14)
#define MLX5_BSF_REPEAT_BLOCK	(1 << 7)
#define MLX5_BSF_APPTAG_ESCAPE	0x1
#define MLX5_BSF_APPREF_ESCAPE	0x2

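/*
 * QP numbers are 24 bits wide; MLX5_QPN_MASK extracts a QPN from a field
 * that carries it in its low 24 bits.
 */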
#define MLX5_QPN_BITS		24
#define MLX5_QPN_MASK		((1 << MLX5_QPN_BITS) - 1)

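/*
 * Optional-parameter bits for the MODIFY_QP command (opt_param_mask),
 * selecting which optional QP context fields are valid for a transition.
 */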
enum mlx5_qp_optpar {
	MLX5_QP_OPTPAR_ALT_ADDR_PATH		= 1 << 0,
	MLX5_QP_OPTPAR_RRE			= 1 << 1,
	MLX5_QP_OPTPAR_RAE			= 1 << 2,
	MLX5_QP_OPTPAR_RWE			= 1 << 3,
	MLX5_QP_OPTPAR_PKEY_INDEX		= 1 << 4,
	MLX5_QP_OPTPAR_Q_KEY			= 1 << 5,
	MLX5_QP_OPTPAR_RNR_TIMEOUT		= 1 << 6,
	MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH	= 1 << 7,
	MLX5_QP_OPTPAR_SRA_MAX			= 1 << 8,
	MLX5_QP_OPTPAR_RRA_MAX			= 1 << 9,
	MLX5_QP_OPTPAR_PM_STATE			= 1 << 10,
	MLX5_QP_OPTPAR_RETRY_COUNT		= 1 << 12,
	MLX5_QP_OPTPAR_RNR_RETRY		= 1 << 13,
	MLX5_QP_OPTPAR_ACK_TIMEOUT		= 1 << 14,
	MLX5_QP_OPTPAR_PRI_PORT			= 1 << 16,
	MLX5_QP_OPTPAR_SRQN			= 1 << 18,
	MLX5_QP_OPTPAR_CQN_RCV			= 1 << 19,
	MLX5_QP_OPTPAR_DC_HS			= 1 << 20,
	MLX5_QP_OPTPAR_DC_KEY			= 1 << 21,
};

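/* QP states, as programmed and reported in the hardware QP context. */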
enum mlx5_qp_state {
	MLX5_QP_STATE_RST			= 0,
	MLX5_QP_STATE_INIT			= 1,
	MLX5_QP_STATE_RTR			= 2,
	MLX5_QP_STATE_RTS			= 3,
	MLX5_QP_STATE_SQER			= 4,
	MLX5_QP_STATE_SQD			= 5,
	MLX5_QP_STATE_ERR			= 6,
	MLX5_QP_STATE_SQ_DRAINING		= 7,
	MLX5_QP_STATE_SUSPENDED			= 9,
	MLX5_QP_NUM_STATE
};

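/* QP transport/service types. */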
enum {
	MLX5_QP_ST_RC				= 0x0,
	MLX5_QP_ST_UC				= 0x1,
	MLX5_QP_ST_UD				= 0x2,
	MLX5_QP_ST_XRC				= 0x3,
	MLX5_QP_ST_MLX				= 0x4,
	MLX5_QP_ST_DCI				= 0x5,
	MLX5_QP_ST_DCT				= 0x6,
	MLX5_QP_ST_QP0				= 0x7,
	MLX5_QP_ST_QP1				= 0x8,
	MLX5_QP_ST_RAW_ETHERTYPE		= 0x9,
	MLX5_QP_ST_RAW_IPV6			= 0xa,
	MLX5_QP_ST_SNIFFER			= 0xb,
	MLX5_QP_ST_SYNC_UMR			= 0xe,
	MLX5_QP_ST_PTP_1588			= 0xd,
	MLX5_QP_ST_REG_UMR			= 0xc,
	MLX5_QP_ST_MAX
};

enum {
	MLX5_QP_PM_MIGRATED			= 0x3,
	MLX5_QP_PM_ARMED			= 0x0,
	MLX5_QP_PM_REARM			= 0x1
};

enum {
	MLX5_NON_ZERO_RQ	= 0 << 24,
	MLX5_SRQ_RQ		= 1 << 24,
	MLX5_CRQ_RQ		= 2 << 24,
	MLX5_ZERO_LEN_RQ	= 3 << 24
};

enum {
	/* params1 */
	MLX5_QP_BIT_SRE				= 1 << 15,
	MLX5_QP_BIT_SWE				= 1 << 14,
	MLX5_QP_BIT_SAE				= 1 << 13,
	/* params2 */
	MLX5_QP_BIT_RRE				= 1 << 15,
	MLX5_QP_BIT_RWE				= 1 << 14,
	MLX5_QP_BIT_RAE				= 1 << 13,
	MLX5_QP_BIT_RIC				= 1 << 4,
};

enum {
	MLX5_WQE_CTRL_CQ_UPDATE		= 2 << 2,
	MLX5_WQE_CTRL_CQ_UPDATE_AND_EQE	= 3 << 2,
	MLX5_WQE_CTRL_SOLICITED		= 1 << 1,
};

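/*
 * Send WQE geometry: WQEs are built from 16-byte data segments (DS) and are
 * posted in 64-byte basic blocks (WQEBBs); a single WQE may span at most
 * MLX5_SEND_WQE_MAX_WQEBBS basic blocks.
 */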
enum {
	MLX5_SEND_WQE_DS	= 16,
	MLX5_SEND_WQE_BB	= 64,
};

#define MLX5_SEND_WQEBB_NUM_DS	(MLX5_SEND_WQE_BB / MLX5_SEND_WQE_DS)

enum {
	MLX5_SEND_WQE_MAX_WQEBBS	= 16,
};

enum {
	MLX5_WQE_FMR_PERM_LOCAL_READ	= 1 << 27,
	MLX5_WQE_FMR_PERM_LOCAL_WRITE	= 1 << 28,
	MLX5_WQE_FMR_PERM_REMOTE_READ	= 1 << 29,
	MLX5_WQE_FMR_PERM_REMOTE_WRITE	= 1 << 30,
	MLX5_WQE_FMR_PERM_ATOMIC	= 1 << 31
};

enum {
	MLX5_FENCE_MODE_NONE			= 0 << 5,
	MLX5_FENCE_MODE_INITIATOR_SMALL		= 1 << 5,
	MLX5_FENCE_MODE_STRONG_ORDERING		= 3 << 5,
	MLX5_FENCE_MODE_SMALL_AND_FENCE		= 4 << 5,
};

enum {
	MLX5_QP_LAT_SENSITIVE	= 1 << 28,
	MLX5_QP_BLOCK_MCAST	= 1 << 30,
	MLX5_QP_ENABLE_SIG	= 1 << 31,
};

enum {
	MLX5_RCV_DBR	= 0,
	MLX5_SND_DBR	= 1,
};

enum {
	MLX5_FLAGS_INLINE	= 1 << 7,
	MLX5_FLAGS_CHECK_FREE	= 1 << 5,
};

struct mlx5_wqe_fmr_seg {
	__be32			flags;
	__be32			mem_key;
	__be64			buf_list;
	__be64			start_addr;
	__be64			reg_len;
	__be32			offset;
	__be32			page_size;
	u32			reserved[2];
};

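/*
 * Control segment that starts every send WQE: opmod_idx_opcode packs the
 * opcode modifier, WQE index and opcode; qpn_ds packs the QP/SQ number and
 * the WQE size in 16-byte data segments; fm_ce_se carries the fence,
 * completion-event and solicited-event flags.
 */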
struct mlx5_wqe_ctrl_seg {
	__be32			opmod_idx_opcode;
	__be32			qpn_ds;
	u8			signature;
	u8			rsvd[2];
	u8			fm_ce_se;
	__be32			imm;
};

#define MLX5_WQE_CTRL_DS_MASK 0x3f
#define MLX5_WQE_CTRL_QPN_MASK 0xffffff00
#define MLX5_WQE_CTRL_QPN_SHIFT 8
#define MLX5_WQE_DS_UNITS 16
#define MLX5_WQE_CTRL_OPCODE_MASK 0xff
#define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00
#define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8
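/*
 * Sketch of the usual field composition (pc, opcode, qpn and ds_cnt below
 * are illustrative local variables, not part of this header):
 *
 *	ctrl->opmod_idx_opcode = cpu_to_be32(((pc & 0xffff) << 8) | opcode);
 *	ctrl->qpn_ds = cpu_to_be32((qpn << MLX5_WQE_CTRL_QPN_SHIFT) |
 *				   (ds_cnt & MLX5_WQE_CTRL_DS_MASK));
 */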

enum {
	MLX5_ETH_WQE_L3_INNER_CSUM      = 1 << 4,
	MLX5_ETH_WQE_L4_INNER_CSUM      = 1 << 5,
	MLX5_ETH_WQE_L3_CSUM            = 1 << 6,
	MLX5_ETH_WQE_L4_CSUM            = 1 << 7,
};

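/*
 * Ethernet segment used by raw Ethernet send WQEs; cs_flags takes the
 * MLX5_ETH_WQE_*_CSUM bits above, and inline_hdr_sz/inline_hdr_start
 * describe the packet headers inlined into the WQE.
 */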
struct mlx5_wqe_eth_seg {
	u8              rsvd0[4];
	u8              cs_flags;
	u8              rsvd1;
	__be16          mss;
	__be32          rsvd2;
	__be16          inline_hdr_sz;
	u8              inline_hdr_start[2];
};

struct mlx5_wqe_xrc_seg {
	__be32			xrc_srqn;
	u8			rsvd[12];
};

struct mlx5_wqe_masked_atomic_seg {
	__be64			swap_add;
	__be64			compare;
	__be64			swap_add_mask;
	__be64			compare_mask;
};

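/* Address vector carried in the datagram segment of UD/DC WQEs. */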
struct mlx5_av {
	union {
		struct {
			__be32	qkey;
			__be32	reserved;
		} qkey;
		__be64	dc_key;
	} key;
	__be32	dqp_dct;
	u8	stat_rate_sl;
	u8	fl_mlid;
	__be16	rlid;
	u8	reserved0[10];
	u8	tclass;
	u8	hop_limit;
	__be32	grh_gid_fl;
	u8	rgid[16];
};

struct mlx5_wqe_datagram_seg {
	struct mlx5_av	av;
};

struct mlx5_wqe_raddr_seg {
	__be64			raddr;
	__be32			rkey;
	u32			reserved;
};

struct mlx5_wqe_atomic_seg {
	__be64			swap_add;
	__be64			compare;
};

struct mlx5_wqe_data_seg {
	__be32			byte_count;
	__be32			lkey;
	__be64			addr;
};

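/*
 * Control segment of UMR (user-mode memory registration) WQEs; the
 * *_octowords fields are sizes in 16-byte units and mkey_mask selects
 * which memory key fields are to be updated.
 */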
struct mlx5_wqe_umr_ctrl_seg {
	u8		flags;
	u8		rsvd0[3];
	__be16		klm_octowords;
	__be16		bsf_octowords;
	__be64		mkey_mask;
	u8		rsvd1[32];
};

struct mlx5_seg_set_psv {
	__be32		psv_num;
	__be16		syndrome;
	__be16		status;
	__be32		transient_sig;
	__be32		ref_tag;
};

struct mlx5_seg_get_psv {
	u8		rsvd[19];
	u8		num_psv;
	__be32		l_key;
	__be64		va;
	__be32		psv_index[4];
};

struct mlx5_seg_check_psv {
	u8		rsvd0[2];
	__be16		err_coalescing_op;
	u8		rsvd1[2];
	__be16		xport_err_op;
	u8		rsvd2[2];
	__be16		xport_err_mask;
	u8		rsvd3[7];
	u8		num_psv;
	__be32		l_key;
	__be64		va;
	__be32		psv_index[4];
};

struct mlx5_rwqe_sig {
	u8	rsvd0[4];
	u8	signature;
	u8	rsvd1[11];
};

struct mlx5_wqe_signature_seg {
	u8	rsvd0[4];
	u8	signature;
	u8	rsvd1[11];
};

#define MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK 0x3ff

struct mlx5_wqe_inline_seg {
	__be32	byte_count;
};

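/* DIF signature types: T10-DIF CRC guard or IP checksum guard. */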
enum mlx5_sig_type {
	MLX5_DIF_CRC = 0x1,
	MLX5_DIF_IPCS = 0x2,
};

struct mlx5_bsf_inl {
	__be16		vld_refresh;
	__be16		dif_apptag;
	__be32		dif_reftag;
	u8		sig_type;
	u8		rp_inv_seed;
	u8		rsvd[3];
	u8		dif_inc_ref_guard_check;
	__be16		dif_app_bitmask_check;
};

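/*
 * BSF segment describing the signature (e.g. T10-DIF) attributes of the
 * wire and memory domains for signature-enabled memory keys.
 */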
struct mlx5_bsf {
	struct mlx5_bsf_basic {
		u8		bsf_size_sbs;
		u8		check_byte_mask;
		union {
			u8	copy_byte_mask;
			u8	bs_selector;
			u8	rsvd_wflags;
		} wire;
		union {
			u8	bs_selector;
			u8	rsvd_mflags;
		} mem;
		__be32		raw_data_size;
		__be32		w_bfs_psv;
		__be32		m_bfs_psv;
	} basic;
	struct mlx5_bsf_ext {
		__be32		t_init_gen_pro_size;
		__be32		rsvd_epi_size;
		__be32		w_tfs_psv;
		__be32		m_tfs_psv;
	} ext;
	struct mlx5_bsf_inl	w_inl;
	struct mlx5_bsf_inl	m_inl;
};

struct mlx5_klm {
	__be32		bcount;
	__be32		key;
	__be64		va;
};

struct mlx5_stride_block_entry {
	__be16		stride;
	__be16		bcount;
	__be32		key;
	__be64		va;
};

struct mlx5_stride_block_ctrl_seg {
	__be32		bcount_per_cycle;
	__be32		op;
	__be32		repeat_count;
	u16		rsvd;
	__be16		num_entries;
};

enum mlx5_pagefault_flags {
	MLX5_PFAULT_REQUESTOR = 1 << 0,
	MLX5_PFAULT_WRITE     = 1 << 1,
	MLX5_PFAULT_RDMA      = 1 << 2,
};

/* Contains the details of a pagefault. */
struct mlx5_pagefault {
	u32			bytes_committed;
	u8			event_subtype;
	enum mlx5_pagefault_flags flags;
	union {
		/* Initiator or send message responder pagefault details. */
		struct {
			/* Received packet size, only valid for responders. */
			u32	packet_size;
			/*
			 * WQE index. Refers to either the send queue or
			 * receive queue, according to event_subtype.
			 */
			u16	wqe_index;
		} wqe;
		/* RDMA responder pagefault details */
		struct {
			u32	r_key;
			/*
			 * Received packet size; this is also the minimal
			 * page fault resolution size required for forward
			 * progress.
			 */
			u32	packet_size;
			u32	rdma_op_len;
			u64	rdma_va;
		} rdma;
	};
};

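/*
 * Driver-side state of a QP; the event and pfault_handler callbacks are
 * invoked from the core driver's event handling (async QP events and,
 * with ODP, page-fault events).
 */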
struct mlx5_core_qp {
	struct mlx5_core_rsc_common	common; /* must be first */
	void (*event)(struct mlx5_core_qp *, int);
	void (*pfault_handler)(struct mlx5_core_qp *, struct mlx5_pagefault *);
	int			qpn;
	struct mlx5_rsc_debug	*dbg;
	int			pid;
};

struct mlx5_qp_path {
	u8			fl;
	u8			rsvd3;
	u8			free_ar;
	u8			pkey_index;
	u8			rsvd0;
	u8			grh_mlid;
	__be16			rlid;
	u8			ackto_lt;
	u8			mgid_index;
	u8			static_rate;
	u8			hop_limit;
	__be32			tclass_flowlabel;
	u8			rgid[16];
	u8			rsvd1[4];
	u8			sl;
	u8			port;
	u8			rsvd2[6];
};

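/*
 * Hardware QP context, in the big-endian mailbox layout embedded in the
 * create/modify/query QP commands below.
 */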
struct mlx5_qp_context {
	__be32			flags;
	__be32			flags_pd;
	u8			mtu_msgmax;
	u8			rq_size_stride;
	__be16			sq_crq_size;
	__be32			qp_counter_set_usr_page;
	__be32			wire_qpn;
	__be32			log_pg_sz_remote_qpn;
	struct mlx5_qp_path	pri_path;
	struct mlx5_qp_path	alt_path;
	__be32			params1;
	u8			reserved2[4];
	__be32			next_send_psn;
	__be32			cqn_send;
	u8			reserved3[8];
	__be32			last_acked_psn;
	__be32			ssn;
	__be32			params2;
	__be32			rnr_nextrecvpsn;
	__be32			xrcd;
	__be32			cqn_recv;
	__be64			db_rec_addr;
	__be32			qkey;
	__be32			rq_type_srqn;
	__be32			rmsn;
	__be16			hw_sq_wqe_counter;
	__be16			sw_sq_wqe_counter;
	__be16			hw_rcyclic_byte_counter;
	__be16			hw_rq_counter;
	__be16			sw_rcyclic_byte_counter;
	__be16			sw_rq_counter;
	u8			rsvd0[5];
	u8			cgs;
	u8			cs_req;
	u8			cs_res;
	__be64			dc_access_key;
	u8			rsvd1[24];
};

struct mlx5_create_qp_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			input_qpn;
	u8			rsvd0[4];
	__be32			opt_param_mask;
	u8			rsvd1[4];
	struct mlx5_qp_context	ctx;
	u8			rsvd3[16];
	__be64			pas[0];
};

struct mlx5_create_qp_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	__be32			qpn;
	u8			rsvd0[4];
};

struct mlx5_destroy_qp_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			qpn;
	u8			rsvd0[4];
};

struct mlx5_destroy_qp_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd0[8];
};

struct mlx5_modify_qp_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			qpn;
	u8			rsvd1[4];
	__be32			optparam;
	u8			rsvd0[4];
	struct mlx5_qp_context	ctx;
};

struct mlx5_modify_qp_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd0[8];
};

struct mlx5_query_qp_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			qpn;
	u8			rsvd[4];
};

struct mlx5_query_qp_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd1[8];
	__be32			optparam;
	u8			rsvd0[4];
	struct mlx5_qp_context	ctx;
	u8			rsvd2[16];
	__be64			pas[0];
};

struct mlx5_conf_sqp_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			qpn;
	u8			rsvd[3];
	u8			type;
};

struct mlx5_conf_sqp_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd[8];
};

struct mlx5_alloc_xrcd_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	u8			rsvd[8];
};

struct mlx5_alloc_xrcd_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	__be32			xrcdn;
	u8			rsvd[4];
};

struct mlx5_dealloc_xrcd_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			xrcdn;
	u8			rsvd[4];
};

struct mlx5_dealloc_xrcd_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd[8];
};

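/*
 * Lookup helpers over the device's QP and MR radix trees; callers must
 * ensure the object cannot be removed concurrently (e.g. by holding the
 * corresponding table lock).
 */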
static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn)
{
	return radix_tree_lookup(&dev->priv.qp_table.tree, qpn);
}

static inline struct mlx5_core_mr *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u32 key)
{
	return radix_tree_lookup(&dev->priv.mr_table.tree, key);
}

struct mlx5_page_fault_resume_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			flags_qpn;
	u8			reserved[4];
};

struct mlx5_page_fault_resume_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd[8];
};

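/*
 * Typical flow (sketch): the caller builds a create mailbox (QP context plus
 * the pas[] page list), creates the QP with mlx5_core_create_qp(), and then
 * drives it through the RST -> INIT -> RTR -> RTS transitions with
 * mlx5_core_qp_modify(), passing the optional-parameter mask relevant to
 * each transition.
 */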
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
			struct mlx5_core_qp *qp,
			struct mlx5_create_qp_mbox_in *in,
			int inlen);
int mlx5_core_qp_modify(struct mlx5_core_dev *dev, enum mlx5_qp_state cur_state,
			enum mlx5_qp_state new_state,
			struct mlx5_modify_qp_mbox_in *in, int sqd_event,
			struct mlx5_core_qp *qp);
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
			 struct mlx5_core_qp *qp);
int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
		       struct mlx5_query_qp_mbox_out *out, int outlen);

int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn);
int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn);
void mlx5_init_qp_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev);
int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
				u8 context, int error);
#endif

static inline const char *mlx5_qp_type_str(int type)
{
	switch (type) {
	case MLX5_QP_ST_RC: return "RC";
	case MLX5_QP_ST_UC: return "UC";
	case MLX5_QP_ST_UD: return "UD";
	case MLX5_QP_ST_XRC: return "XRC";
	case MLX5_QP_ST_MLX: return "MLX";
	case MLX5_QP_ST_QP0: return "QP0";
	case MLX5_QP_ST_QP1: return "QP1";
	case MLX5_QP_ST_RAW_ETHERTYPE: return "RAW_ETHERTYPE";
	case MLX5_QP_ST_RAW_IPV6: return "RAW_IPV6";
	case MLX5_QP_ST_SNIFFER: return "SNIFFER";
	case MLX5_QP_ST_SYNC_UMR: return "SYNC_UMR";
	case MLX5_QP_ST_PTP_1588: return "PTP_1588";
	case MLX5_QP_ST_REG_UMR: return "REG_UMR";
	default: return "Invalid transport type";
	}
}

static inline const char *mlx5_qp_state_str(int state)
{
	switch (state) {
	case MLX5_QP_STATE_RST:
		return "RST";
	case MLX5_QP_STATE_INIT:
		return "INIT";
	case MLX5_QP_STATE_RTR:
		return "RTR";
	case MLX5_QP_STATE_RTS:
		return "RTS";
	case MLX5_QP_STATE_SQER:
		return "SQER";
	case MLX5_QP_STATE_SQD:
		return "SQD";
	case MLX5_QP_STATE_ERR:
		return "ERR";
	case MLX5_QP_STATE_SQ_DRAINING:
		return "SQ_DRAINING";
	case MLX5_QP_STATE_SUSPENDED:
		return "SUSPENDED";
	default: return "Invalid QP state";
	}
}

#endif /* MLX5_QP_H */