/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_QP_H
#define MLX5_QP_H

#include <linux/mlx5/device.h>
#include <linux/mlx5/driver.h>

#define MLX5_INVALID_LKEY	0x100
#define MLX5_SIG_WQE_SIZE	(MLX5_SEND_WQE_BB * 5)
#define MLX5_DIF_SIZE		8
#define MLX5_STRIDE_BLOCK_OP	0x400
#define MLX5_CPY_GRD_MASK	0xc0
#define MLX5_CPY_APP_MASK	0x30
#define MLX5_CPY_REF_MASK	0x0f
#define MLX5_BSF_INC_REFTAG	(1 << 6)
#define MLX5_BSF_INL_VALID	(1 << 15)
#define MLX5_BSF_REFRESH_DIF	(1 << 14)
#define MLX5_BSF_REPEAT_BLOCK	(1 << 7)
#define MLX5_BSF_APPTAG_ESCAPE	0x1
#define MLX5_BSF_APPREF_ESCAPE	0x2

#define MLX5_QPN_BITS		24
#define MLX5_QPN_MASK		((1 << MLX5_QPN_BITS) - 1)

enum mlx5_qp_optpar {
	MLX5_QP_OPTPAR_ALT_ADDR_PATH		= 1 << 0,
	MLX5_QP_OPTPAR_RRE			= 1 << 1,
	MLX5_QP_OPTPAR_RAE			= 1 << 2,
	MLX5_QP_OPTPAR_RWE			= 1 << 3,
	MLX5_QP_OPTPAR_PKEY_INDEX		= 1 << 4,
	MLX5_QP_OPTPAR_Q_KEY			= 1 << 5,
	MLX5_QP_OPTPAR_RNR_TIMEOUT		= 1 << 6,
	MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH	= 1 << 7,
	MLX5_QP_OPTPAR_SRA_MAX			= 1 << 8,
	MLX5_QP_OPTPAR_RRA_MAX			= 1 << 9,
	MLX5_QP_OPTPAR_PM_STATE			= 1 << 10,
	MLX5_QP_OPTPAR_RETRY_COUNT		= 1 << 12,
	MLX5_QP_OPTPAR_RNR_RETRY		= 1 << 13,
	MLX5_QP_OPTPAR_ACK_TIMEOUT		= 1 << 14,
	MLX5_QP_OPTPAR_PRI_PORT			= 1 << 16,
	MLX5_QP_OPTPAR_SRQN			= 1 << 18,
	MLX5_QP_OPTPAR_CQN_RCV			= 1 << 19,
	MLX5_QP_OPTPAR_DC_HS			= 1 << 20,
	MLX5_QP_OPTPAR_DC_KEY			= 1 << 21,
};

enum mlx5_qp_state {
	MLX5_QP_STATE_RST			= 0,
	MLX5_QP_STATE_INIT			= 1,
	MLX5_QP_STATE_RTR			= 2,
	MLX5_QP_STATE_RTS			= 3,
	MLX5_QP_STATE_SQER			= 4,
	MLX5_QP_STATE_SQD			= 5,
	MLX5_QP_STATE_ERR			= 6,
	MLX5_QP_STATE_SQ_DRAINING		= 7,
	MLX5_QP_STATE_SUSPENDED			= 9,
	MLX5_QP_NUM_STATE
};

enum {
	MLX5_QP_ST_RC				= 0x0,
	MLX5_QP_ST_UC				= 0x1,
	MLX5_QP_ST_UD				= 0x2,
	MLX5_QP_ST_XRC				= 0x3,
	MLX5_QP_ST_MLX				= 0x4,
	MLX5_QP_ST_DCI				= 0x5,
	MLX5_QP_ST_DCT				= 0x6,
	MLX5_QP_ST_QP0				= 0x7,
	MLX5_QP_ST_QP1				= 0x8,
	MLX5_QP_ST_RAW_ETHERTYPE		= 0x9,
	MLX5_QP_ST_RAW_IPV6			= 0xa,
	MLX5_QP_ST_SNIFFER			= 0xb,
	MLX5_QP_ST_SYNC_UMR			= 0xe,
	MLX5_QP_ST_PTP_1588			= 0xd,
	MLX5_QP_ST_REG_UMR			= 0xc,
	MLX5_QP_ST_MAX
};

enum {
	MLX5_QP_PM_MIGRATED			= 0x3,
	MLX5_QP_PM_ARMED			= 0x0,
	MLX5_QP_PM_REARM			= 0x1
};

enum {
	MLX5_NON_ZERO_RQ	= 0 << 24,
	MLX5_SRQ_RQ		= 1 << 24,
	MLX5_CRQ_RQ		= 2 << 24,
	MLX5_ZERO_LEN_RQ	= 3 << 24
};

enum {
	/* params1 */
	MLX5_QP_BIT_SRE				= 1 << 15,
	MLX5_QP_BIT_SWE				= 1 << 14,
	MLX5_QP_BIT_SAE				= 1 << 13,
	/* params2 */
	MLX5_QP_BIT_RRE				= 1 << 15,
	MLX5_QP_BIT_RWE				= 1 << 14,
	MLX5_QP_BIT_RAE				= 1 << 13,
	MLX5_QP_BIT_RIC				= 1 << 4,
};

enum {
	MLX5_WQE_CTRL_CQ_UPDATE		= 2 << 2,
	MLX5_WQE_CTRL_SOLICITED		= 1 << 1,
};

enum {
	MLX5_SEND_WQE_BB	= 64,
};

enum {
	MLX5_WQE_FMR_PERM_LOCAL_READ	= 1 << 27,
	MLX5_WQE_FMR_PERM_LOCAL_WRITE	= 1 << 28,
	MLX5_WQE_FMR_PERM_REMOTE_READ	= 1 << 29,
	MLX5_WQE_FMR_PERM_REMOTE_WRITE	= 1 << 30,
	MLX5_WQE_FMR_PERM_ATOMIC	= 1 << 31
};

enum {
	MLX5_FENCE_MODE_NONE			= 0 << 5,
	MLX5_FENCE_MODE_INITIATOR_SMALL		= 1 << 5,
	MLX5_FENCE_MODE_STRONG_ORDERING		= 3 << 5,
	MLX5_FENCE_MODE_SMALL_AND_FENCE		= 4 << 5,
};

enum {
	MLX5_QP_LAT_SENSITIVE	= 1 << 28,
	MLX5_QP_BLOCK_MCAST	= 1 << 30,
	MLX5_QP_ENABLE_SIG	= 1 << 31,
};

enum {
	MLX5_RCV_DBR	= 0,
	MLX5_SND_DBR	= 1,
};

enum {
	MLX5_FLAGS_INLINE	= 1 << 7,
	MLX5_FLAGS_CHECK_FREE	= 1 << 5,
};

struct mlx5_wqe_fmr_seg {
	__be32			flags;
	__be32			mem_key;
	__be64			buf_list;
	__be64			start_addr;
	__be64			reg_len;
	__be32			offset;
	__be32			page_size;
	u32			reserved[2];
};

struct mlx5_wqe_ctrl_seg {
	__be32			opmod_idx_opcode;
	__be32			qpn_ds;
	u8			signature;
	u8			rsvd[2];
	u8			fm_ce_se;
	__be32			imm;
};

#define MLX5_WQE_CTRL_DS_MASK 0x3f
#define MLX5_WQE_CTRL_QPN_MASK 0xffffff00
#define MLX5_WQE_CTRL_QPN_SHIFT 8
#define MLX5_WQE_DS_UNITS 16
#define MLX5_WQE_CTRL_OPCODE_MASK 0xff
#define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00
#define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8
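
/*
 * Illustrative sketch of how the mask/shift definitions above map onto
 * the control segment fields.  The helper name and its arguments are
 * hypothetical (this is not a driver export); real WQE construction is
 * done by the consumers of this header in their post-send paths.
 */
static inline void mlx5_example_set_ctrl_seg(struct mlx5_wqe_ctrl_seg *ctrl,
					     u16 wqe_index, u8 opcode,
					     u32 qpn, u8 ds_count)
{
	/* opcode in bits 0-7, WQE index in bits 8-23 */
	ctrl->opmod_idx_opcode =
		cpu_to_be32(((u32)wqe_index << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
			    (opcode & MLX5_WQE_CTRL_OPCODE_MASK));
	/* QPN in bits 8-31, WQE size in 16-byte units in bits 0-5 */
	ctrl->qpn_ds = cpu_to_be32(((qpn & MLX5_QPN_MASK) << MLX5_WQE_CTRL_QPN_SHIFT) |
				   (ds_count & MLX5_WQE_CTRL_DS_MASK));
}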

struct mlx5_wqe_xrc_seg {
	__be32			xrc_srqn;
	u8			rsvd[12];
};

struct mlx5_wqe_masked_atomic_seg {
	__be64			swap_add;
	__be64			compare;
	__be64			swap_add_mask;
	__be64			compare_mask;
};

struct mlx5_av {
	union {
		struct {
			__be32	qkey;
			__be32	reserved;
		} qkey;
		__be64	dc_key;
	} key;
	__be32	dqp_dct;
	u8	stat_rate_sl;
	u8	fl_mlid;
	__be16	rlid;
	u8	reserved0[10];
	u8	tclass;
	u8	hop_limit;
	__be32	grh_gid_fl;
	u8	rgid[16];
};

struct mlx5_wqe_datagram_seg {
	struct mlx5_av	av;
};

struct mlx5_wqe_raddr_seg {
	__be64			raddr;
	__be32			rkey;
	u32			reserved;
};

struct mlx5_wqe_atomic_seg {
	__be64			swap_add;
	__be64			compare;
};

struct mlx5_wqe_data_seg {
	__be32			byte_count;
	__be32			lkey;
	__be64			addr;
};
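
/*
 * Illustrative sketch (hypothetical helper, not part of the driver API):
 * a scatter/gather pointer is described to the HCA by writing the byte
 * count, lkey and DMA address of the buffer into a data segment in
 * big-endian form.
 */
static inline void mlx5_example_set_data_seg(struct mlx5_wqe_data_seg *dseg,
					     u32 length, u32 lkey, u64 dma_addr)
{
	dseg->byte_count = cpu_to_be32(length);
	dseg->lkey	 = cpu_to_be32(lkey);
	dseg->addr	 = cpu_to_be64(dma_addr);
}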

struct mlx5_wqe_umr_ctrl_seg {
	u8		flags;
	u8		rsvd0[3];
	__be16		klm_octowords;
	__be16		bsf_octowords;
	__be64		mkey_mask;
	u8		rsvd1[32];
};

struct mlx5_seg_set_psv {
	__be32		psv_num;
	__be16		syndrome;
	__be16		status;
	__be32		transient_sig;
	__be32		ref_tag;
};

struct mlx5_seg_get_psv {
	u8		rsvd[19];
	u8		num_psv;
	__be32		l_key;
	__be64		va;
	__be32		psv_index[4];
};

struct mlx5_seg_check_psv {
	u8		rsvd0[2];
	__be16		err_coalescing_op;
	u8		rsvd1[2];
	__be16		xport_err_op;
	u8		rsvd2[2];
	__be16		xport_err_mask;
	u8		rsvd3[7];
	u8		num_psv;
	__be32		l_key;
	__be64		va;
	__be32		psv_index[4];
};

struct mlx5_rwqe_sig {
	u8	rsvd0[4];
	u8	signature;
	u8	rsvd1[11];
};

struct mlx5_wqe_signature_seg {
	u8	rsvd0[4];
	u8	signature;
	u8	rsvd1[11];
};

#define MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK 0x3ff

struct mlx5_wqe_inline_seg {
	__be32	byte_count;
};

enum mlx5_sig_type {
	MLX5_DIF_CRC = 0x1,
	MLX5_DIF_IPCS = 0x2,
};

struct mlx5_bsf_inl {
	__be16		vld_refresh;
	__be16		dif_apptag;
	__be32		dif_reftag;
	u8		sig_type;
	u8		rp_inv_seed;
	u8		rsvd[3];
	u8		dif_inc_ref_guard_check;
	__be16		dif_app_bitmask_check;
};

struct mlx5_bsf {
	struct mlx5_bsf_basic {
		u8		bsf_size_sbs;
		u8		check_byte_mask;
		union {
			u8	copy_byte_mask;
			u8	bs_selector;
			u8	rsvd_wflags;
		} wire;
		union {
			u8	bs_selector;
			u8	rsvd_mflags;
		} mem;
		__be32		raw_data_size;
		__be32		w_bfs_psv;
		__be32		m_bfs_psv;
	} basic;
	struct mlx5_bsf_ext {
		__be32		t_init_gen_pro_size;
		__be32		rsvd_epi_size;
		__be32		w_tfs_psv;
		__be32		m_tfs_psv;
	} ext;
	struct mlx5_bsf_inl	w_inl;
	struct mlx5_bsf_inl	m_inl;
};

struct mlx5_klm {
	__be32		bcount;
	__be32		key;
	__be64		va;
};

struct mlx5_stride_block_entry {
	__be16		stride;
	__be16		bcount;
	__be32		key;
	__be64		va;
};

struct mlx5_stride_block_ctrl_seg {
	__be32		bcount_per_cycle;
	__be32		op;
	__be32		repeat_count;
	u16		rsvd;
	__be16		num_entries;
};

enum mlx5_pagefault_flags {
	MLX5_PFAULT_REQUESTOR = 1 << 0,
	MLX5_PFAULT_WRITE     = 1 << 1,
	MLX5_PFAULT_RDMA      = 1 << 2,
};

/* Contains the details of a pagefault. */
struct mlx5_pagefault {
	u32			bytes_committed;
	u8			event_subtype;
	enum mlx5_pagefault_flags flags;
	union {
		/* Initiator or send message responder pagefault details. */
		struct {
			/* Received packet size, only valid for responders. */
			u32	packet_size;
			/*
			 * WQE index. Refers to either the send queue or
			 * receive queue, according to event_subtype.
			 */
			u16	wqe_index;
		} wqe;
		/* RDMA responder pagefault details */
		struct {
			u32	r_key;
			/*
			 * Received packet size, minimal size page fault
			 * resolution required for forward progress.
			 */
			u32	packet_size;
			u32	rdma_op_len;
			u64	rdma_va;
		} rdma;
	};
};

struct mlx5_core_qp {
	struct mlx5_core_rsc_common	common; /* must be first */
	void (*event)		(struct mlx5_core_qp *, int);
	void (*pfault_handler)(struct mlx5_core_qp *, struct mlx5_pagefault *);
	int			qpn;
	struct mlx5_rsc_debug	*dbg;
	int			pid;
};
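
/*
 * Illustrative sketch of a pfault_handler callback (hypothetical; the
 * actual on-demand-paging handling lives in the consumer of this header).
 * The flags field indicates whether the fault was reported for a WQE
 * (send or receive queue) or for an RDMA operation, which selects the
 * matching member of the union in struct mlx5_pagefault.
 */
static inline void mlx5_example_pfault_handler(struct mlx5_core_qp *qp,
					       struct mlx5_pagefault *pfault)
{
	if (pfault->flags & MLX5_PFAULT_RDMA) {
		/* RDMA fault: resolve rdma_va .. rdma_va + rdma_op_len for
		 * the memory region referenced by r_key.
		 */
		pr_debug("RDMA pagefault: rkey 0x%x va 0x%llx len %u\n",
			 pfault->rdma.r_key, pfault->rdma.rdma_va,
			 pfault->rdma.rdma_op_len);
	} else {
		/* WQE fault: wqe_index points into the send or receive
		 * queue, depending on event_subtype.
		 */
		pr_debug("WQE pagefault: qpn 0x%x wqe_index 0x%x\n",
			 qp->qpn, pfault->wqe.wqe_index);
	}
}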

struct mlx5_qp_path {
	u8			fl;
	u8			rsvd3;
	u8			free_ar;
	u8			pkey_index;
	u8			rsvd0;
	u8			grh_mlid;
	__be16			rlid;
	u8			ackto_lt;
	u8			mgid_index;
	u8			static_rate;
	u8			hop_limit;
	__be32			tclass_flowlabel;
	u8			rgid[16];
	u8			rsvd1[4];
	u8			sl;
	u8			port;
	u8			rsvd2[6];
};

struct mlx5_qp_context {
	__be32			flags;
	__be32			flags_pd;
	u8			mtu_msgmax;
	u8			rq_size_stride;
	__be16			sq_crq_size;
	__be32			qp_counter_set_usr_page;
	__be32			wire_qpn;
	__be32			log_pg_sz_remote_qpn;
	struct mlx5_qp_path	pri_path;
	struct mlx5_qp_path	alt_path;
	__be32			params1;
	u8			reserved2[4];
	__be32			next_send_psn;
	__be32			cqn_send;
	u8			reserved3[8];
	__be32			last_acked_psn;
	__be32			ssn;
	__be32			params2;
	__be32			rnr_nextrecvpsn;
	__be32			xrcd;
	__be32			cqn_recv;
	__be64			db_rec_addr;
	__be32			qkey;
	__be32			rq_type_srqn;
	__be32			rmsn;
	__be16			hw_sq_wqe_counter;
	__be16			sw_sq_wqe_counter;
	__be16			hw_rcyclic_byte_counter;
	__be16			hw_rq_counter;
	__be16			sw_rcyclic_byte_counter;
	__be16			sw_rq_counter;
	u8			rsvd0[5];
	u8			cgs;
	u8			cs_req;
	u8			cs_res;
	__be64			dc_access_key;
	u8			rsvd1[24];
};

struct mlx5_create_qp_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			input_qpn;
	u8			rsvd0[4];
	__be32			opt_param_mask;
	u8			rsvd1[4];
	struct mlx5_qp_context	ctx;
	u8			rsvd3[16];
	__be64			pas[0];
};

struct mlx5_create_qp_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	__be32			qpn;
	u8			rsvd0[4];
};

struct mlx5_destroy_qp_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			qpn;
	u8			rsvd0[4];
};

struct mlx5_destroy_qp_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd0[8];
};

struct mlx5_modify_qp_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			qpn;
	u8			rsvd1[4];
	__be32			optparam;
	u8			rsvd0[4];
	struct mlx5_qp_context	ctx;
};

struct mlx5_modify_qp_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd0[8];
};

struct mlx5_query_qp_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			qpn;
	u8			rsvd[4];
};

struct mlx5_query_qp_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd1[8];
	__be32			optparam;
	u8			rsvd0[4];
	struct mlx5_qp_context	ctx;
	u8			rsvd2[16];
	__be64			pas[0];
};

struct mlx5_conf_sqp_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			qpn;
	u8			rsvd[3];
	u8			type;
};

struct mlx5_conf_sqp_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd[8];
};

struct mlx5_alloc_xrcd_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	u8			rsvd[8];
};

struct mlx5_alloc_xrcd_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	__be32			xrcdn;
	u8			rsvd[4];
};

struct mlx5_dealloc_xrcd_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			xrcdn;
	u8			rsvd[4];
};

struct mlx5_dealloc_xrcd_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd[8];
};

static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn)
{
	return radix_tree_lookup(&dev->priv.qp_table.tree, qpn);
}

static inline struct mlx5_core_mr *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u32 key)
{
	return radix_tree_lookup(&dev->priv.mr_table.tree, key);
}
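
/*
 * Illustrative usage sketch (the helper below is hypothetical, not a
 * driver export): both lookups are raw radix tree accesses with no
 * reference counting, so callers are presumed to provide their own
 * synchronization against concurrent table updates for as long as the
 * returned pointer is used.
 */
static inline bool mlx5_example_qp_exists(struct mlx5_core_dev *dev, u32 qpn)
{
	return __mlx5_qp_lookup(dev, qpn & MLX5_QPN_MASK) != NULL;
}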

struct mlx5_page_fault_resume_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			flags_qpn;
	u8			reserved[4];
};

struct mlx5_page_fault_resume_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd[8];
};

int mlx5_core_create_qp(struct mlx5_core_dev *dev,
			struct mlx5_core_qp *qp,
			struct mlx5_create_qp_mbox_in *in,
			int inlen);
int mlx5_core_qp_modify(struct mlx5_core_dev *dev, enum mlx5_qp_state cur_state,
			enum mlx5_qp_state new_state,
			struct mlx5_modify_qp_mbox_in *in, int sqd_event,
			struct mlx5_core_qp *qp);
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
			 struct mlx5_core_qp *qp);
int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
		       struct mlx5_query_qp_mbox_out *out, int outlen);
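
/*
 * Illustrative lifecycle sketch (hypothetical caller code; allocation,
 * sizing and error handling are elided, and the context/PAS contents
 * are filled in by the consumer of this header before use):
 *
 *	struct mlx5_create_qp_mbox_in *in;	// sized for ctx plus the PAS array
 *	struct mlx5_modify_qp_mbox_in *mod;
 *	struct mlx5_core_qp qp = {};
 *
 *	// ... fill in->ctx and in->pas[] ...
 *	err = mlx5_core_create_qp(dev, &qp, in, inlen);
 *
 *	// Drive the QP through RST -> INIT -> RTR -> RTS, one transition
 *	// per modify call, updating mod->ctx for each step:
 *	err = mlx5_core_qp_modify(dev, MLX5_QP_STATE_RST, MLX5_QP_STATE_INIT,
 *				  mod, 0, &qp);
 *	// ... INIT -> RTR, RTR -> RTS ...
 *
 *	err = mlx5_core_destroy_qp(dev, &qp);
 */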

int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn);
int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn);
void mlx5_init_qp_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev);
int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
				u8 context, int error);
#endif

static inline const char *mlx5_qp_type_str(int type)
{
	switch (type) {
	case MLX5_QP_ST_RC: return "RC";
	case MLX5_QP_ST_UC: return "UC";
	case MLX5_QP_ST_UD: return "UD";
	case MLX5_QP_ST_XRC: return "XRC";
	case MLX5_QP_ST_MLX: return "MLX";
	case MLX5_QP_ST_QP0: return "QP0";
	case MLX5_QP_ST_QP1: return "QP1";
	case MLX5_QP_ST_RAW_ETHERTYPE: return "RAW_ETHERTYPE";
	case MLX5_QP_ST_RAW_IPV6: return "RAW_IPV6";
	case MLX5_QP_ST_SNIFFER: return "SNIFFER";
	case MLX5_QP_ST_SYNC_UMR: return "SYNC_UMR";
	case MLX5_QP_ST_PTP_1588: return "PTP_1588";
	case MLX5_QP_ST_REG_UMR: return "REG_UMR";
	default: return "Invalid transport type";
	}
}

static inline const char *mlx5_qp_state_str(int state)
{
	switch (state) {
	case MLX5_QP_STATE_RST:
		return "RST";
	case MLX5_QP_STATE_INIT:
		return "INIT";
	case MLX5_QP_STATE_RTR:
		return "RTR";
	case MLX5_QP_STATE_RTS:
		return "RTS";
	case MLX5_QP_STATE_SQER:
		return "SQER";
	case MLX5_QP_STATE_SQD:
		return "SQD";
	case MLX5_QP_STATE_ERR:
		return "ERR";
	case MLX5_QP_STATE_SQ_DRAINING:
		return "SQ_DRAINING";
	case MLX5_QP_STATE_SUSPENDED:
		return "SUSPENDED";
	default: return "Invalid QP state";
	}
}

#endif /* MLX5_QP_H */