1 /*- 2 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND 14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE 17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * SUCH DAMAGE. 
 *
 * $FreeBSD$
 */

#ifndef MLX5_QP_H
#define MLX5_QP_H

#include <dev/mlx5/driver.h>

#define MLX5_INVALID_LKEY	0x100
#define MLX5_SIG_WQE_SIZE	(MLX5_SEND_WQE_BB * 5)
#define MLX5_DIF_SIZE		8
#define MLX5_STRIDE_BLOCK_OP	0x400
#define MLX5_CPY_GRD_MASK	0xc0
#define MLX5_CPY_APP_MASK	0x30
#define MLX5_CPY_REF_MASK	0x0f
#define MLX5_BSF_INC_REFTAG	(1 << 6)
#define MLX5_BSF_INL_VALID	(1 << 15)
#define MLX5_BSF_REFRESH_DIF	(1 << 14)
#define MLX5_BSF_REPEAT_BLOCK	(1 << 7)
#define MLX5_BSF_APPTAG_ESCAPE	0x1
#define MLX5_BSF_APPREF_ESCAPE	0x2
#define MLX5_WQE_DS_UNITS	16

/*
 * Optional-parameter mask bits used with QP modify commands to tell the
 * device which QP context fields are being changed.
 * NOTE(review): bits 11, 15 and 17 are intentionally skipped here —
 * presumably reserved by the device interface; confirm against the PRM.
 */
enum mlx5_qp_optpar {
	MLX5_QP_OPTPAR_ALT_ADDR_PATH		= 1 << 0,
	MLX5_QP_OPTPAR_RRE			= 1 << 1,
	MLX5_QP_OPTPAR_RAE			= 1 << 2,
	MLX5_QP_OPTPAR_RWE			= 1 << 3,
	MLX5_QP_OPTPAR_PKEY_INDEX		= 1 << 4,
	MLX5_QP_OPTPAR_Q_KEY			= 1 << 5,
	MLX5_QP_OPTPAR_RNR_TIMEOUT		= 1 << 6,
	MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH	= 1 << 7,
	MLX5_QP_OPTPAR_SRA_MAX			= 1 << 8,
	MLX5_QP_OPTPAR_RRA_MAX			= 1 << 9,
	MLX5_QP_OPTPAR_PM_STATE			= 1 << 10,
	MLX5_QP_OPTPAR_RETRY_COUNT		= 1 << 12,
	MLX5_QP_OPTPAR_RNR_RETRY		= 1 << 13,
	MLX5_QP_OPTPAR_ACK_TIMEOUT		= 1 << 14,
	MLX5_QP_OPTPAR_PRI_PORT			= 1 << 16,
	MLX5_QP_OPTPAR_SRQN			= 1 << 18,
	MLX5_QP_OPTPAR_CQN_RCV			= 1 << 19,
	MLX5_QP_OPTPAR_DC_HS			= 1 << 20,
	MLX5_QP_OPTPAR_DC_KEY			= 1 << 21,
};

/* QP state encodings; values 0-7 and 9 are device-defined states. */
enum mlx5_qp_state {
	MLX5_QP_STATE_RST			= 0,
	MLX5_QP_STATE_INIT			= 1,
	MLX5_QP_STATE_RTR			= 2,
	MLX5_QP_STATE_RTS			= 3,
	MLX5_QP_STATE_SQER			= 4,
	MLX5_QP_STATE_SQD			= 5,
	MLX5_QP_STATE_ERR			= 6,
	MLX5_QP_STATE_SQ_DRAINING		= 7,
	MLX5_QP_STATE_SUSPENDED			= 9,
	MLX5_QP_NUM_STATE,
	MLX5_QP_STATE,
	MLX5_QP_STATE_BAD,
};

/* Software-only "no state" sentinels appended after the HW SQ/RQ states. */
enum {
	MLX5_SQ_STATE_NA	= MLX5_SQC_STATE_ERR + 1,
	MLX5_SQ_NUM_STATE	= MLX5_SQ_STATE_NA + 1,
	MLX5_RQ_STATE_NA	= MLX5_RQC_STATE_ERR + 1,
	MLX5_RQ_NUM_STATE	= MLX5_RQ_STATE_NA + 1,
};

/*
 * QP transport service types.
 * NOTE(review): SYNC_UMR (0xe) is listed before PTP_1588 (0xd) and
 * REG_UMR (0xc) — declaration order here is not value order.
 */
enum {
	MLX5_QP_ST_RC			= 0x0,
	MLX5_QP_ST_UC			= 0x1,
	MLX5_QP_ST_UD			= 0x2,
	MLX5_QP_ST_XRC			= 0x3,
	MLX5_QP_ST_MLX			= 0x4,
	MLX5_QP_ST_DCI			= 0x5,
	MLX5_QP_ST_DCT			= 0x6,
	MLX5_QP_ST_QP0			= 0x7,
	MLX5_QP_ST_QP1			= 0x8,
	MLX5_QP_ST_RAW_ETHERTYPE	= 0x9,
	MLX5_QP_ST_RAW_IPV6		= 0xa,
	MLX5_QP_ST_SNIFFER		= 0xb,
	MLX5_QP_ST_SYNC_UMR		= 0xe,
	MLX5_QP_ST_PTP_1588		= 0xd,
	MLX5_QP_ST_REG_UMR		= 0xc,
	MLX5_QP_ST_SW_CNAK		= 0x10,
	MLX5_QP_ST_MAX
};

/* Receive-queue type selectors. */
enum {
	MLX5_NON_ZERO_RQ	= 0x0,
	MLX5_SRQ_RQ		= 0x1,
	MLX5_CRQ_RQ		= 0x2,
	MLX5_ZERO_LEN_RQ	= 0x3
};

/* Bits in the params1/params2 words of the QP context. */
enum {
	/* params1 */
	MLX5_QP_BIT_SRE		= 1 << 15,
	MLX5_QP_BIT_SWE		= 1 << 14,
	MLX5_QP_BIT_SAE		= 1 << 13,
	/* params2 */
	MLX5_QP_BIT_RRE		= 1 << 15,
	MLX5_QP_BIT_RWE		= 1 << 14,
	MLX5_QP_BIT_RAE		= 1 << 13,
	MLX5_QP_BIT_RIC		= 1 << 4,
	MLX5_QP_BIT_COLL_SYNC_RQ	= 1 << 2,
	MLX5_QP_BIT_COLL_SYNC_SQ	= 1 << 1,
	MLX5_QP_BIT_COLL_MASTER		= 1 << 0
};

/* DCT context access-right bits. */
enum {
	MLX5_DCT_BIT_RRE	= 1 << 19,
	MLX5_DCT_BIT_RWE	= 1 << 18,
	MLX5_DCT_BIT_RAE	= 1 << 17,
};

/* fm_ce_se completion/solicited flags in the WQE control segment. */
enum {
	MLX5_WQE_CTRL_CQ_UPDATE		= 2 << 2,
	MLX5_WQE_CTRL_CQ_UPDATE_AND_EQE	= 3 << 2,
	MLX5_WQE_CTRL_SOLICITED		= 1 << 1,
};

/* Send WQE sizing: data segments are 16 bytes, basic blocks 64 bytes. */
enum {
	MLX5_SEND_WQE_DS	= 16,
	MLX5_SEND_WQE_BB	= 64,
};

/* Number of 16-byte data segments per 64-byte WQE basic block. */
#define MLX5_SEND_WQEBB_NUM_DS	(MLX5_SEND_WQE_BB / MLX5_SEND_WQE_DS)

enum {
	MLX5_SEND_WQE_MAX_WQEBBS	= 16,
};

/* Access-permission flags for the FMR WQE segment. */
enum {
	MLX5_WQE_FMR_PERM_LOCAL_READ	= 1 << 27,
	MLX5_WQE_FMR_PERM_LOCAL_WRITE	= 1 << 28,
	MLX5_WQE_FMR_PERM_REMOTE_READ	= 1 << 29,
	MLX5_WQE_FMR_PERM_REMOTE_WRITE	= 1 << 30,
	MLX5_WQE_FMR_PERM_ATOMIC	= 1U << 31	/* 1U: 1 << 31 would overflow int */
};

/* Fence-mode encodings (occupy bits 7:5 of fm_ce_se). */
enum {
	MLX5_FENCE_MODE_NONE			= 0 << 5,
	MLX5_FENCE_MODE_INITIATOR_SMALL		= 1 << 5,
	MLX5_FENCE_MODE_FENCE			= 2 << 5,
	MLX5_FENCE_MODE_STRONG_ORDERING		= 3 << 5,
	MLX5_FENCE_MODE_SMALL_AND_FENCE		= 4 << 5,
};

/* Doorbell record indices: receive counter first, then send counter. */
enum {
	MLX5_RCV_DBR	= 0,
	MLX5_SND_DBR	= 1,
};

enum {
	MLX5_FLAGS_INLINE	= 1<<7,
	MLX5_FLAGS_CHECK_FREE	= 1<<5,
};

/*
 * The structures below mirror device-visible layouts (note the __be*
 * big-endian fields) — field order, widths and padding are ABI and must
 * not be changed.
 */

struct mlx5_wqe_fmr_seg {
	__be32			flags;
	__be32			mem_key;
	__be64			buf_list;
	__be64			start_addr;
	__be64			reg_len;
	__be32			offset;
	__be32			page_size;
	u32			reserved[2];
};

struct mlx5_wqe_ctrl_seg {
	__be32			opmod_idx_opcode;
	__be32			qpn_ds;
	u8			signature;
	u8			rsvd[2];
	u8			fm_ce_se;
	__be32			imm;
};

/* Mask for the DS (descriptor count) field of qpn_ds. */
#define MLX5_WQE_CTRL_DS_MASK 0x3f

enum {
	MLX5_MLX_FLAG_MASK_VL15	= 0x40,
	MLX5_MLX_FLAG_MASK_SLR	= 0x20,
	MLX5_MLX_FLAG_MASK_ICRC	= 0x8,
	MLX5_MLX_FLAG_MASK_FL	= 4
};

struct mlx5_mlx_seg {
	__be32			rsvd0;
	u8			flags;
	u8			stat_rate_sl;
	u8			rsvd1[8];
	__be16			dlid;
};

/* cs_flags bits in the Ethernet segment: checksum offload requests. */
enum {
	MLX5_ETH_WQE_L3_INNER_CSUM	= 1 << 4,
	MLX5_ETH_WQE_L4_INNER_CSUM	= 1 << 5,
	MLX5_ETH_WQE_L3_CSUM		= 1 << 6,
	MLX5_ETH_WQE_L4_CSUM		= 1 << 7,
};

/* swp_flags bits: software-parser header type selectors. */
enum {
	MLX5_ETH_WQE_SWP_INNER_L3_TYPE	= 1 << 0,
	MLX5_ETH_WQE_SWP_INNER_L4_TYPE	= 1 << 1,
	MLX5_ETH_WQE_SWP_OUTER_L3_TYPE	= 1 << 4,
	MLX5_ETH_WQE_SWP_OUTER_L4_TYPE	= 1 << 5,
};

struct mlx5_wqe_eth_seg {
	u8			swp_outer_l4_offset;
	u8			swp_outer_l3_offset;
	u8			swp_inner_l4_offset;
	u8			swp_inner_l3_offset;
	u8			cs_flags;
	u8			swp_flags;
	__be16			mss;
	__be32			rsvd2;
	__be16			inline_hdr_sz;
	u8			inline_hdr_start[2];
};

struct mlx5_wqe_xrc_seg {
	__be32			xrc_srqn;
	u8			rsvd[12];
};

struct mlx5_wqe_masked_atomic_seg {
	__be64			swap_add;
	__be64			compare;
	__be64			swap_add_mask;
	__be64			compare_mask;
};

/* Address vector: destination addressing for UD/DC traffic. */
struct mlx5_av {
	union {
		struct {
			__be32	qkey;
			__be32	reserved;
		} qkey;
		__be64	dc_key;
	} key;
	__be32			dqp_dct;
	u8			stat_rate_sl;
	u8			fl_mlid;
	union {
		__be16	rlid;
		__be16	udp_sport;
	};
	u8			reserved0[4];
	u8			rmac[6];
	u8			tclass;
	u8			hop_limit;
	__be32			grh_gid_fl;
	u8			rgid[16];
};

struct mlx5_wqe_datagram_seg {
	struct mlx5_av	av;
};

struct mlx5_wqe_raddr_seg {
	__be64			raddr;
	__be32			rkey;
	u32			reserved;
};

struct mlx5_wqe_atomic_seg {
	__be64			swap_add;
	__be64			compare;
};

struct mlx5_wqe_data_seg {
	__be32			byte_count;
	__be32			lkey;
	__be64			addr;
};

struct mlx5_wqe_umr_ctrl_seg {
	u8			flags;
	u8			rsvd0[3];
	__be16			klm_octowords;
	__be16			bsf_octowords;
	__be64			mkey_mask;
	u8			rsvd1[32];
};

struct mlx5_seg_set_psv {
	__be32			psv_num;
	__be16			syndrome;
	__be16			status;
	__be32			transient_sig;
	__be32			ref_tag;
};

struct mlx5_seg_get_psv {
	u8			rsvd[19];
	u8			num_psv;
	__be32			l_key;
	__be64			va;
	__be32			psv_index[4];
};

struct mlx5_seg_check_psv {
	u8			rsvd0[2];
	__be16			err_coalescing_op;
	u8			rsvd1[2];
	__be16			xport_err_op;
	u8			rsvd2[2];
	__be16			xport_err_mask;
	u8			rsvd3[7];
	u8			num_psv;
	__be32			l_key;
	__be64			va;
	__be32			psv_index[4];
};

struct mlx5_rwqe_sig {
	u8			rsvd0[4];
	u8			signature;
	u8			rsvd1[11];
};

struct mlx5_wqe_signature_seg {
	u8			rsvd0[4];
	u8			signature;
	u8			rsvd1[11];
};

struct mlx5_wqe_inline_seg {
	__be32			byte_count;
};

/* Signature (T10-DIF) checksum type selectors. */
enum mlx5_sig_type {
	MLX5_DIF_CRC	= 0x1,
	MLX5_DIF_IPCS	= 0x2,
};

struct mlx5_bsf_inl {
	__be16		vld_refresh;
	__be16		dif_apptag;
	__be32		dif_reftag;
	u8		sig_type;
	u8		rp_inv_seed;
	u8		rsvd[3];
	u8		dif_inc_ref_guard_check;
	__be16		dif_app_bitmask_check;
};

/* Byte stream format descriptor for signature offload. */
struct mlx5_bsf {
	struct mlx5_bsf_basic {
		u8		bsf_size_sbs;
		u8		check_byte_mask;
		union {
			u8	copy_byte_mask;
			u8	bs_selector;
			u8	rsvd_wflags;
		} wire;
		union {
			u8	bs_selector;
			u8	rsvd_mflags;
		} mem;
		__be32		raw_data_size;
		__be32		w_bfs_psv;
		__be32		m_bfs_psv;
	} basic;
	struct mlx5_bsf_ext {
		__be32		t_init_gen_pro_size;
		__be32		rsvd_epi_size;
		__be32		w_tfs_psv;
		__be32		m_tfs_psv;
	} ext;
	struct mlx5_bsf_inl	w_inl;
	struct mlx5_bsf_inl	m_inl;
};

struct mlx5_klm {
	__be32		bcount;
	__be32		key;
	__be64		va;
};

struct mlx5_stride_block_entry {
	__be16		stride;
	__be16		bcount;
	__be32		key;
	__be64		va;
};

struct mlx5_stride_block_ctrl_seg {
	__be32		bcount_per_cycle;
	__be32		op;
	__be32		repeat_count;
	u16		rsvd;
	__be16		num_entries;
};

enum mlx5_pagefault_flags {
	MLX5_PFAULT_REQUESTOR	= 1 << 0,
	MLX5_PFAULT_WRITE	= 1 << 1,
	MLX5_PFAULT_RDMA	= 1 << 2,
};

/* Contains the details of a pagefault. */
struct mlx5_pagefault {
	u32			bytes_committed;
	u8			event_subtype;
	enum mlx5_pagefault_flags flags;
	union {
		/* Initiator or send message responder pagefault details. */
		struct {
			/* Received packet size, only valid for responders. */
			u32	packet_size;
			/*
			 * WQE index. Refers to either the send queue or
			 * receive queue, according to event_subtype.
			 */
			u16	wqe_index;
		} wqe;
		/* RDMA responder pagefault details */
		struct {
			u32	r_key;
			/*
			 * Received packet size, minimal size page fault
			 * resolution required for forward progress.
			 */
			u32	packet_size;
			u32	rdma_op_len;
			u64	rdma_va;
		} rdma;
	};
};

/* Driver-side QP tracking object (not device-visible). */
struct mlx5_core_qp {
	struct mlx5_core_rsc_common	common; /* must be first */
	void (*event)		(struct mlx5_core_qp *, int);
	int			qpn;
	struct mlx5_rsc_debug	*dbg;
	int			pid;
};

/* Address path entry within the QP context (primary or alternate). */
struct mlx5_qp_path {
	u8			fl_free_ar;
	u8			rsvd3;
	__be16			pkey_index;
	u8			rsvd0;
	u8			grh_mlid;
	__be16			rlid;
	u8			ackto_lt;
	u8			mgid_index;
	u8			static_rate;
	u8			hop_limit;
	__be32			tclass_flowlabel;
	union {
		u8		rgid[16];
		u8		rip[16];
	};
	u8			f_dscp_ecn_prio;
	u8			ecn_dscp;
	__be16			udp_sport;
	u8			dci_cfi_prio_sl;
	u8			port;
	u8			rmac[6];
};

struct mlx5_qp_context {
	__be32			flags;
	__be32			flags_pd;
	u8			mtu_msgmax;
	u8			rq_size_stride;
	__be16			sq_crq_size;
	__be32			qp_counter_set_usr_page;
	__be32			wire_qpn;
	__be32			log_pg_sz_remote_qpn;
	struct			mlx5_qp_path pri_path;
	struct			mlx5_qp_path alt_path;
	__be32			params1;
	u8			reserved2[4];
	__be32			next_send_psn;
	__be32			cqn_send;
	__be32			deth_sqpn;
	u8			reserved3[4];
	__be32			last_acked_psn;
	__be32			ssn;
	__be32			params2;
	__be32			rnr_nextrecvpsn;
	__be32			xrcd;
	__be32			cqn_recv;
	__be64			db_rec_addr;
	__be32			qkey;
	__be32			rq_type_srqn;
	__be32			rmsn;
	__be16			hw_sq_wqe_counter;
	__be16			sw_sq_wqe_counter;
	__be16			hw_rcyclic_byte_counter;
	__be16			hw_rq_counter;
	__be16			sw_rcyclic_byte_counter;
	__be16			sw_rq_counter;
	u8			rsvd0[5];
	u8			cgs;
	u8			cs_req;
	u8			cs_res;
	__be64			dc_access_key;
	u8			rsvd1[24];
};

struct mlx5_dct_context {
	u8			state;
	u8			rsvd0[7];
	__be32			cqn;
	__be32			flags;
	u8			rsvd1;
	u8			cs_res;
	u8			min_rnr;
	u8			rsvd2;
	__be32			srqn;
	__be32			pdn;
	__be32			tclass_flow_label;
	__be64			access_key;
	u8			mtu;
	u8			port;
	__be16			pkey_index;
	u8			rsvd4;
	u8			mgid_index;
	u8			rsvd5;
	u8			hop_limit;
	__be32			access_violations;
	u8			rsvd[12];
};

/*
 * Look up a tracked QP by QP number in the device's radix tree.
 * Caller is responsible for any locking around the table.
 */
static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn)
{
	return radix_tree_lookup(&dev->priv.qp_table.tree, qpn);
}

/* Look up a memory region by key in the device's MR radix tree. */
static inline struct mlx5_core_mr *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u32 key)
{
	return radix_tree_lookup(&dev->priv.mr_table.tree, key);
}

int mlx5_core_create_qp(struct mlx5_core_dev *dev,
			struct mlx5_core_qp *qp,
			u32 *in,
			int inlen);
int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
			u32 opt_param_mask, void *qpc,
			struct mlx5_core_qp *qp);
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
			 struct mlx5_core_qp *qp);
int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
		       u32 *out, int outlen);
int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
			u32 *out, int outlen);
int mlx5_core_arm_dct(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct);

int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn);
int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn);
int mlx5_core_create_dct(struct mlx5_core_dev *dev,
			 struct mlx5_core_dct *dct,
			 u32 *in);
int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
			  struct mlx5_core_dct *dct);
int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
				struct mlx5_core_qp *rq);
void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
				  struct mlx5_core_qp *rq);
int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
				struct mlx5_core_qp *sq);
void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
				  struct mlx5_core_qp *sq);
void mlx5_init_qp_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev);
int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
void
mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp); 599 600 static inline const char *mlx5_qp_type_str(int type) 601 { 602 switch (type) { 603 case MLX5_QP_ST_RC: return "RC"; 604 case MLX5_QP_ST_UC: return "C"; 605 case MLX5_QP_ST_UD: return "UD"; 606 case MLX5_QP_ST_XRC: return "XRC"; 607 case MLX5_QP_ST_MLX: return "MLX"; 608 case MLX5_QP_ST_DCI: return "DCI"; 609 case MLX5_QP_ST_QP0: return "QP0"; 610 case MLX5_QP_ST_QP1: return "QP1"; 611 case MLX5_QP_ST_RAW_ETHERTYPE: return "RAW_ETHERTYPE"; 612 case MLX5_QP_ST_RAW_IPV6: return "RAW_IPV6"; 613 case MLX5_QP_ST_SNIFFER: return "SNIFFER"; 614 case MLX5_QP_ST_SYNC_UMR: return "SYNC_UMR"; 615 case MLX5_QP_ST_PTP_1588: return "PTP_1588"; 616 case MLX5_QP_ST_REG_UMR: return "REG_UMR"; 617 case MLX5_QP_ST_SW_CNAK: return "DC_CNAK"; 618 default: return "Invalid transport type"; 619 } 620 } 621 622 static inline const char *mlx5_qp_state_str(int state) 623 { 624 switch (state) { 625 case MLX5_QP_STATE_RST: 626 return "RST"; 627 case MLX5_QP_STATE_INIT: 628 return "INIT"; 629 case MLX5_QP_STATE_RTR: 630 return "RTR"; 631 case MLX5_QP_STATE_RTS: 632 return "RTS"; 633 case MLX5_QP_STATE_SQER: 634 return "SQER"; 635 case MLX5_QP_STATE_SQD: 636 return "SQD"; 637 case MLX5_QP_STATE_ERR: 638 return "ERR"; 639 case MLX5_QP_STATE_SQ_DRAINING: 640 return "SQ_DRAINING"; 641 case MLX5_QP_STATE_SUSPENDED: 642 return "SUSPENDED"; 643 default: return "Invalid QP state"; 644 } 645 } 646 647 #endif /* MLX5_QP_H */ 648