/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef MLX5_DEVICE_H
#define MLX5_DEVICE_H

#include <linux/types.h>
#include <rdma/ib_verbs.h>
#include <dev/mlx5/mlx5_ifc.h>

#define FW_INIT_TIMEOUT_MILI 2000
#define FW_INIT_WAIT_MS 2

#if defined(__LITTLE_ENDIAN)
#define MLX5_SET_HOST_ENDIANNESS 0
#elif defined(__BIG_ENDIAN)
#define MLX5_SET_HOST_ENDIANNESS 0x80
#else
#error Host endianness not defined
#endif

/* helper macros */
#define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
#define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
#define __mlx5_bit_off(typ, fld) __offsetof(struct mlx5_ifc_##typ##_bits, fld)
#define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
#define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)
#define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f))
#define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
#define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << __mlx5_dw_bit_off(typ, fld))
#define __mlx5_st_sz_bits(typ) sizeof(struct mlx5_ifc_##typ##_bits)

#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
#define MLX5_ST_SZ_QW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 64)
#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
#define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))

/* insert a value to a struct */
#define MLX5_SET(typ, p, fld, v) do { \
	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
	BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) > 32); \
	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
	cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
		     (~__mlx5_dw_mask(typ, fld))) | (((v) & __mlx5_mask(typ, fld)) \
		     << __mlx5_dw_bit_off(typ, fld))); \
} while (0)

#define MLX5_SET_TO_ONES(typ, p, fld) do { \
	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
	BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) > 32); \
	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
	cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
		     (~__mlx5_dw_mask(typ, fld))) | ((__mlx5_mask(typ, fld)) \
		     << __mlx5_dw_bit_off(typ, fld))); \
} while (0)

#define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\
	__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
	__mlx5_mask(typ, fld))

#define MLX5_GET_PR(typ, p, fld) ({ \
	u32 ___t = MLX5_GET(typ, p, fld); \
	pr_debug(#fld " = 0x%x\n", ___t); \
	___t; \
})

#define __MLX5_SET64(typ, p, fld, v) do { \
	BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) != 64); \
	*((__be64 *)(p) + __mlx5_64_off(typ, fld)) = cpu_to_be64(v); \
} while (0)

#define MLX5_SET64(typ, p, fld, v) do { \
	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
	__MLX5_SET64(typ, p, fld, v); \
} while (0)

#define MLX5_ARRAY_SET64(typ, p, fld, idx, v) do { \
	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
	__MLX5_SET64(typ, p, fld[idx], v); \
} while (0)

#define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))

#define MLX5_GET64_BE(typ, p, fld) (*((__be64 *)(p) +\
	__mlx5_64_off(typ, fld)))

#define MLX5_GET_BE(type_t, typ, p, fld) ({ \
	type_t tmp; \
	switch (sizeof(tmp)) { \
	case sizeof(u8): \
		tmp = (__force type_t)MLX5_GET(typ, p, fld); \
		break; \
	case sizeof(u16): \
		tmp = (__force type_t)cpu_to_be16(MLX5_GET(typ, p, fld)); \
		break; \
	case sizeof(u32): \
		tmp = (__force type_t)cpu_to_be32(MLX5_GET(typ, p, fld)); \
		break; \
	case sizeof(u64): \
		tmp = (__force type_t)MLX5_GET64_BE(typ, p, fld); \
		break; \
	} \
	tmp; \
})
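
/*
 * Usage sketch (editorial addition, not part of the original header): the
 * MLX5_SET()/MLX5_GET() family above reads and writes fields of the
 * big-endian command layouts described by the mlx5_ifc_*_bits structures
 * in mlx5_ifc.h, one 32-bit word at a time.  A caller typically sizes the
 * buffers with MLX5_ST_SZ_DW() and then sets individual fields, roughly:
 *
 *	u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
 *	u32 out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
 *
 *	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
 *	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 *	if (err == 0)
 *		syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
 *
 * The struct/field names and the mlx5_cmd_exec() signature above are
 * assumptions taken from other parts of the driver and may differ
 * between versions.
 */
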
#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 8
#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 8
#define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1
#define MLX5_BY_PASS_NUM_PRIOS (MLX5_BY_PASS_NUM_REGULAR_PRIOS +\
	MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS +\
	MLX5_BY_PASS_NUM_MULTICAST_PRIOS)

/* insert a value to a struct */
#define MLX5_VSC_SET(typ, p, fld, v) do { \
	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
	BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) > 32); \
	*((__le32 *)(p) + __mlx5_dw_off(typ, fld)) = \
	cpu_to_le32((le32_to_cpu(*((__le32 *)(p) + __mlx5_dw_off(typ, fld))) & \
		     (~__mlx5_dw_mask(typ, fld))) | (((v) & __mlx5_mask(typ, fld)) \
		     << __mlx5_dw_bit_off(typ, fld))); \
} while (0)

#define MLX5_VSC_GET(typ, p, fld) ((le32_to_cpu(*((__le32 *)(p) +\
	__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
	__mlx5_mask(typ, fld))

#define MLX5_VSC_GET_PR(typ, p, fld) ({ \
	u32 ___t = MLX5_VSC_GET(typ, p, fld); \
	pr_debug(#fld " = 0x%x\n", ___t); \
	___t; \
})

enum {
	MLX5_MAX_COMMANDS = 32,
	MLX5_CMD_DATA_BLOCK_SIZE = 512,
	MLX5_CMD_MBOX_SIZE = 1024,
	MLX5_PCI_CMD_XPORT = 7,
	MLX5_MKEY_BSF_OCTO_SIZE = 4,
	MLX5_MAX_PSVS = 4,
};

enum {
	MLX5_EXTENDED_UD_AV = 0x80000000,
};

enum {
	MLX5_CQ_FLAGS_OI = 2,
};

enum {
	MLX5_STAT_RATE_OFFSET = 5,
};

enum {
	MLX5_INLINE_SEG = 0x80000000,
};

enum {
	MLX5_HW_START_PADDING = MLX5_INLINE_SEG,
};

enum {
	MLX5_MIN_PKEY_TABLE_SIZE = 128,
	MLX5_MAX_LOG_PKEY_TABLE = 5,
};

enum {
	MLX5_MKEY_INBOX_PG_ACCESS = 1U << 31
};

enum {
	MLX5_PERM_LOCAL_READ = 1 << 2,
	MLX5_PERM_LOCAL_WRITE = 1 << 3,
	MLX5_PERM_REMOTE_READ = 1 << 4,
	MLX5_PERM_REMOTE_WRITE = 1 << 5,
	MLX5_PERM_ATOMIC = 1 << 6,
	MLX5_PERM_UMR_EN = 1 << 7,
};

enum {
	MLX5_PCIE_CTRL_SMALL_FENCE = 1 << 0,
	MLX5_PCIE_CTRL_RELAXED_ORDERING = 1 << 2,
	MLX5_PCIE_CTRL_NO_SNOOP = 1 << 3,
	MLX5_PCIE_CTRL_TLP_PROCE_EN = 1 << 6,
	MLX5_PCIE_CTRL_TPH_MASK = 3 << 4,
};

enum {
	MLX5_MKEY_REMOTE_INVAL = 1 << 24,
	MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
	MLX5_MKEY_BSF_EN = 1 << 30,
	MLX5_MKEY_LEN64 = 1U << 31,
};

enum {
	MLX5_EN_RD = (u64)1,
	MLX5_EN_WR = (u64)2
};

enum {
	MLX5_BF_REGS_PER_PAGE = 4,
	MLX5_MAX_UAR_PAGES = 1 << 8,
	MLX5_NON_FP_BF_REGS_PER_PAGE = 2,
	MLX5_MAX_UUARS = MLX5_MAX_UAR_PAGES * MLX5_NON_FP_BF_REGS_PER_PAGE,
};

enum {
	MLX5_MKEY_MASK_LEN = 1ull << 0,
	MLX5_MKEY_MASK_PAGE_SIZE = 1ull << 1,
	MLX5_MKEY_MASK_START_ADDR = 1ull << 6,
	MLX5_MKEY_MASK_PD = 1ull << 7,
	MLX5_MKEY_MASK_EN_RINVAL = 1ull << 8,
	MLX5_MKEY_MASK_EN_SIGERR = 1ull << 9,
	MLX5_MKEY_MASK_BSF_EN = 1ull << 12,
	MLX5_MKEY_MASK_KEY = 1ull << 13,
	MLX5_MKEY_MASK_QPN = 1ull << 14,
	MLX5_MKEY_MASK_LR = 1ull << 17,
	MLX5_MKEY_MASK_LW = 1ull << 18,
	MLX5_MKEY_MASK_RR = 1ull << 19,
	MLX5_MKEY_MASK_RW = 1ull << 20,
	MLX5_MKEY_MASK_A = 1ull << 21,
	MLX5_MKEY_MASK_SMALL_FENCE = 1ull << 23,
	MLX5_MKEY_MASK_FREE = 1ull << 29,
};

enum {
	MLX5_UMR_TRANSLATION_OFFSET_EN = (1 << 4),

	MLX5_UMR_CHECK_NOT_FREE = (1 << 5),
	MLX5_UMR_CHECK_FREE = (2 << 5),

	MLX5_UMR_INLINE = (1 << 7),
};

#define MLX5_UMR_MTT_ALIGNMENT 0x40
#define MLX5_UMR_MTT_MASK (MLX5_UMR_MTT_ALIGNMENT - 1)
#define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT

enum {
	MLX5_EVENT_QUEUE_TYPE_QP = 0,
	MLX5_EVENT_QUEUE_TYPE_RQ = 1,
	MLX5_EVENT_QUEUE_TYPE_SQ = 2,
};

enum {
	MLX5_PORT_CHANGE_SUBTYPE_DOWN = 1,
	MLX5_PORT_CHANGE_SUBTYPE_ACTIVE = 4,
	MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED = 5,
	MLX5_PORT_CHANGE_SUBTYPE_LID = 6,
	MLX5_PORT_CHANGE_SUBTYPE_PKEY = 7,
	MLX5_PORT_CHANGE_SUBTYPE_GUID = 8,
	MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG = 9,
};

enum {
	MLX5_DCBX_EVENT_SUBTYPE_ERROR_STATE_DCBX = 1,
	MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_CHANGE,
	MLX5_DCBX_EVENT_SUBTYPE_LOCAL_OPER_CHANGE,
	MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_APP_PRIORITY_CHANGE,
	MLX5_MAX_INLINE_RECEIVE_SIZE = 64
};

enum {
	MLX5_DEV_CAP_FLAG_XRC = 1LL << 3,
	MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8,
	MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9,
	MLX5_DEV_CAP_FLAG_APM = 1LL << 17,
	MLX5_DEV_CAP_FLAG_SCQE_BRK_MOD = 1LL << 21,
	MLX5_DEV_CAP_FLAG_BLOCK_MCAST = 1LL << 23,
	MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29,
	MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30,
	MLX5_DEV_CAP_FLAG_ATOMIC = 1LL << 33,
	MLX5_DEV_CAP_FLAG_ROCE = 1LL << 34,
	MLX5_DEV_CAP_FLAG_DCT = 1LL << 37,
	MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40,
	MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46,
	MLX5_DEV_CAP_FLAG_DRAIN_SIGERR = 1LL << 48,
};

enum {
	MLX5_ROCE_VERSION_1 = 0,
	MLX5_ROCE_VERSION_1_5 = 1,
	MLX5_ROCE_VERSION_2 = 2,
};

enum {
	MLX5_ROCE_VERSION_1_CAP = 1 << MLX5_ROCE_VERSION_1,
	MLX5_ROCE_VERSION_1_5_CAP = 1 << MLX5_ROCE_VERSION_1_5,
	MLX5_ROCE_VERSION_2_CAP = 1 << MLX5_ROCE_VERSION_2,
};

enum {
	MLX5_ROCE_L3_TYPE_IPV4 = 0,
	MLX5_ROCE_L3_TYPE_IPV6 = 1,
};

enum {
	MLX5_ROCE_L3_TYPE_IPV4_CAP = 1 << 1,
	MLX5_ROCE_L3_TYPE_IPV6_CAP = 1 << 2,
};

enum {
	MLX5_OPCODE_NOP = 0x00,
	MLX5_OPCODE_SEND_INVAL = 0x01,
	MLX5_OPCODE_RDMA_WRITE = 0x08,
	MLX5_OPCODE_RDMA_WRITE_IMM = 0x09,
	MLX5_OPCODE_SEND = 0x0a,
	MLX5_OPCODE_SEND_IMM = 0x0b,
	MLX5_OPCODE_LSO = 0x0e,
	MLX5_OPCODE_RDMA_READ = 0x10,
	MLX5_OPCODE_ATOMIC_CS = 0x11,
	MLX5_OPCODE_ATOMIC_FA = 0x12,
	MLX5_OPCODE_ATOMIC_MASKED_CS = 0x14,
	MLX5_OPCODE_ATOMIC_MASKED_FA = 0x15,
	MLX5_OPCODE_BIND_MW = 0x18,
	MLX5_OPCODE_CONFIG_CMD = 0x1f,

	MLX5_RECV_OPCODE_RDMA_WRITE_IMM = 0x00,
	MLX5_RECV_OPCODE_SEND = 0x01,
	MLX5_RECV_OPCODE_SEND_IMM = 0x02,
	MLX5_RECV_OPCODE_SEND_INVAL = 0x03,

	MLX5_CQE_OPCODE_ERROR = 0x1e,
	MLX5_CQE_OPCODE_RESIZE = 0x16,

	MLX5_OPCODE_SET_PSV = 0x20,
	MLX5_OPCODE_GET_PSV = 0x21,
	MLX5_OPCODE_CHECK_PSV = 0x22,
	MLX5_OPCODE_RGET_PSV = 0x26,
	MLX5_OPCODE_RCHECK_PSV = 0x27,

	MLX5_OPCODE_UMR = 0x25,

	MLX5_OPCODE_SIGNATURE_CANCELED = (1 << 15),
};

enum {
	MLX5_SET_PORT_RESET_QKEY = 0,
	MLX5_SET_PORT_GUID0 = 16,
	MLX5_SET_PORT_NODE_GUID = 17,
	MLX5_SET_PORT_SYS_GUID = 18,
	MLX5_SET_PORT_GID_TABLE = 19,
	MLX5_SET_PORT_PKEY_TABLE = 20,
};

enum {
	MLX5_MAX_PAGE_SHIFT = 31
};

enum {
	MLX5_ADAPTER_PAGE_SHIFT = 12,
	MLX5_ADAPTER_PAGE_SIZE = 1 << MLX5_ADAPTER_PAGE_SHIFT,
};

enum {
	MLX5_CAP_OFF_CMDIF_CSUM = 46,
};

enum {
	/*
	 * Max wqe size for rdma read is 512 bytes, so this
	 * limits our max_sge_rd as the wqe needs to fit:
	 * - ctrl segment (16 bytes)
	 * - rdma segment (16 bytes)
	 * - scatter elements (16 bytes each)
	 */
	MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16
};

struct mlx5_cmd_layout {
	u8 type;
	u8 rsvd0[3];
	__be32 inlen;
	__be64 in_ptr;
	__be32 in[4];
	__be32 out[4];
	__be64 out_ptr;
	__be32 outlen;
	u8 token;
	u8 sig;
	u8 rsvd1;
	u8 status_own;
};

struct mlx5_health_buffer {
	__be32 assert_var[5];
	__be32 rsvd0[3];
	__be32 assert_exit_ptr;
	__be32 assert_callra;
	__be32 rsvd1[2];
	__be32 fw_ver;
	__be32 hw_id;
	__be32 rsvd2;
	u8 irisc_index;
	u8 synd;
	__be16 ext_synd;
};

struct mlx5_init_seg {
	__be32 fw_rev;
	__be32 cmdif_rev_fw_sub;
	__be32 rsvd0[2];
	__be32 cmdq_addr_h;
	__be32 cmdq_addr_l_sz;
	__be32 cmd_dbell;
	__be32 rsvd1[120];
	__be32 initializing;
	struct mlx5_health_buffer health;
	__be32 rsvd2[880];
	__be32 internal_timer_h;
	__be32 internal_timer_l;
	__be32 rsvd3[2];
	__be32 health_counter;
	__be32 rsvd4[1019];
	__be64 ieee1588_clk;
	__be32 ieee1588_clk_type;
	__be32 clr_intx;
};

struct mlx5_eqe_comp {
	__be32 reserved[6];
	__be32 cqn;
};

struct mlx5_eqe_qp_srq {
	__be32 reserved[6];
	__be32 qp_srq_n;
};

struct mlx5_eqe_cq_err {
	__be32 cqn;
	u8 reserved1[7];
	u8 syndrome;
};

struct mlx5_eqe_port_state {
	u8 reserved0[8];
	u8 port;
};

struct mlx5_eqe_gpio {
	__be32 reserved0[2];
	__be64 gpio_event;
};

struct mlx5_eqe_congestion {
	u8 type;
	u8 rsvd0;
	u8 congestion_level;
};

struct mlx5_eqe_stall_vl {
	u8 rsvd0[3];
	u8 port_vl;
};

struct mlx5_eqe_cmd {
	__be32 vector;
	__be32 rsvd[6];
};

struct mlx5_eqe_page_req {
	u8 rsvd0[2];
	__be16 func_id;
	__be32 num_pages;
	__be32 rsvd1[5];
};

struct mlx5_eqe_vport_change {
	u8 rsvd0[2];
	__be16 vport_num;
	__be32 rsvd1[6];
};


#define PORT_MODULE_EVENT_MODULE_STATUS_MASK 0xF
#define PORT_MODULE_EVENT_ERROR_TYPE_MASK 0xF

enum {
	MLX5_MODULE_STATUS_PLUGGED_ENABLED = 0x1,
	MLX5_MODULE_STATUS_UNPLUGGED = 0x2,
	MLX5_MODULE_STATUS_ERROR = 0x3,
	MLX5_MODULE_STATUS_PLUGGED_DISABLED = 0x4,
};

enum {
	MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED = 0x0,
	MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE = 0x1,
	MLX5_MODULE_EVENT_ERROR_BUS_STUCK = 0x2,
	MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT = 0x3,
	MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST = 0x4,
	MLX5_MODULE_EVENT_ERROR_UNSUPPORTED_CABLE = 0x5,
	MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE = 0x6,
	MLX5_MODULE_EVENT_ERROR_CABLE_IS_SHORTED = 0x7,
};

struct mlx5_eqe_port_module_event {
	u8 rsvd0;
	u8 module;
	u8 rsvd1;
	u8 module_status;
	u8 rsvd2[2];
	u8 error_type;
};

struct mlx5_eqe_general_notification_event {
	u32 rq_user_index_delay_drop;
	u32 rsvd0[6];
};

union ev_data {
	__be32 raw[7];
	struct mlx5_eqe_cmd cmd;
	struct mlx5_eqe_comp comp;
	struct mlx5_eqe_qp_srq qp_srq;
	struct mlx5_eqe_cq_err cq_err;
	struct mlx5_eqe_port_state port;
	struct mlx5_eqe_gpio gpio;
	struct mlx5_eqe_congestion cong;
	struct mlx5_eqe_stall_vl stall_vl;
	struct mlx5_eqe_page_req req_pages;
	struct mlx5_eqe_port_module_event port_module_event;
	struct mlx5_eqe_vport_change vport_change;
	struct mlx5_eqe_general_notification_event general_notifications;
} __packed;

struct mlx5_eqe {
	u8 rsvd0;
	u8 type;
	u8 rsvd1;
	u8 sub_type;
	__be32 rsvd2[7];
	union ev_data data;
	__be16 rsvd3;
	u8 signature;
	u8 owner;
} __packed;

struct mlx5_cmd_prot_block {
	u8 data[MLX5_CMD_DATA_BLOCK_SIZE];
	u8 rsvd0[48];
	__be64 next;
	__be32 block_num;
	u8 rsvd1;
	u8 token;
	u8 ctrl_sig;
	u8 sig;
};

#define MLX5_NUM_CMDS_IN_ADAPTER_PAGE \
	(MLX5_ADAPTER_PAGE_SIZE / MLX5_CMD_MBOX_SIZE)
CTASSERT(MLX5_CMD_MBOX_SIZE >= sizeof(struct mlx5_cmd_prot_block));
CTASSERT(MLX5_CMD_MBOX_SIZE <= MLX5_ADAPTER_PAGE_SIZE);

enum {
	MLX5_CQE_SYND_FLUSHED_IN_ERROR = 5,
};

struct mlx5_err_cqe {
	u8 rsvd0[32];
	__be32 srqn;
	u8 rsvd1[18];
	u8 vendor_err_synd;
	u8 syndrome;
	__be32 s_wqe_opcode_qpn;
	__be16 wqe_counter;
	u8 signature;
	u8 op_own;
};

struct mlx5_cqe64 {
	u8 tunneled_etc;
	u8 rsvd0[3];
	u8 lro_tcppsh_abort_dupack;
	u8 lro_min_ttl;
	__be16 lro_tcp_win;
	__be32 lro_ack_seq_num;
	__be32 rss_hash_result;
	u8 rss_hash_type;
	u8 ml_path;
	u8 rsvd20[2];
	__be16 check_sum;
	__be16 slid;
	__be32 flags_rqpn;
	u8 hds_ip_ext;
	u8 l4_hdr_type_etc;
	__be16 vlan_info;
	__be32 srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
	__be32 imm_inval_pkey;
	u8 rsvd40[4];
	__be32 byte_cnt;
	__be64 timestamp;
	__be32 sop_drop_qpn;
	__be16 wqe_counter;
	u8 signature;
	u8 op_own;
};

#define MLX5_CQE_TSTMP_PTP (1ULL << 63)

static inline bool get_cqe_lro_timestamp_valid(struct mlx5_cqe64 *cqe)
{
	return (cqe->lro_tcppsh_abort_dupack >> 7) & 1;
}

static inline bool get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
{
	return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
}

static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
{
	return (cqe->l4_hdr_type_etc >> 4) & 0x7;
}

static inline u16 get_cqe_vlan(struct mlx5_cqe64 *cqe)
{
	return be16_to_cpu(cqe->vlan_info) & 0xfff;
}

static inline void get_cqe_smac(struct mlx5_cqe64 *cqe, u8 *smac)
{
	memcpy(smac, &cqe->rss_hash_type, 4);
	memcpy(smac + 4, &cqe->slid, 2);
}

static inline bool cqe_has_vlan(struct mlx5_cqe64 *cqe)
{
	return cqe->l4_hdr_type_etc & 0x1;
}

static inline bool cqe_is_tunneled(struct mlx5_cqe64 *cqe)
{
	return cqe->tunneled_etc & 0x1;
}
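
/*
 * Usage sketch (editorial addition): the inline helpers above decode the
 * receive-side fields of struct mlx5_cqe64.  A receive completion handler
 * might consume them roughly as follows; "cqe" is assumed to point at a
 * completed CQE already owned by software:
 *
 *	u32 byte_cnt = be32_to_cpu(cqe->byte_cnt);
 *	u16 vlan_id = 0;
 *
 *	if (cqe_has_vlan(cqe))
 *		vlan_id = get_cqe_vlan(cqe);	(low 12 bits of vlan_info)
 *	if (get_cqe_l4_hdr_type(cqe) == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA)
 *		(TCP segment carrying both data and an ACK)
 */
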
enum {
	CQE_L4_HDR_TYPE_NONE = 0x0,
	CQE_L4_HDR_TYPE_TCP_NO_ACK = 0x1,
	CQE_L4_HDR_TYPE_UDP = 0x2,
	CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA = 0x3,
	CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA = 0x4,
};

enum {
	/* source L3 hash types */
	CQE_RSS_SRC_HTYPE_IP = 0x3 << 0,
	CQE_RSS_SRC_HTYPE_IPV4 = 0x1 << 0,
	CQE_RSS_SRC_HTYPE_IPV6 = 0x2 << 0,

	/* destination L3 hash types */
	CQE_RSS_DST_HTYPE_IP = 0x3 << 2,
	CQE_RSS_DST_HTYPE_IPV4 = 0x1 << 2,
	CQE_RSS_DST_HTYPE_IPV6 = 0x2 << 2,

	/* source L4 hash types */
	CQE_RSS_SRC_HTYPE_L4 = 0x3 << 4,
	CQE_RSS_SRC_HTYPE_TCP = 0x1 << 4,
	CQE_RSS_SRC_HTYPE_UDP = 0x2 << 4,
	CQE_RSS_SRC_HTYPE_IPSEC = 0x3 << 4,

	/* destination L4 hash types */
	CQE_RSS_DST_HTYPE_L4 = 0x3 << 6,
	CQE_RSS_DST_HTYPE_TCP = 0x1 << 6,
	CQE_RSS_DST_HTYPE_UDP = 0x2 << 6,
	CQE_RSS_DST_HTYPE_IPSEC = 0x3 << 6,
};

enum {
	MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH = 0x0,
	MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6 = 0x1,
	MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4 = 0x2,
};

enum {
	CQE_L2_OK = 1 << 0,
	CQE_L3_OK = 1 << 1,
	CQE_L4_OK = 1 << 2,
};

struct mlx5_sig_err_cqe {
	u8 rsvd0[16];
	__be32 expected_trans_sig;
	__be32 actual_trans_sig;
	__be32 expected_reftag;
	__be32 actual_reftag;
	__be16 syndrome;
	u8 rsvd22[2];
	__be32 mkey;
	__be64 err_offset;
	u8 rsvd30[8];
	__be32 qpn;
	u8 rsvd38[2];
	u8 signature;
	u8 op_own;
};

struct mlx5_wqe_srq_next_seg {
	u8 rsvd0[2];
	__be16 next_wqe_index;
	u8 signature;
	u8 rsvd1[11];
};

union mlx5_ext_cqe {
	struct ib_grh grh;
	u8 inl[64];
};

struct mlx5_cqe128 {
	union mlx5_ext_cqe inl_grh;
	struct mlx5_cqe64 cqe64;
};

enum {
	MLX5_MKEY_STATUS_FREE = 1 << 6,
};

struct mlx5_mkey_seg {
	/* This is a two-bit field occupying bits 31-30.
	 * bit 31 is always 0,
	 * bit 30 is zero for regular MRs and 1 (i.e. free) for UMRs that do not have translation
	 */
	u8 status;
	u8 pcie_control;
	u8 flags;
	u8 version;
	__be32 qpn_mkey7_0;
	u8 rsvd1[4];
	__be32 flags_pd;
	__be64 start_addr;
	__be64 len;
	__be32 bsfs_octo_size;
	u8 rsvd2[16];
	__be32 xlt_oct_size;
	u8 rsvd3[3];
	u8 log2_page_size;
	u8 rsvd4[4];
};

#define MLX5_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90)

enum {
	MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO = 1 << 0
};

static inline int mlx5_host_is_le(void)
{
#if defined(__LITTLE_ENDIAN)
	return 1;
#elif defined(__BIG_ENDIAN)
	return 0;
#else
#error Host endianness not defined
#endif
}

#define MLX5_CMD_OP_MAX 0x939

enum {
	VPORT_STATE_DOWN = 0x0,
	VPORT_STATE_UP = 0x1,
};

enum {
	MLX5_L3_PROT_TYPE_IPV4 = 0,
	MLX5_L3_PROT_TYPE_IPV6 = 1,
};

enum {
	MLX5_L4_PROT_TYPE_TCP = 0,
	MLX5_L4_PROT_TYPE_UDP = 1,
};

enum {
	MLX5_HASH_FIELD_SEL_SRC_IP = 1 << 0,
	MLX5_HASH_FIELD_SEL_DST_IP = 1 << 1,
	MLX5_HASH_FIELD_SEL_L4_SPORT = 1 << 2,
	MLX5_HASH_FIELD_SEL_L4_DPORT = 1 << 3,
	MLX5_HASH_FIELD_SEL_IPSEC_SPI = 1 << 4,
};

enum {
	MLX5_MATCH_OUTER_HEADERS = 1 << 0,
	MLX5_MATCH_MISC_PARAMETERS = 1 << 1,
	MLX5_MATCH_INNER_HEADERS = 1 << 2,

};

enum {
	MLX5_FLOW_TABLE_TYPE_NIC_RCV = 0,
	MLX5_FLOW_TABLE_TYPE_EGRESS_ACL = 2,
	MLX5_FLOW_TABLE_TYPE_INGRESS_ACL = 3,
	MLX5_FLOW_TABLE_TYPE_ESWITCH = 4,
	MLX5_FLOW_TABLE_TYPE_SNIFFER_RX = 5,
	MLX5_FLOW_TABLE_TYPE_SNIFFER_TX = 6,
	MLX5_FLOW_TABLE_TYPE_NIC_RX_RDMA = 7,
};

enum {
	MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_NONE = 0,
	MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_IF_NO_VLAN = 1,
	MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_OVERWRITE = 2
};

enum {
	MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_SVLAN_STRIP = 1 << 0,
	MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_STRIP = 1 << 1,
	MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_SVLAN_INSERT = 1 << 2,
	MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_INSERT = 1 << 3
};

enum {
	MLX5_UC_ADDR_CHANGE = (1 << 0),
	MLX5_MC_ADDR_CHANGE = (1 << 1),
	MLX5_VLAN_CHANGE = (1 << 2),
	MLX5_PROMISC_CHANGE = (1 << 3),
	MLX5_MTU_CHANGE = (1 << 4),
};

enum mlx5_list_type {
	MLX5_NIC_VPORT_LIST_TYPE_UC = 0x0,
	MLX5_NIC_VPORT_LIST_TYPE_MC = 0x1,
	MLX5_NIC_VPORT_LIST_TYPE_VLAN = 0x2,
};

enum {
	MLX5_ESW_VPORT_ADMIN_STATE_DOWN = 0x0,
	MLX5_ESW_VPORT_ADMIN_STATE_UP = 0x1,
	MLX5_ESW_VPORT_ADMIN_STATE_AUTO = 0x2,
};

/* MLX5 DEV CAPs */

/* TODO: EAT.ME */
enum mlx5_cap_mode {
	HCA_CAP_OPMOD_GET_MAX = 0,
	HCA_CAP_OPMOD_GET_CUR = 1,
};

enum mlx5_cap_type {
	MLX5_CAP_GENERAL = 0,
	MLX5_CAP_ETHERNET_OFFLOADS,
	MLX5_CAP_ODP,
	MLX5_CAP_ATOMIC,
	MLX5_CAP_ROCE,
	MLX5_CAP_IPOIB_OFFLOADS,
	MLX5_CAP_EOIB_OFFLOADS,
	MLX5_CAP_FLOW_TABLE,
	MLX5_CAP_ESWITCH_FLOW_TABLE,
	MLX5_CAP_ESWITCH,
	MLX5_CAP_SNAPSHOT,
	MLX5_CAP_VECTOR_CALC,
	MLX5_CAP_QOS,
	MLX5_CAP_DEBUG,
	/* NUM OF CAP Types */
	MLX5_CAP_NUM
};

/* GET Dev Caps macros */
#define MLX5_CAP_GEN(mdev, cap) \
	MLX5_GET(cmd_hca_cap, mdev->hca_caps_cur[MLX5_CAP_GENERAL], cap)

#define MLX5_CAP_GEN_MAX(mdev, cap) \
	MLX5_GET(cmd_hca_cap, mdev->hca_caps_max[MLX5_CAP_GENERAL], cap)

#define MLX5_CAP_ETH(mdev, cap) \
	MLX5_GET(per_protocol_networking_offload_caps,\
		 mdev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)

#define MLX5_CAP_ETH_MAX(mdev, cap) \
	MLX5_GET(per_protocol_networking_offload_caps,\
		 mdev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)

#define MLX5_CAP_ROCE(mdev, cap) \
	MLX5_GET(roce_cap, mdev->hca_caps_cur[MLX5_CAP_ROCE], cap)

#define MLX5_CAP_ROCE_MAX(mdev, cap) \
	MLX5_GET(roce_cap, mdev->hca_caps_max[MLX5_CAP_ROCE], cap)

#define MLX5_CAP_ATOMIC(mdev, cap) \
	MLX5_GET(atomic_caps, mdev->hca_caps_cur[MLX5_CAP_ATOMIC], cap)

#define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
	MLX5_GET(atomic_caps, mdev->hca_caps_max[MLX5_CAP_ATOMIC], cap)

#define MLX5_CAP_FLOWTABLE(mdev, cap) \
	MLX5_GET(flow_table_nic_cap, mdev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], cap)

#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
	MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)

#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
	MLX5_GET(flow_table_eswitch_cap, \
		 mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)

#define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \
	MLX5_GET(flow_table_eswitch_cap, \
		 mdev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)

#define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)

#define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap)

#define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap)

#define MLX5_CAP_ESW_EGRESS_ACL_MAX(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_egress.cap)

#define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap)

#define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap)

#define MLX5_CAP_ESW(mdev, cap) \
	MLX5_GET(e_switch_cap, \
		 mdev->hca_caps_cur[MLX5_CAP_ESWITCH], cap)

#define MLX5_CAP_ESW_MAX(mdev, cap) \
	MLX5_GET(e_switch_cap, \
		 mdev->hca_caps_max[MLX5_CAP_ESWITCH], cap)

#define MLX5_CAP_ODP(mdev, cap)\
	MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap)

#define MLX5_CAP_ODP_MAX(mdev, cap)\
	MLX5_GET(odp_cap, mdev->hca_caps_max[MLX5_CAP_ODP], cap)

#define MLX5_CAP_SNAPSHOT(mdev, cap) \
	MLX5_GET(snapshot_cap, \
		 mdev->hca_caps_cur[MLX5_CAP_SNAPSHOT], cap)

#define MLX5_CAP_SNAPSHOT_MAX(mdev, cap) \
	MLX5_GET(snapshot_cap, \
		 mdev->hca_caps_max[MLX5_CAP_SNAPSHOT], cap)

#define MLX5_CAP_EOIB_OFFLOADS(mdev, cap) \
	MLX5_GET(per_protocol_networking_offload_caps,\
		 mdev->hca_caps_cur[MLX5_CAP_EOIB_OFFLOADS], cap)

#define MLX5_CAP_EOIB_OFFLOADS_MAX(mdev, cap) \
	MLX5_GET(per_protocol_networking_offload_caps,\
		 mdev->hca_caps_max[MLX5_CAP_EOIB_OFFLOADS], cap)

#define MLX5_CAP_DEBUG(mdev, cap) \
	MLX5_GET(debug_cap, \
		 mdev->hca_caps_cur[MLX5_CAP_DEBUG], cap)

#define MLX5_CAP_DEBUG_MAX(mdev, cap) \
	MLX5_GET(debug_cap, \
		 mdev->hca_caps_max[MLX5_CAP_DEBUG], cap)

#define MLX5_CAP_QOS(mdev, cap) \
	MLX5_GET(qos_cap,\
		 mdev->hca_caps_cur[MLX5_CAP_QOS], cap)

#define MLX5_CAP_QOS_MAX(mdev, cap) \
	MLX5_GET(qos_cap,\
		 mdev->hca_caps_max[MLX5_CAP_QOS], cap)
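
/*
 * Usage sketch (editorial addition): the MLX5_CAP_*() macros above read
 * single fields from the current ("cur") and maximum ("max") capability
 * pages cached on the device structure.  A typical feature check looks
 * roughly like this; the field names are assumptions based on mlx5_ifc.h:
 *
 *	if (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
 *	    MLX5_CAP_ETH(mdev, lro_cap))
 *		lro_supported = true;
 */
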
enum {
	MLX5_CMD_STAT_OK = 0x0,
	MLX5_CMD_STAT_INT_ERR = 0x1,
	MLX5_CMD_STAT_BAD_OP_ERR = 0x2,
	MLX5_CMD_STAT_BAD_PARAM_ERR = 0x3,
	MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4,
	MLX5_CMD_STAT_BAD_RES_ERR = 0x5,
	MLX5_CMD_STAT_RES_BUSY = 0x6,
	MLX5_CMD_STAT_LIM_ERR = 0x8,
	MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9,
	MLX5_CMD_STAT_IX_ERR = 0xa,
	MLX5_CMD_STAT_NO_RES_ERR = 0xf,
	MLX5_CMD_STAT_BAD_INP_LEN_ERR = 0x50,
	MLX5_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51,
	MLX5_CMD_STAT_BAD_QP_STATE_ERR = 0x10,
	MLX5_CMD_STAT_BAD_PKT_ERR = 0x30,
	MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40,
};

enum {
	MLX5_IEEE_802_3_COUNTERS_GROUP = 0x0,
	MLX5_RFC_2863_COUNTERS_GROUP = 0x1,
	MLX5_RFC_2819_COUNTERS_GROUP = 0x2,
	MLX5_RFC_3635_COUNTERS_GROUP = 0x3,
	MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5,
	MLX5_ETHERNET_DISCARD_COUNTERS_GROUP = 0x6,
	MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10,
	MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11,
	MLX5_PHYSICAL_LAYER_COUNTERS_GROUP = 0x12,
	MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP = 0x16,
	MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20,
};

enum {
	MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP = 0x0,
	MLX5_PCIE_LANE_COUNTERS_GROUP = 0x1,
	MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP = 0x2,
};

enum {
	MLX5_NUM_UUARS_PER_PAGE = MLX5_NON_FP_BF_REGS_PER_PAGE,
	MLX5_DEF_TOT_UUARS = 8 * MLX5_NUM_UUARS_PER_PAGE,
};

enum {
	NUM_DRIVER_UARS = 4,
	NUM_LOW_LAT_UUARS = 4,
};

enum {
	MLX5_CAP_PORT_TYPE_IB = 0x0,
	MLX5_CAP_PORT_TYPE_ETH = 0x1,
};

enum {
	MLX5_CMD_HCA_CAP_MIN_WQE_INLINE_MODE_L2 = 0x0,
	MLX5_CMD_HCA_CAP_MIN_WQE_INLINE_MODE_VPORT_CONFIG = 0x1,
	MLX5_CMD_HCA_CAP_MIN_WQE_INLINE_MODE_NOT_REQUIRED = 0x2
};

enum {
	MLX5_QUERY_VPORT_STATE_OUT_STATE_FOLLOW = 0x2,
};

static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
{
	if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
		return 0;
	return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
}
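
/*
 * Worked example (editorial addition): mlx5_to_sw_pkey_sz() converts the
 * log-encoded P_Key table size reported by firmware into an entry count,
 * e.g. pkey_sz == MLX5_MAX_LOG_PKEY_TABLE (5) yields 128 << 5 = 4096
 * entries, while anything above that limit returns 0.
 */
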
struct mlx5_ifc_mcia_reg_bits {
	u8 l[0x1];
	u8 reserved_0[0x7];
	u8 module[0x8];
	u8 reserved_1[0x8];
	u8 status[0x8];

	u8 i2c_device_address[0x8];
	u8 page_number[0x8];
	u8 device_address[0x10];

	u8 reserved_2[0x10];
	u8 size[0x10];

	u8 reserved_3[0x20];

	u8 dword_0[0x20];
	u8 dword_1[0x20];
	u8 dword_2[0x20];
	u8 dword_3[0x20];
	u8 dword_4[0x20];
	u8 dword_5[0x20];
	u8 dword_6[0x20];
	u8 dword_7[0x20];
	u8 dword_8[0x20];
	u8 dword_9[0x20];
	u8 dword_10[0x20];
	u8 dword_11[0x20];
};

#define MLX5_CMD_OP_QUERY_EEPROM 0x93c

struct mlx5_mini_cqe8 {
	union {
		__be32 rx_hash_result;
		__be16 checksum;
		__be16 rsvd;
		struct {
			__be16 wqe_counter;
			u8 s_wqe_opcode;
			u8 reserved;
		} s_wqe_info;
	};
	__be32 byte_cnt;
};

enum {
	MLX5_NO_INLINE_DATA,
	MLX5_INLINE_DATA32_SEG,
	MLX5_INLINE_DATA64_SEG,
	MLX5_COMPRESSED,
};

enum mlx5_exp_cqe_zip_recv_type {
	MLX5_CQE_FORMAT_HASH,
	MLX5_CQE_FORMAT_CSUM,
};

#define MLX5E_CQE_FORMAT_MASK 0xc
static inline int mlx5_get_cqe_format(const struct mlx5_cqe64 *cqe)
{
	return (cqe->op_own & MLX5E_CQE_FORMAT_MASK) >> 2;
}

enum {
	MLX5_GEN_EVENT_SUBTYPE_DELAY_DROP_TIMEOUT = 0x1,
};

/* 8 regular priorities + 1 for multicast */
#define MLX5_NUM_BYPASS_FTS 9

#endif /* MLX5_DEVICE_H */