1 /*- 2 * Copyright (c) 2013-2019, Mellanox Technologies, Ltd. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND 14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE 17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * SUCH DAMAGE. 24 */ 25 26 #include "opt_rss.h" 27 #include "opt_ratelimit.h" 28 29 #include <linux/module.h> 30 #include <linux/errno.h> 31 #include <linux/pci.h> 32 #include <linux/dma-mapping.h> 33 #include <linux/slab.h> 34 #include <linux/delay.h> 35 #include <linux/random.h> 36 #include <linux/io-mapping.h> 37 #include <linux/hardirq.h> 38 #include <linux/ktime.h> 39 #include <dev/mlx5/driver.h> 40 #include <dev/mlx5/cmd.h> 41 #include <dev/mlx5/mlx5_core/mlx5_core.h> 42 43 static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size); 44 static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev, 45 struct mlx5_cmd_msg *msg); 46 static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg); 47 48 enum { 49 CMD_IF_REV = 5, 50 }; 51 52 enum { 53 NUM_LONG_LISTS = 2, 54 NUM_MED_LISTS = 64, 55 LONG_LIST_SIZE = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 + 56 MLX5_CMD_DATA_BLOCK_SIZE, 57 MED_LIST_SIZE = 16 + MLX5_CMD_DATA_BLOCK_SIZE, 58 }; 59 60 enum { 61 MLX5_CMD_DELIVERY_STAT_OK = 0x0, 62 MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR = 0x1, 63 MLX5_CMD_DELIVERY_STAT_TOK_ERR = 0x2, 64 MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR = 0x3, 65 MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR = 0x4, 66 MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR = 0x5, 67 MLX5_CMD_DELIVERY_STAT_FW_ERR = 0x6, 68 MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR = 0x7, 69 MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR = 0x8, 70 MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR = 0x9, 71 MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10, 72 }; 73 74 struct mlx5_ifc_mbox_out_bits { 75 u8 status[0x8]; 76 u8 reserved_at_8[0x18]; 77 78 u8 syndrome[0x20]; 79 80 u8 reserved_at_40[0x40]; 81 }; 82 83 struct mlx5_ifc_mbox_in_bits { 84 u8 opcode[0x10]; 85 u8 reserved_at_10[0x10]; 86 87 u8 reserved_at_20[0x10]; 88 u8 op_mod[0x10]; 89 90 u8 reserved_at_40[0x40]; 91 }; 92 93 94 static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd, 95 struct mlx5_cmd_msg *in, 96 int uin_size, 97 struct mlx5_cmd_msg *out, 98 void *uout, int uout_size, 99 mlx5_cmd_cbk_t cbk, 100 void *context, int page_queue) 101 { 102 gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL; 103 struct mlx5_cmd_work_ent *ent; 104 105 ent = kzalloc(sizeof(*ent), alloc_flags); 106 if (!ent) 107 return ERR_PTR(-ENOMEM); 108 109 ent->in = in; 110 ent->uin_size = uin_size; 111 ent->out = out; 112 ent->uout = uout; 113 ent->uout_size = uout_size; 114 ent->callback = cbk; 115 ent->context = context; 116 ent->cmd = cmd; 117 ent->page_queue = page_queue; 118 119 return ent; 120 } 121 122 static u8 alloc_token(struct mlx5_cmd *cmd) 123 { 124 u8 token; 125 126 spin_lock(&cmd->token_lock); 127 cmd->token++; 128 if (cmd->token == 0) 129 cmd->token++; 130 token = cmd->token; 131 spin_unlock(&cmd->token_lock); 132 133 return token; 134 } 135 136 static int alloc_ent(struct mlx5_cmd_work_ent *ent) 137 { 138 unsigned long flags; 139 struct mlx5_cmd *cmd = ent->cmd; 140 struct mlx5_core_dev *dev = 141 container_of(cmd, struct mlx5_core_dev, cmd); 142 int ret = cmd->max_reg_cmds; 143 144 spin_lock_irqsave(&cmd->alloc_lock, flags); 145 if (!ent->page_queue) { 146 ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds); 147 if (ret >= cmd->max_reg_cmds) 148 ret = -1; 149 } 150 151 if (dev->state != MLX5_DEVICE_STATE_UP) 152 ret = -1; 153 154 if (ret != -1) { 155 ent->busy = 1; 156 ent->idx = ret; 157 clear_bit(ent->idx, &cmd->bitmask); 158 cmd->ent_mode[ent->idx] = 159 ent->polling ? MLX5_CMD_MODE_POLLING : MLX5_CMD_MODE_EVENTS; 160 cmd->ent_arr[ent->idx] = ent; 161 } 162 spin_unlock_irqrestore(&cmd->alloc_lock, flags); 163 164 return ret; 165 } 166 167 static void free_ent(struct mlx5_cmd *cmd, int idx) 168 { 169 unsigned long flags; 170 171 spin_lock_irqsave(&cmd->alloc_lock, flags); 172 cmd->ent_arr[idx] = NULL; /* safety clear */ 173 cmd->ent_mode[idx] = MLX5_CMD_MODE_POLLING; /* reset mode */ 174 set_bit(idx, &cmd->bitmask); 175 spin_unlock_irqrestore(&cmd->alloc_lock, flags); 176 } 177 178 static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx) 179 { 180 return cmd->cmd_buf + (idx << cmd->log_stride); 181 } 182 183 static u8 xor8_buf(void *buf, int len) 184 { 185 u8 *ptr = buf; 186 u8 sum = 0; 187 int i; 188 189 for (i = 0; i < len; i++) 190 sum ^= ptr[i]; 191 192 return sum; 193 } 194 195 static int verify_block_sig(struct mlx5_cmd_prot_block *block) 196 { 197 if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff) 198 return -EINVAL; 199 200 if (xor8_buf(block, sizeof(*block)) != 0xff) 201 return -EINVAL; 202 203 return 0; 204 } 205 206 static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token, 207 int csum) 208 { 209 block->token = token; 210 if (csum) { 211 block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) - 212 sizeof(block->data) - 2); 213 block->sig = ~xor8_buf(block, sizeof(*block) - 1); 214 } 215 } 216 217 static void 218 calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum) 219 { 220 size_t i; 221 222 for (i = 0; i != (msg->numpages * MLX5_NUM_CMDS_IN_ADAPTER_PAGE); i++) { 223 struct mlx5_cmd_prot_block *block; 224 225 block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE); 226 227 /* compute signature */ 228 calc_block_sig(block, token, csum); 229 230 /* check for last block */ 231 if (block->next == 0) 232 break; 233 } 234 235 /* make sure data gets written to RAM */ 236 mlx5_fwp_flush(msg); 237 } 238 239 static void set_signature(struct mlx5_cmd_work_ent *ent, int csum) 240 { 241 ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay)); 242 calc_chain_sig(ent->in, ent->token, csum); 243 calc_chain_sig(ent->out, ent->token, csum); 244 } 245 246 static void poll_timeout(struct mlx5_cmd_work_ent *ent) 247 { 248 struct mlx5_core_dev *dev = container_of(ent->cmd, 249 struct mlx5_core_dev, cmd); 250 int poll_end = jiffies + 251 msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000); 252 u8 own; 253 254 do { 255 own = ent->lay->status_own; 256 if (!(own & CMD_OWNER_HW) || 257 dev->state != MLX5_DEVICE_STATE_UP) { 258 ent->ret = 0; 259 return; 260 } 261 usleep_range(5000, 10000); 262 } while (time_before(jiffies, poll_end)); 263 264 ent->ret = -ETIMEDOUT; 265 } 266 267 static void free_cmd(struct mlx5_cmd_work_ent *ent) 268 { 269 cancel_delayed_work_sync(&ent->cb_timeout_work); 270 kfree(ent); 271 } 272 273 static int 274 verify_signature(struct mlx5_cmd_work_ent *ent) 275 { 276 struct mlx5_cmd_msg *msg = ent->out; 277 size_t i; 278 int err; 279 u8 sig; 280 281 sig = xor8_buf(ent->lay, sizeof(*ent->lay)); 282 if (sig != 0xff) 283 return -EINVAL; 284 285 for (i = 0; i != (msg->numpages * MLX5_NUM_CMDS_IN_ADAPTER_PAGE); i++) { 286 struct mlx5_cmd_prot_block *block; 287 288 block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE); 289 290 /* compute signature */ 291 err = verify_block_sig(block); 292 if (err != 0) 293 return (err); 294 295 /* check for last block */ 296 if (block->next == 0) 297 break; 298 } 299 return (0); 300 } 301 302 static void dump_buf(void *buf, int size, int data_only, int offset) 303 { 304 __be32 *p = buf; 305 int i; 306 307 for (i = 0; i < size; i += 16) { 308 pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]), 309 be32_to_cpu(p[1]), be32_to_cpu(p[2]), 310 be32_to_cpu(p[3])); 311 p += 4; 312 offset += 16; 313 } 314 if (!data_only) 315 pr_debug("\n"); 316 } 317 318 enum { 319 MLX5_DRIVER_STATUS_ABORTED = 0xfe, 320 MLX5_DRIVER_SYND = 0xbadd00de, 321 }; 322 323 static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, 324 u32 *synd, u8 *status) 325 { 326 *synd = 0; 327 *status = 0; 328 329 switch (op) { 330 case MLX5_CMD_OP_TEARDOWN_HCA: 331 case MLX5_CMD_OP_DISABLE_HCA: 332 case MLX5_CMD_OP_MANAGE_PAGES: 333 case MLX5_CMD_OP_DESTROY_MKEY: 334 case MLX5_CMD_OP_DESTROY_EQ: 335 case MLX5_CMD_OP_DESTROY_CQ: 336 case MLX5_CMD_OP_DESTROY_QP: 337 case MLX5_CMD_OP_DESTROY_PSV: 338 case MLX5_CMD_OP_DESTROY_SRQ: 339 case MLX5_CMD_OP_DESTROY_XRC_SRQ: 340 case MLX5_CMD_OP_DESTROY_DCT: 341 case MLX5_CMD_OP_DEALLOC_Q_COUNTER: 342 case MLX5_CMD_OP_DEALLOC_PD: 343 case MLX5_CMD_OP_DEALLOC_UAR: 344 case MLX5_CMD_OP_DETACH_FROM_MCG: 345 case MLX5_CMD_OP_DEALLOC_XRCD: 346 case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN: 347 case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT: 348 case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY: 349 case MLX5_CMD_OP_DESTROY_TIR: 350 case MLX5_CMD_OP_DESTROY_SQ: 351 case MLX5_CMD_OP_DESTROY_RQ: 352 case MLX5_CMD_OP_DESTROY_RMP: 353 case MLX5_CMD_OP_DESTROY_TIS: 354 case MLX5_CMD_OP_DESTROY_RQT: 355 case MLX5_CMD_OP_DESTROY_FLOW_TABLE: 356 case MLX5_CMD_OP_DESTROY_FLOW_GROUP: 357 case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY: 358 case MLX5_CMD_OP_2ERR_QP: 359 case MLX5_CMD_OP_2RST_QP: 360 case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT: 361 case MLX5_CMD_OP_MODIFY_FLOW_TABLE: 362 case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY: 363 case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT: 364 case MLX5_CMD_OP_DESTROY_GENERAL_OBJ: 365 return MLX5_CMD_STAT_OK; 366 367 case MLX5_CMD_OP_QUERY_HCA_CAP: 368 case MLX5_CMD_OP_QUERY_ADAPTER: 369 case MLX5_CMD_OP_INIT_HCA: 370 case MLX5_CMD_OP_ENABLE_HCA: 371 case MLX5_CMD_OP_QUERY_PAGES: 372 case MLX5_CMD_OP_SET_HCA_CAP: 373 case MLX5_CMD_OP_QUERY_ISSI: 374 case MLX5_CMD_OP_SET_ISSI: 375 case MLX5_CMD_OP_CREATE_MKEY: 376 case MLX5_CMD_OP_QUERY_MKEY: 377 case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS: 378 case MLX5_CMD_OP_PAGE_FAULT_RESUME: 379 case MLX5_CMD_OP_CREATE_EQ: 380 case MLX5_CMD_OP_QUERY_EQ: 381 case MLX5_CMD_OP_GEN_EQE: 382 case MLX5_CMD_OP_CREATE_CQ: 383 case MLX5_CMD_OP_QUERY_CQ: 384 case MLX5_CMD_OP_MODIFY_CQ: 385 case MLX5_CMD_OP_CREATE_QP: 386 case MLX5_CMD_OP_RST2INIT_QP: 387 case MLX5_CMD_OP_INIT2RTR_QP: 388 case MLX5_CMD_OP_RTR2RTS_QP: 389 case MLX5_CMD_OP_RTS2RTS_QP: 390 case MLX5_CMD_OP_SQERR2RTS_QP: 391 case MLX5_CMD_OP_QUERY_QP: 392 case MLX5_CMD_OP_SQD_RTS_QP: 393 case MLX5_CMD_OP_INIT2INIT_QP: 394 case MLX5_CMD_OP_CREATE_PSV: 395 case MLX5_CMD_OP_CREATE_SRQ: 396 case MLX5_CMD_OP_QUERY_SRQ: 397 case MLX5_CMD_OP_ARM_RQ: 398 case MLX5_CMD_OP_CREATE_XRC_SRQ: 399 case MLX5_CMD_OP_QUERY_XRC_SRQ: 400 case MLX5_CMD_OP_ARM_XRC_SRQ: 401 case MLX5_CMD_OP_CREATE_DCT: 402 case MLX5_CMD_OP_DRAIN_DCT: 403 case MLX5_CMD_OP_QUERY_DCT: 404 case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION: 405 case MLX5_CMD_OP_QUERY_VPORT_STATE: 406 case MLX5_CMD_OP_MODIFY_VPORT_STATE: 407 case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT: 408 case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT: 409 case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT: 410 case MLX5_CMD_OP_QUERY_ROCE_ADDRESS: 411 case MLX5_CMD_OP_SET_ROCE_ADDRESS: 412 case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT: 413 case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT: 414 case MLX5_CMD_OP_QUERY_HCA_VPORT_GID: 415 case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY: 416 case MLX5_CMD_OP_QUERY_VNIC_ENV: 417 case MLX5_CMD_OP_QUERY_VPORT_COUNTER: 418 case MLX5_CMD_OP_ALLOC_Q_COUNTER: 419 case MLX5_CMD_OP_QUERY_Q_COUNTER: 420 case MLX5_CMD_OP_ALLOC_PD: 421 case MLX5_CMD_OP_ALLOC_UAR: 422 case MLX5_CMD_OP_CONFIG_INT_MODERATION: 423 case MLX5_CMD_OP_ACCESS_REG: 424 case MLX5_CMD_OP_ATTACH_TO_MCG: 425 case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG: 426 case MLX5_CMD_OP_MAD_IFC: 427 case MLX5_CMD_OP_QUERY_MAD_DEMUX: 428 case MLX5_CMD_OP_SET_MAD_DEMUX: 429 case MLX5_CMD_OP_NOP: 430 case MLX5_CMD_OP_ALLOC_XRCD: 431 case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN: 432 case MLX5_CMD_OP_QUERY_CONG_STATUS: 433 case MLX5_CMD_OP_MODIFY_CONG_STATUS: 434 case MLX5_CMD_OP_QUERY_CONG_PARAMS: 435 case MLX5_CMD_OP_MODIFY_CONG_PARAMS: 436 case MLX5_CMD_OP_QUERY_CONG_STATISTICS: 437 case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT: 438 case MLX5_CMD_OP_SET_L2_TABLE_ENTRY: 439 case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY: 440 case MLX5_CMD_OP_CREATE_TIR: 441 case MLX5_CMD_OP_MODIFY_TIR: 442 case MLX5_CMD_OP_QUERY_TIR: 443 case MLX5_CMD_OP_CREATE_SQ: 444 case MLX5_CMD_OP_MODIFY_SQ: 445 case MLX5_CMD_OP_QUERY_SQ: 446 case MLX5_CMD_OP_CREATE_RQ: 447 case MLX5_CMD_OP_MODIFY_RQ: 448 case MLX5_CMD_OP_QUERY_RQ: 449 case MLX5_CMD_OP_CREATE_RMP: 450 case MLX5_CMD_OP_MODIFY_RMP: 451 case MLX5_CMD_OP_QUERY_RMP: 452 case MLX5_CMD_OP_CREATE_TIS: 453 case MLX5_CMD_OP_MODIFY_TIS: 454 case MLX5_CMD_OP_QUERY_TIS: 455 case MLX5_CMD_OP_CREATE_RQT: 456 case MLX5_CMD_OP_MODIFY_RQT: 457 case MLX5_CMD_OP_QUERY_RQT: 458 case MLX5_CMD_OP_CREATE_FLOW_TABLE: 459 case MLX5_CMD_OP_QUERY_FLOW_TABLE: 460 case MLX5_CMD_OP_CREATE_FLOW_GROUP: 461 case MLX5_CMD_OP_QUERY_FLOW_GROUP: 462 case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY: 463 case MLX5_CMD_OP_CREATE_GENERAL_OBJ: 464 case MLX5_CMD_OP_MODIFY_GENERAL_OBJ: 465 case MLX5_CMD_OP_QUERY_GENERAL_OBJ: 466 *status = MLX5_DRIVER_STATUS_ABORTED; 467 *synd = MLX5_DRIVER_SYND; 468 return -EIO; 469 default: 470 mlx5_core_err(dev, "Unknown FW command (%d)\n", op); 471 return -EINVAL; 472 } 473 } 474 475 const char *mlx5_command_str(int command) 476 { 477 #define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd 478 479 switch (command) { 480 MLX5_COMMAND_STR_CASE(QUERY_HCA_CAP); 481 MLX5_COMMAND_STR_CASE(SET_HCA_CAP); 482 MLX5_COMMAND_STR_CASE(QUERY_ADAPTER); 483 MLX5_COMMAND_STR_CASE(INIT_HCA); 484 MLX5_COMMAND_STR_CASE(TEARDOWN_HCA); 485 MLX5_COMMAND_STR_CASE(ENABLE_HCA); 486 MLX5_COMMAND_STR_CASE(DISABLE_HCA); 487 MLX5_COMMAND_STR_CASE(QUERY_PAGES); 488 MLX5_COMMAND_STR_CASE(MANAGE_PAGES); 489 MLX5_COMMAND_STR_CASE(QUERY_ISSI); 490 MLX5_COMMAND_STR_CASE(SET_ISSI); 491 MLX5_COMMAND_STR_CASE(CREATE_MKEY); 492 MLX5_COMMAND_STR_CASE(QUERY_MKEY); 493 MLX5_COMMAND_STR_CASE(DESTROY_MKEY); 494 MLX5_COMMAND_STR_CASE(QUERY_SPECIAL_CONTEXTS); 495 MLX5_COMMAND_STR_CASE(PAGE_FAULT_RESUME); 496 MLX5_COMMAND_STR_CASE(CREATE_EQ); 497 MLX5_COMMAND_STR_CASE(DESTROY_EQ); 498 MLX5_COMMAND_STR_CASE(QUERY_EQ); 499 MLX5_COMMAND_STR_CASE(GEN_EQE); 500 MLX5_COMMAND_STR_CASE(CREATE_CQ); 501 MLX5_COMMAND_STR_CASE(DESTROY_CQ); 502 MLX5_COMMAND_STR_CASE(QUERY_CQ); 503 MLX5_COMMAND_STR_CASE(MODIFY_CQ); 504 MLX5_COMMAND_STR_CASE(CREATE_QP); 505 MLX5_COMMAND_STR_CASE(DESTROY_QP); 506 MLX5_COMMAND_STR_CASE(RST2INIT_QP); 507 MLX5_COMMAND_STR_CASE(INIT2RTR_QP); 508 MLX5_COMMAND_STR_CASE(RTR2RTS_QP); 509 MLX5_COMMAND_STR_CASE(RTS2RTS_QP); 510 MLX5_COMMAND_STR_CASE(SQERR2RTS_QP); 511 MLX5_COMMAND_STR_CASE(2ERR_QP); 512 MLX5_COMMAND_STR_CASE(2RST_QP); 513 MLX5_COMMAND_STR_CASE(QUERY_QP); 514 MLX5_COMMAND_STR_CASE(SQD_RTS_QP); 515 MLX5_COMMAND_STR_CASE(MAD_IFC); 516 MLX5_COMMAND_STR_CASE(INIT2INIT_QP); 517 MLX5_COMMAND_STR_CASE(CREATE_PSV); 518 MLX5_COMMAND_STR_CASE(DESTROY_PSV); 519 MLX5_COMMAND_STR_CASE(CREATE_SRQ); 520 MLX5_COMMAND_STR_CASE(DESTROY_SRQ); 521 MLX5_COMMAND_STR_CASE(QUERY_SRQ); 522 MLX5_COMMAND_STR_CASE(ARM_RQ); 523 MLX5_COMMAND_STR_CASE(CREATE_XRC_SRQ); 524 MLX5_COMMAND_STR_CASE(DESTROY_XRC_SRQ); 525 MLX5_COMMAND_STR_CASE(QUERY_XRC_SRQ); 526 MLX5_COMMAND_STR_CASE(ARM_XRC_SRQ); 527 MLX5_COMMAND_STR_CASE(CREATE_DCT); 528 MLX5_COMMAND_STR_CASE(SET_DC_CNAK_TRACE); 529 MLX5_COMMAND_STR_CASE(DESTROY_DCT); 530 MLX5_COMMAND_STR_CASE(DRAIN_DCT); 531 MLX5_COMMAND_STR_CASE(QUERY_DCT); 532 MLX5_COMMAND_STR_CASE(ARM_DCT_FOR_KEY_VIOLATION); 533 MLX5_COMMAND_STR_CASE(QUERY_VPORT_STATE); 534 MLX5_COMMAND_STR_CASE(MODIFY_VPORT_STATE); 535 MLX5_COMMAND_STR_CASE(QUERY_ESW_VPORT_CONTEXT); 536 MLX5_COMMAND_STR_CASE(MODIFY_ESW_VPORT_CONTEXT); 537 MLX5_COMMAND_STR_CASE(QUERY_NIC_VPORT_CONTEXT); 538 MLX5_COMMAND_STR_CASE(MODIFY_NIC_VPORT_CONTEXT); 539 MLX5_COMMAND_STR_CASE(QUERY_ROCE_ADDRESS); 540 MLX5_COMMAND_STR_CASE(SET_ROCE_ADDRESS); 541 MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_CONTEXT); 542 MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT); 543 MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID); 544 MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY); 545 MLX5_COMMAND_STR_CASE(QUERY_VNIC_ENV); 546 MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER); 547 MLX5_COMMAND_STR_CASE(SET_WOL_ROL); 548 MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL); 549 MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER); 550 MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER); 551 MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER); 552 MLX5_COMMAND_STR_CASE(ALLOC_PD); 553 MLX5_COMMAND_STR_CASE(DEALLOC_PD); 554 MLX5_COMMAND_STR_CASE(ALLOC_UAR); 555 MLX5_COMMAND_STR_CASE(DEALLOC_UAR); 556 MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION); 557 MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG); 558 MLX5_COMMAND_STR_CASE(DETACH_FROM_MCG); 559 MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG); 560 MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX); 561 MLX5_COMMAND_STR_CASE(SET_MAD_DEMUX); 562 MLX5_COMMAND_STR_CASE(NOP); 563 MLX5_COMMAND_STR_CASE(ALLOC_XRCD); 564 MLX5_COMMAND_STR_CASE(DEALLOC_XRCD); 565 MLX5_COMMAND_STR_CASE(ALLOC_TRANSPORT_DOMAIN); 566 MLX5_COMMAND_STR_CASE(DEALLOC_TRANSPORT_DOMAIN); 567 MLX5_COMMAND_STR_CASE(QUERY_CONG_STATUS); 568 MLX5_COMMAND_STR_CASE(MODIFY_CONG_STATUS); 569 MLX5_COMMAND_STR_CASE(QUERY_CONG_PARAMS); 570 MLX5_COMMAND_STR_CASE(MODIFY_CONG_PARAMS); 571 MLX5_COMMAND_STR_CASE(QUERY_CONG_STATISTICS); 572 MLX5_COMMAND_STR_CASE(ADD_VXLAN_UDP_DPORT); 573 MLX5_COMMAND_STR_CASE(DELETE_VXLAN_UDP_DPORT); 574 MLX5_COMMAND_STR_CASE(SET_L2_TABLE_ENTRY); 575 MLX5_COMMAND_STR_CASE(QUERY_L2_TABLE_ENTRY); 576 MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY); 577 MLX5_COMMAND_STR_CASE(CREATE_RMP); 578 MLX5_COMMAND_STR_CASE(MODIFY_RMP); 579 MLX5_COMMAND_STR_CASE(DESTROY_RMP); 580 MLX5_COMMAND_STR_CASE(QUERY_RMP); 581 MLX5_COMMAND_STR_CASE(CREATE_RQT); 582 MLX5_COMMAND_STR_CASE(MODIFY_RQT); 583 MLX5_COMMAND_STR_CASE(DESTROY_RQT); 584 MLX5_COMMAND_STR_CASE(QUERY_RQT); 585 MLX5_COMMAND_STR_CASE(ACCESS_REG); 586 MLX5_COMMAND_STR_CASE(CREATE_SQ); 587 MLX5_COMMAND_STR_CASE(MODIFY_SQ); 588 MLX5_COMMAND_STR_CASE(DESTROY_SQ); 589 MLX5_COMMAND_STR_CASE(QUERY_SQ); 590 MLX5_COMMAND_STR_CASE(CREATE_RQ); 591 MLX5_COMMAND_STR_CASE(MODIFY_RQ); 592 MLX5_COMMAND_STR_CASE(DESTROY_RQ); 593 MLX5_COMMAND_STR_CASE(QUERY_RQ); 594 MLX5_COMMAND_STR_CASE(CREATE_TIR); 595 MLX5_COMMAND_STR_CASE(MODIFY_TIR); 596 MLX5_COMMAND_STR_CASE(DESTROY_TIR); 597 MLX5_COMMAND_STR_CASE(QUERY_TIR); 598 MLX5_COMMAND_STR_CASE(CREATE_TIS); 599 MLX5_COMMAND_STR_CASE(MODIFY_TIS); 600 MLX5_COMMAND_STR_CASE(DESTROY_TIS); 601 MLX5_COMMAND_STR_CASE(QUERY_TIS); 602 MLX5_COMMAND_STR_CASE(CREATE_FLOW_TABLE); 603 MLX5_COMMAND_STR_CASE(DESTROY_FLOW_TABLE); 604 MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE); 605 MLX5_COMMAND_STR_CASE(CREATE_FLOW_GROUP); 606 MLX5_COMMAND_STR_CASE(DESTROY_FLOW_GROUP); 607 MLX5_COMMAND_STR_CASE(QUERY_FLOW_GROUP); 608 MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY); 609 MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY); 610 MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY); 611 MLX5_COMMAND_STR_CASE(SET_DIAGNOSTICS); 612 MLX5_COMMAND_STR_CASE(QUERY_DIAGNOSTICS); 613 MLX5_COMMAND_STR_CASE(CREATE_GENERAL_OBJ); 614 MLX5_COMMAND_STR_CASE(MODIFY_GENERAL_OBJ); 615 MLX5_COMMAND_STR_CASE(QUERY_GENERAL_OBJ); 616 MLX5_COMMAND_STR_CASE(DESTROY_GENERAL_OBJ); 617 MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER); 618 MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER); 619 MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER); 620 default: return "unknown command opcode"; 621 } 622 } 623 624 static const char *cmd_status_str(u8 status) 625 { 626 switch (status) { 627 case MLX5_CMD_STAT_OK: 628 return "OK"; 629 case MLX5_CMD_STAT_INT_ERR: 630 return "internal error"; 631 case MLX5_CMD_STAT_BAD_OP_ERR: 632 return "bad operation"; 633 case MLX5_CMD_STAT_BAD_PARAM_ERR: 634 return "bad parameter"; 635 case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: 636 return "bad system state"; 637 case MLX5_CMD_STAT_BAD_RES_ERR: 638 return "bad resource"; 639 case MLX5_CMD_STAT_RES_BUSY: 640 return "resource busy"; 641 case MLX5_CMD_STAT_LIM_ERR: 642 return "limits exceeded"; 643 case MLX5_CMD_STAT_BAD_RES_STATE_ERR: 644 return "bad resource state"; 645 case MLX5_CMD_STAT_IX_ERR: 646 return "bad index"; 647 case MLX5_CMD_STAT_NO_RES_ERR: 648 return "no resources"; 649 case MLX5_CMD_STAT_BAD_INP_LEN_ERR: 650 return "bad input length"; 651 case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: 652 return "bad output length"; 653 case MLX5_CMD_STAT_BAD_QP_STATE_ERR: 654 return "bad QP state"; 655 case MLX5_CMD_STAT_BAD_PKT_ERR: 656 return "bad packet (discarded)"; 657 case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: 658 return "bad size too many outstanding CQEs"; 659 default: 660 return "unknown status"; 661 } 662 } 663 664 static int cmd_status_to_err_helper(u8 status) 665 { 666 switch (status) { 667 case MLX5_CMD_STAT_OK: return 0; 668 case MLX5_CMD_STAT_INT_ERR: return -EIO; 669 case MLX5_CMD_STAT_BAD_OP_ERR: return -EINVAL; 670 case MLX5_CMD_STAT_BAD_PARAM_ERR: return -EINVAL; 671 case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO; 672 case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL; 673 case MLX5_CMD_STAT_RES_BUSY: return -EBUSY; 674 case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM; 675 case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL; 676 case MLX5_CMD_STAT_IX_ERR: return -EINVAL; 677 case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN; 678 case MLX5_CMD_STAT_BAD_INP_LEN_ERR: return -EIO; 679 case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO; 680 case MLX5_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL; 681 case MLX5_CMD_STAT_BAD_PKT_ERR: return -EINVAL; 682 case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL; 683 default: return -EIO; 684 } 685 } 686 687 void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome) 688 { 689 *status = MLX5_GET(mbox_out, out, status); 690 *syndrome = MLX5_GET(mbox_out, out, syndrome); 691 } 692 693 static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out) 694 { 695 u32 syndrome; 696 u8 status; 697 u16 opcode; 698 u16 op_mod; 699 700 mlx5_cmd_mbox_status(out, &status, &syndrome); 701 if (!status) 702 return 0; 703 704 opcode = MLX5_GET(mbox_in, in, opcode); 705 op_mod = MLX5_GET(mbox_in, in, op_mod); 706 707 mlx5_core_err(dev, 708 "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n", 709 mlx5_command_str(opcode), 710 opcode, op_mod, 711 cmd_status_str(status), 712 status, 713 syndrome); 714 715 return cmd_status_to_err_helper(status); 716 } 717 718 static void dump_command(struct mlx5_core_dev *dev, 719 struct mlx5_cmd_work_ent *ent, int input) 720 { 721 struct mlx5_cmd_msg *msg = input ? ent->in : ent->out; 722 u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode); 723 size_t i; 724 int data_only; 725 int offset = 0; 726 int msg_len = input ? ent->uin_size : ent->uout_size; 727 int dump_len; 728 729 data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA)); 730 731 if (data_only) 732 mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA, 733 "dump command data %s(0x%x) %s\n", 734 mlx5_command_str(op), op, 735 input ? "INPUT" : "OUTPUT"); 736 else 737 mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n", 738 mlx5_command_str(op), op, 739 input ? "INPUT" : "OUTPUT"); 740 741 if (data_only) { 742 if (input) { 743 dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset); 744 offset += sizeof(ent->lay->in); 745 } else { 746 dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset); 747 offset += sizeof(ent->lay->out); 748 } 749 } else { 750 dump_buf(ent->lay, sizeof(*ent->lay), 0, offset); 751 offset += sizeof(*ent->lay); 752 } 753 754 for (i = 0; i != (msg->numpages * MLX5_NUM_CMDS_IN_ADAPTER_PAGE); i++) { 755 struct mlx5_cmd_prot_block *block; 756 757 block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE); 758 759 if (data_only) { 760 if (offset >= msg_len) 761 break; 762 dump_len = min_t(int, 763 MLX5_CMD_DATA_BLOCK_SIZE, msg_len - offset); 764 765 dump_buf(block->data, dump_len, 1, offset); 766 offset += MLX5_CMD_DATA_BLOCK_SIZE; 767 } else { 768 mlx5_core_dbg(dev, "command block:\n"); 769 dump_buf(block, sizeof(*block), 0, offset); 770 offset += sizeof(*block); 771 } 772 773 /* check for last block */ 774 if (block->next == 0) 775 break; 776 } 777 778 if (data_only) 779 pr_debug("\n"); 780 } 781 782 static u16 msg_to_opcode(struct mlx5_cmd_msg *in) 783 { 784 return MLX5_GET(mbox_in, in->first.data, opcode); 785 } 786 787 static void cb_timeout_handler(struct work_struct *work) 788 { 789 struct delayed_work *dwork = container_of(work, struct delayed_work, 790 work); 791 struct mlx5_cmd_work_ent *ent = container_of(dwork, 792 struct mlx5_cmd_work_ent, 793 cb_timeout_work); 794 struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev, 795 cmd); 796 797 ent->ret = -ETIMEDOUT; 798 mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n", 799 mlx5_command_str(msg_to_opcode(ent->in)), 800 msg_to_opcode(ent->in)); 801 mlx5_cmd_comp_handler(dev, 1UL << ent->idx, MLX5_CMD_MODE_EVENTS); 802 } 803 804 static void complete_command(struct mlx5_cmd_work_ent *ent) 805 { 806 struct mlx5_cmd *cmd = ent->cmd; 807 struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, 808 cmd); 809 mlx5_cmd_cbk_t callback; 810 void *context; 811 812 s64 ds; 813 struct mlx5_cmd_stats *stats; 814 unsigned long flags; 815 int err; 816 struct semaphore *sem; 817 818 if (ent->page_queue) 819 sem = &cmd->pages_sem; 820 else 821 sem = &cmd->sem; 822 823 if (dev->state != MLX5_DEVICE_STATE_UP) { 824 u8 status = 0; 825 u32 drv_synd; 826 827 ent->ret = mlx5_internal_err_ret_value(dev, msg_to_opcode(ent->in), &drv_synd, &status); 828 MLX5_SET(mbox_out, ent->out, status, status); 829 MLX5_SET(mbox_out, ent->out, syndrome, drv_synd); 830 } 831 832 if (ent->callback) { 833 ds = ent->ts2 - ent->ts1; 834 if (ent->op < ARRAY_SIZE(cmd->stats)) { 835 stats = &cmd->stats[ent->op]; 836 spin_lock_irqsave(&stats->lock, flags); 837 stats->sum += ds; 838 ++stats->n; 839 spin_unlock_irqrestore(&stats->lock, flags); 840 } 841 842 callback = ent->callback; 843 context = ent->context; 844 err = ent->ret; 845 if (!err) { 846 err = mlx5_copy_from_msg(ent->uout, 847 ent->out, 848 ent->uout_size); 849 err = err ? err : mlx5_cmd_check(dev, 850 ent->in->first.data, 851 ent->uout); 852 } 853 854 mlx5_free_cmd_msg(dev, ent->out); 855 free_msg(dev, ent->in); 856 857 err = err ? err : ent->status; 858 free_cmd(ent); 859 callback(err, context); 860 } else { 861 complete(&ent->done); 862 } 863 up(sem); 864 } 865 866 static void cmd_work_handler(struct work_struct *work) 867 { 868 struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work); 869 struct mlx5_cmd *cmd = ent->cmd; 870 struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd); 871 unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC); 872 struct mlx5_cmd_layout *lay; 873 struct semaphore *sem; 874 bool poll_cmd = ent->polling; 875 876 sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem; 877 down(sem); 878 879 if (alloc_ent(ent) < 0) { 880 complete_command(ent); 881 return; 882 } 883 884 ent->token = alloc_token(cmd); 885 lay = get_inst(cmd, ent->idx); 886 ent->lay = lay; 887 memset(lay, 0, sizeof(*lay)); 888 memcpy(lay->in, ent->in->first.data, sizeof(lay->in)); 889 ent->op = be32_to_cpu(lay->in[0]) >> 16; 890 if (ent->in->numpages != 0) 891 lay->in_ptr = cpu_to_be64(mlx5_fwp_get_dma(ent->in, 0)); 892 if (ent->out->numpages != 0) 893 lay->out_ptr = cpu_to_be64(mlx5_fwp_get_dma(ent->out, 0)); 894 lay->inlen = cpu_to_be32(ent->uin_size); 895 lay->outlen = cpu_to_be32(ent->uout_size); 896 lay->type = MLX5_PCI_CMD_XPORT; 897 lay->token = ent->token; 898 lay->status_own = CMD_OWNER_HW; 899 set_signature(ent, !cmd->checksum_disabled); 900 dump_command(dev, ent, 1); 901 ent->ts1 = ktime_get_ns(); 902 ent->busy = 0; 903 if (ent->callback) 904 schedule_delayed_work(&ent->cb_timeout_work, cb_timeout); 905 906 /* ring doorbell after the descriptor is valid */ 907 mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx); 908 /* make sure data is written to RAM */ 909 mlx5_fwp_flush(cmd->cmd_page); 910 iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell); 911 mmiowb(); 912 913 /* if not in polling don't use ent after this point */ 914 if (poll_cmd) { 915 poll_timeout(ent); 916 /* make sure we read the descriptor after ownership is SW */ 917 mlx5_cmd_comp_handler(dev, 1U << ent->idx, MLX5_CMD_MODE_POLLING); 918 } 919 } 920 921 static const char *deliv_status_to_str(u8 status) 922 { 923 switch (status) { 924 case MLX5_CMD_DELIVERY_STAT_OK: 925 return "no errors"; 926 case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR: 927 return "signature error"; 928 case MLX5_CMD_DELIVERY_STAT_TOK_ERR: 929 return "token error"; 930 case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR: 931 return "bad block number"; 932 case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR: 933 return "output pointer not aligned to block size"; 934 case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR: 935 return "input pointer not aligned to block size"; 936 case MLX5_CMD_DELIVERY_STAT_FW_ERR: 937 return "firmware internal error"; 938 case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR: 939 return "command input length error"; 940 case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR: 941 return "command output length error"; 942 case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR: 943 return "reserved fields not cleared"; 944 case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR: 945 return "bad command descriptor type"; 946 default: 947 return "unknown status code"; 948 } 949 } 950 951 static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) 952 { 953 int timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC); 954 int err; 955 956 if (ent->polling) { 957 wait_for_completion(&ent->done); 958 } else if (!wait_for_completion_timeout(&ent->done, timeout)) { 959 ent->ret = -ETIMEDOUT; 960 mlx5_cmd_comp_handler(dev, 1UL << ent->idx, MLX5_CMD_MODE_EVENTS); 961 } 962 963 err = ent->ret; 964 965 if (err == -ETIMEDOUT) { 966 mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n", 967 mlx5_command_str(msg_to_opcode(ent->in)), 968 msg_to_opcode(ent->in)); 969 } 970 mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", 971 err, deliv_status_to_str(ent->status), ent->status); 972 973 return err; 974 } 975 976 /* Notes: 977 * 1. Callback functions may not sleep 978 * 2. page queue commands do not support asynchrous completion 979 */ 980 static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, 981 int uin_size, 982 struct mlx5_cmd_msg *out, void *uout, int uout_size, 983 mlx5_cmd_cbk_t callback, 984 void *context, int page_queue, u8 *status, 985 bool force_polling) 986 { 987 struct mlx5_cmd *cmd = &dev->cmd; 988 struct mlx5_cmd_work_ent *ent; 989 struct mlx5_cmd_stats *stats; 990 int err = 0; 991 s64 ds; 992 u16 op; 993 994 if (callback && page_queue) 995 return -EINVAL; 996 997 ent = alloc_cmd(cmd, in, uin_size, out, uout, uout_size, callback, 998 context, page_queue); 999 if (IS_ERR(ent)) 1000 return PTR_ERR(ent); 1001 1002 ent->polling = force_polling || (cmd->mode == MLX5_CMD_MODE_POLLING); 1003 1004 if (!callback) 1005 init_completion(&ent->done); 1006 1007 INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler); 1008 INIT_WORK(&ent->work, cmd_work_handler); 1009 if (page_queue) { 1010 cmd_work_handler(&ent->work); 1011 } else if (!queue_work(dev->priv.health.wq_cmd, &ent->work)) { 1012 mlx5_core_warn(dev, "failed to queue work\n"); 1013 err = -ENOMEM; 1014 goto out_free; 1015 } 1016 1017 if (callback) 1018 goto out; 1019 1020 err = wait_func(dev, ent); 1021 if (err == -ETIMEDOUT) 1022 goto out; 1023 1024 ds = ent->ts2 - ent->ts1; 1025 op = MLX5_GET(mbox_in, in->first.data, opcode); 1026 if (op < ARRAY_SIZE(cmd->stats)) { 1027 stats = &cmd->stats[op]; 1028 spin_lock_irq(&stats->lock); 1029 stats->sum += ds; 1030 ++stats->n; 1031 spin_unlock_irq(&stats->lock); 1032 } 1033 mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME, 1034 "fw exec time for %s is %lld nsec\n", 1035 mlx5_command_str(op), (long long)ds); 1036 *status = ent->status; 1037 free_cmd(ent); 1038 1039 return err; 1040 1041 out_free: 1042 free_cmd(ent); 1043 out: 1044 return err; 1045 } 1046 1047 static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, size_t size) 1048 { 1049 size_t delta; 1050 size_t i; 1051 1052 if (to == NULL || from == NULL) 1053 return (-ENOMEM); 1054 1055 delta = min_t(size_t, size, sizeof(to->first.data)); 1056 memcpy(to->first.data, from, delta); 1057 from = (char *)from + delta; 1058 size -= delta; 1059 1060 for (i = 0; size != 0; i++) { 1061 struct mlx5_cmd_prot_block *block; 1062 1063 block = mlx5_fwp_get_virt(to, i * MLX5_CMD_MBOX_SIZE); 1064 1065 delta = min_t(size_t, size, MLX5_CMD_DATA_BLOCK_SIZE); 1066 memcpy(block->data, from, delta); 1067 from = (char *)from + delta; 1068 size -= delta; 1069 } 1070 return (0); 1071 } 1072 1073 static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size) 1074 { 1075 size_t delta; 1076 size_t i; 1077 1078 if (to == NULL || from == NULL) 1079 return (-ENOMEM); 1080 1081 delta = min_t(size_t, size, sizeof(from->first.data)); 1082 memcpy(to, from->first.data, delta); 1083 to = (char *)to + delta; 1084 size -= delta; 1085 1086 for (i = 0; size != 0; i++) { 1087 struct mlx5_cmd_prot_block *block; 1088 1089 block = mlx5_fwp_get_virt(from, i * MLX5_CMD_MBOX_SIZE); 1090 1091 delta = min_t(size_t, size, MLX5_CMD_DATA_BLOCK_SIZE); 1092 memcpy(to, block->data, delta); 1093 to = (char *)to + delta; 1094 size -= delta; 1095 } 1096 return (0); 1097 } 1098 1099 static struct mlx5_cmd_msg * 1100 mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev, gfp_t flags, size_t size) 1101 { 1102 struct mlx5_cmd_msg *msg; 1103 size_t blen; 1104 size_t n; 1105 size_t i; 1106 1107 blen = size - min_t(size_t, sizeof(msg->first.data), size); 1108 n = howmany(blen, MLX5_CMD_DATA_BLOCK_SIZE); 1109 1110 msg = mlx5_fwp_alloc(dev, flags, howmany(n, MLX5_NUM_CMDS_IN_ADAPTER_PAGE)); 1111 if (msg == NULL) 1112 return (ERR_PTR(-ENOMEM)); 1113 1114 for (i = 0; i != n; i++) { 1115 struct mlx5_cmd_prot_block *block; 1116 1117 block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE); 1118 1119 if (i != (n - 1)) { 1120 memset(block, 0, MLX5_CMD_MBOX_SIZE); 1121 1122 u64 dma = mlx5_fwp_get_dma(msg, (i + 1) * MLX5_CMD_MBOX_SIZE); 1123 block->next = cpu_to_be64(dma); 1124 } else { 1125 /* Zero the rest of the page to satisfy KMSAN. */ 1126 memset(block, 0, MLX5_ADAPTER_PAGE_SIZE - 1127 (i % MLX5_NUM_CMDS_IN_ADAPTER_PAGE) * 1128 MLX5_CMD_MBOX_SIZE); 1129 } 1130 block->block_num = cpu_to_be32(i); 1131 } 1132 1133 /* make sure initial data is written to RAM */ 1134 mlx5_fwp_flush(msg); 1135 1136 return (msg); 1137 } 1138 1139 static void 1140 mlx5_free_cmd_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg) 1141 { 1142 1143 mlx5_fwp_free(msg); 1144 } 1145 1146 static void clean_debug_files(struct mlx5_core_dev *dev) 1147 { 1148 } 1149 1150 1151 static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode) 1152 { 1153 struct mlx5_cmd *cmd = &dev->cmd; 1154 int i; 1155 1156 if (cmd->mode == mode) 1157 return; 1158 1159 for (i = 0; i < cmd->max_reg_cmds; i++) 1160 down(&cmd->sem); 1161 1162 down(&cmd->pages_sem); 1163 cmd->mode = mode; 1164 1165 up(&cmd->pages_sem); 1166 for (i = 0; i < cmd->max_reg_cmds; i++) 1167 up(&cmd->sem); 1168 } 1169 1170 void mlx5_cmd_use_events(struct mlx5_core_dev *dev) 1171 { 1172 mlx5_cmd_change_mod(dev, MLX5_CMD_MODE_EVENTS); 1173 } 1174 1175 void mlx5_cmd_use_polling(struct mlx5_core_dev *dev) 1176 { 1177 mlx5_cmd_change_mod(dev, MLX5_CMD_MODE_POLLING); 1178 } 1179 1180 static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg) 1181 { 1182 unsigned long flags; 1183 1184 if (msg->cache) { 1185 spin_lock_irqsave(&msg->cache->lock, flags); 1186 list_add_tail(&msg->list, &msg->cache->head); 1187 spin_unlock_irqrestore(&msg->cache->lock, flags); 1188 } else { 1189 mlx5_free_cmd_msg(dev, msg); 1190 } 1191 } 1192 1193 void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vector_flags, 1194 enum mlx5_cmd_mode cmd_mode) 1195 { 1196 struct mlx5_cmd *cmd = &dev->cmd; 1197 struct mlx5_cmd_work_ent *ent; 1198 bool triggered = (vector_flags & MLX5_TRIGGERED_CMD_COMP) ? 1 : 0; 1199 u32 vector = vector_flags; /* discard flags in the upper dword */ 1200 int i; 1201 1202 /* make sure data gets read from RAM */ 1203 mlx5_fwp_invalidate(cmd->cmd_page); 1204 1205 while (vector != 0) { 1206 i = ffs(vector) - 1; 1207 vector &= ~(1U << i); 1208 /* check command mode */ 1209 if (cmd->ent_mode[i] != cmd_mode) 1210 continue; 1211 ent = cmd->ent_arr[i]; 1212 /* check if command was already handled */ 1213 if (ent == NULL) 1214 continue; 1215 if (ent->callback) 1216 cancel_delayed_work(&ent->cb_timeout_work); 1217 ent->ts2 = ktime_get_ns(); 1218 memcpy(ent->out->first.data, ent->lay->out, 1219 sizeof(ent->lay->out)); 1220 /* make sure data gets read from RAM */ 1221 mlx5_fwp_invalidate(ent->out); 1222 dump_command(dev, ent, 0); 1223 if (!ent->ret) { 1224 if (!cmd->checksum_disabled) 1225 ent->ret = verify_signature(ent); 1226 else 1227 ent->ret = 0; 1228 1229 if (triggered) 1230 ent->status = MLX5_DRIVER_STATUS_ABORTED; 1231 else 1232 ent->status = ent->lay->status_own >> 1; 1233 1234 mlx5_core_dbg(dev, 1235 "FW command ret 0x%x, status %s(0x%x)\n", 1236 ent->ret, 1237 deliv_status_to_str(ent->status), 1238 ent->status); 1239 } 1240 free_ent(cmd, ent->idx); 1241 complete_command(ent); 1242 } 1243 } 1244 EXPORT_SYMBOL(mlx5_cmd_comp_handler); 1245 1246 static int status_to_err(u8 status) 1247 { 1248 return status ? -EIO : 0; /* TBD more meaningful codes */ 1249 } 1250 1251 static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size, 1252 gfp_t gfp) 1253 { 1254 struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM); 1255 struct mlx5_cmd *cmd = &dev->cmd; 1256 struct cache_ent *ent = NULL; 1257 1258 if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE) 1259 ent = &cmd->cache.large; 1260 else if (in_size > 16 && in_size <= MED_LIST_SIZE) 1261 ent = &cmd->cache.med; 1262 1263 if (ent) { 1264 spin_lock_irq(&ent->lock); 1265 if (!list_empty(&ent->head)) { 1266 msg = list_entry(ent->head.next, struct mlx5_cmd_msg, 1267 list); 1268 list_del(&msg->list); 1269 } 1270 spin_unlock_irq(&ent->lock); 1271 } 1272 1273 if (IS_ERR(msg)) 1274 msg = mlx5_alloc_cmd_msg(dev, gfp, in_size); 1275 1276 return msg; 1277 } 1278 1279 static int is_manage_pages(void *in) 1280 { 1281 return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES; 1282 } 1283 1284 static int cmd_exec_helper(struct mlx5_core_dev *dev, 1285 void *in, int in_size, 1286 void *out, int out_size, 1287 mlx5_cmd_cbk_t callback, void *context, 1288 bool force_polling) 1289 { 1290 struct mlx5_cmd_msg *inb; 1291 struct mlx5_cmd_msg *outb; 1292 int pages_queue; 1293 const gfp_t gfp = GFP_KERNEL; 1294 int err; 1295 u8 status = 0; 1296 u32 drv_synd; 1297 1298 if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { 1299 u16 opcode = MLX5_GET(mbox_in, in, opcode); 1300 err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status); 1301 MLX5_SET(mbox_out, out, status, status); 1302 MLX5_SET(mbox_out, out, syndrome, drv_synd); 1303 return err; 1304 } 1305 1306 pages_queue = is_manage_pages(in); 1307 1308 inb = alloc_msg(dev, in_size, gfp); 1309 if (IS_ERR(inb)) { 1310 err = PTR_ERR(inb); 1311 return err; 1312 } 1313 1314 err = mlx5_copy_to_msg(inb, in, in_size); 1315 if (err) { 1316 mlx5_core_warn(dev, "err %d\n", err); 1317 goto out_in; 1318 } 1319 1320 outb = mlx5_alloc_cmd_msg(dev, gfp, out_size); 1321 if (IS_ERR(outb)) { 1322 err = PTR_ERR(outb); 1323 goto out_in; 1324 } 1325 1326 err = mlx5_cmd_invoke(dev, inb, in_size, outb, out, out_size, callback, 1327 context, pages_queue, &status, force_polling); 1328 if (err) { 1329 if (err == -ETIMEDOUT) 1330 return err; 1331 goto out_out; 1332 } 1333 1334 mlx5_core_dbg(dev, "err %d, status %d\n", err, status); 1335 if (status) { 1336 err = status_to_err(status); 1337 goto out_out; 1338 } 1339 1340 if (callback) 1341 return err; 1342 1343 err = mlx5_copy_from_msg(out, outb, out_size); 1344 1345 out_out: 1346 mlx5_free_cmd_msg(dev, outb); 1347 1348 out_in: 1349 free_msg(dev, inb); 1350 return err; 1351 } 1352 1353 int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, 1354 int out_size) 1355 { 1356 int err; 1357 1358 err = cmd_exec_helper(dev, in, in_size, out, out_size, NULL, NULL, false); 1359 return err ? : mlx5_cmd_check(dev, in, out); 1360 } 1361 EXPORT_SYMBOL(mlx5_cmd_exec); 1362 1363 void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev, 1364 struct mlx5_async_ctx *ctx) 1365 { 1366 ctx->dev = dev; 1367 /* Starts at 1 to avoid doing wake_up if we are not cleaning up */ 1368 atomic_set(&ctx->num_inflight, 1); 1369 init_waitqueue_head(&ctx->wait); 1370 } 1371 EXPORT_SYMBOL(mlx5_cmd_init_async_ctx); 1372 1373 /** 1374 * mlx5_cmd_cleanup_async_ctx - Clean up an async_ctx 1375 * @ctx: The ctx to clean 1376 * 1377 * Upon return all callbacks given to mlx5_cmd_exec_cb() have been called. The 1378 * caller must ensure that mlx5_cmd_exec_cb() is not called during or after 1379 * the call mlx5_cleanup_async_ctx(). 1380 */ 1381 void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx) 1382 { 1383 atomic_dec(&ctx->num_inflight); 1384 wait_event(ctx->wait, atomic_read(&ctx->num_inflight) == 0); 1385 } 1386 EXPORT_SYMBOL(mlx5_cmd_cleanup_async_ctx); 1387 1388 static void mlx5_cmd_exec_cb_handler(int status, void *_work) 1389 { 1390 struct mlx5_async_work *work = _work; 1391 struct mlx5_async_ctx *ctx = work->ctx; 1392 1393 work->user_callback(status, work); 1394 if (atomic_dec_and_test(&ctx->num_inflight)) 1395 wake_up(&ctx->wait); 1396 } 1397 1398 int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size, 1399 void *out, int out_size, mlx5_async_cbk_t callback, 1400 struct mlx5_async_work *work) 1401 { 1402 int ret; 1403 1404 work->ctx = ctx; 1405 work->user_callback = callback; 1406 if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight))) 1407 return -EIO; 1408 ret = cmd_exec_helper(ctx->dev, in, in_size, out, out_size, 1409 mlx5_cmd_exec_cb_handler, work, false); 1410 if (ret && atomic_dec_and_test(&ctx->num_inflight)) 1411 wake_up(&ctx->wait); 1412 1413 return ret; 1414 } 1415 EXPORT_SYMBOL(mlx5_cmd_exec_cb); 1416 1417 int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size, 1418 void *out, int out_size) 1419 { 1420 int err; 1421 1422 err = cmd_exec_helper(dev, in, in_size, out, out_size, NULL, NULL, true); 1423 return err ? : mlx5_cmd_check(dev, in, out); 1424 } 1425 EXPORT_SYMBOL(mlx5_cmd_exec_polling); 1426 1427 static void destroy_msg_cache(struct mlx5_core_dev *dev) 1428 { 1429 struct mlx5_cmd *cmd = &dev->cmd; 1430 struct mlx5_cmd_msg *msg; 1431 struct mlx5_cmd_msg *n; 1432 1433 list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) { 1434 list_del(&msg->list); 1435 mlx5_free_cmd_msg(dev, msg); 1436 } 1437 1438 list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) { 1439 list_del(&msg->list); 1440 mlx5_free_cmd_msg(dev, msg); 1441 } 1442 } 1443 1444 static int create_msg_cache(struct mlx5_core_dev *dev) 1445 { 1446 struct mlx5_cmd *cmd = &dev->cmd; 1447 struct mlx5_cmd_msg *msg; 1448 int err; 1449 int i; 1450 1451 spin_lock_init(&cmd->cache.large.lock); 1452 INIT_LIST_HEAD(&cmd->cache.large.head); 1453 spin_lock_init(&cmd->cache.med.lock); 1454 INIT_LIST_HEAD(&cmd->cache.med.head); 1455 1456 for (i = 0; i < NUM_LONG_LISTS; i++) { 1457 msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE); 1458 if (IS_ERR(msg)) { 1459 err = PTR_ERR(msg); 1460 goto ex_err; 1461 } 1462 msg->cache = &cmd->cache.large; 1463 list_add_tail(&msg->list, &cmd->cache.large.head); 1464 } 1465 1466 for (i = 0; i < NUM_MED_LISTS; i++) { 1467 msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE); 1468 if (IS_ERR(msg)) { 1469 err = PTR_ERR(msg); 1470 goto ex_err; 1471 } 1472 msg->cache = &cmd->cache.med; 1473 list_add_tail(&msg->list, &cmd->cache.med.head); 1474 } 1475 1476 return 0; 1477 1478 ex_err: 1479 destroy_msg_cache(dev); 1480 return err; 1481 } 1482 1483 static int 1484 alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) 1485 { 1486 int err; 1487 1488 sx_init(&cmd->dma_sx, "MLX5-DMA-SX"); 1489 mtx_init(&cmd->dma_mtx, "MLX5-DMA-MTX", NULL, MTX_DEF); 1490 cv_init(&cmd->dma_cv, "MLX5-DMA-CV"); 1491 1492 /* 1493 * Create global DMA descriptor tag for allocating 1494 * 4K firmware pages: 1495 */ 1496 err = -bus_dma_tag_create( 1497 bus_get_dma_tag(dev->pdev->dev.bsddev), 1498 MLX5_ADAPTER_PAGE_SIZE, /* alignment */ 1499 0, /* no boundary */ 1500 BUS_SPACE_MAXADDR, /* lowaddr */ 1501 BUS_SPACE_MAXADDR, /* highaddr */ 1502 NULL, NULL, /* filter, filterarg */ 1503 MLX5_ADAPTER_PAGE_SIZE, /* maxsize */ 1504 1, /* nsegments */ 1505 MLX5_ADAPTER_PAGE_SIZE, /* maxsegsize */ 1506 0, /* flags */ 1507 NULL, NULL, /* lockfunc, lockfuncarg */ 1508 &cmd->dma_tag); 1509 if (err != 0) 1510 goto failure_destroy_sx; 1511 1512 cmd->cmd_page = mlx5_fwp_alloc(dev, GFP_KERNEL, 1); 1513 if (cmd->cmd_page == NULL) { 1514 err = -ENOMEM; 1515 goto failure_alloc_page; 1516 } 1517 cmd->dma = mlx5_fwp_get_dma(cmd->cmd_page, 0); 1518 cmd->cmd_buf = mlx5_fwp_get_virt(cmd->cmd_page, 0); 1519 memset(cmd->cmd_buf, 0, MLX5_ADAPTER_PAGE_SIZE); 1520 return (0); 1521 1522 failure_alloc_page: 1523 bus_dma_tag_destroy(cmd->dma_tag); 1524 1525 failure_destroy_sx: 1526 cv_destroy(&cmd->dma_cv); 1527 mtx_destroy(&cmd->dma_mtx); 1528 sx_destroy(&cmd->dma_sx); 1529 return (err); 1530 } 1531 1532 static void 1533 free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) 1534 { 1535 1536 mlx5_fwp_free(cmd->cmd_page); 1537 bus_dma_tag_destroy(cmd->dma_tag); 1538 cv_destroy(&cmd->dma_cv); 1539 mtx_destroy(&cmd->dma_mtx); 1540 sx_destroy(&cmd->dma_sx); 1541 } 1542 1543 int mlx5_cmd_init(struct mlx5_core_dev *dev) 1544 { 1545 struct mlx5_cmd *cmd = &dev->cmd; 1546 u32 cmd_h, cmd_l; 1547 u16 cmd_if_rev; 1548 int err; 1549 int i; 1550 1551 memset(cmd, 0, sizeof(*cmd)); 1552 cmd_if_rev = cmdif_rev_get(dev); 1553 if (cmd_if_rev != CMD_IF_REV) { 1554 mlx5_core_err(dev, 1555 "Driver cmdif rev(%d) differs from firmware's(%d)\n", 1556 CMD_IF_REV, cmd_if_rev); 1557 return -EINVAL; 1558 } 1559 1560 err = alloc_cmd_page(dev, cmd); 1561 if (err) 1562 goto err_free_pool; 1563 1564 cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff; 1565 cmd->log_sz = cmd_l >> 4 & 0xf; 1566 cmd->log_stride = cmd_l & 0xf; 1567 if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) { 1568 mlx5_core_err(dev, 1569 "firmware reports too many outstanding commands %d\n", 1570 1 << cmd->log_sz); 1571 err = -EINVAL; 1572 goto err_free_page; 1573 } 1574 1575 if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) { 1576 mlx5_core_err(dev, 1577 "command queue size overflow\n"); 1578 err = -EINVAL; 1579 goto err_free_page; 1580 } 1581 1582 cmd->checksum_disabled = 1; 1583 cmd->max_reg_cmds = (1 << cmd->log_sz) - 1; 1584 cmd->bitmask = (1 << cmd->max_reg_cmds) - 1; 1585 1586 cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16; 1587 if (cmd->cmdif_rev > CMD_IF_REV) { 1588 mlx5_core_err(dev, 1589 "driver does not support command interface version. driver %d, firmware %d\n", 1590 CMD_IF_REV, cmd->cmdif_rev); 1591 err = -ENOTSUPP; 1592 goto err_free_page; 1593 } 1594 1595 spin_lock_init(&cmd->alloc_lock); 1596 spin_lock_init(&cmd->token_lock); 1597 for (i = 0; i < ARRAY_SIZE(cmd->stats); i++) 1598 spin_lock_init(&cmd->stats[i].lock); 1599 1600 sema_init(&cmd->sem, cmd->max_reg_cmds); 1601 sema_init(&cmd->pages_sem, 1); 1602 1603 cmd_h = (u32)((u64)(cmd->dma) >> 32); 1604 cmd_l = (u32)(cmd->dma); 1605 if (cmd_l & 0xfff) { 1606 mlx5_core_err(dev, "invalid command queue address\n"); 1607 err = -ENOMEM; 1608 goto err_free_page; 1609 } 1610 1611 iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h); 1612 iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz); 1613 1614 /* Make sure firmware sees the complete address before we proceed */ 1615 wmb(); 1616 1617 mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma)); 1618 1619 cmd->mode = MLX5_CMD_MODE_POLLING; 1620 1621 err = create_msg_cache(dev); 1622 if (err) { 1623 mlx5_core_err(dev, "failed to create command cache\n"); 1624 goto err_free_page; 1625 } 1626 return 0; 1627 1628 err_free_page: 1629 free_cmd_page(dev, cmd); 1630 1631 err_free_pool: 1632 return err; 1633 } 1634 EXPORT_SYMBOL(mlx5_cmd_init); 1635 1636 void mlx5_cmd_cleanup(struct mlx5_core_dev *dev) 1637 { 1638 struct mlx5_cmd *cmd = &dev->cmd; 1639 1640 clean_debug_files(dev); 1641 flush_workqueue(dev->priv.health.wq_cmd); 1642 destroy_msg_cache(dev); 1643 free_cmd_page(dev, cmd); 1644 } 1645 EXPORT_SYMBOL(mlx5_cmd_cleanup); 1646 1647 int mlx5_cmd_query_cong_counter(struct mlx5_core_dev *dev, 1648 bool reset, void *out, int out_size) 1649 { 1650 u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = { }; 1651 1652 MLX5_SET(query_cong_statistics_in, in, opcode, 1653 MLX5_CMD_OP_QUERY_CONG_STATISTICS); 1654 MLX5_SET(query_cong_statistics_in, in, clear, reset); 1655 return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size); 1656 } 1657 EXPORT_SYMBOL(mlx5_cmd_query_cong_counter); 1658 1659 int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point, 1660 void *out, int out_size) 1661 { 1662 u32 in[MLX5_ST_SZ_DW(query_cong_params_in)] = { }; 1663 1664 MLX5_SET(query_cong_params_in, in, opcode, 1665 MLX5_CMD_OP_QUERY_CONG_PARAMS); 1666 MLX5_SET(query_cong_params_in, in, cong_protocol, cong_point); 1667 1668 return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size); 1669 } 1670 EXPORT_SYMBOL(mlx5_cmd_query_cong_params); 1671 1672 int mlx5_cmd_modify_cong_params(struct mlx5_core_dev *dev, 1673 void *in, int in_size) 1674 { 1675 u32 out[MLX5_ST_SZ_DW(modify_cong_params_out)] = { }; 1676 1677 return mlx5_cmd_exec(dev, in, in_size, out, sizeof(out)); 1678 } 1679 EXPORT_SYMBOL(mlx5_cmd_modify_cong_params); 1680 1681 int mlx5_cmd_query_cong_status(struct mlx5_core_dev *dev, int cong_point, 1682 int prio, void *out, int out_size) 1683 { 1684 u32 in[MLX5_ST_SZ_DW(query_cong_status_in)] = { }; 1685 1686 MLX5_SET(query_cong_status_in, in, opcode, 1687 MLX5_CMD_OP_QUERY_CONG_STATUS); 1688 MLX5_SET(query_cong_status_in, in, priority, prio); 1689 MLX5_SET(query_cong_status_in, in, cong_protocol, cong_point); 1690 1691 return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size); 1692 } 1693 EXPORT_SYMBOL(mlx5_cmd_query_cong_status); 1694 1695 int mlx5_cmd_modify_cong_status(struct mlx5_core_dev *dev, 1696 void *in, int in_size) 1697 { 1698 u32 out[MLX5_ST_SZ_DW(modify_cong_status_out)] = { }; 1699 1700 return mlx5_cmd_exec(dev, in, in_size, out, sizeof(out)); 1701 } 1702 EXPORT_SYMBOL(mlx5_cmd_modify_cong_status); 1703