1 /* 2 * Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 
31 */ 32 33 #include <linux/highmem.h> 34 #include <linux/errno.h> 35 #include <linux/pci.h> 36 #include <linux/dma-mapping.h> 37 #include <linux/slab.h> 38 #include <linux/delay.h> 39 #include <linux/random.h> 40 #include <linux/mlx5/driver.h> 41 #include <linux/mlx5/eq.h> 42 #include <linux/debugfs.h> 43 44 #include "mlx5_core.h" 45 #include "lib/eq.h" 46 #include "lib/tout.h" 47 #define CREATE_TRACE_POINTS 48 #include "diag/cmd_tracepoint.h" 49 50 struct mlx5_ifc_mbox_out_bits { 51 u8 status[0x8]; 52 u8 reserved_at_8[0x18]; 53 54 u8 syndrome[0x20]; 55 56 u8 reserved_at_40[0x40]; 57 }; 58 59 struct mlx5_ifc_mbox_in_bits { 60 u8 opcode[0x10]; 61 u8 uid[0x10]; 62 63 u8 reserved_at_20[0x10]; 64 u8 op_mod[0x10]; 65 66 u8 reserved_at_40[0x40]; 67 }; 68 69 enum { 70 CMD_IF_REV = 5, 71 }; 72 73 enum { 74 CMD_MODE_POLLING, 75 CMD_MODE_EVENTS 76 }; 77 78 enum { 79 MLX5_CMD_DELIVERY_STAT_OK = 0x0, 80 MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR = 0x1, 81 MLX5_CMD_DELIVERY_STAT_TOK_ERR = 0x2, 82 MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR = 0x3, 83 MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR = 0x4, 84 MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR = 0x5, 85 MLX5_CMD_DELIVERY_STAT_FW_ERR = 0x6, 86 MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR = 0x7, 87 MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR = 0x8, 88 MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR = 0x9, 89 MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10, 90 }; 91 92 static u16 in_to_opcode(void *in) 93 { 94 return MLX5_GET(mbox_in, in, opcode); 95 } 96 97 static u16 in_to_uid(void *in) 98 { 99 return MLX5_GET(mbox_in, in, uid); 100 } 101 102 /* Returns true for opcodes that might be triggered very frequently and throttle 103 * the command interface. Limit their command slots usage. 
104 */ 105 static bool mlx5_cmd_is_throttle_opcode(u16 op) 106 { 107 switch (op) { 108 case MLX5_CMD_OP_CREATE_GENERAL_OBJECT: 109 case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT: 110 case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT: 111 case MLX5_CMD_OP_QUERY_GENERAL_OBJECT: 112 case MLX5_CMD_OP_SYNC_CRYPTO: 113 return true; 114 } 115 return false; 116 } 117 118 static struct mlx5_cmd_work_ent * 119 cmd_alloc_ent(struct mlx5_cmd *cmd, struct mlx5_cmd_msg *in, 120 struct mlx5_cmd_msg *out, void *uout, int uout_size, 121 mlx5_cmd_cbk_t cbk, void *context, int page_queue) 122 { 123 gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL; 124 struct mlx5_cmd_work_ent *ent; 125 126 ent = kzalloc_obj(*ent, alloc_flags); 127 if (!ent) 128 return ERR_PTR(-ENOMEM); 129 130 ent->idx = -EINVAL; 131 ent->in = in; 132 ent->out = out; 133 ent->uout = uout; 134 ent->uout_size = uout_size; 135 ent->callback = cbk; 136 ent->context = context; 137 ent->cmd = cmd; 138 ent->page_queue = page_queue; 139 ent->op = in_to_opcode(in->first.data); 140 refcount_set(&ent->refcnt, 1); 141 142 return ent; 143 } 144 145 static void cmd_free_ent(struct mlx5_cmd_work_ent *ent) 146 { 147 kfree(ent); 148 } 149 150 static u8 alloc_token(struct mlx5_cmd *cmd) 151 { 152 u8 token; 153 154 spin_lock(&cmd->token_lock); 155 cmd->token++; 156 if (cmd->token == 0) 157 cmd->token++; 158 token = cmd->token; 159 spin_unlock(&cmd->token_lock); 160 161 return token; 162 } 163 164 static int cmd_alloc_index(struct mlx5_cmd *cmd, struct mlx5_cmd_work_ent *ent) 165 { 166 unsigned long flags; 167 int ret; 168 169 spin_lock_irqsave(&cmd->alloc_lock, flags); 170 ret = find_first_bit(&cmd->vars.bitmask, cmd->vars.max_reg_cmds); 171 if (ret < cmd->vars.max_reg_cmds) { 172 clear_bit(ret, &cmd->vars.bitmask); 173 ent->idx = ret; 174 cmd->ent_arr[ent->idx] = ent; 175 } 176 spin_unlock_irqrestore(&cmd->alloc_lock, flags); 177 178 return ret < cmd->vars.max_reg_cmds ? 
ret : -ENOMEM; 179 } 180 181 static void cmd_free_index(struct mlx5_cmd *cmd, int idx) 182 { 183 lockdep_assert_held(&cmd->alloc_lock); 184 cmd->ent_arr[idx] = NULL; 185 set_bit(idx, &cmd->vars.bitmask); 186 } 187 188 static void cmd_ent_get(struct mlx5_cmd_work_ent *ent) 189 { 190 refcount_inc(&ent->refcnt); 191 } 192 193 static void cmd_ent_put(struct mlx5_cmd_work_ent *ent) 194 { 195 struct mlx5_cmd *cmd = ent->cmd; 196 unsigned long flags; 197 198 spin_lock_irqsave(&cmd->alloc_lock, flags); 199 if (!refcount_dec_and_test(&ent->refcnt)) { 200 spin_unlock_irqrestore(&cmd->alloc_lock, flags); 201 return; 202 } 203 204 if (ent->idx >= 0) { 205 cmd_free_index(cmd, ent->idx); 206 up(ent->page_queue ? &cmd->vars.pages_sem : &cmd->vars.sem); 207 } 208 spin_unlock_irqrestore(&cmd->alloc_lock, flags); 209 210 cmd_free_ent(ent); 211 } 212 213 static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx) 214 { 215 return cmd->cmd_buf + (idx << cmd->vars.log_stride); 216 } 217 218 static int mlx5_calc_cmd_blocks(struct mlx5_cmd_msg *msg) 219 { 220 int size = msg->len; 221 int blen = size - min_t(int, sizeof(msg->first.data), size); 222 223 return DIV_ROUND_UP(blen, MLX5_CMD_DATA_BLOCK_SIZE); 224 } 225 226 static u8 xor8_buf(void *buf, size_t offset, int len) 227 { 228 u8 *ptr = buf; 229 u8 sum = 0; 230 int i; 231 int end = len + offset; 232 233 for (i = offset; i < end; i++) 234 sum ^= ptr[i]; 235 236 return sum; 237 } 238 239 static int verify_block_sig(struct mlx5_cmd_prot_block *block) 240 { 241 size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0); 242 int xor_len = sizeof(*block) - sizeof(block->data) - 1; 243 244 if (xor8_buf(block, rsvd0_off, xor_len) != 0xff) 245 return -EHWPOISON; 246 247 if (xor8_buf(block, 0, sizeof(*block)) != 0xff) 248 return -EHWPOISON; 249 250 return 0; 251 } 252 253 static void calc_block_sig(struct mlx5_cmd_prot_block *block) 254 { 255 int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2; 256 size_t rsvd0_off = 
offsetof(struct mlx5_cmd_prot_block, rsvd0); 257 258 block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len); 259 block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1); 260 } 261 262 static void calc_chain_sig(struct mlx5_cmd_msg *msg) 263 { 264 struct mlx5_cmd_mailbox *next = msg->next; 265 int n = mlx5_calc_cmd_blocks(msg); 266 int i = 0; 267 268 for (i = 0; i < n && next; i++) { 269 calc_block_sig(next->buf); 270 next = next->next; 271 } 272 } 273 274 static void set_signature(struct mlx5_cmd_work_ent *ent, int csum) 275 { 276 ent->lay->sig = ~xor8_buf(ent->lay, 0, sizeof(*ent->lay)); 277 if (csum) { 278 calc_chain_sig(ent->in); 279 calc_chain_sig(ent->out); 280 } 281 } 282 283 static void poll_timeout(struct mlx5_cmd_work_ent *ent) 284 { 285 struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev, cmd); 286 u64 cmd_to_ms = mlx5_tout_ms(dev, CMD); 287 unsigned long poll_end; 288 u8 own; 289 290 poll_end = jiffies + msecs_to_jiffies(cmd_to_ms + 1000); 291 292 do { 293 own = READ_ONCE(ent->lay->status_own); 294 if (!(own & CMD_OWNER_HW)) { 295 ent->ret = 0; 296 return; 297 } 298 cond_resched(); 299 if (mlx5_cmd_is_down(dev)) { 300 ent->ret = -ENXIO; 301 return; 302 } 303 } while (time_before(jiffies, poll_end)); 304 305 ent->ret = -ETIMEDOUT; 306 } 307 308 static int verify_signature(struct mlx5_cmd_work_ent *ent) 309 { 310 struct mlx5_cmd_mailbox *next = ent->out->next; 311 int n = mlx5_calc_cmd_blocks(ent->out); 312 int err; 313 u8 sig; 314 int i = 0; 315 316 sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay)); 317 if (sig != 0xff) 318 return -EHWPOISON; 319 320 for (i = 0; i < n && next; i++) { 321 err = verify_block_sig(next->buf); 322 if (err) 323 return -EHWPOISON; 324 325 next = next->next; 326 } 327 328 return 0; 329 } 330 331 static void dump_buf(void *buf, int size, int data_only, int offset, int idx) 332 { 333 __be32 *p = buf; 334 int i; 335 336 for (i = 0; i < size; i += 16) { 337 pr_debug("cmd[%d]: %03x: %08x %08x %08x %08x\n", idx, 
offset, 338 be32_to_cpu(p[0]), be32_to_cpu(p[1]), 339 be32_to_cpu(p[2]), be32_to_cpu(p[3])); 340 p += 4; 341 offset += 16; 342 } 343 if (!data_only) 344 pr_debug("\n"); 345 } 346 347 static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, 348 u32 *synd, u8 *status) 349 { 350 *synd = 0; 351 *status = 0; 352 353 switch (op) { 354 case MLX5_CMD_OP_TEARDOWN_HCA: 355 case MLX5_CMD_OP_DISABLE_HCA: 356 case MLX5_CMD_OP_MANAGE_PAGES: 357 case MLX5_CMD_OP_DESTROY_MKEY: 358 case MLX5_CMD_OP_DESTROY_EQ: 359 case MLX5_CMD_OP_DESTROY_CQ: 360 case MLX5_CMD_OP_DESTROY_QP: 361 case MLX5_CMD_OP_DESTROY_PSV: 362 case MLX5_CMD_OP_DESTROY_SRQ: 363 case MLX5_CMD_OP_DESTROY_XRC_SRQ: 364 case MLX5_CMD_OP_DESTROY_XRQ: 365 case MLX5_CMD_OP_DESTROY_DCT: 366 case MLX5_CMD_OP_DEALLOC_Q_COUNTER: 367 case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT: 368 case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT: 369 case MLX5_CMD_OP_DEALLOC_PD: 370 case MLX5_CMD_OP_DEALLOC_UAR: 371 case MLX5_CMD_OP_DETACH_FROM_MCG: 372 case MLX5_CMD_OP_DEALLOC_XRCD: 373 case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN: 374 case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT: 375 case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY: 376 case MLX5_CMD_OP_DESTROY_LAG: 377 case MLX5_CMD_OP_DESTROY_VPORT_LAG: 378 case MLX5_CMD_OP_DESTROY_TIR: 379 case MLX5_CMD_OP_DESTROY_SQ: 380 case MLX5_CMD_OP_DESTROY_RQ: 381 case MLX5_CMD_OP_DESTROY_RMP: 382 case MLX5_CMD_OP_DESTROY_TIS: 383 case MLX5_CMD_OP_DESTROY_RQT: 384 case MLX5_CMD_OP_DESTROY_FLOW_TABLE: 385 case MLX5_CMD_OP_DESTROY_FLOW_GROUP: 386 case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY: 387 case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER: 388 case MLX5_CMD_OP_2ERR_QP: 389 case MLX5_CMD_OP_2RST_QP: 390 case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT: 391 case MLX5_CMD_OP_MODIFY_FLOW_TABLE: 392 case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY: 393 case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT: 394 case MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT: 395 case MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT: 396 case MLX5_CMD_OP_FPGA_DESTROY_QP: 397 case 
MLX5_CMD_OP_DESTROY_GENERAL_OBJECT: 398 case MLX5_CMD_OP_DEALLOC_MEMIC: 399 case MLX5_CMD_OP_PAGE_FAULT_RESUME: 400 case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS: 401 case MLX5_CMD_OP_DEALLOC_SF: 402 case MLX5_CMD_OP_DESTROY_UCTX: 403 case MLX5_CMD_OP_DESTROY_UMEM: 404 case MLX5_CMD_OP_MODIFY_RQT: 405 return MLX5_CMD_STAT_OK; 406 407 case MLX5_CMD_OP_QUERY_HCA_CAP: 408 case MLX5_CMD_OP_QUERY_ADAPTER: 409 case MLX5_CMD_OP_INIT_HCA: 410 case MLX5_CMD_OP_ENABLE_HCA: 411 case MLX5_CMD_OP_QUERY_PAGES: 412 case MLX5_CMD_OP_SET_HCA_CAP: 413 case MLX5_CMD_OP_QUERY_ISSI: 414 case MLX5_CMD_OP_SET_ISSI: 415 case MLX5_CMD_OP_CREATE_MKEY: 416 case MLX5_CMD_OP_QUERY_MKEY: 417 case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS: 418 case MLX5_CMD_OP_CREATE_EQ: 419 case MLX5_CMD_OP_QUERY_EQ: 420 case MLX5_CMD_OP_GEN_EQE: 421 case MLX5_CMD_OP_CREATE_CQ: 422 case MLX5_CMD_OP_QUERY_CQ: 423 case MLX5_CMD_OP_MODIFY_CQ: 424 case MLX5_CMD_OP_CREATE_QP: 425 case MLX5_CMD_OP_RST2INIT_QP: 426 case MLX5_CMD_OP_INIT2RTR_QP: 427 case MLX5_CMD_OP_RTR2RTS_QP: 428 case MLX5_CMD_OP_RTS2RTS_QP: 429 case MLX5_CMD_OP_SQERR2RTS_QP: 430 case MLX5_CMD_OP_QUERY_QP: 431 case MLX5_CMD_OP_SQD_RTS_QP: 432 case MLX5_CMD_OP_INIT2INIT_QP: 433 case MLX5_CMD_OP_CREATE_PSV: 434 case MLX5_CMD_OP_CREATE_SRQ: 435 case MLX5_CMD_OP_QUERY_SRQ: 436 case MLX5_CMD_OP_ARM_RQ: 437 case MLX5_CMD_OP_CREATE_XRC_SRQ: 438 case MLX5_CMD_OP_QUERY_XRC_SRQ: 439 case MLX5_CMD_OP_ARM_XRC_SRQ: 440 case MLX5_CMD_OP_CREATE_XRQ: 441 case MLX5_CMD_OP_QUERY_XRQ: 442 case MLX5_CMD_OP_ARM_XRQ: 443 case MLX5_CMD_OP_CREATE_DCT: 444 case MLX5_CMD_OP_DRAIN_DCT: 445 case MLX5_CMD_OP_QUERY_DCT: 446 case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION: 447 case MLX5_CMD_OP_QUERY_VPORT_STATE: 448 case MLX5_CMD_OP_MODIFY_VPORT_STATE: 449 case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT: 450 case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT: 451 case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT: 452 case MLX5_CMD_OP_QUERY_ROCE_ADDRESS: 453 case MLX5_CMD_OP_SET_ROCE_ADDRESS: 454 case 
MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT: 455 case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT: 456 case MLX5_CMD_OP_QUERY_HCA_VPORT_GID: 457 case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY: 458 case MLX5_CMD_OP_QUERY_VNIC_ENV: 459 case MLX5_CMD_OP_QUERY_VPORT_COUNTER: 460 case MLX5_CMD_OP_ALLOC_Q_COUNTER: 461 case MLX5_CMD_OP_QUERY_Q_COUNTER: 462 case MLX5_CMD_OP_SET_MONITOR_COUNTER: 463 case MLX5_CMD_OP_ARM_MONITOR_COUNTER: 464 case MLX5_CMD_OP_SET_PP_RATE_LIMIT: 465 case MLX5_CMD_OP_QUERY_RATE_LIMIT: 466 case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT: 467 case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT: 468 case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT: 469 case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT: 470 case MLX5_CMD_OP_ALLOC_PD: 471 case MLX5_CMD_OP_ALLOC_UAR: 472 case MLX5_CMD_OP_CONFIG_INT_MODERATION: 473 case MLX5_CMD_OP_ACCESS_REG: 474 case MLX5_CMD_OP_ATTACH_TO_MCG: 475 case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG: 476 case MLX5_CMD_OP_MAD_IFC: 477 case MLX5_CMD_OP_QUERY_MAD_DEMUX: 478 case MLX5_CMD_OP_SET_MAD_DEMUX: 479 case MLX5_CMD_OP_NOP: 480 case MLX5_CMD_OP_ALLOC_XRCD: 481 case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN: 482 case MLX5_CMD_OP_QUERY_CONG_STATUS: 483 case MLX5_CMD_OP_MODIFY_CONG_STATUS: 484 case MLX5_CMD_OP_QUERY_CONG_PARAMS: 485 case MLX5_CMD_OP_MODIFY_CONG_PARAMS: 486 case MLX5_CMD_OP_QUERY_CONG_STATISTICS: 487 case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT: 488 case MLX5_CMD_OP_SET_L2_TABLE_ENTRY: 489 case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY: 490 case MLX5_CMD_OP_CREATE_LAG: 491 case MLX5_CMD_OP_MODIFY_LAG: 492 case MLX5_CMD_OP_QUERY_LAG: 493 case MLX5_CMD_OP_CREATE_VPORT_LAG: 494 case MLX5_CMD_OP_CREATE_TIR: 495 case MLX5_CMD_OP_MODIFY_TIR: 496 case MLX5_CMD_OP_QUERY_TIR: 497 case MLX5_CMD_OP_CREATE_SQ: 498 case MLX5_CMD_OP_MODIFY_SQ: 499 case MLX5_CMD_OP_QUERY_SQ: 500 case MLX5_CMD_OP_CREATE_RQ: 501 case MLX5_CMD_OP_MODIFY_RQ: 502 case MLX5_CMD_OP_QUERY_RQ: 503 case MLX5_CMD_OP_CREATE_RMP: 504 case MLX5_CMD_OP_MODIFY_RMP: 505 case MLX5_CMD_OP_QUERY_RMP: 506 case MLX5_CMD_OP_CREATE_TIS: 507 case 
MLX5_CMD_OP_MODIFY_TIS: 508 case MLX5_CMD_OP_QUERY_TIS: 509 case MLX5_CMD_OP_CREATE_RQT: 510 case MLX5_CMD_OP_QUERY_RQT: 511 512 case MLX5_CMD_OP_CREATE_FLOW_TABLE: 513 case MLX5_CMD_OP_QUERY_FLOW_TABLE: 514 case MLX5_CMD_OP_CREATE_FLOW_GROUP: 515 case MLX5_CMD_OP_QUERY_FLOW_GROUP: 516 case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY: 517 case MLX5_CMD_OP_ALLOC_FLOW_COUNTER: 518 case MLX5_CMD_OP_QUERY_FLOW_COUNTER: 519 case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT: 520 case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT: 521 case MLX5_CMD_OP_FPGA_CREATE_QP: 522 case MLX5_CMD_OP_FPGA_MODIFY_QP: 523 case MLX5_CMD_OP_FPGA_QUERY_QP: 524 case MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS: 525 case MLX5_CMD_OP_CREATE_GENERAL_OBJECT: 526 case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT: 527 case MLX5_CMD_OP_QUERY_GENERAL_OBJECT: 528 case MLX5_CMD_OP_CREATE_UCTX: 529 case MLX5_CMD_OP_CREATE_UMEM: 530 case MLX5_CMD_OP_ALLOC_MEMIC: 531 case MLX5_CMD_OP_MODIFY_XRQ: 532 case MLX5_CMD_OP_RELEASE_XRQ_ERROR: 533 case MLX5_CMD_OP_QUERY_VHCA_STATE: 534 case MLX5_CMD_OP_MODIFY_VHCA_STATE: 535 case MLX5_CMD_OP_ALLOC_SF: 536 case MLX5_CMD_OP_SUSPEND_VHCA: 537 case MLX5_CMD_OP_RESUME_VHCA: 538 case MLX5_CMD_OP_QUERY_VHCA_MIGRATION_STATE: 539 case MLX5_CMD_OP_SAVE_VHCA_STATE: 540 case MLX5_CMD_OP_LOAD_VHCA_STATE: 541 case MLX5_CMD_OP_SYNC_CRYPTO: 542 case MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS: 543 *status = MLX5_DRIVER_STATUS_ABORTED; 544 *synd = MLX5_DRIVER_SYND; 545 return -ENOLINK; 546 default: 547 mlx5_core_err(dev, "Unknown FW command (%d)\n", op); 548 return -EINVAL; 549 } 550 } 551 552 const char *mlx5_command_str(int command) 553 { 554 #define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd 555 556 switch (command) { 557 MLX5_COMMAND_STR_CASE(QUERY_HCA_CAP); 558 MLX5_COMMAND_STR_CASE(QUERY_ADAPTER); 559 MLX5_COMMAND_STR_CASE(INIT_HCA); 560 MLX5_COMMAND_STR_CASE(TEARDOWN_HCA); 561 MLX5_COMMAND_STR_CASE(ENABLE_HCA); 562 MLX5_COMMAND_STR_CASE(DISABLE_HCA); 563 MLX5_COMMAND_STR_CASE(QUERY_PAGES); 
564 MLX5_COMMAND_STR_CASE(MANAGE_PAGES); 565 MLX5_COMMAND_STR_CASE(SET_HCA_CAP); 566 MLX5_COMMAND_STR_CASE(QUERY_ISSI); 567 MLX5_COMMAND_STR_CASE(SET_ISSI); 568 MLX5_COMMAND_STR_CASE(SET_DRIVER_VERSION); 569 MLX5_COMMAND_STR_CASE(CREATE_MKEY); 570 MLX5_COMMAND_STR_CASE(QUERY_MKEY); 571 MLX5_COMMAND_STR_CASE(DESTROY_MKEY); 572 MLX5_COMMAND_STR_CASE(QUERY_SPECIAL_CONTEXTS); 573 MLX5_COMMAND_STR_CASE(PAGE_FAULT_RESUME); 574 MLX5_COMMAND_STR_CASE(CREATE_EQ); 575 MLX5_COMMAND_STR_CASE(DESTROY_EQ); 576 MLX5_COMMAND_STR_CASE(QUERY_EQ); 577 MLX5_COMMAND_STR_CASE(GEN_EQE); 578 MLX5_COMMAND_STR_CASE(CREATE_CQ); 579 MLX5_COMMAND_STR_CASE(DESTROY_CQ); 580 MLX5_COMMAND_STR_CASE(QUERY_CQ); 581 MLX5_COMMAND_STR_CASE(MODIFY_CQ); 582 MLX5_COMMAND_STR_CASE(CREATE_QP); 583 MLX5_COMMAND_STR_CASE(DESTROY_QP); 584 MLX5_COMMAND_STR_CASE(RST2INIT_QP); 585 MLX5_COMMAND_STR_CASE(INIT2RTR_QP); 586 MLX5_COMMAND_STR_CASE(RTR2RTS_QP); 587 MLX5_COMMAND_STR_CASE(RTS2RTS_QP); 588 MLX5_COMMAND_STR_CASE(SQERR2RTS_QP); 589 MLX5_COMMAND_STR_CASE(2ERR_QP); 590 MLX5_COMMAND_STR_CASE(2RST_QP); 591 MLX5_COMMAND_STR_CASE(QUERY_QP); 592 MLX5_COMMAND_STR_CASE(SQD_RTS_QP); 593 MLX5_COMMAND_STR_CASE(INIT2INIT_QP); 594 MLX5_COMMAND_STR_CASE(CREATE_PSV); 595 MLX5_COMMAND_STR_CASE(DESTROY_PSV); 596 MLX5_COMMAND_STR_CASE(CREATE_SRQ); 597 MLX5_COMMAND_STR_CASE(DESTROY_SRQ); 598 MLX5_COMMAND_STR_CASE(QUERY_SRQ); 599 MLX5_COMMAND_STR_CASE(ARM_RQ); 600 MLX5_COMMAND_STR_CASE(CREATE_XRC_SRQ); 601 MLX5_COMMAND_STR_CASE(DESTROY_XRC_SRQ); 602 MLX5_COMMAND_STR_CASE(QUERY_XRC_SRQ); 603 MLX5_COMMAND_STR_CASE(ARM_XRC_SRQ); 604 MLX5_COMMAND_STR_CASE(CREATE_DCT); 605 MLX5_COMMAND_STR_CASE(DESTROY_DCT); 606 MLX5_COMMAND_STR_CASE(DRAIN_DCT); 607 MLX5_COMMAND_STR_CASE(QUERY_DCT); 608 MLX5_COMMAND_STR_CASE(ARM_DCT_FOR_KEY_VIOLATION); 609 MLX5_COMMAND_STR_CASE(QUERY_VPORT_STATE); 610 MLX5_COMMAND_STR_CASE(MODIFY_VPORT_STATE); 611 MLX5_COMMAND_STR_CASE(QUERY_ESW_VPORT_CONTEXT); 612 MLX5_COMMAND_STR_CASE(MODIFY_ESW_VPORT_CONTEXT); 613 
MLX5_COMMAND_STR_CASE(QUERY_NIC_VPORT_CONTEXT); 614 MLX5_COMMAND_STR_CASE(MODIFY_NIC_VPORT_CONTEXT); 615 MLX5_COMMAND_STR_CASE(QUERY_ROCE_ADDRESS); 616 MLX5_COMMAND_STR_CASE(SET_ROCE_ADDRESS); 617 MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_CONTEXT); 618 MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT); 619 MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID); 620 MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY); 621 MLX5_COMMAND_STR_CASE(QUERY_VNIC_ENV); 622 MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER); 623 MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER); 624 MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER); 625 MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER); 626 MLX5_COMMAND_STR_CASE(SET_MONITOR_COUNTER); 627 MLX5_COMMAND_STR_CASE(ARM_MONITOR_COUNTER); 628 MLX5_COMMAND_STR_CASE(SET_PP_RATE_LIMIT); 629 MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT); 630 MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT); 631 MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT); 632 MLX5_COMMAND_STR_CASE(QUERY_SCHEDULING_ELEMENT); 633 MLX5_COMMAND_STR_CASE(MODIFY_SCHEDULING_ELEMENT); 634 MLX5_COMMAND_STR_CASE(CREATE_QOS_PARA_VPORT); 635 MLX5_COMMAND_STR_CASE(DESTROY_QOS_PARA_VPORT); 636 MLX5_COMMAND_STR_CASE(ALLOC_PD); 637 MLX5_COMMAND_STR_CASE(DEALLOC_PD); 638 MLX5_COMMAND_STR_CASE(ALLOC_UAR); 639 MLX5_COMMAND_STR_CASE(DEALLOC_UAR); 640 MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION); 641 MLX5_COMMAND_STR_CASE(ACCESS_REG); 642 MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG); 643 MLX5_COMMAND_STR_CASE(DETACH_FROM_MCG); 644 MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG); 645 MLX5_COMMAND_STR_CASE(MAD_IFC); 646 MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX); 647 MLX5_COMMAND_STR_CASE(SET_MAD_DEMUX); 648 MLX5_COMMAND_STR_CASE(NOP); 649 MLX5_COMMAND_STR_CASE(ALLOC_XRCD); 650 MLX5_COMMAND_STR_CASE(DEALLOC_XRCD); 651 MLX5_COMMAND_STR_CASE(ALLOC_TRANSPORT_DOMAIN); 652 MLX5_COMMAND_STR_CASE(DEALLOC_TRANSPORT_DOMAIN); 653 MLX5_COMMAND_STR_CASE(QUERY_CONG_STATUS); 654 MLX5_COMMAND_STR_CASE(MODIFY_CONG_STATUS); 655 MLX5_COMMAND_STR_CASE(QUERY_CONG_PARAMS); 656 
MLX5_COMMAND_STR_CASE(MODIFY_CONG_PARAMS); 657 MLX5_COMMAND_STR_CASE(QUERY_CONG_STATISTICS); 658 MLX5_COMMAND_STR_CASE(ADD_VXLAN_UDP_DPORT); 659 MLX5_COMMAND_STR_CASE(DELETE_VXLAN_UDP_DPORT); 660 MLX5_COMMAND_STR_CASE(SET_L2_TABLE_ENTRY); 661 MLX5_COMMAND_STR_CASE(QUERY_L2_TABLE_ENTRY); 662 MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY); 663 MLX5_COMMAND_STR_CASE(SET_WOL_ROL); 664 MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL); 665 MLX5_COMMAND_STR_CASE(CREATE_LAG); 666 MLX5_COMMAND_STR_CASE(MODIFY_LAG); 667 MLX5_COMMAND_STR_CASE(QUERY_LAG); 668 MLX5_COMMAND_STR_CASE(DESTROY_LAG); 669 MLX5_COMMAND_STR_CASE(CREATE_VPORT_LAG); 670 MLX5_COMMAND_STR_CASE(DESTROY_VPORT_LAG); 671 MLX5_COMMAND_STR_CASE(CREATE_TIR); 672 MLX5_COMMAND_STR_CASE(MODIFY_TIR); 673 MLX5_COMMAND_STR_CASE(DESTROY_TIR); 674 MLX5_COMMAND_STR_CASE(QUERY_TIR); 675 MLX5_COMMAND_STR_CASE(CREATE_SQ); 676 MLX5_COMMAND_STR_CASE(MODIFY_SQ); 677 MLX5_COMMAND_STR_CASE(DESTROY_SQ); 678 MLX5_COMMAND_STR_CASE(QUERY_SQ); 679 MLX5_COMMAND_STR_CASE(CREATE_RQ); 680 MLX5_COMMAND_STR_CASE(MODIFY_RQ); 681 MLX5_COMMAND_STR_CASE(DESTROY_RQ); 682 MLX5_COMMAND_STR_CASE(QUERY_RQ); 683 MLX5_COMMAND_STR_CASE(CREATE_RMP); 684 MLX5_COMMAND_STR_CASE(MODIFY_RMP); 685 MLX5_COMMAND_STR_CASE(DESTROY_RMP); 686 MLX5_COMMAND_STR_CASE(QUERY_RMP); 687 MLX5_COMMAND_STR_CASE(CREATE_TIS); 688 MLX5_COMMAND_STR_CASE(MODIFY_TIS); 689 MLX5_COMMAND_STR_CASE(DESTROY_TIS); 690 MLX5_COMMAND_STR_CASE(QUERY_TIS); 691 MLX5_COMMAND_STR_CASE(CREATE_RQT); 692 MLX5_COMMAND_STR_CASE(MODIFY_RQT); 693 MLX5_COMMAND_STR_CASE(DESTROY_RQT); 694 MLX5_COMMAND_STR_CASE(QUERY_RQT); 695 MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ROOT); 696 MLX5_COMMAND_STR_CASE(CREATE_FLOW_TABLE); 697 MLX5_COMMAND_STR_CASE(DESTROY_FLOW_TABLE); 698 MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE); 699 MLX5_COMMAND_STR_CASE(CREATE_FLOW_GROUP); 700 MLX5_COMMAND_STR_CASE(DESTROY_FLOW_GROUP); 701 MLX5_COMMAND_STR_CASE(QUERY_FLOW_GROUP); 702 MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY); 703 
MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY); 704 MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY); 705 MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER); 706 MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER); 707 MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER); 708 MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE); 709 MLX5_COMMAND_STR_CASE(ALLOC_PACKET_REFORMAT_CONTEXT); 710 MLX5_COMMAND_STR_CASE(DEALLOC_PACKET_REFORMAT_CONTEXT); 711 MLX5_COMMAND_STR_CASE(ALLOC_MODIFY_HEADER_CONTEXT); 712 MLX5_COMMAND_STR_CASE(DEALLOC_MODIFY_HEADER_CONTEXT); 713 MLX5_COMMAND_STR_CASE(FPGA_CREATE_QP); 714 MLX5_COMMAND_STR_CASE(FPGA_MODIFY_QP); 715 MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP); 716 MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP_COUNTERS); 717 MLX5_COMMAND_STR_CASE(FPGA_DESTROY_QP); 718 MLX5_COMMAND_STR_CASE(CREATE_XRQ); 719 MLX5_COMMAND_STR_CASE(DESTROY_XRQ); 720 MLX5_COMMAND_STR_CASE(QUERY_XRQ); 721 MLX5_COMMAND_STR_CASE(ARM_XRQ); 722 MLX5_COMMAND_STR_CASE(CREATE_GENERAL_OBJECT); 723 MLX5_COMMAND_STR_CASE(DESTROY_GENERAL_OBJECT); 724 MLX5_COMMAND_STR_CASE(MODIFY_GENERAL_OBJECT); 725 MLX5_COMMAND_STR_CASE(QUERY_GENERAL_OBJECT); 726 MLX5_COMMAND_STR_CASE(QUERY_MODIFY_HEADER_CONTEXT); 727 MLX5_COMMAND_STR_CASE(ALLOC_MEMIC); 728 MLX5_COMMAND_STR_CASE(DEALLOC_MEMIC); 729 MLX5_COMMAND_STR_CASE(QUERY_ESW_FUNCTIONS); 730 MLX5_COMMAND_STR_CASE(CREATE_UCTX); 731 MLX5_COMMAND_STR_CASE(DESTROY_UCTX); 732 MLX5_COMMAND_STR_CASE(CREATE_UMEM); 733 MLX5_COMMAND_STR_CASE(DESTROY_UMEM); 734 MLX5_COMMAND_STR_CASE(RELEASE_XRQ_ERROR); 735 MLX5_COMMAND_STR_CASE(MODIFY_XRQ); 736 MLX5_COMMAND_STR_CASE(QUERY_VHCA_STATE); 737 MLX5_COMMAND_STR_CASE(MODIFY_VHCA_STATE); 738 MLX5_COMMAND_STR_CASE(ALLOC_SF); 739 MLX5_COMMAND_STR_CASE(DEALLOC_SF); 740 MLX5_COMMAND_STR_CASE(SUSPEND_VHCA); 741 MLX5_COMMAND_STR_CASE(RESUME_VHCA); 742 MLX5_COMMAND_STR_CASE(QUERY_VHCA_MIGRATION_STATE); 743 MLX5_COMMAND_STR_CASE(SAVE_VHCA_STATE); 744 MLX5_COMMAND_STR_CASE(LOAD_VHCA_STATE); 745 MLX5_COMMAND_STR_CASE(SYNC_CRYPTO); 746 
MLX5_COMMAND_STR_CASE(ALLOW_OTHER_VHCA_ACCESS); 747 default: return "unknown command opcode"; 748 } 749 } 750 751 static const char *cmd_status_str(u8 status) 752 { 753 switch (status) { 754 case MLX5_CMD_STAT_OK: 755 return "OK"; 756 case MLX5_CMD_STAT_INT_ERR: 757 return "internal error"; 758 case MLX5_CMD_STAT_BAD_OP_ERR: 759 return "bad operation"; 760 case MLX5_CMD_STAT_BAD_PARAM_ERR: 761 return "bad parameter"; 762 case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: 763 return "bad system state"; 764 case MLX5_CMD_STAT_BAD_RES_ERR: 765 return "bad resource"; 766 case MLX5_CMD_STAT_RES_BUSY: 767 return "resource busy"; 768 case MLX5_CMD_STAT_NOT_READY: 769 return "FW not ready"; 770 case MLX5_CMD_STAT_LIM_ERR: 771 return "limits exceeded"; 772 case MLX5_CMD_STAT_BAD_RES_STATE_ERR: 773 return "bad resource state"; 774 case MLX5_CMD_STAT_IX_ERR: 775 return "bad index"; 776 case MLX5_CMD_STAT_NO_RES_ERR: 777 return "no resources"; 778 case MLX5_CMD_STAT_BAD_INP_LEN_ERR: 779 return "bad input length"; 780 case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: 781 return "bad output length"; 782 case MLX5_CMD_STAT_BAD_QP_STATE_ERR: 783 return "bad QP state"; 784 case MLX5_CMD_STAT_BAD_PKT_ERR: 785 return "bad packet (discarded)"; 786 case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: 787 return "bad size too many outstanding CQEs"; 788 default: 789 return "unknown status"; 790 } 791 } 792 793 static int cmd_status_to_err(u8 status) 794 { 795 switch (status) { 796 case MLX5_CMD_STAT_OK: return 0; 797 case MLX5_CMD_STAT_INT_ERR: return -EIO; 798 case MLX5_CMD_STAT_BAD_OP_ERR: return -EINVAL; 799 case MLX5_CMD_STAT_BAD_PARAM_ERR: return -EINVAL; 800 case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO; 801 case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL; 802 case MLX5_CMD_STAT_RES_BUSY: return -EBUSY; 803 case MLX5_CMD_STAT_NOT_READY: return -EAGAIN; 804 case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM; 805 case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL; 806 case MLX5_CMD_STAT_IX_ERR: return -EINVAL; 807 case 
MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN; 808 case MLX5_CMD_STAT_BAD_INP_LEN_ERR: return -EIO; 809 case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO; 810 case MLX5_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL; 811 case MLX5_CMD_STAT_BAD_PKT_ERR: return -EINVAL; 812 case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL; 813 default: return -EIO; 814 } 815 } 816 817 void mlx5_cmd_out_err(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out) 818 { 819 u32 syndrome = MLX5_GET(mbox_out, out, syndrome); 820 u8 status = MLX5_GET(mbox_out, out, status); 821 822 mlx5_core_err_rl(dev, 823 "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x), err(%d)\n", 824 mlx5_command_str(opcode), opcode, op_mod, 825 cmd_status_str(status), status, syndrome, cmd_status_to_err(status)); 826 } 827 EXPORT_SYMBOL(mlx5_cmd_out_err); 828 829 static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out) 830 { 831 u16 opcode, op_mod; 832 u8 status; 833 u16 uid; 834 835 opcode = in_to_opcode(in); 836 op_mod = MLX5_GET(mbox_in, in, op_mod); 837 uid = in_to_uid(in); 838 status = MLX5_GET(mbox_out, out, status); 839 840 if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY && 841 opcode != MLX5_CMD_OP_CREATE_UCTX && status != MLX5_CMD_STAT_NOT_READY) 842 mlx5_cmd_out_err(dev, opcode, op_mod, out); 843 } 844 845 int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out) 846 { 847 /* aborted due to PCI error or via reset flow mlx5_cmd_trigger_completions() */ 848 if (err == -ENXIO) { 849 u16 opcode = in_to_opcode(in); 850 u32 syndrome; 851 u8 status; 852 853 /* PCI Error, emulate command return status, for smooth reset */ 854 err = mlx5_internal_err_ret_value(dev, opcode, &syndrome, &status); 855 MLX5_SET(mbox_out, out, status, status); 856 MLX5_SET(mbox_out, out, syndrome, syndrome); 857 if (!err) 858 return 0; 859 } 860 861 /* driver or FW delivery error */ 862 if (err != -EREMOTEIO && err) 863 return err; 864 865 /* check outbox status */ 866 err = 
cmd_status_to_err(MLX5_GET(mbox_out, out, status)); 867 if (err) 868 cmd_status_print(dev, in, out); 869 870 return err; 871 } 872 EXPORT_SYMBOL(mlx5_cmd_check); 873 874 static void dump_command(struct mlx5_core_dev *dev, 875 struct mlx5_cmd_work_ent *ent, int input) 876 { 877 struct mlx5_cmd_msg *msg = input ? ent->in : ent->out; 878 struct mlx5_cmd_mailbox *next = msg->next; 879 int n = mlx5_calc_cmd_blocks(msg); 880 u16 op = ent->op; 881 int data_only; 882 u32 offset = 0; 883 int dump_len; 884 int i; 885 886 mlx5_core_dbg(dev, "cmd[%d]: start dump\n", ent->idx); 887 data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA)); 888 889 if (data_only) 890 mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA, 891 "cmd[%d]: dump command data %s(0x%x) %s\n", 892 ent->idx, mlx5_command_str(op), op, 893 input ? "INPUT" : "OUTPUT"); 894 else 895 mlx5_core_dbg(dev, "cmd[%d]: dump command %s(0x%x) %s\n", 896 ent->idx, mlx5_command_str(op), op, 897 input ? "INPUT" : "OUTPUT"); 898 899 if (data_only) { 900 if (input) { 901 dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset, ent->idx); 902 offset += sizeof(ent->lay->in); 903 } else { 904 dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset, ent->idx); 905 offset += sizeof(ent->lay->out); 906 } 907 } else { 908 dump_buf(ent->lay, sizeof(*ent->lay), 0, offset, ent->idx); 909 offset += sizeof(*ent->lay); 910 } 911 912 for (i = 0; i < n && next; i++) { 913 if (data_only) { 914 dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset); 915 dump_buf(next->buf, dump_len, 1, offset, ent->idx); 916 offset += MLX5_CMD_DATA_BLOCK_SIZE; 917 } else { 918 mlx5_core_dbg(dev, "cmd[%d]: command block:\n", ent->idx); 919 dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset, 920 ent->idx); 921 offset += sizeof(struct mlx5_cmd_prot_block); 922 } 923 next = next->next; 924 } 925 926 if (data_only) 927 pr_debug("\n"); 928 929 mlx5_core_dbg(dev, "cmd[%d]: end dump\n", ent->idx); 930 } 931 932 static void 
mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced); 933 934 static void cb_timeout_handler(struct work_struct *work) 935 { 936 struct delayed_work *dwork = to_delayed_work(work); 937 struct mlx5_cmd_work_ent *ent = container_of(dwork, 938 struct mlx5_cmd_work_ent, 939 cb_timeout_work); 940 struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev, 941 cmd); 942 943 mlx5_cmd_eq_recover(dev); 944 945 /* Maybe got handled by eq recover ? */ 946 if (!test_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state)) { 947 mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, recovered after timeout\n", ent->idx, 948 mlx5_command_str(ent->op), ent->op); 949 goto out; /* phew, already handled */ 950 } 951 952 ent->ret = -ETIMEDOUT; 953 mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, timeout. Will cause a leak of a command resource\n", 954 ent->idx, mlx5_command_str(ent->op), ent->op); 955 mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true); 956 957 out: 958 cmd_ent_put(ent); /* for the cmd_ent_get() took on schedule delayed work */ 959 } 960 961 static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg); 962 static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev, 963 struct mlx5_cmd_msg *msg); 964 965 static bool opcode_allowed(struct mlx5_cmd *cmd, u16 opcode) 966 { 967 if (cmd->allowed_opcode == CMD_ALLOWED_OPCODE_ALL) 968 return true; 969 970 return cmd->allowed_opcode == opcode; 971 } 972 973 bool mlx5_cmd_is_down(struct mlx5_core_dev *dev) 974 { 975 return pci_channel_offline(dev->pdev) || 976 dev->cmd.state != MLX5_CMDIF_STATE_UP || 977 dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR; 978 } 979 980 static void cmd_work_handler(struct work_struct *work) 981 { 982 struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work); 983 struct mlx5_cmd *cmd = ent->cmd; 984 bool poll_cmd = ent->polling; 985 struct mlx5_cmd_layout *lay; 986 struct mlx5_core_dev *dev; 987 unsigned long timeout; 988 unsigned long flags; 989 int 
alloc_ret;
    int cmd_mode;

    /* Tell wait_func() the work has started executing. */
    complete(&ent->handling);

    dev = container_of(cmd, struct mlx5_core_dev, cmd);
    timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));

    if (!ent->page_queue) {
        /* Regular command: wait for a free slot, then pick an index. */
        if (down_timeout(&cmd->vars.sem, timeout)) {
            mlx5_core_warn(dev, "%s(0x%x) timed out while waiting for a slot.\n",
                           mlx5_command_str(ent->op), ent->op);
            if (ent->callback) {
                ent->callback(-EBUSY, ent->context);
                mlx5_free_cmd_msg(dev, ent->out);
                free_msg(dev, ent->in);
                cmd_ent_put(ent);
            } else {
                ent->ret = -EBUSY;
                complete(&ent->done);
            }
            complete(&ent->slotted);
            return;
        }
        alloc_ret = cmd_alloc_index(cmd, ent);
        if (alloc_ret < 0) {
            mlx5_core_err_rl(dev, "failed to allocate command entry\n");
            if (ent->callback) {
                ent->callback(-EAGAIN, ent->context);
                mlx5_free_cmd_msg(dev, ent->out);
                free_msg(dev, ent->in);
                cmd_ent_put(ent);
            } else {
                ent->ret = -EAGAIN;
                complete(&ent->done);
            }
            up(&cmd->vars.sem);
            complete(&ent->slotted);
            return;
        }
    } else {
        /* Page queue command: uses the dedicated slot at max_reg_cmds. */
        down(&cmd->vars.pages_sem);
        ent->idx = cmd->vars.max_reg_cmds;
        spin_lock_irqsave(&cmd->alloc_lock, flags);
        clear_bit(ent->idx, &cmd->vars.bitmask);
        cmd->ent_arr[ent->idx] = ent;
        spin_unlock_irqrestore(&cmd->alloc_lock, flags);
    }

    complete(&ent->slotted);

    /* Fill the HW command layout for the chosen slot. */
    lay = get_inst(cmd, ent->idx);
    ent->lay = lay;
    memset(lay, 0, sizeof(*lay));
    memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
    if (ent->in->next)
        lay->in_ptr = cpu_to_be64(ent->in->next->dma);
    lay->inlen = cpu_to_be32(ent->in->len);
    if (ent->out->next)
        lay->out_ptr = cpu_to_be64(ent->out->next->dma);
    lay->outlen = cpu_to_be32(ent->out->len);
    lay->type = MLX5_PCI_CMD_XPORT;
    lay->token = ent->token;
    lay->status_own = CMD_OWNER_HW;
    set_signature(ent, !cmd->checksum_disabled);
    dump_command(dev, ent,
                 1);
    ent->ts1 = ktime_get_ns();
    cmd_mode = cmd->mode;

    if (ent->callback && schedule_delayed_work(&ent->cb_timeout_work, timeout))
        cmd_ent_get(ent);
    set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);

    cmd_ent_get(ent); /* for the _real_ FW event on completion */
    /* Skip sending command to fw if internal error */
    if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, ent->op)) {
        ent->ret = -ENXIO;
        mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
        return;
    }

    /* ring doorbell after the descriptor is valid */
    mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
    wmb();
    iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
    /* if not in polling don't use ent after this point */
    if (cmd_mode == CMD_MODE_POLLING || poll_cmd) {
        poll_timeout(ent);
        /* make sure we read the descriptor after ownership is SW */
        rmb();
        mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, !!ent->ret);
    }
}

/* Map a command delivery status (why the command was never executed by
 * FW) to a negative errno.
 */
static int deliv_status_to_err(u8 status)
{
    switch (status) {
    case MLX5_CMD_DELIVERY_STAT_OK:
    case MLX5_DRIVER_STATUS_ABORTED:
        return 0;
    case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
    case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
        return -EBADR;
    case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
    case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
    case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
        return -EFAULT; /* Bad address */
    case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
    case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
    case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
    case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
        return -ENOMSG;
    case MLX5_CMD_DELIVERY_STAT_FW_ERR:
        return -EIO;
    default:
        return -EINVAL;
    }
}

/* Human-readable name for a command delivery status. */
static const char *deliv_status_to_str(u8 status)
{
    switch (status) {
    case MLX5_CMD_DELIVERY_STAT_OK:
        return "no errors";
    case
MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
        return "signature error";
    case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
        return "token error";
    case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
        return "bad block number";
    case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
        return "output pointer not aligned to block size";
    case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
        return "input pointer not aligned to block size";
    case MLX5_CMD_DELIVERY_STAT_FW_ERR:
        return "firmware internal error";
    case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
        return "command input length error";
    case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
        return "command output length error";
    case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
        return "reserved fields not cleared";
    case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
        return "bad command descriptor type";
    default:
        return "unknown status code";
    }
}

enum {
    /* Grace period granted after EQ recovery before declaring a timeout. */
    MLX5_CMD_TIMEOUT_RECOVER_MSEC = 5 * 1000,
};

/* A blocking command timed out: run the command-EQ recovery flow and give
 * the entry a short extra window to complete. If it still does not
 * complete, force a timed-out completion for its slot.
 */
static void wait_func_handle_exec_timeout(struct mlx5_core_dev *dev,
                                          struct mlx5_cmd_work_ent *ent)
{
    unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_RECOVER_MSEC);

    mlx5_cmd_eq_recover(dev);

    /* Re-wait on the ent->done after executing the recovery flow. If the
     * recovery flow (or any other recovery flow running simultaneously)
     * has recovered an EQE, it should cause the entry to be completed by
     * the command interface.
     */
    if (wait_for_completion_timeout(&ent->done, timeout)) {
        mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) recovered after timeout\n", ent->idx,
                       mlx5_command_str(ent->op), ent->op);
        return;
    }

    mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) No done completion\n", ent->idx,
                   mlx5_command_str(ent->op), ent->op);

    ent->ret = -ETIMEDOUT;
    mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
}

/* Block until the command entry is done (or handle its timeout).
 * Returns ent->ret; warns with the specific failure mode for timeouts,
 * cancellations and slot-semaphore waits.
 */
static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
    unsigned long timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));
    struct mlx5_cmd *cmd = &dev->cmd;
    int err;

    /* If the work never started executing, cancel it and bail out. */
    if (!wait_for_completion_timeout(&ent->handling, timeout) &&
        cancel_work_sync(&ent->work)) {
        ent->ret = -ECANCELED;
        goto out_err;
    }

    /* Wait until cmd_work_handler() claimed (or failed to claim) a slot. */
    wait_for_completion(&ent->slotted);

    if (cmd->mode == CMD_MODE_POLLING || ent->polling)
        wait_for_completion(&ent->done);
    else if (!wait_for_completion_timeout(&ent->done, timeout))
        wait_func_handle_exec_timeout(dev, ent);

out_err:
    err = ent->ret;

    if (err == -ETIMEDOUT) {
        mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
                       mlx5_command_str(ent->op), ent->op);
    } else if (err == -ECANCELED) {
        mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n",
                       mlx5_command_str(ent->op), ent->op);
    } else if (err == -EBUSY) {
        mlx5_core_warn(dev, "%s(0x%x) timeout while waiting for command semaphore.\n",
                       mlx5_command_str(ent->op), ent->op);
    }
    mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
                  err, deliv_status_to_str(ent->status), ent->status);

    return err;
}

/* Check if all command slots are stalled (timed out and not recovered).
 * returns true if all slots timed out on a recent command and have not been
 * completed by FW yet.
(stalled state)
 * false otherwise (at least one slot is not stalled).
 *
 * In such odd situation "all_stalled", this serves as a protection mechanism
 * to avoid blocking the kernel for long periods of time in case FW is not
 * responding to commands.
 */
static bool mlx5_cmd_all_stalled(struct mlx5_core_dev *dev)
{
    struct mlx5_cmd *cmd = &dev->cmd;
    bool all_stalled = true;
    unsigned long flags;
    int i;

    spin_lock_irqsave(&cmd->alloc_lock, flags);

    /* at least one command slot is free */
    if (bitmap_weight(&cmd->vars.bitmask, cmd->vars.max_reg_cmds) > 0) {
        all_stalled = false;
        goto out;
    }

    /* All slots busy: stalled only if every occupied slot has timed out. */
    for_each_clear_bit(i, &cmd->vars.bitmask, cmd->vars.max_reg_cmds) {
        struct mlx5_cmd_work_ent *ent = dev->cmd.ent_arr[i];

        if (!test_bit(MLX5_CMD_ENT_STATE_TIMEDOUT, &ent->state)) {
            all_stalled = false;
            break;
        }
    }
out:
    spin_unlock_irqrestore(&cmd->alloc_lock, flags);

    return all_stalled;
}

/* Notes:
 * 1. Callback functions may not sleep
 * 2. page queue commands do not support asynchronous completion
 *
 * return value in case (!callback):
 * ret < 0 : Command execution couldn't be submitted by driver
 * ret > 0 : Command execution couldn't be performed by firmware
 * ret == 0: Command was executed by FW, Caller must check FW outbox status.
 *
 * return value in case (callback):
 * ret < 0 : Command execution couldn't be submitted by driver
 * ret == 0: Command will be submitted to FW for execution
 * and the callback will be called for further status updates
 */
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
                           struct mlx5_cmd_msg *out, void *uout, int uout_size,
                           mlx5_cmd_cbk_t callback,
                           void *context, int page_queue,
                           u8 token, bool force_polling)
{
    struct mlx5_cmd *cmd = &dev->cmd;
    struct mlx5_cmd_work_ent *ent;
    struct mlx5_cmd_stats *stats;
    u8 status = 0;
    int err = 0;
    s64 ds;

    /* Async completion is not supported for page queue commands. */
    if (callback && page_queue)
        return -EINVAL;

    if (!page_queue && mlx5_cmd_all_stalled(dev)) {
        mlx5_core_err_rl(dev,
                         "All CMD slots are stalled, aborting command\n");
        /* there's no reason to wait and block the whole kernel if FW
         * isn't currently responding to all slots, fail immediately
         */
        return -EAGAIN;
    }

    ent = cmd_alloc_ent(cmd, in, out, uout, uout_size,
                        callback, context, page_queue);
    if (IS_ERR(ent))
        return PTR_ERR(ent);

    /* put for this ent is when consumed, depending on the use case
     * 1) (!callback) blocking flow: by caller after wait_func completes
     * 2) (callback) flow: by mlx5_cmd_comp_handler() when ent is handled
     */

    ent->token = token;
    ent->polling = force_polling;

    init_completion(&ent->handling);
    init_completion(&ent->slotted);
    if (!callback)
        init_completion(&ent->done);

    INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
    INIT_WORK(&ent->work, cmd_work_handler);
    if (page_queue) {
        /* Page queue commands run the handler synchronously here. */
        cmd_work_handler(&ent->work);
    } else if (!queue_work(cmd->wq, &ent->work)) {
        mlx5_core_warn(dev, "failed to queue work\n");
        err = -EALREADY;
        goto out_free;
    }

    if (callback)
        return 0; /* mlx5_cmd_comp_handler() will put(ent) */

    err = wait_func(dev, ent);
    if (err == -ETIMEDOUT || err == -ECANCELED || err == -EBUSY)
        goto out_free;

    /* Record FW execution time in the per-opcode statistics. */
    ds = ent->ts2 - ent->ts1;
    stats = xa_load(&cmd->stats, ent->op);
    if (stats) {
        spin_lock_irq(&stats->lock);
        stats->sum += ds;
        ++stats->n;
        spin_unlock_irq(&stats->lock);
    }
    mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
                       "fw exec time for %s is %lld nsec\n",
                       mlx5_command_str(ent->op), ds);

out_free:
    status = ent->status;
    cmd_ent_put(ent);
    return err ? : status;
}

/* debugfs "run" file: writing "go" executes the command previously staged
 * through the "in"/"out"/"out_len" debugfs files.
 */
static ssize_t dbg_write(struct file *filp, const char __user *buf,
                         size_t count, loff_t *pos)
{
    struct mlx5_core_dev *dev = filp->private_data;
    struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
    char lbuf[3];
    int err;

    if (!dbg->in_msg || !dbg->out_msg)
        return -ENOMEM;

    if (count < sizeof(lbuf) - 1)
        return -EINVAL;

    if (copy_from_user(lbuf, buf, sizeof(lbuf) - 1))
        return -EFAULT;

    lbuf[sizeof(lbuf) - 1] = 0;

    if (strcmp(lbuf, "go"))
        return -EINVAL;

    err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen);

    return err ?
err : count;
}

static const struct file_operations fops = {
    .owner = THIS_MODULE,
    .open = simple_open,
    .write = dbg_write,
};

/* Scatter a linear buffer into a command message: the first bytes go into
 * the inline data of the message, the remainder into the chained mailbox
 * blocks, stamping each block with the command token.
 */
static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size,
                            u8 token)
{
    struct mlx5_cmd_prot_block *block;
    struct mlx5_cmd_mailbox *next;
    int copy;

    if (!to || !from)
        return -ENOMEM;

    copy = min_t(int, size, sizeof(to->first.data));
    memcpy(to->first.data, from, copy);
    size -= copy;
    from += copy;

    next = to->next;
    while (size) {
        if (!next) {
            /* this is a BUG */
            return -ENOMEM;
        }

        copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
        block = next->buf;
        memcpy(block->data, from, copy);
        from += copy;
        size -= copy;
        block->token = token;
        next = next->next;
    }

    return 0;
}

/* Gather a command message back into a linear buffer (inverse of
 * mlx5_copy_to_msg()).
 */
static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
{
    struct mlx5_cmd_prot_block *block;
    struct mlx5_cmd_mailbox *next;
    int copy;

    if (!to || !from)
        return -ENOMEM;

    copy = min_t(int, size, sizeof(from->first.data));
    memcpy(to, from->first.data, copy);
    size -= copy;
    to += copy;

    next = from->next;
    while (size) {
        if (!next) {
            /* this is a BUG */
            return -ENOMEM;
        }

        copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
        block = next->buf;

        memcpy(to, block->data, copy);
        to += copy;
        size -= copy;
        next = next->next;
    }

    return 0;
}

/* Allocate one mailbox whose DMA-able buffer comes from the command DMA
 * pool.
 */
static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev,
                                              gfp_t flags)
{
    struct mlx5_cmd_mailbox *mailbox;

    mailbox = kmalloc_obj(*mailbox, flags);
    if (!mailbox)
        return ERR_PTR(-ENOMEM);

    mailbox->buf = dma_pool_zalloc(dev->cmd.pool, flags,
                                   &mailbox->dma);
    if (!mailbox->buf) {
        mlx5_core_dbg(dev, "failed allocation\n");
        kfree(mailbox);
        return ERR_PTR(-ENOMEM);
    }
    mailbox->next = NULL;

    return mailbox;
}

/* Return a mailbox buffer to the DMA pool and free the mailbox. */
static void free_cmd_box(struct mlx5_core_dev *dev,
                         struct mlx5_cmd_mailbox *mailbox)
{
    dma_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
    kfree(mailbox);
}

/* Allocate a command message large enough for @size bytes: the list of
 * mailbox blocks is built in reverse so each block can carry the DMA
 * address of its successor in block->next.
 */
static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
                                               gfp_t flags, int size,
                                               u8 token)
{
    struct mlx5_cmd_mailbox *tmp, *head = NULL;
    struct mlx5_cmd_prot_block *block;
    struct mlx5_cmd_msg *msg;
    int err;
    int n;
    int i;

    msg = kzalloc_obj(*msg, flags);
    if (!msg)
        return ERR_PTR(-ENOMEM);

    msg->len = size;
    n = mlx5_calc_cmd_blocks(msg);

    for (i = 0; i < n; i++) {
        tmp = alloc_cmd_box(dev, flags);
        if (IS_ERR(tmp)) {
            mlx5_core_warn(dev, "failed allocating block\n");
            err = PTR_ERR(tmp);
            goto err_alloc;
        }

        block = tmp->buf;
        tmp->next = head;
        block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
        block->block_num = cpu_to_be32(n - i - 1);
        block->token = token;
        head = tmp;
    }
    msg->next = head;
    return msg;

err_alloc:
    /* Unwind the partially built mailbox chain. */
    while (head) {
        tmp = head->next;
        free_cmd_box(dev, head);
        head = tmp;
    }
    kfree(msg);

    return ERR_PTR(err);
}

/* Free a command message and all of its chained mailboxes. */
static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
                              struct mlx5_cmd_msg *msg)
{
    struct mlx5_cmd_mailbox *head = msg->next;
    struct mlx5_cmd_mailbox *next;

    while (head) {
        next = head->next;
        free_cmd_box(dev, head);
        head = next;
    }
    kfree(msg);
}

/* debugfs "in" file: stage the inbox for a later "go" via dbg_write(). */
static ssize_t data_write(struct file *filp, const char __user *buf,
                          size_t count, loff_t *pos)
{
    struct mlx5_core_dev *dev = filp->private_data;
    struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
    void *ptr;

    if (*pos != 0)
        return -EINVAL;

    kfree(dbg->in_msg);
    dbg->in_msg = NULL;
    dbg->inlen = 0;
    ptr = memdup_user(buf, count);
    if (IS_ERR(ptr))
        return PTR_ERR(ptr);
    dbg->in_msg = ptr;
    dbg->inlen = count;

    *pos = count;

    return count;
}

/* debugfs "out" file: read back the staged outbox buffer. */
static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
                         loff_t *pos)
{
    struct mlx5_core_dev *dev = filp->private_data;
    struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

    if (!dbg->out_msg)
        return -ENOMEM;

    return simple_read_from_buffer(buf, count, pos, dbg->out_msg,
                                   dbg->outlen);
}

static const struct file_operations dfops = {
    .owner = THIS_MODULE,
    .open = simple_open,
    .write = data_write,
    .read = data_read,
};

/* debugfs "out_len" file: report the currently configured outbox length. */
static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count,
                           loff_t *pos)
{
    struct mlx5_core_dev *dev = filp->private_data;
    struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
    char outlen[8];
    int err;

    err =
snprintf(outlen, sizeof(outlen), "%d", dbg->outlen);
    if (err < 0)
        return err;

    return simple_read_from_buffer(buf, count, pos, outlen, err);
}

/* debugfs "out_len" file: set the outbox size and allocate the buffer the
 * next staged command will use.
 */
static ssize_t outlen_write(struct file *filp, const char __user *buf,
                            size_t count, loff_t *pos)
{
    struct mlx5_core_dev *dev = filp->private_data;
    struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
    char outlen_str[8] = {0};
    int outlen;
    void *ptr;
    int err;

    if (*pos != 0 || count > 6)
        return -EINVAL;

    kfree(dbg->out_msg);
    dbg->out_msg = NULL;
    dbg->outlen = 0;

    if (copy_from_user(outlen_str, buf, count))
        return -EFAULT;

    err = sscanf(outlen_str, "%d", &outlen);
    if (err != 1)
        return -EINVAL;

    ptr = kzalloc(outlen, GFP_KERNEL);
    if (!ptr)
        return -ENOMEM;

    dbg->out_msg = ptr;
    dbg->outlen = outlen;

    *pos = count;

    return count;
}

static const struct file_operations olfops = {
    .owner = THIS_MODULE,
    .open = simple_open,
    .write = outlen_write,
    .read = outlen_read,
};

/* Derive the command workqueue name from the device name. */
static void set_wqname(struct mlx5_core_dev *dev)
{
    struct mlx5_cmd *cmd = &dev->cmd;

    snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
             dev_name(dev->device));
}

/* Remove the per-device "cmd" debugfs directory (no-op when debugfs is
 * not available).
 */
static void clean_debug_files(struct mlx5_core_dev *dev)
{
    struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

    if (!mlx5_debugfs_root)
        return;

    debugfs_remove_recursive(dbg->dbg_root);
}

/* Create the "cmd" debugfs directory with the in/out/out_len/status/run
 * files used to inject commands by hand.
 */
static void create_debugfs_files(struct mlx5_core_dev *dev)
{
    struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

    dbg->dbg_root = debugfs_create_dir("cmd", mlx5_debugfs_get_dev_root(dev));

    debugfs_create_file("in", 0400, dbg->dbg_root, dev, &dfops);
    debugfs_create_file("out", 0200, dbg->dbg_root, dev, &dfops);
    debugfs_create_file("out_len", 0600, dbg->dbg_root, dev, &olfops);
    debugfs_create_u8("status", 0600, dbg->dbg_root, &dbg->status);
    debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);
}

/* Quiesce the command interface (take every slot semaphore) and restrict
 * it to @opcode; CMD_ALLOWED_OPCODE_ALL lifts the restriction.
 */
void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode)
{
    struct mlx5_cmd *cmd = &dev->cmd;
    int i;

    for (i = 0; i < cmd->vars.max_reg_cmds; i++)
        down(&cmd->vars.sem);
    down(&cmd->vars.pages_sem);

    cmd->allowed_opcode = opcode;

    up(&cmd->vars.pages_sem);
    for (i = 0; i < cmd->vars.max_reg_cmds; i++)
        up(&cmd->vars.sem);
}

/* Switch the completion mode (polling/events) while no commands are in
 * flight — all slot semaphores are held across the change.
 */
static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
{
    struct mlx5_cmd *cmd = &dev->cmd;
    int i;

    for (i = 0; i < cmd->vars.max_reg_cmds; i++)
        down(&cmd->vars.sem);
    down(&cmd->vars.pages_sem);

    cmd->mode = mode;

    up(&cmd->vars.pages_sem);
    for (i = 0; i < cmd->vars.max_reg_cmds; i++)
        up(&cmd->vars.sem);
}

/* EQ notifier: forward a command-completion EQE vector to
 * mlx5_cmd_comp_handler(), unless the device is in internal error.
 */
static int cmd_comp_notifier(struct notifier_block *nb,
                             unsigned long type, void *data)
{
    struct mlx5_core_dev *dev;
    struct mlx5_cmd *cmd;
    struct mlx5_eqe *eqe;

    cmd = mlx5_nb_cof(nb, struct mlx5_cmd, nb);
    dev = container_of(cmd, struct mlx5_core_dev, cmd);
    eqe = data;

    if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
        return NOTIFY_DONE;

    mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);

    return NOTIFY_OK;
}

/* Register for command EQEs and switch to event-driven completions. */
void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
    MLX5_NB_INIT(&dev->cmd.nb, cmd_comp_notifier, CMD);
    mlx5_eq_notifier_register(dev, &dev->cmd.nb);
    mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
}

/* Switch back to polled completions and stop listening for command EQEs. */
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
{
    mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
    mlx5_eq_notifier_unregister(dev, &dev->cmd.nb);
}

/* Return a command message to its cache free-list, or free it entirely if
 * it was not taken from a cache.
 */
static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
{
    unsigned long flags;

    if (msg->parent) {
        /* Cached message: give it back to its parent cache free list. */
        spin_lock_irqsave(&msg->parent->lock, flags);
        list_add_tail(&msg->list, &msg->parent->head);
        spin_unlock_irqrestore(&msg->parent->lock, flags);
    } else {
        mlx5_free_cmd_msg(dev, msg);
    }
}

/* Command completion processing, invoked either for a real FW completion
 * EQE (@forced == false) or for a driver-generated completion
 * (@forced == true). @vec is a bitmask of command slot indices.
 */
static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
{
    struct mlx5_cmd *cmd = &dev->cmd;
    struct mlx5_cmd_work_ent *ent;
    mlx5_cmd_cbk_t callback;
    void *context;
    int err;
    int i;
    s64 ds;
    struct mlx5_cmd_stats *stats;
    unsigned long flags;
    unsigned long vector;

    /* there can be at most 32 command queues */
    vector = vec & 0xffffffff;
    for (i = 0; i < (1 << cmd->vars.log_sz); i++) {
        if (test_bit(i, &vector)) {
            ent = cmd->ent_arr[i];

            /* Track timed-out slots for mlx5_cmd_all_stalled(). */
            if (forced && ent->ret == -ETIMEDOUT)
                set_bit(MLX5_CMD_ENT_STATE_TIMEDOUT,
                        &ent->state);
            else if (!forced) /* real FW completion */
                clear_bit(MLX5_CMD_ENT_STATE_TIMEDOUT,
                          &ent->state);

            /* if we already completed the command, ignore it */
            if (!test_and_clear_bit(MLX5_CMD_ENT_STATE_PENDING_COMP,
                                    &ent->state)) {
                /* only real completion can free the cmd slot */
                if (!forced) {
                    mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n",
                                  ent->idx);
                    cmd_ent_put(ent);
                }
                continue;
            }

            if (ent->callback && cancel_delayed_work(&ent->cb_timeout_work))
                cmd_ent_put(ent); /* timeout work was canceled */

            if (!forced || /* Real FW completion */
                mlx5_cmd_is_down(dev) || /* No real FW completion is expected */
                !opcode_allowed(cmd, ent->op))
                cmd_ent_put(ent);

            ent->ts2 = ktime_get_ns();
            memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
            dump_command(dev, ent, 0);

            if (vec & MLX5_TRIGGERED_CMD_COMP)
                ent->ret = -ENXIO;

            if (!ent->ret) { /* Command completed by FW */
                if (!cmd->checksum_disabled)
                    ent->ret = verify_signature(ent);

                /* Delivery status lives in the upper bits of status_own. */
                ent->status = ent->lay->status_own >> 1;

                mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
                              ent->ret, deliv_status_to_str(ent->status), ent->status);
            }

            if (ent->callback) {
                ds = ent->ts2 - ent->ts1;
                stats = xa_load(&cmd->stats, ent->op);
                if (stats) {
                    spin_lock_irqsave(&stats->lock, flags);
                    stats->sum += ds;
                    ++stats->n;
                    spin_unlock_irqrestore(&stats->lock, flags);
                }

                callback = ent->callback;
                context = ent->context;
                err = ent->ret ? : ent->status;
                if (err > 0) /* Failed in FW, command didn't execute */
                    err = deliv_status_to_err(err);

                if (!err)
                    err = mlx5_copy_from_msg(ent->uout,
                                             ent->out,
                                             ent->uout_size);

                mlx5_free_cmd_msg(dev, ent->out);
                free_msg(dev, ent->in);

                /* final consumer is done, release ent */
                cmd_ent_put(ent);
                callback(err, context);
            } else {
                /* release wait_func() so mlx5_cmd_invoke()
                 * can make the final ent_put()
                 */
                complete(&ent->done);
            }
        }
    }
}

#define MLX5_MAX_MANAGE_PAGES_CMD_ENT 1
#define MLX5_CMD_MASK ((1UL << (cmd->vars.max_reg_cmds + \
                                MLX5_MAX_MANAGE_PAGES_CMD_ENT)) - 1)

/* Generate driver-side (forced) completions for every currently allocated
 * command slot.
 */
static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
{
    struct mlx5_cmd *cmd = &dev->cmd;
    unsigned long bitmask;
    unsigned long flags;
    u64 vector;
    int i;

    /* wait for pending handlers to complete */
    mlx5_eq_synchronize_cmd_irq(dev);
    spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
    vector = ~dev->cmd.vars.bitmask & MLX5_CMD_MASK;
    if (!vector)
        goto no_trig;

    bitmask = vector;
    /* we must increment the allocated entries refcount before triggering the completions
     * to guarantee pending commands will not get freed in the meanwhile.
     * For that reason, it also has to be done inside the alloc_lock.
     */
    for_each_set_bit(i, &bitmask, (1 << cmd->vars.log_sz))
        cmd_ent_get(cmd->ent_arr[i]);
    vector |= MLX5_TRIGGERED_CMD_COMP;
    spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);

    mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
    mlx5_cmd_comp_handler(dev, vector, true);
    for_each_set_bit(i, &bitmask, (1 << cmd->vars.log_sz))
        cmd_ent_put(cmd->ent_arr[i]);
    return;

no_trig:
    spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
}

/* Drain the command interface: keep triggering completions until every
 * slot semaphore can be acquired, then release them all again.
 */
void mlx5_cmd_flush(struct mlx5_core_dev *dev)
{
    struct mlx5_cmd *cmd = &dev->cmd;
    int i;

    for (i = 0; i < cmd->vars.max_reg_cmds; i++) {
        while (down_trylock(&cmd->vars.sem)) {
            mlx5_cmd_trigger_completions(dev);
            cond_resched();
        }
    }

    while (down_trylock(&cmd->vars.pages_sem)) {
        mlx5_cmd_trigger_completions(dev);
        cond_resched();
    }

    /* Unlock cmdif */
    up(&cmd->vars.pages_sem);
    for (i = 0; i < cmd->vars.max_reg_cmds; i++)
        up(&cmd->vars.sem);
}

/* Get an inbox message: try the size-bucketed message caches first, fall
 * back to a fresh allocation on a miss.
 */
static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
                                      gfp_t gfp)
{
    struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
    struct cmd_msg_cache *ch = NULL;
    struct mlx5_cmd *cmd = &dev->cmd;
    int i;

    if (in_size <= 16)
        goto cache_miss;

    for (i = 0; i < dev->profile.num_cmd_caches; i++) {
        ch = &cmd->cache[i];
        if (in_size > ch->max_inbox_size)
            continue;
        spin_lock_irq(&ch->lock);
        if (list_empty(&ch->head)) {
            spin_unlock_irq(&ch->lock);
            continue;
        }
        msg = list_entry(ch->head.next, typeof(*msg), list);
        /* For cached lists, we must explicitly state what is
         * the real size
         */
        msg->len = in_size;
        list_del(&msg->list);
        spin_unlock_irq(&ch->lock);
        break;
    }

    if (!IS_ERR(msg))
        return msg;

cache_miss:
    msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);
    return msg;
}

/* MANAGE_PAGES goes through the dedicated page queue slot. */
static int is_manage_pages(void *in)
{
    return in_to_opcode(in) == MLX5_CMD_OP_MANAGE_PAGES;
}

/* Any privileged UIDs registered for this device? */
static bool mlx5_has_privileged_uid(struct mlx5_core_dev *dev)
{
    return !xa_empty(&dev->cmd.vars.privileged_uids);
}

/* Is @uid registered as a privileged UID? */
static bool mlx5_cmd_is_privileged_uid(struct mlx5_core_dev *dev,
                                       u16 uid)
{
    return !!xa_load(&dev->cmd.vars.privileged_uids, uid);
}

/* Notes:
 * 1. Callback functions may not sleep
 * 2. Page queue commands do not support asynchronous completion
 */
static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
                    int out_size, mlx5_cmd_cbk_t callback, void *context,
                    bool force_polling)
{
    struct mlx5_cmd_msg *inb, *outb;
    u16 opcode = in_to_opcode(in);
    bool throttle_locked = false;
    bool unpriv_locked = false;
    u16 uid = in_to_uid(in);
    int pages_queue;
    gfp_t gfp;
    u8 token;
    int err;

    if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode))
        return -ENXIO;

    if (!callback) {
        /* The semaphore is already held for callback commands. It was
         * acquired in mlx5_cmd_exec_cb()
         */
        if (uid && mlx5_has_privileged_uid(dev)) {
            /* Rate-limit unprivileged UIDs via a dedicated semaphore. */
            if (!mlx5_cmd_is_privileged_uid(dev, uid)) {
                unpriv_locked = true;
                down(&dev->cmd.vars.unprivileged_sem);
            }
        } else if (mlx5_cmd_is_throttle_opcode(opcode)) {
            throttle_locked = true;
            down(&dev->cmd.vars.throttle_sem);
        }
    }

    pages_queue = is_manage_pages(in);
    gfp = callback ? GFP_ATOMIC : GFP_KERNEL;

    inb = alloc_msg(dev, in_size, gfp);
    if (IS_ERR(inb)) {
        err = PTR_ERR(inb);
        goto out_up;
    }

    token = alloc_token(&dev->cmd);

    err = mlx5_copy_to_msg(inb, in, in_size, token);
    if (err) {
        mlx5_core_warn(dev, "err %d\n", err);
        goto out_in;
    }

    outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token);
    if (IS_ERR(outb)) {
        err = PTR_ERR(outb);
        goto out_in;
    }

    err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
                          pages_queue, token, force_polling);
    if (callback && !err)
        return 0;

    if (err > 0) /* Failed in FW, command didn't execute */
        err = deliv_status_to_err(err);

    if (err)
        goto out_out;

    /* command completed by FW */
    err = mlx5_copy_from_msg(out, outb, out_size);
out_out:
    mlx5_free_cmd_msg(dev, outb);
out_in:
    free_msg(dev, inb);
out_up:
    if (throttle_locked)
        up(&dev->cmd.vars.throttle_sem);
    if (unpriv_locked)
        up(&dev->cmd.vars.unprivileged_sem);

    return err;
}

/* Emit a tracepoint describing a command whose mailbox status reported a
 * failure.
 */
static void mlx5_cmd_err_trace(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out)
{
    u32 syndrome = MLX5_GET(mbox_out, out, syndrome);
    u8 status = MLX5_GET(mbox_out, out, status);

    trace_mlx5_cmd(mlx5_command_str(opcode), opcode, op_mod,
                   cmd_status_str(status), status, syndrome,
                   cmd_status_to_err(status));
}

/* Account a failed command in the per-opcode stats: last errno, and
 * mailbox status/syndrome for -EREMOTEIO failures.
 */
static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
                           u32 syndrome, int err)
{
    const char *namep = mlx5_command_str(opcode);
    struct mlx5_cmd_stats *stats;
    unsigned long flags;

    if (!err || !(strcmp(namep, "unknown command opcode")))
        return;

    stats = xa_load(&dev->cmd.stats, opcode);
    if (!stats)
        return;
    spin_lock_irqsave(&stats->lock, flags);
    stats->failed++;
    if (err < 0)
stats->last_failed_errno = -err; 2061 if (err == -EREMOTEIO) { 2062 stats->failed_mbox_status++; 2063 stats->last_failed_mbox_status = status; 2064 stats->last_failed_syndrome = syndrome; 2065 } 2066 spin_unlock_irqrestore(&stats->lock, flags); 2067 } 2068 2069 /* preserve -EREMOTEIO for outbox.status != OK, otherwise return err as is */ 2070 static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, u16 op_mod, void *out) 2071 { 2072 u32 syndrome = MLX5_GET(mbox_out, out, syndrome); 2073 u8 status = MLX5_GET(mbox_out, out, status); 2074 2075 if (err == -EREMOTEIO) /* -EREMOTEIO is preserved */ 2076 err = -EIO; 2077 2078 if (!err && status != MLX5_CMD_STAT_OK) { 2079 err = -EREMOTEIO; 2080 mlx5_cmd_err_trace(dev, opcode, op_mod, out); 2081 } 2082 2083 cmd_status_log(dev, opcode, status, syndrome, err); 2084 return err; 2085 } 2086 2087 /** 2088 * mlx5_cmd_do - Executes a fw command, wait for completion. 2089 * Unlike mlx5_cmd_exec, this function will not translate or intercept 2090 * outbox.status and will return -EREMOTEIO when 2091 * outbox.status != MLX5_CMD_STAT_OK 2092 * 2093 * @dev: mlx5 core device 2094 * @in: inbox mlx5_ifc command buffer 2095 * @in_size: inbox buffer size 2096 * @out: outbox mlx5_ifc buffer 2097 * @out_size: outbox size 2098 * 2099 * @return: 2100 * -EREMOTEIO : Command executed by FW, outbox.status != MLX5_CMD_STAT_OK. 2101 * Caller must check FW outbox status. 2102 * 0 : Command execution successful, outbox.status == MLX5_CMD_STAT_OK. 
 * < 0 : Command execution couldn't be performed by firmware or driver
 */
int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size)
{
	int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false);
	u16 op_mod = MLX5_GET(mbox_in, in, op_mod);
	u16 opcode = in_to_opcode(in);

	/* Fold outbox status into the -EREMOTEIO-preserving convention */
	return cmd_status_err(dev, err, opcode, op_mod, out);
}
EXPORT_SYMBOL(mlx5_cmd_do);

/**
 * mlx5_cmd_exec - Executes a fw command, wait for completion
 *
 * @dev: mlx5 core device
 * @in: inbox mlx5_ifc command buffer
 * @in_size: inbox buffer size
 * @out: outbox mlx5_ifc buffer
 * @out_size: outbox size
 *
 * @return: 0 if no error, FW command execution was successful
 * and outbox status is ok.
 */
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size)
{
	int err = mlx5_cmd_do(dev, in, in_size, out, out_size);

	/* Unlike mlx5_cmd_do(), translate a bad outbox status (-EREMOTEIO)
	 * into a regular errno for the caller.
	 */
	return mlx5_cmd_check(dev, err, in, out);
}
EXPORT_SYMBOL(mlx5_cmd_exec);

/**
 * mlx5_cmd_exec_polling - Executes a fw command, poll for completion
 * Needed for driver force teardown, when command completion EQ
 * will not be available to complete the command
 *
 * @dev: mlx5 core device
 * @in: inbox mlx5_ifc command buffer
 * @in_size: inbox buffer size
 * @out: outbox mlx5_ifc buffer
 * @out_size: outbox size
 *
 * @return: 0 if no error, FW command execution was successful
 * and outbox status is ok.
 */
int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
			  void *out, int out_size)
{
	int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true);
	u16 op_mod = MLX5_GET(mbox_in, in, op_mod);
	u16 opcode = in_to_opcode(in);

	err = cmd_status_err(dev, err, opcode, op_mod, out);
	return mlx5_cmd_check(dev, err, in, out);
}
EXPORT_SYMBOL(mlx5_cmd_exec_polling);

/* Prepare @ctx for issuing async commands against @dev */
void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
			     struct mlx5_async_ctx *ctx)
{
	ctx->dev = dev;
	/* Starts at 1 to avoid doing wake_up if we are not cleaning up */
	atomic_set(&ctx->num_inflight, 1);
	init_completion(&ctx->inflight_done);
}
EXPORT_SYMBOL(mlx5_cmd_init_async_ctx);

/**
 * mlx5_cmd_cleanup_async_ctx - Clean up an async_ctx
 * @ctx: The ctx to clean
 *
 * Upon return all callbacks given to mlx5_cmd_exec_cb() have been called. The
 * caller must ensure that mlx5_cmd_exec_cb() is not called during or after
 * the call mlx5_cleanup_async_ctx().
 */
void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx)
{
	/* Drop the initial reference; wait only if commands are in flight */
	if (!atomic_dec_and_test(&ctx->num_inflight))
		wait_for_completion(&ctx->inflight_done);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup_async_ctx);

/* Completion handler wrapped around the user callback for
 * mlx5_cmd_exec_cb(): folds outbox status into the errno, invokes the
 * user callback, then releases semaphores and the async-ctx reference.
 */
static void mlx5_cmd_exec_cb_handler(int status, void *_work)
{
	struct mlx5_async_work *work = _work;
	struct mlx5_async_ctx *ctx;
	struct mlx5_core_dev *dev;
	bool throttle_locked;
	bool unpriv_locked;

	ctx = work->ctx;
	dev = ctx->dev;
	/* Snapshot these before the user callback, which may free "work" */
	throttle_locked = work->throttle_locked;
	unpriv_locked = work->unpriv_locked;
	status = cmd_status_err(dev, status, work->opcode, work->op_mod, work->out);
	work->user_callback(status, work);
	/* Can't access "work" from this point on. It could have been freed in
	 * the callback.
	 */
	if (throttle_locked)
		up(&dev->cmd.vars.throttle_sem);
	if (unpriv_locked)
		up(&dev->cmd.vars.unprivileged_sem);
	/* Drop the in-flight reference; wake the cleanup waiter on the last one */
	if (atomic_dec_and_test(&ctx->num_inflight))
		complete(&ctx->inflight_done);
}

/* Issue a firmware command asynchronously; @callback runs from the
 * completion path with the translated status.  May return -EBUSY when
 * the relevant command-slot semaphore cannot be taken without sleeping.
 */
int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
		     void *out, int out_size, mlx5_async_cbk_t callback,
		     struct mlx5_async_work *work)
{
	struct mlx5_core_dev *dev = ctx->dev;
	u16 uid;
	int ret;

	work->ctx = ctx;
	work->user_callback = callback;
	work->opcode = in_to_opcode(in);
	work->op_mod = MLX5_GET(mbox_in, in, op_mod);
	work->out = out;
	work->throttle_locked = false;
	work->unpriv_locked = false;
	uid = in_to_uid(in);

	/* Refuse if cleanup already dropped the last reference */
	if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))
		return -EIO;

	/* Limit command-slot usage: non-privileged UIDs and throttled
	 * opcodes each contend on a dedicated semaphore; trylock because
	 * this path must not sleep.
	 */
	if (uid && mlx5_has_privileged_uid(dev)) {
		if (!mlx5_cmd_is_privileged_uid(dev, uid)) {
			if (down_trylock(&dev->cmd.vars.unprivileged_sem)) {
				ret = -EBUSY;
				goto dec_num_inflight;
			}
			work->unpriv_locked = true;
		}
	} else if (mlx5_cmd_is_throttle_opcode(in_to_opcode(in))) {
		if (down_trylock(&dev->cmd.vars.throttle_sem)) {
			ret = -EBUSY;
			goto dec_num_inflight;
		}
		work->throttle_locked = true;
	}

	ret = cmd_exec(dev, in, in_size, out, out_size,
		       mlx5_cmd_exec_cb_handler, work, false);
	if (ret)
		goto sem_up;

	return 0;

sem_up:
	if (work->throttle_locked)
		up(&dev->cmd.vars.throttle_sem);
	if (work->unpriv_locked)
		up(&dev->cmd.vars.unprivileged_sem);
dec_num_inflight:
	if (atomic_dec_and_test(&ctx->num_inflight))
		complete(&ctx->inflight_done);

	return ret;
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);

/* Grant another vHCA access to a local object, authenticated by the
 * shared access key in @attr.
 */
int mlx5_cmd_allow_other_vhca_access(struct mlx5_core_dev *dev,
				     struct mlx5_cmd_allow_other_vhca_access_attr *attr)
{
	u32 out[MLX5_ST_SZ_DW(allow_other_vhca_access_out)] = {};
	u32 in[MLX5_ST_SZ_DW(allow_other_vhca_access_in)] = {};
	void *key;

	MLX5_SET(allow_other_vhca_access_in,
		 in, opcode, MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS);
	MLX5_SET(allow_other_vhca_access_in,
		 in, object_type_to_be_accessed, attr->obj_type);
	MLX5_SET(allow_other_vhca_access_in,
		 in, object_id_to_be_accessed, attr->obj_id);

	key = MLX5_ADDR_OF(allow_other_vhca_access_in, in, access_key);
	memcpy(key, attr->access_key, sizeof(attr->access_key));

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

/* Create an alias general object referring to @alias_attr->obj_id on
 * vHCA @alias_attr->vhca_id; returns the new local id in @obj_id.
 */
int mlx5_cmd_alias_obj_create(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_alias_obj_create_attr *alias_attr,
			      u32 *obj_id)
{
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
	u32 in[MLX5_ST_SZ_DW(create_alias_obj_in)] = {};
	void *param;
	void *attr;
	void *key;
	int ret;

	attr = MLX5_ADDR_OF(create_alias_obj_in, in, hdr);
	MLX5_SET(general_obj_in_cmd_hdr,
		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr,
		 attr, obj_type, alias_attr->obj_type);
	param = MLX5_ADDR_OF(general_obj_in_cmd_hdr, in, op_param);
	MLX5_SET(general_obj_create_param, param, alias_object, 1);

	attr = MLX5_ADDR_OF(create_alias_obj_in, in, alias_ctx);
	MLX5_SET(alias_context, attr, vhca_id_to_be_accessed, alias_attr->vhca_id);
	MLX5_SET(alias_context, attr, object_id_to_be_accessed, alias_attr->obj_id);

	key = MLX5_ADDR_OF(alias_context, attr, access_key);
	memcpy(key, alias_attr->access_key, sizeof(alias_attr->access_key));

	ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (ret)
		return ret;

	*obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return 0;
}

/* Destroy an alias general object previously created by
 * mlx5_cmd_alias_obj_create().
 */
int mlx5_cmd_alias_obj_destroy(struct mlx5_core_dev *dev, u32 obj_id,
			       u16 obj_type)
{
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] =
		{};
	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, obj_type);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

/* Free every cached mailbox message in all cache buckets */
static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
	struct cmd_msg_cache *ch;
	struct mlx5_cmd_msg *msg;
	struct mlx5_cmd_msg *n;
	int i;

	for (i = 0; i < dev->profile.num_cmd_caches; i++) {
		ch = &dev->cmd.cache[i];
		list_for_each_entry_safe(msg, n, &ch->head, list) {
			list_del(&msg->list);
			mlx5_free_cmd_msg(dev, msg);
		}
	}
}

/* Number of preallocated messages per cache bucket */
static unsigned cmd_cache_num_ent[MLX5_NUM_COMMAND_CACHES] = {
	512, 32, 16, 8, 2
};

/* Largest inbox size (16-byte header + data blocks) each bucket serves */
static unsigned cmd_cache_ent_size[MLX5_NUM_COMMAND_CACHES] = {
	16 + MLX5_CMD_DATA_BLOCK_SIZE,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 2,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 16,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 256,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 512,
};

/* Preallocate mailbox messages into per-size cache buckets.  Allocation
 * failure is not fatal: a partially filled bucket only means messages
 * get allocated on demand later.
 */
static void create_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct cmd_msg_cache *ch;
	struct mlx5_cmd_msg *msg;
	int i;
	int k;

	/* Initialize and fill the caches with initial entries */
	for (k = 0; k < dev->profile.num_cmd_caches; k++) {
		ch = &cmd->cache[k];
		spin_lock_init(&ch->lock);
		INIT_LIST_HEAD(&ch->head);
		ch->num_ent = cmd_cache_num_ent[k];
		ch->max_inbox_size = cmd_cache_ent_size[k];
		for (i = 0; i < ch->num_ent; i++) {
			msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL | __GFP_NOWARN,
						 ch->max_inbox_size, 0);
			if (IS_ERR(msg))
				break;
			msg->parent = ch;
			list_add_tail(&msg->list, &ch->head);
		}
	}
}

/* Allocate the 4K-aligned DMA buffer that holds the command queue.
 * A single page is tried first; if the allocator returned an unaligned
 * buffer, fall back to over-allocating and aligning by hand.
 */
static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	cmd->cmd_alloc_buf = dma_alloc_coherent(mlx5_core_dma_dev(dev), MLX5_ADAPTER_PAGE_SIZE,
						&cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)
		return -ENOMEM;

	/* make sure it is aligned to 4K */
	if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) {
		cmd->cmd_buf = cmd->cmd_alloc_buf;
		cmd->dma = cmd->alloc_dma;
		cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE;
		return 0;
	}

	dma_free_coherent(mlx5_core_dma_dev(dev), MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
	cmd->cmd_alloc_buf = dma_alloc_coherent(mlx5_core_dma_dev(dev),
						2 * MLX5_ADAPTER_PAGE_SIZE - 1,
						&cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)
		return -ENOMEM;

	/* cmd_buf/dma are the aligned views; alloc_* remember the real
	 * allocation for free_cmd_page().
	 */
	cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE);
	cmd->dma = ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE);
	cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1;
	return 0;
}

/* Release the command queue buffer allocated by alloc_cmd_page() */
static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	dma_free_coherent(mlx5_core_dma_dev(dev), cmd->alloc_size, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
}

/* Command interface revision advertised in the device init segment */
static u16 cmdif_rev(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
}

/* One-time software init of the command interface: locks, the command
 * workqueue and debugfs.  The queue itself is programmed later by
 * mlx5_cmd_enable().
 */
int mlx5_cmd_init(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	cmd->checksum_disabled = 1;

	spin_lock_init(&cmd->alloc_lock);
	spin_lock_init(&cmd->token_lock);

	set_wqname(dev);
	cmd->wq = create_singlethread_workqueue(cmd->wq_name);
	if (!cmd->wq) {
		mlx5_core_err(dev, "failed to create command workqueue\n");
		return -ENOMEM;
	}

	mlx5_cmdif_debugfs_init(dev);

	return 0;
}

/* Tear down what mlx5_cmd_init() created */
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	mlx5_cmdif_debugfs_cleanup(dev);
	destroy_workqueue(cmd->wq);
}

int mlx5_cmd_enable(struct mlx5_core_dev *dev) 2460 { 2461 int size = sizeof(struct mlx5_cmd_prot_block); 2462 int align = roundup_pow_of_two(size); 2463 struct mlx5_cmd *cmd = &dev->cmd; 2464 u32 cmd_h, cmd_l; 2465 int err; 2466 2467 memset(&cmd->vars, 0, sizeof(cmd->vars)); 2468 cmd->vars.cmdif_rev = cmdif_rev(dev); 2469 if (cmd->vars.cmdif_rev != CMD_IF_REV) { 2470 mlx5_core_err(dev, 2471 "Driver cmdif rev(%d) differs from firmware's(%d)\n", 2472 CMD_IF_REV, cmd->vars.cmdif_rev); 2473 return -EINVAL; 2474 } 2475 2476 cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff; 2477 cmd->vars.log_sz = cmd_l >> 4 & 0xf; 2478 cmd->vars.log_stride = cmd_l & 0xf; 2479 if (1 << cmd->vars.log_sz > MLX5_MAX_COMMANDS) { 2480 mlx5_core_err(dev, "firmware reports too many outstanding commands %d\n", 2481 1 << cmd->vars.log_sz); 2482 return -EINVAL; 2483 } 2484 2485 if (cmd->vars.log_sz + cmd->vars.log_stride > MLX5_ADAPTER_PAGE_SHIFT) { 2486 mlx5_core_err(dev, "command queue size overflow\n"); 2487 return -EINVAL; 2488 } 2489 2490 cmd->state = MLX5_CMDIF_STATE_DOWN; 2491 cmd->vars.max_reg_cmds = (1 << cmd->vars.log_sz) - 1; 2492 cmd->vars.bitmask = MLX5_CMD_MASK; 2493 2494 sema_init(&cmd->vars.sem, cmd->vars.max_reg_cmds); 2495 sema_init(&cmd->vars.pages_sem, 1); 2496 sema_init(&cmd->vars.throttle_sem, DIV_ROUND_UP(cmd->vars.max_reg_cmds, 2)); 2497 sema_init(&cmd->vars.unprivileged_sem, 2498 DIV_ROUND_UP(cmd->vars.max_reg_cmds, 2)); 2499 2500 xa_init(&cmd->vars.privileged_uids); 2501 2502 cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0); 2503 if (!cmd->pool) { 2504 err = -ENOMEM; 2505 goto err_destroy_xa; 2506 } 2507 2508 err = alloc_cmd_page(dev, cmd); 2509 if (err) 2510 goto err_free_pool; 2511 2512 cmd_h = (u32)((u64)(cmd->dma) >> 32); 2513 cmd_l = (u32)(cmd->dma); 2514 if (cmd_l & 0xfff) { 2515 mlx5_core_err(dev, "invalid command queue address\n"); 2516 err = -ENOMEM; 2517 goto err_cmd_page; 2518 } 2519 2520 iowrite32be(cmd_h, 
&dev->iseg->cmdq_addr_h); 2521 iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz); 2522 2523 /* Make sure firmware sees the complete address before we proceed */ 2524 wmb(); 2525 2526 mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma)); 2527 2528 cmd->mode = CMD_MODE_POLLING; 2529 cmd->allowed_opcode = CMD_ALLOWED_OPCODE_ALL; 2530 2531 create_msg_cache(dev); 2532 create_debugfs_files(dev); 2533 2534 return 0; 2535 2536 err_cmd_page: 2537 free_cmd_page(dev, cmd); 2538 err_free_pool: 2539 dma_pool_destroy(cmd->pool); 2540 err_destroy_xa: 2541 xa_destroy(&dev->cmd.vars.privileged_uids); 2542 return err; 2543 } 2544 2545 void mlx5_cmd_disable(struct mlx5_core_dev *dev) 2546 { 2547 struct mlx5_cmd *cmd = &dev->cmd; 2548 2549 flush_workqueue(cmd->wq); 2550 clean_debug_files(dev); 2551 destroy_msg_cache(dev); 2552 free_cmd_page(dev, cmd); 2553 dma_pool_destroy(cmd->pool); 2554 xa_destroy(&dev->cmd.vars.privileged_uids); 2555 } 2556 2557 void mlx5_cmd_set_state(struct mlx5_core_dev *dev, 2558 enum mlx5_cmdif_state cmdif_state) 2559 { 2560 dev->cmd.state = cmdif_state; 2561 } 2562 2563 int mlx5_cmd_add_privileged_uid(struct mlx5_core_dev *dev, u16 uid) 2564 { 2565 return xa_insert(&dev->cmd.vars.privileged_uids, uid, 2566 xa_mk_value(uid), GFP_KERNEL); 2567 } 2568 EXPORT_SYMBOL(mlx5_cmd_add_privileged_uid); 2569 2570 void mlx5_cmd_remove_privileged_uid(struct mlx5_core_dev *dev, u16 uid) 2571 { 2572 void *data = xa_erase(&dev->cmd.vars.privileged_uids, uid); 2573 2574 WARN(!data, "Privileged UID %u does not exist\n", uid); 2575 } 2576 EXPORT_SYMBOL(mlx5_cmd_remove_privileged_uid); 2577