/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/errno.h>

#include <linux/mlx4/cmd.h>
#include <linux/mlx4/device.h>
#include <linux/semaphore.h>
#include <rdma/ib_smi.h>
#include <linux/delay.h>

#include <asm/io.h>

#include "mlx4.h"
#include "fw.h"

#define CMD_POLL_TOKEN 0xffff
#define INBOX_MASK	0xffffffffffffff00ULL

#define CMD_CHAN_VER 1
#define CMD_CHAN_IF_REV 1

enum {
	/* command completed successfully: */
	CMD_STAT_OK		= 0x00,
	/* Internal error (such as a bus error) occurred while processing command: */
	CMD_STAT_INTERNAL_ERR	= 0x01,
	/* Operation/command not supported or opcode modifier not supported: */
	CMD_STAT_BAD_OP		= 0x02,
	/* Parameter not supported or parameter out of range: */
	CMD_STAT_BAD_PARAM	= 0x03,
	/* System not enabled or bad system state: */
	CMD_STAT_BAD_SYS_STATE	= 0x04,
	/* Attempt to access reserved or unallocated resource: */
	CMD_STAT_BAD_RESOURCE	= 0x05,
	/* Requested resource is currently executing a command, or is otherwise busy: */
	CMD_STAT_RESOURCE_BUSY	= 0x06,
	/* Required capability exceeds device limits: */
	CMD_STAT_EXCEED_LIM	= 0x08,
	/* Resource is not in the appropriate state or ownership: */
	CMD_STAT_BAD_RES_STATE	= 0x09,
	/* Index out of range: */
	CMD_STAT_BAD_INDEX	= 0x0a,
	/* FW image corrupted: */
	CMD_STAT_BAD_NVMEM	= 0x0b,
	/* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
	CMD_STAT_ICM_ERROR	= 0x0c,
	/* Attempt to modify a QP/EE which is not in the presumed state: */
	CMD_STAT_BAD_QP_STATE	= 0x10,
	/* Bad segment parameters (Address/Size): */
	CMD_STAT_BAD_SEG_PARAM	= 0x20,
	/* Memory Region has Memory Windows bound to it: */
	CMD_STAT_REG_BOUND	= 0x21,
	/* HCA local attached memory not present: */
	CMD_STAT_LAM_NOT_PRE	= 0x22,
	/* Bad management packet (silently discarded): */
	CMD_STAT_BAD_PKT	= 0x30,
	/* More outstanding CQEs in CQ than new CQ size: */
	CMD_STAT_BAD_SIZE	= 0x40,
	/* Multi Function device support required: */
	CMD_STAT_MULTI_FUNC_REQ	= 0x50,
};

enum {
	HCR_IN_PARAM_OFFSET	= 0x00,
	HCR_IN_MODIFIER_OFFSET	= 0x08,
	HCR_OUT_PARAM_OFFSET	= 0x0c,
	HCR_TOKEN_OFFSET	= 0x14,
	HCR_STATUS_OFFSET	= 0x18,

	HCR_OPMOD_SHIFT		= 12,
	HCR_T_BIT		= 21,
	HCR_E_BIT		= 22,
	HCR_GO_BIT		= 23
};

enum {
	GO_BIT_TIMEOUT_MSECS	= 10000
};

enum mlx4_vlan_transition {
	MLX4_VLAN_TRANSITION_VST_VST = 0,
	MLX4_VLAN_TRANSITION_VST_VGT = 1,
	MLX4_VLAN_TRANSITION_VGT_VST = 2,
	MLX4_VLAN_TRANSITION_VGT_VGT = 3,
};

struct mlx4_cmd_context {
	struct completion	done;
	int			result;
	int			next;
	u64			out_param;
	u16			token;
	u8			fw_status;
};

static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
				    struct mlx4_vhcr_cmd *in_vhcr);

static int mlx4_status_to_errno(u8 status)
{
	static const int trans_table[] = {
		[CMD_STAT_INTERNAL_ERR]	  = -EIO,
		[CMD_STAT_BAD_OP]	  = -EPERM,
		[CMD_STAT_BAD_PARAM]	  = -EINVAL,
		[CMD_STAT_BAD_SYS_STATE]  = -ENXIO,
		[CMD_STAT_BAD_RESOURCE]	  = -EBADF,
		[CMD_STAT_RESOURCE_BUSY]  = -EBUSY,
		[CMD_STAT_EXCEED_LIM]	  = -ENOMEM,
		[CMD_STAT_BAD_RES_STATE]  = -EBADF,
		[CMD_STAT_BAD_INDEX]	  = -EBADF,
		[CMD_STAT_BAD_NVMEM]	  = -EFAULT,
		[CMD_STAT_ICM_ERROR]	  = -ENFILE,
		[CMD_STAT_BAD_QP_STATE]	  = -EINVAL,
		[CMD_STAT_BAD_SEG_PARAM]  = -EFAULT,
		[CMD_STAT_REG_BOUND]	  = -EBUSY,
		[CMD_STAT_LAM_NOT_PRE]	  = -EAGAIN,
		[CMD_STAT_BAD_PKT]	  = -EINVAL,
		[CMD_STAT_BAD_SIZE]	  = -ENOMEM,
		[CMD_STAT_MULTI_FUNC_REQ] = -EACCES,
	};

	if (status >= ARRAY_SIZE(trans_table) ||
	    (status != CMD_STAT_OK && trans_table[status] == 0))
		return -EIO;

	return trans_table[status];
}

static u8 mlx4_errno_to_status(int errno)
{
	switch (errno) {
	case -EPERM:
		return CMD_STAT_BAD_OP;
	case -EINVAL:
		return CMD_STAT_BAD_PARAM;
	case -ENXIO:
		return CMD_STAT_BAD_SYS_STATE;
	case -EBUSY:
		return CMD_STAT_RESOURCE_BUSY;
	case -ENOMEM:
		return CMD_STAT_EXCEED_LIM;
	case -ENFILE:
		return CMD_STAT_ICM_ERROR;
	default:
		return CMD_STAT_INTERNAL_ERR;
	}
}
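/* Choose the value a command should return once the device has entered the
 * internal-error state. Teardown commands (UNMAP_*, HW2SW_*, CLOSE_*,
 * FREE_RES, QP detach and the like) report success so the cleanup flow can
 * keep going; everything else reports an internal error.
 */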
static int mlx4_internal_err_ret_value(struct mlx4_dev *dev, u16 op,
				       u8 op_modifier)
{
	switch (op) {
	case MLX4_CMD_UNMAP_ICM:
	case MLX4_CMD_UNMAP_ICM_AUX:
	case MLX4_CMD_UNMAP_FA:
	case MLX4_CMD_2RST_QP:
	case MLX4_CMD_HW2SW_EQ:
	case MLX4_CMD_HW2SW_CQ:
	case MLX4_CMD_HW2SW_SRQ:
	case MLX4_CMD_HW2SW_MPT:
	case MLX4_CMD_CLOSE_HCA:
	case MLX4_QP_FLOW_STEERING_DETACH:
	case MLX4_CMD_FREE_RES:
	case MLX4_CMD_CLOSE_PORT:
		return CMD_STAT_OK;

	case MLX4_CMD_QP_ATTACH:
		/* In the Detach case, return success */
		if (op_modifier == 0)
			return CMD_STAT_OK;
		return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);

	default:
		return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
	}
}

static int mlx4_closing_cmd_fatal_error(u16 op, u8 fw_status)
{
	/* Any error during the closing commands below is considered fatal */
	if (op == MLX4_CMD_CLOSE_HCA ||
	    op == MLX4_CMD_HW2SW_EQ ||
	    op == MLX4_CMD_HW2SW_CQ ||
	    op == MLX4_CMD_2RST_QP ||
	    op == MLX4_CMD_HW2SW_SRQ ||
	    op == MLX4_CMD_SYNC_TPT ||
	    op == MLX4_CMD_UNMAP_ICM ||
	    op == MLX4_CMD_UNMAP_ICM_AUX ||
	    op == MLX4_CMD_UNMAP_FA)
		return 1;
	/* Error on MLX4_CMD_HW2SW_MPT is fatal except when fw status equals
	 * CMD_STAT_REG_BOUND.
	 * This status indicates that the memory region has memory windows
	 * bound to it, which may result from invalid user space usage and
	 * is not fatal.
	 */
	if (op == MLX4_CMD_HW2SW_MPT && fw_status != CMD_STAT_REG_BOUND)
		return 1;
	return 0;
}

static int mlx4_cmd_reset_flow(struct mlx4_dev *dev, u16 op, u8 op_modifier,
			       int err)
{
	/* Only if the reset flow is really active is the return code based
	 * on the command; otherwise the current error code is returned.
	 */
	if (mlx4_internal_err_reset) {
		mlx4_enter_error_state(dev->persist);
		err = mlx4_internal_err_ret_value(dev, op, op_modifier);
	}

	return err;
}

static int comm_pending(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u32 status = readl(&priv->mfunc.comm->slave_read);

	return (swab32(status) >> 31) != priv->cmd.comm_toggle;
}

static int mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u32 val;

	/* To avoid writing to unknown addresses after the device state was
	 * changed to internal error and the function was reset,
	 * check the INTERNAL_ERROR flag which is updated under
	 * device_state_mutex lock.
	 */
	mutex_lock(&dev->persist->device_state_mutex);

	if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		mutex_unlock(&dev->persist->device_state_mutex);
		return -EIO;
	}

	priv->cmd.comm_toggle ^= 1;
	val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
	__raw_writel((__force u32) cpu_to_be32(val),
		     &priv->mfunc.comm->slave_write);
	mmiowb();
	mutex_unlock(&dev->persist->device_state_mutex);
	return 0;
}
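/* Poll-mode comm-channel command: post the command word and busy-wait
 * (with cond_resched()) until the master flips the toggle bit in
 * slave_read back to ours, or the timeout expires.
 */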
static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
			      unsigned long timeout)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	unsigned long end;
	int err = 0;
	int ret_from_pending = 0;

	/* First, verify that the master reports correct status */
	if (comm_pending(dev)) {
		mlx4_warn(dev, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n",
			  priv->cmd.comm_toggle, cmd);
		return -EAGAIN;
	}

	/* Write command */
	down(&priv->cmd.poll_sem);
	if (mlx4_comm_cmd_post(dev, cmd, param)) {
		/* Only in case the device state is INTERNAL_ERROR,
		 * mlx4_comm_cmd_post returns with an error
		 */
		err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
		goto out;
	}

	end = msecs_to_jiffies(timeout) + jiffies;
	while (comm_pending(dev) && time_before(jiffies, end))
		cond_resched();
	ret_from_pending = comm_pending(dev);
	if (ret_from_pending) {
		/* check if the slave is trying to boot in the middle of
		 * the FLR process. The only non-zero result in the RESET
		 * command is MLX4_DELAY_RESET_SLAVE.
		 */
		if (cmd == MLX4_COMM_CMD_RESET) {
			err = MLX4_DELAY_RESET_SLAVE;
			goto out;
		} else {
			mlx4_warn(dev, "Communication channel command 0x%x timed out\n",
				  cmd);
			err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
		}
	}

	if (err)
		mlx4_enter_error_state(dev->persist);
out:
	up(&priv->cmd.poll_sem);
	return err;
}
static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 vhcr_cmd,
			      u16 param, u16 op, unsigned long timeout)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_context *context;
	unsigned long end;
	int err = 0;

	down(&cmd->event_sem);

	spin_lock(&cmd->context_lock);
	BUG_ON(cmd->free_head < 0);
	context = &cmd->context[cmd->free_head];
	context->token += cmd->token_mask + 1;
	cmd->free_head = context->next;
	spin_unlock(&cmd->context_lock);

	reinit_completion(&context->done);

	if (mlx4_comm_cmd_post(dev, vhcr_cmd, param)) {
		/* Only in case the device state is INTERNAL_ERROR,
		 * mlx4_comm_cmd_post returns with an error
		 */
		err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
		goto out;
	}

	if (!wait_for_completion_timeout(&context->done,
					 msecs_to_jiffies(timeout))) {
		mlx4_warn(dev, "communication channel command 0x%x (op=0x%x) timed out\n",
			  vhcr_cmd, op);
		goto out_reset;
	}

	err = context->result;
	if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
		mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
			 vhcr_cmd, context->fw_status);
		if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
			goto out_reset;
	}

	/* Wait for the comm channel to become ready. This is necessary to
	 * prevent a race when switching between event and polling mode.
	 * Skip this section if the device is in the FATAL_ERROR state;
	 * in that state no commands are sent via the comm channel until
	 * the device has returned from reset.
	 */
	if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
		end = msecs_to_jiffies(timeout) + jiffies;
		while (comm_pending(dev) && time_before(jiffies, end))
			cond_resched();
	}
	goto out;

out_reset:
	err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
	mlx4_enter_error_state(dev->persist);
out:
	spin_lock(&cmd->context_lock);
	context->next = cmd->free_head;
	cmd->free_head = context - cmd->context;
	spin_unlock(&cmd->context_lock);

	up(&cmd->event_sem);
	return err;
}

int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
		  u16 op, unsigned long timeout)
{
	if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
		return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);

	if (mlx4_priv(dev)->cmd.use_events)
		return mlx4_comm_cmd_wait(dev, cmd, param, op, timeout);
	return mlx4_comm_cmd_poll(dev, cmd, param, timeout);
}

static int cmd_pending(struct mlx4_dev *dev)
{
	u32 status;

	if (pci_channel_offline(dev->persist->pdev))
		return -EIO;

	status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);

	return (status & swab32(1 << HCR_GO_BIT)) ||
		(mlx4_priv(dev)->cmd.toggle ==
		 !!(status & swab32(1 << HCR_T_BIT)));
}
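/* Post a command to the HCR doorbell registers. The words written below
 * follow the HCR_*_OFFSET layout above: the 64-bit input parameter, the
 * input modifier, the 64-bit output parameter, the 16-bit token, and
 * finally the status word carrying the go/event/toggle bits, the opcode
 * modifier and the opcode. Writing the last word (hcr + 6) hands
 * ownership of the HCR to firmware.
 */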
static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
			 u32 in_modifier, u8 op_modifier, u16 op, u16 token,
			 int event)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	u32 __iomem *hcr = cmd->hcr;
	int ret = -EIO;
	unsigned long end;

	mutex_lock(&dev->persist->device_state_mutex);
	/* To avoid writing to unknown addresses after the device state was
	 * changed to internal error and the chip was reset,
	 * check the INTERNAL_ERROR flag which is updated under
	 * device_state_mutex lock.
	 */
	if (pci_channel_offline(dev->persist->pdev) ||
	    (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
		/*
		 * Device is going through error recovery
		 * and cannot accept commands.
		 */
		goto out;
	}

	end = jiffies;
	if (event)
		end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);

	while (cmd_pending(dev)) {
		if (pci_channel_offline(dev->persist->pdev)) {
			/*
			 * Device is going through error recovery
			 * and cannot accept commands.
			 */
			goto out;
		}

		if (time_after_eq(jiffies, end)) {
			mlx4_err(dev, "%s:cmd_pending failed\n", __func__);
			goto out;
		}
		cond_resched();
	}

	/*
	 * We use writel (instead of something like memcpy_toio)
	 * because writes of less than 32 bits to the HCR don't work
	 * (and some architectures such as ia64 implement memcpy_toio
	 * in terms of writeb).
	 */
	__raw_writel((__force u32) cpu_to_be32(in_param >> 32), hcr + 0);
	__raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful), hcr + 1);
	__raw_writel((__force u32) cpu_to_be32(in_modifier), hcr + 2);
	__raw_writel((__force u32) cpu_to_be32(out_param >> 32), hcr + 3);
	__raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
	__raw_writel((__force u32) cpu_to_be32(token << 16), hcr + 5);

	/* __raw_writel may not order writes. */
	wmb();

	__raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT) |
					       (cmd->toggle << HCR_T_BIT) |
					       (event ? (1 << HCR_E_BIT) : 0) |
					       (op_modifier << HCR_OPMOD_SHIFT) |
					       op), hcr + 6);

	/*
	 * Make sure that our HCR writes don't get mixed in with
	 * writes from another CPU starting a FW command.
	 */
	mmiowb();

	cmd->toggle = cmd->toggle ^ 1;

	ret = 0;

out:
	if (ret)
		mlx4_warn(dev, "Could not post command 0x%x: ret=%d, in_param=0x%llx, in_mod=0x%x, op_mod=0x%x\n",
			  op, ret, in_param, in_modifier, op_modifier);
	mutex_unlock(&dev->persist->device_state_mutex);

	return ret;
}

static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
			  int out_is_imm, u32 in_modifier, u8 op_modifier,
			  u16 op, unsigned long timeout)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr;
	int ret;

	mutex_lock(&priv->cmd.slave_cmd_mutex);

	vhcr->in_param = cpu_to_be64(in_param);
	vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0;
	vhcr->in_modifier = cpu_to_be32(in_modifier);
	vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff));
	vhcr->token = cpu_to_be16(CMD_POLL_TOKEN);
	vhcr->status = 0;
	vhcr->flags = !!(priv->cmd.use_events) << 6;

	if (mlx4_is_master(dev)) {
		ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr);
		if (!ret) {
			if (out_is_imm) {
				if (out_param)
					*out_param =
						be64_to_cpu(vhcr->out_param);
				else {
					mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
						 op);
					vhcr->status = CMD_STAT_BAD_PARAM;
				}
			}
			ret = mlx4_status_to_errno(vhcr->status);
		}
		if (ret &&
		    dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
			ret = mlx4_internal_err_ret_value(dev, op, op_modifier);
	} else {
		ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0, op,
				    MLX4_COMM_TIME + timeout);
		if (!ret) {
			if (out_is_imm) {
				if (out_param)
					*out_param =
						be64_to_cpu(vhcr->out_param);
				else {
					mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
						 op);
					vhcr->status = CMD_STAT_BAD_PARAM;
				}
			}
			ret = mlx4_status_to_errno(vhcr->status);
		} else {
			if (dev->persist->state &
			    MLX4_DEVICE_STATE_INTERNAL_ERROR)
				ret = mlx4_internal_err_ret_value(dev, op,
								  op_modifier);
			else
				mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n", op);
		}
	}

	mutex_unlock(&priv->cmd.slave_cmd_mutex);
	return ret;
}
static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
			 int out_is_imm, u32 in_modifier, u8 op_modifier,
			 u16 op, unsigned long timeout)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	void __iomem *hcr = priv->cmd.hcr;
	int err = 0;
	unsigned long end;
	u32 stat;

	down(&priv->cmd.poll_sem);

	if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		/*
		 * Device is going through error recovery
		 * and cannot accept commands.
		 */
		err = mlx4_internal_err_ret_value(dev, op, op_modifier);
		goto out;
	}

	if (out_is_imm && !out_param) {
		mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
			 op);
		err = -EINVAL;
		goto out;
	}

	err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
			    in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
	if (err)
		goto out_reset;

	end = msecs_to_jiffies(timeout) + jiffies;
	while (cmd_pending(dev) && time_before(jiffies, end)) {
		if (pci_channel_offline(dev->persist->pdev)) {
			/*
			 * Device is going through error recovery
			 * and cannot accept commands.
			 */
			err = -EIO;
			goto out_reset;
		}

		if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
			err = mlx4_internal_err_ret_value(dev, op, op_modifier);
			goto out;
		}

		cond_resched();
	}

	if (cmd_pending(dev)) {
		mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
			  op);
		err = -EIO;
		goto out_reset;
	}

	if (out_is_imm)
		*out_param =
			(u64) be32_to_cpu((__force __be32)
					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
			(u64) be32_to_cpu((__force __be32)
					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
	stat = be32_to_cpu((__force __be32)
			   __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
	err = mlx4_status_to_errno(stat);
	if (err) {
		mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
			 op, stat);
		if (mlx4_closing_cmd_fatal_error(op, stat))
			goto out_reset;
		goto out;
	}

out_reset:
	if (err)
		err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
out:
	up(&priv->cmd.poll_sem);
	return err;
}

void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_context *context =
		&priv->cmd.context[token & priv->cmd.token_mask];

	/* previously timed out command completing at long last */
	if (token != context->token)
		return;

	context->fw_status = status;
	context->result    = mlx4_status_to_errno(status);
	context->out_param = out_param;

	complete(&context->done);
}
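/* Event-mode command execution: take a context from the free list, post
 * the command carrying that context's token, and sleep until
 * mlx4_cmd_event() (called from the command EQ handler) matches the token
 * and completes us, or until the timeout expires.
 */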
static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
			 int out_is_imm, u32 in_modifier, u8 op_modifier,
			 u16 op, unsigned long timeout)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_context *context;
	int err = 0;

	down(&cmd->event_sem);

	spin_lock(&cmd->context_lock);
	BUG_ON(cmd->free_head < 0);
	context = &cmd->context[cmd->free_head];
	context->token += cmd->token_mask + 1;
	cmd->free_head = context->next;
	spin_unlock(&cmd->context_lock);

	if (out_is_imm && !out_param) {
		mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
			 op);
		err = -EINVAL;
		goto out;
	}

	reinit_completion(&context->done);

	err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
			    in_modifier, op_modifier, op, context->token, 1);
	if (err)
		goto out_reset;

	if (!wait_for_completion_timeout(&context->done,
					 msecs_to_jiffies(timeout))) {
		mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
			  op);
		err = -EIO;
		goto out_reset;
	}

	err = context->result;
	if (err) {
		/* Since we do not want to have this error message always
		 * displayed at driver start when there are ConnectX2 HCAs
		 * on the host, we demote the error message for this
		 * specific command/input_mod/opcode_mod/fw-status to debug
		 * level.
		 */
		if (op == MLX4_CMD_SET_PORT &&
		    (in_modifier == 1 || in_modifier == 2) &&
		    op_modifier == 0 && context->fw_status == CMD_STAT_BAD_SIZE)
			mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n",
				 op, context->fw_status);
		else
			mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
				 op, context->fw_status);
		if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
			err = mlx4_internal_err_ret_value(dev, op, op_modifier);
		else if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
			goto out_reset;

		goto out;
	}

	if (out_is_imm)
		*out_param = context->out_param;

out_reset:
	if (err)
		err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
out:
	spin_lock(&cmd->context_lock);
	context->next = cmd->free_head;
	cmd->free_head = context - cmd->context;
	spin_unlock(&cmd->context_lock);

	up(&cmd->event_sem);
	return err;
}

int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
	       int out_is_imm, u32 in_modifier, u8 op_modifier,
	       u16 op, unsigned long timeout, int native)
{
	if (pci_channel_offline(dev->persist->pdev))
		return mlx4_cmd_reset_flow(dev, op, op_modifier, -EIO);

	if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
		if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
			return mlx4_internal_err_ret_value(dev, op,
							   op_modifier);
		if (mlx4_priv(dev)->cmd.use_events)
			return mlx4_cmd_wait(dev, in_param, out_param,
					     out_is_imm, in_modifier,
					     op_modifier, op, timeout);
		else
			return mlx4_cmd_poll(dev, in_param, out_param,
					     out_is_imm, in_modifier,
					     op_modifier, op, timeout);
	}
	return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
			      in_modifier, op_modifier, op, timeout);
}
EXPORT_SYMBOL_GPL(__mlx4_cmd);
int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
			   int slave, u64 slave_addr,
			   int size, int is_read)
{
	u64 in_param;
	u64 out_param;

	if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
	    (slave & ~0x7f) | (size & 0xff)) {
		mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx master_addr:0x%llx slave_id:%d size:%d\n",
			 slave_addr, master_addr, slave, size);
		return -EINVAL;
	}

	if (is_read) {
		in_param = (u64) slave | slave_addr;
		out_param = (u64) dev->caps.function | master_addr;
	} else {
		in_param = (u64) dev->caps.function | master_addr;
		out_param = (u64) slave | slave_addr;
	}

	return mlx4_cmd_imm(dev, in_param, &out_param, size, 0,
			    MLX4_CMD_ACCESS_MEM,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}

static int query_pkey_block(struct mlx4_dev *dev, u8 port, u16 index, u16 *pkey,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox)
{
	struct ib_smp *in_mad = (struct ib_smp *)(inbox->buf);
	struct ib_smp *out_mad = (struct ib_smp *)(outbox->buf);
	int err;
	int i;

	if (index & 0x1f)
		return -EINVAL;

	in_mad->attr_mod = cpu_to_be32(index / 32);

	err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);
	if (err)
		return err;

	for (i = 0; i < 32; ++i)
		pkey[i] = be16_to_cpu(((__be16 *) out_mad->data)[i]);

	return err;
}

static int get_full_pkey_table(struct mlx4_dev *dev, u8 port, u16 *table,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox)
{
	int i;
	int err;

	for (i = 0; i < dev->caps.pkey_table_len[port]; i += 32) {
		err = query_pkey_block(dev, port, i, table + i, inbox, outbox);
		if (err)
			return err;
	}

	return 0;
}

#define PORT_CAPABILITY_LOCATION_IN_SMP 20
#define PORT_STATE_OFFSET 32

static enum ib_port_state vf_port_state(struct mlx4_dev *dev, int port, int vf)
{
	if (mlx4_get_slave_port_state(dev, vf, port) == SLAVE_PORT_UP)
		return IB_PORT_ACTIVE;
	else
		return IB_PORT_DOWN;
}

static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	struct ib_smp *smp = inbox->buf;
	u32 index;
	u8 port;
	u8 opcode_modifier;
	u16 *table;
	int err;
	int vidx, pidx;
	int network_view;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct ib_smp *outsmp = outbox->buf;
	__be16 *outtab = (__be16 *)(outsmp->data);
	__be32 slave_cap_mask;
	__be64 slave_node_guid;

	port = vhcr->in_modifier;

	/* network-view bit is for driver use only, and should not be passed to FW */
	opcode_modifier = vhcr->op_modifier & ~0x8; /* clear netw view bit */
	network_view = !!(vhcr->op_modifier & 0x8);

	if (smp->base_version == 1 &&
	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
	    smp->class_version == 1) {
		/* host view is paravirtualized */
		if (!network_view && smp->method == IB_MGMT_METHOD_GET) {
			if (smp->attr_id == IB_SMP_ATTR_PKEY_TABLE) {
				index = be32_to_cpu(smp->attr_mod);
				if (port < 1 || port > dev->caps.num_ports)
					return -EINVAL;
				table = kcalloc((dev->caps.pkey_table_len[port] / 32) + 1,
						sizeof(*table) * 32, GFP_KERNEL);
				if (!table)
					return -ENOMEM;
				/* need to get the full pkey table because the paravirtualized
				 * pkeys may be scattered among several pkey blocks.
				 */
				err = get_full_pkey_table(dev, port, table, inbox, outbox);
				if (!err) {
					for (vidx = index * 32; vidx < (index + 1) * 32; ++vidx) {
						pidx = priv->virt2phys_pkey[slave][port - 1][vidx];
						outtab[vidx % 32] = cpu_to_be16(table[pidx]);
					}
				}
				kfree(table);
				return err;
			}
			if (smp->attr_id == IB_SMP_ATTR_PORT_INFO) {
				/* get the slave-specific caps: run the command,
				 * then modify the response for slaves
				 */
				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
						   vhcr->in_modifier, opcode_modifier,
						   vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
				if (!err && slave != mlx4_master_func_num(dev)) {
					u8 *state = outsmp->data + PORT_STATE_OFFSET;

					*state = (*state & 0xf0) | vf_port_state(dev, port, slave);
					slave_cap_mask = priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
					memcpy(outsmp->data + PORT_CAPABILITY_LOCATION_IN_SMP, &slave_cap_mask, 4);
				}
				return err;
			}
			if (smp->attr_id == IB_SMP_ATTR_GUID_INFO) {
				/* compute slave's gid block */
				smp->attr_mod = cpu_to_be32(slave / 8);
				/* execute cmd */
				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
						   vhcr->in_modifier, opcode_modifier,
						   vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
				if (!err) {
					/* if needed, move slave gid to index 0 */
					if (slave % 8)
						memcpy(outsmp->data,
						       outsmp->data + (slave % 8) * 8, 8);
					/* delete all other gids */
					memset(outsmp->data + 8, 0, 56);
				}
				return err;
			}
			if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) {
				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
						   vhcr->in_modifier, opcode_modifier,
						   vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
				if (!err) {
					slave_node_guid = mlx4_get_slave_node_guid(dev, slave);
					memcpy(outsmp->data + 12, &slave_node_guid, 8);
				}
				return err;
			}
		}
	}

	/* Non-privileged VFs are only allowed "host" view LID-routed 'Get' MADs.
	 * These are the MADs used by ib verbs (such as ib_query_gids).
	 */
	if (slave != mlx4_master_func_num(dev) &&
	    !mlx4_vf_smi_enabled(dev, slave, port)) {
		if (!(smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
		      smp->method == IB_MGMT_METHOD_GET) || network_view) {
			mlx4_err(dev, "Unprivileged slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x, view=%s for attr 0x%x. Rejecting\n",
				 slave, smp->mgmt_class, smp->method,
				 network_view ? "Network" : "Host",
				 be16_to_cpu(smp->attr_id));
			return -EPERM;
		}
	}

	return mlx4_cmd_box(dev, inbox->dma, outbox->dma,
			    vhcr->in_modifier, opcode_modifier,
			    vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
}

static int mlx4_CMD_EPERM_wrapper(struct mlx4_dev *dev, int slave,
				  struct mlx4_vhcr *vhcr,
				  struct mlx4_cmd_mailbox *inbox,
				  struct mlx4_cmd_mailbox *outbox,
				  struct mlx4_cmd_info *cmd)
{
	return -EPERM;
}
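/* Generic wrapper: re-issue a slave's command on the wire unchanged,
 * substituting the master-side mailbox DMA addresses for the slave's
 * in/out parameters. When encode_slave_id is set, the slave id is placed
 * in the low byte of in_param so that resource-tracking code can recover
 * which function owns the resource.
 */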
int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
		     struct mlx4_vhcr *vhcr,
		     struct mlx4_cmd_mailbox *inbox,
		     struct mlx4_cmd_mailbox *outbox,
		     struct mlx4_cmd_info *cmd)
{
	u64 in_param;
	u64 out_param;
	int err;

	in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param;
	out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param;
	if (cmd->encode_slave_id) {
		in_param &= 0xffffffffffffff00ll;
		in_param |= slave;
	}

	err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm,
			 vhcr->in_modifier, vhcr->op_modifier, vhcr->op,
			 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	if (cmd->out_is_imm)
		vhcr->out_param = out_param;

	return err;
}

static struct mlx4_cmd_info cmd_info[] = {
	{
		.opcode = MLX4_CMD_QUERY_FW,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_FW_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_HCA,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_QUERY_DEV_CAP,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_DEV_CAP_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_FUNC_CAP,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_FUNC_CAP_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_ADAPTER,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_INIT_PORT,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_INIT_PORT_wrapper
	},
	{
		.opcode = MLX4_CMD_CLOSE_PORT,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_CLOSE_PORT_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_PORT,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_PORT_wrapper
	},
	{
		.opcode = MLX4_CMD_SET_PORT,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_SET_PORT_wrapper
	},
	{
		.opcode = MLX4_CMD_MAP_EQ,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_MAP_EQ_wrapper
	},
	{
		.opcode = MLX4_CMD_SW2HW_EQ,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = true,
		.verify = NULL,
		.wrapper = mlx4_SW2HW_EQ_wrapper
	},
	{
		.opcode = MLX4_CMD_HW_HEALTH_CHECK,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_NOP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_CONFIG_DEV,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_CONFIG_DEV_wrapper
	},
	{
		.opcode = MLX4_CMD_ALLOC_RES,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = true,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_ALLOC_RES_wrapper
	},
	{
		.opcode = MLX4_CMD_FREE_RES,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_FREE_RES_wrapper
	},
	{
		.opcode = MLX4_CMD_SW2HW_MPT,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = true,
		.verify = NULL,
		.wrapper = mlx4_SW2HW_MPT_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_MPT,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_MPT_wrapper
	},
	{
		.opcode = MLX4_CMD_HW2SW_MPT,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_HW2SW_MPT_wrapper
	},
	{
		.opcode = MLX4_CMD_READ_MTT,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_WRITE_MTT,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_WRITE_MTT_wrapper
	},
	{
		.opcode = MLX4_CMD_SYNC_TPT,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_HW2SW_EQ,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = true,
		.verify = NULL,
		.wrapper = mlx4_HW2SW_EQ_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_EQ,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = true,
		.verify = NULL,
		.wrapper = mlx4_QUERY_EQ_wrapper
	},
	{
		.opcode = MLX4_CMD_SW2HW_CQ,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = true,
		.verify = NULL,
		.wrapper = mlx4_SW2HW_CQ_wrapper
	},
	{
		.opcode = MLX4_CMD_HW2SW_CQ,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_HW2SW_CQ_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_CQ,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_CQ_wrapper
	},
	{
		.opcode = MLX4_CMD_MODIFY_CQ,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = true,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_MODIFY_CQ_wrapper
	},
	{
		.opcode = MLX4_CMD_SW2HW_SRQ,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = true,
		.verify = NULL,
		.wrapper = mlx4_SW2HW_SRQ_wrapper
	},
	{
		.opcode = MLX4_CMD_HW2SW_SRQ,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_HW2SW_SRQ_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_SRQ,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_SRQ_wrapper
	},
	{
		.opcode = MLX4_CMD_ARM_SRQ,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_ARM_SRQ_wrapper
	},
	{
		.opcode = MLX4_CMD_RST2INIT_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = true,
		.verify = NULL,
		.wrapper = mlx4_RST2INIT_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_INIT2INIT_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_INIT2INIT_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_INIT2RTR_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_INIT2RTR_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_RTR2RTS_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_RTR2RTS_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_RTS2RTS_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_RTS2RTS_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_SQERR2RTS_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_SQERR2RTS_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_2ERR_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_RTS2SQD_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_SQD2SQD_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_SQD2SQD_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_SQD2RTS_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_SQD2RTS_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_2RST_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_2RST_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_QP,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_SUSPEND_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_UNSUSPEND_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_UPDATE_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_UPDATE_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_GET_OP_REQ,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_CMD_EPERM_wrapper,
	},
	{
		.opcode = MLX4_CMD_CONF_SPECIAL_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL, /* XXX verify: only demux can do this */
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_MAD_IFC,
		.has_inbox = true,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_MAD_IFC_wrapper
	},
	{
		.opcode = MLX4_CMD_MAD_DEMUX,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_CMD_EPERM_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_IF_STAT,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_IF_STAT_wrapper
	},
	{
		.opcode = MLX4_CMD_ACCESS_REG,
		.has_inbox = true,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_ACCESS_REG_wrapper,
	},
	/* Native multicast commands are not available for guests */
	{
		.opcode = MLX4_CMD_QP_ATTACH,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QP_ATTACH_wrapper
	},
	{
		.opcode = MLX4_CMD_PROMISC,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_PROMISC_wrapper
	},
	/* Ethernet specific commands */
	{
		.opcode = MLX4_CMD_SET_VLAN_FLTR,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_SET_VLAN_FLTR_wrapper
	},
	{
		.opcode = MLX4_CMD_SET_MCAST_FLTR,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_SET_MCAST_FLTR_wrapper
	},
	{
		.opcode = MLX4_CMD_DUMP_ETH_STATS,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_DUMP_ETH_STATS_wrapper
	},
	{
		.opcode = MLX4_CMD_INFORM_FLR_DONE,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	/* flow steering commands */
	{
		.opcode = MLX4_QP_FLOW_STEERING_ATTACH,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = true,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QP_FLOW_STEERING_ATTACH_wrapper
	},
	{
		.opcode = MLX4_QP_FLOW_STEERING_DETACH,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper
	},
	{
		.opcode = MLX4_FLOW_STEERING_IB_UC_QP_RANGE,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_CMD_EPERM_wrapper
	},
	{
		.opcode = MLX4_CMD_VIRT_PORT_MAP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_CMD_EPERM_wrapper
	},
};
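/* Execute one virtual-HCR command on behalf of a slave: DMA the vHCR from
 * the slave (unless one was passed in), look up the opcode in cmd_info[],
 * copy in the inbox, run the verifier and wrapper (or issue the command
 * directly), then DMA the outbox and the vHCR status back to the slave.
 */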
static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
				    struct mlx4_vhcr_cmd *in_vhcr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_info *cmd = NULL;
	struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? in_vhcr : priv->mfunc.vhcr;
	struct mlx4_vhcr *vhcr;
	struct mlx4_cmd_mailbox *inbox = NULL;
	struct mlx4_cmd_mailbox *outbox = NULL;
	u64 in_param;
	u64 out_param;
	int ret = 0;
	int i;
	int err = 0;

	/* Create sw representation of Virtual HCR */
	vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL);
	if (!vhcr)
		return -ENOMEM;

	/* DMA in the vHCR */
	if (!in_vhcr) {
		ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
				      priv->mfunc.master.slave_state[slave].vhcr_dma,
				      ALIGN(sizeof(struct mlx4_vhcr_cmd),
					    MLX4_ACCESS_MEM_ALIGN), 1);
		if (ret) {
			if (!(dev->persist->state &
			      MLX4_DEVICE_STATE_INTERNAL_ERROR))
				mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n",
					 __func__, ret);
			kfree(vhcr);
			return ret;
		}
	}

	/* Fill SW VHCR fields */
	vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param);
	vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param);
	vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier);
	vhcr->token = be16_to_cpu(vhcr_cmd->token);
	vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff;
	vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12);
	vhcr->e_bit = vhcr_cmd->flags & (1 << 6);

	/* Lookup command */
	for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) {
		if (vhcr->op == cmd_info[i].opcode) {
			cmd = &cmd_info[i];
			break;
		}
	}
	if (!cmd) {
		mlx4_err(dev, "Unknown command:0x%x accepted from slave:%d\n",
			 vhcr->op, slave);
		vhcr_cmd->status = CMD_STAT_BAD_PARAM;
		goto out_status;
	}

	/* Read inbox */
	if (cmd->has_inbox) {
		vhcr->in_param &= INBOX_MASK;
		inbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(inbox)) {
			vhcr_cmd->status = CMD_STAT_BAD_SIZE;
			inbox = NULL;
			goto out_status;
		}

		ret = mlx4_ACCESS_MEM(dev, inbox->dma, slave,
				      vhcr->in_param,
				      MLX4_MAILBOX_SIZE, 1);
		if (ret) {
			if (!(dev->persist->state &
			      MLX4_DEVICE_STATE_INTERNAL_ERROR))
				mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n",
					 __func__, cmd->opcode);
			vhcr_cmd->status = CMD_STAT_INTERNAL_ERR;
			goto out_status;
		}
	}

	/* Apply permission and bound checks if applicable */
	if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
		mlx4_warn(dev, "Command:0x%x from slave: %d failed protection checks for resource_id:%d\n",
			  vhcr->op, slave, vhcr->in_modifier);
		vhcr_cmd->status = CMD_STAT_BAD_OP;
		goto out_status;
	}

	/* Allocate outbox */
	if (cmd->has_outbox) {
		outbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(outbox)) {
			vhcr_cmd->status = CMD_STAT_BAD_SIZE;
			outbox = NULL;
			goto out_status;
		}
	}

	/* Execute the command! */
	if (cmd->wrapper) {
		err = cmd->wrapper(dev, slave, vhcr, inbox, outbox,
				   cmd);
		if (cmd->out_is_imm)
			vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
	} else {
		in_param = cmd->has_inbox ? (u64) inbox->dma :
			vhcr->in_param;
		out_param = cmd->has_outbox ? (u64) outbox->dma :
			vhcr->out_param;
		err = __mlx4_cmd(dev, in_param, &out_param,
				 cmd->out_is_imm, vhcr->in_modifier,
				 vhcr->op_modifier, vhcr->op,
				 MLX4_CMD_TIME_CLASS_A,
				 MLX4_CMD_NATIVE);

		if (cmd->out_is_imm) {
			vhcr->out_param = out_param;
			vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
		}
	}

	if (err) {
		if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR))
			mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
				  vhcr->op, slave, vhcr->errno, err);
		vhcr_cmd->status = mlx4_errno_to_status(err);
		goto out_status;
	}

	/* Write outbox if command completed successfully */
	if (cmd->has_outbox && !vhcr_cmd->status) {
		ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave,
				      vhcr->out_param,
				      MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED);
		if (ret) {
			/* If we failed to write back the outbox after the
			 * command was successfully executed, we must fail
			 * this slave, as it is now in an undefined state.
			 */
			if (!(dev->persist->state &
			      MLX4_DEVICE_STATE_INTERNAL_ERROR))
				mlx4_err(dev, "%s:Failed writing outbox\n", __func__);
			goto out;
		}
	}

out_status:
	/* DMA back vhcr result */
	if (!in_vhcr) {
		ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
				      priv->mfunc.master.slave_state[slave].vhcr_dma,
				      ALIGN(sizeof(struct mlx4_vhcr),
					    MLX4_ACCESS_MEM_ALIGN),
				      MLX4_CMD_WRAPPED);
		if (ret)
			mlx4_err(dev, "%s:Failed writing vhcr result\n",
				 __func__);
		else if (vhcr->e_bit &&
			 mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
			mlx4_warn(dev, "Failed to generate command completion eqe for slave %d\n",
				  slave);
	}

out:
	kfree(vhcr);
	mlx4_free_cmd_mailbox(dev, inbox);
	mlx4_free_cmd_mailbox(dev, outbox);
	return ret;
}
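/* Apply a new admin VLAN/QoS/link-state directive to an active VF without
 * waiting for it to restart: register the new VLAN if needed, update the
 * operational state, and queue a work item that walks the VF's QPs with
 * UPDATE_QP. Returns -1 when the slave is not active or UPDATE_QP is
 * unsupported, in which case only the link state is recorded.
 */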
static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
						   int slave, int port)
{
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_vport_state *vp_admin;
	struct mlx4_vf_immed_vlan_work *work;
	struct mlx4_dev *dev = &(priv->dev);
	int err;
	int admin_vlan_ix = NO_INDX;

	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
	vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];

	if (vp_oper->state.default_vlan == vp_admin->default_vlan &&
	    vp_oper->state.default_qos == vp_admin->default_qos &&
	    vp_oper->state.link_state == vp_admin->link_state)
		return 0;

	if (!(priv->mfunc.master.slave_state[slave].active &&
	      dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)) {
		/* even if the UPDATE_QP command isn't supported, we still want
		 * to set this VF link according to the admin directive
		 */
		vp_oper->state.link_state = vp_admin->link_state;
		return -1;
	}

	mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n",
		 slave, port);
	mlx4_dbg(dev, "vlan %d QoS %d link down %d\n",
		 vp_admin->default_vlan, vp_admin->default_qos,
		 vp_admin->link_state);

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (vp_oper->state.default_vlan != vp_admin->default_vlan) {
		if (MLX4_VGT != vp_admin->default_vlan) {
			err = __mlx4_register_vlan(&priv->dev, port,
						   vp_admin->default_vlan,
						   &admin_vlan_ix);
			if (err) {
				kfree(work);
				mlx4_warn(&priv->dev,
					  "No vlan resources slave %d, port %d\n",
					  slave, port);
				return err;
			}
		} else {
			admin_vlan_ix = NO_INDX;
		}
		work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
		mlx4_dbg(&priv->dev,
			 "alloc vlan %d idx %d slave %d port %d\n",
			 (int)(vp_admin->default_vlan),
			 admin_vlan_ix, slave, port);
	}

	/* save original vlan ix and vlan id */
	work->orig_vlan_id = vp_oper->state.default_vlan;
	work->orig_vlan_ix = vp_oper->vlan_idx;

	/* handle new qos */
	if (vp_oper->state.default_qos != vp_admin->default_qos)
		work->flags |= MLX4_VF_IMMED_VLAN_FLAG_QOS;

	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN)
		vp_oper->vlan_idx = admin_vlan_ix;

	vp_oper->state.default_vlan = vp_admin->default_vlan;
	vp_oper->state.default_qos = vp_admin->default_qos;
	vp_oper->state.link_state = vp_admin->link_state;

	if (vp_admin->link_state == IFLA_VF_LINK_STATE_DISABLE)
		work->flags |= MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE;

	/* iterate over QPs owned by this slave, using UPDATE_QP */
	work->port = port;
	work->slave = slave;
	work->qos = vp_oper->state.default_qos;
	work->vlan_id = vp_oper->state.default_vlan;
	work->vlan_ix = vp_oper->vlan_idx;
	work->priv = priv;
	INIT_WORK(&work->work, mlx4_vf_immed_vlan_work_handler);
	queue_work(priv->mfunc.master.comm_wq, &work->work);

	return 0;
}

static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
{
	int port, err;
	struct mlx4_vport_state *vp_admin;
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
			&priv->dev, slave);
	int min_port = find_first_bit(actv_ports.ports,
				      priv->dev.caps.num_ports) + 1;
	int max_port = min_port - 1 +
		bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);

	for (port = min_port; port <= max_port; port++) {
		if (!test_bit(port - 1, actv_ports.ports))
			continue;
		priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
			priv->mfunc.master.vf_admin[slave].enable_smi[port];
		vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
		vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
		vp_oper->state = *vp_admin;
		if (MLX4_VGT != vp_admin->default_vlan) {
			err = __mlx4_register_vlan(&priv->dev, port,
						   vp_admin->default_vlan,
						   &(vp_oper->vlan_idx));
			if (err) {
				vp_oper->vlan_idx = NO_INDX;
				mlx4_warn(&priv->dev,
					  "No vlan resources slave %d, port %d\n",
					  slave, port);
				return err;
			}
			mlx4_dbg(&priv->dev, "alloc vlan %d idx %d slave %d port %d\n",
				 (int)(vp_oper->state.default_vlan),
				 vp_oper->vlan_idx, slave, port);
		}
		if (vp_admin->spoofchk) {
			vp_oper->mac_idx = __mlx4_register_mac(&priv->dev,
							       port,
							       vp_admin->mac);
			if (0 > vp_oper->mac_idx) {
				err = vp_oper->mac_idx;
				vp_oper->mac_idx = NO_INDX;
				mlx4_warn(&priv->dev,
					  "No mac resources slave %d, port %d\n",
					  slave, port);
				return err;
			}
			mlx4_dbg(&priv->dev, "alloc mac %llx idx %d slave %d port %d\n",
				 vp_oper->state.mac, vp_oper->mac_idx, slave, port);
		}
	}
	return 0;
}

static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave)
{
	int port;
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
			&priv->dev, slave);
	int min_port = find_first_bit(actv_ports.ports,
				      priv->dev.caps.num_ports) + 1;
	int max_port = min_port - 1 +
		bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);

	for (port = min_port; port <= max_port; port++) {
		if (!test_bit(port - 1, actv_ports.ports))
			continue;
		priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
			MLX4_VF_SMI_DISABLED;
		vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
		if (NO_INDX != vp_oper->vlan_idx) {
			__mlx4_unregister_vlan(&priv->dev,
					       port, vp_oper->state.default_vlan);
			vp_oper->vlan_idx = NO_INDX;
		}
		if (NO_INDX != vp_oper->mac_idx) {
			__mlx4_unregister_mac(&priv->dev, port, vp_oper->state.mac);
			vp_oper->mac_idx = NO_INDX;
		}
	}
}
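/* Comm-channel state machine, run on the master for each slave command.
 * A slave boots by sending RESET and then VHCR0..VHCR2/VHCR_EN, which
 * deliver its vHCR DMA address 16 bits at a time; once enabled it posts
 * commands with VHCR_POST. Any out-of-order command resets the slave.
 */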

static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
			       u16 param, u8 toggle)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	u32 reply;
	u8 is_going_down = 0;
	int i;
	unsigned long flags;

	slave_state[slave].comm_toggle ^= 1;
	reply = (u32) slave_state[slave].comm_toggle << 31;
	if (toggle != slave_state[slave].comm_toggle) {
		mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER STATE COMPROMISED ***\n",
			  toggle, slave);
		goto reset_slave;
	}
	if (cmd == MLX4_COMM_CMD_RESET) {
		mlx4_warn(dev, "Received reset from slave:%d\n", slave);
		slave_state[slave].active = false;
		slave_state[slave].old_vlan_api = false;
		mlx4_master_deactivate_admin_state(priv, slave);
		for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
			slave_state[slave].event_eq[i].eqn = -1;
			slave_state[slave].event_eq[i].token = 0;
		}
		/* check if we are in the middle of FLR process,
		 * if so return "retry" status to the slave
		 */
		if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd)
			goto inform_slave_state;

		mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, slave);

		/* write the version in the event field */
		reply |= mlx4_comm_get_version();

		goto reset_slave;
	}
	/* command from slave in the middle of FLR */
	if (cmd != MLX4_COMM_CMD_RESET &&
	    MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
		mlx4_warn(dev, "slave:%d is trying to run cmd(0x%x) in the middle of FLR\n",
			  slave, cmd);
		return;
	}

	switch (cmd) {
	case MLX4_COMM_CMD_VHCR0:
		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET)
			goto reset_slave;
		slave_state[slave].vhcr_dma = ((u64) param) << 48;
		priv->mfunc.master.slave_state[slave].cookie = 0;
		break;
	case MLX4_COMM_CMD_VHCR1:
		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
			goto reset_slave;
		slave_state[slave].vhcr_dma |= ((u64) param) << 32;
		break;
	case MLX4_COMM_CMD_VHCR2:
		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1)
			goto reset_slave;
		slave_state[slave].vhcr_dma |= ((u64) param) << 16;
		break;
	case MLX4_COMM_CMD_VHCR_EN:
		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
			goto reset_slave;
		slave_state[slave].vhcr_dma |= param;
		if (mlx4_master_activate_admin_state(priv, slave))
			goto reset_slave;
		slave_state[slave].active = true;
		mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, slave);
		break;
	case MLX4_COMM_CMD_VHCR_POST:
		if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
		    (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST)) {
			mlx4_warn(dev, "slave:%d is out of sync, cmd=0x%x, last command=0x%x, reset is needed\n",
				  slave, cmd, slave_state[slave].last_cmd);
			goto reset_slave;
		}

		mutex_lock(&priv->cmd.slave_cmd_mutex);
		if (mlx4_master_process_vhcr(dev, slave, NULL)) {
			mlx4_err(dev, "Failed processing vhcr for slave:%d, resetting slave\n",
				 slave);
			mutex_unlock(&priv->cmd.slave_cmd_mutex);
			goto reset_slave;
		}
		mutex_unlock(&priv->cmd.slave_cmd_mutex);
		break;
	default:
		mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
		goto reset_slave;
	}
	spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
	if (!slave_state[slave].is_slave_going_down)
		slave_state[slave].last_cmd = cmd;
	else
		is_going_down = 1;
	spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
	if (is_going_down) {
		mlx4_warn(dev, "Slave is going down, aborting command(%d) executing from slave:%d\n",
			  cmd, slave);
		return;
	}
	__raw_writel((__force u32) cpu_to_be32(reply),
		     &priv->mfunc.comm[slave].slave_read);
	mmiowb();

	return;

reset_slave:
	/* cleanup any slave resources */
	if (dev->persist->interface_state & MLX4_INTERFACE_STATE_UP)
		mlx4_delete_all_resources_for_slave(dev, slave);

	if (cmd != MLX4_COMM_CMD_RESET) {
		mlx4_warn(dev, "Turn on internal error to force reset, slave=%d, cmd=0x%x\n",
			  slave, cmd);
		/* Turn on internal error, letting the slave reset itself
		 * immediately; otherwise it might wait until the command
		 * timeout expires.
		 */
		reply |= ((u32)COMM_CHAN_EVENT_INTERNAL_ERR);
	}

	spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
	if (!slave_state[slave].is_slave_going_down)
		slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
	spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
	/* with slave in the middle of FLR, no need to clean resources again. */
inform_slave_state:
	memset(&slave_state[slave].event_eq, 0,
	       sizeof(struct mlx4_slave_event_eq_info));
	__raw_writel((__force u32) cpu_to_be32(reply),
		     &priv->mfunc.comm[slave].slave_read);
	wmb();
}
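
/*
 * Editor's note: illustrative sketch, not driver code. In
 * mlx4_master_do_cmd() above, the slave hands the master its 64-bit VHCR
 * DMA address in four 16-bit pieces, enforced in order by the last_cmd
 * checks:
 *
 *	vhcr_dma  = (u64)param0 << 48;	// MLX4_COMM_CMD_VHCR0
 *	vhcr_dma |= (u64)param1 << 32;	// MLX4_COMM_CMD_VHCR1
 *	vhcr_dma |= (u64)param2 << 16;	// MLX4_COMM_CMD_VHCR2
 *	vhcr_dma |= param3;		// MLX4_COMM_CMD_VHCR_EN
 *
 * Any out-of-order piece resets the slave, so a half-programmed address
 * can never be used.
 */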
Resynching.\n", 2120 slave, slt, 2121 master->slave_state[slave].comm_toggle); 2122 master->slave_state[slave].comm_toggle = 2123 slt; 2124 } 2125 mlx4_master_do_cmd(dev, slave, 2126 comm_cmd >> 16 & 0xff, 2127 comm_cmd & 0xffff, toggle); 2128 ++served; 2129 } 2130 } 2131 } 2132 2133 if (reported && reported != served) 2134 mlx4_warn(dev, "Got command event with bitmask from %d slaves but %d were served\n", 2135 reported, served); 2136 2137 if (mlx4_ARM_COMM_CHANNEL(dev)) 2138 mlx4_warn(dev, "Failed to arm comm channel events\n"); 2139 } 2140 2141 static int sync_toggles(struct mlx4_dev *dev) 2142 { 2143 struct mlx4_priv *priv = mlx4_priv(dev); 2144 u32 wr_toggle; 2145 u32 rd_toggle; 2146 unsigned long end; 2147 2148 wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write)); 2149 if (wr_toggle == 0xffffffff) 2150 end = jiffies + msecs_to_jiffies(30000); 2151 else 2152 end = jiffies + msecs_to_jiffies(5000); 2153 2154 while (time_before(jiffies, end)) { 2155 rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read)); 2156 if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) { 2157 /* PCI might be offline */ 2158 msleep(100); 2159 wr_toggle = swab32(readl(&priv->mfunc.comm-> 2160 slave_write)); 2161 continue; 2162 } 2163 2164 if (rd_toggle >> 31 == wr_toggle >> 31) { 2165 priv->cmd.comm_toggle = rd_toggle >> 31; 2166 return 0; 2167 } 2168 2169 cond_resched(); 2170 } 2171 2172 /* 2173 * we could reach here if for example the previous VM using this 2174 * function misbehaved and left the channel with unsynced state. We 2175 * should fix this here and give this VM a chance to use a properly 2176 * synced channel 2177 */ 2178 mlx4_warn(dev, "recovering from previously mis-behaved VM\n"); 2179 __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read); 2180 __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write); 2181 priv->cmd.comm_toggle = 0; 2182 2183 return 0; 2184 } 2185 2186 int mlx4_multi_func_init(struct mlx4_dev *dev) 2187 { 2188 struct mlx4_priv *priv = mlx4_priv(dev); 2189 struct mlx4_slave_state *s_state; 2190 int i, j, err, port; 2191 2192 if (mlx4_is_master(dev)) 2193 priv->mfunc.comm = 2194 ioremap(pci_resource_start(dev->persist->pdev, 2195 priv->fw.comm_bar) + 2196 priv->fw.comm_base, MLX4_COMM_PAGESIZE); 2197 else 2198 priv->mfunc.comm = 2199 ioremap(pci_resource_start(dev->persist->pdev, 2) + 2200 MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE); 2201 if (!priv->mfunc.comm) { 2202 mlx4_err(dev, "Couldn't map communication vector\n"); 2203 goto err_vhcr; 2204 } 2205 2206 if (mlx4_is_master(dev)) { 2207 priv->mfunc.master.slave_state = 2208 kzalloc(dev->num_slaves * 2209 sizeof(struct mlx4_slave_state), GFP_KERNEL); 2210 if (!priv->mfunc.master.slave_state) 2211 goto err_comm; 2212 2213 priv->mfunc.master.vf_admin = 2214 kzalloc(dev->num_slaves * 2215 sizeof(struct mlx4_vf_admin_state), GFP_KERNEL); 2216 if (!priv->mfunc.master.vf_admin) 2217 goto err_comm_admin; 2218 2219 priv->mfunc.master.vf_oper = 2220 kzalloc(dev->num_slaves * 2221 sizeof(struct mlx4_vf_oper_state), GFP_KERNEL); 2222 if (!priv->mfunc.master.vf_oper) 2223 goto err_comm_oper; 2224 2225 for (i = 0; i < dev->num_slaves; ++i) { 2226 s_state = &priv->mfunc.master.slave_state[i]; 2227 s_state->last_cmd = MLX4_COMM_CMD_RESET; 2228 mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]); 2229 for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j) 2230 s_state->event_eq[j].eqn = -1; 2231 __raw_writel((__force u32) 0, 2232 &priv->mfunc.comm[i].slave_write); 2233 __raw_writel((__force u32) 0, 2234 &priv->mfunc.comm[i].slave_read); 2235 

int mlx4_multi_func_init(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state;
	int i, j, err, port;

	if (mlx4_is_master(dev))
		priv->mfunc.comm =
			ioremap(pci_resource_start(dev->persist->pdev,
						   priv->fw.comm_bar) +
				priv->fw.comm_base, MLX4_COMM_PAGESIZE);
	else
		priv->mfunc.comm =
			ioremap(pci_resource_start(dev->persist->pdev, 2) +
				MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
	if (!priv->mfunc.comm) {
		mlx4_err(dev, "Couldn't map communication vector\n");
		goto err_vhcr;
	}

	if (mlx4_is_master(dev)) {
		priv->mfunc.master.slave_state =
			kcalloc(dev->num_slaves,
				sizeof(struct mlx4_slave_state), GFP_KERNEL);
		if (!priv->mfunc.master.slave_state)
			goto err_comm;

		priv->mfunc.master.vf_admin =
			kcalloc(dev->num_slaves,
				sizeof(struct mlx4_vf_admin_state), GFP_KERNEL);
		if (!priv->mfunc.master.vf_admin)
			goto err_comm_admin;

		priv->mfunc.master.vf_oper =
			kcalloc(dev->num_slaves,
				sizeof(struct mlx4_vf_oper_state), GFP_KERNEL);
		if (!priv->mfunc.master.vf_oper)
			goto err_comm_oper;

		for (i = 0; i < dev->num_slaves; ++i) {
			s_state = &priv->mfunc.master.slave_state[i];
			s_state->last_cmd = MLX4_COMM_CMD_RESET;
			mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]);
			for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
				s_state->event_eq[j].eqn = -1;
			__raw_writel((__force u32) 0,
				     &priv->mfunc.comm[i].slave_write);
			__raw_writel((__force u32) 0,
				     &priv->mfunc.comm[i].slave_read);
			mmiowb();
			for (port = 1; port <= MLX4_MAX_PORTS; port++) {
				s_state->vlan_filter[port] =
					kzalloc(sizeof(struct mlx4_vlan_fltr),
						GFP_KERNEL);
				if (!s_state->vlan_filter[port]) {
					/* free the earlier port's filter,
					 * if it was already allocated
					 */
					if (--port)
						kfree(s_state->vlan_filter[port]);
					goto err_slaves;
				}
				INIT_LIST_HEAD(&s_state->mcast_filters[port]);
				priv->mfunc.master.vf_admin[i].vport[port].default_vlan = MLX4_VGT;
				priv->mfunc.master.vf_oper[i].vport[port].state.default_vlan = MLX4_VGT;
				priv->mfunc.master.vf_oper[i].vport[port].vlan_idx = NO_INDX;
				priv->mfunc.master.vf_oper[i].vport[port].mac_idx = NO_INDX;
			}
			spin_lock_init(&s_state->lock);
		}

		memset(&priv->mfunc.master.cmd_eqe, 0, dev->caps.eqe_size);
		priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
		INIT_WORK(&priv->mfunc.master.comm_work,
			  mlx4_master_comm_channel);
		INIT_WORK(&priv->mfunc.master.slave_event_work,
			  mlx4_gen_slave_eqe);
		INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
			  mlx4_master_handle_slave_flr);
		spin_lock_init(&priv->mfunc.master.slave_state_lock);
		spin_lock_init(&priv->mfunc.master.slave_eq.event_lock);
		priv->mfunc.master.comm_wq =
			create_singlethread_workqueue("mlx4_comm");
		if (!priv->mfunc.master.comm_wq)
			goto err_slaves;

		if (mlx4_init_resource_tracker(dev))
			goto err_thread;

	} else {
		err = sync_toggles(dev);
		if (err) {
			mlx4_err(dev, "Couldn't sync toggles\n");
			goto err_comm;
		}
	}
	return 0;

err_thread:
	flush_workqueue(priv->mfunc.master.comm_wq);
	destroy_workqueue(priv->mfunc.master.comm_wq);
err_slaves:
	/* walk back over the slaves initialized so far, slave 0 included */
	while (i--) {
		for (port = 1; port <= MLX4_MAX_PORTS; port++)
			kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
	}
	kfree(priv->mfunc.master.vf_oper);
err_comm_oper:
	kfree(priv->mfunc.master.vf_admin);
err_comm_admin:
	kfree(priv->mfunc.master.slave_state);
err_comm:
	iounmap(priv->mfunc.comm);
err_vhcr:
	dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
			  priv->mfunc.vhcr,
			  priv->mfunc.vhcr_dma);
	priv->mfunc.vhcr = NULL;
	return -ENOMEM;
}

int mlx4_cmd_init(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int flags = 0;

	if (!priv->cmd.initialized) {
		mutex_init(&priv->cmd.slave_cmd_mutex);
		sema_init(&priv->cmd.poll_sem, 1);
		priv->cmd.use_events = 0;
		priv->cmd.toggle = 1;
		priv->cmd.initialized = 1;
		flags |= MLX4_CMD_CLEANUP_STRUCT;
	}

	if (!mlx4_is_slave(dev) && !priv->cmd.hcr) {
		priv->cmd.hcr = ioremap(pci_resource_start(dev->persist->pdev,
					0) + MLX4_HCR_BASE, MLX4_HCR_SIZE);
		if (!priv->cmd.hcr) {
			mlx4_err(dev, "Couldn't map command register\n");
			goto err;
		}
		flags |= MLX4_CMD_CLEANUP_HCR;
	}

	if (mlx4_is_mfunc(dev) && !priv->mfunc.vhcr) {
		priv->mfunc.vhcr = dma_alloc_coherent(&dev->persist->pdev->dev,
						      PAGE_SIZE,
						      &priv->mfunc.vhcr_dma,
						      GFP_KERNEL);
		if (!priv->mfunc.vhcr)
			goto err;

		flags |= MLX4_CMD_CLEANUP_VHCR;
	}

	if (!priv->cmd.pool) {
		priv->cmd.pool = pci_pool_create("mlx4_cmd",
						 dev->persist->pdev,
						 MLX4_MAILBOX_SIZE,
						 MLX4_MAILBOX_SIZE, 0);
		if (!priv->cmd.pool)
			goto err;

		flags |= MLX4_CMD_CLEANUP_POOL;
	}

	return 0;

err:
	mlx4_cmd_cleanup(dev, flags);
	return -ENOMEM;
}
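
/*
 * Editor's note: illustrative sketch, not driver code. mlx4_cmd_init()
 * accumulates one MLX4_CMD_CLEANUP_* flag per resource it set up in this
 * call, so a mid-way failure unwinds only its own work:
 *
 *	int flags = 0;
 *	flags |= MLX4_CMD_CLEANUP_STRUCT;	// cmd struct initialized
 *	flags |= MLX4_CMD_CLEANUP_HCR;		// HCR mapped
 *	// ...VHCR allocation fails here...
 *	mlx4_cmd_cleanup(dev, flags);		// unmaps the HCR, resets the
 *						// struct, leaves pre-existing
 *						// state alone
 *
 * This makes mlx4_cmd_init() safe to call again after a partial failure.
 */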

void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int slave;
	u32 slave_read;

	/* Report an internal error event to all
	 * communication channels.
	 */
	for (slave = 0; slave < dev->num_slaves; slave++) {
		slave_read = swab32(readl(&priv->mfunc.comm[slave].slave_read));
		slave_read |= (u32)COMM_CHAN_EVENT_INTERNAL_ERR;
		__raw_writel((__force u32)cpu_to_be32(slave_read),
			     &priv->mfunc.comm[slave].slave_read);
		/* Make sure that our comm channel write doesn't
		 * get mixed in with writes from another CPU.
		 */
		mmiowb();
	}
}

void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, port;

	if (mlx4_is_master(dev)) {
		flush_workqueue(priv->mfunc.master.comm_wq);
		destroy_workqueue(priv->mfunc.master.comm_wq);
		for (i = 0; i < dev->num_slaves; i++) {
			for (port = 1; port <= MLX4_MAX_PORTS; port++)
				kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
		}
		kfree(priv->mfunc.master.slave_state);
		kfree(priv->mfunc.master.vf_admin);
		kfree(priv->mfunc.master.vf_oper);
		dev->num_slaves = 0;
	}

	iounmap(priv->mfunc.comm);
}

void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (priv->cmd.pool && (cleanup_mask & MLX4_CMD_CLEANUP_POOL)) {
		pci_pool_destroy(priv->cmd.pool);
		priv->cmd.pool = NULL;
	}

	if (!mlx4_is_slave(dev) && priv->cmd.hcr &&
	    (cleanup_mask & MLX4_CMD_CLEANUP_HCR)) {
		iounmap(priv->cmd.hcr);
		priv->cmd.hcr = NULL;
	}
	if (mlx4_is_mfunc(dev) && priv->mfunc.vhcr &&
	    (cleanup_mask & MLX4_CMD_CLEANUP_VHCR)) {
		dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
				  priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
		priv->mfunc.vhcr = NULL;
	}
	if (priv->cmd.initialized && (cleanup_mask & MLX4_CMD_CLEANUP_STRUCT))
		priv->cmd.initialized = 0;
}

/*
 * Switch to using events to issue FW commands (can only be called
 * after event queue for command events has been initialized).
 */
int mlx4_cmd_use_events(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;
	int err = 0;

	priv->cmd.context = kmalloc_array(priv->cmd.max_cmds,
					  sizeof(struct mlx4_cmd_context),
					  GFP_KERNEL);
	if (!priv->cmd.context)
		return -ENOMEM;

	for (i = 0; i < priv->cmd.max_cmds; ++i) {
		priv->cmd.context[i].token = i;
		priv->cmd.context[i].next = i + 1;
		/* To support fatal error flow, initialize all
		 * cmd contexts to allow simulating completions
		 * with complete() at any time.
		 */
		init_completion(&priv->cmd.context[i].done);
	}

	priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
	priv->cmd.free_head = 0;

	sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
	spin_lock_init(&priv->cmd.context_lock);

	/* token_mask: smallest all-ones mask covering max_cmds tokens */
	for (priv->cmd.token_mask = 1;
	     priv->cmd.token_mask < priv->cmd.max_cmds;
	     priv->cmd.token_mask <<= 1)
		; /* nothing */
	--priv->cmd.token_mask;

	down(&priv->cmd.poll_sem);
	priv->cmd.use_events = 1;

	return err;
}
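
/*
 * Editor's note: illustrative sketch, not driver code. The loop above
 * computes the smallest power-of-two mask that can distinguish max_cmds
 * outstanding tokens. Assuming a hypothetical max_cmds of 10:
 *
 *	token_mask: 1 -> 2 -> 4 -> 8 -> 16	// first value >= 10
 *	token_mask - 1 = 0xf			// low four bits of the token
 *
 * A command completion EQE echoes the token, and (token & token_mask)
 * then indexes the matching mlx4_cmd_context.
 */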

/*
 * Switch back to polling (used when shutting down the device)
 */
void mlx4_cmd_use_polling(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	priv->cmd.use_events = 0;

	for (i = 0; i < priv->cmd.max_cmds; ++i)
		down(&priv->cmd.event_sem);

	kfree(priv->cmd.context);

	up(&priv->cmd.poll_sem);
}

struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
{
	struct mlx4_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof(*mailbox), GFP_KERNEL);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
				      &mailbox->dma);
	if (!mailbox->buf) {
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}

	memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);

	return mailbox;
}
EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);

void mlx4_free_cmd_mailbox(struct mlx4_dev *dev,
			   struct mlx4_cmd_mailbox *mailbox)
{
	if (!mailbox)
		return;

	pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}
EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);

u32 mlx4_comm_get_version(void)
{
	/* interface revision in bits 15:8, channel version in bits 7:0 */
	return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER;
}

static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
{
	if ((vf < 0) || (vf >= dev->persist->num_vfs)) {
		mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n",
			 vf, dev->persist->num_vfs);
		return -EINVAL;
	}

	return vf + 1;
}

int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave)
{
	if (slave < 1 || slave > dev->persist->num_vfs) {
		mlx4_err(dev,
			 "Bad slave number:%d (number of activated slaves: %lu)\n",
			 slave, dev->num_slaves);
		return -EINVAL;
	}
	return slave - 1;
}

void mlx4_cmd_wake_completions(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_context *context;
	int i;

	spin_lock(&priv->cmd.context_lock);
	if (priv->cmd.context) {
		for (i = 0; i < priv->cmd.max_cmds; ++i) {
			context = &priv->cmd.context[i];
			context->fw_status = CMD_STAT_INTERNAL_ERR;
			context->result =
				mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
			complete(&context->done);
		}
	}
	spin_unlock(&priv->cmd.context_lock);
}

struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave)
{
	struct mlx4_active_ports actv_ports;
	int vf;

	bitmap_zero(actv_ports.ports, MLX4_MAX_PORTS);

	if (slave == 0) {
		bitmap_fill(actv_ports.ports, dev->caps.num_ports);
		return actv_ports;
	}

	vf = mlx4_get_vf_indx(dev, slave);
	if (vf < 0)
		return actv_ports;

	bitmap_set(actv_ports.ports, dev->dev_vfs[vf].min_port - 1,
		   min((int)dev->dev_vfs[vf].n_ports,
		       dev->caps.num_ports));

	return actv_ports;
}
EXPORT_SYMBOL_GPL(mlx4_get_active_ports);

int mlx4_slave_convert_port(struct mlx4_dev *dev, int slave, int port)
{
	unsigned n;
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
	unsigned m = bitmap_weight(actv_ports.ports, dev->caps.num_ports);

	if (port <= 0 || port > m)
		return -EINVAL;

	n = find_first_bit(actv_ports.ports, dev->caps.num_ports);
	if (port <= n)
		port = n + 1;

	return port;
}
EXPORT_SYMBOL_GPL(mlx4_slave_convert_port);

int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port)
{
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);

	if (test_bit(port - 1, actv_ports.ports))
		return port -
			find_first_bit(actv_ports.ports, dev->caps.num_ports);

	return -1;
}
EXPORT_SYMBOL_GPL(mlx4_phys_to_slave_port);
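
/*
 * Editor's note: illustrative sketch, not driver code. A worked example
 * of the two numbering conversions above for a hypothetical single-port
 * VF assigned physical port 2 (only bit 1 of its bitmap set):
 *
 *	mlx4_slave_convert_port(dev, slave, 1)	->  2	// slave port 1 = phys 2
 *	mlx4_slave_convert_port(dev, slave, 2)	-> -EINVAL // VF has one port
 *	mlx4_phys_to_slave_port(dev, slave, 2)	->  1	// phys 2 = slave port 1
 *	mlx4_phys_to_slave_port(dev, slave, 1)	-> -1	// not owned by this VF
 */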

struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev,
						   int port)
{
	unsigned i;
	struct mlx4_slaves_pport slaves_pport;

	bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);

	if (port <= 0 || port > dev->caps.num_ports)
		return slaves_pport;

	for (i = 0; i < dev->persist->num_vfs + 1; i++) {
		struct mlx4_active_ports actv_ports =
			mlx4_get_active_ports(dev, i);
		if (test_bit(port - 1, actv_ports.ports))
			set_bit(i, slaves_pport.slaves);
	}

	return slaves_pport;
}
EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport);

struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
		struct mlx4_dev *dev,
		const struct mlx4_active_ports *crit_ports)
{
	unsigned i;
	struct mlx4_slaves_pport slaves_pport;

	bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);

	for (i = 0; i < dev->persist->num_vfs + 1; i++) {
		struct mlx4_active_ports actv_ports =
			mlx4_get_active_ports(dev, i);
		if (bitmap_equal(crit_ports->ports, actv_ports.ports,
				 dev->caps.num_ports))
			set_bit(i, slaves_pport.slaves);
	}

	return slaves_pport;
}
EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv);

static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port)
{
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
	int min_port = find_first_bit(actv_ports.ports, dev->caps.num_ports)
			+ 1;
	int max_port = min_port +
		bitmap_weight(actv_ports.ports, dev->caps.num_ports);

	if (port < min_port)
		port = min_port;
	else if (port >= max_port)
		port = max_port - 1;

	return port;
}

int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *s_info;
	int slave;

	if (!mlx4_is_master(dev))
		return -EPROTONOSUPPORT;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
	s_info->mac = mac;
	mlx4_info(dev, "default mac on vf %d port %d to %llX will take effect only after vf restart\n",
		  vf, port, s_info->mac);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);


int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *vf_admin;
	int slave;

	if ((!mlx4_is_master(dev)) ||
	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VLAN_CONTROL))
		return -EPROTONOSUPPORT;

	if ((vlan > 4095) || (qos > 7))
		return -EINVAL;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];

	if ((0 == vlan) && (0 == qos))
		vf_admin->default_vlan = MLX4_VGT;
	else
		vf_admin->default_vlan = vlan;
	vf_admin->default_qos = qos;

	if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
		mlx4_info(dev,
			  "updating vf %d port %d config will take effect on next VF restart\n",
			  vf, port);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan);

/* mlx4_get_slave_default_vlan -
 * returns true if the slave is in VST mode (i.e. has a default vlan);
 * if so, fills in vlan and qos where the pointers are non-NULL.
 */
bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
				 u16 *vlan, u8 *qos)
{
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_priv *priv;

	priv = mlx4_priv(dev);
	port = mlx4_slaves_closest_port(dev, slave, port);
	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];

	if (MLX4_VGT != vp_oper->state.default_vlan) {
		if (vlan)
			*vlan = vp_oper->state.default_vlan;
		if (qos)
			*qos = vp_oper->state.default_qos;
		return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(mlx4_get_slave_default_vlan);

int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *s_info;
	int slave;

	if ((!mlx4_is_master(dev)) ||
	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FSM))
		return -EPROTONOSUPPORT;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
	s_info->spoofchk = setting;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_spoofchk);

int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *s_info;
	int slave;

	if (!mlx4_is_master(dev))
		return -EPROTONOSUPPORT;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
	ivf->vf = vf;

	/* unpack the 48-bit admin MAC into the byte array, MSB first */
	ivf->mac[0] = ((s_info->mac >> (5*8)) & 0xff);
	ivf->mac[1] = ((s_info->mac >> (4*8)) & 0xff);
	ivf->mac[2] = ((s_info->mac >> (3*8)) & 0xff);
	ivf->mac[3] = ((s_info->mac >> (2*8)) & 0xff);
	ivf->mac[4] = ((s_info->mac >> (1*8)) & 0xff);
	ivf->mac[5] = ((s_info->mac) & 0xff);

	ivf->vlan = s_info->default_vlan;
	ivf->qos = s_info->default_qos;
	ivf->max_tx_rate = s_info->tx_rate;
	ivf->min_tx_rate = 0;
	ivf->spoofchk = s_info->spoofchk;
	ivf->linkstate = s_info->link_state;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_get_vf_config);
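
/*
 * Editor's note: illustrative sketch, not driver code. The vlan/qos pair
 * passed to mlx4_set_vf_vlan() above selects the vport mode. Assuming a
 * hypothetical VF 0 on port 1:
 *
 *	mlx4_set_vf_vlan(dev, 1, 0, 0, 0);	// vlan 0, qos 0 -> VGT
 *						// (guest tags its own traffic)
 *	mlx4_set_vf_vlan(dev, 1, 0, 100, 3);	// VST: HW inserts vlan 100,
 *						// priority 3, on VF traffic
 *
 * When immediate activation is not possible, the new state is only
 * recorded in vf_admin and applied on the next VF restart.
 */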

int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *s_info;
	int slave;
	u8 link_stat_event;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	switch (link_state) {
	case IFLA_VF_LINK_STATE_AUTO:
		/* get current link state */
		if (!priv->sense.do_sense_port[port])
			link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
		else
			link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
		break;

	case IFLA_VF_LINK_STATE_ENABLE:
		link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
		break;

	case IFLA_VF_LINK_STATE_DISABLE:
		link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
		break;

	default:
		mlx4_warn(dev, "unknown value for link_state %02x on slave %d port %d\n",
			  link_state, slave, port);
		return -EINVAL;
	}
	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
	s_info->link_state = link_state;

	/* send event */
	mlx4_gen_port_state_change_eqe(dev, slave, port, link_stat_event);

	if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
		mlx4_dbg(dev,
			 "updating vf %d port %d; no link state HW enforcement\n",
			 vf, port);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state);

int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (slave < 1 || slave >= dev->num_slaves ||
	    port < 1 || port > MLX4_MAX_PORTS)
		return 0;

	return priv->mfunc.master.vf_oper[slave].smi_enabled[port] ==
		MLX4_VF_SMI_ENABLED;
}
EXPORT_SYMBOL_GPL(mlx4_vf_smi_enabled);

int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (slave == mlx4_master_func_num(dev))
		return 1;

	if (slave < 1 || slave >= dev->num_slaves ||
	    port < 1 || port > MLX4_MAX_PORTS)
		return 0;

	return priv->mfunc.master.vf_admin[slave].enable_smi[port] ==
		MLX4_VF_SMI_ENABLED;
}
EXPORT_SYMBOL_GPL(mlx4_vf_get_enable_smi_admin);

int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
				 int enabled)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (slave == mlx4_master_func_num(dev))
		return 0;

	if (slave < 1 || slave >= dev->num_slaves ||
	    port < 1 || port > MLX4_MAX_PORTS ||
	    enabled < 0 || enabled > 1)
		return -EINVAL;

	priv->mfunc.master.vf_admin[slave].enable_smi[port] = enabled;
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_vf_set_enable_smi_admin);
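
/*
 * Editor's note: illustrative sketch, not driver code. Like the other
 * vf_admin setters above, mlx4_vf_set_enable_smi_admin() only records the
 * requested state; it is copied into vf_oper (and thus enforced) by
 * mlx4_master_activate_admin_state() the next time the slave initializes:
 *
 *	mlx4_vf_set_enable_smi_admin(dev, slave, port, 1);
 *	// ...slave resets and re-initializes...
 *	mlx4_vf_smi_enabled(dev, slave, port);	// now returns 1
 */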