/*-
 * Copyright (c) 2013-2021, Mellanox Technologies, Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_rss.h"
#include "opt_ratelimit.h"

#include <linux/interrupt.h>
#include <linux/module.h>
#include <dev/mlx5/port.h>
#include <dev/mlx5/mlx5_ifc.h>
#include <dev/mlx5/mlx5_fpga/core.h>
#include <dev/mlx5/mlx5_core/mlx5_core.h>
#include <dev/mlx5/mlx5_core/eswitch.h>
#include <dev/mlx5/mlx5_accel/ipsec.h>

#ifdef RSS
#include <net/rss_config.h>
#include <netinet/in_rss.h>
#endif

enum {
    MLX5_EQE_SIZE = sizeof(struct mlx5_eqe),
    MLX5_EQE_OWNER_INIT_VAL = 0x1,
};

enum {
    MLX5_NUM_SPARE_EQE = 0x80,
    MLX5_NUM_ASYNC_EQE = 0x100,
    MLX5_NUM_CMD_EQE = 32,
};

enum {
    MLX5_EQ_DOORBEL_OFFSET = 0x40,
};

#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG) | \
                               (1ull << MLX5_EVENT_TYPE_COMM_EST) | \
                               (1ull << MLX5_EVENT_TYPE_SQ_DRAINED) | \
                               (1ull << MLX5_EVENT_TYPE_CQ_ERROR) | \
                               (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR) | \
                               (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED) | \
                               (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
                               (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR) | \
                               (1ull << MLX5_EVENT_TYPE_PORT_CHANGE) | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR) | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE) | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT) | \
                               (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE))

struct map_eq_in {
    u64 mask;
    u32 reserved;
    u32 unmap_eqn;
};

struct cre_des_eq {
    u8 reserved[15];
    u8 eqn;
};

/* Function prototypes */
static void mlx5_port_module_event(struct mlx5_core_dev *dev,
    struct mlx5_eqe *eqe);
static void mlx5_port_general_notification_event(struct mlx5_core_dev *dev,
    struct mlx5_eqe *eqe);

static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
    u32 in[MLX5_ST_SZ_DW(destroy_eq_in)] = {0};
    u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0};

    MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
    MLX5_SET(destroy_eq_in, in, eq_number, eqn);

    return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

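/*
 * EQE access helpers. The ownership bit in each EQE toggles on every pass
 * around the ring: next_eqe_sw() compares it against the "pass" bit of the
 * consumer index (cons_index & nent) and returns NULL while the entry is
 * still owned by hardware.
 */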
static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
{
    return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE);
}

static struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
{
    struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));

    return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
}

static const char *eqe_type_str(u8 type)
{
    switch (type) {
    case MLX5_EVENT_TYPE_COMP:
        return "MLX5_EVENT_TYPE_COMP";
    case MLX5_EVENT_TYPE_PATH_MIG:
        return "MLX5_EVENT_TYPE_PATH_MIG";
    case MLX5_EVENT_TYPE_COMM_EST:
        return "MLX5_EVENT_TYPE_COMM_EST";
    case MLX5_EVENT_TYPE_SQ_DRAINED:
        return "MLX5_EVENT_TYPE_SQ_DRAINED";
    case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
        return "MLX5_EVENT_TYPE_SRQ_LAST_WQE";
    case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
        return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT";
    case MLX5_EVENT_TYPE_CQ_ERROR:
        return "MLX5_EVENT_TYPE_CQ_ERROR";
    case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
        return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR";
    case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
        return "MLX5_EVENT_TYPE_PATH_MIG_FAILED";
    case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
        return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
    case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
        return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR";
    case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
        return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR";
    case MLX5_EVENT_TYPE_INTERNAL_ERROR:
        return "MLX5_EVENT_TYPE_INTERNAL_ERROR";
    case MLX5_EVENT_TYPE_PORT_CHANGE:
        return "MLX5_EVENT_TYPE_PORT_CHANGE";
    case MLX5_EVENT_TYPE_GPIO_EVENT:
        return "MLX5_EVENT_TYPE_GPIO_EVENT";
    case MLX5_EVENT_TYPE_CODING_PORT_MODULE_EVENT:
        return "MLX5_EVENT_TYPE_PORT_MODULE_EVENT";
    case MLX5_EVENT_TYPE_TEMP_WARN_EVENT:
        return "MLX5_EVENT_TYPE_TEMP_WARN_EVENT";
    case MLX5_EVENT_TYPE_REMOTE_CONFIG:
        return "MLX5_EVENT_TYPE_REMOTE_CONFIG";
    case MLX5_EVENT_TYPE_DB_BF_CONGESTION:
        return "MLX5_EVENT_TYPE_DB_BF_CONGESTION";
    case MLX5_EVENT_TYPE_STALL_EVENT:
        return "MLX5_EVENT_TYPE_STALL_EVENT";
    case MLX5_EVENT_TYPE_CMD:
        return "MLX5_EVENT_TYPE_CMD";
    case MLX5_EVENT_TYPE_PAGE_REQUEST:
        return "MLX5_EVENT_TYPE_PAGE_REQUEST";
    case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
        return "MLX5_EVENT_TYPE_NIC_VPORT_CHANGE";
    case MLX5_EVENT_TYPE_FPGA_ERROR:
        return "MLX5_EVENT_TYPE_FPGA_ERROR";
    case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
        return "MLX5_EVENT_TYPE_FPGA_QP_ERROR";
    case MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT:
        return "MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT";
    case MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT:
        return "MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT";
    case MLX5_EVENT_TYPE_OBJECT_CHANGE:
        return "MLX5_EVENT_TYPE_OBJECT_CHANGE";
    default:
        return "Unrecognized event";
    }
}

static enum mlx5_dev_event port_subtype_event(u8 subtype)
{
    switch (subtype) {
    case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
        return MLX5_DEV_EVENT_PORT_DOWN;
    case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
        return MLX5_DEV_EVENT_PORT_UP;
    case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
        return MLX5_DEV_EVENT_PORT_INITIALIZED;
    case MLX5_PORT_CHANGE_SUBTYPE_LID:
        return MLX5_DEV_EVENT_LID_CHANGE;
    case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
        return MLX5_DEV_EVENT_PKEY_CHANGE;
    case MLX5_PORT_CHANGE_SUBTYPE_GUID:
        return MLX5_DEV_EVENT_GUID_CHANGE;
    case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
        return MLX5_DEV_EVENT_CLIENT_REREG;
    }
    return -1;
}

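/* Map a DCBX change EQE subtype to the corresponding driver-level event. */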
static enum mlx5_dev_event dcbx_subevent(u8 subtype)
{
    switch (subtype) {
    case MLX5_DCBX_EVENT_SUBTYPE_ERROR_STATE_DCBX:
        return MLX5_DEV_EVENT_ERROR_STATE_DCBX;
    case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_CHANGE:
        return MLX5_DEV_EVENT_REMOTE_CONFIG_CHANGE;
    case MLX5_DCBX_EVENT_SUBTYPE_LOCAL_OPER_CHANGE:
        return MLX5_DEV_EVENT_LOCAL_OPER_CHANGE;
    case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_APP_PRIORITY_CHANGE:
        return MLX5_DEV_EVENT_REMOTE_CONFIG_APPLICATION_PRIORITY_CHANGE;
    }
    return -1;
}

static void eq_update_ci(struct mlx5_eq *eq, int arm)
{
    __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
    u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);
    __raw_writel((__force u32) cpu_to_be32(val), addr);
    /* We still want ordering, just not swabbing, so add a barrier */
    mb();
}

static void
mlx5_temp_warning_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
{

    mlx5_core_warn(dev,
        "High temperature on sensors with bit set %#jx %#jx\n",
        (uintmax_t)be64_to_cpu(eqe->data.temp_warning.sensor_warning_msb),
        (uintmax_t)be64_to_cpu(eqe->data.temp_warning.sensor_warning_lsb));
}

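/*
 * Drain an EQ: dispatch every software-owned EQE by type, advance the
 * consumer index (publishing it to the HCA at least every
 * MLX5_NUM_SPARE_EQE entries so the queue cannot appear to overflow),
 * and re-arm the EQ when done.
 */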
static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
    struct mlx5_eqe *eqe;
    int eqes_found = 0;
    int set_ci = 0;
    u32 cqn;
    u32 rsn;
    u8 port;

    while ((eqe = next_eqe_sw(eq))) {
        /*
         * Make sure we read EQ entry contents after we've
         * checked the ownership bit.
         */
        atomic_thread_fence_acq();

        mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
            eq->eqn, eqe_type_str(eqe->type));

        if (dev->priv.eq_table.cb != NULL &&
            dev->priv.eq_table.cb(dev, eqe->type, &eqe->data)) {
            /* FALLTHROUGH */
        } else switch (eqe->type) {
        case MLX5_EVENT_TYPE_COMP:
            mlx5_cq_completion(dev, eqe);
            break;

        case MLX5_EVENT_TYPE_PATH_MIG:
        case MLX5_EVENT_TYPE_COMM_EST:
        case MLX5_EVENT_TYPE_SQ_DRAINED:
        case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
        case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
        case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
        case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
        case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
            rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
            mlx5_core_dbg(dev, "event %s(%d) arrived on resource 0x%x\n",
                eqe_type_str(eqe->type), eqe->type, rsn);
            mlx5_rsc_event(dev, rsn, eqe->type);
            break;

        case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
        case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
            rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
            mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n",
                eqe_type_str(eqe->type), eqe->type, rsn);
            mlx5_srq_event(dev, rsn, eqe->type);
            break;

        case MLX5_EVENT_TYPE_CMD:
            if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
                mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector),
                    MLX5_CMD_MODE_EVENTS);
            }
            break;

        case MLX5_EVENT_TYPE_PORT_CHANGE:
            port = (eqe->data.port.port >> 4) & 0xf;
            switch (eqe->sub_type) {
            case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
            case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
            case MLX5_PORT_CHANGE_SUBTYPE_LID:
            case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
            case MLX5_PORT_CHANGE_SUBTYPE_GUID:
            case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
            case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
                if (dev->event)
                    dev->event(dev, port_subtype_event(eqe->sub_type),
                        (unsigned long)port);
                break;
            default:
                mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n",
                    port, eqe->sub_type);
            }
            break;

        case MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT:
            port = (eqe->data.port.port >> 4) & 0xf;
            switch (eqe->sub_type) {
            case MLX5_DCBX_EVENT_SUBTYPE_ERROR_STATE_DCBX:
            case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_CHANGE:
            case MLX5_DCBX_EVENT_SUBTYPE_LOCAL_OPER_CHANGE:
            case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_APP_PRIORITY_CHANGE:
                if (dev->event)
                    dev->event(dev,
                        dcbx_subevent(eqe->sub_type),
                        0);
                break;
            default:
                mlx5_core_warn(dev,
                    "dcbx event with unrecognized subtype: port %d, sub_type %d\n",
                    port, eqe->sub_type);
            }
            break;

        case MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT:
            mlx5_port_general_notification_event(dev, eqe);
            break;

        case MLX5_EVENT_TYPE_CQ_ERROR:
            cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
            mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
                cqn, eqe->data.cq_err.syndrome);
            mlx5_cq_event(dev, cqn, eqe->type);
            break;

        case MLX5_EVENT_TYPE_PAGE_REQUEST:
            {
                u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
                s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);

                mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
                    func_id, npages);
                mlx5_core_req_pages_handler(dev, func_id, npages);
            }
            break;

        case MLX5_EVENT_TYPE_CODING_PORT_MODULE_EVENT:
            mlx5_port_module_event(dev, eqe);
            break;

        case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
            {
                struct mlx5_eqe_vport_change *vc_eqe =
                    &eqe->data.vport_change;
                u16 vport_num = be16_to_cpu(vc_eqe->vport_num);

                if (dev->event)
                    dev->event(dev,
                        MLX5_DEV_EVENT_VPORT_CHANGE,
                        (unsigned long)vport_num);
            }
            if (dev->priv.eswitch != NULL)
                mlx5_eswitch_vport_event(dev->priv.eswitch,
                    eqe);
            break;

        case MLX5_EVENT_TYPE_FPGA_ERROR:
        case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
            mlx5_fpga_event(dev, eqe->type, &eqe->data.raw);
            break;
        case MLX5_EVENT_TYPE_TEMP_WARN_EVENT:
            mlx5_temp_warning_event(dev, eqe);
            break;

        case MLX5_EVENT_TYPE_OBJECT_CHANGE:
            mlx5_object_change_event(dev, eqe);
            break;

        default:
            mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
                eqe->type, eq->eqn);
            break;
        }

        ++eq->cons_index;
        eqes_found = 1;
        ++set_ci;

        /* The HCA will think the queue has overflowed if we
         * don't tell it we've been processing events. We
         * create our EQs with MLX5_NUM_SPARE_EQE extra
         * entries, so we must update our consumer index at
         * least that often.
         */
        if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
            eq_update_ci(eq, 0);
            set_ci = 0;
        }
    }

    eq_update_ci(eq, 1);

    return eqes_found;
}

static irqreturn_t mlx5_msix_handler(int irq, void *eq_ptr)
{
    struct mlx5_eq *eq = eq_ptr;
    struct mlx5_core_dev *dev = eq->dev;

    /* check if IRQs are not disabled */
    if (likely(dev->priv.disable_irqs == 0))
        mlx5_eq_int(dev, eq);

    /* MSI-X vectors always belong to us */
    return IRQ_HANDLED;
}

static void init_eq_buf(struct mlx5_eq *eq)
{
    struct mlx5_eqe *eqe;
    int i;

    for (i = 0; i < eq->nent; i++) {
        eqe = get_eqe(eq, i);
        eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
    }
}

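/*
 * Create an EQ and map its MSI-X vector. The requested size is padded with
 * MLX5_NUM_SPARE_EQE entries and rounded up to a power of two; the EQ is
 * left armed on successful return.
 */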
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
    int nent, u64 mask)
{
    u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
    struct mlx5_priv *priv = &dev->priv;
    __be64 *pas;
    void *eqc;
    int inlen;
    u32 *in;
    int err;

    eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
    eq->cons_index = 0;
    err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, 2 * PAGE_SIZE,
        &eq->buf);
    if (err)
        return err;

    init_eq_buf(eq);

    inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
        MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages;
    in = mlx5_vzalloc(inlen);
    if (!in) {
        err = -ENOMEM;
        goto err_buf;
    }

    pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
    mlx5_fill_page_array(&eq->buf, pas);

    MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
    MLX5_SET64(create_eq_in, in, event_bitmask, mask);

    eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
    MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
    MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
    MLX5_SET(eqc, eqc, intr, vecidx);
    MLX5_SET(eqc, eqc, log_page_size,
        eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

    err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
    if (err)
        goto err_in;

    eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
    eq->irqn = vecidx;
    eq->dev = dev;
    eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;
    err = request_irq(priv->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
        "mlx5_core", eq);
    if (err)
        goto err_eq;
#ifdef RSS
    if (vecidx >= MLX5_EQ_VEC_COMP_BASE) {
        u8 bucket = vecidx - MLX5_EQ_VEC_COMP_BASE;
        err = bind_irq_to_cpu(priv->msix_arr[vecidx].vector,
            rss_getcpu(bucket % rss_getnumbuckets()));
        if (err)
            goto err_irq;
    }
#else
    if (0)
        goto err_irq;
#endif

    /* EQs are created in ARMED state
     */
    eq_update_ci(eq, 1);

    kvfree(in);
    return 0;

err_irq:
    free_irq(priv->msix_arr[vecidx].vector, eq);

err_eq:
    mlx5_cmd_destroy_eq(dev, eq->eqn);

err_in:
    kvfree(in);

err_buf:
    mlx5_buf_free(dev, &eq->buf);
    return err;
}
EXPORT_SYMBOL_GPL(mlx5_create_map_eq);

int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
    int err;

    free_irq(dev->priv.msix_arr[eq->irqn].vector, eq);
    err = mlx5_cmd_destroy_eq(dev, eq->eqn);
    if (err)
        mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
            eq->eqn);
    mlx5_buf_free(dev, &eq->buf);

    return err;
}
EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq);

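/*
 * The EQ table itself only needs its lock initialized here; the command,
 * async and pages EQs are created later in mlx5_start_eqs().
 */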
int mlx5_eq_init(struct mlx5_core_dev *dev)
{
    int err;

    spin_lock_init(&dev->priv.eq_table.lock);

    err = 0;

    return err;
}

void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
{
}

int mlx5_start_eqs(struct mlx5_core_dev *dev)
{
    struct mlx5_eq_table *table = &dev->priv.eq_table;
    u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;
    int err;

    if (MLX5_CAP_GEN(dev, port_module_event))
        async_event_mask |= (1ull <<
            MLX5_EVENT_TYPE_CODING_PORT_MODULE_EVENT);

    if (MLX5_CAP_GEN(dev, nic_vport_change_event))
        async_event_mask |= (1ull <<
            MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);

    if (MLX5_CAP_GEN(dev, dcbx))
        async_event_mask |= (1ull <<
            MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT);

    if (MLX5_CAP_GEN(dev, fpga))
        async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR) |
            (1ull << MLX5_EVENT_TYPE_FPGA_QP_ERROR);

    if (MLX5_CAP_GEN(dev, temp_warn_event))
        async_event_mask |= (1ull << MLX5_EVENT_TYPE_TEMP_WARN_EVENT);

    if (MLX5_CAP_GEN(dev, general_notification_event)) {
        async_event_mask |= (1ull <<
            MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT);
    }

    if (mlx5_ipsec_device_caps(dev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
        async_event_mask |=
            (1ull << MLX5_EVENT_TYPE_OBJECT_CHANGE);

    err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
        MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD);
    if (err) {
        mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
        return err;
    }

    mlx5_cmd_use_events(dev);

    err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
        MLX5_NUM_ASYNC_EQE, async_event_mask);
    if (err) {
        mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
        goto err1;
    }

    err = mlx5_create_map_eq(dev, &table->pages_eq,
        MLX5_EQ_VEC_PAGES,
        /* TODO: sriov max_vf + */ 1,
        1 << MLX5_EVENT_TYPE_PAGE_REQUEST);
    if (err) {
        mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
        goto err2;
    }

    return err;

err2:
    mlx5_destroy_unmap_eq(dev, &table->async_eq);

err1:
    mlx5_cmd_use_polling(dev);
    mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
    return err;
}

int mlx5_stop_eqs(struct mlx5_core_dev *dev)
{
    struct mlx5_eq_table *table = &dev->priv.eq_table;
    int err;

    err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
    if (err)
        return err;

    mlx5_destroy_unmap_eq(dev, &table->async_eq);
    mlx5_cmd_use_polling(dev);

    err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
    if (err)
        mlx5_cmd_use_events(dev);

    return err;
}

int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
    u32 *out, int outlen)
{
    u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {0};

    memset(out, 0, outlen);
    MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
    MLX5_SET(query_eq_in, in, eq_number, eq->eqn);

    return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL_GPL(mlx5_core_eq_query);

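/* Port module (cable/transceiver) event decoding and status bookkeeping. */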
static const char *mlx5_port_module_event_error_type_to_string(u8 error_type)
{
    switch (error_type) {
    case MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED:
        return "Power budget exceeded";
    case MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE:
        return "Long Range for non MLNX cable";
    case MLX5_MODULE_EVENT_ERROR_BUS_STUCK:
        return "Bus stuck(I2C or data shorted)";
    case MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT:
        return "No EEPROM/retry timeout";
    case MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST:
        return "Enforce part number list";
    case MLX5_MODULE_EVENT_ERROR_UNSUPPORTED_CABLE:
        return "Unknown identifier";
    case MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE:
        return "High Temperature";
    case MLX5_MODULE_EVENT_ERROR_CABLE_IS_SHORTED:
        return "Bad or shorted cable/module";
    case MLX5_MODULE_EVENT_ERROR_PMD_TYPE_NOT_ENABLED:
        return "PMD type is not enabled";
    case MLX5_MODULE_EVENT_ERROR_LASTER_TEC_FAILURE:
        return "Laster_TEC_failure";
    case MLX5_MODULE_EVENT_ERROR_HIGH_CURRENT:
        return "High_current";
    case MLX5_MODULE_EVENT_ERROR_HIGH_VOLTAGE:
        return "High_voltage";
    case MLX5_MODULE_EVENT_ERROR_PCIE_SYS_POWER_SLOT_EXCEEDED:
        return "pcie_system_power_slot_Exceeded";
    case MLX5_MODULE_EVENT_ERROR_HIGH_POWER:
        return "High_power";
    case MLX5_MODULE_EVENT_ERROR_MODULE_STATE_MACHINE_FAULT:
        return "Module_state_machine_fault";
    default:
        return "Unknown error type";
    }
}

unsigned int mlx5_query_module_status(struct mlx5_core_dev *dev, int module_num)
{
    if (module_num < 0 || module_num >= MLX5_MAX_PORTS)
        return 0; /* undefined */
    return dev->module_status[module_num];
}

static void mlx5_port_module_event(struct mlx5_core_dev *dev,
    struct mlx5_eqe *eqe)
{
    unsigned int module_num;
    unsigned int module_status;
    unsigned int error_type;
    struct mlx5_eqe_port_module_event *module_event_eqe;

    module_event_eqe = &eqe->data.port_module_event;

    module_num = (unsigned int)module_event_eqe->module;
    module_status = (unsigned int)module_event_eqe->module_status &
        PORT_MODULE_EVENT_MODULE_STATUS_MASK;
    error_type = (unsigned int)module_event_eqe->error_type &
        PORT_MODULE_EVENT_ERROR_TYPE_MASK;

    if (module_status < MLX5_MODULE_STATUS_NUM)
        dev->priv.pme_stats.status_counters[module_status]++;
    switch (module_status) {
    case MLX5_MODULE_STATUS_PLUGGED_ENABLED:
        mlx5_core_info(dev,
            "Module %u, status: plugged and enabled\n",
            module_num);
        break;

    case MLX5_MODULE_STATUS_UNPLUGGED:
        mlx5_core_info(dev,
            "Module %u, status: unplugged\n", module_num);
        break;

    case MLX5_MODULE_STATUS_ERROR:
        mlx5_core_err(dev,
            "Module %u, status: error, %s (%d)\n",
            module_num,
            mlx5_port_module_event_error_type_to_string(error_type),
            error_type);
        if (error_type < MLX5_MODULE_EVENT_ERROR_NUM)
            dev->priv.pme_stats.error_counters[error_type]++;
        break;

    default:
        mlx5_core_info(dev,
            "Module %u, unknown status %d\n", module_num, module_status);
    }
    /* store module status */
    if (module_num < MLX5_MAX_PORTS)
        dev->module_status[module_num] = module_status;
}

static void mlx5_port_general_notification_event(struct mlx5_core_dev *dev,
    struct mlx5_eqe *eqe)
{
    u8 port = (eqe->data.port.port >> 4) & 0xf;

    switch (eqe->sub_type) {
    case MLX5_GEN_EVENT_SUBTYPE_DELAY_DROP_TIMEOUT:
        break;
    case MLX5_GEN_EVENT_SUBTYPE_PCI_POWER_CHANGE_EVENT:
        mlx5_trigger_health_watchdog(dev);
        break;
    default:
        mlx5_core_warn(dev,
            "general event with unrecognized subtype: port %d, sub_type %d\n",
            port, eqe->sub_type);
        break;
    }
}

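/*
 * With the MSI-X vectors masked by mlx5_disable_interrupts(), the driver
 * can still make progress by polling all EQs through mlx5_poll_interrupts().
 */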
void
mlx5_disable_interrupts(struct mlx5_core_dev *dev)
{
    int nvec = dev->priv.eq_table.num_comp_vectors + MLX5_EQ_VEC_COMP_BASE;
    int x;

    for (x = 0; x != nvec; x++)
        disable_irq(dev->priv.msix_arr[x].vector);
}

void
mlx5_poll_interrupts(struct mlx5_core_dev *dev)
{
    struct mlx5_eq *eq;

    if (unlikely(dev->priv.disable_irqs != 0))
        return;

    mlx5_eq_int(dev, &dev->priv.eq_table.cmd_eq);
    mlx5_eq_int(dev, &dev->priv.eq_table.async_eq);
    mlx5_eq_int(dev, &dev->priv.eq_table.pages_eq);

    list_for_each_entry(eq, &dev->priv.eq_table.comp_eqs_list, list)
        mlx5_eq_int(dev, eq);
}