/*
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "fw.h"

enum {
	MLX4_IRQNAME_SIZE	= 32
};

enum {
	MLX4_NUM_ASYNC_EQE	= 0x100,
	MLX4_NUM_SPARE_EQE	= 0x80,
	MLX4_EQ_ENTRY_SIZE	= 0x20
};
/*
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
struct mlx4_eq_context {
	__be32		flags;
	u16		reserved1[3];
	__be16		page_offset;
	u8		log_eq_size;
	u8		reserved2[4];
	u8		eq_period;
	u8		reserved3;
	u8		eq_max_count;
	u8		reserved4[3];
	u8		intr;
	u8		log_page_size;
	u8		reserved5[2];
	u8		mtt_base_addr_h;
	__be32		mtt_base_addr_l;
	u32		reserved6[2];
	__be32		consumer_index;
	__be32		producer_index;
	u32		reserved7[4];
};

#define MLX4_EQ_STATUS_OK	   ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MLX4_EQ_OWNER_SW	   ( 0 << 24)
#define MLX4_EQ_OWNER_HW	   ( 1 << 24)
#define MLX4_EQ_FLAG_EC		   ( 1 << 18)
#define MLX4_EQ_FLAG_OI		   ( 1 << 17)
#define MLX4_EQ_STATE_ARMED	   ( 9 << 8)
#define MLX4_EQ_STATE_FIRED	   (10 << 8)
#define MLX4_EQ_STATE_ALWAYS_ARMED (11 << 8)

#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX4_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX4_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX4_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX4_EVENT_TYPE_ECC_DETECT)	    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT)	    | \
			       (1ull << MLX4_EVENT_TYPE_CMD))

struct mlx4_eqe {
	u8			reserved1;
	u8			type;
	u8			reserved2;
	u8			subtype;
	union {
		u32		raw[6];
		struct {
			__be32	cqn;
		} __packed comp;
		struct {
			u16	reserved1;
			__be16	token;
			u32	reserved2;
			u8	reserved3[3];
			u8	status;
			__be64	out_param;
		} __packed cmd;
		struct {
			__be32	qpn;
		} __packed qp;
		struct {
			__be32	srqn;
		} __packed srq;
		struct {
			__be32	cqn;
			u32	reserved1;
			u8	reserved2[3];
			u8	syndrome;
		} __packed cq_err;
		struct {
			u32	reserved1[2];
			__be32	port;
		} __packed port_change;
	} event;
	u8			reserved3[3];
	u8			owner;
} __packed;

static void eq_set_ci(struct mlx4_eq *eq, int req_not)
{
	__raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
					       req_not << 31),
		     eq->doorbell);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}

static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry)
{
	unsigned long off = (entry & (eq->nent - 1)) * MLX4_EQ_ENTRY_SIZE;
	return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
}

static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
{
	struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index);
	return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
}
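/*
 * Drain one event queue: walk the EQEs that the hardware has handed to
 * software, dispatch each one to the CQ/QP/SRQ/command/port handlers,
 * and ring the EQ doorbell so the HCA knows how far we have consumed.
 */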
static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
	struct mlx4_eqe *eqe;
	int cqn;
	int eqes_found = 0;
	int set_ci = 0;
	int port;

	while ((eqe = next_eqe_sw(eq))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		rmb();

		switch (eqe->type) {
		case MLX4_EVENT_TYPE_COMP:
			cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
			mlx4_cq_completion(dev, cqn);
			break;

		case MLX4_EVENT_TYPE_PATH_MIG:
		case MLX4_EVENT_TYPE_COMM_EST:
		case MLX4_EVENT_TYPE_SQ_DRAINED:
		case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
		case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
			mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				      eqe->type);
			break;

		case MLX4_EVENT_TYPE_SRQ_LIMIT:
		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
			mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
				       eqe->type);
			break;

		case MLX4_EVENT_TYPE_CMD:
			mlx4_cmd_event(dev,
				       be16_to_cpu(eqe->event.cmd.token),
				       eqe->event.cmd.status,
				       be64_to_cpu(eqe->event.cmd.out_param));
			break;

		case MLX4_EVENT_TYPE_PORT_CHANGE:
			port = be32_to_cpu(eqe->event.port_change.port) >> 28;
			if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
				mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
						    port);
				mlx4_priv(dev)->sense.do_sense_port[port] = 1;
			} else {
				mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP,
						    port);
				mlx4_priv(dev)->sense.do_sense_port[port] = 0;
			}
			break;

		case MLX4_EVENT_TYPE_CQ_ERROR:
			mlx4_warn(dev, "CQ %s on CQN %06x\n",
				  eqe->event.cq_err.syndrome == 1 ?
				  "overrun" : "access violation",
				  be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
			mlx4_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
				      eqe->type);
			break;

		case MLX4_EVENT_TYPE_EQ_OVERFLOW:
			mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
			break;

		case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
		case MLX4_EVENT_TYPE_ECC_DETECT:
		default:
			mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u\n",
				  eqe->type, eqe->subtype, eq->eqn, eq->cons_index);
			break;
		}

		++eq->cons_index;
		eqes_found = 1;
		++set_ci;

		/*
		 * The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MLX4_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {
			eq_set_ci(eq, 0);
			set_ci = 0;
		}
	}

	eq_set_ci(eq, 1);

	return eqes_found;
}
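/*
 * Legacy INTx handler: a single interrupt line covers all EQs, so clear
 * the interrupt and poll every EQ.  Under MSI-X, each EQ instead gets its
 * own vector, serviced by mlx4_msi_x_interrupt() below.
 */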
static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
{
	struct mlx4_dev *dev = dev_ptr;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int work = 0;
	int i;

	writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);

	return IRQ_RETVAL(work);
}

static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
{
	struct mlx4_eq  *eq  = eq_ptr;
	struct mlx4_dev *dev = eq->dev;

	mlx4_eq_int(dev, eq);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}

static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
		       int eq_num)
{
	return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
			0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B);
}

static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int eq_num)
{
	return mlx4_cmd(dev, mailbox->dma, eq_num, 0, MLX4_CMD_SW2HW_EQ,
			MLX4_CMD_TIME_CLASS_A);
}

static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int eq_num)
{
	return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 0, MLX4_CMD_HW2SW_EQ,
			    MLX4_CMD_TIME_CLASS_A);
}

static int mlx4_num_eq_uar(struct mlx4_dev *dev)
{
	/*
	 * Each UAR holds 4 EQ doorbells.  To figure out how many UARs
	 * we need to map, take the difference of highest index and
	 * the lowest index we'll use and add 1.
	 */
	return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs +
		dev->caps.comp_pool) / 4 - dev->caps.reserved_eqs / 4 + 1;
}

static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int index;

	index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;

	if (!priv->eq_table.uar_map[index]) {
		priv->eq_table.uar_map[index] =
			ioremap(pci_resource_start(dev->pdev, 2) +
				((eq->eqn / 4) << PAGE_SHIFT),
				PAGE_SIZE);
		if (!priv->eq_table.uar_map[index]) {
			mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
				 eq->eqn);
			return NULL;
		}
	}

	return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
}
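/*
 * Allocate an EQ: reserve an EQ number, allocate the EQE buffer pages as
 * coherent DMA memory, describe them with an MTT, and hand the queue to
 * the firmware with the SW2HW_EQ command.
 */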
static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
			  u8 intr, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_eq_context *eq_context;
	int npages;
	u64 *dma_list = NULL;
	dma_addr_t t;
	u64 mtt_addr;
	int err = -ENOMEM;
	int i;

	eq->dev  = dev;
	eq->nent = roundup_pow_of_two(max(nent, 2));
	npages = PAGE_ALIGN(eq->nent * MLX4_EQ_ENTRY_SIZE) / PAGE_SIZE;

	eq->page_list = kmalloc(npages * sizeof *eq->page_list,
				GFP_KERNEL);
	if (!eq->page_list)
		goto err_out;

	for (i = 0; i < npages; ++i)
		eq->page_list[i].buf = NULL;

	dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
	if (!dma_list)
		goto err_out_free;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto err_out_free;
	eq_context = mailbox->buf;

	for (i = 0; i < npages; ++i) {
		eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
							  PAGE_SIZE, &t,
							  GFP_KERNEL);
		if (!eq->page_list[i].buf)
			goto err_out_free_pages;

		dma_list[i] = t;
		eq->page_list[i].map = t;

		memset(eq->page_list[i].buf, 0, PAGE_SIZE);
	}

	eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
	if (eq->eqn == -1)
		goto err_out_free_pages;

	eq->doorbell = mlx4_get_eq_uar(dev, eq);
	if (!eq->doorbell) {
		err = -ENOMEM;
		goto err_out_free_eq;
	}

	err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt);
	if (err)
		goto err_out_free_eq;

	err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);
	if (err)
		goto err_out_free_mtt;

	memset(eq_context, 0, sizeof *eq_context);
	eq_context->flags	  = cpu_to_be32(MLX4_EQ_STATUS_OK |
						MLX4_EQ_STATE_ARMED);
	eq_context->log_eq_size	  = ilog2(eq->nent);
	eq_context->intr	  = intr;
	eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, &eq->mtt);
	eq_context->mtt_base_addr_h = mtt_addr >> 32;
	eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

	err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn);
	if (err) {
		mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err);
		goto err_out_free_mtt;
	}

	kfree(dma_list);
	mlx4_free_cmd_mailbox(dev, mailbox);

	eq->cons_index = 0;

	return err;

err_out_free_mtt:
	mlx4_mtt_cleanup(dev, &eq->mtt);

err_out_free_eq:
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);

err_out_free_pages:
	for (i = 0; i < npages; ++i)
		if (eq->page_list[i].buf)
			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
					  eq->page_list[i].buf,
					  eq->page_list[i].map);

	mlx4_free_cmd_mailbox(dev, mailbox);

err_out_free:
	kfree(eq->page_list);
	kfree(dma_list);

err_out:
	return err;
}

static void mlx4_free_eq(struct mlx4_dev *dev,
			 struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	int npages = PAGE_ALIGN(MLX4_EQ_ENTRY_SIZE * eq->nent) / PAGE_SIZE;
	int i;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return;

	err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn);
	if (err)
		mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);

	/* Change "if (0)" to "if (1)" to dump the EQ context for debugging */
	if (0) {
		mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
		for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) {
			if (i % 4 == 0)
				pr_cont("[%02x] ", i * 4);
			pr_cont(" %08x", be32_to_cpup(mailbox->buf + i * 4));
			if ((i + 1) % 4 == 0)
				pr_cont("\n");
		}
	}

	mlx4_mtt_cleanup(dev, &eq->mtt);
	for (i = 0; i < npages; ++i)
		dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
				  eq->page_list[i].buf,
				  eq->page_list[i].map);

	kfree(eq->page_list);
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
	mlx4_free_cmd_mailbox(dev, mailbox);
}
static void mlx4_free_irqs(struct mlx4_dev *dev)
{
	struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, vec;

	if (eq_table->have_irq)
		free_irq(dev->pdev->irq, dev);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		if (eq_table->eq[i].have_irq) {
			free_irq(eq_table->eq[i].irq, eq_table->eq + i);
			eq_table->eq[i].have_irq = 0;
		}

	for (i = 0; i < dev->caps.comp_pool; i++) {
		/*
		 * Free the IRQs assigned from the completion vector pool.
		 * All bits should already be 0, but validate anyway.
		 */
		if (priv->msix_ctl.pool_bm & 1ULL << i) {
			/* No need to take the pool lock here */
			vec = dev->caps.num_comp_vectors + 1 + i;
			free_irq(priv->eq_table.eq[vec].irq,
				 &priv->eq_table.eq[vec]);
		}
	}

	kfree(eq_table->irq_names);
}

static int mlx4_map_clr_int(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
				 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
	if (!priv->clr_base) {
		mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n");
		return -ENOMEM;
	}

	return 0;
}

static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	iounmap(priv->clr_base);
}

int mlx4_alloc_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs,
				    sizeof *priv->eq_table.eq, GFP_KERNEL);
	if (!priv->eq_table.eq)
		return -ENOMEM;

	return 0;
}

void mlx4_free_eq_table(struct mlx4_dev *dev)
{
	kfree(mlx4_priv(dev)->eq_table.eq);
}
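/*
 * Set up the EQ table: create one EQ per completion vector, one EQ for
 * asynchronous events and one EQ per entry of the extra completion pool,
 * then request IRQs for the completion and async EQs (pool EQs get their
 * IRQs later, in mlx4_assign_eq()).
 */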
int mlx4_init_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int i;

	priv->eq_table.uar_map = kcalloc(sizeof *priv->eq_table.uar_map,
					 mlx4_num_eq_uar(dev), GFP_KERNEL);
	if (!priv->eq_table.uar_map) {
		err = -ENOMEM;
		goto err_out_free;
	}

	err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
			       dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0);
	if (err)
		goto err_out_free;

	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
		priv->eq_table.uar_map[i] = NULL;

	err = mlx4_map_clr_int(dev);
	if (err)
		goto err_out_bitmap;

	priv->eq_table.clr_mask =
		swab32(1 << (priv->eq_table.inta_pin & 31));
	priv->eq_table.clr_int  = priv->clr_base +
		(priv->eq_table.inta_pin < 32 ? 4 : 0);

	priv->eq_table.irq_names =
		kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 +
					     dev->caps.comp_pool),
			GFP_KERNEL);
	if (!priv->eq_table.irq_names) {
		err = -ENOMEM;
		goto err_out_bitmap;
	}

	for (i = 0; i < dev->caps.num_comp_vectors; ++i) {
		err = mlx4_create_eq(dev, dev->caps.num_cqs -
					  dev->caps.reserved_cqs +
					  MLX4_NUM_SPARE_EQE,
				     (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
				     &priv->eq_table.eq[i]);
		if (err) {
			--i;
			goto err_out_unmap;
		}
	}

	err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
			     (dev->flags & MLX4_FLAG_MSI_X) ? dev->caps.num_comp_vectors : 0,
			     &priv->eq_table.eq[dev->caps.num_comp_vectors]);
	if (err)
		goto err_out_comp;

	/* If the additional completion vector pool size is 0, this loop will not run */
	for (i = dev->caps.num_comp_vectors + 1;
	     i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) {

		err = mlx4_create_eq(dev, dev->caps.num_cqs -
					  dev->caps.reserved_cqs +
					  MLX4_NUM_SPARE_EQE,
				     (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
				     &priv->eq_table.eq[i]);
		if (err) {
			--i;
			goto err_out_unmap;
		}
	}

	if (dev->flags & MLX4_FLAG_MSI_X) {
		const char *eq_name;

		for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
			if (i < dev->caps.num_comp_vectors) {
				snprintf(priv->eq_table.irq_names +
					 i * MLX4_IRQNAME_SIZE,
					 MLX4_IRQNAME_SIZE,
					 "mlx4-comp-%d@pci:%s", i,
					 pci_name(dev->pdev));
			} else {
				snprintf(priv->eq_table.irq_names +
					 i * MLX4_IRQNAME_SIZE,
					 MLX4_IRQNAME_SIZE,
					 "mlx4-async@pci:%s",
					 pci_name(dev->pdev));
			}

			eq_name = priv->eq_table.irq_names +
				  i * MLX4_IRQNAME_SIZE;
			err = request_irq(priv->eq_table.eq[i].irq,
					  mlx4_msi_x_interrupt, 0, eq_name,
					  priv->eq_table.eq + i);
			if (err)
				goto err_out_async;

			priv->eq_table.eq[i].have_irq = 1;
		}
	} else {
		snprintf(priv->eq_table.irq_names,
			 MLX4_IRQNAME_SIZE,
			 DRV_NAME "@pci:%s",
			 pci_name(dev->pdev));
		err = request_irq(dev->pdev->irq, mlx4_interrupt,
				  IRQF_SHARED, priv->eq_table.irq_names, dev);
		if (err)
			goto err_out_async;

		priv->eq_table.have_irq = 1;
	}

	err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
			  priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
	if (err)
		mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
			  priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		eq_set_ci(&priv->eq_table.eq[i], 1);

	return 0;

err_out_async:
	mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]);

err_out_comp:
	i = dev->caps.num_comp_vectors - 1;

err_out_unmap:
	while (i >= 0) {
		mlx4_free_eq(dev, &priv->eq_table.eq[i]);
		--i;
	}
	mlx4_unmap_clr_int(dev);
	mlx4_free_irqs(dev);

err_out_bitmap:
	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

err_out_free:
	kfree(priv->eq_table.uar_map);

	return err;
}

void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1,
		    priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);

	mlx4_free_irqs(dev);

	for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
		mlx4_free_eq(dev, &priv->eq_table.eq[i]);

	mlx4_unmap_clr_int(dev);

	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
		if (priv->eq_table.uar_map[i])
			iounmap(priv->eq_table.uar_map[i]);

	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

	kfree(priv->eq_table.uar_map);
}
/*
 * A test that verifies that we can accept interrupts on all
 * the IRQ vectors of the device.
 * Interrupts are checked using the NOP command.
 */
int mlx4_test_interrupts(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;
	int err;

	err = mlx4_NOP(dev);
	/* When not in MSI-X, there is only one irq to check */
	if (!(dev->flags & MLX4_FLAG_MSI_X))
		return err;

	/* A loop over all completion vectors, for each vector we will check
	 * whether it works by mapping command completions to that vector
	 * and performing a NOP command
	 */
	for (i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
		/* Temporarily use polling for command completions */
		mlx4_cmd_use_polling(dev);

		/* Map the new eq to handle all asynchronous events */
		err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
				  priv->eq_table.eq[i].eqn);
		if (err) {
			mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
			mlx4_cmd_use_events(dev);
			break;
		}

		/* Go back to using events */
		mlx4_cmd_use_events(dev);
		err = mlx4_NOP(dev);
	}

	/* Return to default */
	mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
		    priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
	return err;
}
EXPORT_SYMBOL(mlx4_test_interrupts);

int mlx4_assign_eq(struct mlx4_dev *dev, char *name, int *vector)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int vec = 0, err = 0, i;

	spin_lock(&priv->msix_ctl.pool_lock);
	for (i = 0; !vec && i < dev->caps.comp_pool; i++) {
		if (~priv->msix_ctl.pool_bm & 1ULL << i) {
			priv->msix_ctl.pool_bm |= 1ULL << i;
			vec = dev->caps.num_comp_vectors + 1 + i;
			snprintf(priv->eq_table.irq_names +
					vec * MLX4_IRQNAME_SIZE,
					MLX4_IRQNAME_SIZE, "%s", name);
			err = request_irq(priv->eq_table.eq[vec].irq,
					  mlx4_msi_x_interrupt, 0,
					  &priv->eq_table.irq_names[vec << 5],
					  priv->eq_table.eq + vec);
			if (err) {
				/* Zero out the bit by flipping it */
				priv->msix_ctl.pool_bm ^= 1ULL << i;
				vec = 0;
				continue;
				/* We don't want to break here */
			}
			eq_set_ci(&priv->eq_table.eq[vec], 1);
		}
	}
	spin_unlock(&priv->msix_ctl.pool_lock);

	if (vec) {
		*vector = vec;
	} else {
		*vector = 0;
		err = (i == dev->caps.comp_pool) ? -ENOSPC : err;
	}
	return err;
}
EXPORT_SYMBOL(mlx4_assign_eq);

void mlx4_release_eq(struct mlx4_dev *dev, int vec)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	/* bitmap index */
	int i = vec - dev->caps.num_comp_vectors - 1;

	if (likely(i >= 0)) {
		/*
		 * Sanity check: make sure we're not trying to free an IRQ
		 * belonging to a legacy EQ.
		 */
		spin_lock(&priv->msix_ctl.pool_lock);
		if (priv->msix_ctl.pool_bm & 1ULL << i) {
			free_irq(priv->eq_table.eq[vec].irq,
				 &priv->eq_table.eq[vec]);
			priv->msix_ctl.pool_bm &= ~(1ULL << i);
		}
		spin_unlock(&priv->msix_ctl.pool_lock);
	}
}
EXPORT_SYMBOL(mlx4_release_eq);
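/*
 * Illustrative usage sketch (not part of this file): a consumer that wants
 * a dedicated completion vector from the pool might do roughly
 *
 *	int vector;
 *
 *	if (mlx4_assign_eq(dev, name, &vector))
 *		vector = 0;	(fall back to a legacy completion vector)
 *	...
 *	if (vector)
 *		mlx4_release_eq(dev, vector);
 *
 * mlx4_assign_eq() leaves *vector at 0 and returns an error (-ENOSPC once
 * the pool is exhausted) when no pool vector could be set up.
 */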