/*
 * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/printk.h>
#ifdef CONFIG_INFINIBAND_QIB_DCA
#include <linux/dca.h>
#endif
#include <rdma/rdma_vt.h>

#include "qib.h"
#include "qib_common.h"
#include "qib_mad.h"
#ifdef CONFIG_DEBUG_FS
#include "qib_debugfs.h"
#include "qib_verbs.h"
#endif

#undef pr_fmt
#define pr_fmt(fmt) QIB_DRV_NAME ": " fmt

/*
 * min buffers we want to have per context, after driver
 */
#define QIB_MIN_USER_CTXT_BUFCNT 7

#define QLOGIC_IB_R_SOFTWARE_MASK 0xFF
#define QLOGIC_IB_R_SOFTWARE_SHIFT 24
#define QLOGIC_IB_R_EMULATOR_MASK (1ULL<<62)

/*
 * Number of ctxts we are configured to use (to allow for more pio
 * buffers per ctxt, etc.)  Zero means use chip value.
 */
ushort qib_cfgctxts;
module_param_named(cfgctxts, qib_cfgctxts, ushort, S_IRUGO);
MODULE_PARM_DESC(cfgctxts, "Set max number of contexts to use");

unsigned qib_numa_aware;
module_param_named(numa_aware, qib_numa_aware, uint, S_IRUGO);
MODULE_PARM_DESC(numa_aware,
	"0 -> PSM allocation close to HCA, 1 -> PSM allocation local to process");
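/*
 * Illustrative usage note (not part of the original source): the module
 * parameters declared in this file can be set at load time.  The module
 * name ib_qib and the values below are assumptions for the example only:
 *
 *   modprobe ib_qib cfgctxts=16 numa_aware=1 krcvqs=2
 *
 * Equivalent settings can be made persistent via an /etc/modprobe.d/ file.
 */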
/*
 * If set, do not write to any regs if avoidable, hack to allow
 * check for deranged default register values.
 */
ushort qib_mini_init;
module_param_named(mini_init, qib_mini_init, ushort, S_IRUGO);
MODULE_PARM_DESC(mini_init, "If set, do minimal diag init");

unsigned qib_n_krcv_queues;
module_param_named(krcvqs, qib_n_krcv_queues, uint, S_IRUGO);
MODULE_PARM_DESC(krcvqs, "number of kernel receive queues per IB port");

unsigned qib_cc_table_size;
module_param_named(cc_table_size, qib_cc_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(cc_table_size, "Congestion control table entries 0 (CCA disabled - default), min = 128, max = 1984");

static void verify_interrupt(struct timer_list *);

static struct idr qib_unit_table;
u32 qib_cpulist_count;
unsigned long *qib_cpulist;

/* set number of contexts we'll actually use */
void qib_set_ctxtcnt(struct qib_devdata *dd)
{
	if (!qib_cfgctxts) {
		dd->cfgctxts = dd->first_user_ctxt + num_online_cpus();
		if (dd->cfgctxts > dd->ctxtcnt)
			dd->cfgctxts = dd->ctxtcnt;
	} else if (qib_cfgctxts < dd->num_pports)
		dd->cfgctxts = dd->ctxtcnt;
	else if (qib_cfgctxts <= dd->ctxtcnt)
		dd->cfgctxts = qib_cfgctxts;
	else
		dd->cfgctxts = dd->ctxtcnt;
	dd->freectxts = (dd->first_user_ctxt > dd->cfgctxts) ? 0 :
		dd->cfgctxts - dd->first_user_ctxt;
}

/*
 * Common code for creating the receive context array.
 */
int qib_create_ctxts(struct qib_devdata *dd)
{
	unsigned i;
	int local_node_id = pcibus_to_node(dd->pcidev->bus);

	if (local_node_id < 0)
		local_node_id = numa_node_id();
	dd->assigned_node_id = local_node_id;

	/*
	 * Allocate full ctxtcnt array, rather than just cfgctxts, because
	 * cleanup iterates across all possible ctxts.
	 */
	dd->rcd = kcalloc(dd->ctxtcnt, sizeof(*dd->rcd), GFP_KERNEL);
	if (!dd->rcd)
		return -ENOMEM;

	/* create (one or more) kctxt */
	for (i = 0; i < dd->first_user_ctxt; ++i) {
		struct qib_pportdata *ppd;
		struct qib_ctxtdata *rcd;

		if (dd->skip_kctxt_mask & (1 << i))
			continue;

		ppd = dd->pport + (i % dd->num_pports);

		rcd = qib_create_ctxtdata(ppd, i, dd->assigned_node_id);
		if (!rcd) {
			qib_dev_err(dd,
				"Unable to allocate ctxtdata for Kernel ctxt, failing\n");
			kfree(dd->rcd);
			dd->rcd = NULL;
			return -ENOMEM;
		}
		rcd->pkeys[0] = QIB_DEFAULT_P_KEY;
		rcd->seq_cnt = 1;
	}
	return 0;
}

/*
 * Common code for user and kernel context setup.
 */
struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt,
	int node_id)
{
	struct qib_devdata *dd = ppd->dd;
	struct qib_ctxtdata *rcd;

	rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, node_id);
	if (rcd) {
		INIT_LIST_HEAD(&rcd->qp_wait_list);
		rcd->node_id = node_id;
		rcd->ppd = ppd;
		rcd->dd = dd;
		rcd->cnt = 1;
		rcd->ctxt = ctxt;
		dd->rcd[ctxt] = rcd;
#ifdef CONFIG_DEBUG_FS
		if (ctxt < dd->first_user_ctxt) { /* N/A for PSM contexts */
			rcd->opstats = kzalloc_node(sizeof(*rcd->opstats),
				GFP_KERNEL, node_id);
			if (!rcd->opstats) {
				kfree(rcd);
				qib_dev_err(dd,
					"Unable to allocate per ctxt stats buffer\n");
				return NULL;
			}
		}
#endif
		dd->f_init_ctxt(rcd);

		/*
		 * To avoid wasting a lot of memory, we allocate 32KB chunks
		 * of physically contiguous memory, advance through it until
		 * used up and then allocate more.  Of course, we need
		 * memory to store those extra pointers, now.  32KB seems to
		 * be the most that is "safe" under memory pressure
		 * (creating large files and then copying them over
		 * NFS while doing lots of MPI jobs).  The OOM killer can
		 * get invoked, even though we say we can sleep and this can
		 * cause significant system problems....
		 */
		rcd->rcvegrbuf_size = 0x8000;
		rcd->rcvegrbufs_perchunk =
			rcd->rcvegrbuf_size / dd->rcvegrbufsize;
		rcd->rcvegrbuf_chunks = (rcd->rcvegrcnt +
			rcd->rcvegrbufs_perchunk - 1) /
			rcd->rcvegrbufs_perchunk;
		BUG_ON(!is_power_of_2(rcd->rcvegrbufs_perchunk));
		rcd->rcvegrbufs_perchunk_shift =
			ilog2(rcd->rcvegrbufs_perchunk);
	}
	return rcd;
}

/*
 * Common code for initializing the physical port structure.
 */
int qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
			u8 hw_pidx, u8 port)
{
	int size;

	ppd->dd = dd;
	ppd->hw_pidx = hw_pidx;
	ppd->port = port; /* IB port number, not index */

	spin_lock_init(&ppd->sdma_lock);
	spin_lock_init(&ppd->lflags_lock);
	spin_lock_init(&ppd->cc_shadow_lock);
	init_waitqueue_head(&ppd->state_wait);

	timer_setup(&ppd->symerr_clear_timer, qib_clear_symerror_on_linkup, 0);

	ppd->qib_wq = NULL;
	ppd->ibport_data.pmastats =
		alloc_percpu(struct qib_pma_counters);
	if (!ppd->ibport_data.pmastats)
		return -ENOMEM;
	ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
	ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
	ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
	if (!(ppd->ibport_data.rvp.rc_acks) ||
	    !(ppd->ibport_data.rvp.rc_qacks) ||
	    !(ppd->ibport_data.rvp.rc_delayed_comp))
		return -ENOMEM;

	if (qib_cc_table_size < IB_CCT_MIN_ENTRIES)
		goto bail;

	ppd->cc_supported_table_entries = min(max_t(int, qib_cc_table_size,
		IB_CCT_MIN_ENTRIES), IB_CCT_ENTRIES*IB_CC_TABLE_CAP_DEFAULT);

	ppd->cc_max_table_entries =
		ppd->cc_supported_table_entries/IB_CCT_ENTRIES;

	size = IB_CC_TABLE_CAP_DEFAULT * sizeof(struct ib_cc_table_entry)
		* IB_CCT_ENTRIES;
	ppd->ccti_entries = kzalloc(size, GFP_KERNEL);
	if (!ppd->ccti_entries)
		goto bail;

	size = IB_CC_CCS_ENTRIES * sizeof(struct ib_cc_congestion_entry);
	ppd->congestion_entries = kzalloc(size, GFP_KERNEL);
	if (!ppd->congestion_entries)
		goto bail_1;

	size = sizeof(struct cc_table_shadow);
	ppd->ccti_entries_shadow = kzalloc(size, GFP_KERNEL);
	if (!ppd->ccti_entries_shadow)
		goto bail_2;

	size = sizeof(struct ib_cc_congestion_setting_attr);
	ppd->congestion_entries_shadow = kzalloc(size, GFP_KERNEL);
	if (!ppd->congestion_entries_shadow)
		goto bail_3;

	return 0;

bail_3:
	kfree(ppd->ccti_entries_shadow);
	ppd->ccti_entries_shadow = NULL;
bail_2:
	kfree(ppd->congestion_entries);
	ppd->congestion_entries = NULL;
bail_1:
	kfree(ppd->ccti_entries);
	ppd->ccti_entries = NULL;
bail:
	/* User is intentionally disabling the congestion control agent */
	if (!qib_cc_table_size)
		return 0;

	if (qib_cc_table_size < IB_CCT_MIN_ENTRIES) {
		qib_cc_table_size = 0;
		qib_dev_err(dd,
			"Congestion Control table size %d less than minimum %d for port %d\n",
			qib_cc_table_size, IB_CCT_MIN_ENTRIES, port);
	}

	qib_dev_err(dd, "Congestion Control Agent disabled for port %d\n",
		port);
	return 0;
}

static int init_pioavailregs(struct qib_devdata *dd)
{
	int ret, pidx;
	u64 *status_page;

	dd->pioavailregs_dma = dma_alloc_coherent(
		&dd->pcidev->dev, PAGE_SIZE, &dd->pioavailregs_phys,
		GFP_KERNEL);
	if (!dd->pioavailregs_dma) {
		qib_dev_err(dd,
			"failed to allocate PIOavail reg area in memory\n");
		ret = -ENOMEM;
		goto done;
	}

	/*
	 * We really want L2 cache aligned, but for current CPUs of
	 * interest, they are the same.
	 */
	status_page = (u64 *)
		((char *) dd->pioavailregs_dma +
		 ((2 * L1_CACHE_BYTES +
		   dd->pioavregs * sizeof(u64)) & ~L1_CACHE_BYTES));
	/* device status comes first, for backwards compatibility */
	dd->devstatusp = status_page;
	*status_page++ = 0;
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		dd->pport[pidx].statusp = status_page;
		*status_page++ = 0;
	}

	/*
	 * Setup buffer to hold freeze and other messages, accessible to
	 * apps, following statusp.  This is per-unit, not per port.
	 */
	dd->freezemsg = (char *) status_page;
	*dd->freezemsg = 0;
	/* length of msg buffer is "whatever is left" */
	ret = (char *) status_page - (char *) dd->pioavailregs_dma;
	dd->freezelen = PAGE_SIZE - ret;

	ret = 0;

done:
	return ret;
}

/**
 * init_shadow_tids - allocate the shadow TID array
 * @dd: the qlogic_ib device
 *
 * allocate the shadow TID array, so we can qib_munlock previous
 * entries.  It may make more sense to move the pageshadow to the
 * ctxt data structure, so we only allocate memory for ctxts actually
 * in use, since we are at 8k per ctxt now.
 * We don't want failures here to prevent use of the driver/chip,
 * so no return value.
 */
static void init_shadow_tids(struct qib_devdata *dd)
{
	struct page **pages;
	dma_addr_t *addrs;

	pages = vzalloc(array_size(sizeof(struct page *),
				   dd->cfgctxts * dd->rcvtidcnt));
	if (!pages)
		goto bail;

	addrs = vzalloc(array_size(sizeof(dma_addr_t),
				   dd->cfgctxts * dd->rcvtidcnt));
	if (!addrs)
		goto bail_free;

	dd->pageshadow = pages;
	dd->physshadow = addrs;
	return;

bail_free:
	vfree(pages);
bail:
	dd->pageshadow = NULL;
}

/*
 * Do initialization for device that is only needed on
 * first detect, not on resets.
 */
static int loadtime_init(struct qib_devdata *dd)
{
	int ret = 0;

	if (((dd->revision >> QLOGIC_IB_R_SOFTWARE_SHIFT) &
	     QLOGIC_IB_R_SOFTWARE_MASK) != QIB_CHIP_SWVERSION) {
		qib_dev_err(dd,
			"Driver only handles version %d, chip swversion is %d (%llx), failing\n",
			QIB_CHIP_SWVERSION,
			(int)(dd->revision >>
				QLOGIC_IB_R_SOFTWARE_SHIFT) &
				QLOGIC_IB_R_SOFTWARE_MASK,
			(unsigned long long) dd->revision);
		ret = -ENOSYS;
		goto done;
	}

	if (dd->revision & QLOGIC_IB_R_EMULATOR_MASK)
		qib_devinfo(dd->pcidev, "%s", dd->boardversion);

	spin_lock_init(&dd->pioavail_lock);
	spin_lock_init(&dd->sendctrl_lock);
	spin_lock_init(&dd->uctxt_lock);
	spin_lock_init(&dd->qib_diag_trans_lock);
	spin_lock_init(&dd->eep_st_lock);
	mutex_init(&dd->eep_lock);

	if (qib_mini_init)
		goto done;

	ret = init_pioavailregs(dd);
	init_shadow_tids(dd);

	qib_get_eeprom_info(dd);

	/* setup time (don't start yet) to verify we got interrupt */
	timer_setup(&dd->intrchk_timer, verify_interrupt, 0);
done:
	return ret;
}

/**
 * init_after_reset - re-initialize after a reset
 * @dd: the qlogic_ib device
 *
 * sanity check at least some of the values after reset, and
 * ensure no receive or transmit (explicitly, in case reset
 * failed)
 */
static int init_after_reset(struct qib_devdata *dd)
{
	int i;

	/*
	 * Ensure chip does no sends or receives, tail updates, or
	 * pioavail updates while we re-initialize.  This is mostly
	 * for the driver data structures, not chip registers.
	 */
	for (i = 0; i < dd->num_pports; ++i) {
		/*
		 * ctxt == -1 means "all contexts". Only really safe for
		 * _dis_abling things, as here.
		 */
		dd->f_rcvctrl(dd->pport + i, QIB_RCVCTRL_CTXT_DIS |
			      QIB_RCVCTRL_INTRAVAIL_DIS |
			      QIB_RCVCTRL_TAILUPD_DIS, -1);
		/* Redundant across ports for some, but no big deal.  */
		dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_DIS |
			       QIB_SENDCTRL_AVAIL_DIS);
	}

	return 0;
}

static void enable_chip(struct qib_devdata *dd)
{
	u64 rcvmask;
	int i;

	/*
	 * Enable PIO send, and update of PIOavail regs to memory.
	 */
	for (i = 0; i < dd->num_pports; ++i)
		dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_ENB |
			       QIB_SENDCTRL_AVAIL_ENB);
	/*
	 * Enable kernel ctxts' receive and receive interrupt.
	 * Other ctxts done as user opens and inits them.
	 */
	rcvmask = QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_INTRAVAIL_ENB;
	rcvmask |= (dd->flags & QIB_NODMA_RTAIL) ?
		  QIB_RCVCTRL_TAILUPD_DIS : QIB_RCVCTRL_TAILUPD_ENB;
	for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
		struct qib_ctxtdata *rcd = dd->rcd[i];

		if (rcd)
			dd->f_rcvctrl(rcd->ppd, rcvmask, i);
	}
}

static void verify_interrupt(struct timer_list *t)
{
	struct qib_devdata *dd = from_timer(dd, t, intrchk_timer);
	u64 int_counter;

	if (!dd)
		return; /* being torn down */

	/*
	 * If we don't have a lid or any interrupts, let the user know and
	 * don't bother checking again.
	 */
	int_counter = qib_int_counter(dd) - dd->z_int_counter;
	if (int_counter == 0) {
		if (!dd->f_intr_fallback(dd))
			dev_err(&dd->pcidev->dev,
				"No interrupts detected, not usable.\n");
		else /* re-arm the timer to see if fallback works */
			mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
	}
}

static void init_piobuf_state(struct qib_devdata *dd)
{
	int i, pidx;
	u32 uctxts;

	/*
	 * Ensure all buffers are free, and fifos empty.  Buffers
	 * are common, so only do once for port 0.
	 *
	 * After enable and qib_chg_pioavailkernel so we can safely
	 * enable pioavail updates and PIOENABLE.  After this, packets
	 * are ready and able to go out.
	 */
	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_ALL);
	for (pidx = 0; pidx < dd->num_pports; ++pidx)
		dd->f_sendctrl(dd->pport + pidx, QIB_SENDCTRL_FLUSH);

	/*
	 * If not all sendbufs are used, add the one to each of the lower
	 * numbered contexts.  pbufsctxt and lastctxt_piobuf are
	 * calculated in chip-specific code because it may cause some
	 * chip-specific adjustments to be made.
	 */
	uctxts = dd->cfgctxts - dd->first_user_ctxt;
	dd->ctxts_extrabuf = dd->pbufsctxt ?
		dd->lastctxt_piobuf - (dd->pbufsctxt * uctxts) : 0;

	/*
	 * Set up the shadow copies of the piobufavail registers,
	 * which we compare against the chip registers for now, and
	 * the in memory DMA'ed copies of the registers.
	 * By now pioavail updates to memory should have occurred, so
	 * copy them into our working/shadow registers; this is in
	 * case something went wrong with abort, but mostly to get the
	 * initial values of the generation bit correct.
	 */
	for (i = 0; i < dd->pioavregs; i++) {
		__le64 tmp;

		tmp = dd->pioavailregs_dma[i];
		/*
		 * Don't need to worry about pioavailkernel here
		 * because we will call qib_chg_pioavailkernel() later
		 * in initialization, to busy out buffers as needed.
		 */
		dd->pioavailshadow[i] = le64_to_cpu(tmp);
	}
	while (i < ARRAY_SIZE(dd->pioavailshadow))
		dd->pioavailshadow[i++] = 0; /* for debugging sanity */

	/* after pioavailshadow is setup */
	qib_chg_pioavailkernel(dd, 0, dd->piobcnt2k + dd->piobcnt4k,
			       TXCHK_CHG_TYPE_KERN, NULL);
	dd->f_initvl15_bufs(dd);
}

/**
 * qib_create_workqueues - create per port workqueues
 * @dd: the qlogic_ib device
 */
static int qib_create_workqueues(struct qib_devdata *dd)
{
	int pidx;
	struct qib_pportdata *ppd;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (!ppd->qib_wq) {
			char wq_name[8]; /* 3 + 2 + 1 + 1 + 1 */

			snprintf(wq_name, sizeof(wq_name), "qib%d_%d",
				 dd->unit, pidx);
			ppd->qib_wq = alloc_ordered_workqueue(wq_name,
							      WQ_MEM_RECLAIM);
			if (!ppd->qib_wq)
				goto wq_error;
		}
	}
	return 0;
wq_error:
	pr_err("create_singlethread_workqueue failed for port %d\n",
		pidx + 1);
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->qib_wq) {
			destroy_workqueue(ppd->qib_wq);
			ppd->qib_wq = NULL;
		}
	}
	return -ENOMEM;
}

/* free the per-cpu counters allocated in qib_init_pportdata() */
static void qib_free_pportdata(struct qib_pportdata *ppd)
{
	free_percpu(ppd->ibport_data.pmastats);
	free_percpu(ppd->ibport_data.rvp.rc_acks);
	free_percpu(ppd->ibport_data.rvp.rc_qacks);
	free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
	ppd->ibport_data.pmastats = NULL;
}

/**
 * qib_init - do the actual initialization sequence on the chip
 * @dd: the qlogic_ib device
 * @reinit: reinitializing, so don't allocate new memory
 *
 * Do the actual initialization sequence on the chip.  This is done
 * both from the init routine called from the PCI infrastructure, and
 * when we reset the chip, or detect that it was reset internally,
 * or it's administratively re-enabled.
 *
 * Memory allocation here and in called routines is only done in
 * the first case (reinit == 0).  We have to be careful, because even
 * without memory allocation, we need to re-write all the chip registers
 * TIDs, etc. after the reset or enable has completed.
 */
int qib_init(struct qib_devdata *dd, int reinit)
{
	int ret = 0, pidx, lastfail = 0;
	u32 portok = 0;
	unsigned i;
	struct qib_ctxtdata *rcd;
	struct qib_pportdata *ppd;
	unsigned long flags;

	/* Set linkstate to unknown, so we can watch for a transition. */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~(QIBL_LINKACTIVE | QIBL_LINKARMED |
				 QIBL_LINKDOWN | QIBL_LINKINIT |
				 QIBL_LINKV);
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	}

	if (reinit)
		ret = init_after_reset(dd);
	else
		ret = loadtime_init(dd);
	if (ret)
		goto done;

	/* Bypass most chip-init, to get to device creation */
	if (qib_mini_init)
		return 0;

	ret = dd->f_late_initreg(dd);
	if (ret)
		goto done;

	/* dd->rcd can be NULL if early init failed */
	for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
		/*
		 * Set up the (kernel) rcvhdr queue and egr TIDs.  If doing
		 * re-init, the simplest way to handle this is to free
		 * existing, and re-allocate.
		 * Need to re-create rest of ctxt 0 ctxtdata as well.
		 */
		rcd = dd->rcd[i];
		if (!rcd)
			continue;

		lastfail = qib_create_rcvhdrq(dd, rcd);
		if (!lastfail)
			lastfail = qib_setup_eagerbufs(rcd);
		if (lastfail)
			qib_dev_err(dd,
				"failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
	}

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		int mtu;

		if (lastfail)
			ret = lastfail;
		ppd = dd->pport + pidx;
		mtu = ib_mtu_enum_to_int(qib_ibmtu);
		if (mtu == -1) {
			mtu = QIB_DEFAULT_MTU;
			qib_ibmtu = 0; /* don't leave invalid value */
		}
		/* set max we can ever have for this driver load */
		ppd->init_ibmaxlen = min(mtu > 2048 ?
					 dd->piosize4k : dd->piosize2k,
					 dd->rcvegrbufsize +
					 (dd->rcvhdrentsize << 2));
		/*
		 * Have to initialize ibmaxlen, but this will normally
		 * change immediately in qib_set_mtu().
		 */
		ppd->ibmaxlen = ppd->init_ibmaxlen;
		qib_set_mtu(ppd, mtu);

		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags |= QIBL_IB_LINK_DISABLED;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);

		lastfail = dd->f_bringup_serdes(ppd);
		if (lastfail) {
			qib_devinfo(dd->pcidev,
				"Failed to bringup IB port %u\n", ppd->port);
			lastfail = -ENETDOWN;
			continue;
		}

		portok++;
	}

	if (!portok) {
		/* none of the ports initialized */
		if (!ret && lastfail)
			ret = lastfail;
		else if (!ret)
			ret = -ENETDOWN;
		/* but continue on, so we can debug cause */
	}

	enable_chip(dd);

	init_piobuf_state(dd);

done:
	if (!ret) {
		/* chip is OK for user apps; mark it as initialized */
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;
			/*
			 * Set status even if port serdes is not initialized
			 * so that diags will work.
			 */
			*ppd->statusp |= QIB_STATUS_CHIP_PRESENT |
				QIB_STATUS_INITTED;
			if (!ppd->link_speed_enabled)
				continue;
			if (dd->flags & QIB_HAS_SEND_DMA)
				ret = qib_setup_sdma(ppd);
			timer_setup(&ppd->hol_timer, qib_hol_event, 0);
			ppd->hol_state = QIB_HOL_UP;
		}

		/* now we can enable all interrupts from the chip */
		dd->f_set_intr_state(dd, 1);

		/*
		 * Setup to verify we get an interrupt, and fallback
		 * to an alternate if necessary and possible.
		 */
		mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
		/* start stats retrieval timer */
		mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
	}

	/* if ret is non-zero, we probably should do some cleanup here... */
	return ret;
}

/*
 * These next two routines are placeholders in case we don't have per-arch
 * code for controlling write combining.  If explicit control of write
 * combining is not available, performance will probably be awful.
 */

int __attribute__((weak)) qib_enable_wc(struct qib_devdata *dd)
{
	return -EOPNOTSUPP;
}

void __attribute__((weak)) qib_disable_wc(struct qib_devdata *dd)
{
}

static inline struct qib_devdata *__qib_lookup(int unit)
{
	return idr_find(&qib_unit_table, unit);
}

struct qib_devdata *qib_lookup(int unit)
{
	struct qib_devdata *dd;
	unsigned long flags;

	spin_lock_irqsave(&qib_devs_lock, flags);
	dd = __qib_lookup(unit);
	spin_unlock_irqrestore(&qib_devs_lock, flags);

	return dd;
}

/*
 * Stop the timers during unit shutdown, or after an error late
 * in initialization.
 */
static void qib_stop_timers(struct qib_devdata *dd)
{
	struct qib_pportdata *ppd;
	int pidx;

	if (dd->stats_timer.function)
		del_timer_sync(&dd->stats_timer);
	if (dd->intrchk_timer.function)
		del_timer_sync(&dd->intrchk_timer);
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->hol_timer.function)
			del_timer_sync(&ppd->hol_timer);
		if (ppd->led_override_timer.function) {
			del_timer_sync(&ppd->led_override_timer);
			atomic_set(&ppd->led_override_timer_active, 0);
		}
		if (ppd->symerr_clear_timer.function)
			del_timer_sync(&ppd->symerr_clear_timer);
	}
}

/**
 * qib_shutdown_device - shut down a device
 * @dd: the qlogic_ib device
 *
 * This is called to make the device quiet when we are about to
 * unload the driver, and also when the device is administratively
 * disabled.  It does not free any data structures.
 * Everything it does has to be setup again by qib_init(dd, 1)
 */
static void qib_shutdown_device(struct qib_devdata *dd)
{
	struct qib_pportdata *ppd;
	unsigned pidx;

	if (dd->flags & QIB_SHUTDOWN)
		return;
	dd->flags |= QIB_SHUTDOWN;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		spin_lock_irq(&ppd->lflags_lock);
		ppd->lflags &= ~(QIBL_LINKDOWN | QIBL_LINKINIT |
				 QIBL_LINKARMED | QIBL_LINKACTIVE |
				 QIBL_LINKV);
		spin_unlock_irq(&ppd->lflags_lock);
		*ppd->statusp &= ~(QIB_STATUS_IB_CONF | QIB_STATUS_IB_READY);
	}
	dd->flags &= ~QIB_INITTED;

	/* mask interrupts, but not errors */
	dd->f_set_intr_state(dd, 0);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		dd->f_rcvctrl(ppd, QIB_RCVCTRL_TAILUPD_DIS |
			      QIB_RCVCTRL_CTXT_DIS |
			      QIB_RCVCTRL_INTRAVAIL_DIS |
			      QIB_RCVCTRL_PKEY_ENB, -1);
		/*
		 * Gracefully stop all sends allowing any in progress to
		 * trickle out first.
		 */
		dd->f_sendctrl(ppd, QIB_SENDCTRL_CLEAR);
	}

	/*
	 * Enough for anything that's going to trickle out to have actually
	 * done so.
	 */
	udelay(20);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		dd->f_setextled(ppd, 0); /* make sure LEDs are off */

		if (dd->flags & QIB_HAS_SEND_DMA)
			qib_teardown_sdma(ppd);

		dd->f_sendctrl(ppd, QIB_SENDCTRL_AVAIL_DIS |
			       QIB_SENDCTRL_SEND_DIS);
		/*
		 * Clear SerdesEnable.
		 * We can't count on interrupts since we are stopping.
		 */
		dd->f_quiet_serdes(ppd);

		if (ppd->qib_wq) {
			destroy_workqueue(ppd->qib_wq);
			ppd->qib_wq = NULL;
		}
		qib_free_pportdata(ppd);
	}

}

/**
 * qib_free_ctxtdata - free a context's allocated data
 * @dd: the qlogic_ib device
 * @rcd: the ctxtdata structure
 *
 * free up any allocated data for a context
 * This should not touch anything that would affect a simultaneous
 * re-allocation of context data, because it is called after qib_mutex
 * is released (and can be called from reinit as well).
 * It should never change any chip state, or global driver state.
 */
void qib_free_ctxtdata(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
{
	if (!rcd)
		return;

	if (rcd->rcvhdrq) {
		dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
				  rcd->rcvhdrq, rcd->rcvhdrq_phys);
		rcd->rcvhdrq = NULL;
		if (rcd->rcvhdrtail_kvaddr) {
			dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
					  rcd->rcvhdrtail_kvaddr,
					  rcd->rcvhdrqtailaddr_phys);
			rcd->rcvhdrtail_kvaddr = NULL;
		}
	}
	if (rcd->rcvegrbuf) {
		unsigned e;

		for (e = 0; e < rcd->rcvegrbuf_chunks; e++) {
			void *base = rcd->rcvegrbuf[e];
			size_t size = rcd->rcvegrbuf_size;

			dma_free_coherent(&dd->pcidev->dev, size,
					  base, rcd->rcvegrbuf_phys[e]);
		}
		kfree(rcd->rcvegrbuf);
		rcd->rcvegrbuf = NULL;
		kfree(rcd->rcvegrbuf_phys);
		rcd->rcvegrbuf_phys = NULL;
		rcd->rcvegrbuf_chunks = 0;
	}

	kfree(rcd->tid_pg_list);
	vfree(rcd->user_event_mask);
	vfree(rcd->subctxt_uregbase);
	vfree(rcd->subctxt_rcvegrbuf);
	vfree(rcd->subctxt_rcvhdr_base);
#ifdef CONFIG_DEBUG_FS
	kfree(rcd->opstats);
	rcd->opstats = NULL;
#endif
	kfree(rcd);
}

/*
 * Perform a PIO buffer bandwidth write test, to verify proper system
 * configuration.  Even when all the setup calls work, occasionally
 * BIOS or other issues can prevent write combining from working, or
 * can cause other bandwidth problems to the chip.
 *
 * This test simply writes the same buffer over and over again, and
 * measures close to the peak bandwidth to the chip (not testing
 * data bandwidth to the wire).  On chips that use an address-based
 * trigger to send packets to the wire, this is easy.  On chips that
 * use a count to trigger, we want to make sure that the packet doesn't
 * go out on the wire, or trigger flow control checks.
 */
static void qib_verify_pioperf(struct qib_devdata *dd)
{
	u32 pbnum, cnt, lcnt;
	u32 __iomem *piobuf;
	u32 *addr;
	u64 msecs, emsecs;

	piobuf = dd->f_getsendbuf(dd->pport, 0ULL, &pbnum);
	if (!piobuf) {
		qib_devinfo(dd->pcidev,
			"No PIObufs for checking perf, skipping\n");
		return;
	}

	/*
	 * Enough to give us a reasonable test, less than piobuf size, and
	 * likely multiple of store buffer length.
	 */
	cnt = 1024;

	addr = vmalloc(cnt);
	if (!addr)
		goto done;

	preempt_disable();  /* we want reasonably accurate elapsed time */
	msecs = 1 + jiffies_to_msecs(jiffies);
	for (lcnt = 0; lcnt < 10000U; lcnt++) {
		/* wait until we cross msec boundary */
		if (jiffies_to_msecs(jiffies) >= msecs)
			break;
		udelay(1);
	}

	dd->f_set_armlaunch(dd, 0);

	/*
	 * length 0, no dwords actually sent
	 */
	writeq(0, piobuf);
	qib_flush_wc();

	/*
	 * This is only roughly accurate, since even with preempt we
	 * still take interrupts that could take a while.  Running for
	 * >= 5 msec seems to get us "close enough" to accurate values.
	 */
	msecs = jiffies_to_msecs(jiffies);
	for (emsecs = lcnt = 0; emsecs <= 5UL; lcnt++) {
		qib_pio_copy(piobuf + 64, addr, cnt >> 2);
		emsecs = jiffies_to_msecs(jiffies) - msecs;
	}

	/* 1 GiB/sec, slightly over IB SDR line rate */
	if (lcnt < (emsecs * 1024U))
		qib_dev_err(dd,
			"Performance problem: bandwidth to PIO buffers is only %u MiB/sec\n",
			lcnt / (u32) emsecs);

	preempt_enable();

	vfree(addr);

done:
	/* disarm piobuf, so it's available again */
	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbnum));
	qib_sendbuf_done(dd, pbnum);
	dd->f_set_armlaunch(dd, 1);
}

void qib_free_devdata(struct qib_devdata *dd)
{
	unsigned long flags;

	spin_lock_irqsave(&qib_devs_lock, flags);
	idr_remove(&qib_unit_table, dd->unit);
	list_del(&dd->list);
	spin_unlock_irqrestore(&qib_devs_lock, flags);

#ifdef CONFIG_DEBUG_FS
	qib_dbg_ibdev_exit(&dd->verbs_dev);
#endif
	free_percpu(dd->int_counter);
	rvt_dealloc_device(&dd->verbs_dev.rdi);
}

u64 qib_int_counter(struct qib_devdata *dd)
{
	int cpu;
	u64 int_counter = 0;

	for_each_possible_cpu(cpu)
		int_counter += *per_cpu_ptr(dd->int_counter, cpu);
	return int_counter;
}

u64 qib_sps_ints(void)
{
	unsigned long flags;
	struct qib_devdata *dd;
	u64 sps_ints = 0;

	spin_lock_irqsave(&qib_devs_lock, flags);
	list_for_each_entry(dd, &qib_dev_list, list) {
		sps_ints += qib_int_counter(dd);
	}
	spin_unlock_irqrestore(&qib_devs_lock, flags);
	return sps_ints;
}

/*
 * Allocate our primary per-unit data structure.  Must be done via verbs
 * allocator, because the verbs cleanup process both does cleanup and
 * free of the data structure.
 * "extra" is for chip-specific data.
 *
 * Use the idr mechanism to get a unit number for this unit.
 */
struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
{
	unsigned long flags;
	struct qib_devdata *dd;
	int ret, nports;

	/* extra is * number of ports */
	nports = extra / sizeof(struct qib_pportdata);
	dd = (struct qib_devdata *)rvt_alloc_device(sizeof(*dd) + extra,
						    nports);
	if (!dd)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&dd->list);

	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&qib_devs_lock, flags);

	ret = idr_alloc(&qib_unit_table, dd, 0, 0, GFP_NOWAIT);
	if (ret >= 0) {
		dd->unit = ret;
		list_add(&dd->list, &qib_dev_list);
	}

	spin_unlock_irqrestore(&qib_devs_lock, flags);
	idr_preload_end();

	if (ret < 0) {
		qib_early_err(&pdev->dev,
			      "Could not allocate unit ID: error %d\n", -ret);
		goto bail;
	}
	rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s%d", "qib", dd->unit);

	dd->int_counter = alloc_percpu(u64);
	if (!dd->int_counter) {
		ret = -ENOMEM;
		qib_early_err(&pdev->dev,
			      "Could not allocate per-cpu int_counter\n");
		goto bail;
	}

	if (!qib_cpulist_count) {
		u32 count = num_online_cpus();

		qib_cpulist = kcalloc(BITS_TO_LONGS(count), sizeof(long),
				      GFP_KERNEL);
		if (qib_cpulist)
			qib_cpulist_count = count;
	}
#ifdef CONFIG_DEBUG_FS
	qib_dbg_ibdev_init(&dd->verbs_dev);
#endif
	return dd;
bail:
	if (!list_empty(&dd->list))
		list_del_init(&dd->list);
	rvt_dealloc_device(&dd->verbs_dev.rdi);
	return ERR_PTR(ret);
}

/*
 * Called from freeze mode handlers, and from PCI error
 * reporting code.  Should be paranoid about state of
 * system and data structures.
 */
void qib_disable_after_error(struct qib_devdata *dd)
{
	if (dd->flags & QIB_INITTED) {
		u32 pidx;

		dd->flags &= ~QIB_INITTED;
		if (dd->pport)
			for (pidx = 0; pidx < dd->num_pports; ++pidx) {
				struct qib_pportdata *ppd;

				ppd = dd->pport + pidx;
				if (dd->flags & QIB_PRESENT) {
					qib_set_linkstate(ppd,
						QIB_IB_LINKDOWN_DISABLE);
					dd->f_setextled(ppd, 0);
				}
				*ppd->statusp &= ~QIB_STATUS_IB_READY;
			}
	}

	/*
	 * Mark as having had an error for driver, and also
	 * for /sys and status word mapped to user programs.
	 * This marks unit as not usable, until reset.
	 */
	if (dd->devstatusp)
		*dd->devstatusp |= QIB_STATUS_HWERROR;
}

static void qib_remove_one(struct pci_dev *);
static int qib_init_one(struct pci_dev *, const struct pci_device_id *);
static void qib_shutdown_one(struct pci_dev *);

#define DRIVER_LOAD_MSG "Intel " QIB_DRV_NAME " loaded: "
#define PFX QIB_DRV_NAME ": "

static const struct pci_device_id qib_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_QLOGIC_IB_6120) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7220) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7322) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, qib_pci_tbl);

static struct pci_driver qib_driver = {
	.name = QIB_DRV_NAME,
	.probe = qib_init_one,
	.remove = qib_remove_one,
	.shutdown = qib_shutdown_one,
	.id_table = qib_pci_tbl,
	.err_handler = &qib_pci_err_handler,
};

#ifdef CONFIG_INFINIBAND_QIB_DCA

static int qib_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call = qib_notify_dca,
	.next = NULL,
	.priority = 0
};

static int qib_notify_dca_device(struct device *device, void *data)
{
	struct qib_devdata *dd = dev_get_drvdata(device);
	unsigned long event = *(unsigned long *)data;

	return dd->f_notify_dca(dd, event);
}

static int qib_notify_dca(struct notifier_block *nb, unsigned long event,
			  void *p)
{
	int rval;

	rval = driver_for_each_device(&qib_driver.driver, NULL,
				      &event, qib_notify_dca_device);
	return rval ? NOTIFY_BAD : NOTIFY_DONE;
}

#endif

/*
 * Do all the generic driver unit- and chip-independent memory
 * allocation and initialization.
 */
static int __init qib_ib_init(void)
{
	int ret;

	ret = qib_dev_init();
	if (ret)
		goto bail;

	/*
	 * These must be called before the driver is registered with
	 * the PCI subsystem.
	 */
	idr_init(&qib_unit_table);

#ifdef CONFIG_INFINIBAND_QIB_DCA
	dca_register_notify(&dca_notifier);
#endif
#ifdef CONFIG_DEBUG_FS
	qib_dbg_init();
#endif
	ret = pci_register_driver(&qib_driver);
	if (ret < 0) {
		pr_err("Unable to register driver: error %d\n", -ret);
		goto bail_dev;
	}

	/* not fatal if it doesn't work */
	if (qib_init_qibfs())
		pr_err("Unable to register ipathfs\n");
	goto bail; /* all OK */

bail_dev:
#ifdef CONFIG_INFINIBAND_QIB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
#ifdef CONFIG_DEBUG_FS
	qib_dbg_exit();
#endif
	idr_destroy(&qib_unit_table);
	qib_dev_cleanup();
bail:
	return ret;
}

module_init(qib_ib_init);

/*
 * Do the non-unit driver cleanup, memory free, etc. at unload.
 */
static void __exit qib_ib_cleanup(void)
{
	int ret;

	ret = qib_exit_qibfs();
	if (ret)
		pr_err(
			"Unable to cleanup counter filesystem: error %d\n",
			-ret);

#ifdef CONFIG_INFINIBAND_QIB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&qib_driver);
#ifdef CONFIG_DEBUG_FS
	qib_dbg_exit();
#endif

	qib_cpulist_count = 0;
	kfree(qib_cpulist);

	idr_destroy(&qib_unit_table);
	qib_dev_cleanup();
}

module_exit(qib_ib_cleanup);

/* this can only be called after a successful initialization */
static void cleanup_device_data(struct qib_devdata *dd)
{
	int ctxt;
	int pidx;
	struct qib_ctxtdata **tmp;
	unsigned long flags;

	/* users can't do anything more with chip */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		if (dd->pport[pidx].statusp)
			*dd->pport[pidx].statusp &= ~QIB_STATUS_CHIP_PRESENT;

		spin_lock(&dd->pport[pidx].cc_shadow_lock);

		kfree(dd->pport[pidx].congestion_entries);
		dd->pport[pidx].congestion_entries = NULL;
		kfree(dd->pport[pidx].ccti_entries);
		dd->pport[pidx].ccti_entries = NULL;
		kfree(dd->pport[pidx].ccti_entries_shadow);
		dd->pport[pidx].ccti_entries_shadow = NULL;
		kfree(dd->pport[pidx].congestion_entries_shadow);
		dd->pport[pidx].congestion_entries_shadow = NULL;

		spin_unlock(&dd->pport[pidx].cc_shadow_lock);
	}

	qib_disable_wc(dd);

	if (dd->pioavailregs_dma) {
		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
				  (void *) dd->pioavailregs_dma,
				  dd->pioavailregs_phys);
		dd->pioavailregs_dma = NULL;
	}

	if (dd->pageshadow) {
		struct page **tmpp = dd->pageshadow;
		dma_addr_t *tmpd = dd->physshadow;
		int i;

		for (ctxt = 0; ctxt < dd->cfgctxts; ctxt++) {
			int ctxt_tidbase = ctxt * dd->rcvtidcnt;
			int maxtid = ctxt_tidbase + dd->rcvtidcnt;

			for (i = ctxt_tidbase; i < maxtid; i++) {
				if (!tmpp[i])
					continue;
				pci_unmap_page(dd->pcidev, tmpd[i],
					       PAGE_SIZE, PCI_DMA_FROMDEVICE);
				qib_release_user_pages(&tmpp[i], 1);
				tmpp[i] = NULL;
			}
		}

		dd->pageshadow = NULL;
		vfree(tmpp);
		dd->physshadow = NULL;
		vfree(tmpd);
	}

	/*
	 * Free any resources still in use (usually just kernel contexts)
	 * at unload; we do for ctxtcnt, because that's what we allocate.
	 * We acquire lock to be really paranoid that rcd isn't being
	 * accessed from some interrupt-related code (that should not happen,
	 * but best to be sure).
	 */
	spin_lock_irqsave(&dd->uctxt_lock, flags);
	tmp = dd->rcd;
	dd->rcd = NULL;
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);
	for (ctxt = 0; tmp && ctxt < dd->ctxtcnt; ctxt++) {
		struct qib_ctxtdata *rcd = tmp[ctxt];

		tmp[ctxt] = NULL; /* debugging paranoia */
		qib_free_ctxtdata(dd, rcd);
	}
	kfree(tmp);
}

/*
 * Clean up on unit shutdown, or error during unit load after
 * successful initialization.
 */
static void qib_postinit_cleanup(struct qib_devdata *dd)
{
	/*
	 * Clean up chip-specific stuff.
	 * We check for NULL here, because it's outside
	 * the kregbase check, and we need to call it
	 * after the free_irq.  Thus it's possible that
	 * the function pointers were never initialized.
	 */
	if (dd->f_cleanup)
		dd->f_cleanup(dd);

	qib_pcie_ddcleanup(dd);

	cleanup_device_data(dd);

	qib_free_devdata(dd);
}

static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret, j, pidx, initfail;
	struct qib_devdata *dd = NULL;

	ret = qib_pcie_init(pdev, ent);
	if (ret)
		goto bail;

	/*
	 * Do device-specific initialization, function table setup, dd
	 * allocation, etc.
	 */
	switch (ent->device) {
	case PCI_DEVICE_ID_QLOGIC_IB_6120:
#ifdef CONFIG_PCI_MSI
		dd = qib_init_iba6120_funcs(pdev, ent);
#else
		qib_early_err(&pdev->dev,
			"Intel PCIE device 0x%x cannot work if CONFIG_PCI_MSI is not enabled\n",
			ent->device);
		dd = ERR_PTR(-ENODEV);
#endif
		break;

	case PCI_DEVICE_ID_QLOGIC_IB_7220:
		dd = qib_init_iba7220_funcs(pdev, ent);
		break;

	case PCI_DEVICE_ID_QLOGIC_IB_7322:
		dd = qib_init_iba7322_funcs(pdev, ent);
		break;

	default:
		qib_early_err(&pdev->dev,
			"Failing on unknown Intel deviceid 0x%x\n",
			ent->device);
		ret = -ENODEV;
	}

	if (IS_ERR(dd))
		ret = PTR_ERR(dd);
	if (ret)
		goto bail; /* error already printed */

	ret = qib_create_workqueues(dd);
	if (ret)
		goto bail;

	/* do the generic initialization */
	initfail = qib_init(dd, 0);

	ret = qib_register_ib_device(dd);

	/*
	 * Now ready for use.  this should be cleared whenever we
	 * detect a reset, or initiate one.  If earlier failure,
	 * we still create devices, so diags, etc. can be used
	 * to determine cause of problem.
	 */
	if (!qib_mini_init && !initfail && !ret)
		dd->flags |= QIB_INITTED;

	j = qib_device_create(dd);
	if (j)
		qib_dev_err(dd, "Failed to create /dev devices: %d\n", -j);
	j = qibfs_add(dd);
	if (j)
		qib_dev_err(dd, "Failed filesystem setup for counters: %d\n",
			    -j);

	if (qib_mini_init || initfail || ret) {
		qib_stop_timers(dd);
		flush_workqueue(ib_wq);
		for (pidx = 0; pidx < dd->num_pports; ++pidx)
			dd->f_quiet_serdes(dd->pport + pidx);
		if (qib_mini_init)
			goto bail;
		if (!j) {
			(void) qibfs_remove(dd);
			qib_device_remove(dd);
		}
		if (!ret)
			qib_unregister_ib_device(dd);
		qib_postinit_cleanup(dd);
		if (initfail)
			ret = initfail;
		goto bail;
	}

	ret = qib_enable_wc(dd);
	if (ret) {
		qib_dev_err(dd,
			"Write combining not enabled (err %d): performance may be poor\n",
			-ret);
		ret = 0;
	}

	qib_verify_pioperf(dd);
bail:
	return ret;
}

static void qib_remove_one(struct pci_dev *pdev)
{
	struct qib_devdata *dd = pci_get_drvdata(pdev);
	int ret;

	/* unregister from IB core */
	qib_unregister_ib_device(dd);

	/*
	 * Disable the IB link, disable interrupts on the device,
	 * clear dma engines, etc.
	 */
	if (!qib_mini_init)
		qib_shutdown_device(dd);

	qib_stop_timers(dd);

	/* wait until all of our (qsfp) queue_work() calls complete */
	flush_workqueue(ib_wq);

	ret = qibfs_remove(dd);
	if (ret)
		qib_dev_err(dd, "Failed counters filesystem cleanup: %d\n",
			    -ret);

	qib_device_remove(dd);

	qib_postinit_cleanup(dd);
}

static void qib_shutdown_one(struct pci_dev *pdev)
{
	struct qib_devdata *dd = pci_get_drvdata(pdev);

	qib_shutdown_device(dd);
}

/**
 * qib_create_rcvhdrq - create a receive header queue
 * @dd: the qlogic_ib device
 * @rcd: the context data
 *
 * This must be contiguous memory (from an i/o perspective), and must be
 * DMA'able (which means for some systems, it will go through an IOMMU,
 * or be forced into a low address range).
 */
int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
{
	unsigned amt;
	int old_node_id;

	if (!rcd->rcvhdrq) {
		dma_addr_t phys_hdrqtail;
		gfp_t gfp_flags;

		amt = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
			    sizeof(u32), PAGE_SIZE);
		gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ?
			GFP_USER : GFP_KERNEL;

		old_node_id = dev_to_node(&dd->pcidev->dev);
		set_dev_node(&dd->pcidev->dev, rcd->node_id);
		rcd->rcvhdrq = dma_alloc_coherent(
			&dd->pcidev->dev, amt, &rcd->rcvhdrq_phys,
			gfp_flags | __GFP_COMP);
		set_dev_node(&dd->pcidev->dev, old_node_id);

		if (!rcd->rcvhdrq) {
			qib_dev_err(dd,
				"attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n",
				amt, rcd->ctxt);
			goto bail;
		}

		if (rcd->ctxt >= dd->first_user_ctxt) {
			rcd->user_event_mask = vmalloc_user(PAGE_SIZE);
			if (!rcd->user_event_mask)
				goto bail_free_hdrq;
		}

		if (!(dd->flags & QIB_NODMA_RTAIL)) {
			set_dev_node(&dd->pcidev->dev, rcd->node_id);
			rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(
				&dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
				gfp_flags);
			set_dev_node(&dd->pcidev->dev, old_node_id);
			if (!rcd->rcvhdrtail_kvaddr)
				goto bail_free;
			rcd->rcvhdrqtailaddr_phys = phys_hdrqtail;
		}

		rcd->rcvhdrq_size = amt;
	}

	/* clear for security and sanity on each use */
	memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
	if (rcd->rcvhdrtail_kvaddr)
		memset(rcd->rcvhdrtail_kvaddr, 0, PAGE_SIZE);
	return 0;

bail_free:
	qib_dev_err(dd,
		"attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
		rcd->ctxt);
	vfree(rcd->user_event_mask);
	rcd->user_event_mask = NULL;
bail_free_hdrq:
	dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
			  rcd->rcvhdrq_phys);
	rcd->rcvhdrq = NULL;
bail:
	return -ENOMEM;
}

/**
 * qib_setup_eagerbufs - allocate eager buffers, both kernel and user contexts
 * @rcd: the context we are setting up.
 *
 * Allocate the eager TID buffers and program them into chip.
 * They are no longer completely contiguous, we do multiple allocation
 * calls.  Otherwise we get the OOM code involved, by asking for too
 * much per call, with disastrous results on some kernels.
 */
int qib_setup_eagerbufs(struct qib_ctxtdata *rcd)
{
	struct qib_devdata *dd = rcd->dd;
	unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff;
	size_t size;
	gfp_t gfp_flags;
	int old_node_id;

	/*
	 * GFP_USER, but without GFP_FS, so buffer cache can be
	 * coalesced (we hope); otherwise, even at order 4,
	 * heavy filesystem activity makes these fail, and we can
	 * use compound pages.
	 */
	gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;

	egrcnt = rcd->rcvegrcnt;
	egroff = rcd->rcvegr_tid_base;
	egrsize = dd->rcvegrbufsize;

	chunk = rcd->rcvegrbuf_chunks;
	egrperchunk = rcd->rcvegrbufs_perchunk;
	size = rcd->rcvegrbuf_size;
	if (!rcd->rcvegrbuf) {
		rcd->rcvegrbuf =
			kcalloc_node(chunk, sizeof(rcd->rcvegrbuf[0]),
				     GFP_KERNEL, rcd->node_id);
		if (!rcd->rcvegrbuf)
			goto bail;
	}
	if (!rcd->rcvegrbuf_phys) {
		rcd->rcvegrbuf_phys =
			kmalloc_array_node(chunk,
					   sizeof(rcd->rcvegrbuf_phys[0]),
					   GFP_KERNEL, rcd->node_id);
		if (!rcd->rcvegrbuf_phys)
			goto bail_rcvegrbuf;
	}
	for (e = 0; e < rcd->rcvegrbuf_chunks; e++) {
		if (rcd->rcvegrbuf[e])
			continue;

		old_node_id = dev_to_node(&dd->pcidev->dev);
		set_dev_node(&dd->pcidev->dev, rcd->node_id);
		rcd->rcvegrbuf[e] =
			dma_alloc_coherent(&dd->pcidev->dev, size,
					   &rcd->rcvegrbuf_phys[e],
					   gfp_flags);
		set_dev_node(&dd->pcidev->dev, old_node_id);
		if (!rcd->rcvegrbuf[e])
			goto bail_rcvegrbuf_phys;
	}

	rcd->rcvegr_phys = rcd->rcvegrbuf_phys[0];

	for (e = chunk = 0; chunk < rcd->rcvegrbuf_chunks; chunk++) {
		dma_addr_t pa = rcd->rcvegrbuf_phys[chunk];
		unsigned i;

		/* clear for security and sanity on each use */
		memset(rcd->rcvegrbuf[chunk], 0, size);

		for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) {
			dd->f_put_tid(dd, e + egroff +
				      (u64 __iomem *)
				      ((char __iomem *)
				       dd->kregbase +
				       dd->rcvegrbase),
				      RCVHQ_RCV_TYPE_EAGER, pa);
			pa += egrsize;
		}
		cond_resched(); /* don't hog the cpu */
	}

	return 0;

bail_rcvegrbuf_phys:
	for (e = 0; e < rcd->rcvegrbuf_chunks && rcd->rcvegrbuf[e]; e++)
		dma_free_coherent(&dd->pcidev->dev, size,
				  rcd->rcvegrbuf[e], rcd->rcvegrbuf_phys[e]);
	kfree(rcd->rcvegrbuf_phys);
	rcd->rcvegrbuf_phys = NULL;
bail_rcvegrbuf:
	kfree(rcd->rcvegrbuf);
	rcd->rcvegrbuf = NULL;
bail:
	return -ENOMEM;
}

/*
 * Note: Changes to this routine should be mirrored
 * for the diagnostics routine qib_remap_ioaddr32().
 * There is also related code for VL15 buffers in qib_init_7322_variables().
 * The teardown code that unmaps is in qib_pcie_ddcleanup()
 */
int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen)
{
	u64 __iomem *qib_kregbase = NULL;
	void __iomem *qib_piobase = NULL;
	u64 __iomem *qib_userbase = NULL;
	u64 qib_kreglen;
	u64 qib_pio2koffset = dd->piobufbase & 0xffffffff;
	u64 qib_pio4koffset = dd->piobufbase >> 32;
	u64 qib_pio2klen = dd->piobcnt2k * dd->palign;
	u64 qib_pio4klen = dd->piobcnt4k * dd->align4k;
	u64 qib_physaddr = dd->physaddr;
	u64 qib_piolen;
	u64 qib_userlen = 0;

	/*
	 * Free the old mapping because the kernel will try to reuse the
	 * old mapping and not create a new mapping with the
	 * write combining attribute.
	 */
	iounmap(dd->kregbase);
	dd->kregbase = NULL;

	/*
	 * Assumes chip address space looks like:
	 *	- kregs + sregs + cregs + uregs (in any order)
	 *	- piobufs (2K and 4K bufs in either order)
	 * or:
	 *	- kregs + sregs + cregs (in any order)
	 *	- piobufs (2K and 4K bufs in either order)
	 *	- uregs
	 */
	if (dd->piobcnt4k == 0) {
		qib_kreglen = qib_pio2koffset;
		qib_piolen = qib_pio2klen;
	} else if (qib_pio2koffset < qib_pio4koffset) {
		qib_kreglen = qib_pio2koffset;
		qib_piolen = qib_pio4koffset + qib_pio4klen - qib_kreglen;
	} else {
		qib_kreglen = qib_pio4koffset;
		qib_piolen = qib_pio2koffset + qib_pio2klen - qib_kreglen;
	}
	qib_piolen += vl15buflen;
	/* Map just the configured ports (not all hw ports) */
	if (dd->uregbase > qib_kreglen)
		qib_userlen = dd->ureg_align * dd->cfgctxts;

	/* Sanity checks passed, now create the new mappings */
	qib_kregbase = ioremap_nocache(qib_physaddr, qib_kreglen);
	if (!qib_kregbase)
		goto bail;

	qib_piobase = ioremap_wc(qib_physaddr + qib_kreglen, qib_piolen);
	if (!qib_piobase)
		goto bail_kregbase;

	if (qib_userlen) {
		qib_userbase = ioremap_nocache(qib_physaddr + dd->uregbase,
					       qib_userlen);
		if (!qib_userbase)
			goto bail_piobase;
	}

	dd->kregbase = qib_kregbase;
	dd->kregend = (u64 __iomem *)
		((char __iomem *) qib_kregbase + qib_kreglen);
	dd->piobase = qib_piobase;
	dd->pio2kbase = (void __iomem *)
		(((char __iomem *) dd->piobase) +
		 qib_pio2koffset - qib_kreglen);
	if (dd->piobcnt4k)
		dd->pio4kbase = (void __iomem *)
			(((char __iomem *) dd->piobase) +
			 qib_pio4koffset - qib_kreglen);
	if (qib_userlen)
		/* ureg will now be accessed relative to dd->userbase */
		dd->userbase = qib_userbase;
	return 0;

bail_piobase:
	iounmap(qib_piobase);
bail_kregbase:
	iounmap(qib_kregbase);
bail:
	return -ENOMEM;
}