/*
 * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>

#include "qib.h"

static unsigned qib_hol_timeout_ms = 3000;
module_param_named(hol_timeout_ms, qib_hol_timeout_ms, uint, S_IRUGO);
MODULE_PARM_DESC(hol_timeout_ms,
		 "duration of user app suspension after link failure");

unsigned qib_sdma_fetch_arb = 1;
module_param_named(fetch_arb, qib_sdma_fetch_arb, uint, S_IRUGO);
MODULE_PARM_DESC(fetch_arb, "IBA7220: change SDMA descriptor arbitration");

/**
 * qib_disarm_piobufs - cancel a range of PIO buffers
 * @dd: the qlogic_ib device
 * @first: the first PIO buffer to cancel
 * @cnt: the number of PIO buffers to cancel
 *
 * Cancel a range of PIO buffers.  Used at user process close,
 * in case it died while writing to a PIO buffer.
 */
void qib_disarm_piobufs(struct qib_devdata *dd, unsigned first, unsigned cnt)
{
	unsigned long flags;
	unsigned i;
	unsigned last;

	last = first + cnt;
	spin_lock_irqsave(&dd->pioavail_lock, flags);
	for (i = first; i < last; i++) {
		__clear_bit(i, dd->pio_need_disarm);
		dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
	}
	spin_unlock_irqrestore(&dd->pioavail_lock, flags);
}

/*
 * This is called by a user process when it sees the DISARM_BUFS event
 * bit is set.
 */
int qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *rcd)
{
	struct qib_devdata *dd = rcd->dd;
	unsigned i;
	unsigned last;

	last = rcd->pio_base + rcd->piocnt;
	/*
	 * Don't need uctxt_lock here, since user has called in to us.
	 * Clear at start in case more interrupts set bits while we
	 * are disarming.
	 */
	if (rcd->user_event_mask) {
		/*
		 * subctxt_cnt is 0 if not shared, so do base
		 * separately, first, then remaining subctxt, if any
		 */
		clear_bit(_QIB_EVENT_DISARM_BUFS_BIT, &rcd->user_event_mask[0]);
		for (i = 1; i < rcd->subctxt_cnt; i++)
			clear_bit(_QIB_EVENT_DISARM_BUFS_BIT,
				  &rcd->user_event_mask[i]);
	}
	spin_lock_irq(&dd->pioavail_lock);
	for (i = rcd->pio_base; i < last; i++) {
		if (__test_and_clear_bit(i, dd->pio_need_disarm))
			dd->f_sendctrl(rcd->ppd, QIB_SENDCTRL_DISARM_BUF(i));
	}
	spin_unlock_irq(&dd->pioavail_lock);
	return 0;
}
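
/*
 * Deferred-disarm handshake, in brief: the IRQ error path
 * (qib_disarm_piobufs_set() below) must not disarm a buffer that is
 * still owned by a user context or being written; instead it sets the
 * buffer's bit in pio_need_disarm and posts _QIB_EVENT_DISARM_BUFS_BIT
 * to the owning context.  The user process later calls back in via
 * qib_disarm_piobufs_ifneeded() above, which performs the disarm under
 * pioavail_lock once the buffer is no longer in use.
 */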

static struct qib_pportdata *is_sdma_buf(struct qib_devdata *dd, unsigned i)
{
	struct qib_pportdata *ppd;
	unsigned pidx;

	for (pidx = 0; pidx < dd->num_pports; pidx++) {
		ppd = dd->pport + pidx;
		if (i >= ppd->sdma_state.first_sendbuf &&
		    i < ppd->sdma_state.last_sendbuf)
			return ppd;
	}
	return NULL;
}

/*
 * Return true if send buffer is being used by a user context.
 * Sets _QIB_EVENT_DISARM_BUFS_BIT in user_event_mask as a side effect.
 */
static int find_ctxt(struct qib_devdata *dd, unsigned bufn)
{
	struct qib_ctxtdata *rcd;
	unsigned ctxt;
	int ret = 0;

	spin_lock(&dd->uctxt_lock);
	for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
		rcd = dd->rcd[ctxt];
		if (!rcd || bufn < rcd->pio_base ||
		    bufn >= rcd->pio_base + rcd->piocnt)
			continue;
		if (rcd->user_event_mask) {
			int i;

			/*
			 * subctxt_cnt is 0 if not shared, so do base
			 * separately, first, then remaining subctxt, if any
			 */
			set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
				&rcd->user_event_mask[0]);
			for (i = 1; i < rcd->subctxt_cnt; i++)
				set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
					&rcd->user_event_mask[i]);
		}
		ret = 1;
		break;
	}
	spin_unlock(&dd->uctxt_lock);

	return ret;
}

/*
 * Disarm a set of send buffers.  If the buffer might be actively being
 * written to, mark the buffer to be disarmed later when it is not being
 * written to.
 *
 * This should only be called from the IRQ error handler.
 */
void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask,
			    unsigned cnt)
{
	struct qib_pportdata *ppd, *pppd[QIB_MAX_IB_PORTS];
	unsigned i;
	unsigned long flags;

	for (i = 0; i < dd->num_pports; i++)
		pppd[i] = NULL;

	for (i = 0; i < cnt; i++) {
		if (!test_bit(i, mask))
			continue;
		/*
		 * If the buffer is owned by the DMA hardware,
		 * reset the DMA engine.
		 */
		ppd = is_sdma_buf(dd, i);
		if (ppd) {
			/* ppd->port is 1-based; index the array 0-based */
			pppd[ppd->port - 1] = ppd;
			continue;
		}
		/*
		 * If the kernel is writing the buffer or the buffer is
		 * owned by a user process, we can't clear it yet.
		 */
		spin_lock_irqsave(&dd->pioavail_lock, flags);
		if (test_bit(i, dd->pio_writing) ||
		    (!test_bit(i << 1, dd->pioavailkernel) &&
		     find_ctxt(dd, i)))
			__set_bit(i, dd->pio_need_disarm);
		else
			dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
		spin_unlock_irqrestore(&dd->pioavail_lock, flags);
	}

	/* do cancel_sends once per port that had sdma piobufs in error */
	for (i = 0; i < dd->num_pports; i++)
		if (pppd[i])
			qib_cancel_sends(pppd[i]);
}
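
/*
 * Layout of the pioavail shadow (and pioavailkernel) bitmaps, as
 * implied by the bit arithmetic below and in qib_getsendbuf_range():
 * two bits per send buffer n, selected by
 * QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT (0) and
 * QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT (1):
 *
 *	bit 2n     - generation ("check") bit, toggled by the chip
 *		     each time buffer n completes
 *	bit 2n + 1 - busy bit, set while the buffer is allocated
 *
 * pioavailkernel marks kernel-owned buffers at bit 2n as well.
 */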

/**
 * update_send_bufs - update shadow copy of the PIO availability map
 * @dd: the qlogic_ib device
 *
 * Called whenever our local copy indicates we have run out of send buffers.
 */
static void update_send_bufs(struct qib_devdata *dd)
{
	unsigned long flags;
	unsigned i;
	const unsigned piobregs = dd->pioavregs;

	/*
	 * If the generation (check) bits have changed, then we update the
	 * busy bit for the corresponding PIO buffer.  This algorithm will
	 * modify positions to the value they already have in some cases
	 * (i.e., no change), but it's faster than changing only the bits
	 * that have changed.
	 *
	 * We would like to do this atomically, to avoid spinlocks in the
	 * critical send path, but that's not really possible, given the
	 * type of changes, and that this routine could be called on
	 * multiple CPUs simultaneously, so we lock in this routine only,
	 * to avoid conflicting updates; all we change is the shadow, and
	 * it's a single 64 bit memory location, so by definition the update
	 * is atomic in terms of what other CPUs can see in testing the
	 * bits.  The spin_lock overhead isn't too bad, since it only
	 * happens when all buffers are in use, so only CPU overhead, not
	 * latency or bandwidth is affected.
	 */
	if (!dd->pioavailregs_dma)
		return;
	spin_lock_irqsave(&dd->pioavail_lock, flags);
	for (i = 0; i < piobregs; i++) {
		u64 pchbusy, pchg, piov, pnew;

		piov = le64_to_cpu(dd->pioavailregs_dma[i]);
		pchg = dd->pioavailkernel[i] &
			~(dd->pioavailshadow[i] ^ piov);
		pchbusy = pchg << QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT;
		if (pchg && (pchbusy & dd->pioavailshadow[i])) {
			pnew = dd->pioavailshadow[i] & ~pchbusy;
			pnew |= piov & pchbusy;
			dd->pioavailshadow[i] = pnew;
		}
	}
	spin_unlock_irqrestore(&dd->pioavail_lock, flags);
}

/*
 * Debugging code and stats updates if no pio buffers available.
 */
static noinline void no_send_bufs(struct qib_devdata *dd)
{
	dd->upd_pio_shadow = 1;

	/* not atomic, but if we lose a stat count in a while, that's OK */
	qib_stats.sps_nopiobufs++;
}

/*
 * Common code for normal driver send buffer allocation, and reserved
 * allocation.
 *
 * Do appropriate marking as busy, etc.
 * Returns buffer pointer if one is found, otherwise NULL.
 */
u32 __iomem *qib_getsendbuf_range(struct qib_devdata *dd, u32 *pbufnum,
				  u32 first, u32 last)
{
	unsigned i, j, updated = 0;
	unsigned nbufs;
	unsigned long flags;
	unsigned long *shadow = dd->pioavailshadow;
	u32 __iomem *buf;

	if (!(dd->flags & QIB_PRESENT))
		return NULL;

	nbufs = last - first + 1; /* number in range to check */
	if (dd->upd_pio_shadow) {
		/*
		 * Minor optimization.  If we had no buffers on last call,
		 * start out by doing the update; continue and do scan even
		 * if no buffers were updated, to be paranoid.
		 */
		update_send_bufs(dd);
		updated++;
	}
	i = first;
rescan:
	/*
	 * While test_and_set_bit() is atomic, we do that and then the
	 * change_bit(), and the pair is not.  See if this is the cause
	 * of the remaining armlaunch errors.
	 */
	spin_lock_irqsave(&dd->pioavail_lock, flags);
	for (j = 0; j < nbufs; j++, i++) {
		if (i > last)
			i = first;
		if (__test_and_set_bit((2 * i) + 1, shadow))
			continue;
		/* flip generation bit */
		__change_bit(2 * i, shadow);
		/* remember that the buffer can be written to now */
		__set_bit(i, dd->pio_writing);
		break;
	}
	spin_unlock_irqrestore(&dd->pioavail_lock, flags);

	if (j == nbufs) {
		if (!updated) {
			/*
			 * First time through; shadow exhausted, but there
			 * may be buffers available, so try an update and
			 * then rescan.
			 */
			update_send_bufs(dd);
			updated++;
			i = first;
			goto rescan;
		}
		no_send_bufs(dd);
		buf = NULL;
	} else {
		if (i < dd->piobcnt2k)
			buf = (u32 __iomem *)(dd->pio2kbase +
				i * dd->palign);
		else if (i < dd->piobcnt2k + dd->piobcnt4k || !dd->piovl15base)
			buf = (u32 __iomem *)(dd->pio4kbase +
				(i - dd->piobcnt2k) * dd->align4k);
		else
			buf = (u32 __iomem *)(dd->piovl15base +
				(i - (dd->piobcnt2k + dd->piobcnt4k)) *
				dd->align4k);
		if (pbufnum)
			*pbufnum = i;
		dd->upd_pio_shadow = 0;
	}

	return buf;
}
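
/*
 * Illustrative pairing of the allocate and done routines around a PIO
 * write (a sketch, not an actual call site; real callers live elsewhere
 * in the driver):
 *
 *	u32 bufn;
 *	u32 __iomem *piobuf;
 *
 *	piobuf = qib_getsendbuf_range(dd, &bufn, first, last);
 *	if (!piobuf)
 *		return;			- no buffer; caller backs off
 *	... copy PBC and payload to piobuf ...
 *	qib_sendbuf_done(dd, bufn);	- clears pio_writing, disarms
 *					  now if a disarm was deferred
 */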

/*
 * Record that the caller is finished writing to the buffer so we don't
 * disarm it while it is being written and disarm it now if needed.
 */
void qib_sendbuf_done(struct qib_devdata *dd, unsigned n)
{
	unsigned long flags;

	spin_lock_irqsave(&dd->pioavail_lock, flags);
	__clear_bit(n, dd->pio_writing);
	if (__test_and_clear_bit(n, dd->pio_need_disarm))
		dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(n));
	spin_unlock_irqrestore(&dd->pioavail_lock, flags);
}

/**
 * qib_chg_pioavailkernel - change which send buffers are available for kernel
 * @dd: the qlogic_ib device
 * @start: the starting send buffer number
 * @len: the number of send buffers
 * @avail: true if the buffers are available for kernel use, false otherwise
 * @rcd: the context pointer, passed through to the chip's txchk_change hook
 */
void qib_chg_pioavailkernel(struct qib_devdata *dd, unsigned start,
			    unsigned len, u32 avail, struct qib_ctxtdata *rcd)
{
	unsigned long flags;
	unsigned end;
	unsigned ostart = start;

	/* There are two bits per send buffer (busy and generation) */
	start *= 2;
	end = start + len * 2;

	spin_lock_irqsave(&dd->pioavail_lock, flags);
	/* Set or clear the busy bit in the shadow. */
	while (start < end) {
		if (avail) {
			unsigned long dma;
			int i;

			/*
			 * The BUSY bit will never be set, because we disarm
			 * the user buffers before we hand them back to the
			 * kernel.  We do have to make sure the generation
			 * bit is set correctly in shadow, since it could
			 * have changed many times while allocated to user.
			 * We can't use the bitmap functions on the full
			 * dma array because it is always little-endian, so
			 * we have to flip to host-order first.
			 * BITS_PER_LONG is slightly wrong, since it's
			 * always 64 bits per register in chip...
			 * We only work on 64 bit kernels, so that's OK.
			 */
			i = start / BITS_PER_LONG;
			__clear_bit(QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT + start,
				    dd->pioavailshadow);
			dma = (unsigned long)
				le64_to_cpu(dd->pioavailregs_dma[i]);
			if (test_bit((QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT +
				      start) % BITS_PER_LONG, &dma))
				__set_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT +
					  start, dd->pioavailshadow);
			else
				__clear_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT
					    + start, dd->pioavailshadow);
			__set_bit(start, dd->pioavailkernel);
		} else {
			__set_bit(start + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT,
				  dd->pioavailshadow);
			__clear_bit(start, dd->pioavailkernel);
		}
		start += 2;
	}

	spin_unlock_irqrestore(&dd->pioavail_lock, flags);

	dd->f_txchk_change(dd, ostart, len, avail, rcd);
}
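
/*
 * Within this file, qib_cancel_sends() is reached from two paths:
 * qib_disarm_piobufs_set() calls it once per port that had SDMA send
 * buffers in error, and qib_hol_down() calls it when the link drops
 * (unless IB autonegotiation is in progress).
 */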

/*
 * Flush all sends that might be in the ready to send state, as well as any
 * that are in the process of being sent.  Used whenever we need to be
 * sure the send side is idle.  Cleans up all buffer state by canceling
 * all pio buffers, and issuing an abort, which cleans up anything in the
 * launch fifo.  The cancel is superfluous on some chip versions, but
 * it's safer to always do it.
 * PIOAvail bits are updated by the chip as if a normal send had happened.
 */
void qib_cancel_sends(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	struct qib_ctxtdata *rcd;
	unsigned long flags;
	unsigned ctxt;
	unsigned i;
	unsigned last;

	/*
	 * Tell PSM to disarm buffers again before trying to reuse them.
	 * We need to be sure the rcd doesn't change out from under us
	 * while we do so.  We hold the two locks sequentially.  We might
	 * needlessly set some need_disarm bits as a result, if the
	 * context is closed after we release the uctxt_lock, but that's
	 * fairly benign, and safer than nesting the locks.
	 */
	for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
		spin_lock_irqsave(&dd->uctxt_lock, flags);
		rcd = dd->rcd[ctxt];
		if (rcd && rcd->ppd == ppd) {
			last = rcd->pio_base + rcd->piocnt;
			if (rcd->user_event_mask) {
				/*
				 * subctxt_cnt is 0 if not shared, so do base
				 * separately, first, then remaining subctxt,
				 * if any
				 */
				set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
					&rcd->user_event_mask[0]);
				for (i = 1; i < rcd->subctxt_cnt; i++)
					set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
						&rcd->user_event_mask[i]);
			}
			i = rcd->pio_base;
			spin_unlock_irqrestore(&dd->uctxt_lock, flags);
			spin_lock_irqsave(&dd->pioavail_lock, flags);
			for (; i < last; i++)
				__set_bit(i, dd->pio_need_disarm);
			spin_unlock_irqrestore(&dd->pioavail_lock, flags);
		} else
			spin_unlock_irqrestore(&dd->uctxt_lock, flags);
	}

	if (!(dd->flags & QIB_HAS_SEND_DMA))
		dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_ALL |
				    QIB_SENDCTRL_FLUSH);
}

/*
 * Force an update of in-memory copy of the pioavail registers, when
 * needed for any of a variety of reasons.
 * If already off, this routine is a nop, on the assumption that the
 * caller (or set of callers) will "do the right thing".
 * This is a per-device operation, so just the first port.
 */
void qib_force_pio_avail_update(struct qib_devdata *dd)
{
	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
}
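
/*
 * Head-of-line (HoL) blocking handling, summarized from the helpers
 * below:
 *
 *	link DOWN -> qib_hol_down():  cancel sends, unless IB autoneg
 *				      is in progress
 *	link INIT -> qib_hol_init():  state = QIB_HOL_INIT, arm hol_timer
 *	link UP   -> qib_hol_up():    state = QIB_HOL_UP; a running
 *				      timer becomes a nop
 *	timer     -> qib_hol_event(): if not UP, flush sends and re-arm
 */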

void qib_hol_down(struct qib_pportdata *ppd)
{
	/*
	 * Cancel sends when the link goes DOWN so that we aren't doing it
	 * at INIT when we might be trying to send SMI packets.
	 */
	if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
		qib_cancel_sends(ppd);
}

/*
 * Link is at INIT.
 * We start the HoL timer so we can detect stuck packets blocking SMP
 * replies.  Timer may already be running, so use mod_timer, not add_timer.
 */
void qib_hol_init(struct qib_pportdata *ppd)
{
	if (ppd->hol_state != QIB_HOL_INIT) {
		ppd->hol_state = QIB_HOL_INIT;
		mod_timer(&ppd->hol_timer,
			  jiffies + msecs_to_jiffies(qib_hol_timeout_ms));
	}
}

/*
 * Link is up, continue any user processes, and ensure timer
 * is a nop, if running.  Let timer keep running, if set; it
 * will nop when it sees the link is up.
 */
void qib_hol_up(struct qib_pportdata *ppd)
{
	ppd->hol_state = QIB_HOL_UP;
}

/*
 * This is only called via the timer.
 */
void qib_hol_event(unsigned long opaque)
{
	struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;

	/* If hardware error, etc, skip. */
	if (!(ppd->dd->flags & QIB_INITTED))
		return;

	if (ppd->hol_state != QIB_HOL_UP) {
		/*
		 * Try to flush sends in case a stuck packet is blocking
		 * SMP replies.
		 */
		qib_hol_down(ppd);
		mod_timer(&ppd->hol_timer,
			  jiffies + msecs_to_jiffies(qib_hol_timeout_ms));
	}
}